repo_name (string, 6–97 chars) | path (string, 3–341 chars) | text (string, 8–1.02M chars)
|---|---|---|
wejdeneHaouari/blockbench-sawtooth
|
benchmark/sawtooth_v1_2/block_server_subscriber/main.py
|
import argparse
import logging
import sys
from block_server_subscriber.subscriber import Subscriber
from block_server_subscriber.databaseImp import DatabaseImp
from block_server_subscriber.event_handling import EventHandler
LOGGER = logging.getLogger(__name__)
def parse_args(args):
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-v', '--verbose',
action='count',
default=0,
help='Increase output sent to stderr')
parser.add_argument(
'-C', '--connect',
help='The url of the validator to subscribe to',
default='tcp://localhost:4004')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API',
default='http://127.0.0.1:8008')
parser.add_argument(
'--uri',
type=str,
help='database URI',
default='mongodb://127.0.0.1:27017/')
return parser.parse_args(args)
def init_logger(level):
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
if level == 1:
logger.setLevel(logging.INFO)
elif level > 1:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.WARN)
def do_subscribe(opts):
LOGGER.info('Starting block server...')
subscriber = Subscriber(opts.connect)
eventHandler = EventHandler(opts.url)
subscriber.add_handler(eventHandler.get_events_handler())
subscriber.listen_to_event()
def main():
opts = parse_args(sys.argv[1:])
init_logger(opts.verbose)
try:
LOGGER.warning("## initialize db ##")
DatabaseImp.initialize(opts.uri)
do_subscribe(opts)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
    main()
|
wejdeneHaouari/blockbench-sawtooth
|
src/macro/kvstore/parse_output.py
|
#! /usr/bin/env python
import sys
import re
def main():
if len(sys.argv) != 3 or sys.argv[1] == '-h':
print("Usage: %s InputFileName OutputFileName" % sys.argv[0])
print("Statistics (.cv file) for each workload " + \
"will be written to the target file.")
sys.exit(-1)
path = sys.argv[1]
target = 'results/' + sys.argv[2]
with open(path) as file_in:
with open(target, "a") as f, open("results/block.csv", "a") as b:
for line in file_in:
block = re.search('polled block (.*) ', line)
txt_count = re.search('tx count = (.+?) ', line)
latency = re.search('latency = (.+?) ', line)
outstanding_request = re.search('outstanding request = (.+?) ', line)
time = re.search('time = (.*)', line)
threads = re.search('threads = (.+?) ', line)
rates = re.search('rates = (.+?) ', line)
timeout = re.search('timeout = (.*)', line)
if latency and txt_count and outstanding_request and time:
txt_count_res = txt_count.group(1)
latency_res = latency.group(1)
outstanding_request_res = outstanding_request.group(1)
time_res = time.group(1)
f.write(time_res + "," + txt_count_res + "," + latency_res + "," + outstanding_request_res + "\n")
if block:
block_res = block.group(1)
b.write(block_res + "\n")
if threads and rates and timeout:
f.write("threads,rate,timeout,\n")
f.write(threads.group(1) + "," + rates.group(1) + "," + timeout.group(1) + "," + "\n")
f.write("time,txt_count,latency,outstanding\n")
if __name__ == '__main__':
main()
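# For reference, hypothetical driver log lines that the regexes in main() are
# written against (field order inferred from the patterns above, not from the
# driver source; the values are made up):
#   tx count = 42 latency = 0.125 outstanding request = 7 time = 1620000000.5
#   polled block 123 txns
#   threads = 8 rates = 100 timeout = 60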
|
wejdeneHaouari/blockbench-sawtooth
|
src/macro/kvstore/parse_result.py
|
#! /usr/bin/env python
import sys
import os
import numpy
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2014 <NAME>"
__email__ = "<EMAIL>"
def main():
if len(sys.argv) != 2 or sys.argv[1] == '-h':
print("Usage: %s OutputFileName" % sys.argv[0])
print("Statistics (.result file) for each workload " + \
"will be written to the workload directory.")
sys.exit(-1)
path = sys.argv[1]
lines = [line.strip().split('\t') for line in open(path)]
results = {}
db_index = set() # db name
tn_index = set() # thread number
for line in lines:
if line[0][0] == '#':
continue
db_name = line[0]
db_index.add(db_name)
dir_name, workload = os.path.split(line[1])
workload = os.path.splitext(workload)[0]
num_threads = int(line[2])
tn_index.add(num_threads)
throughput = float(line[3])
if workload not in results:
results[workload] = {}
if db_name not in results[workload]:
results[workload][db_name] = {}
if num_threads not in results[workload][db_name]:
results[workload][db_name][num_threads] = []
results[workload][db_name][num_threads].append(throughput)
db_index = sorted(db_index)
tn_index = sorted(tn_index)
for wl in sorted(results.keys()):
out_file = open(os.path.join(dir_name, wl + ".result"), 'w+')
# Prints header
line = "#"
for db in db_index:
line += '\t' + db
out_file.write(line + '\n')
# Prints results
for tn in tn_index:
line = str(tn)
for db in db_index:
data = results[wl][db][tn]
line += '\t' + str(numpy.median(numpy.array(data)))
out_file.write(line + '\n')
if __name__ == '__main__':
main()
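# Hypothetical input row (tab-separated), matching the indexing in main():
#   db_name<TAB>workload_path<TAB>num_threads<TAB>throughput
# e.g. "redis\tworkloads/workloada.spec\t8\t1234.5" (values are made up)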
|
wejdeneHaouari/blockbench-sawtooth
|
benchmark/sawtooth_v1_2/block_server_api/main.py
|
import argparse
import asyncio
import logging
import sys
import threading
from aiohttp import web
from block_server_api.route_handler import RouteHandler
from zmq.asyncio import ZMQEventLoop
from block_server_api.databaseImp import DatabaseImp
LOGGER = logging.getLogger(__name__)
def parse_args(args):
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-v', '--verbose',
action='count',
default=0,
help='Increase output sent to stderr')
parser.add_argument(
'--uri',
type=str,
help='database URI',
default='mongodb://127.0.0.1:27017/')
parser.add_argument(
'-b', '--bind',
help='identify host and port for api to run on',
default='block-server-rest-api:9001')
return parser.parse_args(args)
def init_logger(level):
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
if level == 1:
logger.setLevel(logging.INFO)
elif level > 1:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.WARN)
def start_rest_api(host, port, opts, loop):
# start REST API
app = web.Application(loop=loop)
handler = RouteHandler()
app.router.add_get('/height', handler.get_height)
app.router.add_get('/block', handler.get_block_transactions)
LOGGER.warning('Starting REST API on %s:%s', host, port)
web.run_app(
app,
host=host,
port=port,
access_log=LOGGER)
def main():
LOGGER.warning("## in api ##")
opts = parse_args(sys.argv[1:])
init_logger(opts.verbose)
try:
host, port = opts.bind.split(":")
port = int(port)
except ValueError:
print("Unable to parse binding {}: Must be in the format"
" host:port".format(opts.bind))
sys.exit(1)
loop = ZMQEventLoop()
asyncio.set_event_loop(loop)
try:
DatabaseImp.initialize(opts.uri)
start_rest_api(host, port, opts, loop)
except KeyboardInterrupt:
pass
finally:
print("Closing Loop")
loop.close()
if __name__ == '__main__':
    main()
|
wejdeneHaouari/blockbench-sawtooth
|
benchmark/sawtooth_v1_2/block_server_subscriber/subscriber.py
|
import logging
from sawtooth_sdk.messaging.stream import Stream
from sawtooth_sdk.protobuf.client_event_pb2 import ClientEventsSubscribeRequest, ClientEventsSubscribeResponse
from sawtooth_sdk.protobuf.events_pb2 import EventSubscription, EventList
from sawtooth_sdk.protobuf.validator_pb2 import Message
LOGGER = logging.getLogger(__name__)
class Subscriber(object):
def __init__(self, validator_url):
LOGGER.info('Connecting to validator: %s', validator_url)
self._stream = Stream(validator_url)
self._event_handlers = []
def add_handler(self, handler):
self._event_handlers.append(handler)
def listen_to_event(self):
self._stream.wait_for_ready()
# Step 1: Construct a Subscription
block_sub = EventSubscription(event_type='sawtooth/block-commit')
# Step 2: Submit the Event Subscription
request = ClientEventsSubscribeRequest(
subscriptions=[block_sub])
response_future = self._stream.send(
Message.CLIENT_EVENTS_SUBSCRIBE_REQUEST,
request.SerializeToString())
response = ClientEventsSubscribeResponse()
response.ParseFromString(response_future.result().content)
# the response was previously parsed but never checked; fail fast on a bad status
if response.status != ClientEventsSubscribeResponse.OK:
raise RuntimeError('subscription failed with status {}'.format(response.status))
# Listen for events in an infinite loop
LOGGER.warning("Listening to events.")
while True:
msg = self._stream.receive()
event_list = EventList()
event_list.ParseFromString(msg.result().content)
for handler in self._event_handlers:
handler(event_list.events)
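# A minimal usage sketch, kept as a guarded demo (assumptions for illustration:
# a validator reachable at the default local ZMQ endpoint; main.py wires in the
# real EventHandler instead of this logging handler):
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    demo = Subscriber('tcp://localhost:4004')
    demo.add_handler(lambda events: LOGGER.info('received %d event(s)', len(events)))
    demo.listen_to_event()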
|
wejdeneHaouari/blockbench-sawtooth
|
benchmark/sawtooth_v1_2/block_server_subscriber/event_handling.py
|
import logging
import requests
import yaml
from block_server_subscriber.databaseImp import DatabaseImp
import time
LOGGER = logging.getLogger(__name__)
class EventHandler(object):
def __init__(self, rest_api_url):
self._url = rest_api_url
self.retry = 2
def get_events_handler(self):
return lambda events: self._handle_events(events)
def _handle_events(self, events):
block_num, block_id = self._parse_new_block(events)
if block_num is not None:
try:
transactionIDS = self._get_txnts(block_id)
# store the new block's transaction ids, then advance the recorded height
DatabaseImp.insert("blkTxns", {"block_num": block_num, "transactions": transactionIDS})
DatabaseImp.insert("height", {"height": block_num})
except Exception as ex:
LOGGER.warning(ex)
def _parse_new_block(self, events):
try:
block_attr = next(e.attributes for e in events
if e.event_type == 'sawtooth/block-commit')
except StopIteration:
return None, None
block_num = int(next(a.value for a in block_attr if a.key == 'block_num'))
block_id = next(a.value for a in block_attr if a.key == 'block_id')
LOGGER.debug('Handling deltas for block: %s', block_num)
return block_num, block_id
def _send_get_txts(self,block_id):
try:
result = self._send_request(
'blocks/{}'.format(block_id))
return result
except Exception as ex:
LOGGER.debug('block fetch failed, will retry: %s', ex)
return 0
def _get_txnts(self, block_id):
try:
transactionIDS = []
result = 0
attempt = 0
while result == 0:
attempt += 1
LOGGER.warning("## getting transactions ## attempt " + str(attempt))
result = self._send_get_txts(block_id)
if result == 0:
    time.sleep(0.1)  # brief pause before retrying; the original reset the counter every pass
res = yaml.safe_load(result)
for batch in res['data']['batches']:
for transactionId in batch['header']['transaction_ids']:
transactionIDS.append(transactionId)
return transactionIDS
except Exception as err:
raise Exception(err) from err
def _send_request(self, suffix, data=None, content_type=None):
if self._url.startswith("http://"):
url = "{}/{}".format(self._url, suffix)
else:
url = "http://{}/{}".format(self._url, suffix)
headers = {}
if content_type is not None:
headers['Content-Type'] = content_type
try:
if data is not None:
result = requests.post(url, headers=headers, data=data)
else:
result = requests.get(url, headers=headers)
# if result.status_code == 404:
# raise Exception("No such key: {}".format(name))
if not result.ok:
raise Exception("Error {}: {}".format(
result.status_code, result.reason))
except requests.ConnectionError as err:
raise Exception(
'Failed to connect to REST API: {}'.format(err)) from err
except BaseException as err:
raise Exception(err) from err
return result.text
|
wejdeneHaouari/blockbench-sawtooth
|
src/macro/kvstore/saturation.py
|
#!/usr/bin/env python
import os
import time
import sys
# running experiments
EXPS = [(1, 5), (1, 10), (1, 30)]  # (threads, tx rate) pairs
SC = "ycsb"
WAIT_TIME = 20
IS_INT = 0
OUTPUT_FILE = "output.txt"
WORKLOAD = "workloada.spec"
def run_exp():
cmd = './driver -db {} -threads {} -P workloads/{} -txrate {} -endpoint {} -wl {} -wt {} -isint {} 2>&1 | tee {}'
start = time.time()
for (t, r) in EXPS:
print('********************** **********************')
begin_exp = time.time() - start
print("start = ", begin_exp)
print("rate =", r)
os.system(cmd.format(TARGET, t, WORKLOAD, r, ENDPOINT, SC, WAIT_TIME, IS_INT, OUTPUT_FILE))
end_exp = time.time() - start
print("end = ", end_exp)
print("interval = ", end_exp - begin_exp)
if __name__ == '__main__':
if len(sys.argv) != 2 or sys.argv[1] == '-h':
print("Usage: %s (-e or -s or -f)" % sys.argv[0])
sys.exit(-1)
target = sys.argv[1]
if target == "-e":
TARGET = "ethereum"
ENDPOINT = "localhost:8545"
elif target == "-f":
TARGET = "fabric-v2.2"
ENDPOINT = "localhost:8800,localhost:8801"
elif target == "-s":
TARGET = "sawtooth-v1.2"
ENDPOINT = "localhost:9001,localhost:8000"
else:
print("argument must be -f -e or -s")
sys.exit(-1)
run_exp()
print("done")
|
wejdeneHaouari/blockbench-sawtooth
|
benchmark/sawtooth_v1_2/block_server_subscriber/databaseImp.py
|
import pymongo
import logging
LOGGER = logging.getLogger(__name__)
class DatabaseImp(object):
#URI = "mongodb://root:password@bb:27017/"
DATABASE = None
@staticmethod
def initialize(uri):
try:
client = pymongo.MongoClient(uri)
DatabaseImp.DATABASE = client['blocks']
DatabaseImp.DATABASE["blkTxns"].insert({"block_num": 0, "transactions": []})
DatabaseImp.DATABASE["height"].insert({"height": 0})
except Exception as ex:
LOGGER.warning(ex)
@staticmethod
def insert(collection, data):
try:
DatabaseImp.DATABASE[collection].insert_one(data)
except Exception as ex:
LOGGER.warning(ex)
@staticmethod
def find(collection, query):
return DatabaseImp.DATABASE[collection].find(query)
@staticmethod
def find_one(collection, query):
return DatabaseImp.DATABASE[collection].find_one(query)
@staticmethod
def find_last_record(collection):
try:
record = DatabaseImp.DATABASE[collection].find({}).sort("_id", -1).limit(1)
return record.next()
except StopIteration:
# empty collection
return None
except Exception as ex:
LOGGER.warning(ex)
return None
|
wejdeneHaouari/blockbench-sawtooth
|
benchmark/sawtooth_v1_2/rest_api/route_handler.py
|
import time
from json import JSONDecodeError
import logging
from aiohttp.web_response import json_response
from rest_api.errors import ApiBadRequest
LOGGER = logging.getLogger(__file__)
class RouteHandler(object):
def __init__(self, loop, client):
self._loop = loop
self._client = client
async def invoke_function(self, request):
body = await decode_request(request)
required_fields = ['function', 'args']
validate_fields(required_fields, body)
function = body.get('function')
args = body.get("args")
if function == "Write":
LOGGER.warning("######")
LOGGER.warning(args)
res = await self.create_record(args)
return json_response(res)
else:
raise ApiBadRequest(
"'{}' is not supported".format(function))
async def create_record(self, args):
start = time.time()
if len(args) != 2:
raise ApiBadRequest("write function must invoke two parameters")
name = args[0]
value = args[1]
response = self._client.set(name, value)
end = time.time() - start
res = {"txnID": response, "latency_sec": end}
return res
async def delete_record(self, arg):
pass
async def decode_request(request):
try:
return await request.json()
except JSONDecodeError:
raise ApiBadRequest('Improper JSON format')
def validate_fields(required_fields, body):
for field in required_fields:
if body.get(field) is None:
raise ApiBadRequest(
"'{}' parameter is required".format(field))
|
wejdeneHaouari/blockbench-sawtooth
|
benchmark/sawtooth_v1_2/block_server_api/databaseImp.py
|
import pymongo
import logging
LOGGER = logging.getLogger(__name__)
class DatabaseImp(object):
DATABASE = None
@staticmethod
def initialize(uri):
try:
client = pymongo.MongoClient(uri)
DatabaseImp.DATABASE = client['blocks']
except Exception as ex:
LOGGER.warning(ex)
@staticmethod
def insert(collection, data):
try:
DatabaseImp.DATABASE[collection].insert_one(data)
except Exception as ex:
LOGGER.warning(ex)
@staticmethod
def find(collection, query):
return DatabaseImp.DATABASE[collection].find(query)
@staticmethod
def find_one(collection, query):
return DatabaseImp.DATABASE[collection].find_one(query)
@staticmethod
def find_last_record(collection):
doc = None
try:
record = DatabaseImp.DATABASE[collection].find({}).sort("_id", -1).limit(1)
except Exception as ex:
LOGGER.warning(ex)
try:
doc = record.next()
except StopIteration:
return None
return doc
|
wejdeneHaouari/blockbench-sawtooth
|
scripts/fabric-cpu.py
|
import os
import sys
import getopt
import re
import time
from datetime import datetime
use_cpu_utilization = True
url = "http://localhost:8086/status"
min_containers = 1
max_containers = 6
peer = "peer"
order = "order"
check_interval = 5
up_threshold = 85 if use_cpu_utilization else 20
down_threshold = 60 if use_cpu_utilization else -1
log_cpu_path = "fabric.csv"
def printSetting():
print("Min containers: \t%d" % min_containers)
print("Max containers: \t%d" % max_containers)
print("Check interval: \t%d seconds" % check_interval)
if use_cpu_utilization:
print("Up threshold: \t> %.2f%% cpu utilization" % up_threshold)
print("Down threshold: \t< %.2f%% cpu utilization" % down_threshold)
else:
print("Up threshold: \t> %d waiting requests" % int(up_threshold))
print("Down threshold: \t< %d waiting requests" % int(down_threshold))
def printUsage():
print(
"""
Usage: %s [options]
-h or --help: show help info
-l url or --link=url: the status url of nginx
-m min_containers or --min=min_containers: the min number of containers
-M max_containers or --max=max_containers: the max number of containers
-t target_container or --target=target_container: the target container
-i check_interval or --interval=check_interval: the checking interval
-u up_threshold or --up=up_threshold: the threshold for scaling up
-d down_threshold or --down=down_threshold: the threshold for scaling down
"""
% (sys.argv[0],))
def check_cpu_utilization(log_file):
pattern = re.compile(r".*%s.*" % peer)
pattern1 = re.compile(r".*%s.*" % order)
cpus = []
mems = []
with os.popen("sudo docker stats --no-stream") as f:
for s in f.readlines():
ss = s.split()
if len(ss) >= 3 and (pattern.match(ss[1]) or pattern1.match(ss[1])):
cu = float(ss[2].replace("%", ""))
cpus.append(cu)
name = ss[1]
mem = float(ss[6].replace("%", ""))
mems.append(mem)
print("INFO: container %s: cpu %.2f%%, mem %.2f%%" % (ss[1], cu, mem))
num = len(cpus)
avg_cpu = sum(cpus) / num if num > 0 else -1
avg_mem = sum(mems) / num if num > 0 else -1
log_file.write("%s,%d,%.2f,%.2f,%s\n" % (datetime.now().strftime("%H:%M:%S"),
num, avg_cpu, avg_mem,
",".join("%.2f,%.2f" % (cpus[i], mems[i]) for i in range(num))))
log_file.flush()
return avg_cpu
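# Hypothetical `docker stats --no-stream` row this parser splits on whitespace
# (column positions assumed from the indices used: ss[1] = NAME, ss[2] = CPU %,
# ss[6] = MEM %; the values are made up):
#   1a2b3c4d  peer0.org1.example.com  12.34%  100MiB / 2GiB  4.88%  ...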
try:
opts, args = getopt.getopt(sys.argv[1:],
"hl:m:M:t:i:u:d:",
["help", "link=", "min=", "max=", "target=", "interval=", "up=", "down="]
)
except getopt.GetoptError:
print("Error: Invalid arguments!")
sys.exit(-1)
for cmd, arg in opts:
if cmd in ("-h", "--help"):
printSetting()
print("")
printUsage()
sys.exit(0)
elif cmd in ("-l", "--link"):
url = arg
elif cmd in ("-m", "--min"):
min_containers = max(1, int(arg))
elif cmd in ("-M", "--max"):
max_containers = int(arg)
elif cmd in ("-t", "--target"):
target_container = arg
elif cmd in ("-u", "--up"):
up_threshold = float(arg)
elif cmd in ("-d", "--down"):
down_threshold = float(arg)
elif cmd in ("-i", "--interval"):
check_interval = int(arg)
printSetting()
print("")
log_file = open(log_cpu_path, "w+")
log_file.write("Time,Num,AvgCPU,AvgMEM,C1CPU,CIMEM,...\n")
while True:
start_time = time.time()
print("INFO:\tStart checking ...")
if use_cpu_utilization:
avg_cu = check_cpu_utilization(log_file)
print("avg cpu .. ", avg_cu)
end_time = time.time()
sleep_time = check_interval - (end_time - start_time)
print("INFO:\tFinish checking. Sleeping %.2f seconds ...\n" % sleep_time)
if sleep_time > 0:
time.sleep(sleep_time)
log_file.close()
|
wejdeneHaouari/blockbench-sawtooth
|
benchmark/sawtooth_v1_2/block_server_api/route_handler.py
|
from json import JSONDecodeError
import logging
from aiohttp.web_response import json_response
from block_server_api.databaseImp import DatabaseImp
from block_server_api.errors import ApiBadRequest
LOGGER = logging.getLogger(__name__)
class RouteHandler(object):
def __init__(self):
pass
async def get_height(self, request):  # aiohttp handlers must be coroutines
height = DatabaseImp.find_last_record("height")
field = 0
if height is not None:
field = height.get("height")
field += 1
return json_response({"status": "0", "height": str(field)})
async def get_block_transactions(self, request):
# query.get avoids the KeyError the original raised on a missing parameter
blknum_raw = request.rel_url.query.get('num')
if blknum_raw is None:
raise ApiBadRequest(
"missing num query parameter")
try:
blknum = int(blknum_raw)
except ValueError:
raise ApiBadRequest(
"block number must be int")
blockTxs = DatabaseImp.find_one("blkTxns", {"block_num": blknum})
if blockTxs is None:
raise ApiBadRequest(
"block num '{}' does not exist ".format(blknum))
transactions = blockTxs.get("transactions")
return json_response({"status": "0", "txns": transactions})
async def decode_request(request):
try:
return await request.json()
except JSONDecodeError:
raise ApiBadRequest('Improper JSON format')
def validate_fields(required_fields, body):
for field in required_fields:
if body.get(field) is None:
raise ApiBadRequest(
"'{}' parameter is required".format(field))
|
wejdeneHaouari/blockbench-sawtooth
|
src/macro/kvstore/exprience.py
|
#!/usr/bin/env python
import os
import subprocess
import time
import sys
from threading import Event
import signal
# running experiments
EXPS = [(1, 5, 60), (1, 5, 60), (1, 10, 60)]  # (threads, tx rate, third field: record count in run_exp_change_total_req, timeout seconds in saturation())
SC = "ycsb"
WAIT_TIME = 20
IS_INT = 0
WORKLOAD = "workloada.spec"
# Event object used to send signals from one thread to another
stop_event = Event()
def replaceTotalReq(oldTotal, newTotal):
total = "recordcount=" + str(newTotal)
with open("workloads/workloada.spec", 'r+') as f:
contents = f.read().replace('recordcount=' + str(oldTotal), total)
f.seek(0)
f.truncate()
f.write(contents)
def run_exp_change_total_req():
cmd = './driver -db {} -threads {} -P workloads/{} -txrate {} -endpoint {} -wl {} -wt {} -isint {} 2>&1 | tee {}'
start = time.time()
i = 0
oldTotal = 200
for (t, r, q) in EXPS:
print('********************** **********************')
replaceTotalReq(oldTotal, q)
begin_exp = time.time() - start
OUTPUT_FILE = "logs/" + str(i) + str(t) + "_threads_" + str(r) + "_rates" + "_start" + str(begin_exp)
print("start = ", begin_exp)
print("rate = ", r)
os.system(cmd.format(TARGET, t, WORKLOAD, r, ENDPOINT, SC, WAIT_TIME, IS_INT, OUTPUT_FILE))
end_exp = time.time() - start
print("end = ", end_exp)
interval = end_exp - begin_exp
if interval < 60:
print("sleep ", (60 - interval))
time.sleep(60 - interval)
i += 1
def send_transactions(i, t, r, o, begin_exp):
cmd = './driver -db {} -threads {} -P workloads/{} -txrate {} -endpoint {} -wl {} -wt {} -isint {} | tee {}'
# OUTPUT_FILE = "logs/" + str(i) + "_" + str(t) + "_threads_" + str(r) + "_rates" + "_start" + str(begin_exp) + ".txt"
OUTPUT_FILE = "logs/" + str(i) + "_threads_" + str(t) + "_rates_" + str(r) + "_timeout_" + str(o) + "_.txt"
cmd_f = cmd.format(TARGET, t, WORKLOAD, r, ENDPOINT, SC, WAIT_TIME, IS_INT, OUTPUT_FILE)
try:
process = subprocess.Popen(cmd_f,
shell=True, preexec_fn=os.setsid)
print('Running in process', process.pid, ' rate ', r, ' timeout ', o)
## python version 3.3 or higher
process.wait(timeout=o)
except subprocess.TimeoutExpired:
print('Timed out - killing', process.pid)
os.killpg(os.getpgid(process.pid), signal.SIGTERM)
time.sleep(1)
print("Done")
class TimeoutException(Exception): # Custom exception class
pass
def timeout_handler(signum, frame): # Custom signal handler
raise TimeoutException
def saturation():
start = time.time()
i = 0
for (t, r, o) in EXPS:
print("********exp ************", i)
begin_exp = time.time() - start
i = i + 1
send_transactions(i, t, r,o, begin_exp)
if __name__ == '__main__':
if len(sys.argv) != 2 or sys.argv[1] == '-h':
print("Usage: %s (-e or -s or -f)" % sys.argv[0])
sys.exit(-1)
target = sys.argv[1]
if target == "-e":
TARGET = "ethereum"
ENDPOINT = "localhost:8545"
elif target == "-f":
TARGET = "fabric-v2.2"
ENDPOINT = "localhost:8800,localhost:8801"
elif target == "-s":
TARGET = "sawtooth-v1.2"
ENDPOINT = "localhost:9001,localhost:8000"
else:
print("argument must be -f -e or -s")
sys.exit(-1)
saturation()
print("done")
|
christopher-chandler/Pythonpourlescurieux
|
sandbox/basicProgram.py
|
import simple_functions
res = simple_functions.add(2, 4)
print(res)
|
christopher-chandler/Pythonpourlescurieux
|
sandbox/HelloWorld.py
|
def add(a, b):
'''
add adds two numbers together.
'''
return a + b
def minus(a, b):
res = a - b
return res
a = 2
b = 4
res = minus(a, b)
resTwo = add(res, 4)
# ==
# True
# False
# == equal
# != not equal
# < less than
# > greater than
# <= less than or equal
# >= greater than or equal
# True
# False
True
False
1
"1"
a = 1
b = "1"
# bool = boolean
list_of_numbers = [1, 2, 3, 4]
new_list = list()
new_list.append("Apple")
list_of_numbers = [1, 2, 3, 4, 5, 6, 7, 8]
# For loop
for i in list_of_numbers:
print(i)
'''
for variable in list
print (variable)
'''
# underscore
list_of_numbers = [1, 2, 3, 4, 5, 6, 7, 8]
dogs = "bob", "mary", "John"
# _
# snake case
house_of_cards = "House of cards"
# Camel Case
houseOfCards = "House of cards"
houseofcards = "Hose of cards"
print(dogs)
# Hello, this is a comment.
'''
first line
second line
third line
fourth line
etc
etc
etc
'''
# etc
# livereloade
# rere
|
christopher-chandler/Pythonpourlescurieux
|
sandbox/simple_functions.py
|
def add(a, b):
return a + b
def minus(a, b):
return a - b
def multiply(a, b):
return a * b
def divide(a, b):
return a / b
|
yperbasis/linear-vs-binary-search
|
collectdata.py
|
import re, os, glob
def CollectData(N, mem):
for fn in glob.glob("search_tmp.*"):
os.remove(fn)
with open("search.cpp", "rt") as f:
src = f.read()
ints = mem // 8  # bytes -> number of 64-bit ints
src = re.sub(r"const int SIZE = (\d*);", r"const int SIZE = %d;" % N, src)
src = re.sub(r"const int ARR_SAMPLES = (\(.*?\))", r"const int ARR_SAMPLES = %d" % ints, src)
src = re.sub(r"const int KEY_SAMPLES = (\(.*?\))", r"const int KEY_SAMPLES = %d" % ints, src)
with open("search_tmp.cpp", "wt") as f:
f.write(src)
with open("c.bat", "rt") as f:
bat = f.read()
bat = bat.replace("search.cpp", "search_tmp.cpp")
os.system(bat)
logname = "res_%04d_%d.log" % (N, mem)
os.system("search_tmp >res/" + logname)
os.system("search_tmp >res/" + logname)
for fn in glob.glob("res/*"):
os.remove(fn)
sizes = [16, 32, 64, 128, 256, 512, 1024]
#sizes = [128, 256, 512, 1024, 2048, 4096]
for s in sizes:
CollectData(s, 64<<10)
# CollectData(s, 512<<10)
|
yperbasis/linear-vs-binary-search
|
makeplots.py
|
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import sys, os, glob, re, math
results = {}
def AddRes(name, mem, N, eltime):
global results
results.setdefault(mem, {}).setdefault(name, {})[N] = eltime
def ReadRes(fn):
with open(fn, "rt") as f:
data = f.read()
match = re.search(r"Arrays: (\d*) x (\d*)", data)
N = int(match.group(2))
match = re.search(r"Memory: (\d*)", data)
mem = int(match.group(1))
for match in re.finditer(r"\s*([0-9.]+)\s*ns\s*:\s*(\S+)", data):
eltime = float(match.group(1))
name = match.group(2)
AddRes(name, mem, N, eltime)
for fn in glob.glob("res/*.log"):
ReadRes(fn)
# plt.loglog([1,2,3,4], [1,4,9,16], 'bo', [1,2,3,4], [16,9,9,10], 'ro', basex=2, basey=2, linestyle='-')
# plt.show()
styles = ['yx', 'rx', 'r+', 'mx', 'm+', 'k.', 'ko', 'bo', 'bs', 'yo', 'g*', 'gP', 'gd', 'm*', 'c*']
dpi = 150
for mem, graphs in results.items():
args = []
names = []
argsPE = []
argsLog = []
idx = 0
for name, graph in graphs.items():
if ('linear' in name and 'scalar' in name):
continue
X = []
Y = []
Z = []
W = []
for N, eltime in graph.items():
X.append(N)
Y.append(eltime)
Z.append(eltime / N)
W.append(eltime / math.log(N, 2.0))
args += [X, Y, styles[idx]]
argsPE += [X, Z, styles[idx]]
argsLog += [X, W, styles[idx]]
names.append(name)
idx += 1
print("%s: %s" % (name, args[-1]))
title = "(memory = %dB)" % mem
if len(sys.argv) > 1:
title = sys.argv[1] + " " + title
ax = plt.axes()
ax.set_title(title)
ax.loglog(*args, basex=2, basey=2, linestyle='-')
ax.set_xlabel("Array length (N)")
ax.set_ylabel("Time per search, ns")
ax.grid(True, which="major")
ax.grid(True, which="minor", color='0.8', linestyle=':')
ax.legend(names, loc=2, prop={'size': 6})
ax.get_yaxis().get_minor_locator().subs([1.25, 1.5, 1.75])
ax.get_yaxis().set_minor_formatter(ticker.FuncFormatter(lambda x,p: str(int(x))))
ax.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
ax.get_xaxis().set_major_formatter(ticker.ScalarFormatter())
#plt.show()
plt.savefig('res/plot_search_%d.png' % mem, bbox_inches='tight', dpi=dpi)
plt.gcf().clear()
ax = plt.axes()
ax.set_title(title)
ax.semilogx(*argsPE, basex=2, linestyle='-')
ax.set_xlabel("Array length (N)")
ax.set_ylabel("Time per element, ns")
ax.grid(True, which="major")
ax.grid(True, which="minor", color='0.8', linestyle=':')
ax.legend(names, loc=1, prop={'size': 6})
ax.set_ylim(0.0, 0.5)
ax.get_yaxis().set_minor_locator(ticker.MultipleLocator(0.01))
ax.get_yaxis().tick_right()
ax.get_xaxis().set_major_formatter(ticker.ScalarFormatter())
#plt.show()
plt.savefig('res/plot_elem_%d.png' % mem, bbox_inches='tight', dpi=dpi)
plt.gcf().clear()
ax = plt.axes()
ax.set_title(title)
ax.semilogx(*argsLog, basex=2, linestyle='-')
ax.set_xlabel("Array length (N)")
ax.set_ylabel("Time per one bin.search comparison, ns")
ax.grid(True, which="major")
ax.grid(True, which="minor", color='0.8', linestyle=':')
ax.legend(names, loc=2, prop={'size': 6})
ax.set_ylim(1.0, 7.0)
ax.get_yaxis().set_minor_locator(ticker.MultipleLocator(0.5))
ax.get_yaxis().tick_right()
ax.get_xaxis().set_major_formatter(ticker.ScalarFormatter())
#plt.show()
plt.savefig('res/plot_log_%d.png' % mem, bbox_inches='tight', dpi=dpi)
plt.gcf().clear()
|
sterben25/Savior
|
Utils/InferenceHelpers/TritonHelper.py
|
from abc import ABC
import numpy as np
import tritonclient.grpc as grpcclient
from Utils.Exceptions import TritonServerCannotConnectException, TritonServerNotReadyException, \
InferenceTensorCheckFailException
from Utils.InferenceHelpers.BaseInferenceHelper import CustomInferenceHelper, ImageTensorInfo
class TritonInferenceHelper(CustomInferenceHelper, ABC):
numpy_data_type_mapper = {
np.half.__name__: "FP16",
np.float32.__name__: "FP32",
np.float64.__name__: "FP64",
np.bool8.__name__: "BOOL",
np.uint8.__name__: "UINT8",
np.int8.__name__: "INT8",
np.short.__name__: "INT16",
np.int32.__name__: "INT32",
np.int64.__name__: "INT64",
}
def __init__(self, _algorithm_name, _server_url, _server_port, _model_name, _model_version):
super().__init__()
self.name = _algorithm_name
self.type_name = 'triton'
self.target_url = '%s:%s' % (_server_url, _server_port)
self.model_name = _model_name
self.model_version = str(_model_version)
self.triton_client = None
def check_ready(self):
try:
# newer triton clients allow send/receive payloads over 1 GB, which is plenty here
self.triton_client = grpcclient.InferenceServerClient(url=self.target_url)
except Exception as e:
raise TritonServerCannotConnectException(f'triton server {self.target_url} connect fail')
if not self.triton_client.is_server_ready():
raise TritonServerNotReadyException(f'triton server {self.target_url} not ready')
def add_image_input(self, _input_name, _input_shape, _input_description, _mean_and_std):
self.all_inputs[_input_name] = ImageTensorInfo(_input_name, _input_shape, _input_description, _mean_and_std)
def infer(self, _need_tensor_check=False, **_input_tensor):
self.check_ready()
inputs = []
assert _input_tensor.keys() == set(self.all_inputs.keys()), f'{self.model_name} the input tensor not match'
for m_name, m_tensor_info in self.all_inputs.items():
m_tensor = _input_tensor[m_name]
if not (isinstance(m_tensor, np.ndarray) and m_tensor.dtype.name in self.numpy_data_type_mapper):
raise InferenceTensorCheckFailException(f'tensor {m_name} is unavailable numpy array')
if _need_tensor_check:
check_status, check_result = m_tensor_info.tensor_check(m_tensor)
if not check_status:
raise InferenceTensorCheckFailException(check_result)
if isinstance(m_tensor_info, ImageTensorInfo):
m_to_input_tensor = m_tensor_info.normalize(m_tensor, _tensor_format='chw').astype(m_tensor.dtype)
else:
m_to_input_tensor = m_tensor
m_infer_input = grpcclient.InferInput(m_name,
m_to_input_tensor.shape,
self.numpy_data_type_mapper[m_to_input_tensor.dtype.name]
)
m_infer_input.set_data_from_numpy(m_to_input_tensor)
inputs.append(m_infer_input)
results = self.triton_client.infer(model_name=self.model_name,
model_version=self.model_version,
inputs=inputs)
to_return_result = dict()
for m_result_name in self.all_outputs.keys():
to_return_result[m_result_name] = results.as_numpy(m_result_name)
return to_return_result
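# A minimal usage sketch; every name below is hypothetical, and it assumes a
# Triton server on localhost:8001 serving 'my_model' with a single image input,
# plus the add_output method from the base helper (it is used this way in
# CaptchaRecognizeOperator):
#
#   helper = TritonInferenceHelper('demo', 'localhost', 8001, 'my_model', 1)
#   helper.add_image_input('INPUT__0', (224, 224, 3), 'input image',
#                          ([127.5] * 3, [127.5] * 3))
#   helper.add_output('OUTPUT__0', (-1,), 'logits')
#   result = helper.infer(INPUT__0=np.zeros((224, 224, 3), dtype=np.float32))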
|
sterben25/Savior
|
Deployment/ConsumerServices/GeneralService.py
|
from Deployment.ConsumerWorker import celery_worker_app
from Deployment.server_config import IS_TEST
from Operators.ExampleDownloadOperator import ImageDownloadOperator
from Operators.ExampleImageStringParseOperator import ImageParseFromBase64
from Utils.ServiceUtils import ServiceTask
from Utils.Storage import get_oss_handler
image_download_op = ImageDownloadOperator(IS_TEST)
base64_image_decode_op = ImageParseFromBase64(IS_TEST)
@celery_worker_app.task(name="ConsumerServices.GeneralService.download_image_from_url")
def download_image_from_url(_image_url):
oss_helper = get_oss_handler()
download_result = image_download_op.execute(_image_url, oss_helper, _image_size_threshold=None)
return {
'image_info': {
'bucket_name': download_result['bucket_name'],
'path': download_result['saved_path'],
'height': download_result['image_height'],
'width': download_result['image_width'],
'channel': download_result['image_channel'],
},
}
class DownloadImageFromURLServiceTask(ServiceTask):
service_version = 'v1.0.20210317'
service_name = 'download_image_from_url'
mock_result = {
'image_info': {
'bucket_name': '',
'path': '',
'height': 0,
'width': 0,
'channel': 1,
},
}
require_field = {
"_image_url",
}
binding_service = download_image_from_url
@celery_worker_app.task(name="ConsumerServices.GeneralService.parse_image_from_base64")
def parse_image_from_base64(_base64_string):
oss_helper = get_oss_handler()
decode_result = base64_image_decode_op.execute(_base64_string, oss_helper)
return {
'image_info': {
'bucket_name': decode_result['bucket_name'],
'path': decode_result['saved_path'],
'height': decode_result['image_height'],
'width': decode_result['image_width'],
'channel': decode_result['image_channel'],
},
}
class ParseImageFromBase64ServiceTask(ServiceTask):
service_version = 'v1.0.20210524'
service_name = 'parse_image_from_base64'
mock_result = {
'image_info': {
'bucket_name': '',
'path': '',
'height': 0,
'width': 0,
'channel': 1,
},
}
require_field = {
"_base64_string",
}
binding_service = parse_image_from_base64
|
sterben25/Savior
|
Operators/ExampleTextRecognizeOperator/CaptchaRecognizeOperator.py
|
import os
import cv2
import numpy as np
from scipy.special import softmax
from Operators.DummyAlgorithmWithModel import DummyAlgorithmWithModel
from Utils.GeometryUtils import force_convert_image_to_bgr, resize_with_height, pad_image_with_specific_base
from Utils.InferenceHelpers import TritonInferenceHelper
class Captcha1RecognizeWithMaster(DummyAlgorithmWithModel):
"""
基于Master对于验证码种类1进行识别
@cite
@article{Lu2021MASTER,
title={{MASTER}: Multi-Aspect Non-local Network for Scene Text Recognition},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
journal={Pattern Recognition},
year={2021}
}
原始master:https://github.com/wenwenyu/MASTER-pytorch
训练使用的master的版本:https://github.com/novioleo/MASTER-pytorch 持续更新
"""
name = "基于Master的常用验证码种类1识别"
__version__ = 'v1.0.20210515'
def __init__(self, _inference_config, _alphabet_config_name, _is_test):
self.encoder_inference_helper = None
self.decoder_inference_helper = None
self.target_height = 100
self.target_width = 150
self.probability_threshold = 0.8
super().__init__(_inference_config, _is_test)
alphabet_file_path = os.path.join(os.path.dirname(__file__), 'assets', _alphabet_config_name + '.txt')
with open(alphabet_file_path, mode='r') as to_read_alphabet:
self.keys = [m_line.strip() for m_line in to_read_alphabet]
def get_inference_helper(self):
if self.inference_config['name'] == 'triton':
encoder_helper = TritonInferenceHelper(
'Captcha1RecognizeEncoder',
self.inference_config['triton_url'],
self.inference_config['triton_port'],
'Captcha1RecognizeEncoder',
1
)
decoder_helper = TritonInferenceHelper(
'Captcha1RecognizeDecoder',
self.inference_config['triton_url'],
self.inference_config['triton_port'],
'Captcha1RecognizeDecoder',
1
)
encoder_helper.add_image_input('INPUT__0', (self.target_width, self.target_height, 3), 'image to recognize',
([127.5, 127.5, 127.5], [127.5, 127.5, 127.5]))
encoder_helper.add_output('OUTPUT__0', (-1, 512), 'memory')
decoder_helper.add_input('INPUT__0', (-1,), 'labels predicted so far')
decoder_helper.add_input('INPUT__1', (-1, 512), 'memory')
decoder_helper.add_output('OUTPUT__0', (-1, -1), 'predicted labels')
self.encoder_inference_helper = encoder_helper
self.decoder_inference_helper = decoder_helper
else:
raise NotImplementedError(
f"{self.inference_config['name']} helper for captcha 1 recognize with master not implement")
def predict(self, _memory, _max_length, _sos_symbol, _eos_symbol, _padding_symbol):
batch_size = 1
to_return_label = np.ones((batch_size, _max_length + 2), dtype=np.int64) * _padding_symbol
probabilities = np.ones((batch_size, _max_length + 2), dtype=np.float32)
to_return_label[:, 0] = _sos_symbol
for i in range(_max_length + 1):
if isinstance(self.decoder_inference_helper, TritonInferenceHelper):
result = self.decoder_inference_helper.infer(_need_tensor_check=False,
INPUT__0=to_return_label,
INPUT__1=_memory)
m_label = result['OUTPUT__0']
else:
raise NotImplementedError(
f"{self.decoder_inference_helper.type_name} helper for captcha 1 recognize decoder not implement")
m_probability = softmax(m_label, axis=-1)
m_next_word = np.argmax(m_probability, axis=-1)
m_max_probs = np.max(m_probability, axis=-1)
if m_next_word[:, i] == _eos_symbol:
break
to_return_label[:, i + 1] = m_next_word[:, i]
probabilities[:, i + 1] = m_max_probs[:, i]
return to_return_label.squeeze(0), probabilities.squeeze(0)
def execute(self, _image):
to_return_result = {
'text': '',
'probability': 1.0
}
bgr_image = force_convert_image_to_bgr(_image)
rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
candidate_image = cv2.resize(rgb_image, (self.target_width, self.target_height))
if isinstance(self.encoder_inference_helper, TritonInferenceHelper):
result = self.encoder_inference_helper.infer(_need_tensor_check=False,
INPUT__0=candidate_image.astype(np.float32))
memory = result['OUTPUT__0']
else:
raise NotImplementedError(
f"{self.encoder_inference_helper.type_name} helper for captcha 1 recognize encoder not implement")
candidate_label_length = 10
# sos:2 eos:1 pad:0 unk:3
label, label_probability = self.predict(memory, candidate_label_length, 2, 1, 0)
total_probability = 0
for m_label, m_probability in zip(label, label_probability):
# labels below 4 are unk/sos/eos/pad, so skip them
if m_probability >= self.probability_threshold and m_label >= 4:
to_return_result['text'] += self.keys[m_label - 4]
total_probability += m_probability
if to_return_result['text']:
to_return_result['probability'] = total_probability / len(to_return_result['text'])
else:
to_return_result['probability'] = 0.0  # avoid dividing by zero when nothing was recognized
return to_return_result
if __name__ == '__main__':
from argparse import ArgumentParser
from pprint import pprint
ag = ArgumentParser('Captcha Recognize Example')
ag.add_argument('-i', '--image_path', dest='image_path', type=str, required=True, help='local image path')
ag.add_argument('-u', '--triton_url', dest='triton_url', type=str, required=True, help='triton url')
ag.add_argument('-p', '--triton_port', dest='triton_port', type=int, default=8001, help='triton grpc port')
args = ag.parse_args()
img = cv2.imread(args.image_path)
master_handler = Captcha1RecognizeWithMaster({
'name': 'triton',
'triton_url': args.triton_url,
'triton_port': args.triton_port}, 'keyboard', True)
pprint(master_handler.execute(img))
|
sterben25/Savior
|
Operators/ExampleTextRecognizeOperator/__init__.py
|
from Operators.ExampleTextRecognizeOperator.TextRecognizeOperator import GeneralCRNN
from Operators.ExampleTextRecognizeOperator.CaptchaRecognizeOperator import Captcha1RecognizeWithMaster
|
sterben25/Savior
|
Utils/ServiceUtils.py
|
import asyncio
import time
from collections import OrderedDict
from celery import exceptions as celery_exceptions
from Deployment.server_config import SUBTASK_EXECUTE_TIME_LIMIT_SECONDS, TASK_QUEUE
from Utils.DAG import DAG
from Utils.Exceptions import CustomException, \
RetryExceedLimitException, DAGAbortException
from Utils.misc import get_uuid_name
class ServiceResult:
def __init__(self, _version, _mock_result):
self.service_version = _version
self.start_time = None
self.cost_time = 0
self.return_code = 0
self.return_message = 'success'
self.service_result = _mock_result.copy()
def get_result_info(self):
service_status = {
'cost_time(seconds)': self.cost_time,
'return_code': self.return_code,
'return_message': self.return_message,
'service_version': self.service_version
}
return service_status, self.service_result
def __enter__(self):
self.start_time = time.time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.cost_time = int(time.time() - self.start_time)
def finish(self, _result):
for m_key, m_value in _result.items():
self.service_result[m_key] = m_value
def fail(self, _exception):
if isinstance(_exception, CustomException):
self.return_code, self.return_message = _exception.format_exception()
else:
self.return_code, self.return_message = -1, str(_exception)
class ServiceTask:
"""
用于在interface层调用service
"""
service_version = 'default version'
service_name = 'default_service'
mock_result = dict()
require_field = set()
binding_service = None
def __init__(self, _dag: DAG, _count_down=0, _task_name=None, _is_mock=False, _retry_count=5):
"""
初始化task
Args:
_dag: workflow的dag
_count_down: 启动时间(秒),启动时间如果非0,则会放入后台等待设定的秒数后进行,不会回传结果。
_task_name: 任务名称
_is_mock: 是否是mock状态
"""
self.filled_field = dict()
if _task_name is not None:
self.task_name = _task_name
else:
self.task_name = self.service_name
self.task_id = get_uuid_name()
self.is_mock = _is_mock
self.count_down = _count_down
if self.is_mock:
self.service_version = 'Mock Version'
if not self.is_mock:
assert self.binding_service is not None, f'{self.task_name} not bind to service'
self.start_time = time.time()
self.task = None
self.create_task()
self.dag = _dag
self.retry_count = _retry_count
def create_task(self):
if self.filled_field.keys() == self.require_field:
self.task = asyncio.create_task(self.execute())
self.dag.create_task_node(self)
def __await__(self):
assert self.task is not None, 'task not setup success'
task_result = yield from self.task.__await__()
if task_result.return_code != 0:
raise DAGAbortException(
f'service {self.service_name} cannot go on',
self.service_name,
task_result.return_message,
)
return task_result
async def get_request_data(self):
to_return_request_data = dict()
all_missed_field = []
for m_field in self.require_field:
if m_field not in self.filled_field:
all_missed_field.append(m_field)
if len(all_missed_field):
raise AssertionError(
f'service {self.service_name} parameter missing:[ {",".join(all_missed_field)} ]')
all_dependent_task = []
for m_field_name, m_field_value in self.filled_field.items():
if isinstance(m_field_value, tuple):
m_task, m_task_field_name = m_field_value
all_dependent_task.append((m_task, m_task_field_name, m_field_name))
self.dag.create_task_dependency(m_task, m_task_field_name, self, m_field_name)
else:
to_return_request_data[m_field_name] = m_field_value
self.dag.create_value_dependency(m_field_value, self, m_field_name)
if len(all_dependent_task):
all_dependent_task_results = await asyncio.gather(
*[_[0] for _ in all_dependent_task]
)
for m_result, (m_task, m_task_field_name, m_field_name) in zip(all_dependent_task_results,
all_dependent_task):
m_value = m_result.service_result[m_task_field_name]
m_value = m_value if not isinstance(m_value, bytes) else m_value.decode('utf-8')
to_return_request_data[m_field_name] = m_value
return to_return_request_data
def add_dependency_from_value(self, _field_name, _field_value):
assert _field_name in self.require_field, f'{_field_name} DONT NEED in {self.service_name}'
available_type = [int, float, str, dict, list]
assert any([isinstance(_field_value, m_type) for m_type in available_type]), \
f'field value type is {type(_field_value)},which is not support now.'
self.filled_field[_field_name] = _field_value
self.create_task()
def add_dependency_from_task(self, _field_name, _task, _task_field_name):
assert _field_name in self.require_field, f'{_field_name} DONT NEED in {self.service_name}'
assert _task_field_name in _task.mock_result, f'Task {_task.service_name} DONT HAVE "{_task_field_name}"'
self.filled_field[_field_name] = (_task, _task_field_name)
self.create_task()
async def execute(self):
# 如果说有task name则算子的名称按task name来定
if self.task_name is None:
self.task_name = self.service_name
with ServiceResult(self.service_version, self.mock_result) as to_return_result:
if self.is_mock:
return to_return_result
request_data = await self.get_request_data()
# record the actual start time
self.start_time = time.time()
try:
celery_task_async_result = self.binding_service.apply_async(
kwargs=request_data,
countdown=self.count_down,
queue=TASK_QUEUE,
)
if self.count_down == 0:
start_time = time.time()
api_result_dict = None
while time.time() - start_time <= SUBTASK_EXECUTE_TIME_LIMIT_SECONDS:
if celery_task_async_result.ready():
api_result_dict = celery_task_async_result.get()
break  # stop polling once the result is in; the original kept looping until the time limit
else:
await asyncio.sleep(0.1)
if api_result_dict is None:
celery_task_async_result.revoke(terminate=True,)
raise celery_exceptions.TimeoutError
self.dag.set_task_node_result(self, api_result_dict)
to_return_result.finish(api_result_dict)
except (celery_exceptions.TimeoutError, celery_exceptions.TimeLimitExceeded) as retry_exception:
self.retry_count -= 1
if self.retry_count > 0:
return await self.execute()
else:
to_return_result.fail(RetryExceedLimitException(f'{self.service_name} retried exceed limit.'))
except Exception as e:
to_return_result.fail(e)
return to_return_result
async def wait_and_compose_all_task_result(*tasks):
"""
打包所有task的结果
Args:
*tasks: 所有的task
Returns: 所有task的结果的detail
"""
to_return_result = OrderedDict()
all_task_results = await asyncio.gather(*tasks)
for m_task, m_task_result in zip(tasks, all_task_results):
to_return_result[m_task.task_name] = m_task_result.service_result
return to_return_result
|
sterben25/Savior
|
Deployment/DispatchInterfaces/RecaptchaInterface.py
|
from typing import Optional
from fastapi import APIRouter, Form
from Deployment.ConsumerServices.GeneralService import DownloadImageFromURLServiceTask, ParseImageFromBase64ServiceTask
from Deployment.ConsumerServices.RecaptchaService import Captcha1RecognizeServiceTask
from Deployment.server_config import IS_MOCK
from Utils.DAG import DAG
from Utils.Exceptions import InputParameterAbsentException, InputParameterAbnormalException
from Utils.InterfaceUtils import base_interface_result_decorator, DictInterfaceResult
router = APIRouter()
@router.post('/captcha1_recognize')
@base_interface_result_decorator()
async def captcha1_recognize(
image_url: Optional[str] = Form(None),
image_base64: Optional[str] = Form(None),
):
dag = DAG()
# if the mock data gets long, move it to another file and import it here
mock_result = {
'text': '',
}
with DictInterfaceResult(mock_result) as to_return_result:
if IS_MOCK:
return to_return_result
if not (image_url or image_base64):
raise InputParameterAbsentException('at least one of image_url or image_base64 is required')
if image_url and image_base64:
raise InputParameterAbnormalException('only one of image_url or image_base64 may be provided')
if image_url:
download_image_task = DownloadImageFromURLServiceTask(_dag=dag)
download_image_task.add_dependency_from_value('_image_url', image_url)
image_info = (await download_image_task).service_result['image_info']
elif image_base64:
decode_image_task = ParseImageFromBase64ServiceTask(_dag=dag)
decode_image_task.add_dependency_from_value('_base64_string', image_base64)
image_info = (await decode_image_task).service_result['image_info']
captcha1_recognize_task = Captcha1RecognizeServiceTask(_dag=dag)
captcha1_recognize_task.add_dependency_from_value('_image_info', image_info)
recognize_result = (await captcha1_recognize_task).service_result['text']
to_return_result.add_sub_result('text', recognize_result)
return to_return_result
|
sterben25/Savior
|
Operators/ExampleImageStringParseOperator/ImageParseFromBase64.py
|
import base64
import os
import re
import traceback as tb
from io import BytesIO
from PIL import Image
from Operators.DummyOperator import DummyOperator
from Utils.Exceptions import CustomException, ConsumerAlgorithmUncatchException
from Utils.Storage import CloudObjectStorage
from Utils.misc import get_uuid_name, get_date_string, convert_pil_to_numpy
class ImageParseFromBase64(DummyOperator):
def __init__(self, _is_test):
super().__init__(_is_test)
self.bucket_name = 'base64_images'
def execute(self, _base64_string, _oss_handler: CloudObjectStorage = None):
try:
base64_data = re.sub('^data:image/.+;base64,', '', _base64_string)
byte_data = base64.b64decode(base64_data)
image_data = BytesIO(byte_data)
decoded_image = Image.open(image_data)
image_c = len(decoded_image.getbands())
image_h = decoded_image.height
image_w = decoded_image.width
decoded_image_np = convert_pil_to_numpy(decoded_image)
saved_path = ''
target_bucket_name = ''
if _oss_handler:
file_name = get_uuid_name()
oss_path = os.path.join(get_date_string(), file_name)
saved_path = _oss_handler.upload_image_file(self.bucket_name, oss_path, decoded_image,
_enable_compress=False)
target_bucket_name = self.bucket_name
to_return_result = dict()
to_return_result['bucket_name'] = target_bucket_name
to_return_result['saved_path'] = saved_path
to_return_result['image_height'] = image_h
to_return_result['image_width'] = image_w
to_return_result['image_channel'] = image_c
to_return_result['image'] = decoded_image_np
return to_return_result
except CustomException as ce:
raise ce
except Exception as e:
raise ConsumerAlgorithmUncatchException(tb.format_exc())
if __name__ == '__main__':
import cv2
import argparse
ag = argparse.ArgumentParser('Base64 String Decode To Image Test')
ag.add_argument('--file', type=str, required=True, help='path to the text file containing the base64 string')
args = ag.parse_args()
base64_string_file = args.file
image_decode_op = ImageParseFromBase64(True)
with open(base64_string_file, mode='r', encoding='utf-8') as to_read:
base64_string = to_read.read()
decode_result = image_decode_op.execute(base64_string, None)
cv2.imshow('decode_image', decode_result['image'])
cv2.waitKey(0)
|
sterben25/Savior
|
Operators/ExampleImageStringParseOperator/__init__.py
|
from Operators.ExampleImageStringParseOperator.ImageParseFromBase64 import ImageParseFromBase64
|
sterben25/Savior
|
Deployment/ConsumerServices/RecaptchaService.py
|
from Deployment.ConsumerWorker import celery_worker_app
from Operators.ExampleImageStringParseOperator import ImageParseFromBase64
from Operators.ExampleTextRecognizeOperator import Captcha1RecognizeWithMaster
from Deployment.server_config import RECAPTCHA_TRITON_URL, RECAPTCHA_TRITON_PORT, IS_TEST
from Utils.ServiceUtils import ServiceTask
from Utils.Storage import get_oss_handler
captcha1_recognize_handler = Captcha1RecognizeWithMaster({
'name': 'triton',
'triton_url': RECAPTCHA_TRITON_URL,
'triton_port': RECAPTCHA_TRITON_PORT}, 'keyboard', IS_TEST
)
base64_image_decode_op = ImageParseFromBase64(IS_TEST)
@celery_worker_app.task(name="ConsumerServices.RecaptchaService.captcha1_recognize")
def captcha1_recognize(_image_info):
"""
Recognition for captcha type 1
Args:
_image_info: the full image to recognize
Returns: the recognition result for the text region
"""
to_return_result = {'text': ''}
oss_handler = get_oss_handler()
img = oss_handler.download_image_file(
_image_info['bucket_name'],
_image_info['path']
)
recognize_result = captcha1_recognize_handler.execute(img)
to_return_result['text'] = recognize_result['text']
return to_return_result
class Captcha1RecognizeServiceTask(ServiceTask):
service_version = 'v1.0.20210524'
service_name = 'captcha1_recognize'
mock_result = {
'text': '',
}
require_field = {
"_image_info",
}
binding_service = captcha1_recognize
|
smobarry/VideoGame_326Project
|
oldfiles/project_salahmethod.py
|
def user_information():
""" (Salah)Prompts user for inforamtions and passes it to the recommendation method
Returns:
A dictionary of users information
Side effects:
appends the games a user has (of the same genre) into a list
"""
name = input("What is your name? ")
age = input("Hello How old are you? ")
preferred_genre = input("What is your preferred game genre? ")
owned_games_list = []
#ask for games owned for the particular genre, so that we don't recommend a game he/she already owns
console =input("What console do you use? ")
response = input("Do you have any games for that genre? y/n ")
response = response.lower()
if response == "y":
owned_games = input("What games do you have for that genre? ")
owned_games_list.append(owned_games)
user_info = {"name": name, "Console": console, "age": age, "Preferred genre": preferred_genre, "games owned": owned_games_list}
recommend_games(user_info)
def recommend_games(user_info):
# iterate over key/value pairs (the original nested loop printed every key against every key)
for key, value in user_info.items():
#if user_info[1] <= 17:
print(key, value, sep=",")
"""if (age >= 17) and (ESRB_rat in {'E', 'T', 'M'}):
print("You can play games You can play games with an ESRB rating of E,T,M.")
oktobuy = True
elif ( age >= 13) and (ERSB_rat in {'E', 'T'}):
print ("You can play games with an ESRB rating of E, T.")
oktobuy = True
elif(age >= 6) and (ESRB_rat == 'E'):
print("You can only play games with an ESRB rating of E.")
oktobuy = True
"""
if __name__ == "__main__":
user_information()
|
smobarry/VideoGame_326Project
|
oldfiles/videogamestest.py
|
"""
Names:
<NAME>
<NAME>
<NAME>
<NAME>
Assignment: Final Project Check In 1
INST 326
"""
import argparse
import csv
import string
import random
import pandas as pd
def inputdata():
"""
(Scott) I created three csv files for the past game store data that will
be used to tailor game recommendations. This will be used to recommend games of the
same genre as the user has bought in the past.
Later we will fill filter down games they can actually own due to their
age and game console type they own.
"""
gametest = pd.read_csv("video games project - games.csv")
gametest.set_index('id', inplace=True)
print(gametest)
games = list(csv.DictReader(open("video games project - games.csv")))
#print(games)
df_users = pd.read_csv("video games project - users.csv")
df_users.set_index('id', inplace=True)
print(df_users)
users = list(csv.DictReader(open("video games project - users.csv")))
#print(users)
ownedgamestest = pd.read_csv("video games project - ownedgames.csv")
print(ownedgamestest)
ownedgames = list(csv.DictReader(open("video games project - ownedgames.csv")))
return games, users, ownedgames, df_users
def console_v5():
"""
(Chidima)
Creates an object containing the terminal inputs of the user as attributes of the object
Args:
None
Side effects:
Prints out the type of game console chosen by the user, either Xbox or PlayStation
"""
parser = argparse.ArgumentParser()
parser.add_argument('console', choices=['Xbox', 'PlayStation'],
help="user must choose between one of the two consoles")
args = parser.parse_args()
c = args.console
if c == 'Xbox':
print(f'Gaming console entered is {args.console}')
elif c == 'PlayStation':
print(f'Gaming console entered is {args.console}')
def get_userid_from_name(users, name):
"""
From the name we will get a User id
"""
byname = {}
for d in users:
n = d['Name']
byname[n] = d
user = byname[name]
return user['id']
def get_userid_from_name_df(userstest, name):
"""
From the name we will get a User id
"""
return userstest[userstest.Name == name]
def get_user_age_from_userid(users, userid):
"""
From the user id we will get an age.
I created a dictionary to find the right user records.
"""
byid = {}
for d in users:
n = d['id']
byid[n] = d
user = byid[userid]
return user['Age']
def get_games_from_userid(owned, userid):
"""
From the name we will get the games bought by this person
"""
games = []
for d in owned:
n = d['User_id']
if n == userid:
games.append(d['Game_id'])
return games
def get_genres_from_games(games, their_games):
"""
From the games we will get the same genres
"""
genres = set()
for d in games:
n = d['id']
if n in their_games:
genres.add(d['Genre'])
return genres
def age_limitcheck(age, ESRB_rat):
"""
This script will be limiting age of potential players
This function will check for the age limit of the user and approve the game based on
the ESRB rating.
Args:
age(int)
ESRB(str)
"""
oktobuy = False
if (age >= 17) and (ESRB_rat in {'E', 'T', 'M'}):
print("You can play games with an ESRB rating of E, T, M.")
oktobuy = True
elif (age >= 13) and (ESRB_rat in {'E', 'T'}):
print("You can play games with an ESRB rating of E, T.")
oktobuy = True
elif (age >= 6) and (ESRB_rat == 'E'):
print("You can only play games with an ESRB rating of E.")
oktobuy = True
return oktobuy
def main():
games, users, owned_games, df_users = inputdata()
name = input("Please enter your name : ")
their_userid = get_userid_from_name(users, name)
their_age = get_user_age_from_userid(users, their_userid)
df_userid = get_userid_from_name_df(df_users, name)
their_games = get_games_from_userid(owned_games, their_userid)
their_genres = get_genres_from_games(games, their_games)
print(repr(their_userid))
print(df_userid)
print(repr(their_age))
print(repr(their_games))
print(repr(their_genres))
for game in games:
print(repr(game))
for user in users:
print(repr(user))
for purchase in owned_games:
print(repr(purchase))
#(Scott)
# from the name we will get an age (done)
# from the name we will find a genre bought by this person(done)
# from the genre we will find a list of possible games to recommend
# from the age of the person we will only keep the games we can recommend
# we will display the recommendation
#UTA max
#df = pandas.DataFrame(holder)
#Exported to a csv for later use
#df.to_csv('shoe_dict_exported.csv', index = False )
if __name__ == "__main__":
main()
|
smobarry/VideoGame_326Project
|
test_videogamessales.py
|
# <NAME>
# INST 326
"""
This is how you run these tests:
python -m pytest test_videogamessales.py
"""
import pytest
import pandas
from videogamessales import make_games_clean, videogames_more_like_this, videogames_filtering, videogames_sampling, suggest_games
def startup():
"""
A common set of values for these tests.
"""
my_df = pandas.DataFrame({
"Name": ["Game A", "Game B", "Game C", "Game D", "Game E", "Game F"],
"Platform": ["XB", "Wii", "XB", "PS", "XB", "PS"],
"Genre": ["Racing", "Sports", "Shooter", "Action", "Action", "Racing"],
"Rating": ["E", "E", "M", "T", "T", "E"],
})
my_titles = [
'Game A',
'Game D',
'Game X',
'Game Y']
my_platforms = ['XB', 'PS']
my_age = 16
num_suggestions = 2
return (my_df, my_titles, my_platforms, my_age, num_suggestions)
def test_videogames_more_like_this():
"""
Testing the happy path for the more like this.
"""
(my_df, my_titles, my_platforms, my_age, num_suggestions) = startup()
found_genres = videogames_more_like_this(my_df, my_titles)
assert set(found_genres) == set(["Racing", "Action"])
def test_videogames_filtering():
"""
Testing the happy path.
"""
(my_df, my_titles, my_platforms, my_age, num_suggestions) = startup()
found_genres = pandas.Series(["Racing", "Action"])
df_can_suggest = videogames_filtering(my_df, found_genres, my_platforms, my_age, my_titles)
assert set(df_can_suggest['Name'].to_list()) == set(['Game E', 'Game F'])
def test_videogames_sampling():
"""
    Testing the happy path. Because the sampling is random, the random
    state is pinned to a constant so the result is reproducible.
"""
df_can_suggest = pandas.DataFrame({
"Name": ["Game A", "Game B", "Game C", "Game D", "Game E", "Game F"],
"Platform": ["XB", "Wii", "XB", "PS", "XB", "PS"],
"Genre": ["Racing", "Sports", "Shooter", "Action", "Action", "Racing"],
"Rating": ["E", "E", "M", "T", "T", "E"],
})
num_suggestions = 2
suggestions = videogames_sampling(df_can_suggest, num_suggestions, random_state=1)
assert set(suggestions) == set(['Game B', 'Game C'])
def test_suggest_games():
"""
It tests calling the three sub methods all at once.
"""
(my_df, my_titles, my_platforms, my_age, num_suggestions) = startup()
    suggestions = suggest_games(
my_df,
my_titles,
my_platforms,
my_age,
num_suggestions,
)
    assert set(suggestions) == set(['Game E', 'Game F'])
|
smobarry/VideoGame_326Project
|
videogame_generator_GUI_beta.py
|
import argparse
import os
import sys
import tkinter as tk
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
import csv
import pandas
import videogamessales
class game_Generator:
def __init__(self):
"""The super contructor class containing the window frame for the GUI,
lists for consoles and games and pulling the csv file from videogamesales.py
"""
self.root = tk.Tk()#refers to the window
self.root.title("Videogame Suggestion Generator")
self.root.geometry("1000x400")
self.v = tk.IntVar()
#self.consoles =[""]#this needs to be populated first
#self.titles = [""]#this needs to be populated first
self.consoles =['']#for testing purposes
self.titles= ['']#for testing purposes
self.fname = 'Video_Games_Sales_as_at_22_Dec_2016.csv'
self.df = videogamessales.make_games_clean(self.fname)
self.gameList=['']
#self.games=["GTA, Super Mario"]#list is only here for testing purposes. will be commented out/removed/updated later
#self.root.mainloop()
def run_tk(self):
"""Allows for GUI components to run continuosly until exited by user
"""
self.root.mainloop()
def user_name(self):
"""creates a label and entry for user to enter thier name
"""
self.name_var = tk.StringVar()
#self.name_var.set("Enter your name here")
tk.Label(self.root,
text="Enter your name:"
).grid(row=0, column=0)
self.entry_name = tk.Entry(self.root, textvariable = self.name_var, bd =5).grid(row=0, column=1, padx=1)
def console(self):
"""creates an option menu for users to select a game console of thier choice
"""
#self.v.set(1)#Default value i.e. Playstation
tk.Label(self.root,
text="Owned gaming console:"
).grid(row=1, column=0)
# self.rbtn_Play = tk.Radiobutton(self.root,
# text="PlayStation",
# #padx = 20,
# variable=self.v,
# value=1).grid(row=1, column=1)
# self.rbtn_Xbox = tk.Radiobutton(self.root,
# text="Xbox",
# #padx = 20,
# variable=self.v,
# value=2).grid(row=1, column=2)
OPTIONS = [
"3DS", "DC", "GBA", "GC", "PC",
"PS", "PS2", "PS3", "PS4", "PSP", "PSV",
"Wii", "WiiU", "XB", "X360", "XOne"
] #list of game consoles
self.gc = StringVar()
self.gc.set(OPTIONS[0]) # default value
w = OptionMenu(self.root, self.gc,*OPTIONS).grid(row=1, column=1)
#self.root.mainloop()
def list_of_games(self):
"""generates a list of games by pulling the name of games available on the csv file and appending it to a list
"""
with open("Video_Games_Sales_as_at_22_Dec_2016.csv") as gameFile:
self.csv_reader = csv.DictReader(gameFile, delimiter=',')
for lines in self.csv_reader:
self.gameList.append(lines['Name'])
return self.gameList
def user_age(self):
"""creates a label and entry box for user to enter in thier age
"""
self.age_var = tk.IntVar()
#self.age_var.set(6)
self.lbl_age = tk.Label(self.root, text="Enter your age").grid(row=2, column=0)
self.entry_age = tk.Entry(self.root, textvariable = self.age_var, bd =5).grid(row=2, column=1, padx=1)
#entry_age.pack(side = RIGHT)
#Notes: Cannot use grid and pack in same root/window
#pass
def game_owned(self):
"""creates a label and combobox of games from the csv file for the user to choose from
"""
self.gameType_var = tk.StringVar()
self.lbl_game = tk.Label(self.root, text = "Owned games: ").grid(row=3, column=0,padx=5, pady=5)
self.combo_games = ttk.Combobox(self.root, textvariable= self.gameType_var , values=self.gameList).grid(row=3,column=1, padx=1, pady=1)
#self.entry_gameType = tk.Entry(self.root, textvariable= self.gameType_var, bd =5)
def gen_lbl(self):
"""creates a label that will display the recommendations for the games
"""
#self.recommend = tk.StringVar()
#self.recommend.set("Recommendations will be displayed here")
self.label_gen = Label(self.root, text="Recommendations will be displayed here")
self.label_gen.grid(row=10, column= 1, padx=5, pady=5)
def record_entries(self):
"""pulls the entries from the name entry box, console option menu, game selection combobox and age entry box
"""
        self.name_input = self.name_var.get()#retrieves the value in the name textbox
self.game_console = self.gc.get()#retrieves console input from the user
self.game_input = self.gameType_var.get()#retrieves game input from the user
self.age_input = self.age_var.get()#retrieves age input from the user
#videogamessales.my_age = self.age_input
def add_titles(self):
"""uses try/except/finally to force code to run, even if user inputs nothing
"""
#self.record_entries()#just pre-loads entries inputed by user
try:
self.record_entries()
#self.gameType_var.get()
#self.titles.append(self.game_input)
#self.titles.append(self.game_input)
        except Exception:
print("List cannot be empty!")
finally:
self.titles.append(self.game_input)
#self.recommend.set(self.titles)
def add_consoles(self):
"""appends current selection of consoles to console list by force using try/except/finally
"""
try:
self.record_entries()
#self.gameType_var.get()
#self.titles.append(self.game_input)
#self.titles.append(self.game_input)
        except Exception:
print("List cannot be empty!")
finally:
self.consoles.append(self.game_console)
#self.record_entries()#just pre-loads entries inputed by user
# if self.game_console == 1:
# self.consoles.append("PS")
# else:
#self.consoles.append(self.game_console)
#self.recommend.set(self.consoles
def show_titles_lbl(self):
"""provides preview of list of games added to a list
"""
self.record_entries()
#self.games = tk.StringVar()
#self.games.set("List will be displayed here")
self.lbl_games = Label(self.root, text="\n".join(self.titles)).grid(row=3, column= 4, padx=5, pady=5)
#self.lbl_games.config(text='')
#self.lbl_games.config(test=("\n".join(self.gameList)))
def show_consoles_lbl(self):
"""provides preview of list of consoles added to a list
"""
self.record_entries()
#self.games = tk.StringVar()
#self.games.set("List will be displayed here")
self.lbl_console = Label(self.root, text=", ".join(self.consoles)).grid(row=1, column= 4, padx=5, pady=5)
#self.lbl_games.config(text='')
#self.lbl_games.config(test=("\n".join(self.gameList)))
def btn_add_titles(self):
"""creates the "add games" button which calls on the add_titles() method
"""
btn_gen = tk.Button(self.root, text = "Add Games", command = self.add_titles).grid(row=3, column=2, padx=5, pady=5)
def btn_show_titles(self):
"""creates the preview button which calls on the show_titles_lbl() method
"""
self.record_entries()
"""creates the preview button which calls on the show_titles_lbl() method
"""
btn_gen = tk.Button(self.root, text = "Preview Added Titles", command = self.show_titles_lbl).grid(row=3, column=3, padx=5, pady=5)
def btn_add_consoles(self):
"""creates the preview button which calls on the add_consoles() method
"""
btn_gen = tk.Button(self.root, text = "Add Console", command = self.add_consoles).grid(row=1, column=2, padx=5, pady=5)
def btn_show_consoles(self):
"""creates the preview button which calls on the show_consoles_lbl() method
"""
self.record_entries()
btn_gen = tk.Button(self.root, text = "Preview Added Consoles", command = self.show_consoles_lbl).grid(row=1, column=3, padx=5, pady=5)
def generate(self):
"""generates output of game suggestions
"""
#try:
# if self.name_var != "Name":
# if self.age_var.get() > 4:
# messagebox.showinfo("Age", "Age is {}".format(self.age_input)) #just to test code. not final edit
# pass
# messagebox.showinfo("Name", "Name is {}".format(self.name_input)) #not final edit
try:
self.record_entries()
self.label_gen.configure(text="{}, we recommend ".format(self.name_input)+
str(videogamessales.suggest_games(self.df,self.titles, self.consoles, self.age_input,3))
+ " to play next")#for list or csv file with game recommendations
pass
except AttributeError:
messagebox.showinfo("Information Needed", "Please make sure all fields are filled")
finally:
pass
def btn_generate(self):
"""creates a button executing the generate method
"""
self.btn_gen = tk.Button(self.root, text = "Generate", command = self.generate, state=NORMAL).grid(row=5, column=1, padx=5, pady=5)
#pass
#def switch_settings(self):
#if not self.titles and self.consoles:
#self.btn_gen["state"] = tk.DISABLED
def main():
"""method calls on vital portions of code to be executed
"""
#methods to function the buttons
gen = game_Generator()
gen.list_of_games()
gen.user_name()
gen.console()
gen.user_age()
gen.game_owned()
#methods for labels
gen.show_titles_lbl()
gen.gen_lbl()
gen.show_consoles_lbl()
#gen.record_entries()
#methods for buttons
gen.btn_add_consoles()
gen.btn_add_titles()
gen.btn_show_titles()
gen.btn_generate()
gen.btn_show_consoles()
#gen.switch_settings()
gen.run_tk()
if __name__ == "__main__":
main()
|
smobarry/VideoGame_326Project
|
videogamessales.py
|
#<NAME>
#<NAME>
#Directory ID: smobarry
#INST 326
"""
In this module the video game sales data is used to provide a suggestion for
video games that a user might want to buy.
We downloaded a pre-scraped csv file from:
https://www.kaggle.com/rush4ratio/video-game-sales-with-ratings
Documentation for how to use pandas start at:
https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf
For more documentation use this website:
https://pandas.pydata.org/docs/reference/frame.html
"""
import pandas as pd
# To get the minimum age for each ESRB rating use this website
# https://en.wikipedia.org/wiki/Entertainment_Software_Rating_Board
ESRB_MIN_AGE = {'E': 4, 'T': 13, 'M': 17, 'E10+': 10, 'EC': 2, 'K-A': 6, 'RP': 18, 'AO': 18}
# This is a plain Python dictionary of minimum ages for each rating in the dataset
"""
E 3991
T 2961
M 1563
E10+ 1420
EC 8
K-A 3
RP 3
AO 1
"""
def make_games_clean(fname):
"""
The method make_games_clean() creates a dataframe from the cleaned video games data file.
This throws away data we can't use.
We downloaded the video games sales csv file from Kaggle
and checked it into GitHub in the same directory as this script.
The input csv file needs at least these 4 columns: ['Name', 'Platform', 'Genre', 'Rating']
Args:
fname (str): File name of csv file.
Returns:
mydf (pandas.DataFrame): the cleaned video games data.
(Written by <NAME>)
Driver: <NAME>
Navigator: <NAME>
"""
mydf = pd.read_csv(fname)
#print(f'mydf.head() = {mydf.head(10)}')
#print(mydf.describe())
#print(mydf.info())
mydf = mydf[['Name', 'Platform', 'Genre', 'Rating']]
# We kept only the columns that we wanted.
# Indexing using a list means the list of four columns to keep from the 16 original columns.
#print(f'mydf.head() = {mydf.head(10)}')
rows_to_keep = mydf['Rating'].notna()
#print(f'rows_to_keep = {rows_to_keep}')
mydf = mydf[rows_to_keep]
#print(mydf.info())
# I just removed all rows with NaN as the rating. The remaining rows all have ratings.
#print(f'mydf.head() = {mydf.head(10)}')
#print(f'mydf.tail() = {mydf.tail(10)}')
return mydf
def videogames_more_like_this(mydf, my_titles):
"""
The videogames_more_like_this() method returns the genres the user likes.
Args:
mydf (pandas.DataFrame): the cleaned video games data.
my_titles (list of str): the titles that the user has played and liked
but not to be suggested.
Returns:
found_genres (list of str): the found genres in games that the user likes.
(Written by <NAME>)
Driver: <NAME>
Navigator: <NAME>
"""
mydf_found_games = mydf[mydf['Name'].isin(my_titles)]
# games found in the database from the titles that were given from the user.
#print(f'mydf_found_games = {mydf_found_games}')
#found_titles = mydf_found_games['Name'].value_counts()
#found_platforms = mydf_found_games['Platform'].value_counts()
found_genres = mydf_found_games['Genre'].value_counts().index.tolist()
#found the genres in the dataset of games the user liked.
#found_ratings = mydf_found_games['Rating'].value_counts()
#print(f'found_titles = {found_titles}')
#print(f'found_platforms = {found_platforms}')
#print(f'found_genres = {found_genres}')
#print(f'found_ratings = {found_ratings}')
return found_genres
def videogames_filtering(mydf, found_genres, my_platforms, my_age, my_titles):
"""
This method filters the video game data frame rows by this criteria:
1. Only keep genres of games the user likes.
2. Only keep platforms the user wants.
3. Only keep the ratings the user can buy due to their age restrictions.
4. Don't keep the titles the user gave they already like.
Args:
mydf (pandas.DataFrame): the cleaned video games data.
found_genres (list of str): the found genres in games that the user likes.
my_platforms (list of str): the list of interesting platforms available
in the dataset.
my_age (int): the age of the user in years.
my_titles (list of str): the list of video game titles not to be
considered in the suggester.
Returns:
df_can_suggest (pandas.DataFrame): the subset of videogames that can be
suggested to the user.
(Written by <NAME>)
Driver: <NAME>
Navigator: <NAME>
"""
my_ratings = [ rating for rating in
ESRB_MIN_AGE if
ESRB_MIN_AGE[rating] <= my_age]
# These are the ratings that the user can buy for their age.
#print(f'my_ratings = {my_ratings}')
#print(f'head of mydf_games = {mydf.head()}')
#print(f'describe Ratings = {mydf['Rating'].describe()}')
#print(f'value_counts = {mydf['Rating'].value_counts()}')
df_can_suggest = mydf[
(~ mydf['Name'].isin(my_titles))
& mydf['Genre'].isin(found_genres)
& mydf['Platform'].isin(my_platforms)
& mydf['Rating'].isin(my_ratings)
]
return df_can_suggest
def videogames_sampling(df_can_suggest, num_suggestions, random_state=None):
"""
This method samples the games that survived the filtering.
We now choose a few video game titles to suggest.
Args:
df_can_suggest (pandas.DataFrame): the subset of videogames that can be
suggested to the user.
num_suggestions (int): the number of suggestions displayed to the user.
random_state (int): Leave set to None except for unit test.
Returns:
list of str: the suggestions of titles for the GUI.
(Written by <NAME>)
Driver: <NAME>
Navigator: <NAME>
"""
# print(f'df_can_suggest.count() = {df_can_suggest.count()}')
print(f'df_can_suggest = \n{df_can_suggest}')
    #df_suggestions = df_can_suggest.head(num_suggestions)
    # cap n so sample() cannot raise when fewer games survive the filtering
    df_suggestions = df_can_suggest.sample(n=min(num_suggestions, len(df_can_suggest)), random_state=random_state)
# The next statement extracts a python list of suggested titles
suggestions = df_suggestions['Name'].tolist()
return suggestions
def suggest_games(
mydf,
my_titles,
my_platforms,
my_age,
num_suggestions,
):
"""
    The method suggest_games() implements a video game suggester that returns
    recommendations to the user based on the established criteria.
Args:
mydf (pandas.DataFrame): the cleaned video games data.
my_titles (list of str): the titles of video games that the user has
played and liked but not to be suggested.
my_platforms (list of str): the list of interesting platforms available
in the dataset.
my_age (int): the age of the user in years.
num_suggestions (int): the number of suggestions displayed to the user.
Returns:
list of str: the game titles that are suggested to the user.
(Written by <NAME>)
Driver: <NAME>
Navigator: Salah Waji
"""
# the value counts method is like GroupBy but simpler.
# It groupby's the columns giving the counts.
#avail_titles = mydf['Name'].value_counts()
#avail_platforms = mydf['Platform'].value_counts()
#avail_genres = mydf['Genre'].value_counts()
#avail_ratings = mydf['Rating'].value_counts()
#print(f'avail_titles = \n{avail_titles}')
#print(f'avail_platforms = \n{avail_platforms}')
#print(f'avail_genres = \n{avail_genres}')
#print(f'avail_ratings = \n{avail_ratings}')
found_genres = videogames_more_like_this(mydf, my_titles)
df_can_suggest = videogames_filtering(mydf, found_genres, my_platforms, my_age, my_titles)
suggestions = videogames_sampling(df_can_suggest, num_suggestions)
return suggestions
if __name__ == '__main__':
    # This is an example usage via the command line.
the_df = make_games_clean('Video_Games_Sales_as_at_22_Dec_2016.csv')
the_titles = [
'Star Wars: Battlefront',
'Madden NFL 06',
'STORM: Frontline Nation',
'Men in Black II: Alien Escape']
the_platforms = ['XB', 'PS']
the_age = 16
the_num_suggestions = 10
the_suggestions = suggest_games(
the_df,
the_titles,
the_platforms,
the_age,
the_num_suggestions,
)
print(f'{len(the_suggestions)} suggestions = {the_suggestions}')
|
smobarry/VideoGame_326Project
|
oldfiles/tests.py
|
#Happy path
#inputs and expected outputs are "normal"
#Edge cases ("unhappy path")
#inputs are unusual or special values
#may trigger exceptions
# test age_limit by giving it edge cases
def funcname(self, parameter_list):
    """
    docstring
    """
    raise NotImplementedError
|
ChreSyr/countdown
|
countdown.py
|
from baopig import *
import pygame
# pyapp.set_inapp_debugging()
class TimesUpScene(Scene):
def __init__(self):
Scene.__init__(self, app)
Text(
parent=self,
text="The end !",
pos=(0, -100),
sticky="center",
height=500,
font_height=50
)
pygame.mixer.init()
self.beep = pygame.mixer.Sound("turn-off-sound.mp3") # TODO : fix sound not played
Button(
parent=self,
text="Silence",
pos=(-100, 0),
sticky="center",
command=pygame.mixer.stop
)
def restart():
pygame.mixer.stop()
self.app.mainscene.countdown.start()
self.app.open("MainScene")
Button(
parent=self,
text="Restart",
pos=(100, 0),
sticky="center",
command=restart
)
def close(self):
app.set_display_mode(0)
def open(self):
app.set_display_mode(pygame.FULLSCREEN)
self.beep.play(loops=20)
def receive(self, event):
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_f:
app.set_display_mode(pygame.FULLSCREEN)
if event.key == pygame.K_g:
app.set_display_mode(0)
if event.key == pygame.K_SPACE:
# app.mainscene.countdown.start()
app.open("MainScene")
class MainScene(Scene):
def __init__(self):
app.set_caption("Programmer Countdown")
Scene.__init__(
self,
app,
background_color=(170, 170, 170)
)
h = 50
def handle_enter(text):
self.countdown.cancel()
self.countdown.set_interval(float(text))
self.countdown.start()
def end():
app.open("TimesUpScene")
print("YO")
self.countdown = Timer(
1,
# PrefilledFunction(print, "End of countdown"),
# display.enter_fullscreen_mode,
PrefilledFunction(app.open, "TimesUpScene"))
self.pause_button = Button(
parent=self,
text="PAUSE",
pos=(10, self.bottom - 40 - 10),
command=self.countdown.pause
)
self.resume_button = Button(
parent=self,
text="RESUME",
pos=(self.pause_button.right + 10, self.pause_button.top),
command=self.countdown.resume
)
self.hide_button = Button(
parent=self,
text="Hide",
pos=(self.width - 10, self.pause_button.top),
pos_location="topright",
command=pygame.display.iconify
)
def get_time_left():
return format_time(float(self.countdown.get_time_left()))
self.time_left = DynamicText(
parent=self,
get_text=get_time_left,
pos=(10, 65),
# pos=(0, 0),
# h=h,
font_height=h,
# text_location="right",
# pos_location="midright",
# background_color=(240, 30, 30),
)
# self.time_left.right = self.right - 10
self.input_box = NumEntry(
parent=self,
text=str(self.countdown.interval),
pos=(10, 10),
size=(self.w - 20, 40),
command=handle_enter,
            # presentation_text="Enter a time in seconds",
name="timeinput"
)
def close(self):
if self.countdown.is_running:
self.countdown.cancel()
def receive(self, event):
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_f:
app.set_display_mode(pygame.FULLSCREEN)
if event.key == pygame.K_g:
app.set_display_mode(0)
if event.key == pygame.K_SPACE:
if self.countdown.is_running:
self.countdown.pause()
else:
self.countdown.resume()
app = Application()
app.set_style_for(Button, height=40)
app.mainscene = MainScene()
app.timesupscene = TimesUpScene()
keep_going = True
while keep_going:
keep_going = False
app.launch()
if keep_going:
i = input("Type something to restart the UI : ")
|
Kvr0/ProgramCore
|
helper/programdata/CreateProgramData.py
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--d',help='Specify Target Path',type=str)
parser.add_argument('--f',help='Output file path',type=str)
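# Input format sketch (inferred from the parsing loop below; values illustrative):
# each non-comment line is "<id> [value0] [value1]", e.g. an input file of
#   set 1 2
#   jump 5
# yields: {Orders:[{id:"set",value0:1,value1:2},{id:"jump",value0:5}],EOP:false}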
target_path = ''
args = parser.parse_args()
if args.d:
target_path = args.d
else:
target_path = input('TargetPath: ')
with open(target_path,'r') as file:
order_list = []
lines = file.read().splitlines()
for line in lines:
words = line.split(' ')
wnum = len(words)
if wnum > 0 and words[0] != '#':
id = None
value0 = None
value1 = None
if wnum > 0:
id = words[0]
if wnum > 1:
value0 = words[1]
if wnum > 2:
value1 = words[2]
if id:
order = 'id:"'+id+'"'
if value0:
order += ',value0:'+value0
if value1:
order += ',value1:'+value1
order_list.append('{'+order+'}')
program = '{Orders:['+','.join(order_list)+'],EOP:false}'
print(program)
if args.f:
with open(args.f,'w') as outf:
ss = [
'data modify storage programcore: Program set value '+program,
'function programcore:reset_rundata',
'function programcore:load_program'
]
outf.writelines([s+'\n' for s in ss])
|
luke5sky/skill-playback-control
|
__init__.py
|
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from adapt.intent import IntentBuilder
from mycroft.messagebus.message import Message
from mycroft.skills.core import MycroftSkill, intent_handler
from mycroft.skills.audioservice import AudioService
class PlaybackControlSkill(MycroftSkill):
def __init__(self):
super(PlaybackControlSkill, self).__init__('Playback Control Skill')
self.query_replies = {} # cache of received replies
self.query_extensions = {} # maintains query timeout extensions
def initialize(self):
self.audio_service = AudioService(self.bus)
self.add_event('play:query.response',
self.handle_play_query_response)
# Handle common audio intents. 'Audio' skills should listen for the
# common messages:
# self.add_event('mycroft.audio.service.next', SKILL_HANDLER)
# self.add_event('mycroft.audio.service.prev', SKILL_HANDLER)
# self.add_event('mycroft.audio.service.pause', SKILL_HANDLER)
# self.add_event('mycroft.audio.service.resume', SKILL_HANDLER)
@intent_handler(IntentBuilder('').require('Next').require("Track"))
def handle_next(self, message):
self.audio_service.next()
@intent_handler(IntentBuilder('').require('Prev').require("Track"))
def handle_prev(self, message):
self.audio_service.prev()
@intent_handler(IntentBuilder('').require('Pause'))
def handle_pause(self, message):
self.audio_service.pause()
@intent_handler(IntentBuilder('').one_of('PlayResume', 'Resume'))
def handle_play(self, message):
"""Resume playback if paused"""
self.audio_service.resume()
def stop(self, message=None):
if self.audio_service.is_playing:
self.audio_service.stop()
return True
else:
return False
@intent_handler(IntentBuilder('').require('Play').require('Phrase'))
def play(self, message):
# Remove everything up to and including "Play"
        # NOTE: This requires a Play.voc which holds any synonyms for 'Play'
# and a .rx that contains each of those synonyms. E.g.
# Play.voc
# play
# bork
# phrase.rx
# play (?P<Phrase>.*)
# bork (?P<Phrase>.*)
# This really just hacks around limitations of the Adapt regex system,
# which will only return the first word of the target phrase
utt = message.data.get('utterance')
phrase = re.sub('^.*?' + message.data['Play'], '', utt).strip()
self.log.info("Resolving Player for: "+phrase)
self.enclosure.mouth_think()
        # Now we place a query on the messagebus for anyone who wants to
        # attempt to service a 'play:query' message. E.g.:
        #   {
        #       "type": "play:query",
        #       "phrase": "the news" / "tom waits" / "madonna on Pandora"
        #   }
        #
        # One or more skills can reply with a 'play:query.response', e.g.:
        #   {
        #       "type": "play:query.response",
        #       "target": "the news",
        #       "skill_id": "<self.skill_id>",
        #       "conf": "0.7",
        #       "callback_data": "<optional data>"
        #   }
# This means the skill has a 70% confidence they can handle that
# request. The "callback_data" is optional, but can provide data
# that eliminates the need to re-parse if this reply is chosen.
#
self.query_replies[phrase] = []
self.query_extensions[phrase] = []
self.bus.emit(Message('play:query', data={"phrase": phrase}))
self.schedule_event(self._play_query_timeout, 1,
data={"phrase": phrase}, name='PlayQueryTimeout')
def handle_play_query_response(self, message):
search_phrase = message.data["phrase"]
if "searching" in message.data and search_phrase in self.query_extensions:
# Manage requests for time to complete searches
skill_id = message.data["skill_id"]
if message.data["searching"]:
# extend the timeout by 5 seconds
self.cancel_scheduled_event("PlayQueryTimeout")
self.schedule_event(self._play_query_timeout, 5,
data={"phrase": search_phrase},
name='PlayQueryTimeout')
# TODO: Perhaps block multiple extensions?
if skill_id not in self.query_extensions[search_phrase]:
self.query_extensions[search_phrase].append(skill_id)
else:
# Search complete, don't wait on this skill any longer
if skill_id in self.query_extensions[search_phrase]:
self.query_extensions[search_phrase].remove(skill_id)
if not self.query_extensions[search_phrase]:
self.cancel_scheduled_event("PlayQueryTimeout")
self.schedule_event(self._play_query_timeout, 0,
data={"phrase": search_phrase},
name='PlayQueryTimeout')
elif search_phrase in self.query_replies:
# Collect all replies until the timeout
self.query_replies[message.data["phrase"]].append(message.data)
def _play_query_timeout(self, message):
# Prevent any late-comers from retriggering this query handler
search_phrase = message.data["phrase"]
self.query_extensions[search_phrase] = []
self.enclosure.mouth_reset()
# Look at any replies that arrived before the timeout
# Find response(s) with the highest confidence
best = None
ties = []
for handler in self.query_replies[search_phrase]:
if not best or handler["conf"] > best["conf"]:
best = handler
ties = []
elif handler["conf"] == best["conf"]:
ties.append(handler)
if best:
if ties:
# TODO: Ask user to pick between ties or do it automagically
pass
# invoke best match
self.log.info("Playing with: " + str(best["skill_id"]))
self.bus.emit(Message('play:start',
data={"skill_id": best["skill_id"],
"phrase": search_phrase,
"callback_data":
best.get("callback_data")}))
else:
self.speak_dialog("cant.play", data={"phrase": search_phrase})
if search_phrase in self.query_replies:
del self.query_replies[search_phrase]
if search_phrase in self.query_extensions:
del self.query_extensions[search_phrase]
def create_skill():
return PlaybackControlSkill()
|
EricMountain/weather_risk_metrics
|
bin/vigilance-server.py
|
#!/usr/bin/python3
from prometheus_client import start_http_server, Gauge
import urllib.request
import random
from datetime import datetime
import re
import time
test = False
risks = ["vent violent", "pluie-inondation", "orages", "inondation", "neige-verglas", "canicule", "grand-froid", "avalanches", "vagues-submersion"]
# Maps a (dept, risk, startZ, endZ) tuple to the round in which it was last set
cache = {}
# Create metrics to track time spent and requests made.
gauge_full = Gauge('meteorological_risk_full', 'Weather risk', ['dept', 'risk', 'startZ', 'endZ'])
gauge = Gauge('meteorological_risk', 'Weather risk', ['dept', 'risk'])
def getTimeHash():
d = datetime.now()
return d.year*365*24*60+d.month*30*24*60+d.day*24*60+d.hour*60+d.minute
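# Note: getTimeHash() is just a minute-resolution cache-buster; the value is
# appended as a query string below and is presumably never parsed by the server.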
def getStream():
url = "http://www.vigimeteo.com/data/NXFR49_LFPW_.xml?{}".format(getTimeHash())
stream = None
if test:
stream = open('test/jaune-vent-violent+littoral-vagues.xml')
else:
try:
stream = urllib.request.urlopen(url)
except urllib.error.URLError as e:
print(f'Error fetching URL: {e}')
pass
return stream
def getVigilanceData():
regex = r'<PHENOMENE departement="(?P<dept>\w+)" phenomene="(?P<risk>\d+)" couleur="(?P<level>\d)" dateDebutEvtTU="(?P<start>\d{14})" dateFinEvtTU="(?P<end>\d{14})"/>'
pattern = re.compile(regex)
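    # Example of a line this pattern matches (shape taken from the regex;
    # the attribute values here are illustrative):
    # <PHENOMENE departement="75" phenomene="3" couleur="2" dateDebutEvtTU="20200101120000" dateFinEvtTU="20200102120000"/>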
results = []
stream = getStream()
if stream is None: return results
for line in stream:
try:
line = line.decode('utf-8')
except AttributeError:
pass
matches = pattern.match(line)
if matches:
data = matches.groupdict()
results.append(data)
return results
def latestVigilanceMetrics(gauge: Gauge, cacheRound: int):
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
deptRiskLevelMap = dict()
for result in getVigilanceData():
if result['end'] > now:
level = int(result['level'])
else:
level = 0
risk = risks[int(result['risk'])-1]
key = (result['dept'], risk, result['start'], result['end'])
cache[key] = cacheRound
dept = result['dept']
gauge_full.labels(dept=dept, risk=risk, startZ=result['start'], endZ=result['end']).set(level)
if (dept, risk) not in deptRiskLevelMap:
deptRiskLevelMap[(dept, risk)] = level
gauge.labels(dept=dept, risk=risk).set(level)
elif level > deptRiskLevelMap[(dept, risk)]:
deptRiskLevelMap[(dept, risk)] = level
gauge.labels(dept=dept, risk=risk).set(level)
print(f'{key!r} --> {level}, added to cache with round {cacheRound}')
def checkDeadCacheEntries(gauge: Gauge, cacheRound: int):
'''
Checks if a particular combination has been dropped from the output
produced by vigimeteo. We need to zero these entries else they will stay stuck
at whatever their last value was.
'''
for key, value in list(cache.items()):
if value != cacheRound:
print(f'{key!r} --> {0}, deleting cache entry')
gauge.labels(dept=key[0], risk=key[1], startZ=key[2], endZ=key[3]).set(0)
del cache[key]
if __name__ == '__main__':
# Start up the server to expose the metrics.
start_http_server(9696)
cacheRound = 0
while True:
cacheRound = 1 - cacheRound
print(f'Starting new round… (index {cacheRound})')
latestVigilanceMetrics(gauge, cacheRound)
checkDeadCacheEntries(gauge, cacheRound)
print('Round completed.')
time.sleep(3600)
|
saskeuday/sasoke
|
userbot/plugins/fpost.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""" Command: .fpost word
credit: @pureindialover"""
import string
from uniborg.util import admin_cmd
msg_cache = {}
@borg.on(admin_cmd(pattern=r"fpost\s+(.*)"))
async def _(event):
await event.delete()
text = event.pattern_match.group(1)
destination = await event.get_input_chat()
for c in text.lower():
if c not in string.ascii_lowercase:
continue
if c not in msg_cache:
async for msg in borg.iter_messages(None, search=c):
if msg.raw_text.lower() == c and msg.media is None:
msg_cache[c] = msg
break
        if c in msg_cache:  # skip letters for which no matching message was found
            await borg.forward_messages(destination, msg_cache[c])
|
saskeuday/sasoke
|
userbot/plugins/pmpermit_menu.py
|
# Copyright 2019 - 2020 DarkPrinc3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# if you change credits, you get anal cancer and get murdered by russians in 3 days.
"""
Support chatbox for pmpermit.
Used by incoming messages with trigger as start
Will not work for already approved people.
Credits: written by ༺αиυвιѕ༻ {@A_Dark_Princ3}
"""
import asyncio
import io
import telethon.sync
from telethon.tl.functions.users import GetFullUserRequest
import userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql
from telethon import events, errors, functions, types
from userbot import ALIVE_NAME
from userbot.utils import admin_cmd
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "No name set yet nibba, check pinned message in @XtraTgBot"
PREV_REPLY_MESSAGE = {}
@command(pattern=r"start", incoming=True)
async def _(event):
chat_id = event.from_id
userid = event.sender_id
if not pmpermit_sql.is_approved(chat_id):
chat = await event.get_chat()
if event.fwd_from:
return
if event.is_private:
PM = ("`مرحباً. لقد تم ايصالك إلى القائمة المتاحة للسيد 𝚂𝙰𝚂𝙺𝙴🎭 ,`"
f"{DEFAULTUSER}.\n"
"__دعونا نجعل هذا سلسًا وأخبرني لماذا أنت هنا ಠ_ಠ__\n"
"**اختر أحد الأسباب التالية لوجودك هنا رجائاََ ارسل رقم الاختيار (1،2،3،4) 🥀:**\n\n"
"`1`. للدردشة مع سيدي 😺\n"
"`2`. لازعاج 𝚂𝙰𝚂𝙺𝙴 ಠ_ಠ.\n"
"`3`. للاستفسار عن شيء ما (⌐■_■)\n"
"`4`. لطلب شيء 🎭\n")
ONE = ("حسناً. تم تسجيل طلبك. لا ترسل المزيد من الرسائل المزعجه إلى سيدي. يمكنك توقع الرد في غضون 24 سنة ضوئية. إنهُ رجل مشغول ، على عكسك على الأرجح(¬‿¬) .\n\n"
"**⚠️ سيتم حظرك والإبلاغ عنك إذا قمت بإرسال رسائل غير مرغوب فيها. ⚠️**\n\n")
TWO = (" `███████▄▄███████████▄ \n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓███░░░░░░░░░░░░█\n██████▀▀▀█░░░░██████▀ \n░░░░░░░░░█░░░░█ \n░░░░░░░░░░█░░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░░▀▀ `\n\n**لست مرحاً ، هذا ليس منزلك. اذهب لأزعاج شخص آخر. لقد تم حظرك والإبلاغ عنك حتى إشعار آخر.**")
FOUR = ("__حسنا. لم يشاهد سيدي رسالتك حتى الآن ، وعادةً ما يرد على الزواحف ، مع ذلك ساقوم بابلاغ سيدي برسالتك🥀 ..__\n __سيرد عندما يعود ، إذا رغب في ذلك ، فهناك بالفعل الكثير من الرسائل المعلقة 😶__\n **من فضلك لا ترسل رسائل مزعجه إلا إذا كنت ترغب في أن يتم حظرك والإبلاغ عنك.**")
FIVE = ("`حسناً.. يرجى الحصول على الأخلاق الأساسية لعدم إزعاج سيدي كثيرا. إذا رغب في مساعدتك فسوف يرد عليك قريبًاᓚᘏᗢ.`\n** لا تسأل مرارًا وتكرارًا والا سيتم حظرك والإبلاغ عنك (⌐■_■).**")
LWARN = ("**هذا هو التحذير الأخير الخاص بك. لا ترسل رسالة أخرى وإلا سيتم حظرك والإبلاغ عنك. كن صبور. سيرد عليك سيدي في اسرع وقت ممكن 🌝🌿.**")
async with borg.conversation(chat) as conv:
await borg.send_message(chat, PM)
chat_id = event.from_id
response = await conv.get_response(chat)
y = response.text
if y == "1":
await borg.send_message(chat, ONE)
response = await conv.get_response(chat)
await event.delete()
if not response.text == "start":
await response.delete()
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "2":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
if not response.text == "start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "3":
await borg.send_message(chat, FOUR)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
if not response.text == "start":
await borg.send_message(chat, LWARN)
await event.delete()
response = await conv.get_response(chat)
if not response.text == "start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "4":
await borg.send_message(chat,FIVE)
response = await conv.get_response(chat)
if not response.text == "start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
if not response.text == "start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
else:
await borg.send_message(chat, "`لقد قمت بإدخال أمر غير صالح👨💻. ارجوك ارسل start مرة أخرى أو لا ترسل رسالة أخرى إذا كنت لا ترغب في ان يتم حظرك والإبلاغ عنك.`")
response = await conv.get_response(chat)
z = response.text
if not z == "start":
await borg.send_message(chat, LWARN)
await conv.get_response(chat)
if not response.text == "start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
|
saskeuday/sasoke
|
userbot/plugins/labstack.py
|
# Copyright 2019 - 2020 DarkPrinc3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
import json
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import requests
@command(pattern="^.labstack ?(.*)")
async def labstack(event):
if event.fwd_from:
return
await event.edit("Processing...")
input_str = event.pattern_match.group(1)
reply = await event.get_reply_message()
if input_str:
filebase = input_str
elif reply:
filebase = await event.client.download_media(
reply.media, Var.TEMP_DOWNLOAD_DIRECTORY
)
else:
await event.edit(
"Reply to a media file or provide a directory to upload the file to labstack"
)
return
filesize = os.path.getsize(filebase)
filename = os.path.basename(filebase)
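    # Two-step upload, as inferred from the calls below: first register the
    # file with the labstack links API to obtain a code, then stream the
    # actual bytes to the /send endpoint with curl.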
headers2 = {"Up-User-ID": "IZfFbjUcgoo3Ao3m"}
files2 = {
"ttl": 604800,
"files": [{"name": filename, "type": "", "size": filesize}],
}
r2 = requests.post(
"https://up.labstack.com/api/v1/links", json=files2, headers=headers2
)
r2json = json.loads(r2.text)
url = "https://up.labstack.com/api/v1/links/{}/send".format(r2json["code"])
max_days = 7
command_to_exec = [
"curl",
"-F",
"files=@" + filebase,
"-H",
"Transfer-Encoding: chunked",
"-H",
"Up-User-ID: IZfFbjUcgoo3Ao3m",
url,
]
try:
logger.info(command_to_exec)
t_response = subprocess.check_output(command_to_exec, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
logger.info("Status : FAIL", exc.returncode, exc.output)
await event.edit(exc.output.decode("UTF-8"))
return
else:
logger.info(t_response)
t_response_arry = "https://up.labstack.com/api/v1/links/{}/receive".format(
r2json["code"]
)
await event.edit(
t_response_arry + "\nMax Days:" + str(max_days), link_preview=False
)
|
saskeuday/sasoke
|
userbot/plugins/news.py
|
# Copyright (C) By StarkGang [@STARKXD]
# Don't edit credits
# Works on the basis of Cyberboysumanjay's Inshorts News Api
# Test
import requests
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd
from var import Var
newslog = Var.NEWS_CHANNEL_ID
@borg.on(admin_cmd("news (.*)"))
@borg.on(sudo_cmd("news (.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
if Var.NEWS_CHANNEL_ID is None:
await edit_or_reply(
event, "`Please ADD NEWS_CHANNEL_ID For This Module To Work`"
)
return
infintyvar = event.pattern_match.group(1)
main_url = f"https://inshortsapi.vercel.app/news?category={infintyvar}"
    stuber = await edit_or_reply(
        event,
        f"Ok! Fetching {infintyvar} from the inshortsapi server and sending to the news channel",
    )
starknews = requests.get(main_url).json()
for item in starknews["data"]:
sedlyf = item["content"]
img = item["imageUrl"]
writter = item["author"]
dateis = item["date"]
readthis = item["readMoreUrl"]
titles = item["title"]
sed1 = img
sedm = f"**Title : {titles}** \n{sedlyf} \nDate : {dateis} \nAuthor : {writter} \nReadMore : {readthis}"
        await borg.send_file(newslog, sed1, caption=sedm)
    await stuber.edit("All news has been successfully sent to the news channel")
|
saskeuday/sasoke
|
userbot/plugins/assistant/translater.py
|
# Copyright (C) <NAME>
#
# Please Don't Kang Without Credits
# A Plugin For Assistant Bot
# x0x
import time
from datetime import datetime

import emoji
from googletrans import Translator
from telethon import events, custom, Button
from telethon.tl.types import (
    Channel,
    Chat,
    User
)
from telethon.utils import get_display_name

from userbot import bot, Lastupdate
from userbot.uniborgConfig import Config
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd
@tgbot.on(events.NewMessage(pattern="^/tr ?(.*)"))
async def _(event):
input_str = event.pattern_match.group(1)
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
text = previous_message.message
lan = input_str or "gu"
elif "|" in input_str:
lan, text = input_str.split("|")
else:
await tgbot.send_message(event.chat_id, "`.tr LanguageCode` as reply to a message")
return
text = emoji.demojize(text.strip())
lan = lan.strip()
translator = Translator()
translated = translator.translate(text, dest=lan)
after_tr_text = translated.text
output_str = (f"**Translated By Friday Assistant Bot** \n"
f"Source {translated.src} \nTranslation {lan} \nWhat I Can Translate From This {after_tr_text}")
    if event.from_id != bot.uid:
await tgbot.send_message(event.chat_id, "You Can't Access Me")
elif event.from_id == bot.uid:
await tgbot.send_message(event.chat_id, output_str)
else:
await tgbot.send_message(event.chat_id, "Something Went Wrong 🤔")
|
saskeuday/sasoke
|
userbot/plugins/assistant/ping.py
|
# Copyright (C) <NAME>
#
# Please Don't Kang Without Credits
# A Plugin For Assistant Bot
# x0x
import time
from datetime import datetime

import emoji
from googletrans import Translator
from telethon import events, custom, Button
from telethon.tl.types import (
    Channel,
    Chat,
    User
)
from telethon.utils import get_display_name

from userbot import Lastupdate, bot
from userbot.uniborgConfig import Config
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd
def get_readable_time(seconds: int) -> str:
count = 0
ping_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "days"]
while count < 4:
count += 1
if count < 3:
remainder, result = divmod(seconds, 60)
else:
remainder, result = divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
ping_time += time_list.pop() + ", "
time_list.reverse()
ping_time += ":".join(time_list)
return ping_time
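# Worked example: get_readable_time(3725) -> "1h:2m:5s"
# (3725 s = 1 hour, 2 minutes, 5 seconds; the list is built small-unit-first
# and reversed before joining)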
@tgbot.on(events.NewMessage(pattern="^/ping"))
async def _(event):
start = datetime.now()
vent = event.chat_id
starttext = ("Hi! This Bot is Part of @FridayOT \nThis Bot is Used For "
"Some Features That Can Be Used Via Bot. \nIf you want your"
"Own Assistant Bot Then Deploy From Button Bellow")
end = datetime.now()
ms = (end - start).microseconds / 1000
uptime = get_readable_time((time.time() - Lastupdate))
if event.from_id == bot.uid:
await tgbot.send_message(event.chat_id, f"**█▀█ █▀█ █▄░█ █▀▀ █ \n█▀▀ █▄█ █░▀█ █▄█ ▄**\n ➲ `{ms}` \n ➲ `{uptime}`")
else:
await tgbot.send_message(
event.chat_id,
message=starttext,
link_preview=False,
buttons = [
[Button.url("Repo 🛡️", "https://github.com/StarkGang/FridayUserbot")],
[Button.url("Join Channel 📃", "t.me/Fridayot")]
]
)
|
saskeuday/sasoke
|
userbot/plugins/inline_fun.py
|
from userbot.utils import admin_cmd, sudo_cmd, edit_or_reply
from var import Var
@borg.on(admin_cmd(pattern="stat$"))
async def stats(event):
if event.fwd_from:
return
botusername = Var.TG_BOT_USER_NAME_BF_HER
noob = "stats"
if event.reply_to_msg_id:
reply_to_id = await event.get_reply_message()
tap = await bot.inline_query(botusername, noob)
await tap[0].click(event.chat_id)
await event.delete()
@borg.on(admin_cmd(pattern="xogame$"))
async def xogame(event):
if event.fwd_from:
return
botusername = "@xobot"
noob = "play"
if event.reply_to_msg_id:
reply_to_id = await event.get_reply_message()
tap = await bot.inline_query(botusername, noob)
await tap[0].click(event.chat_id)
await event.delete()
|
saskeuday/sasoke
|
userbot/plugins/autoname.py
|
"""Auto Profile Updation Commands
.autoname"""
import asyncio
import time
from telethon.errors import FloodWaitError
from telethon.tl import functions
from uniborg.util import admin_cmd, edit_or_reply, sudo_cmd
from userbot import ALIVE_NAME
DEL_TIME_OUT = 60
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "FridayUserbot"
@borg.on(admin_cmd(pattern="autoname")) # pylint:disable=E0602
@borg.on(sudo_cmd(pattern="autoname", allow_sudo=True))
async def _(event):
    sed = await edit_or_reply(event, "`Starting AutoName Please Wait`")
    if event.fwd_from:
        return
    await sed.edit("Auto Name has been started, my Master")
    while True:
DM = time.strftime("%d-%m-%y")
HM = time.strftime("%H:%M")
name = f"🕒{HM} ⚡{DEFAULTUSER}⚡ 📅{DM}"
logger.info(name)
try:
await borg(
functions.account.UpdateProfileRequest( # pylint:disable=E0602
first_name=name
)
)
except FloodWaitError as ex:
            logger.warning(str(ex))
await asyncio.sleep(ex.seconds)
# else:
# logger.info(r.stringify())
# await borg.send_message( # pylint:disable=E0602
# Config.PRIVATE_GROUP_BOT_API_ID, # pylint:disable=E0602
# "Successfully Changed Profile Name"
# )
await asyncio.sleep(DEL_TIME_OUT)
await sed.edit(f"Auto Name has been started my Master")
|
saskeuday/sasoke
|
userbot/plugins/assistant/start.py
|
# Copyright (C) <NAME>
#
# Please Don't Kang Without Credits
# A Plugin For Assistant Bot
# x0x
import time
from datetime import datetime

import emoji
from googletrans import Translator
from telethon import events, custom, Button
from telethon.tl.types import (
    Channel,
    Chat,
    User
)
from telethon.utils import get_display_name

from userbot import Lastupdate, bot
from userbot.uniborgConfig import Config
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd
@tgbot.on(events.NewMessage(pattern="^/start"))
async def start(event):
vent = event.chat_id
starttext = ("Hi! This Bot is Part of @FridayOT \nThis Bot is Used For "
"Some Features That Can Be Used Via Bot. \nIf you want your"
"Own Assistant Bot Then Deploy From Button Bellow")
if event.from_id == bot.uid:
await tgbot.send_message(
vent,
message="Hi Master, It's Me Your Assistant.",
buttons = [
[Button.url("Repo 🛡️", "https://github.com/StarkGang/FridayUserbot")],
[Button.url("Join Channel 📃", "t.me/Fridayot")]
]
)
else:
await tgbot.send_message(
event.chat_id,
message=starttext,
link_preview=False,
buttons = [
[Button.url("Repo 🛡️", "https://github.com/StarkGang/FridayUserbot")],
[Button.url("Join Channel 📃", "t.me/Fridayot")]
]
)
|
saskeuday/sasoke
|
userbot/_core.py
|
from userbot.utils import admin_cmd, sudo_cmd, load_module, remove_plugin
import asyncio
import os
from datetime import datetime
from pathlib import Path
# Copyright (C) By @StarkGang
# FridayUserbot 🇮🇳
|
saskeuday/sasoke
|
userbot/plugins/list.py
|
"""
List Files plugin for userbot // Simple module for people who don't want to use the shell executor for listing files.
cmd: .ls // will return files from current working directory
.ls path // will return output according to path
By:- @Zero_cool7870
"""
import os
from uniborg.util import admin_cmd, edit_or_reply
@borg.on(admin_cmd(pattern="ls ?(.*)"))
@borg.on(admin_cmd(pattern="ls ?(.*)", allow_sudo=True))
async def lst(event):
genesis = await edit_or_reply(event, "Processing")
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
if input_str:
msg = "**Files in {} :**\n".format(input_str)
files = os.listdir(input_str)
else:
msg = "**Files in Current Directory :**\n"
files = os.listdir(os.getcwd())
for file in files:
msg += "`{}`\n".format(file)
if len(msg) <= Config.MAX_MESSAGE_SIZE_LIMIT:
await genesis.edit(msg)
else:
msg = msg.replace("`", "")
out = "filesList.txt"
with open(out, "w") as f:
f.write(f)
await borg.send_file(
event.chat_id,
out,
force_document=True,
allow_cache=False,
caption="`Output is huge. Sending as a file...`",
)
await event.delete()
|
saskeuday/sasoke
|
userbot/function/__init__.py
|
# Copyright (C) Is Distributed Between @StarkGang And @ZeltraxRockz
# Please Ask At @FridayOT Before Copying Any Module
# FridayUserbot (2020-21)
|
saskeuday/sasoke
|
userbot/plugins/bye.py
|
# For @UniBorg
# Courtesy @yasirsiddiqui
"""
.bye
"""
import time
from telethon.tl.functions.channels import LeaveChannelRequest
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd
@borg.on(admin_cmd("bye", outgoing=True))
@borg.on(sudo_cmd("bye", allow_sudo=True))
async def leave(e):
starkgang = await edit_or_reply(e, "Bye Kek")
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await starkgang.edit("`I am leaving this chat.....!`")
time.sleep(3)
if "-" in str(e.chat_id):
await borg(LeaveChannelRequest(e.chat_id))
else:
await starkgang.edit("`Sir This is Not A Chat`")
|
saskeuday/sasoke
|
userbot/plugins/cbutton.py
|
"""Create Button Posts
"""
import re
from telethon import custom
from uniborg.util import admin_cmd
# regex obtained from: https://github.com/PaulSonOfLars/tgbot/blob/master/tg_bot/modules/helper_funcs/string_handling.py#L23
BTN_URL_REGEX = re.compile(r"(\{([^\[]+?)\}\<buttonurl:(?:/{0,2})(.+?)(:same)?\>)")
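# Example markup this regex matches (illustrative):
#   {Google}<buttonurl:https://www.google.com>
#   {Docs}<buttonurl:https://example.com:same>   <- ':same' keeps the button on the same row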
@borg.on(admin_cmd(pattern="cbutton")) # pylint:disable=E0602
async def _(event):
if Config.TG_BOT_USER_NAME_BF_HER is None or tgbot is None:
await event.edit("need to set up a @BotFather bot for this module to work")
return
if Config.PRIVATE_CHANNEL_BOT_API_ID is None:
await event.edit(
"need to have a `PRIVATE_CHANNEL_BOT_API_ID` for this module to work"
)
return
reply_message = await event.get_reply_message()
if reply_message is None:
await event.edit("reply to a message that I need to parse the magic on")
return
markdown_note = reply_message.text
prev = 0
note_data = ""
buttons = []
for match in BTN_URL_REGEX.finditer(markdown_note):
# Check if btnurl is escaped
n_escapes = 0
to_check = match.start(1) - 1
while to_check > 0 and markdown_note[to_check] == "\\":
n_escapes += 1
to_check -= 1
# if even, not escaped -> create button
if n_escapes % 2 == 0:
            # create a 3-tuple of (button label, url, same-row flag)
buttons.append((match.group(2), match.group(3), bool(match.group(4))))
note_data += markdown_note[prev : match.start(1)]
prev = match.end(1)
# if odd, escaped -> move along
else:
note_data += markdown_note[prev:to_check]
prev = match.start(1) - 1
note_data += markdown_note[prev:]
message_text = note_data.strip()
tl_ib_buttons = build_keyboard(buttons)
# logger.info(message_text)
# logger.info(tl_ib_buttons)
tgbot_reply_message = None
if reply_message.media is not None:
message_id_in_channel = reply_message.id
tgbot_reply_message = await tgbot.get_messages(
entity=Config.PRIVATE_CHANNEL_BOT_API_ID, ids=message_id_in_channel
)
tgbot_reply_message = tgbot_reply_message.media
await tgbot.send_message(
entity=Config.PRIVATE_CHANNEL_BOT_API_ID,
message=message_text,
parse_mode="html",
file=tgbot_reply_message,
link_preview=False,
buttons=tl_ib_buttons,
silent=True,
)
# Helpers
def build_keyboard(buttons):
keyb = []
for btn in buttons:
if btn[2] and keyb:
keyb[-1].append(custom.Button.url(btn[0], btn[1]))
else:
keyb.append([custom.Button.url(btn[0], btn[1])])
return keyb
|
saskeuday/sasoke
|
userbot/plugins/thumbnail.py
|
"""Thumbnail Utilities, © @AnyDLBot
Available Commands:
.savethumbnail
.clearthumbnail
.getthumbnail"""
import os
import subprocess
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from PIL import Image
from uniborg.util import admin_cmd
thumb_image_path = Config.TMP_DOWNLOAD_DIRECTORY + "/thumb_image.jpg"
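# get_video_thumb() grabs a single frame from the midpoint of the clip with
# ffmpeg and writes it next to the source as <file>.jpg; on success the
# source file is removed.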
def get_video_thumb(file, output=None, width=320):
output = file + ".jpg"
metadata = extractMetadata(createParser(file))
p = subprocess.Popen(
[
"ffmpeg",
"-i",
file,
"-ss",
str(
int((0, metadata.get("duration").seconds)[metadata.has("duration")] / 2)
),
# '-filter:v', 'scale={}:-1'.format(width),
"-vframes",
"1",
output,
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
)
p.communicate()
if not p.returncode and os.path.lexists(file):
os.remove(file)
return output
@borg.on(admin_cmd(pattern="savethumbnail"))
async def _(event):
if event.fwd_from:
return
await event.edit("Processing ...")
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
if event.reply_to_msg_id:
downloaded_file_name = await borg.download_media(
await event.get_reply_message(), Config.TMP_DOWNLOAD_DIRECTORY
)
if downloaded_file_name.endswith(".mp4"):
downloaded_file_name = get_video_thumb(downloaded_file_name)
metadata = extractMetadata(createParser(downloaded_file_name))
height = 0
if metadata.has("height"):
height = metadata.get("height")
# resize image
# ref: https://t.me/PyrogramChat/44663
# https://stackoverflow.com/a/21669827/4723940
Image.open(downloaded_file_name).convert("RGB").save(downloaded_file_name)
img = Image.open(downloaded_file_name)
# https://stackoverflow.com/a/37631799/4723940
# img.thumbnail((320, 320))
        img = img.resize((320, height))  # resize() returns a new image; assign it back
img.save(thumb_image_path, "JPEG")
# https://pillow.readthedocs.io/en/3.1.x/reference/Image.html#create-thumbnails
os.remove(downloaded_file_name)
await event.edit(
"Custom video / file thumbnail saved. "
+ "This image will be used in the upload, till `.clearthumbnail`."
)
else:
await event.edit("Reply to a photo to save custom thumbnail")
@borg.on(admin_cmd(pattern="clearthumbnail"))
async def _(event):
if event.fwd_from:
return
if os.path.exists(thumb_image_path):
os.remove(thumb_image_path)
await event.edit("✅ Custom thumbnail cleared succesfully.")
@borg.on(admin_cmd(pattern="getthumbnail"))
async def _(event):
if event.fwd_from:
return
if event.reply_to_msg_id:
r = await event.get_reply_message()
try:
a = await borg.download_media(
r.media.document.thumbs[0], Config.TMP_DOWNLOAD_DIRECTORY
)
except Exception as e:
await event.edit(str(e))
try:
await borg.send_file(
event.chat_id,
a,
force_document=False,
allow_cache=False,
reply_to=event.reply_to_msg_id,
)
os.remove(a)
await event.delete()
except Exception as e:
await event.edit(str(e))
elif os.path.exists(thumb_image_path):
caption_str = "Currently Saved Thumbnail. Clear with `.clearthumbnail`"
await borg.send_file(
event.chat_id,
thumb_image_path,
caption=caption_str,
force_document=False,
allow_cache=False,
reply_to=event.message.id,
)
await event.edit(caption_str)
else:
await event.edit("Reply `.gethumbnail` as a reply to a media")
|
airspot-dev/krules-dispatcher-cloudevents
|
setup.py
|
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='krules-dispatcher-cloudevents',
version="0.8.5.2",
author="<NAME>",
author_email="<EMAIL>",
description="KRules cloudevents dispatcher",
    license="Apache License 2.0",
keywords="krules cloudevents router",
url="https://github.com/airspot-dev/krules-dispatcher-cloudevents",
    packages=find_packages(),
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
],
install_requires=[
'krules-core==0.8.5.1',
'cloudevents==1.2.0',
'pytz==2020.5',
'requests==2.25.1'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
'pytest-localserver',
],
)
|
airspot-dev/krules-dispatcher-cloudevents
|
krules_cloudevents/route/dispatcher.py
|
# Copyright 2019 The KRules Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import uuid
from datetime import datetime
import pytz
import json
import inspect
from krules_core.subject import PayloadConst
from krules_core.providers import subject_factory
from krules_core.route.dispatcher import BaseDispatcher
from cloudevents.sdk import converters
from cloudevents.sdk import marshaller
from cloudevents.sdk.converters import binary
from cloudevents.sdk.event import v1
import requests
class _JSONEncoder(json.JSONEncoder):
def default(self, obj):
if inspect.isfunction(obj):
return obj.__name__
elif isinstance(obj, object):
return str(type(obj))
return json.JSONEncoder.default(self, obj)
class CloudEventsDispatcher(BaseDispatcher):
def __init__(self, dispatch_url, source, test=False):
self._dispatch_url = dispatch_url
self._source = source
self._test = test
def dispatch(self, event_type, subject, payload):
if isinstance(subject, str):
subject = subject_factory(subject)
_event_info = subject.event_info()
_id = str(uuid.uuid4())
logging.debug("new event id: {}".format(_id))
event = v1.Event()
event.SetContentType('application/json')
event.SetEventID(_id)
event.SetSource(self._source)
event.SetSubject(str(subject))
event.SetEventTime(datetime.utcnow().replace(tzinfo=pytz.UTC).isoformat())
event.SetEventType(event_type)
# set extended properties
ext_props = subject.get_ext_props()
property_name = payload.get(PayloadConst.PROPERTY_NAME, None)
if property_name is not None:
ext_props.update({"propertyname": property_name})
event.SetExtensions(ext_props)
event.Set('Originid', str(_event_info.get("originid", _id)))
event.SetData(payload)
m = marshaller.NewHTTPMarshaller([binary.NewBinaryHTTPCloudEventConverter()])
headers, body = m.ToRequest(event, converters.TypeBinary, lambda x: json.dumps(x, cls=_JSONEncoder))
# headers['Ce-Originid'] = str(_event_info.get("Originid", _id))
if callable(self._dispatch_url):
dispatch_url = self._dispatch_url(subject, event_type)
else:
dispatch_url = self._dispatch_url
response = requests.post(dispatch_url,
headers=headers,
data=body)
response.raise_for_status()
if self._test:
return _id, response.status_code, headers
return _id
# url = self._dispatch_url.replace("{{message}}", message)
# print(url)
# #_pool.apply_async(requests.post, args=(url,), kwds={'headers': headers, 'data': data.getvalue()},
# # callback=_on_success)
# #requests.post(url, headers=headers, data=data.getvalue())
# req = Request(url, data=data.getvalue())
# print(req)
# for k, v in headers.items():
# req.add_header(k, v)
# req.get_method = lambda: "POST"
# print("posting")
# urlopen(req)
# print("posted")
#return event
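# Usage sketch (assumed URL, source and payload; not part of this module):
#   dispatcher = CloudEventsDispatcher("http://broker.example/", "my-service")
#   event_id = dispatcher.dispatch("order-created", "order-0001", {"qty": 1})
# In binary content mode the context attributes travel as ce-* HTTP headers
# (ce-id, ce-source, ce-type, ...) while the JSON payload forms the body.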
|
airspot-dev/krules-dispatcher-cloudevents
|
krules_cloudevents/test_dispatcher.py
|
<reponame>airspot-dev/krules-dispatcher-cloudevents
# Copyright 2019 The KRules Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import pytest
from cloudevents.sdk import marshaller
from cloudevents.sdk.event import v1
from krules_core.subject import PayloadConst
from pytest_localserver import plugin
from dependency_injector import providers
from krules_core.providers import (
configs_factory,
event_router_factory,
event_dispatcher_factory,
subject_storage_factory,
subject_factory)
from krules_core.route.router import EventRouter
from .route.dispatcher import CloudEventsDispatcher
from krules_core.tests.subject.sqlite_storage import SQLLiteSubjectStorage
from pytest_localserver.http import WSGIServer
from werkzeug.wrappers import Request
httpserver = plugin.httpserver
configs_factory.override(
providers.Singleton(lambda: {})
)
event_router_factory.override(
providers.Singleton(EventRouter)
)
subject_storage_factory.override(
providers.Factory(lambda x, **kwargs: SQLLiteSubjectStorage(x, ":memory:"))
)
def fake_receiver_app(environ, start_response):
"""Simplest possible WSGI application"""
request = Request(environ)
m = marshaller.NewDefaultHTTPMarshaller()
event = m.FromRequest(v1.Event(), request.headers, io.BytesIO(request.data), lambda x: json.load(x))
event_info = event.Properties()
event_info.update(event_info.pop("extensions"))
subject = subject_factory(event_info.get("subject", "sys-0"))
assert "originid" in event_info
assert "subject" in event_info
assert "data" in event_info
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
    return [b'Ok']
@pytest.fixture
def fake_receiver(request):
"""Defines the testserver funcarg"""
server = WSGIServer(application=fake_receiver_app)
server.start()
request.addfinalizer(server.stop)
return server
def test_dispatched_event(fake_receiver):
from krules_core import event_types
event_dispatcher_factory.override(
providers.Singleton(lambda: CloudEventsDispatcher(fake_receiver.url, "pytest", test=True))
)
router = event_router_factory()
subject = subject_factory("test-subject")
subject.set_ext("ext1", "val1")
subject.set_ext("ext2", "2")
_id, code, sent_headers = router.route("test-type", subject, {"key1": "hello"})
assert(200 <= code < 300)
assert (sent_headers.get("ce-id") == _id)
assert(sent_headers.get("ce-source") == "pytest")
assert(sent_headers.get("ce-subject") == "test-subject")
assert(sent_headers.get("ce-type") == "test-type")
assert(sent_headers.get("ce-Originid") == _id)
assert(sent_headers.get("ce-ext1") == "val1")
assert(sent_headers.get("ce-ext2") == "2")
# with event info
subject = subject_factory("test-subject", event_info={"originid": 1234})
_, _, sent_headers = router.route("test-type", subject, {"key1": "hello"})
assert(sent_headers.get("id") != sent_headers.get("ce-Originid"))
assert(sent_headers.get("ce-Originid") == '1234')
# property name
_, _, sent_headers = router.route(event_types.SUBJECT_PROPERTY_CHANGED, subject, {PayloadConst.PROPERTY_NAME: "foo"})
assert (sent_headers.get("ce-propertyname") == 'foo')
def test_callable_dispatch_url(fake_receiver):
def _get_dispatch_url(subject, type):
assert not isinstance(subject, str)
return fake_receiver.url
event_dispatcher_factory.override(
providers.Singleton(
lambda: CloudEventsDispatcher(_get_dispatch_url, "pytest", test=True)
)
)
router = event_router_factory()
_id, code, sent_headers = router.route("test-type", "test_subject", {"key1": "hello"})
assert (200 <= code < 300)
|
vicelikedust/Ingress_max_recharge_distance
|
main.py
|
<gh_stars>1-10
from os import system, name
leveldist = [0,250,500,750,1000,1250,1500,1750,2000,2250,2500,2750,3000,3250,3500,3750,4000]
def clear():
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux
else:
_ = system('clear')
def calc():
    # Valid player levels are 1-16 (the indices of leveldist above).
    while True:
        try:
            level = int(input('What is your player level? '))
        except ValueError:
            print('You entered an incorrect value, it must be a number.')
            continue
        if 1 <= level <= 16:
            break
        print(f"That is an invalid level \nLevel {level} doesn't exist")
    while True:
        try:
            distance = int(input('What is the distance in km? '))
            break
        except ValueError:
            print('You entered an incorrect value, it must be a number.')
    if distance > leveldist[level]:
        print(f'Portal is too far \nThe max distance for level {level} is {leveldist[level]}km')
    else:
        efficiency = 100 - distance / (5 * level)
        print(f'Recharge Efficiency is {round(efficiency, 2)}%')
ans = 'yes'
if __name__ == "__main__":
try:
while ans == 'yes' or ans =='y':
calc()
ans = input('Do you want to calculate another one? ').lower()
clear()
except KeyboardInterrupt:
pass
print('\nProgram Closing...')
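# Worked example of the efficiency formula in calc(): a level 8 player
# recharging a portal 100 km away gets 100 - 100 / (5 * 8) = 97.5%
# efficiency, and the level-8 range cap is leveldist[8] = 2000 km.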
|
MadhuriKadam9/caravel_avsdopamp_3v3_sky130_v2
|
caravel/scripts/set_user_id.py
|
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
#----------------------------------------------------------------------
#
# set_user_id.py ---
#
# Manipulate the magic database, GDS, and verilog source files for the
# user_id_programming block to set the user ID number.
#
# The user ID number is a 32-bit value that is passed to this routine
# as an 8-digit hex number. If not given as an option, then the script
# will look for the value of the key "project_id" in the info.yaml file
# in the project top level directory
#
# user_id_programming layout map:
# Positions marked (in microns) for value = 0. For value = 1, move
# the via 0.92um to the left.
#
# Layout grid is 0.46um x 0.34um with half-pitch offset (0.23um, 0.17um)
#
# Signal Via position (um)
# name X Y
#--------------------------------
# mask_rev[0] 14.49 9.35
# mask_rev[1] 16.33 9.35
# mask_rev[2] 10.35 20.23
# mask_rev[3] 8.05 9.35
# mask_rev[4] 28.29 9.35
# mask_rev[5] 21.85 25.67
# mask_rev[6] 8.05 20.23
# mask_rev[7] 20.47 9.35
# mask_rev[8] 17.25 17.85
# mask_rev[9] 25.53 12.07
# mask_rev[10] 22.31 20.23
# mask_rev[11] 13.11 9.35
# mask_rev[12] 23.69 23.29
# mask_rev[13] 24.15 12.07
# mask_rev[14] 13.57 17.85
# mask_rev[15] 23.23 6.97
# mask_rev[16] 24.15 17.85
# mask_rev[17] 8.51 17.85
# mask_rev[18] 23.69 20.23
# mask_rev[19] 10.81 23.29
# mask_rev[20] 14.95 6.97
# mask_rev[21] 18.17 23.29
# mask_rev[22] 21.39 17.85
# mask_rev[23] 26.45 25.67
# mask_rev[24] 9.89 17.85
# mask_rev[25] 15.87 17.85
# mask_rev[26] 26.45 17.85
# mask_rev[27] 8.51 6.97
# mask_rev[28] 10.81 9.35
# mask_rev[29] 27.83 20.23
# mask_rev[30] 16.33 23.29
# mask_rev[31] 8.05 14.79
#----------------------------------------------------------------------
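#
# Worked example from the table above: for user ID 0x00000001 only bit 31 of
# the 32-bit value is set, so the mask_rev[31] via at (8.05, 14.79) is moved
# 0.92um left to (7.13, 14.79); every other via stays at its zero position.
#----------------------------------------------------------------------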
import os
import sys
import re
def usage():
print("Usage:")
print("set_user_id.py [<user_id_value>] [<path_to_project>]")
print("")
print("where:")
print(" <user_id_value> is a character string of eight hex digits, and")
print(" <path_to_project> is the path to the project top level directory.")
print("")
print(" If <user_id_value> is not given, then it must exist in the info.yaml file.")
print(" If <path_to_project> is not given, then it is assumed to be the cwd.")
return 0
if __name__ == '__main__':
# Coordinate pairs in microns for the zero position on each bit
mask_rev = (
(14.49, 9.35), (16.33, 9.35), (10.35, 20.23), ( 8.05, 9.35),
(28.29, 9.35), (21.85, 25.67), ( 8.05, 20.23), (20.47, 9.35),
(17.25, 17.85), (25.53, 12.07), (22.31, 20.23), (13.11, 9.35),
(23.69, 23.29), (24.15, 12.07), (13.57, 17.85), (23.23, 6.97),
(24.15, 17.85), ( 8.51, 17.85), (23.69, 20.23), (10.81, 23.29),
(14.95, 6.97), (18.17, 23.29), (21.39, 17.85), (26.45, 25.67),
( 9.89, 17.85), (15.87, 17.85), (26.45, 17.85), ( 8.51, 6.97),
        (10.81, 9.35), (27.83, 20.23), (16.33, 23.29), ( 8.05, 14.79))
optionlist = []
arguments = []
debugmode = False
reportmode = False
for option in sys.argv[1:]:
if option.find('-', 0) == 0:
optionlist.append(option)
else:
arguments.append(option)
if len(arguments) > 2:
print("Wrong number of arguments given to set_user_id.py.")
usage()
sys.exit(0)
if '-debug' in optionlist:
debugmode = True
if '-report' in optionlist:
reportmode = True
user_id_value = None
user_project_path = None
if len(arguments) > 0:
user_id_value = arguments[0]
# Convert to binary
try:
user_id_int = int('0x' + user_id_value, 0)
user_id_bits = '{0:032b}'.format(user_id_int)
        except ValueError:
user_project_path = arguments[0]
if len(arguments) == 0:
user_project_path = os.getcwd()
elif len(arguments) == 2:
user_project_path = arguments[1]
    elif user_project_path is None:
user_project_path = arguments[0]
else:
user_project_path = os.getcwd()
if not os.path.isdir(user_project_path):
print('Error: Project path "' + user_project_path + '" does not exist or is not readable.')
sys.exit(1)
# Check for valid directories
if not user_id_value:
if os.path.isfile(user_project_path + '/info.yaml'):
with open(user_project_path + '/info.yaml', 'r') as ifile:
infolines = ifile.read().splitlines()
for line in infolines:
kvpair = line.split(':')
if len(kvpair) == 2:
key = kvpair[0].strip()
value = kvpair[1].strip()
if key == 'project_id':
user_id_value = value.strip('"\'')
break
if not user_id_value:
print('Error: No project_id key:value pair found in project info.yaml.')
sys.exit(1)
try:
user_id_int = int('0x' + user_id_value, 0)
user_id_bits = '{0:032b}'.format(user_id_int)
            except ValueError:
print('Error: Cannot parse user ID "' + user_id_value + '" as an 8-digit hex number.')
sys.exit(1)
else:
print('Error: No info.yaml file and no user ID argument given.')
sys.exit(1)
if reportmode:
print(str(user_id_int))
sys.exit(0)
print('Setting project user ID to: ' + user_id_value)
magpath = user_project_path + '/mag'
gdspath = user_project_path + '/gds'
vpath = user_project_path + '/verilog'
errors = 0
if not os.path.isdir(gdspath):
print('No directory ' + gdspath + ' found (path to GDS).')
sys.exit(1)
if not os.path.isdir(vpath):
print('No directory ' + vpath + ' found (path to verilog).')
sys.exit(1)
if not os.path.isdir(magpath):
print('No directory ' + magpath + ' found (path to magic databases).')
sys.exit(1)
print('Step 1: Modify GDS of the user_id_programming subcell')
# Bytes leading up to via position are:
viarec = "00 06 0d 02 00 43 00 06 0e 02 00 2c 00 2c 10 03 "
viabytes = bytes.fromhex(viarec)
# Read the GDS file. If a backup was made of the zero-value
# program, then use it.
gdsbak = gdspath + '/user_id_prog_zero.gds'
gdsfile = gdspath + '/user_id_programming.gds'
if os.path.isfile(gdsbak):
with open(gdsbak, 'rb') as ifile:
gdsdata = ifile.read()
else:
with open(gdsfile, 'rb') as ifile:
gdsdata = ifile.read()
for i in range(0,32):
# Ignore any zero bits.
if user_id_bits[i] == '0':
continue
coords = mask_rev[i]
xum = coords[0]
yum = coords[1]
# Contact is 0.17 x 0.17, so add and subtract 0.085 to get
# the corner positions.
xllum = xum - 0.085
yllum = yum - 0.085
xurum = xum + 0.085
yurum = yum + 0.085
# Get the 4-byte hex values for the corner coordinates
xllnm = round(xllum * 1000)
yllnm = round(yllum * 1000)
xllhex = '{0:08x}'.format(xllnm)
yllhex = '{0:08x}'.format(yllnm)
xurnm = round(xurum * 1000)
yurnm = round(yurum * 1000)
xurhex = '{0:08x}'.format(xurnm)
yurhex = '{0:08x}'.format(yurnm)
# Magic's GDS output for vias always starts at the lower left
# corner and goes counterclockwise, repeating the first point.
viaoldposdata = viarec + xllhex + yllhex + xurhex + yllhex
viaoldposdata += xurhex + yurhex + xllhex + yurhex + xllhex + yllhex
# For "one" bits, the X position is moved 0.92 microns to the left
newxllum = xllum - 0.92
newxurum = xurum - 0.92
# Get the 4-byte hex values for the new corner coordinates
newxllnm = round(newxllum * 1000)
newxllhex = '{0:08x}'.format(newxllnm)
newxurnm = round(newxurum * 1000)
newxurhex = '{0:08x}'.format(newxurnm)
vianewposdata = viarec + newxllhex + yllhex + newxurhex + yllhex
vianewposdata += newxurhex + yurhex + newxllhex + yurhex + newxllhex + yllhex
# Diagnostic
if debugmode:
print('Bit ' + str(i) + ':')
print('Via position ({0:3.2f}, {1:3.2f}) to ({2:3.2f}, {3:3.2f})'.format(xllum, yllum, xurum, yurum))
print('Old hex string = ' + viaoldposdata)
print('New hex string = ' + vianewposdata)
# Convert hex strings to byte arrays
viaoldbytedata = bytearray.fromhex(viaoldposdata)
vianewbytedata = bytearray.fromhex(vianewposdata)
# Replace the old data with the new
if viaoldbytedata not in gdsdata:
print('Error: via not found for bit position ' + str(i))
errors += 1
else:
gdsdata = gdsdata.replace(viaoldbytedata, vianewbytedata)
if errors == 0:
# Keep a copy of the original
if not os.path.isfile(gdsbak):
os.rename(gdsfile, gdsbak)
with open(gdsfile, 'wb') as ofile:
ofile.write(gdsdata)
print('Done!')
else:
print('There were errors in processing. No file written.')
print('Ending process.')
sys.exit(1)
print('Step 2: Add user project ID parameter to verilog.')
changed = False
with open(vpath + '/rtl/caravel.v', 'r') as ifile:
vlines = ifile.read().splitlines()
outlines = []
for line in vlines:
oline = re.sub("parameter USER_PROJECT_ID = 32'h[0-9A-F]+;",
"parameter USER_PROJECT_ID = 32'h" + user_id_value + ";",
line)
if oline != line:
changed = True
outlines.append(oline)
if changed:
with open(vpath + '/rtl/caravel.v', 'w') as ofile:
for line in outlines:
print(line, file=ofile)
print('Done!')
else:
print('Error: No substitutions done on verilog/rtl/caravel.v.')
print('Ending process.')
sys.exit(1)
print('Step 3: Add user project ID text to top level layout.')
with open(magpath + '/user_id_textblock.mag', 'r') as ifile:
maglines = ifile.read().splitlines()
outlines = []
digit = 0
for line in maglines:
if 'alphaX_' in line:
dchar = user_id_value[digit].upper()
oline = re.sub('alpha_[0-9A-F]', 'alpha_' + dchar, line)
outlines.append(oline)
digit += 1
else:
outlines.append(line)
if digit == 8:
with open(magpath + '/user_id_textblock.mag', 'w') as ofile:
for line in outlines:
print(line, file=ofile)
print('Done!')
elif digit == 0:
print('Error: No digits were replaced in the layout.')
else:
print('Error: Only ' + str(digit) + ' digits were replaced in the layout.')
sys.exit(0)
|
MadhuriKadam9/caravel_avsdopamp_3v3_sky130_v2
|
caravel/scripts/generate_fill.py
|
<filename>caravel/scripts/generate_fill.py
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
#
# generate_fill.py ---
#
# Run the fill generation on a layout top level.
#
import sys
import os
import re
import glob
import subprocess
import multiprocessing
def usage():
print("Usage:")
print("generate_fill.py <user_id_value> <project> <path_to_project> [-keep] [-test] [-dist]")
print("")
print("where:")
print(" <user_id_value> is a character string of eight hex digits, and")
print(" <path_to_project> is the path to the project top level directory.")
print("")
print(" If <user_id_value> is not given, then it must exist in the info.yaml file.")
print(" If <path_to_project> is not given, then it is assumed to be the cwd.")
print(" If '-keep' is specified, then keep the generation script.")
print(" If '-test' is specified, then create but do not run the generation script.")
print(" If '-dist' is specified, then run distributed (multi-processing).")
return 0
def makegds(file):
# Procedure for multiprocessing run only: Run the distributed processing
# script to load a .mag file of one flattened square area of the layout,
# and run the fill generator to produce a .gds file output from it.
magpath = os.path.split(file)[0]
filename = os.path.split(file)[1]
myenv = os.environ.copy()
myenv['MAGTYPE'] = 'mag'
mproc = subprocess.run(['magic', '-dnull', '-noconsole',
'-rcfile', rcfile, magpath + '/generate_fill_dist.tcl',
filename],
stdin = subprocess.DEVNULL,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
cwd = magpath,
env = myenv,
universal_newlines = True)
if mproc.stdout:
for line in mproc.stdout.splitlines():
print(line)
if mproc.stderr:
print('Error message output from magic:')
for line in mproc.stderr.splitlines():
print(line)
if mproc.returncode != 0:
print('ERROR: Magic exited with status ' + str(mproc.returncode))
if __name__ == '__main__':
optionlist = []
arguments = []
debugmode = False
keepmode = False
testmode = False
distmode = False
for option in sys.argv[1:]:
if option.find('-', 0) == 0:
optionlist.append(option)
else:
arguments.append(option)
if len(arguments) < 3:
print("Wrong number of arguments given to generate_fill.py.")
usage()
sys.exit(1)
user_id_value = arguments[0]
project = arguments[1]
user_project_path = arguments[2]
try:
# Convert to binary
user_id_int = int('0x' + user_id_value, 0)
user_id_bits = '{0:032b}'.format(user_id_int)
    except ValueError:
print("User ID not recognized")
usage()
sys.exit(1)
# if len(arguments) == 0:
# user_project_path = os.getcwd()
# elif len(arguments) == 2:
# user_project_path = arguments[1]
# elif user_project_path == None:
# user_project_path = arguments[0]
# else:
# user_project_path = os.getcwd()
if not os.path.isdir(user_project_path):
print('Error: Project path "' + user_project_path + '" does not exist or is not readable.')
sys.exit(1)
# Check for valid user ID
# if not user_id_value:
# if os.path.isfile(user_project_path + '/info.yaml'):
# with open(user_project_path + '/info.yaml', 'r') as ifile:
# infolines = ifile.read().splitlines()
# for line in infolines:
# kvpair = line.split(':')
# if len(kvpair) == 2:
# key = kvpair[0].strip()
# value = kvpair[1].strip()
# if key == 'project_id':
# user_id_value = value.strip('"\'')
# break
if user_id_value:
project_with_id = 'caravel_' + user_id_value
else:
print('Error: No project_id found in info.yaml file.')
sys.exit(1)
if '-debug' in optionlist:
debugmode = True
if '-keep' in optionlist:
keepmode = True
if '-test' in optionlist:
testmode = True
if '-dist' in optionlist:
distmode = True
magpath = user_project_path + '/mag'
rcfile = magpath + '/.magicrc'
if not os.path.isfile(rcfile):
rcfile = None
topdir = user_project_path
gdsdir = topdir + '/gds'
    hasgdsdir = os.path.isdir(gdsdir)
ofile = open(magpath + '/generate_fill.tcl', 'w')
print('#!/bin/env wish', file=ofile)
print('drc off', file=ofile)
print('tech unlock *', file=ofile)
print('snap internal', file=ofile)
print('box values 0 0 0 0', file=ofile)
print('box size 700um 700um', file=ofile)
print('set stepbox [box values]', file=ofile)
print('set stepwidth [lindex $stepbox 2]', file=ofile)
print('set stepheight [lindex $stepbox 3]', file=ofile)
print('', file=ofile)
print('set starttime [orig_clock format [orig_clock seconds] -format "%D %T"]', file=ofile)
print('puts stdout "Started: $starttime"', file=ofile)
print('', file=ofile)
# Read the user project from GDS, as there is not necessarily a magic database file
# to go along with this.
# print('gds read ../gds/user_project_wrapper', file=ofile)
# Now read the full caravel project
# print('load ' + project + ' -dereference', file=ofile)
print('gds readonly true', file=ofile)
print('gds rescale false', file=ofile)
print('gds read ../gds/' + project, file=ofile)
print('select top cell', file=ofile)
print('expand', file=ofile)
if not distmode:
print('cif ostyle wafflefill(tiled)', file=ofile)
print('', file=ofile)
print('set fullbox [box values]', file=ofile)
print('set xmax [lindex $fullbox 2]', file=ofile)
print('set xmin [lindex $fullbox 0]', file=ofile)
print('set fullwidth [expr {$xmax - $xmin}]', file=ofile)
print('set xtiles [expr {int(ceil(($fullwidth + 0.0) / $stepwidth))}]', file=ofile)
print('set ymax [lindex $fullbox 3]', file=ofile)
print('set ymin [lindex $fullbox 1]', file=ofile)
print('set fullheight [expr {$ymax - $ymin}]', file=ofile)
print('set ytiles [expr {int(ceil(($fullheight + 0.0) / $stepheight))}]', file=ofile)
print('box size $stepwidth $stepheight', file=ofile)
print('set xbase [lindex $fullbox 0]', file=ofile)
print('set ybase [lindex $fullbox 1]', file=ofile)
print('', file=ofile)
# Break layout into tiles and process each separately
print('for {set y 0} {$y < $ytiles} {incr y} {', file=ofile)
print(' for {set x 0} {$x < $xtiles} {incr x} {', file=ofile)
print(' set xlo [expr $xbase + $x * $stepwidth]', file=ofile)
print(' set ylo [expr $ybase + $y * $stepheight]', file=ofile)
print(' set xhi [expr $xlo + $stepwidth]', file=ofile)
print(' set yhi [expr $ylo + $stepheight]', file=ofile)
print(' if {$xhi > $fullwidth} {set xhi $fullwidth}', file=ofile)
print(' if {$yhi > $fullheight} {set yhi $fullheight}', file=ofile)
print(' box values $xlo $ylo $xhi $yhi', file=ofile)
# The flattened area must be larger than the fill tile by >1.5um
print(' box grow c 1.6um', file=ofile)
# Flatten into a cell with a new name
print(' puts stdout "Flattening layout of tile x=$x y=$y. . . "', file=ofile)
print(' flush stdout', file=ofile)
print(' update idletasks', file=ofile)
print(' flatten -dobox -nolabels ' + project_with_id + '_fill_pattern_${x}_$y', file=ofile)
print(' load ' + project_with_id + '_fill_pattern_${x}_$y', file=ofile)
# Remove any GDS_FILE reference (there should not be any?)
print(' property GDS_FILE ""', file=ofile)
# Set boundary using comment layer, to the size of the step box
# This corresponds to the "topbox" rule in the wafflefill(tiled) style
print(' select top cell', file=ofile)
print(' erase comment', file=ofile)
print(' box values $xlo $ylo $xhi $yhi', file=ofile)
print(' paint comment', file=ofile)
if not distmode:
print(' puts stdout "Writing GDS. . . "', file=ofile)
print(' flush stdout', file=ofile)
print(' update idletasks', file=ofile)
if distmode:
print(' writeall force ' + project_with_id + '_fill_pattern_${x}_$y', file=ofile)
else:
print(' gds write ' + project_with_id + '_fill_pattern_${x}_$y.gds', file=ofile)
# Reload project top
print(' load ' + project, file=ofile)
# Remove last generated cell to save memory
print(' cellname delete ' + project_with_id + '_fill_pattern_${x}_$y', file=ofile)
print(' }', file=ofile)
print('}', file=ofile)
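    # Arithmetic sketch of the tiling (hypothetical die size): with the 700um
    # step box set above, a 3000um x 3500um layout gives
    # xtiles = ceil(3000 / 700) = 5 and ytiles = ceil(3500 / 700) = 5,
    # so the Tcl loop emitted above flattens and writes 25 tiles.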
if distmode:
print('set ofile [open fill_gen_info.txt w]', file=ofile)
print('puts $ofile "$stepwidth"', file=ofile)
print('puts $ofile "$stepheight"', file=ofile)
print('puts $ofile "$xtiles"', file=ofile)
print('puts $ofile "$ytiles"', file=ofile)
print('puts $ofile "$xbase"', file=ofile)
print('puts $ofile "$ybase"', file=ofile)
print('close $ofile', file=ofile)
print('quit -noprompt', file=ofile)
ofile.close()
with open(magpath + '/generate_fill_dist.tcl', 'w') as ofile:
print('#!/bin/env wish', file=ofile)
print('drc off', file=ofile)
print('tech unlock *', file=ofile)
print('snap internal', file=ofile)
print('box values 0 0 0 0', file=ofile)
print('set filename [file root [lindex $argv $argc-1]]', file=ofile)
print('load $filename', file=ofile)
print('cif ostyle wafflefill(tiled)', file=ofile)
print('gds write [file root $filename].gds', file=ofile)
print('quit -noprompt', file=ofile)
ofile = open(magpath + '/generate_fill_final.tcl', 'w')
print('#!/bin/env wish', file=ofile)
print('drc off', file=ofile)
print('tech unlock *', file=ofile)
print('snap internal', file=ofile)
print('box values 0 0 0 0', file=ofile)
print('set ifile [open fill_gen_info.txt r]', file=ofile)
print('gets $ifile stepwidth', file=ofile)
print('gets $ifile stepheight', file=ofile)
print('gets $ifile xtiles', file=ofile)
print('gets $ifile ytiles', file=ofile)
print('gets $ifile xbase', file=ofile)
print('gets $ifile ybase', file=ofile)
print('close $ifile', file=ofile)
print('cif ostyle wafflefill(tiled)', file=ofile)
# Now create simple "fake" views of all the tiles.
print('gds readonly true', file=ofile)
print('gds rescale false', file=ofile)
print('for {set y 0} {$y < $ytiles} {incr y} {', file=ofile)
print(' for {set x 0} {$x < $xtiles} {incr x} {', file=ofile)
print(' set xlo [expr $xbase + $x * $stepwidth]', file=ofile)
print(' set ylo [expr $ybase + $y * $stepheight]', file=ofile)
print(' set xhi [expr $xlo + $stepwidth]', file=ofile)
print(' set yhi [expr $ylo + $stepheight]', file=ofile)
print(' load ' + project_with_id + '_fill_pattern_${x}_$y -quiet', file=ofile)
print(' box values $xlo $ylo $xhi $yhi', file=ofile)
print(' paint comment', file=ofile)
print(' property FIXED_BBOX "$xlo $ylo $xhi $yhi"', file=ofile)
print(' property GDS_FILE ' + project_with_id + '_fill_pattern_${x}_${y}.gds', file=ofile)
print(' property GDS_START 0', file=ofile)
print(' }', file=ofile)
print('}', file=ofile)
# Now tile everything back together
print('load ' + project_with_id + '_fill_pattern -quiet', file=ofile)
print('for {set y 0} {$y < $ytiles} {incr y} {', file=ofile)
print(' for {set x 0} {$x < $xtiles} {incr x} {', file=ofile)
print(' box values 0 0 0 0', file=ofile)
print(' getcell ' + project_with_id + '_fill_pattern_${x}_$y child 0 0', file=ofile)
print(' }', file=ofile)
print('}', file=ofile)
# And write final GDS
print('puts stdout "Writing final GDS"', file=ofile)
print('cif *hier write disable', file=ofile)
print('cif *array write disable', file=ofile)
if hasgdsdir:
print('gds write ../gds/' + project_with_id + '_fill_pattern.gds', file=ofile)
else:
print('gds write ' + project_with_id + '_fill_pattern.gds', file=ofile)
print('set endtime [orig_clock format [orig_clock seconds] -format "%D %T"]', file=ofile)
print('puts stdout "Ended: $endtime"', file=ofile)
print('quit -noprompt', file=ofile)
ofile.close()
myenv = os.environ.copy()
myenv['MAGTYPE'] = 'mag'
if not testmode:
# Diagnostic
# print('This script will generate file ' + project_with_id + '_fill_pattern.gds')
print('This script will generate files ' + project_with_id + '_fill_pattern_x_y.gds')
print('Now generating fill patterns. This may take. . . quite. . . a while.', flush=True)
mproc = subprocess.run(['magic', '-dnull', '-noconsole',
'-rcfile', rcfile, magpath + '/generate_fill.tcl'],
stdin = subprocess.DEVNULL,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
cwd = magpath,
env = myenv,
universal_newlines = True)
if mproc.stdout:
for line in mproc.stdout.splitlines():
print(line)
if mproc.stderr:
print('Error message output from magic:')
for line in mproc.stderr.splitlines():
print(line)
if mproc.returncode != 0:
print('ERROR: Magic exited with status ' + str(mproc.returncode))
if distmode:
# If using distributed mode, then run magic on each of the generated
# layout files
pool = multiprocessing.Pool()
magfiles = glob.glob(magpath + '/' + project_with_id + '_fill_pattern_*.mag')
# NOTE: Adding 'x' to the end of each filename, or else magic will
# try to read it from the command line as well as passing it as an
# argument to the script. We only want it passed as an argument.
magxfiles = list(item + 'x' for item in magfiles)
pool.map(makegds, magxfiles)
# If using distributed mode, then remove all of the temporary .mag files
# and then run the final generation script.
for file in magfiles:
os.remove(file)
mproc = subprocess.run(['magic', '-dnull', '-noconsole',
'-rcfile', rcfile, magpath + '/generate_fill_final.tcl'],
stdin = subprocess.DEVNULL,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
cwd = magpath,
env = myenv,
universal_newlines = True)
if mproc.stdout:
for line in mproc.stdout.splitlines():
print(line)
if mproc.stderr:
print('Error message output from magic:')
for line in mproc.stderr.splitlines():
print(line)
if mproc.returncode != 0:
print('ERROR: Magic exited with status ' + str(mproc.returncode))
if not keepmode:
# Remove fill generation script
os.remove(magpath + '/generate_fill.tcl')
# Remove all individual fill tiles, leaving only the composite GDS.
filelist = os.listdir(magpath)
for file in filelist:
if os.path.splitext(magpath + '/' + file)[1] == '.gds':
if file.startswith(project_with_id + '_fill_pattern_'):
os.remove(magpath + '/' + file)
if distmode:
os.remove(magpath + '/generate_fill_dist.tcl')
os.remove(magpath + '/generate_fill_final.tcl')
os.remove(magpath + '/fill_gen_info.txt')
if testmode:
magfiles = glob.glob(magpath + '/' + project_with_id + '_fill_pattern_*.mag')
for file in magfiles:
os.remove(file)
print('Done!')
    sys.exit(0)
|
bermeom/quadruped-robot
|
tensorblock/layers/layer_conv1d.py
|
<reponame>bermeom/quadruped-robot
import numpy as np
import tensorflow as tf
import tensorblock as tb
class layer_conv1d:
####### Data
def name(): return 'Conv1D'
def shapeMult(): return 1
def dims(): return 1
def allowPooling(): return True
####### Function
def function( x , W , b , recipe , pars ):
strides = pars['strides']
layer = tf.nn.conv1d( x , W , name = 'Conv1D' ,
stride = strides[0] ,
padding = pars['padding'] )
layer = tb.extras.bias( layer , b )
return [ layer ] , pars , None
####### Shapes
def shapes( input_shape , pars ):
in_channels = pars['in_channels']
out_channels = pars['out_channels']
ksize = pars['ksize']
weight_shape = [ ksize[0] , in_channels , out_channels ]
bias_shape = [ out_channels ]
return weight_shape , bias_shape
|
bermeom/quadruped-robot
|
learning/execute.py
|
<filename>learning/execute.py<gh_stars>1-10
import argparse
import importlib
import numpy as np
# Parse args
parser = argparse.ArgumentParser( description = 'Input Arguments' )
parser.add_argument( 'type' , nargs = 1 ) # reinforcement or imitation
parser.add_argument( 'inputs' , nargs = 2 ) # source and player
parser.add_argument( '--load' , dest = 'load' , default = None )
parser.add_argument( '--save' , dest = 'save' , default = [ None ] , nargs = '*' )
parser.add_argument( '--epis' , dest = 'epis' , default = 1e9 )
parser.add_argument( '--run' , dest = 'run' , action = 'store_true' )
parser.add_argument( '--record', dest = 'record', action = 'store_true' )
args = parser.parse_args()
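# Invocation sketch (hypothetical source module name): the positional args are
# the learning type, then the source and player module names, e.g.
#   python execute.py reinforcement some_source player_reinforce_2A --epis 100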
# Import modules
type_string = args.type[0]
source_string , player_string = args.inputs
source_module = importlib.import_module( 'sources.' + source_string )
player_module = importlib.import_module( 'players_' + type_string + '.' + player_string )
# Get instances
source = getattr( source_module , source_string )()
player = getattr( player_module , player_string )()
player.parse_args( args )
# Start source and player
obsv = source.start()
state = player.start( source , obsv )
# Define some variables
states_buffer, actions_buffer = [],[]
done = False
episode, step, n_episodes = 0, 0, int( args.epis )
# Learning loop
while episode < n_episodes:
# Run a step and get info
actn = player.act( state ) # Choose Next Action
obsv , rewd , env_done = source.move( actn ) # Run Next Action On Source
step +=1
# For metric/plotting only, episode defined as 1000 steps
if step % 1000 == 0: done = True
# Save trajectories if recording
if args.record:
states_buffer.append( state )
actions_buffer.append( actn )
if done:
np.save("datasets/states", np.array(states_buffer), allow_pickle=True, fix_imports=True)
np.save("datasets/actions", np.array(actions_buffer), allow_pickle=True, fix_imports=True)
# Train the algorithm
state = player.learn( state , obsv , actn , rewd , env_done, episode ) # Learn From This Action
# Verbose
source.verbose( episode , rewd , done ) # Source Text Output
player.verbose( episode , rewd , done ) # Player Text Output
# If environment episode is over
if env_done:
obsv = source.start()
state = player.restart( obsv )
# If metric episode is over
if done:
episode += 1
done = False
|
bermeom/quadruped-robot
|
tensorblock/recipe/recipe_init.py
|
import tensorflow as tf
import tensorblock as tb
class recipe_init:
####### Initialize Variables
def initVariables( self ):
self.labels = {}
self.cnt = 0
self.curr_input = None
self.blocks , self.order = [] , []
self.layers , self.extras = [] , []
self.inputs , self.variables = [] , []
self.weights , self.biases , self.dropouts = [] , [] , []
self.operations = []
self.summaries , self.writers = [] , []
self.savers , self.plotters = [] , []
####### Initialize Defaults
def initDefaults( self ):
self.defs_block = {
'src' : None , 'dst' : None , 'type' : None ,
'mod_inputs' : True , 'mod_variables' : True , 'mod_layers' : True ,
'no_ops' : False ,
}
self.defs_input = {
'name' : None , 'shape' : None , 'tensor' : None ,
'out_sides' : None , 'out_channels' : None ,
'copy' : None , 'share' : None , 'first_none' : True ,
'dtype' : tf.float32 ,
}
self.defs_variable = {
'name' : None , 'shape' : None , 'tensor' : None ,
'out_sides' : None , 'out_channels' : None ,
'first_none' : False ,
'type' : tb.vars.truncated_normal ,
'copy' : None , 'share' : None ,
'mean' : 0.0 , 'stddev' : 0.1 ,
'value' : 0.0 , 'min' : 0.0 , 'max' : 1.0 ,
'trainable' : True , 'seed' : None ,
}
self.defs_operation = {
'name' : None , 'function' : None ,
'input' : None , 'extra' : None , 'src' : None , 'dst' : None ,
'learning_rate' : 1e-4 ,
}
self.defs_train = {
'train_data' : None , 'train_labels' : None , 'train_seqlen' : None , 'train_length' : None ,
'test_data' : None , 'test_labels' : None , 'test_seqlen' : None , 'test_length' : None ,
'size_batch' : 100 , 'num_epochs' : 10 ,
'optimizer' : None ,
'summary' : None , 'writer' : None ,
'saver' : None , 'save_freq' : 10 ,
'eval_function' : None , 'eval_freq' : 1 ,
'plot_function' : None , 'plot_freq' : 1 ,
}
self.defs_plotter = {
'name' : None , 'function' : None ,
'dir' : 'figures' , 'shape' : [ 2 , 5 ] ,
}
self.defs_layer = {
'input' : None , 'type' : None , 'name' : None ,
'copy' : None , 'share' : None , 'label' : None ,
'weight_type' : tb.vars.truncated_normal ,
'weight_name' : None , 'weight_copy' : None , 'weight_share' : None ,
'weight_mean' : 0.0 , 'weight_stddev' : 0.1 ,
'weight_value' : 0.0 , 'weight_min' : 0.0 , 'weight_max' : 1.0 ,
'weight_trainable' : True , 'weight_seed' : None ,
'bias_type' : tb.vars.truncated_normal ,
'bias_name' : None , 'bias_copy' : None , 'bias_share' : None ,
'bias_mean' : 0.0 , 'bias_stddev' : 0.1 ,
'bias_value' : 0.0 , 'bias_min' : 0.0 , 'bias_max' : 1.0 ,
'bias_trainable' : True , 'bias_seed' : None ,
'dropout_name' : None , 'dropout' : 0.0 ,
'dropout_copy' : None , 'dropout_share' : None ,
'in_sides' : None , 'out_sides' : None ,
'in_channels' : None , 'out_channels' : None ,
'pooling' : 1 , 'pooling_ksize' : None ,
'pooling_strides' : None , 'pooling_padding' : None ,
'cell_type' : 'LSTM' , 'num_cells' : None ,
'in_dropout' : 0.0 , 'in_dropout_name' : None ,
'out_dropout' : 0.0 , 'out_dropout_name' : None ,
'seqlen' : None ,
'strides' : 1 , 'ksize' : 3 , 'padding' : 'SAME' ,
'activation' : tb.activs.relu , 'activation_pars' : None
}
####### Set Input Defaults
def setInputDefaults( self , **args ):
self.defs_input = { **self.defs_input , **args }
####### Set Layer Defaults
def setLayerDefaults( self , **args ):
self.defs_layer = { **self.defs_layer , **args }
####### Set Operation Defaults
def setOperationDefaults( self , **args ):
self.defs_operation = { **self.defs_operation , **args }
####### Set Variable Defaults
def setVariableDefaults( self , **args ):
self.defs_variable = { **self.defs_variable , **args }
####### Initialize
def initialize( self , vars = None ):
if vars is None : vars = self.folder
collection = self.collection( vars )
self.sess.run( tf.variables_initializer( collection ) )
|
bermeom/quadruped-robot
|
learning/sources/pygames/pygame_catch.py
|
<reponame>bermeom/quadruped-robot<filename>learning/sources/pygames/pygame_catch.py
import pygame
import pygame.surfarray as surfarray
import random
class pygame_catch:
### START SIMULATION
def reset( self ):
pygame.init()
self.black = [ 0 , 0 , 0 ]
self.white = [ 255 , 255 , 255 ]
self.screen_size = [ 320 , 240 ]
self.bar_size = [ 6 , 30 ]
self.bar_pos = [ 10 , self.screen_size[1] / 2 - self.bar_size[1] / 2 ]
self.bar_spd , self.bullet_spd = 7.0 , [ 8.0 , 0.0 ]
self.max_bullets , self.prob_bullets = 1 , 0.1
self.wait_frames , self.count_frames = 25 , 0
self.bullet_diam = 8
self.bullet_rad = int( self.bullet_diam / 2 )
self.bullets = []
self.screen = pygame.display.set_mode( self.screen_size , 0 , 32 )
background_surf = pygame.Surface( self.screen_size )
self.background = background_surf.convert()
self.background.fill( self.black )
self.bar = pygame.Surface( self.bar_size ).convert()
self.bar.fill( self.white )
bullet_surf = pygame.Surface( [ self.bullet_diam , self.bullet_diam ] )
pygame.draw.circle( bullet_surf , self.white , [ self.bullet_rad , self.bullet_rad ] , self.bullet_rad )
self.bullet = bullet_surf.convert()
self.bullet.set_colorkey( self.black )
self.bar_max_top = 0
self.bar_max_bot = self.screen_size[1] - self.bar_size[1]
self.catch , self.miss = 0 , 0
return self.draw()
### INFO ON SCREEN
def info( self ):
print( ' Score : %3d x %3d |' % \
( self.catch , self.miss ) , end = '' )
### DRAW SCREEN
def draw( self ):
self.screen.blit( self.background, ( 0 , 0 ) )
self.screen.blit( self.bar , self.bar_pos )
for bullet in self.bullets:
self.screen.blit( self.bullet , bullet[0] )
pygame.display.update()
return pygame.surfarray.array3d( pygame.display.get_surface() )
### MOVE ONE STEP
def step( self , action ):
# Initialize Reward
rewd = 0.0
# Execute Action
if action == 0 : self.bar_pos[1] -= self.bar_spd
if action == 2 : self.bar_pos[1] += self.bar_spd
# Restrict Up and Down Motion
if self.bar_pos[1] < self.bar_max_top: self.bar_pos[1] = self.bar_max_top
if self.bar_pos[1] > self.bar_max_bot: self.bar_pos[1] = self.bar_max_bot
# Move Bullets and Check for Collision
remove = None
for i , bullet in enumerate( self.bullets ):
bullet[0][0] -= bullet[1]
if bullet[0][0] < self.bar_pos[0] + self.bar_size[0]:
if bullet[0][1] + self.bullet_rad > self.bar_pos[1] and \
bullet[0][1] + self.bullet_rad < self.bar_pos[1] + self.bar_size[1]:
                    self.catch += 1 ; rewd += 1.0  # Positive Reward
                else:
                    self.miss += 1 ; rewd -= 1.0  # Negative Reward
remove = i
# Remove Bullet
if remove is not None:
self.bullets.pop( remove )
# Add Bullets
if self.count_frames == 0:
if len( self.bullets ) < self.max_bullets:
if random.random() < self.prob_bullets or len( self.bullets ) == 0:
self.count_frames = self.wait_frames
self.bullets.append( [
[ self.screen_size[0] - 10 ,
self.bar_size[1] / 2 + ( self.screen_size[1] - self.bar_size[1] ) * random.random() ] ,
self.bullet_spd[0] + self.bullet_spd[1] * random.random() ] )
else:
self.count_frames -= 1
# Determine Done
done = self.catch == 25 or self.miss == 25
# Return Data
return self.draw() , rewd , done
############################################################################################
|
bermeom/quadruped-robot
|
tensorblock/recipe/recipe_summary.py
|
import tensorflow as tf
import tensorblock as tb
class recipe_summary:
####### Add Summary Base
def addSummaryBase( self , inputs , name , function ):
inputs = tb.aux.parse_pairs( inputs )
name = self.add_label(
self.summaries , 'Summary' , name , add_order = False )
list = []
for input in inputs:
if input[1] is None: input[1] = self.folder + input[0]
tensor , tag = self.tensor( input[0] ) , input[1]
list.append( function( tag , tensor ) )
self.summaries.append( [ list , name ] )
####### Add Summary Scalar
def addSummaryScalar( self , input , name = None ):
return self.addSummaryBase( input , name , tf.summary.scalar )
####### Add Summary Histogram
def addSummaryHistogram( self , input , name = None ):
return self.addSummaryBase( input , name , tf.summary.histogram )
####### Add Summary
def addSummary( self , input = None , name = None ):
name = self.add_label(
self.summaries , 'Summary' , name , add_order = False )
if input is None: input = tf.summary.merge_all()
        else: input = tf.summary.merge( self.tensor_list( input ) )
self.summaries.append( [ input , name ] )
####### Add Writer
def addWriter( self , name = None , dir = 'logs' ):
name = self.add_label(
self.writers , 'Writer' , name , add_order = False )
self.writers.append(
[ tf.summary.FileWriter( dir , graph = self.sess.graph ) , name ] )
####### Write
def write( self , name = None , summary = None , iter = None ):
if name is None: name = self.writers[-1][1]
self.tensor( name ).add_summary( summary , iter )
|
bermeom/quadruped-robot
|
tensorblock/recipe/recipe_save.py
|
import os
import tensorflow as tf
class recipe_save:
####### Add Saver
def addSaver( self , name = None , input = None , dir = 'models' , pref = 'model' ):
name = self.add_label(
self.savers , 'Saver' , name , add_order = False )
if input is None :
input = self.folder
collection = self.collection( input )
self.savers.append( [ tf.train.Saver( collection ) , [ name , dir , pref ] ] )
####### Save
def save( self , name = None , dir = None , pref = None , iter = None ):
if name is None: name = self.savers[-1][1][0]
if dir is None: dir = self.pars( name )[1]
if pref is None: pref = self.pars( name )[2]
        if dir[-1] != '/': dir += '/'
if not os.path.exists( dir ):
os.makedirs( dir )
self.tensor( name ).save( self.sess ,
global_step = iter , save_path = dir + pref )
####### Restore
def restore( self , name = None , dir = None , pref = None ):
if name is None: name = self.savers[-1][1][0]
if dir is None: dir = self.pars( name )[1]
if pref is None: pref = self.pars( name )[2]
        if dir[-1] != '/': dir += '/'
if os.path.exists( dir ):
self.tensor( name ).restore( self.sess , save_path = dir + pref )
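# Usage sketch (assumes a recipe object named "brain" that mixes in this
# class, as the player classes in this repo do): addSaver() snapshots the
# recipe's variable collection, save() writes a checkpoint under dir + pref,
# and restore() reloads it from the same path:
#   brain.addSaver( name = 'Saver' , dir = 'models' , pref = 'model' )
#   brain.save()     # writes models/model*
#   brain.restore()  # reloads from models/model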
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_dql_bayesian_1A.py
|
from players_reinforcement.player_dql_bayesian_1 import *
# PLAYER DQL BAYESIAN
class player_dql_bayesian_1A( player_dql_bayesian_1 ):
NUM_FRAMES = 3
BATCH_SIZE = 50
LEARNING_RATE = 1e-4
REWARD_DISCOUNT = 0.99
START_RANDOM_PROB = 1.0
FINAL_RANDOM_PROB = 0.05
NUM_EXPLORATION_EPISODES = 200
EXPERIENCES_LEN = 100000
STEPS_BEFORE_TRAIN = 150
### __INIT__
def __init__( self ):
player_dql_bayesian_1.__init__( self )
# PROCESS OBSERVATION
def process(self, obsv):
return np.stack( tuple( self.obsv_list[i] for i in range(self.NUM_FRAMES) ), axis=1 )
# PREPARE NETWORK
def network(self):
# Input Placeholder
self.brain.addInput( shape = [ None, self.obsv_shape[0], self.NUM_FRAMES ],
name = 'Observation' )
# Fully Connected Layers
self.brain.setLayerDefaults( type = tb.layers.fully,
activation = tb.activs.relu )
self.brain.addLayer( out_channels = 512,
dropout = True,
dropout_name = 'Drop',
name = 'Hidden' )
self.brain.addLayer( input = 'Hidden',
out_channels = self.num_actions,
activation = None,
name = 'Output')
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_reinforce_2A.py
|
from players_reinforcement.player_reinforce_2 import *
# PLAYER REINFORCE
class player_reinforce_2A( player_reinforce_2 ):
NUM_FRAMES = 3
LEARNING_RATE = 1e-4
REWARD_DISCOUNT = 0.99
### __INIT__
def __init__( self ):
player_reinforce_2.__init__( self )
# PROCESS OBSERVATION
def process(self, obsv):
return np.stack( tuple( self.obsv_list[i] for i in range( self.NUM_FRAMES ) ), axis = 2 )
# PREPARE NETWORK
def network( self ):
# Input Placeholder
self.brain.addInput( shape = [ None, self.obsv_shape[0], self.obsv_shape[1], self.NUM_FRAMES ],
name = 'Observation' )
# Convolutional Layers
self.brain.setLayerDefaults( type = tb.layers.conv2d,
activation = tb.activs.relu,
pooling = 2,
weight_stddev = 0.01,
bias_stddev = 0.01 )
self.brain.addLayer( out_channels = 32, ksize = 8, strides = 4, input = 'Observation' )
self.brain.addLayer( out_channels = 64, ksize = 4, strides = 2 )
self.brain.addLayer( out_channels = 64, ksize = 3, strides = 1 )
# Fully Connected Layers
self.brain.setLayerDefaults( type = tb.layers.fully ,
activation = tb.activs.relu ,
weight_stddev = 0.01,
bias_stddev = 0.01 )
self.brain.addLayer( out_channels = 256 )
self.brain.addLayer( out_channels = self.num_actions,
activation = tb.activs.softmax,
name = 'Output' )
|
bermeom/quadruped-robot
|
tensorblock/recipe/recipe_input.py
|
<filename>tensorblock/recipe/recipe_input.py<gh_stars>1-10
import tensorflow as tf
import tensorblock as tb
import numpy as np
class recipe_input:
####### Add Input
def addInput( self , **args ):
pars = { **self.defs_input , **args }
pars['name'] = self.add_label(
self.inputs , 'Input' , pars['name'] , add_order = True )
pars = self.parse_input_pars( pars )
if pars['share'] is not None:
self.inputs.append( [ self.node( pars['share'] ) , pars ] )
else:
if pars['tensor'] is None:
with tf.variable_scope( self.folder + pars['name'] , reuse = False ):
self.inputs.append( [ tb.vars.placeholder( shape = pars['shape'] ,
dtype = pars['dtype'] ) , pars ] )
else: self.inputs.append( [ pars['tensor'] , pars ] )
self.curr_input = pars['name']
return self.inputs[-1][0]
####### Add Variable
def addVariable( self , **args ):
pars = { **self.defs_variable , **args }
pars['name'] = self.add_label(
self.variables , 'Variable' , pars['name'] , add_order = True )
pars = self.parse_input_pars( pars )
if pars['share'] is not None:
self.variables.append( [ self.node( pars['share'] ) , pars ] )
else:
if pars['tensor'] is None:
with tf.variable_scope( self.folder + pars['name'] , reuse = False ):
self.variables.append( [ pars['type']( pars['shape'] , pars ) , pars ] )
else:
if callable( pars['tensor'] ):
with tf.variable_scope( self.folder + pars['name'] , reuse = False ):
self.variables.append( [ pars['tensor']( pars['shape'] , pars ) , pars ] )
else:
if isinstance( pars['tensor'] , np.ndarray ):
self.variables.append( [ tb.vars.numpy( pars['tensor'] , pars ) , pars ] )
else:
self.variables.append( [ pars['tensor'] , pars ] )
return self.variables[-1][0]
####### Parse Pars
def parse_input_pars( self , pars ):
if pars['tensor'] is not None:
pars['first_none'] = False
if isinstance( pars['tensor'] , np.ndarray ):
pars['shape'] = pars['tensor'].shape
else:
pars['shape'] = tb.aux.tf_shape( pars['tensor'] )
if pars['copy'] is not None: # Copying
pars['type'] = tb.vars.copy
pars['shape'] = self.node( pars['copy'] )
copy_pars = self.pars( pars['copy'] )
pars['out_sides'] = copy_pars['out_sides']
pars['out_channels'] = copy_pars['out_channels']
else: # Nothing
pars['shape'] = list( pars['shape'] )
if pars['first_none'] and len( pars['shape'] ) > 1: pars['shape'][0] = None
shape = pars['shape']
if pars['out_sides'] is None:
        if len( shape ) == 2: pars['out_sides'] = shape[1:2]
        if len( shape ) == 4: pars['out_sides'] = shape[1:3]
        if len( shape ) == 5: pars['out_sides'] = shape[1:4]
if pars['out_channels'] is None:
if len( shape ) == 2: pars['out_channels'] = 1
else: pars['out_channels'] = shape[-1]
return pars
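# Usage sketch (mirroring how the player classes in this repo call it):
#   brain.addInput( shape = [ None , 84 , 84 , 3 ] , name = 'Observation' )
# creates a float32 placeholder, records out_sides ([84, 84]) and
# out_channels (3) from the shape, and makes 'Observation' the current
# input for the next addLayer call.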
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_DDPG_1A.py
|
<reponame>bermeom/quadruped-robot
from players_reinforcement.player_DDPG_1 import *
import tensorflow as tf
import numpy as np
# PLAYER DDPG
class player_DDPG_1A( player_DDPG_1 ):
EXPERIENCES_LEN = 1e6
BATCH_SIZE = 128
STEPS_BEFORE_TRAIN = 5000
NUM_FRAMES = 1
C_LEARNING_RATE = 1e-3
A_LEARNING_RATE = 1e-4
TAU = 0.01
REWARD_DISCOUNT = 0.99
### __INIT__
def __init__( self ):
player_DDPG_1.__init__( self )
# PROCESS OBSERVATION
def process(self, obsv):
return np.stack( tuple(self.obsv_list[i] for i in range(self.NUM_FRAMES)), axis = 1 )
# PREPARE NETWORK
def network( self ):
# Critic
NormalCritic = self.brain.addBlock( 'NormalCritic' )
NormalCritic.addInput( shape = [ None, self.obsv_shape[0], self.NUM_FRAMES ], name = 'Observation' )
NormalCritic.addInput( shape = [ None, self.num_actions ], name = 'Actions' )
NormalCritic.setLayerDefaults( type = tb.layers.hlfully,
activation = tb.activs.relu,
bias = False)
NormalCritic.addLayer( input = 'Observation',out_channels = 128, name = 'Hidden1' )
H1 = NormalCritic.tensor( 'Hidden1' )
H2 = NormalCritic.tensor( 'Actions' )
H3 = tf.concat( [H1,H2], 1 )
NormalCritic.addInput( tensor = H3, name = 'Hidden3' )
NormalCritic.addLayer( input = 'Hidden3', out_channels = 200, name = 'Hidden4' )
NormalCritic.addLayer( input = 'Hidden4', out_channels = 1, activation = None , name = 'Value' )
# Target Critic
TargetCritic = self.brain.addBlock( 'TargetCritic' )
TargetCritic.addInput( shape = [ None, self.obsv_shape[0], self.NUM_FRAMES ], name = 'Observation')
TargetCritic.addInput( shape = [ None, self.num_actions ], name = 'Actions' )
TargetCritic.setLayerDefaults( type = tb.layers.hlfully,
activation = tb.activs.relu,
bias = False )
TargetCritic.addLayer( input = 'Observation', out_channels = 128, name = 'Hidden1', copy_weights = '../NormalCritic/W_Hidden1' )
H1 = TargetCritic.tensor ('Hidden1' )
H2 = TargetCritic.tensor( 'Actions' )
H3 = tf.concat( [H1,H2],1 )
TargetCritic.addInput( tensor = H3, name = 'Hidden3')
TargetCritic.addLayer( input = 'Hidden3', out_channels = 200, name = 'Hidden4', copy_weights = '../NormalCritic/W_Hidden4' )
TargetCritic.addLayer( input = 'Hidden4', out_channels = 1, activation = None , name = 'Value', copy_weights = '../NormalCritic/W_Value' )
# Actor
NormalActor = self.brain.addBlock( 'NormalActor' )
NormalActor.addInput( shape=[ None, self.obsv_shape[0], self.NUM_FRAMES ], name='Observation' )
NormalActor.setLayerDefaults( type = tb.layers.hlfully,
activation = tb.activs.relu,
bias = False )
NormalActor.addLayer( input = 'Observation', out_channels = 128, name = 'Hidden1' )
NormalActor.addLayer( input = 'Hidden1', out_channels = 200, name = 'Hidden2' )
NormalActor.addLayer( input = 'Hidden2', out_channels = self.num_actions,
activation = tb.activs.tanh, name = 'Out', min = -0.003, max = 0.003)
out = NormalActor.tensor('Out')
out = tf.multiply( out, self.range_actions )
NormalActor.addInput( tensor = out, name = 'Output')
# Target Actor
TargetActor = self.brain.addBlock( 'TargetActor' )
TargetActor.addInput( shape=[ None, self.obsv_shape[0], self.NUM_FRAMES ], name='Observation' )
TargetActor.setLayerDefaults( type = tb.layers.hlfully,
activation = tb.activs.relu,
bias = False )
TargetActor.addLayer( input = 'Observation', out_channels = 128, name = 'Hidden1', copy_weights = '../NormalActor/W_Hidden1')
TargetActor.addLayer( input = 'Hidden1', out_channels = 200, name = 'Hidden2', copy_weights = '../NormalActor/W_Hidden2')
TargetActor.addLayer( input = 'Hidden2', out_channels = self.num_actions, name = 'Out',
activation = tb.activs.tanh, min = -0.003, max = 0.003, copy_weights = '../NormalActor/W_Out')
out = TargetActor.tensor('Out')
out = tf.multiply( out, self.range_actions )
TargetActor.addInput( tensor = out, name = 'Output')
|
bermeom/quadruped-robot
|
tensorblock/layers/layer_deconv2d.py
|
import numpy as np
import tensorflow as tf
import tensorblock as tb
class layer_deconv2d:
####### Data
def name(): return 'Deconv2D'
def shapeMult(): return 1
def dims(): return 2
def allowPooling(): return False
####### Function
def function( x , W , b , recipe , pars ):
if tb.aux.tf_length( x ) == 2:
x = tb.aux.tf_fold2D( x , tb.aux.tf_shape( W )[-1] )
pars['in_sides'] = tb.aux.tf_shape( x )[1:3]
out_channels = tb.aux.tf_shape( W )[-2]
size_batch = tf.shape( x , name = 'batch' )[0]
in_sides , out_sides , strides = pars['in_sides'] , pars['out_sides'] , pars['strides']
if np.prod( out_sides ) == 1:
for i in range( len( out_sides ) ):
out_sides[i] = in_sides[i] * strides[i]
strides = [ int( np.ceil( out_sides[0] / in_sides[0] ) ) ,
int( np.ceil( out_sides[1] / in_sides[1] ) ) ]
out_shape = tf.stack( [ size_batch , out_sides[0] ,
out_sides[1] , out_channels ] , name = 'shape' )
layer = tf.nn.conv2d_transpose( x , W , name = 'Deconv2D' ,
output_shape = out_shape ,
strides = [ 1 , strides[0] , strides[1] , 1 ] ,
padding = pars['padding'] )
dummy = tb.vars.dummy( [ out_sides[0] , out_sides[1] , out_channels ] , name = 'dummy' )
layer = tf.add( layer , dummy , name = 'DummyAdd' )
layer = tb.extras.bias( layer , b )
return [ layer ] , pars , None
####### Shapes
def shapes( input_shape , pars ):
in_channels = pars['in_channels']
out_channels = pars['out_channels']
ksize = pars['ksize']
weight_shape = [ ksize[0] , ksize[1] , out_channels , in_channels ]
bias_shape = [ out_channels ]
return weight_shape , bias_shape
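The stride inference in function() above recomputes strides as ceil(out_side / in_side) per dimension once explicit out_sides are known; the same rule isolated in numpy (illustrative):
import numpy as np

def infer_deconv_strides(in_sides, out_sides):
    # matches layer_deconv2d: stride_i = ceil(out_i / in_i)
    return [int(np.ceil(o / i)) for o, i in zip(out_sides, in_sides)]

print(infer_deconv_strides([7, 7], [14, 14]))   # [2, 2]
print(infer_deconv_strides([7, 7], [28, 28]))   # [4, 4]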
|
bermeom/quadruped-robot
|
tensorblock/aux/aux_reshape.py
|
import math
import numpy as np
### Flat Dimension
def flat_dim( shape ):
return np.prod( shape[1:] )
### Flatten
def flatten( x ):
return x.reshape( [ -1 , flat_dim( x.shape ) ] )
### 2D Side Dimension
def side2D( x , d = 1 ):
if isinstance( x , list ) : x = x[1]
return int( round( math.pow( x / d , 1.0 / 2.0 ) ) )
### 3D Side Dimension
def side3D( x , d = 1 ):
if isinstance( x , list ) : x = x[1]
return int( round( math.pow( x / d , 1.0 / 3.0 ) ) )
### Shape 1D
def shape1D( x ):
return [ None , x.shape[1] ]
### Shape 2D
def shape2D( x , channels = 1 ):
side = side2D( x.shape[1] , channels )
return [ None , side , side , channels ]
### Shape 3D
def shape3D( x , channels = 1 ):
side = side3D( x.shape[1] , channels )
return [ None , side , side , side , channels ]
### Spread
def spread( x , n ):
return x if isinstance( x , list ) else [ x ] * n
### Flatten List
def flatten_list( list1 ):
list2 = []
for item1 in list1:
if isinstance( item1 , list ):
for item2 in item1:
list2.append( item2 )
else:
list2.append( item1 )
return list2
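Example of the helpers above on a flattened batch of 28x28 grayscale images (illustrative values):
import numpy as np

x = np.zeros((32, 784))      # batch of flattened 28x28 images
print(flat_dim(x.shape))     # 784
print(side2D(784))           # 28
print(shape2D(x))            # [None, 28, 28, 1]
print(flatten(x).shape)      # (32, 784)
print(spread(3, 2))          # [3, 3]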
|
bermeom/quadruped-robot
|
learning/players_imitation/player_GAIL_1A.py
|
from players_imitation.player_GAIL_1 import *
##### PLAYER GAIL
class player_GAIL_1A( player_GAIL_1 ):
A_LEARNING_RATE = 1e-6
C_LEARNING_RATE = 1e-3
D_LEARNING_RATE = 1e-4
NUM_FRAMES = 1
UPDATE_SIZE = 5
BATCH_SIZE = 128
EPSILON = 0.10
GAMMA = 0.995
LAM = 0.97
B_CLONING = True # True to start with good policy
BC_LEARNING_RATE = 1e-4
INIT_NO_EPOCHS = 50
BC_BATCH_SIZE = 50
DS_SIZE = 100000
DATASET = 'cartpole'
### __INIT__
def __init__( self ):
player_GAIL_1.__init__( self )
# PROCESS OBSERVATION
def process(self, obsv):
return np.stack( tuple( self.obsv_list[i] for i in range( self.NUM_FRAMES ) ), axis = 1 )
### PREPARE NETWORK
def network( self ):
# Critic
Critic = self.brain.addBlock( 'Critic' )
Critic.addInput( shape = [ None, self.obsv_shape[0], self.NUM_FRAMES ], name='Observation' )
Critic.setLayerDefaults( type = tb.layers.hlfully,
activation = tb.activs.tanh )
Critic.addLayer( out_channels = 256, input = 'Observation' )
#Critic.addLayer( out_channels = 200 )
Critic.addLayer( out_channels = 1, name = 'Value', activation = None )
# Actor
Actor = self.brain.addBlock( 'Actor' )
Actor.addInput( shape = [ None, self.obsv_shape[0], self.NUM_FRAMES ], name = 'Observation' )
Actor.setLayerDefaults( type = tb.layers.hlfully,
activation = tb.activs.tanh )
Actor.addLayer( out_channels = 256 , input = 'Observation', name = 'Hidden' )
#Actor.addLayer( out_channels = 200, name = 'Hidden' )
Actor.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = None, name = 'Mu')
Actor.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = tb.activs.softplus, name = 'Sigma', activation_pars = 0.001 )
Actor.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = tb.activs.softmax, name = 'Discrete' )
mu = Actor.tensor( 'Mu' )
sigma = Actor.tensor( 'Sigma' )
dist = tb.extras.dist_normal( mu, sigma )
action = tb.aux.tf_squeeze( dist.sample( 1 ), 0 )
Actor.addInput( tensor = action, name = 'Output')
# OldActor
Old = self.brain.addBlock( 'Old' )
Old.addInput( shape = [ None, self.obsv_shape[0], self.NUM_FRAMES ], name = 'Observation' )
Old.setLayerDefaults( type = tb.layers.hlfully,
activation = tb.activs.tanh )
Old.addLayer( out_channels = 256 , input = 'Observation', name = 'Hidden' )
#Old.addLayer( out_channels = 200, name = 'Hidden' )
Old.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = None, name = 'Mu')
Old.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = tb.activs.softplus, name = 'Sigma', activation_pars = 0.001 )
Old.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = tb.activs.softmax, name = 'Discrete' )
# Discriminator
Disc = self.brain.addBlock( 'Disc' )
Disc.addInput( shape = [ None, self.obsv_shape[0], self.NUM_FRAMES ], name = 'Observation' )
Disc.addInput( shape = [ None, self.num_actions ], name = 'Action' )
Disc.addLayer( input = 'Observation', type = tb.layers.flatten, name = 'ObservationFlat' )
traj = tb.aux.tf_concat( Disc.tensor( 'ObservationFlat' ), Disc.tensor( 'Action' ), 1 )
Disc.addInput( tensor = traj, name = 'Trajectory')
Disc.setLayerDefaults( type = tb.layers.hlfully,
activation = tb.activs.tanh,
weight_stddev = 0.01,
bias_stddev = 0.01 )
Disc.addLayer( out_channels = 256, input = 'Trajectory', name = 'Hidden' )
#Disc.addLayer( out_channels = 200, name = 'Hidden' )
Disc.addLayer( out_channels = 1, name = 'Output', activation = None)
logits = Disc.tensor( 'Output' )
reward = tb.extras.log_sig( logits )
Disc.addInput( tensor = reward, name = 'DiscRew')
|
bermeom/quadruped-robot
|
learning/sources/vrep/vrepper/utils.py
|
import functools
import warnings
import subprocess as sp
import os
import psutil
from vrepper.lib.vrepConst import simx_opmode_oneshot, simx_opmode_blocking, simx_return_ok
list_of_instances = []
import atexit
def cleanup(): # kill all spawned subprocesses on exit
for i in list_of_instances:
i.end()
atexit.register(cleanup)
blocking = simx_opmode_blocking
oneshot = simx_opmode_oneshot
# the class holding a subprocess instance.
class instance():
def __init__(self, args, suppress_output=True):
self.args = args
self.suppress_output = suppress_output
list_of_instances.append(self)
def start(self):
print('(instance) starting...')
try:
if self.suppress_output:
stdout = open(os.devnull, 'w')
else:
stdout = None  # inherit the parent's stdout (sp.STDOUT is only valid for stderr)
self.inst = sp.Popen(self.args, stdout=stdout, stderr=sp.STDOUT)
except EnvironmentError:
print('(instance) Error: cannot find executable at', self.args[0])
raise
return self
def isAlive(self):
return self.inst.poll() is None
def end(self):
print('(instance) terminating...')
if self.isAlive():
pid = self.inst.pid
parent = psutil.Process(pid)
for _ in parent.children(recursive=True):
_.kill()
retcode = parent.kill()
else:
retcode = self.inst.returncode
print('(instance) retcode:', retcode)
return self
# check return tuple, raise error if retcode is not OK,
# return remaining data otherwise
def check_ret(ret_tuple, ignore_one=False):
istuple = isinstance(ret_tuple, tuple)
if not istuple:
ret = ret_tuple
else:
ret = ret_tuple[0]
if (not ignore_one and ret != simx_return_ok) or (ignore_one and ret > 1):
raise RuntimeError('retcode(' + str(ret) + ') not OK, API call failed. Check the parameters!')
return ret_tuple[1:] if istuple else None
def deprecated(msg=''):
def dep(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.'''
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.warn_explicit(
"Call to deprecated function {}. {}".format(func.__name__, msg),
category=DeprecationWarning,
filename=func.__code__.co_filename,
lineno=func.__code__.co_firstlineno + 1
)
return func(*args, **kwargs)
return new_func
return dep
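Illustrative usage of the decorator above (hypothetical function name):
import warnings
warnings.simplefilter('always', DeprecationWarning)

@deprecated('Use new_start() instead.')
def old_start():
    return 'started'

old_start()   # emits a DeprecationWarning pointing at old_start's definition site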
|
bermeom/quadruped-robot
|
envs/quadruped.py
|
import numpy as np
from gym import utils
from os import path
from gym.envs.mujoco import mujoco_env
import math
import os
def mass_center(model, sim):
mass = np.expand_dims(model.body_mass, 1)
xpos = sim.data.xipos
return (np.sum(mass * xpos, 0) / np.sum(mass))[0]
class QuadrupedEnv(mujoco_env.MujocoEnv, utils.EzPickle):
vel_x_ref = 2;
vel_y_ref = 0;
ori_ref = 0;
err_vel_x = 0;
err_vel_y = 0;
err_vel_x_1 = 0;
err_ori = 0;
err_vel_y_1 = 0;
err_ori_1 = 0;
count_step = 0;
def __init__(self):
xmlpath = os.path.join(os.path.split(os.path.abspath(__file__))[0], "xml/quadrupedrobot.xml");
mujoco_env.MujocoEnv.__init__(self, xmlpath, 5)
utils.EzPickle.__init__(self)
self.vel_x_ref = 1;
self.vel_y_ref = 0;
self.err_vel_x = 0;
self.err_vel_y = 0;
self.count_step = 0;
def step(self, action):
if (len(action)==1):
action=action[0];
# for i in range(0,len(action)):
# action[i] = np.cos(self.count_step/self.dt+action[i])
mc_before = mass_center(self.model, self.sim)
qpos_before = np.array(self.sim.data.qpos);
qvel_before = np.array(self.sim.data.qvel);
xpos_before = np.array(self.sim.data.xipos);
self.do_simulation(action, self.frame_skip)
mc_after = mass_center(self.model, self.sim)
qpos_after = np.array(self.sim.data.qpos);
qvel_after = np.array(self.sim.data.qvel);
xpos_after = np.array(self.sim.data.xipos);
orientation = qpos_after.flat[3:7]; # w x y z
rot_axis_z = np.arctan2(2*(orientation[0]*orientation[3]+orientation[1]*orientation[2]),1-2*(orientation[2]*orientation[2]+orientation[3]*orientation[3]));
# print("qpos : ",qpos_before[0:4]," ",qpos_after[0:4])
# print("xpos : ",xpos_before," ",xpos_after)
# print("qvel : ",qvel_before[0:3]," ",qvel_after[0:3])
# print("mc : ",mc_after," ",mc_after)
data = self.sim.data
self.vel_x = (qpos_after[0] - qpos_before[0])/self.dt;
self.vel_y = (qpos_after[1] - qpos_before[1])/self.dt;
# print("veel",[self.vel_x,self.vel_y ]," vs ",qvel_after[0:2])
self.err_vel_x = (self.vel_x_ref-self.vel_x);
self.err_vel_y = (self.vel_y_ref-self.vel_y);
self.err_ori = (self.ori_ref-rot_axis_z);
alive_bonus = 0.0
bonus_for_doing_well = (self.err_vel_x_1-self.err_vel_x)+ (self.err_vel_y_1-self.err_vel_y)+ (self.err_ori_1-self.err_ori);
lin_vel_cost = self.err_vel_x*self.err_vel_x + 0.0*self.err_vel_y*self.err_vel_y;
quad_ctrl_cost = 0.01 * np.square(data.ctrl).sum();
quad_impact_cost = .1e-7 * np.square(data.cfrc_ext).sum();
quad_impact_cost = min(quad_impact_cost, 10.0);
quad_impact_cost = 0.0*(quad_impact_cost*quad_impact_cost);
orie_cost = math.fabs(orientation[1]) + math.fabs(orientation[2]) + math.fabs(self.err_ori);
# reward = alive_bonus+bonus_for_doing_well-((lin_vel_cost + quad_ctrl_cost + quad_impact_cost+orie_cost));
c = 0.2;# (1/np.sqrt(2*np.pi*c))*
vel_x_bonus = np.exp(-math.fabs(self.err_vel_x)/(2*c));
vel_y_bonus = np.exp(-math.fabs(self.err_vel_y)/(2*c));
ori_bonus = np.exp(-math.fabs(self.err_ori)/(2*c));
ori_cost = ((np.exp(-(math.fabs(orientation[1]))/(2*c))+np.exp(-(math.fabs(orientation[2]))/(2*c)))/2);
# reward = alive_bonus+vel_x_bonus+vel_y_bonus+ori_bonus+ori_cost-(quad_ctrl_cost + quad_impact_cost);
reward = (0.50*vel_x_bonus+0.2*vel_y_bonus+0.15*ori_bonus+0.15*ori_cost)-0.5;
done = bool((math.fabs(orientation[1])+math.fabs(orientation[2]))>0.5) # rotational angles |x|+|y|
# print("Done : ",done," ",reward," ",vel_x_bonus," ",self.vel_x," ",vel_y_bonus," ",ori_bonus," ",ori_cost," ",(quad_ctrl_cost + quad_impact_cost));
# reward = reward -100*done;
# print("Done : ",done," ",reward,lin_vel_cost,quad_ctrl_cost,quad_impact_cost,"vel [",self.err_vel_x,",",self.err_vel_y,"]");
# print("Orientation",RotationAxis_z)
# done = False;
ob = self._get_obs()
self.err_vel_x_1 = self.err_vel_x;
self.err_vel_y_1 = self.err_vel_y;
self.err_ori_1 = self.err_ori;
self.count_step = ( not done)*(self.count_step+1)
# print("len",len(ob))
return ob, reward, done, dict(reward_linvel=lin_vel_cost, reward_quadctrl=-quad_ctrl_cost, reward_alive=alive_bonus, reward_impact=-quad_impact_cost)
def _get_obs(self):
# print("obs -> ",len(self.sim.data.qpos.flat))
return np.concatenate([
self.sim.data.qpos.flat,
self.sim.data.qvel.flat,
self.sim.data.cinert.flat,
self.sim.data.cvel.flat,
self.sim.data.qfrc_actuator.flat,
self.sim.data.cfrc_ext.flat,
[self.err_vel_x,self.err_vel_y,self.err_ori]
])
def reset_model(self):
c = 0.01
self.set_state(
self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),
self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv,)
)
return self._get_obs()
def viewer_setup(self):
# self.viewer.cam.distance = self.model.stat.extent * 0.5
self.viewer.cam.trackbodyid = 1
self.viewer.cam.distance = self.model.stat.extent * 1.0
self.viewer.cam.lookat[2] = 2.0
self.viewer.cam.elevation = -20
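The yaw extraction in step() is the standard quaternion-to-Euler formula for rotation about z, with the quaternion ordered (w, x, y, z); a quick numpy check with a pure 90-degree yaw quaternion (illustrative):
import numpy as np

w, x, y, z = np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)   # 90 deg about z
yaw = np.arctan2(2 * (w * z + x * y), 1 - 2 * (y * y + z * z))
print(np.degrees(yaw))   # ~90.0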
|
bermeom/quadruped-robot
|
learning/sources/source_unity_3dball.py
|
from sources.source_unity import source_unity
import numpy as np
##### SOURCE UNITY 3D BALL
class source_unity_3dball( source_unity ):
### __INIT__
def __init__( self ):
source_unity.__init__( self , "3dball" )
self.range_act = 2
### INFORMATION
def range_actions( self ): return self.range_act
### MAP KEYS
def map_keys( self , actn ):
return np.clip( actn, -self.range_act, self.range_act)
### PROCESS OBSERVATION
def process( self , obsv ):
return obsv
|
bermeom/quadruped-robot
|
tensorblock/recipe/recipe.py
|
import tensorflow as tf
import tensorblock as tb
class recipe( tb.recipe.base , tb.recipe.block , tb.recipe.init ,
tb.recipe.input , tb.recipe.layer , tb.recipe.operation ,
tb.recipe.plot , tb.recipe.print , tb.recipe.save ,
tb.recipe.summary , tb.recipe.train ):
####### Initialize
def __init__( self , sess = None , prev = None , name = None ):
self.sess = tf.Session() if sess is None else sess
self.root = self if prev is None else prev.root
self.folder = '' if name is None else name + '/'
if prev is not None: self.folder = prev.folder + self.folder
self.initDefaults()
self.initVariables()
self.strname = name
def __str__( self ):
return self.strname
####### Add Block
def addBlock( self , name = None ):
name = self.add_label(
self.blocks , 'Block' , name , add_order = True )
self.blocks.append( [ tb.recipe( sess = self.sess , prev = self , name = name ) , name ] )
return self.tensor( name )
####### Eval
def eval( self , names , dict = None ):
tensors = self.tensor_list( names )
if not isinstance( names , list ) and len( tensors ) == 1 : tensors = tensors[0]
return self.sess.run( tensors , feed_dict = dict )
####### Run
def run( self , names , inputs , use_dropout = True ):
dict = {}
inputs = tb.aux.parse_pairs( inputs )
for data in inputs: dict[ self.node( data[0] ) ] = data[1]
for i in range( len( self.root.dropouts ) ):
if use_dropout: dict[ self.root.dropouts[i][0] ] = self.root.dropouts[i][1][1]
else: dict[ self.root.dropouts[i][0] ] = 1.0
return self.eval( names , dict )
####### Assign
def assign( self , names , values ):
if not isinstance( values , list ): values = [ values ]
tensors = self.tensor_list( names )
for i , tensor in enumerate( tensors ):
tensor.assign( values[i] ).eval( session = self.sess )
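assign above applies tensor.assign(value).eval(session=...) per tensor; the same call pattern in raw TF1, outside the recipe wrapper (illustrative):
import tensorflow as tf

v = tf.Variable([1.0, 2.0])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
v.assign([3.0, 4.0]).eval(session=sess)   # same pattern as recipe.assign
print(sess.run(v))                        # [3. 4.]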
|
bermeom/quadruped-robot
|
learning/sources/source_vrep.py
|
from sources.vrep.vrepper.core import vrepper
from sources.source import source
import sys, os, time, signal
import numpy as np
##### SOURCE VREP
class source_vrep( source ):
### __INIT__
def __init__( self, scene ):
source.__init__( self )
self.env = vrepper( headless = not self.RENDER )
self.env.start()
self.env.load_scene(os.path.dirname(os.path.realpath(__file__)) + '/vrep/scenes/' + scene + '.ttt')
def signal_handler(signal, frame):
print('\nProgram closed!')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
### START SIMULATION
def start( self ):
self.INITIAL_POSITION = np.array([0,0,0,0,0,0]) #np.random.uniform(-90, 90, 6)
self.env.start_simulation(is_sync=False)
time.sleep(.2)
for i, m in enumerate(self.objects):
if i == 0:
self.INITIAL_POSITION[i] *= -1
m.force_position(self.INITIAL_POSITION[i])
time.sleep(.2)
self.env.make_simulation_synchronous(True)
# Get first observation
obsv, rewd, done = self._get_obsv(self.DESIRED_POSITION)
return obsv
### MOVE ONE STEP
def move( self , actn ):
# Act
self.step( self.map_keys(actn) )
self.env.step_blocking_simulation()
# Get observation
obsv, rewd, done = self._get_obsv(self.DESIRED_POSITION)
return obsv, rewd, done
### CHILD METHODS
def _get_obsv(self, desired_position):
raise NotImplementedError
def step(self, positions, speeds=None):
raise NotImplementedError
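Subclasses supply _get_obsv and step; a hypothetical minimal subclass (scene and joint names are illustrative, not from this repository):
class source_vrep_pendulum(source_vrep):
    ### __INIT__
    def __init__(self):
        source_vrep.__init__(self, 'pendulum')                    # hypothetical scene
        self.objects = [self.env.get_object_by_name('joint0')]    # hypothetical joint
        self.DESIRED_POSITION = 0.0
    ### CHILD METHODS
    def _get_obsv(self, desired_position):
        obsv = [0.0]   # would read joint state here
        rewd = -abs(obsv[0] - desired_position)
        done = False
        return obsv, rewd, done
    def step(self, positions, speeds=None):
        for m, p in zip(self.objects, positions):
            m.force_position(p)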
|
bermeom/quadruped-robot
|
tensorblock/layers/layer_rnn.py
|
import numpy as np
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
import tensorblock as tb
class layer_rnn:
####### Data
def name(): return 'RNN'
def shapeMult(): return 1
def dims(): return 1
def allowPooling(): return False
####### Function
def function( x , W , b , recipe , pars ):
# Fold Data if Necessary
if tb.aux.tf_length( x ) == 2:
in_sides = pars['in_sides']
if len( in_sides ) == 1: x = tb.aux.tf_fold2D( x , 1 )
else: x = tb.aux.tf_fold( x , in_sides )
# Prepare Network
with tf.variable_scope( 'RNN' ):
# Create Cell
if pars['cell_type'] == 'LSTM': # LSTM
cell = rnn.BasicLSTMCell( pars['out_channels'] , forget_bias = 1.0 , state_is_tuple = True )
else: # GRU
cell = rnn.GRUCell( pars['out_channels'] )
# Input Dropout
if pars['in_dropout'] > 0.0 :
in_name = pars['in_dropout_name']
if in_name is None : in_name = 'indrop_' + pars['name']
idx = len( recipe.root.dropouts ) ; recipe.labels[ in_name ] = ( 'dropout' , idx )
recipe.root.dropouts.append( [ tb.vars.placeholder( name = 'drop_Input' ) , [ idx , pars['in_dropout'] ] ] )
cell = rnn.DropoutWrapper( cell , input_keep_prob = recipe.root.dropouts[-1][0] )
# Output Dropout
if pars['out_dropout'] > 0.0 :
out_name = pars['out_dropout_name']
if out_name is None : out_name = 'outdrop_' + pars['name']
idx = len( recipe.root.dropouts ) ; recipe.labels[ out_name ] = ( 'dropout' , idx )
recipe.root.dropouts.append( [ tb.vars.placeholder( name = 'drop_Output' ) , [ idx , pars['out_dropout'] ] ] )
cell = rnn.DropoutWrapper( cell , output_keep_prob = recipe.root.dropouts[-1][0] )
# Stack Cells
if pars['num_cells'] is not None:
def lstm_cell(): return rnn.BasicLSTMCell( pars['out_channels'] , forget_bias = 1.0 , state_is_tuple = True )
cell = rnn.MultiRNNCell( [ lstm_cell() for _ in range(pars['num_cells'])], state_is_tuple = True )
# Create RNN
outputs , states = tf.nn.dynamic_rnn( cell , x , dtype = tf.float32 , sequence_length = pars['seqlen'] )
# Check Sequence Length
if pars['seqlen'] is None: # Without Sequence Length
shape = tb.aux.tf_shape( outputs )
trans = list( range( len( shape ) ) )
trans[0] , trans[1] = trans[1] , trans[0]
lasts = tf.transpose( outputs , trans )[-1]
else: # With Sequence Length
with tf.variable_scope( 'Gather' ):
batch_shape , batch_size = tb.aux.tf_shape( x ) , tf.shape( outputs )[0]
index = tf.range( 0 , batch_size ) * batch_shape[1] + ( pars['seqlen'] - 1 )
lasts = tf.gather( tf.reshape( outputs , [ -1 , pars['out_channels'] ] ) , index )
# Store Weights and Biases
if pars['num_cells'] is None: # Single Cell
if pars['cell_type'] == 'LSTM':
path = pars['folder'] + pars['name'] + '/RNN/rnn/basic_lstm_cell/'
WW = tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , path + 'kernel:0' )[0]
bb = tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , path + 'bias:0' )[0]
elif pars['cell_type'] == 'GRU':
path = pars['folder'] + pars['name'] + '/RNN/rnn/gru_cell/'
WW , bb = [] , []
WW.append( tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , path + 'gates/kernel:0' )[0] )
bb.append( tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , path + 'gates/bias:0' )[0] )
WW.append( tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , path + 'candidate/kernel:0' )[0] )
bb.append( tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , path + 'candidate/bias:0' )[0] )
else: # Stacked Cells
path = pars['folder'] + pars['name'] + '/RNN/rnn/multi_rnn_cell/cell_'
WW , bb = [] , []
if pars['cell_type'] == 'LSTM':
for i in range( pars['num_cells'] ):
pathi = path + str( i ) + '/basic_lstm_cell/'
path = pars['folder'] + pars['name'] + '/RNN/rnn/basic_lstm_cell/'
WW.append( tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , pathi + 'kernel:0' ) )
bb.append( tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , pathi + 'bias:0' ) )
elif pars['cell_type'] == 'GRU':
for i in range( pars['num_cells'] ):
pathi = path + str( i ) + '/gru_cell/'
path = pars['folder'] + pars['name'] + '/RNN/rnn/gru_cell/'
WW.append( tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , pathi + 'gates/kernel:0' ) )
bb.append( tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , pathi + 'gates/bias:0' ) )
WW.append( tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , pathi + 'candidate/kernel:0' ) )
bb.append( tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , pathi + 'candidate/bias:0' ) )
# Return Layer
return [ lasts , outputs , states ] , pars , [ WW , bb ]
####### Shapes
def shapes( input_shape , pars ):
return None , None
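With seqlen given, the layer picks each sequence's last valid output by flattening [batch, time, channels] to [batch*time, channels] and gathering row batch_index * max_time + (seqlen - 1); the same indexing in numpy (illustrative):
import numpy as np

batch, max_time, channels = 2, 4, 3
outputs = np.arange(batch * max_time * channels).reshape(batch, max_time, channels)
seqlen = np.array([2, 4])                             # valid length per sequence
index = np.arange(batch) * max_time + (seqlen - 1)    # -> [1, 7]
lasts = outputs.reshape(-1, channels)[index]
print(lasts)   # output at t=1 of sequence 0 and t=3 of sequence 1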
|
bermeom/quadruped-robot
|
tensorblock/aux/aux_tf_reshape.py
|
#import os
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorblock.aux.aux_reshape import *
### Get Shape
def tf_shape( x ):
if x is None: return None
return x.get_shape().as_list()
### Get Length
def tf_length( x ):
return len( tf_shape( x ) )
### Flat Dimension
def tf_flat_dim( x ):
return flat_dim( tf_shape( x ) )
### Flatten
def tf_flatten( x ):
return tf.reshape( x , [ -1 , tf_flat_dim( x ) ] )
### 2D Side Dimension
def tf_side2D( x , d = 1 ):
return side2D( tf_shape( x ) , d )
### 3D Side Dimension
def tf_side3D( x , d = 1 ):
return side3D( tf_shape( x ) , d )
### Fold
def tf_fold( x , dims ):
return tf.reshape( x , [ -1 ] + dims )
### 2D Fold
def tf_fold2D( x , d = 1 ):
s = tf_side2D( x , d )
return tf.reshape( x , shape = [ -1 , s , s , d ] )
### 3D Fold
def tf_fold3D( x , d = 1 ):
s = tf_side3D( x , d )
return tf.reshape( x , shape = [ -1 , s , s , s , d ] )
### Squeeze
def tf_squeeze( x , d ):
return tf.squeeze( x, d )
## Concat
def tf_concat( x , y, d ):
return tf.concat( [x, y], d )
|
bermeom/quadruped-robot
|
envs/__init__.py
|
from envs.quadruped import QuadrupedEnv
|
bermeom/quadruped-robot
|
tensorblock/functions/func_extras.py
|
import tensorflow as tf
import tensorblock as tb
### 2D Max Pooling
def maxpool2d( x , pars ):
if not pars['pooling_ksize' ]: pars['pooling_ksize'] = pars['pooling']
if not pars['pooling_strides']: pars['pooling_strides'] = pars['pooling']
if not pars['pooling_padding']: pars['pooling_padding'] = pars['padding' ]
ksize = tb.aux.spread( pars['pooling_ksize' ] , 2 )
strides = tb.aux.spread( pars['pooling_strides'] , 2 )
return tf.nn.max_pool( x , ksize = [ 1 , ksize[0] , ksize[1] , 1 ] ,
strides = [ 1 , strides[0] , strides[1] , 1 ] ,
padding = pars['pooling_padding'] )
### 3D Max Pooling
def maxpool3d( x , pars ):
if not pars['pooling_ksize' ]: pars['pooling_ksize'] = pars['pooling']
if not pars['pooling_strides']: pars['pooling_strides'] = pars['pooling']
if not pars['pooling_padding']: pars['pooling_padding'] = pars['padding']
ksize = tb.aux.spread( pars['pooling_ksize' ] , 3 )
strides = tb.aux.spread( pars['pooling_strides'] , 3 )
return tf.nn.max_pool3d( x , ksize = [ 1 , ksize[0] , ksize[1] , ksize[2] , 1 ] ,
strides = [ 1 , strides[0] , strides[1] , strides[2] , 1 ] ,
padding = pars['pooling_padding'] )
### Bias
def bias( x , b ):
if b is not None:
x = tf.nn.bias_add( x , b )
return x
### Dropout
def dropout( x , dropout ):
return tf.nn.dropout( x , dropout )
### Normal Distribution
def dist_normal( mu, sigma ):
return tf.distributions.Normal( mu, sigma )
### Log Sigmoid
def log_sig( x ):
return - tf.log( 1 - tf.nn.sigmoid( x ) + 1e-8 )
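log_sig above implements the GAIL-style surrogate reward -log(1 - sigmoid(logits)); the 1e-8 term keeps it finite for large logits. A quick numpy check (illustrative):
import numpy as np

def log_sig_np(x):
    return -np.log(1.0 - 1.0 / (1.0 + np.exp(-x)) + 1e-8)

print(log_sig_np(np.array([-5.0, 0.0, 5.0])))
# ~[0.0067, 0.693, 5.0] -- more 'expert-like' logits yield larger reward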
|
bermeom/quadruped-robot
|
learning/sources/vrep/vrepper/core.py
|
# V-REP as tethered robotics simulation environment
# Python Wrapper
# <NAME> 20170410
# import the vrep library
from vrepper.lib import vrep
from vrepper.lib.vrepConst import sim_handle_all, simx_headeroffset_server_state, \
sim_scripttype_childscript
from inspect import getargspec
import types
import numpy as np
import socket
from contextlib import closing
from vrepper.utils import check_ret, blocking, oneshot, instance, deprecated
from vrepper.vrep_object import vrepobject
class vrepper(object):
""" class holding a v-rep simulation environment and
allowing to call all the V-Rep remote functions ("simx..")
"""
def __init__(self, port_num=None, dir_vrep='', headless=False, suppress_output=True):
if port_num is None:
port_num = self.find_free_port_to_use()
self.port_num = port_num
if dir_vrep == '':
print('(vrepper) trying to find V-REP executable in your PATH')
import distutils.spawn as dsp
path_vrep = dsp.find_executable('vrep.sh') # fix for linux
if path_vrep == None:
path_vrep = dsp.find_executable('vrep')
else:
path_vrep = dir_vrep + 'vrep'
print('(vrepper) path to your V-REP executable is:', path_vrep)
if path_vrep is None:
raise Exception("Sorry I couldn't find V-Rep binary. "
"Please make sure it's in the PATH environmental variable")
# start V-REP in a sub process
# vrep.exe -gREMOTEAPISERVERSERVICE_PORT_DEBUG_PREENABLESYNC
# where PORT -> 19997, DEBUG -> FALSE, PREENABLESYNC -> TRUE
# by default the server will start at 19997,
# use the -g argument if you want to start the server on a different port.
args = [path_vrep, '-gREMOTEAPISERVERSERVICE_' + str(self.port_num) + '_FALSE_TRUE']
if headless:
args.append('-h')
# instance created but not started.
self.instance = instance(args, suppress_output)
self.cid = -1
# clientID of the instance when connected to server,
# to differentiate between instances in the driver
self.started = False
# is the simulation currently running (as far as we know)
self.sim_running = False
# assign every API function call from vrep to self
vrep_methods = [a for a in dir(vrep) if
not a.startswith('__') and isinstance(getattr(vrep, a), types.FunctionType)]
def assign_from_vrep_to_self(name):
wrapee = getattr(vrep, name)
arg0 = getargspec(wrapee)[0][0]
if arg0 == 'clientID':
def func(*args, **kwargs):
return wrapee(self.cid, *args, **kwargs)
else:
def func(*args, **kwargs):
return wrapee(*args, **kwargs)
setattr(self, name, func)
for name in vrep_methods:
assign_from_vrep_to_self(name)
def find_free_port_to_use(
self): # https://stackoverflow.com/questions/1365265/on-localhost-how-do-i-pick-a-free-port-number
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
# start everything
def start(self):
if self.started == True:
raise RuntimeError('you should not call start() more than once')
print('(vrepper) starting an instance of V-REP...')
self.instance.start()
# try to connect to V-REP instance via socket
retries = 0
while True:
print('(vrepper) trying to connect to server on port', self.port_num, 'retry:', retries)
# vrep.simxFinish(-1) # just in case, close all opened connections
self.cid = self.simxStart(
'127.0.0.1', self.port_num,
waitUntilConnected=True,
doNotReconnectOnceDisconnected=True,
timeOutInMs=1000,
commThreadCycleInMs=0) # Connect to V-REP
if self.cid != -1:
print('(vrepper) Connected to remote API server!')
break
else:
retries += 1
if retries > 15:
self.end()
raise RuntimeError('(vrepper) Unable to connect to V-REP after 15 retries.')
# Now try to retrieve data in a blocking fashion (i.e. a service call):
objs, = check_ret(self.simxGetObjects(
sim_handle_all,
blocking))
print('(vrepper) Number of objects in the scene: ', len(objs))
# Now send some data to V-REP in a non-blocking fashion:
self.simxAddStatusbarMessage(
'(vrepper)Hello V-REP!',
oneshot)
# setup a useless signal
self.simxSetIntegerSignal('asdf', 1, blocking)
print('(vrepper) V-REP instance started, remote API connection created. Everything seems to be ready.')
self.started = True
return self
# kill everything, clean up
def end(self):
print('(vrepper) shutting things down...')
# Before closing the connection to V-REP, make sure that the last command sent out had time to arrive. You can guarantee this with (for example):
# vrep.simxGetPingTime(clientID)
# Now close the connection to V-REP:
if self.sim_running:
self.stop_simulation()
self.simxFinish()
self.instance.end()
print('(vrepper) everything shut down.')
return self
def load_scene(self, fullpathname):
print('(vrepper) loading scene from', fullpathname)
try:
check_ret(self.simxLoadScene(fullpathname,
0, # assume file is at server side
blocking))
except:
print('(vrepper) scene loading failure')
raise
print('(vrepper) scene successfully loaded')
def start_blocking_simulation(self):
self.start_simulation(True)
def start_nonblocking_simulation(self):
self.start_simulation(False)
def start_simulation(self, is_sync):
# IMPORTANT
# you should poll the server state to make sure
# the simulation completely stops before starting a new one
while True:
# poll the useless signal (to receive a message from server)
check_ret(self.simxGetIntegerSignal(
'asdf', blocking))
# check server state (within the received message)
e = self.simxGetInMessageInfo(
simx_headeroffset_server_state)
# check bit0
not_stopped = e[1] & 1
if not not_stopped:
break
# enter sync mode
check_ret(self.simxSynchronous(is_sync))
check_ret(self.simxStartSimulation(blocking))
self.sim_running = True
def make_simulation_synchronous(self, sync):
if not self.sim_running:
print('(vrepper) simulation doesn\'t seem to be running. starting up')
self.start_simulation(sync)
else:
check_ret(self.simxSynchronous(sync))
def stop_simulation(self):
check_ret(self.simxStopSimulation(oneshot), ignore_one=True)
self.sim_running = False
@deprecated('Please use method "stop_simulation" instead.')
def stop_blocking_simulation(self):
self.stop_simulation()
def step_blocking_simulation(self):
check_ret(self.simxSynchronousTrigger())
def get_object_handle(self, name):
handle, = check_ret(self.simxGetObjectHandle(name, blocking))
return handle
def get_object_by_handle(self, handle, is_joint=True):
"""
Get the vrep object for a given handle
:param int handle: handle code
:param bool is_joint: True if the object is a joint that can be moved
:returns: vrepobject
"""
return vrepobject(self, handle, is_joint)
def get_object_by_name(self, name, is_joint=True):
"""
Get the vrep object for a given name
:param str name: name of the object
:param bool is_joint: True if the object is a joint that can be moved
:returns: vrepobject
"""
return self.get_object_by_handle(self.get_object_handle(name), is_joint)
@staticmethod
def create_params(ints=[], floats=[], strings=[], bytes=''):
if bytes == '':
bytes_in = bytearray()
else:
bytes_in = bytes
return (ints, floats, strings, bytes_in)
def call_script_function(self, function_name, params, script_name="remoteApiCommandServer"):
"""
Calls a function in a script that is mounted as child in the scene
:param str script_name: the name of the script that contains the function
:param str function_name: the name of the function to call
:param tuple params: the parameters to call the function with (must be 4 parameters: a list of integers, a list of floats, a list of strings, and a bytearray)
:returns: tuple (res_ints, res_floats, res_strs, res_bytes)
WHERE
list res_ints is a list of integer results
list res_floats is a list of floating point results
list res_strs is a list of string results
bytearray res_bytes is a bytearray containing the resulting bytes
"""
assert type(params) is tuple
assert len(params) == 4
return check_ret(self.simxCallScriptFunction(
script_name,
sim_scripttype_childscript,
function_name,
params[0], # integers
params[1], # floats
params[2], # strings
params[3], # bytes
blocking
))
def get_global_variable(self, name, is_first_time):
if is_first_time:
return vrep.simxGetFloatSignal(self.cid, name, vrep.simx_opmode_streaming)
else:
return vrep.simxGetFloatSignal(self.cid, name, vrep.simx_opmode_buffer)
def _convert_byte_image_to_color(self, res, img):
reds = np.zeros(res[0] * res[1], dtype=np.uint8)
greens = np.zeros(res[0] * res[1], dtype=np.uint8)
blues = np.zeros(res[0] * res[1], dtype=np.uint8)
for i in range(0, len(img), 3):
reds[int(i / 3)] = img[i] & 255
greens[int(i / 3)] = img[i + 1] & 255
blues[int(i / 3)] = img[i + 2] & 255
img_out = np.zeros((res[0], res[1], 3), dtype=np.uint8)
img_out[:, :, 0] = np.array(reds).reshape(res)
img_out[:, :, 1] = np.array(greens).reshape(res)
img_out[:, :, 2] = np.array(blues).reshape(res)
return img_out
def get_image(self, object_id):
res, img = check_ret(self.simxGetVisionSensorImage(object_id, 0, blocking))
return self._convert_byte_image_to_color(res, img)
@staticmethod
def flip180(image):
return np.rot90(image, 2, (0, 1))
def _convert_depth_to_image(self, res, depth):
reshaped_scaled = 255 - np.array(depth).reshape(res) * 255 # because is in range [0,1] and inverted
rounded = np.around(reshaped_scaled, 0).astype(np.uint8)
return rounded
def _convert_depth_to_rgb(self, res, depth):
rounded = self._convert_depth_to_image(res, depth)
img = np.zeros((res[0], res[1], 3), dtype=np.uint8)
img[:, :, 0] = rounded
img[:, :, 1] = rounded
img[:, :, 2] = rounded
return img
def get_depth_image(self, object_id):
res, depth = check_ret(self.simxGetVisionSensorDepthBuffer(object_id, blocking))
return self._convert_depth_to_image(res, depth)
def get_depth_image_as_rgb(self, object_id):
res, depth = check_ret(self.simxGetVisionSensorDepthBuffer(object_id, blocking))
return self._convert_depth_to_rgb(res, depth)
def get_image_and_depth(self, object_id):
img = self.get_image(object_id)
depth = self.get_depth_image(object_id)
out = np.zeros((img.shape[0], img.shape[1], 4), dtype=np.uint8)
out[:, :, :3] = img
out[:, :, 3] = depth
return out
def get_collision_handle(self, name_of_collision_obj):
""" In order to use this you first have to open the scene in V-Rep, then
click on "calculation module properties" on the left side (the button
that looks like "f(x)"), then click "add new collision object", chose the
two things between which you want to check for collision (one of them can be a collection
which you can create in yet another window), and finally double click on the new
collision object in order to rename it to something more catchy than "Collision".
You can find more information here:
http://www.coppeliarobotics.com/helpFiles/en/collisionDetection.htm
Also don't forget to save the scene after adding the collision object.
:param name_of_collision_obj: the "#" is added automatically at the end
:return: collision_handle (this is an integer that you need for check_collision)
"""
return check_ret(self.simxGetCollisionHandle(name_of_collision_obj + "#", blocking))[0]
def check_collision(self, collision_handle):
""" At any point in time call this function to get a boolean value if the
collision object is currently detecting a collision. True for collision.
:param collision_handle: integer, the handle that you obtained from
"get_collision_handle(name_of_collision_obj)"
:return: boolean
"""
return check_ret(self.simxReadCollision(collision_handle, blocking))[0]
def get_collision_object(self, name_of_collision_obj):
""" this is effectively the same as "get_collision_handle" but instead of an
integer (the handle) it instead returns an object that has a ".is_colliding()"
function, which is super marginally more convenient.
:param name_of_collision_obj: string, name of the collision object in V-Rep
:return: Collision object that you can check with ".is_colliding()->bool"
"""
handle = check_ret(self.simxGetCollisionHandle(name_of_collision_obj + "#", blocking))[0]
col = Collision(env=self, handle=handle)
return col
class Collision(object):
def __init__(self, env, handle):
self.handle = handle
self.env = env
def is_colliding(self):
return check_ret(self.env.simxReadCollision(self.handle, blocking))[0]
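A hypothetical end-to-end use of the wrapper above (scene path and collision object name are illustrative):
venv = vrepper(headless=True).start()
venv.load_scene('/path/to/scene.ttt')                  # hypothetical path
venv.start_simulation(is_sync=True)
col = venv.get_collision_object('FloorContact')        # hypothetical collision object
for _ in range(100):
    venv.step_blocking_simulation()
    if col.is_colliding():
        break
venv.stop_simulation()
venv.end()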
|
bermeom/quadruped-robot
|
tensorblock/recipe/recipe_operation.py
|
import tensorflow as tf
class recipe_operation:
####### Add Operation
def addOperation( self , **args ):
pars = { **self.defs_operation , **args }
pars['name'] = self.add_label(
self.operations , 'Operation' , pars['name'] , add_order = True )
if pars['input'] is not None:
tensors = self.tensor_list( pars['input'] )
if pars['src'] is not None and pars['dst'] is not None:
tensors = [ self.tensor_list( pars['src'] ) ,
self.tensor_list( pars['dst'] ) ]
extras = pars['extra']
if extras is not None:
extras = self.info( extras )
if callable( pars['function'] ):
with tf.variable_scope( self.folder + pars['name'] ):
self.operations.append( [ pars['function']( tensors , extras , pars ) , pars ] )
else:
self.operations.append( [ pars['function'] , pars ] )
return self.operations[-1][0]
|
bermeom/quadruped-robot
|
learning/sources/pygames/pygame_chase.py
|
import pygame
import pygame.surfarray as surfarray
import random
import time
import numpy as np
class pygame_chase:
### START SIMULATION
def reset( self ):
pygame.init()
self.black = [ 0 , 0 , 0 ]
self.white = [ 255 , 255 , 255 ]
self.blue = [ 0 , 0 , 255 ]
self.red = [ 255 , 0 , 0 ]
self.green = [ 0 , 255 , 0 ]
self.screen_size = [ 640 , 480 ]
self.border_pos = [ 20 , 20 ]
self.n_enemies = 5
self.n_friends = 10
self.circ_dia = 20
self.circ_rad = int( self.circ_dia / 2 )
self.circ_rad2 = self.circ_rad ** 2
self.hero_pos = [ self.screen_size[0] / 2 , self.screen_size[1] / 2 ]
self.hero_spd , self.hero_acl = [ 0 , 0 ] , 3
self.res , self.dst = 10 , 100
self.angles = list( np.arange( 0 , 360 , self.res ) * np.pi / 180.0 )
self.coses = [ np.cos( ang ) for ang in self.angles ]
self.sines = [ np.sin( ang ) for ang in self.angles ]
self.left = self.border_pos[0]
self.right = self.screen_size[0] - self.border_pos[0] - self.circ_dia
self.top = self.border_pos[1]
self.bottom = self.screen_size[1] - self.border_pos[1] - self.circ_dia
self.spd_min , self.spd_max = -5 , 5
self.screen = pygame.display.set_mode( self.screen_size , 0 , 32 )
self.border_size = [ self.screen_size[0] - 2 * self.border_pos[0] ,
self.screen_size[1] - 2 * self.border_pos[1] ]
background_surf = pygame.Surface( self.screen_size )
self.background = background_surf.convert()
self.background.fill( self.black )
self.background.fill( self.white , ( self.border_pos[0] , self.border_pos[1] ,
self.border_size[0] , self.border_size[1] ) )
hero_surf = pygame.Surface( [ self.circ_dia , self.circ_dia ] )
pygame.draw.circle( hero_surf , self.blue , [ self.circ_rad , self.circ_rad ] , self.circ_rad )
self.hero = hero_surf.convert() ; self.hero.set_colorkey( self.black )
friend_surf = pygame.Surface( [ self.circ_dia , self.circ_dia ] )
pygame.draw.circle( friend_surf , self.green , [ self.circ_rad , self.circ_rad ] , self.circ_rad )
self.friend = friend_surf.convert() ; self.friend.set_colorkey( self.black )
enemy_surf = pygame.Surface( [ self.circ_dia , self.circ_dia ] )
pygame.draw.circle( enemy_surf , self.red , [ self.circ_rad , self.circ_rad ] , self.circ_rad )
self.enemy = enemy_surf.convert() ; self.enemy.set_colorkey( self.black )
self.friends = []
for _ in range( self.n_friends ):
self.friends.append( self.create() )
self.enemies = []
for _ in range( self.n_enemies ):
self.enemies.append( self.create() )
self.catch , self.miss = 0 , 0
return self.draw()
### INFO ON SCREEN
def info( self ):
print( ' Score : %3d x %3d |' % \
( self.catch , self.miss ) , end = '' )
### DRAW SCREEN
def draw( self ):
self.screen.blit( self.background, ( 0 , 0 ) )
for friend in self.friends:
self.screen.blit( self.friend , friend[0] )
for enemy in self.enemies:
self.screen.blit( self.enemy , enemy[0] )
ret_pos = [ 0 , 0 ]
self.sensor_pos = [ self.hero_pos[0] + self.circ_rad , self.hero_pos[1] + self.circ_rad ]
f_poss , e_poss = self.possibles()
observation = []
for i in range( len( self.angles ) ):
hit = 0
for d in range( 10 , self.dst , 5 ):
ret_pos[0] = self.hero_pos[0] + d * self.coses[i]
ret_pos[1] = self.hero_pos[1] + d * self.sines[i]
hit = self.visible( ret_pos , f_poss , e_poss )
if hit > 0: break
ret_pos[0] += self.circ_rad
ret_pos[1] += self.circ_rad
color = self.blue if hit == 0 else self.green if hit == 1 else self.red if hit == 2 else self.black
pygame.draw.line( self.screen , color , self.sensor_pos , ret_pos , 2 )
if hit == 0: observation.append( self.dst )
else: observation.append( self.distance( ret_pos , self.sensor_pos ) )
observation.append( hit )
self.screen.blit( self.hero , self.hero_pos )
pygame.display.update()
return np.array( observation )
### MOVE ONE STEP
def step( self , action ):
# Execute Action
self.hero_spd[0] *= 0.5
self.hero_spd[1] *= 0.5
if action == 1: self.hero_spd[0] += self.hero_acl
if action == 2: self.hero_spd[0] -= self.hero_acl
if action == 3: self.hero_spd[1] += self.hero_acl
if action == 4: self.hero_spd[1] -= self.hero_acl
# Propagate
self.hero_pos[0] += self.hero_spd[0]
self.hero_pos[1] += self.hero_spd[1]
self.bounce( [ self.hero_pos , self.hero_spd ] )
for friend in self.friends:
self.move( friend )
self.bounce( friend )
for enemy in self.enemies:
self.move( enemy )
self.bounce( enemy )
# Create Observation
obsv = self.draw()
# Collect Rewards and Check Done
rewd = self.collect( obsv )
done = self.catch >= 10 or self.miss >= 10
# Return Data
return obsv , rewd , done
############################################################################################
### Create Ball
def create( self ):
pos = [ random.randint( self.left + 2 * self.circ_rad , self.right - 2 * self.circ_rad ) ,
random.randint( self.top + 2 * self.circ_rad , self.bottom - 2 * self.circ_rad ) ]
spd = [ random.randint( self.spd_min , self.spd_max ) ,
random.randint( self.spd_min , self.spd_max ) ]
if spd[0] == 0: spd[0] = +1
if spd[1] == 0: spd[1] = -1
return [ pos , spd ]
### Move Ball
def move( self , posspd ):
posspd[0][0] += posspd[1][0]
posspd[0][1] += posspd[1][1]
### Bounce Ball
def bounce( self , posspd ):
pos , spd = posspd
if pos[0] < self.left: pos[0] , spd[0] = 2 * self.left - pos[0] , - spd[0]
if pos[0] > self.right: pos[0] , spd[0] = 2 * self.right - pos[0] , - spd[0]
if pos[1] < self.top: pos[1] , spd[1] = 2 * self.top - pos[1] , - spd[1]
if pos[1] > self.bottom: pos[1] , spd[1] = 2 * self.bottom - pos[1] , - spd[1]
### Collide
def collide( self , pos ):
return self.distance( self.hero_pos , pos ) < self.circ_dia
### Distance
def distance( self , pos1 , pos2 ):
return np.sqrt( ( pos1[0] - pos2[0] )**2 +
( pos1[1] - pos2[1] )**2 )
### Possibles
def possibles( self ):
friends_possibles = []
for i , friend in enumerate( self.friends ):
if self.distance( friend[0] , self.hero_pos ) < self.dst:
friends_possibles.append( i )
enemies_possibles = []
for i , enemy in enumerate( self.enemies ):
if self.distance( enemy[0] , self.hero_pos ) < self.dst:
enemies_possibles.append( i )
return friends_possibles , enemies_possibles
### Visible
def visible( self , pos , f_poss , e_poss ):
if pos[0] + self.circ_rad < self.left or pos[0] - self.circ_rad > self.right or \
pos[1] + self.circ_rad < self.top or pos[1] - self.circ_rad > self.bottom:
return 3
for i in f_poss:
if self.distance( self.friends[i][0] , pos ) < self.circ_rad:
return 1
for i in e_poss:
if self.distance( self.enemies[i][0] , pos ) < self.circ_rad:
return 2
return 0
### Collect
def collect( self , obsv ):
reward = 0
for i , friend in enumerate( self.friends ):
if self.collide( friend[0] ): # If Collided with Friend
self.friends[i] = self.create()
reward -= 1 ; self.miss += 1
for i , enemy in enumerate( self.enemies ):
if self.collide( enemy[0] ): # If Collided with Enemy
self.enemies[i] = self.create()
reward += 1 ; self.catch += 1
for i in range( 0 , obsv.shape[0] , 2 ):
if obsv[ i + 1 ] == 3 : # If Near a Wall
reward -= ( 1.0 - obsv[ i ] / self.dst ) / len( self.angles )
return reward
|
bermeom/quadruped-robot
|
learning/sources/source_carla.py
|
from sources.source import source
from sources.carla.control import *
import signal
import sys
import cv2
import numpy as np
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
##### SOURCE CARLA
class source_carla( source ):
# Discrete actions:
# 0 - Throttle
# 1 - Throttle and right steer
# 2 - Throttle and left steer
# 3 - Brake
### __INIT__
def __init__( self ):
source.__init__( self )
class Args:
debug = True
host = '127.0.0.1'
port = 2000
autopilot = False
res = '600x400'
width, height = [int(x) for x in res.split('x')]
self.args = Args()
self.env = CarlaEnv()
# Open Server
self.env.open_server(self.args)
# Open Client
self.env.init(self.args)
def signal_handler(signal, frame):
print('\nProgram closed!')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
### INFORMATION
def num_actions( self ):
return 4
def range_actions( self ):
return -1
### START SIMULATION
def start( self ):
obsv, rewd, done = self.env.step([0,0,0,0])
return self.process(obsv)
### MOVE ONE STEP
def move( self , actn ):
obsv, rewd, done = self.env.step(actn)
return self.process(obsv), rewd, done
### PROCESS OBSERVATION
def process( self , obsv ):
# Convert image to gray
obsv = np.uint8(obsv)
obsv = cv2.resize( obsv , ( 84 , 84 ) )
obsv = cv2.cvtColor( obsv , cv2.COLOR_BGR2GRAY )
# Plot the ANN image input:
#fig = plt.figure()
#for i in range( 1 ):
# plt.subplot( 2 , 1 , i + 1 )
# plt.imshow( obsv[:,:] , cmap = 'gray' )
#plt.savefig('./auxiliar/rgb.png')
#plt.close()
return obsv
|
bermeom/quadruped-robot
|
learning/sources/source_gym_hopper.py
|
from sources.source_gym import source_gym
import numpy as np
##### SOURCE GYM HOPPER
class source_gym_hopper( source_gym ):
### __INIT__
def __init__( self ):
source_gym.__init__( self , 'Hopper-v2' )
### INFORMATION
def num_actions( self ): return self.env.action_space.shape[0]
def range_actions( self ): return abs(self.env.action_space.high[0])
### MAP KEYS
def map_keys( self , actn ):
actn = np.clip( actn, self.env.action_space.low[0], self.env.action_space.high[0])
return np.expand_dims(actn,0)
### PROCESS OBSERVATION
def process( self , obsv ):
return obsv
|
bermeom/quadruped-robot
|
tensorblock/layers/layer_variational.py
|
import numpy as np
import tensorflow as tf
import tensorblock as tb
class layer_variational:
####### Data
def name(): return 'Variational'
def shapeMult(): return 2
def dims(): return 1
def allowPooling(): return False
####### Function
def function( x , W , b , recipe , pars ):
if tb.aux.tf_length( x ) > 2:
x = tb.aux.tf_flatten( x )
in_channels = pars['in_channels']
out_channels = pars['out_channels']
size_batch = tf.shape( x , name = 'batch' )[0]
layer = tf.matmul( x , W , name = 'MatMul' )
layer = tb.extras.bias( layer , b )
z_mu = layer[ : , :out_channels ]
z_sig = layer[ : , out_channels: ]
out_shape = tf.stack( [ size_batch , out_channels ] , name = 'shape' )
epsilon = tf.random_normal( out_shape , mean = 0.0 , stddev = 1.0 , seed = 1 )
layer = tf.add( z_mu , tf.multiply( z_sig , epsilon ) )
return [ layer , z_mu , z_sig ] , pars , None
####### Shapes
def shapes( input_shape , pars ):
in_channels = tb.aux.flat_dim( input_shape )
out_channels = pars['out_channels'] * np.prod( pars['out_sides'] )
weight_shape = [ in_channels , out_channels ]
bias_shape = [ out_channels ]
return weight_shape , bias_shape
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_dql_rnn_egreedy_1A.py
|
from players_reinforcement.player_dql_rnn_egreedy import *
##### PLAYER DQL RNN EGREEDY 1A
class player_dql_rnn_egreedy_1A( player_dql_rnn_egreedy ):
NUM_FRAMES = 3
BATCH_SIZE = 50
LEARNING_RATE = 1e-4
REWARD_DISCOUNT = 0.99
START_RANDOM_PROB = 1.00
FINAL_RANDOM_PROB = 0.05
NUM_EXPLORATION_EPISODES = 250
EXPERIENCES_LEN = 100000
STEPS_BEFORE_TRAIN = 150
### __INIT__
def __init__( self ):
player_dql_rnn_egreedy.__init__( self )
### PREPARE NETWORK
def network( self ):
# Input Placeholder
self.brain.addInput( shape = [ None , self.NUM_FRAMES , self.obsv_shape[0] ] ,
name = 'Observation' )
# Fully Connected Layers
self.brain.addLayer( type = tb.layers.rnn , input = 'Observation' , name = 'RNN' ,
num_cells = 1 , out_channels = 64 ,
activation = tb.activs.relu )
self.brain.setLayerDefaults( type = tb.layers.fully ,
activation = tb.activs.relu ,
weight_stddev = 0.01 , bias_stddev = 0.01 )
self.brain.addLayer( out_channels = 64 )
self.brain.addLayer( out_channels = self.num_actions ,
activation = None , name = 'Output' )
|
bermeom/quadruped-robot
|
tensorblock/recipe/recipe_train.py
|
import numpy as np
import tensorblock as tb
class recipe_train:
####### Train
def train( self , **args ):
pars = { **self.defs_train , **args }
if pars['train_length'] is not None:
if pars['train_data'] is not None: pars['train_data'] = pars['train_data'][ :pars['train_length'] , ]
if pars['train_labels'] is not None: pars['train_labels'] = pars['train_labels'][ :pars['train_length'] , ]
if pars['test_length'] is not None:
if pars['test_data'] is not None: pars['test_data'] = pars['test_data'][ :pars['test_length'] , ]
if pars['test_labels'] is not None: pars['test_labels'] = pars['test_labels'][ :pars['test_length'] , ]
flag_eval = pars['eval_function'] is not None
if flag_eval and not isinstance( pars['eval_function'] , list ):
pars['eval_function'] = [ pars['eval_function'] ]
flag_plot = pars['plot_function'] is not None
flag_save = pars['saver'] is not None
flag_summary = pars['summary'] is not None and \
pars['writer'] is not None
print( '######################################################################## TRAINING' )
if flag_eval: self.train_evaluate( pars , flag_summary )
if flag_plot:
pars_plot = self.pars( pars['plot_function'] )
tb.plotters.initialize( pars_plot['shape'] )
self.train_plot( pars , pars_plot )
if isinstance( pars['train_data'] , list ): num_samples = len( pars['train_data'] )
if isinstance( pars['train_data'] , np.ndarray ): num_samples = pars['train_data'].shape[0]
num_batches = int( num_samples / pars['size_batch'] ) + 1
for epoch in range( pars['num_epochs'] ):
for batch in range( num_batches ):
self.train_optimize( pars , batch )
if flag_eval and ( epoch + 1 ) % pars['eval_freq'] == 0:
self.train_evaluate( pars , flag_summary , epoch )
if flag_plot and ( epoch + 1 ) % pars['plot_freq'] == 0:
self.train_plot( pars , pars_plot , epoch )
if flag_save and ( epoch + 1 ) % pars['save_freq'] == 0 :
self.train_save( pars )
print( '######################################################################## END TRAINING' )
####### Optimize
def train_optimize( self , pars , batch ):
train_dict = self.prepare( pars['train_data'] , pars['train_labels'] , pars['train_seqlen'] ,
pars['size_batch'] , batch )
self.run( pars['optimizer'] , train_dict , use_dropout = True )
####### Evaluate
def train_evaluate( self , pars , flag_summary , epoch = -1 ):
test_dict = self.prepare( pars['test_data'] , pars['test_labels'] , pars['test_seqlen'] )
eval = self.run( pars['eval_function'] , test_dict , use_dropout = False )
if flag_summary:
summ = self.run( pars['summary'] , test_dict , use_dropout = False )
self.write( name = pars['writer'] , summary = summ , iter = epoch + 1 )
print( '*** Epoch' , epoch + 1 , '| ' , end = '' )
for i , function in enumerate( pars['eval_function'] ):
print( function + ' :' , eval[i] , '| ' , end = '' )
print()
####### Plot
def train_plot( self , pars , pars_plot , epoch = -1 ):
x = pars['test_data'][ : np.prod( pars_plot['shape'] ) , : ]
y = self.run( 'Output' , [ [ 'Input' , x ] ] , use_dropout = False )
self.tensor( pars['plot_function'] )( x , y , epoch = epoch + 1 ,
dir = pars_plot['dir'] , shape = pars_plot['shape'] )
####### Save
def train_save( self , pars ):
self.save( name = pars['saver'] )
####### Prepare Data
def prepare( self , data , labels , seqlen , size_batch = None , batch = None ):
dict = []
if data is not None:
if size_batch is None: batch_data = data
else: batch_data = tb.aux.get_batch( data , size_batch , batch )
dict.append( [ 'Input' , batch_data ] )
if labels is not None:
if size_batch is None: batch_labels = labels
else: batch_labels = tb.aux.get_batch( labels , size_batch , batch )
dict.append( [ 'Label' , batch_labels ] )
if seqlen is not None:
if size_batch is None: batch_seqlen = seqlen
else: batch_seqlen = tb.aux.get_batch( seqlen , size_batch , batch )
dict.append( [ 'SeqLen' , batch_seqlen ] )
return dict
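train() derives num_batches as int(num_samples / size_batch) + 1, so the final batch is short, and empty when num_samples divides evenly; the loop bounds isolated in plain Python (illustrative):
num_samples, size_batch = 100, 32
num_batches = int(num_samples / size_batch) + 1   # as computed in train() above
for b in range(num_batches):
    lo = b * size_batch
    hi = min(lo + size_batch, num_samples)
    print(lo, hi)   # (0, 32), (32, 64), (64, 96), (96, 100)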
|
bermeom/quadruped-robot
|
learning/sources/source_gym.py
|
import signal
import sys
import gym
from sources.source import source
from gym import wrappers
##### SOURCE GYM
class source_gym( source ):
render = True
### __INIT__
def __init__( self , game ):
source.__init__( self )
self.env = gym.make( game )
#self.env = wrappers.Monitor(self.env, ".") #record
def signal_handler(signal, frame):
print('\nProgram closed!')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
### START SIMULATION
def start( self ):
obsv = self.env.reset()
return self.process( obsv )
### MOVE ONE STEP
def move( self , actn ):
obsv , rewd , done, info = self.env.step( self.map_keys( actn ) )
if self.render: self.env.render()
return self.process( obsv ) , rewd , done
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_PPO_1.py
|
from players_reinforcement.player import player
from auxiliar.aux_plot import *
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
# PLAYER PPO
class player_PPO_1(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = []
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
if self.continuous:
action = self.brain.run( 'Actor/Output', [ [ 'Actor/Observation', [state] ] ] )
action = np.reshape( action, self.num_actions )
if not self.continuous:
output = np.squeeze( self.brain.run( 'Actor/Discrete', [ [ 'Actor/Observation', [state] ] ] ) )
action = np.random.choice( np.arange( len( output ) ), p = output )
return self.create_action( action )
# PREPARE NETWORK
def operations(self):
# Placeholders
self.brain.addInput( shape = [ None, self.num_actions ], name = 'Actions' )
self.brain.addInput( shape = [ None, self.num_actions ], name = 'O_Mu' )
self.brain.addInput( shape = [ None, self.num_actions ], name = 'O_Sigma' )
self.brain.addInput( shape = [ None, self.num_actions ], name = 'O_Discrete' )
self.brain.addInput( shape = [ None, 1 ] , name = 'Advantage')
# Operations
# Critic
self.brain.addOperation( function = tb.ops.hlmean_squared_error,
input = [ 'Critic/Value','Advantage' ],
name = 'CriticCost' )
self.brain.addOperation( function = tb.optims.adam,
input = 'CriticCost',
learning_rate = self.LEARNING_RATE,
name = 'CriticOptimizer' )
# Actor
if self.continuous:
self.brain.addOperation( function = tb.ops.ppocost_distrib,
input = [ 'Actor/Mu',
'Actor/Sigma',
'O_Mu',
'O_Sigma',
'Actions',
'Advantage',
self.EPSILON ],
name = 'ActorCost' )
if not self.continuous:
self.brain.addOperation( function = tb.ops.ppocost,
input = [ 'Actor/Discrete',
'O_Discrete',
'Actions',
'Advantage',
self.EPSILON ],
name = 'ActorCost' )
self.brain.addOperation( function = tb.optims.adam,
input = 'ActorCost',
learning_rate = self.LEARNING_RATE,
name = 'ActorOptimizer' )
# Assign Old Actor
self.brain.addOperation( function = tb.ops.assign,
input = ['Old', 'Actor'],
name = 'Assign' )
# TRAIN NETWORK
def train( self, prev_state, curr_state, actn, rewd, done, episode ):
# Store New Experience Until Train
self.experiences.append( (prev_state, curr_state, actn, rewd, done) )
# Check for Train
if ( len(self.experiences) >= self.BATCH_SIZE ):
batch = self.experiences
# Separate Batch Data
prev_states = [d[0] for d in batch]
curr_states = [d[1] for d in batch]
actions = [d[2] for d in batch]
rewards = [d[3] for d in batch]
dones = [d[4] for d in batch]
# States Values
prev_values = np.squeeze( self.brain.run( 'Critic/Value' , [ [ 'Critic/Observation', prev_states ] ] ) )
curr_values = np.squeeze( self.brain.run( 'Critic/Value' , [ [ 'Critic/Observation', curr_states ] ] ) )
# Calculate Generalized Advantage Estimation
running_add_y = 0
running_add_a = 0
y = np.zeros_like(rewards)
advantage = rewards + (self.GAMMA * curr_values) - prev_values
for t in reversed ( range( 0, len( advantage ) ) ):
if dones[t]:
curr_values[t] = 0
running_add_a = 0
running_add_y = curr_values[t] * self.GAMMA + rewards [t]
running_add_a = running_add_a * self.GAMMA * self.LAM + advantage [t]
y [t] = running_add_y
advantage [t] = running_add_a
y = np.expand_dims( y, 1 )
advantage = np.expand_dims( advantage, 1 )
# Assign Old Pi
self.brain.run( ['Assign'], [] )
# Get Old Probabilities
if self.continuous:
o_Mu, o_Sigma = self.brain.run( [ 'Old/Mu', 'Old/Sigma' ], [ [ 'Old/Observation', prev_states ] ] )
if not self.continuous:
o_Discrete = self.brain.run( 'Old/Discrete' , [ [ 'Old/Observation', prev_states ] ] )
# Optimize
for _ in range (self.UPDATE_SIZE):
if self.continuous:
self.brain.run( [ 'ActorOptimizer' ], [ [ 'Actor/Observation', prev_states ],
[ 'O_Mu', o_Mu ],
[ 'O_Sigma', o_Sigma ],
[ 'Actions', actions ],
[ 'Advantage', advantage ] ] )
if not self.continuous:
self.brain.run( [ 'ActorOptimizer' ], [ [ 'Actor/Observation', prev_states ],
[ 'O_Discrete', o_Discrete ],
[ 'Actions', actions ],
[ 'Advantage', advantage ] ] )
self.brain.run( [ 'CriticOptimizer' ], [ [ 'Critic/Observation', prev_states ],
[ 'Advantage', y ] ] )
# Reset
self.experiences = []
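# A minimal standalone sketch of the return / advantage recursion performed in
# train() above, in plain numpy (function name and defaults are illustrative,
# not part of the player API):
def _sketch_gae( rewards , values , next_values , dones , gamma = 0.99 , lam = 0.95 ):
    n = len( rewards )
    y = np.zeros( n )          # bootstrapped one-step critic targets
    advantage = np.zeros( n )  # GAE(lambda) advantages for the actor
    running = 0.0
    for t in reversed( range( n ) ):
        nv = 0.0 if dones[t] else next_values[t]  # no bootstrap past episode end
        if dones[t]: running = 0.0                # reset at episode boundaries
        delta = rewards[t] + gamma * nv - values[t]
        running = running * gamma * lam + delta
        y[t] = rewards[t] + gamma * nv
        advantage[t] = running
    return np.expand_dims( y , 1 ) , np.expand_dims( advantage , 1 )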
|
bermeom/quadruped-robot
|
learning/sources/source_pygame_chase.py
|
from sources.source_pygame import source_pygame
##### SOURCE PYGAME CHASE
class source_pygame_chase( source_pygame ):
### __INIT__
def __init__( self ):
source_pygame.__init__( self , 'pygame_chase' )
### INFORMATION
def num_actions( self ): return 5
### MAP KEYS
def map_keys( self , actn ):
if actn[0] : return 0
if actn[1] : return 1
if actn[2] : return 2
if actn[3] : return 3
if actn[4] : return 4
### PROCESS OBSERVATION
def process( self , obsv ):
return obsv
|
bermeom/quadruped-robot
|
tensorblock/functions/func_variables.py
|
import tensorflow as tf
### Placeholder
def placeholder( shape = None , name = None , dtype = tf.float32 ):
if shape is None: return tf.placeholder( dtype , name = name )
return tf.placeholder( dtype , shape , name = name )
### Numpy
def numpy( tensor , pars , name = None , dtype = tf.float32 ):
    return tf.Variable( tensor , trainable = pars['trainable'] , dtype = dtype , name = name )
### Copy
def copy( tensor , pars , name = None , dtype = tf.float32 ):
    return tf.Variable( tensor.initialized_value() , trainable = pars['trainable'] , name = name )
### Dummy
def dummy( shape , name = None , dtype = tf.float32 ):
if name is None: name = 'dummy'
return tf.get_variable( initializer = tf.zeros( shape = shape ) ,
trainable = False , dtype = dtype , name = name )
### None
def none( shape , pars , name = None , dtype = tf.float32 ):
return None
### Constant
def constant( shape , pars , name = None , dtype = tf.float32 ):
if name is None: name = 'constant'
return tf.get_variable( initializer = tf.constant( pars['value'] , shape = shape ) ,
trainable = pars['trainable'] , dtype = dtype , name = name )
### Random Normal
def random_normal( shape , pars , name = None , dtype = tf.float32 ):
if name is None: name = 'random_normal'
return tf.get_variable( initializer = tf.random_normal( shape = shape ,
mean = pars['mean'] , stddev = pars['stddev'] , seed = pars['seed'] ) ,
trainable = pars['trainable'] , dtype = dtype , name = name )
### Truncated Normal
def truncated_normal( shape , pars , name = None , dtype = tf.float32 ):
if name is None: name = 'truncated_normal'
return tf.get_variable( initializer = tf.truncated_normal( shape = shape ,
mean = pars['mean'] , stddev = pars['stddev'] , seed = pars['seed'] ) ,
trainable = pars['trainable'] , dtype = dtype , name = name )
### Random Uniform
def random_uniform( shape , pars , name = None , dtype = tf.float32 ):
if name is None: name = 'random_uniform'
return tf.get_variable( initializer = tf.random_uniform( shape = shape ,
minval = pars['min'] , maxval = pars['max'] , seed = pars['seed'] ) ,
trainable = pars['trainable'] , dtype = dtype , name = name )
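### A hedged usage sketch: these factories are normally invoked by the recipe
### builder, but can be called directly; the pars keys below are exactly the
### ones read by truncated_normal above (shape and values are illustrative)
def _example_weight():
    pars = { 'mean' : 0.0 , 'stddev' : 0.1 , 'seed' : None , 'trainable' : True }
    return truncated_normal( [ 64 , 10 ] , pars , name = 'W_example' )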
|
bermeom/quadruped-robot
|
tensorblock/aux/aux_load.py
|
import pickle
import numpy as np
import tensorflow as tf
import tensorblock as tb
import cv2
### Create Dataset
def create_dataset( tensors, extras, pars ):
s_dataset = tf.data.Dataset.list_files(pars['data_path'])
def _s_parse_function(filename):
image_string = tf.read_file(filename)
image_decoded = tf.image.decode_png(image_string, channels = 3)
image_decoded = tf.py_func( lambda x : cv2.resize( x , ( pars['width'] , pars['height'] ) ), [image_decoded], tf.uint8 )
image_decoded = tf.py_func( lambda x : cv2.cvtColor( x, cv2.COLOR_RGB2GRAY ), [image_decoded], tf.uint8 )
return image_decoded
s_dataset = s_dataset.map(_s_parse_function)
s_dataset = s_dataset.batch(pars['b_size'])
s_dataset = s_dataset.repeat()
a_dataset = tf.data.TextLineDataset(pars['label_path'])
a_dataset = a_dataset.batch(pars['b_size'])
a_dataset = a_dataset.repeat()
iterator1 = s_dataset.make_one_shot_iterator()
image = iterator1.get_next()
iterator2 = a_dataset.make_one_shot_iterator()
label = iterator2.get_next()
return image, label
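### A hedged usage sketch for create_dataset: the pars keys below are exactly
### the ones consumed above; paths and sizes are illustrative assumptions
def _example_create_dataset():
    pars = { 'data_path'  : 'data/frames/*.png' ,  # glob of PNG frames
             'label_path' : 'data/labels.txt' ,    # one label per line
             'width' : 84 , 'height' : 84 ,        # cv2 resize target
             'b_size' : 32 }                       # batch size
    image , label = create_dataset( None , None , pars )  # tensors/extras unused
    with tf.Session() as sess:
        return sess.run( [ image , label ] )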
### Load Matrix
# Expects a text file whose first line holds 'rows cols'; the second line is
# skipped, and each following line holds one space-separated matrix row
def load_mat( file ):
    lines = [ line.rstrip('\n') for line in open( file ) ]
    nd = lines[0].split( ' ' )
    mat = np.zeros( ( int(nd[0]) , int(nd[1]) ) )
    for i in range( 2 , len( lines ) ):
        vals = lines[i].split( ' ' )
        for j in range( 0 , len( vals ) - 1 ):
            mat[i-2,j] = float( vals[j] )
    return mat
### Save List
def save_list( file , list ):
pickle.dump( list , open( file + '.lst' , 'wb' ) )
### Load List
def load_list( file ):
return pickle.load( open( file + '.lst' , 'rb' ) )
### Load Numpy
def load_numpy( file ):
return np.load( file + '.npy' )
|
bermeom/quadruped-robot
|
learning/sources/vrep/vrepper/vrep_object.py
|
from vrepper.lib.vrepConst import sim_jointfloatparam_velocity, simx_opmode_buffer, simx_opmode_streaming
from vrepper.utils import check_ret, blocking
import numpy as np
class vrepobject():
def __init__(self, env, handle, is_joint=True):
self.env = env
self.handle = handle
self.is_joint = is_joint
def get_orientation(self, relative_to=None):
eulerAngles, = check_ret(self.env.simxGetObjectOrientation(
self.handle,
-1 if relative_to is None else relative_to.handle,
blocking))
return eulerAngles
def get_position(self, relative_to=None):
position, = check_ret(self.env.simxGetObjectPosition(
self.handle,
-1 if relative_to is None else relative_to.handle,
blocking))
return position
def get_velocity(self):
return check_ret(self.env.simxGetObjectVelocity(
self.handle,
# -1 if relative_to is None else relative_to.handle,
blocking))
# linearVel, angularVel
def set_velocity(self, v):
self._check_joint()
return check_ret(self.env.simxSetJointTargetVelocity(
self.handle,
v,
blocking))
def set_force(self, f):
self._check_joint()
return check_ret(self.env.simxSetJointForce(
self.handle,
f,
blocking))
def set_position_target(self, angle):
"""
Set desired position of a servo
:param int angle: target servo angle in degrees
:return: None if successful, otherwise raises exception
"""
self._check_joint()
return check_ret(self.env.simxSetJointTargetPosition(
self.handle,
-np.deg2rad(angle),
blocking))
def force_position(self, angle):
"""
Force desired position of a servo
:param int angle: target servo angle in degrees
:return: None if successful, otherwise raises exception
"""
self._check_joint()
return check_ret(self.env.simxSetJointPosition(
self.handle,
-np.deg2rad(angle),
blocking))
def set_position(self, x, y, z):
"""
Set object to specific position (should never be done with joints)
:param pos: tuple or list with 3 coordinates
:return: None
"""
pos = (x, y, z)
return check_ret(self.env.simxSetObjectPosition(self.handle, -1, pos, blocking))
def get_joint_angle(self):
self._check_joint()
angle = check_ret(
self.env.simxGetJointPosition(
self.handle,
blocking
)
)
return -np.rad2deg(angle[0])
def get_joint_force(self):
self._check_joint()
force = check_ret(
self.env.simxGetJointForce(
self.handle,
blocking
)
)
return force
def get_joint_velocity(self):
self._check_joint()
vel = check_ret(self.env.simxGetObjectFloatParameter(
self.handle,
sim_jointfloatparam_velocity,
blocking
))
return vel
def read_force_sensor(self):
state, forceVector, torqueVector = check_ret(self.env.simxReadForceSensor(
self.handle,
blocking))
if state & 1 == 1:
return None # sensor data not ready
else:
return forceVector, torqueVector
def get_vision_image(self):
resolution, image = check_ret(self.env.simxGetVisionSensorImage(
self.handle,
0, # options=0 -> RGB
blocking,
))
dim, im = resolution, image
nim = np.array(im, dtype='uint8')
nim = np.reshape(nim, (dim[1], dim[0], 3))
        nim = np.flip(nim, 0)  # vertical flip (axis 0 = image rows)
        nim = np.flip(nim, 2)  # channel flip: RGB -> BGR
return nim
def _check_joint(self):
if not self.is_joint:
raise Exception("Trying to call a joint function on a non-joint object.")
    def get_global_variable(self, name, is_first_time):
        # First call starts streaming the signal; later calls read the buffer.
        # self.env injects the client id, as in the other simx* calls above.
        if is_first_time:
            return self.env.simxGetFloatSignal(name, simx_opmode_streaming)
        else:
            return self.env.simxGetFloatSignal(name, simx_opmode_buffer)
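# A hedged usage sketch (a joint vrepobject would normally come from vrepper,
# e.g. env.get_object_by_name; the angle below is illustrative):
def _example_joint_step( joint , degrees ):
    joint.set_position_target( degrees )  # command the servo, in degrees
    return joint.get_joint_angle()        # read back the actual joint angle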
|
bermeom/quadruped-robot
|
tensorblock/layers/layer_conv2d.py
|
import numpy as np
import tensorflow as tf
import tensorblock as tb
class layer_conv2d:
####### Data
def name(): return 'Conv2D'
def shapeMult(): return 1
def dims(): return 2
def allowPooling(): return True
####### Function
def function( x , W , b , recipe , pars ):
if tb.aux.tf_length( x ) == 2:
x = tb.aux.tf_fold2D( x , tb.aux.tf_shape( W )[-2] )
pars['in_sides'] = tb.aux.tf_shape( x )[1:3]
strides = pars['strides']
layer = tf.nn.conv2d( x , W , name = 'Conv2D' ,
strides = [ 1 , strides[0] , strides[1] , 1 ] ,
padding = pars['padding'] )
layer = tb.extras.bias( layer , b )
return [ layer ] , pars , None
####### Shapes
def shapes( input_shape , pars ):
in_channels = pars['in_channels']
out_channels = pars['out_channels']
ksize = pars['ksize']
weight_shape = [ ksize[0] , ksize[1] , in_channels , out_channels ]
bias_shape = [ out_channels ]
return weight_shape , bias_shape
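### A hedged shape-bookkeeping example: a 5x5 kernel mapping 3 -> 32 channels
### yields [5, 5, 3, 32] weights and [32] biases (pars values are illustrative)
def _example_shapes():
    pars = { 'in_channels' : 3 , 'out_channels' : 32 , 'ksize' : ( 5 , 5 ) }
    return layer_conv2d.shapes( None , pars )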
|
bermeom/quadruped-robot
|
tensorblock/__init__.py
|
import tensorblock.aux
import tensorblock.layers
import tensorblock.functions.func_variables as vars
import tensorblock.functions.func_activations as activs
import tensorblock.functions.func_operations as ops
import tensorblock.functions.func_optimizers as optims
import tensorblock.functions.func_extras as extras
import tensorblock.functions.func_plotters as plotters
from tensorblock.recipe.recipe import recipe
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_DDPG_1.py
|
from players_reinforcement.player import player
from auxiliar.aux_plot import *
import tensorflow as tf
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
# PLAYER DDPG
class player_DDPG_1(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
self.num_stored_obsv = self.NUM_FRAMES
self.noise_state = 0
self.dt = 0.01
    ## ORNSTEIN-UHLENBECK PROCESS
    # One Euler-Maruyama step of dx = theta*(mu - x)*dt + sigma*sqrt(dt)*N(0,1),
    # yielding temporally correlated exploration noise for continuous actions
    def OU( self, mu, theta, sigma):
x = self.noise_state
dx = self.dt * theta * (mu - self.noise_state) + sigma * np.random.randn(self.num_actions) * np.sqrt(self.dt)
self.noise_state = x + dx
return self.noise_state
### CHOOSE NEXT ACTION
def act( self , state):
return self.calculate( state )
# CALCULATE NETWORK
def calculate(self, state):
action = self.brain.run( 'NormalActor/Output', [ [ 'NormalActor/Observation', [state] ] ] )
noise = self.OU( mu = 0, theta = 0.15 , sigma = 0.2 )
action = action[0] + noise
return self.create_action( np.reshape( action, [self.num_actions] ) )
# PREPARE NETWORK
def operations(self):
# Placeholders
self.brain.addInput( shape = [ None, 1 ], name = 'TDTarget', dtype = tf.float32 )
self.brain.addInput( shape = [ None, self.num_actions ], name = 'ActionGrads', dtype = tf.float32 )
# Operations
# Critic
self.brain.addOperation( function = tb.ops.get_grads,
input = [ 'NormalCritic/Value', 'NormalCritic/Actions' ],
summary = 'Summary',
writer = 'Writer',
name = 'GetGrads' )
self.brain.addOperation( function = tb.ops.hlmean_squared_error,
input = [ 'NormalCritic/Value', 'TDTarget' ],
name = 'CriticCost' )
self.brain.addOperation( function = tb.optims.adam,
input = 'CriticCost',
learning_rate = self.C_LEARNING_RATE,
name = 'CriticOptimizer' )
# Actor
self.brain.addOperation( function = tb.ops.combine_grads,
input = [ 'NormalActor/Output', 'ActionGrads' ],
name = 'CombineGrads' )
self.brain.addOperation( function = tb.optims.adam_apply,
input = [ 'CombineGrads' ],
learning_rate = self.A_LEARNING_RATE,
name = 'ActorOptimizer' )
# Assign Softly
self.brain.addOperation( function = tb.ops.assign_soft,
input = ['TargetCritic', 'NormalCritic', self.TAU],
name = 'AssignCritic')
self.brain.addOperation( function = tb.ops.assign_soft,
input = ['TargetActor', 'NormalActor', self.TAU],
name = 'AssignActor')
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience
self.experiences.append( ( prev_state , curr_state , actn , rewd , done ) )
if len( self.experiences ) > self.EXPERIENCES_LEN: self.experiences.popleft()
# Check for Train
if len( self.experiences ) > self.STEPS_BEFORE_TRAIN and self.BATCH_SIZE > 0:
# Select Random Batch
batch = random.sample( self.experiences , self.BATCH_SIZE )
# Separate Batch Data
prev_states = [d[0] for d in batch]
curr_states = [d[1] for d in batch]
actions = [d[2] for d in batch]
rewards = [d[3] for d in batch]
dones = [d[4] for d in batch]
# States Values
target_actns = self.brain.run( 'TargetActor/Output', [ [ 'TargetActor/Observation', curr_states ] ] )
next_values = self.brain.run( 'TargetCritic/Value', [ [ 'TargetCritic/Observation', curr_states ],
[ 'TargetCritic/Actions', target_actns ] ] )
# Calculate Expected Reward
expected_rewards = []
for i in range( self.BATCH_SIZE ):
if dones[i]:
expected_rewards.append( rewards[i] )
else:
expected_rewards.append( rewards[i] + self.REWARD_DISCOUNT * next_values[i] )
expected_rewards = np.reshape( expected_rewards, [ self.BATCH_SIZE, 1 ] )
# Optimize Critic
_ = self.brain.run( ['CriticOptimizer'], [ [ 'NormalCritic/Observation', prev_states ],
[ 'NormalCritic/Actions', actions ],
[ 'TDTarget', expected_rewards ] ] )
# Get New Actions
new_a = self.brain.run( 'NormalActor/Output', [ ['NormalActor/Observation', prev_states ] ] )
# Get Critic Grads wrt New Actions
grads = self.brain.run( ['GetGrads'], [ [ 'NormalCritic/Observation', prev_states],
[ 'NormalCritic/Actions', new_a ] ] )
# Optimize Actor
_ = self.brain.run( ['ActorOptimizer'], [ [ 'NormalActor/Observation', prev_states ],
[ 'ActionGrads', grads[0] ] ] )
# Copy weights to Target Networks
self.brain.run( ['AssignActor'], [] )
self.brain.run( ['AssignCritic'], [] )
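# A hedged numpy sketch of the Polyak averaging that the assign_soft
# operations above are assumed to implement for the target networks:
def _soft_update( target_weights , normal_weights , tau ):
    # theta_target <- tau * theta_normal + (1 - tau) * theta_target
    return [ tau * w + ( 1.0 - tau ) * tw
             for w , tw in zip( normal_weights , target_weights ) ]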
|
bermeom/quadruped-robot
|
tensorblock/recipe/recipe_plot.py
|
class recipe_plot:
####### Add Plotter
def addPlotter( self , **args ):
pars = { **self.defs_plotter , **args }
pars['name'] = self.add_label(
self.plotters , 'Plotter' , pars['name'] , add_order = False )
self.plotters.append( [ pars['function'] , pars ] )
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_reinforce_1A.py
|
from players_reinforcement.player_reinforce_1 import *
# PLAYER REINFORCE
class player_reinforce_1A( player_reinforce_1 ):
NUM_FRAMES = 1
LEARNING_RATE = 1e-4
REWARD_DISCOUNT = 0.99
### __INIT__
def __init__( self ):
player_reinforce_1.__init__( self )
# PROCESS OBSERVATION
def process(self, obsv):
return np.stack( tuple( self.obsv_list[i] for i in range( self.NUM_FRAMES ) ), axis = 1 )
# PREPARE NETWORK
def network( self ):
# Input Placeholder
self.brain.addInput( shape = [ None, self.obsv_shape[0], self.NUM_FRAMES ],
name = 'Observation' )
# Fully Connected Layers
self.brain.setLayerDefaults( type = tb.layers.fully ,
activation = tb.activs.relu )
self.brain.addLayer( out_channels = 64 ,
input = 'Observation' )
self.brain.addLayer( out_channels = 64 )
self.brain.addLayer( out_channels = self.num_actions,
activation = tb.activs.softmax,
name = 'Output' )
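    # A hedged sketch of how the softmax 'Output' above is typically consumed
    # for discrete action selection (mirrors the sampling used by other players):
    def _example_sample( self , probs ):
        return np.random.choice( np.arange( len( probs ) ) , p = probs )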
|
bermeom/quadruped-robot
|
tensorblock/functions/func_plotters.py
|
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import clf , plot , draw , show
import tensorblock as tb
### Plot Initialize
def initialize( shape ):  # shape is accepted for the plotter interface but unused
plt.figure( figsize = ( 12 , 9 ) )
### Plot Reconstruction
def reconst( x1 , x2 ,
epoch = 0 , dir = 'figures' , shape = None ):
if len( x1.shape ) == 2:
s = tb.aux.side2D( x1.shape[1] ) ; x1 = x1.reshape( [ -1 , s , s , 1 ] )
if len( x2.shape ) == 2:
s = tb.aux.side2D( x2.shape[1] ) ; x2 = x2.reshape( [ -1 , s , s , 1 ] )
r , c = shape ; k = 0
for j in range( r ):
for i in range( c ):
plt.subplot( 2 * r , c , i + 2 * j * c + 1 )
plt.imshow( x1[ k , : , : , 0 ] , vmin = 0 , vmax = 1 )
plt.axis( 'off' )
plt.subplot( 2 * r , c , i + 2 * j * c + c + 1 )
plt.imshow( x2[ k , : , : , 0 ] , vmin = 0 , vmax = 1 )
plt.axis( 'off' )
k = k + 1
if not os.path.exists( dir ): os.makedirs( dir )
plt.savefig( dir + '/epoch%d.png' % epoch , bbox_inches = 'tight' )
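### A hedged usage sketch: draw a 4x8 grid of originals above reconstructions
### (inputs may be flat [N, s*s] or image [N, h, w, 1] arrays; values illustrative)
def _example_reconst( originals , reconstructions ):
    initialize( None )  # the shape argument is currently unused
    reconst( originals , reconstructions , epoch = 0 , shape = ( 4 , 8 ) )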
|