text stringlengths 8 6.05M |
|---|
import sys
# Find the three largest primes <= inNum (and > 2) for the number given on the
# command line, then print them smallest-first.
inNum = int(sys.argv[1])
count = 3   # how many primes we still need to collect
nums = []   # primes found so far, largest first
for i in range(inNum, 2, -1):
    if count == 0:
        break
    chk = True
    # PERF FIX: trial division only needs to test divisors up to sqrt(i),
    # not every value below i, to prove primality.
    for j in range(2, int(i ** 0.5) + 1):
        if i % j == 0:
            chk = False
            break
    if chk:
        nums.append(i)
        count -= 1
if count != 0:
    # Fewer than three primes exist in (2, inNum].
    print('number is so small')
else:
    print('the biggest prime number up to 10 : ', str(nums[2]), str(nums[1]), str(nums[0]))
from django.shortcuts import render, redirect
import random
def get_random_alphanum_str(length=8):
    """Return a random alphanumeric string of the given length.

    Each position is independently a digit, an uppercase letter, or a
    lowercase letter (one of the three groups is picked first, then a
    character from it).
    """
    # ASCII code ranges for each character group, keyed by the die roll.
    groups = {1: (48, 57), 2: (65, 90), 3: (97, 122)}
    chars = []
    for _ in range(length):
        lo, hi = groups[random.randint(1, 3)]
        chars.append(chr(random.randint(lo, hi)))
    return "".join(chars)
def index(request):
    """Show the generator page, seeding session state on the first visit."""
    session = request.session
    if 'nr_attempts' not in session:
        # First visit: start the counter at 1 and pick an initial word.
        session['nr_attempts'] = 1
        session['random_word'] = get_random_alphanum_str( 14 )
    return render( request, 'random_word_generator/index.html' )
def generate(request):
    """Bump the attempt counter and generate a fresh random word.

    BUG FIX: the original did `request.session['nr_attempts'] += 1`, which
    raises KeyError if this view is hit before `index` has seeded the
    session; defaulting the counter to 0 makes the view safe to hit first.
    """
    request.session['nr_attempts'] = request.session.get('nr_attempts', 0) + 1
    request.session['random_word'] = get_random_alphanum_str( 14 )
    return redirect( '/' )
def reset(request):
    """Reset the attempt counter to 1 and pick a new word."""
    session = request.session
    session['nr_attempts'] = 1
    session['random_word'] = get_random_alphanum_str( 14 )
    return redirect( '/' )
|
from panda3d.core import * # safety first
from numpy import sqrt
class normalizer:
    '''a normalizer object is an actual tool, it allows you to fully automate
    the normal calculation process. It scans the model's geomNodes, and for
    each vertex, calculates the normal of the adjacent surfaces, and gets the
    average normalized vector'''

    def __init__(self):
        # Stateless tool object; nothing to initialise.
        return None

    def compute_data(self, data, size):
        '''
        Computes the normals for the vertex grid defined by the provided data.

        Args:
            data: flat, row-major sequence of vertex positions.
            size: sequence whose [2] and [3] entries are the grid's row count
                (length) and column count (width).
                NOTE(review): size[0] and size[1] are unused here — confirm
                the expected layout with callers.

        Returns:
            list of LVecBase3f normals, one per entry of `data` (row-major).
        '''
        n = len(data)  # readability
        normalData = [LVecBase3f(0, 0, 1) for i in range(n)]  # default: straight up
        length, width = size[2], size[3]
        # Convert the flat list into a length x width grid of rows.
        buffer = [data[i * width:(i + 1) * width] for i in range(length)]
        for i in range(length):
            for j in range(width):
                # The 4 axis-aligned neighbours, ordered so that consecutive
                # pairs span adjacent faces around vertex (i, j).
                scanlist = [
                    (i, j + 1),
                    (i - 1, j),
                    (i, j - 1),
                    (i + 1, j),  # only use the 4 closest vertices
                ]
                normal = LVecBase3f(0, 0, 0)
                for a in range(len(scanlist)):
                    # Only use a face whose two neighbour vertices both exist.
                    if (0 <= scanlist[a][0] < length and 0 <= scanlist[a][1] < width
                            and 0 <= scanlist[a - 1][0] < length and 0 <= scanlist[a - 1][1] < width):
                        previous, current = scanlist[a - 1], scanlist[a]
                        # Accumulate the (unnormalized) face normal.
                        normal += LVecBase3f(
                            crossProd(
                                buffer[previous[0]][previous[1]] - buffer[i][j],
                                buffer[current[0]][current[1]] - buffer[i][j]
                            )
                        )
                # BUG FIX: the original wrote to index (i-1)*width + j, which
                # targets the wrong row and a negative index when i == 0;
                # vertex (i, j) lives at flat index i*width + j.
                normalData[i * width + j] = LVecBase3f(Vec3(normal).normalized())
        return normalData

    def blit_normals(self, normalData, geom):
        '''Write `normalData` into geom's 'normal' vertex column, one normal
        per vertex; the 'vertex' column is read only to walk the vertex count.'''
        vdata = geom.modifyVertexData()
        vertex = GeomVertexReader(vdata, 'vertex')
        normal = GeomVertexWriter(vdata, 'normal')
        i = 0
        while not vertex.isAtEnd():
            v = vertex.getData3f()  # advances the reader; value itself unused
            normal.setData3f(normalData[i])
            i += 1
        return None
def crossProd(vectA, vectB):
    """Return the cross product vectA x vectB as a tuple.

    A tuple is returned because it converts easily to LVecBase3f.
    """
    ax, ay, az = vectA[0], vectA[1], vectA[2]
    bx, by, bz = vectB[0], vectB[1], vectB[2]
    return (ay * bz - az * by,
            az * bx - ax * bz,
            ax * by - ay * bx)
def normalize(vect):  # unused
    """Return `vect` scaled to unit length as an LVecBase3f.

    BUG FIX: the original wrapped each component in abs(), which flipped any
    negative component into the positive octant; a normalized vector must
    preserve the direction of its input.
    """
    norm = sqrt(vect[0] ** 2 + vect[1] ** 2 + vect[2] ** 2)
    output = LVecBase3f((
        vect[0],
        vect[1],
        vect[2]
    ))
    output /= norm
    return output
#!/usr/bin/python
import sys
import argparse
import multiprocessing
import termcolor as T
from expt import Expt
from time import sleep
from host import *
from site_config import *
import os
# Command-line interface for the rate-limiter netperf experiment (Python 2).
parser = argparse.ArgumentParser(description="Netperf Test for various rate limiters.")
parser.add_argument('--proto',
                    dest="proto",
                    choices=["tcp","udp"],
                    default="tcp")
parser.add_argument('--nstream',
                    type=int,
                    help="Number of TCP_STREAM flows.",
                    default=4)
parser.add_argument('--ssize',
                    dest="ssize",
                    type=int,
                    help="Size for stream flows.",
                    default=4)
parser.add_argument('--nrr',
                    dest="nrr",
                    type=int,
                    help="Number of TCP_RR flows.",
                    default=512)
parser.add_argument('--rrsize',
                    dest="rrsize",
                    type=int,
                    help="Req and resp size for RR.",
                    default=1)
parser.add_argument('--htb-mtu',
                    dest="htb_mtu",
                    help="HTB MTU parameter.",
                    default=1500)
parser.add_argument('--num-class',
                    help="Number of classes of traffic.",
                    type=int,
                    default=1)
parser.add_argument('--num-senders', '--ns',
                    type=int,
                    help="Number of sender programs spawned to send flows.",
                    default=config['NUM_CPUS'])
parser.add_argument('--mtu',
                    help="MTU parameter.",
                    default=1500)
parser.add_argument('--pin',
                    dest="pin",
                    help="Pin programs to CPUs in round robin fashion.",
                    action="store_true",
                    default=True)
parser.add_argument('--exptid',
                    dest="exptid",
                    help="Experiment ID",
                    default=None)
parser.add_argument('--rl',
                    dest="rl",
                    help="Which rate limiter to use",
                    choices=["htb", "qfq", 'none', "tbf", "eyeq", "hwrl"],
                    default="")
parser.add_argument('--time', '-t',
                    dest="t",
                    type=int,
                    help="Time to run the experiment",
                    default=10)
parser.add_argument('--dryrun',
                    dest="dryrun",
                    help="Don't execute experiment commands.",
                    action="store_true",
                    default=False)
parser.add_argument('--hosts',
                    dest="hosts",
                    help="The two hosts (server/client) to run tests",
                    nargs="+", default=config['DEFAULT_HOSTS'])
parser.add_argument('--sniffer',
                    dest="sniffer",
                    help="The sniffer machine to capture packet timings",
                    default=config['SNIFFER_HOST'])
parser.add_argument('--rate',
                    dest="rate",
                    type=int,
                    help="total rate limit",
                    default=1000)
parser.add_argument('--user',
                    action="store_true",
                    default=False,
                    help="App-level rate limiting")
parser.add_argument('--startport',
                    dest="startport",
                    type=int,
                    default=5000)
args = parser.parse_args()

# 'none' means no qdisc rate limiter: fall back to app-level limiting.
if args.rl == "none":
    print "Using userspace rate limiting"
    args.user = True
elif args.rl == "hwrl":
    # Hardware rate limiting is only implemented for these two NIC vendors.
    if not config['NIC_VENDOR'] == "Intel" and not config['NIC_VENDOR'] == "Mellanox":
        print "Hardware rate limiting only available on Intel and Mellanox NICs"
        sys.exit(-1)
    print "Using %s hardware rate limiting" % config['NIC_VENDOR']

# Cap the number of sender programs relative to the number of traffic classes.
if (args.rl == "none" or args.rl == "hwrl" or args.rl == "htb"):
    if (args.num_class < 2 * args.num_senders):
        args.num_senders = args.num_class / 2
        print "RL = %s and number of classes < 2*number of sender programs." % args.rl
        print "So, I am setting #programs = #classes / 2"
else:
    if args.num_class < args.num_senders:
        args.num_senders = args.num_class
        print "Number of classes is less than number of sender programs."
        print "So, I am setting #programs = #classes"

# A single sender cannot saturate >5Gbps with MTU-sized packets on Intel NICs.
if args.num_class == 1 and args.rate > 5000:
    print "With Intel NIC, 1 sender program cannot push more than 5Gbps with 1500 byte packets."
    print "Using 2 sender programs and 4 classes instead"
    args.num_senders = 2
    args.num_class = 4
def e(s, tmpdir="/tmp"):
    """Build the per-experiment path '<tmpdir>/<exptid>/<s>'."""
    return "{0}/{1}/{2}".format(tmpdir, args.exptid, s)
class UDP(Expt):
    """UDP trafgen experiment: cleans the hosts, installs the selected rate
    limiter on the client, starts monitors/sniffer, then runs the senders."""

    def start(self):
        """Set up server/client/sniffer hosts, configure rate limiting and
        launch monitors and traffic generators."""
        # num servers, num clients
        ns = self.opts("ns")
        nc = self.opts("nrr")
        dir = self.opts("exptid")
        server = self.opts("hosts")[0]
        client = self.opts("hosts")[1]
        sniffer = self.opts("sniffer")
        startport = self.opts("startport")
        self.server = Host(server)
        self.client = Host(client)
        self.sniffer = Host(sniffer)
        self.hlist = HostList()
        self.hlist.append(self.server)
        self.hlist.append(self.client)
        # Fresh per-experiment temp dirs on all hosts.
        self.hlist.rmrf(e(""))
        self.hlist.mkdir(e(""))
        if sniffer:
            self.sniffer.rmrf(e("", tmpdir=config['SNIFFER_TMPDIR']))
            self.sniffer.mkdir(e("", tmpdir=config['SNIFFER_TMPDIR']))
            self.sniffer.cmd("killall -9 %s" % config['SNIFFER'])
        # Clean slate: remove modules, leftover senders, qdiscs, HW limits.
        self.hlist.rmmod()
        self.hlist.killall("udp")
        self.hlist.stop_trafgen()
        self.hlist.remove_qdiscs()
        if config['NIC_VENDOR'] == "Intel":
            self.client.clear_intel_hw_rate_limits(numqueues=config['NIC_HW_QUEUES'])
            sleep(4)
        elif config['NIC_VENDOR'] == "Mellanox":
            self.client.clear_mellanox_hw_rate_limits()
            sleep(4)
        #self.hlist.insmod_qfq()
        # Start listener process on server
        self.server.start_trafgen_server("udp", startport, self.opts("num_class"))
        # Install the selected rate limiter on the client.
        if self.opts("rl") == "htb":
            self.client.add_htb_qdisc(str(args.rate) + "Mbit", args.htb_mtu)
            # Split the total rate evenly across the traffic classes.
            rate_str = '%.3fMbit' % (self.opts("rate") * 1.0 / self.opts("num_class"))
            if self.opts("num_class") is not None:
                num_hash_bits = int(math.log(self.opts("num_class"), 2))
                self.client.add_htb_hash(num_hash_bits=num_hash_bits)
                self.client.add_n_htb_class(rate=rate_str, num_class=self.opts("num_class"))
                # Just verify that we have created all classes correctly.
                self.client.htb_class_filter_output(e(''))
        elif self.opts("rl") == "tbf":
            self.client.add_tbf_qdisc(str(args.rate) + "Mbit")
        elif self.opts("rl") == "qfq":
            self.client.add_qfq_qdisc(str(args.rate), args.htb_mtu, nclass=self.opts("num_class"), startport=startport)
        elif self.opts("rl") == "eyeq":
            self.client.insmod(rate=args.rate)
        elif self.opts("rl") == "hwrl":
            # Spread the total rate across the available hardware queues.
            num_hw_rl = min(config['NIC_HW_QUEUES'], self.opts("num_senders"))
            hw_rate = self.opts("rate") / num_hw_rl
            if config['NIC_VENDOR'] == "Intel":
                for q in xrange(0, num_hw_rl):
                    # First queue will account for remainder in rate limit
                    delta = 1 if (q < self.opts("rate") % num_hw_rl) else 0
                    self.client.add_intel_hw_rate_limit(rate=hw_rate + delta, queue=q)
                    sleep(0.5)
                sleep(2)
            elif config['NIC_VENDOR'] == "Mellanox":
                rates = [hw_rate for x in range(0,num_hw_rl)]
                if num_hw_rl < 8:
                    # NOTE(review): padded to 8 entries with zeros -- looks like
                    # the Mellanox API expects a fixed-size list; confirm.
                    rates.extend([0 for x in range(num_hw_rl, 8)])
                self.client.add_mellanox_hw_rate_limit(rates)
                sleep(2)
        # Monitors: CPU, bandwidth, (optionally) qfq stats, and mpstat.
        self.client.start_cpu_monitor(e(''))
        self.client.start_bw_monitor(e(''))
        if self.opts("rl") == "qfq":
            self.client.start_qfq_monitor(e(''))
        self.client.start_mpstat(e(''))
        self.client.set_mtu(self.opts("mtu"))
        if sniffer:
            self.sniffer.start_sniffer_delayed(e('', tmpdir=config['SNIFFER_TMPDIR']),
                                               board=0, delay=config['SNIFFER_DELAY'],
                                               duration=config['SNIFFER_DURATION'])
        sleep(1)
        num_senders = self.opts("num_senders")
        num_class = self.opts("num_class")
        # Vimal: Initially I kept this rate = 10000, so the kernel
        # module will do all rate limiting. But it seems like the
        # function __ip_route_output_key seems to consume a lot of CPU
        # usage at high packet rates, so I thought I better keep the
        # packet rate the same.
        # Siva: We won't be rate limiting in application unless we are measuring
        # user level rate limiting. So it doesn't really matter.
        ## rate = self.opts("rate") / num_senders
        # If we want userspace rate limiting
        if self.opts("user") == True:
            rate = self.opts("rate") / num_senders
        else:
            rate = 0
        self.client.start_n_trafgen("udp", num_class, num_senders,
                                    socket.gethostbyname(server), startport,
                                    rate, send_size=1472, mtu=self.opts("mtu"),
                                    dir=e(''), pin=self.opts("pin"))
        return

    def stop(self):
        """Collect stats, tear everything down and copy results locally."""
        self.client.qfq_stats(e(''))
        print 'waiting...'
        sleep(10)
        self.hlist.stop_qfq_monitor()
        self.hlist.killall("iperf netperf netserver ethstats udp")
        self.hlist.stop_trafgen()
        if self.opts("sniffer"):
            self.sniffer.stop_sniffer()
            self.sniffer.copy_local(e('', tmpdir=config['SNIFFER_TMPDIR']),
                                    self.opts("exptid") + "-snf",
                                    tmpdir=config['SNIFFER_TMPDIR'])
        self.client.copy_local(e(''), self.opts("exptid"))
        # Leave the NIC without lingering hardware rate limits.
        if config["NIC_VENDOR"] == "Intel":
            self.client.clear_intel_hw_rate_limits(numqueues=config['NIC_HW_QUEUES'])
        elif config['NIC_VENDOR'] == "Mellanox":
            self.client.clear_mellanox_hw_rate_limits()
        return
# Run the experiment, then kill any ssh control connections left behind.
UDP(vars(args)).run()
os.system("killall -9 ssh")
|
import pandas as pd
from osmo_camera.correction import dark_frame
def from_rgb_images(rgbs_by_filepath):
    """ Generates a flat field `RGB image` from a set of `RGB images` based:
    https://docs.google.com/document/d/1i9VMA-XDHvCUdx-Bc7z1QxZqyYAOHiqckSKiXJOi0oU/edit
    Args:
        rgbs_by_filepath: A pandas Series of RGB images indexed by file path
    Returns:
        A flat field `RGB image` to be used for flat field correction
    """
    # Subtract the dark frame (using each image's exposure) from every image.
    corrected = {}
    for image_path, rgb_image in rgbs_by_filepath.items():
        corrected[image_path] = dark_frame.get_metadata_and_apply_dark_frame_correction(
            rgb_image, image_path
        )
    dark_frame_adjusted_rgb_images = pd.Series(corrected)

    # Pixel-wise average of all dark-frame-adjusted images ...
    flat_field_mean_of_image_stack = dark_frame_adjusted_rgb_images.mean(axis=0)
    # ... and the scalar mean RGB value of that averaged image.
    flat_field_mean = flat_field_mean_of_image_stack.mean(axis=(0, 1))
    return flat_field_mean / flat_field_mean_of_image_stack
|
import matplotlib.pyplot as plt
import json
# Load key -> count data from the JSON file and plot it in insertion order.
# (The original also created an unused `birthday_dict` — removed as dead code.)
with open("35_2.json", "r") as json_file:
    json_data = json.load(json_file)

# x values are the JSON keys (e.g. month names), y values their counts.
plot_x = list(json_data)
plot_y = [json_data[key] for key in plot_x]

plt.plot(plot_x, plot_y)
plt.show()
|
from threading import Thread, Lock
from concurrent.futures import ThreadPoolExecutor
import typing
a = 0
def function1(arg: int, lock: Lock) -> None:
    """Add `arg` to the module-level counter `a`, guarding the shared update
    with `lock`.

    The local accumulation is done outside the lock; only the single shared
    `a += tmp` update is serialized.

    BUG FIX: the original annotated the return type as int although nothing
    is returned, and typed `lock` as typing.TypeVar('Lock'), which creates a
    new type variable rather than referring to threading.Lock.
    """
    tmp = 0
    for _ in range(arg):
        tmp += 1
    with lock:
        global a
        a += tmp
def function2(arg: int) -> int:
    """Count from 0 up to `arg` by repeated increments and return the total.

    Uses only a local accumulator, so it needs no locking.
    """
    total = 0
    for _ in range(arg):
        total += 1
    return total
def main() -> None:
    """Compare a lock-protected shared counter updated by 5 threads with a
    single computation submitted to a thread pool."""
    # using previous solution: 5 threads each add 1,000,000 to global `a`
    threads = []
    lock = Lock()
    for _ in range(5):
        thread = Thread(target=function1, args=(1000000,lock))
        thread.start()
        threads.append(thread)
    [t.join() for t in threads]
    # Because each update happens under the lock, this reliably prints 5000000.
    print("----------------------", a)
    # using thread pool
    with ThreadPoolExecutor(max_workers=5) as executor:
        future = executor.submit(function2, 5000000)
        future.result()
        print("----------------------", future.result())
main()
|
import os
import PyPDF2
## Finds PDF, opens it with a context manager, and creates a PDF reader.
# FIX: the original opened the file with a bare open()/close() pair, so the
# handle leaked if any of the PyPDF2 calls raised; `with` guarantees closure.
path = "sample.pdf"
with open(path, 'rb') as f:
    pdf = PyPDF2.PdfFileReader(f)

    ## Get document information and print author/title (may be None if absent).
    information = pdf.getDocumentInfo()
    print(information.author)
    print(information.title)

    ## Print the number of pages of the PDF.
    print(pdf.getNumPages())
|
import warnings
import logging
from .base import LogMessage
class MQHandler(logging.Handler):
    """ Handler that could be injected in Standard Logging system to push a log
    in to a redis pubsub
    >>> import redis
    >>> import logging
    >>> from mqlog import Channel, MQHandler
    >>> log = logging.getLogger(__name__)
    >>> log.addHandler(MQHandler(Channel('log', mq=redis.StrictRedis())))
    >>> log.error('the galaxy in danger!',
    >>>           extra={'log_type': 'galaxy', 'status_code': '666'})
    """

    def __init__(self, channel=None, mq_params=None):
        """
        :type channel: mqlog.Channel
        :type mq_params: dict -- optional redis connection params; when given,
            a Channel is built from them via get_configured()
        """
        if mq_params:
            try:
                channel = self.get_configured(mq_params, channel)
            except Exception as err:
                # BUG FIX: the original called .format(self, err), so the
                # message showed the handler's repr instead of the error.
                warnings.warn('Could not configure MQHandler: {}'.format(err))
        logging.Handler.__init__(self)
        self.queue = channel

    def enqueue(self, record):
        """Convert `record` into a LogMessage dict and push it to the channel.

        :type record: logging.LogRecord
        """
        log_record = LogMessage(
            # Prefer an explicit log_type/type from `extra`, otherwise build
            # one from the record's origin ("name:funcName[lineno]").
            log_type=record.__dict__.get('log_type')
            or record.__dict__.get('type')
            or '{}:{}[{}]'.format(record.name,
                                  record.funcName, str(record.lineno)),
            object_name=record.__dict__.get('object_name'),
            object_id=record.__dict__.get('object_id'),
            level=record.levelno,
            status_code=record.__dict__.get('status_code'),
            datetime=record.__dict__.get('datetime') or record.created
        ).to_dict()
        log_record.update({'msg': record.msg})
        self.queue.send({'log': log_record})

    def close(self):
        # Nothing to release; the channel owns the redis connection.
        pass

    # BUG FIX: declared as a classmethod — the original took `cls` but had no
    # decorator, so it silently received the instance instead of the class.
    @classmethod
    def get_configured(cls, mq_params, channel=None):
        """Build (or pass through) a Channel from redis connection params.

        :type mq_params: dict
        :type channel: mqlog.Channel
        """
        if not channel:
            channel_name = mq_params.pop('channel', None)
            # FIX: "mush" -> "must" in the error message.
            assert channel_name, KeyError('You must specify a {\'channel\': \'name\'}, '
                                          'if you do not pass configured Channel object explicitly')
            import redis
            from mqlog.base import Channel as channel
            mq = redis.StrictRedis(**mq_params)
            return channel(channel_name, mq=mq)
        return channel

    def prepare(self, record):
        """
        Prepares a record for queuing. The object returned by this method is
        enqueued.
        The base implementation formats the record to merge the message
        and arguments, and removes unpickleable items from the record
        in-place.
        You might want to override this method if you want to convert
        the record to a dict or JSON string, or send a modified copy
        of the record while leaving the original intact.
        """
        # The format operation gets traceback text into record.exc_text
        # (if there's exception data), and also puts the message into
        # record.message. We can then use this to replace the original
        # msg + args, as these might be unpickleable. We also zap the
        # exc_info attribute, as it's no longer needed and, if not None,
        # will typically not be pickleable.
        self.format(record)
        record.msg = record.message
        record.args = None
        record.exc_info = None
        return record

    def emit(self, record):
        """
        Emit a record.
        Writes the LogRecord to the queue, preparing it for pickling first.
        """
        try:
            self.enqueue(self.prepare(record))
        except Exception:
            self.handleError(record)
|
""" Crappy module to get info on a steam game. """
from typing import Optional
from nio import MatrixRoom
from dors import command_hook, startup_hook, Jenny, HookMessage
import time
import requests
ts_steam = 0
ts_apps = {}
@startup_hook()
async def on_start(bot):
    """Warm the Steam app-list cache when the bot starts."""
    updateapps()
def updateapps():
    """Download Steam's full app list and rebuild the name -> appid cache.

    Updates the module globals: ts_apps (lowercased name -> appid) and
    ts_steam (timestamp of when the cache was built).
    """
    global ts_steam, ts_apps
    search1 = requests.get("https://api.steampowered.com/ISteamApps/GetAppList/v2/").json()
    applist = {}
    for l in search1['applist']['apps']:
        applist[l['name'].lower()] = l['appid']
    ts_steam = time.time()  # remember when the cache was refreshed
    ts_apps = applist
@command_hook(['steam', 'game'], help="Returns game info from Steam.")
async def steam(bot: Jenny, room: MatrixRoom, event: HookMessage):
    """Look a game up by cached name or by numeric appid and post its info."""
    global ts_steam, ts_apps
    game = " ".join(event.args)
    try:
        appid = ts_apps[game.lower()]
    except KeyError:
        # Not a known name -- maybe the user passed a numeric appid directly.
        try:
            appid = int(game.lower())
        except ValueError:
            return await bot.say("Couldn't find \002{0}\002".format(game))
    await bot.say(getAppInfo(appid))
def getAppInfo(appid, error=True):
    """Build a formatted (IRC-control-coded) info string for a Steam app.

    Args:
        appid: numeric Steam app id.
        error: when True, failures return an error string and the output
            includes the type prefix and store URL; when False, failures
            return 0 and those extras are omitted.

    Returns:
        Formatted info string, or (if error is False) 0 on lookup failure.
    """
    info = requests.get("https://store.steampowered.com/api/appdetails?appids={0}&cc=US&l=english".format(appid)).json()
    try:
        if not info[str(appid)]['success']:
            return "Error getting info for \002{0}\002".format(appid) if error else 0
    except KeyError:
        return "Error getting info for \002{0}\002".format(appid) if error else 0
    info = info[str(appid)]['data']
    resp = "[{0}] ".format(info['type']) if error else ""
    resp += "\002{0}\002".format(info['name'])
    if info['is_free']:
        resp += " (\00303free!\003)."
    else:
        resp += " (\002{0}\002 {1}".format(info['price_overview']['initial']/100, info['price_overview']['currency'])
        if info['price_overview']['discount_percent'] != 0:
            resp += " || \00303{0}% off\003!, \002{1}\002 {2}".format(info['price_overview']['discount_percent'],
                                                                      info['price_overview']['final']/100, info['price_overview']['currency'])
        resp += ")."
    resp += " Platforms:"
    if info['platforms']['windows']:
        resp += " Windows,"
    if info['platforms']['mac']:
        resp += " Mac,"
    # BUG FIX: the original tested info['platforms']['mac'] a second time
    # here, so Linux support was (wrongly) reported based on the Mac flag.
    if info['platforms']['linux']:
        resp += " Linux,"
    resp = resp[:-1] + "."
    if info['genres']:
        resp += " Genres:"
        for genre in info['genres']:
            resp += " {0},".format(genre['description'])
        resp = resp[:-1] + "."
    try:
        resp += " Metacritic: \002{0}\002/100.".format(info['metacritic']['score'])
    except KeyError:
        # Not every app has a metacritic block (narrowed from a bare except).
        pass
    qq = requests.get("https://api.steampowered.com/ISteamUserStats/GetNumberOfCurrentPlayers/v1/?appid={0}".format(appid)).json()
    if qq['response']['result'] == 1:
        resp += " [\002{0}\002 people playing]".format(qq['response']['player_count'])
    if error:
        resp += " https://store.steampowered.com/app/{0}/".format(appid)
    return resp
|
# -*- coding: utf-8 -*-
# Distance between two 2-D points read from stdin.
# SECURITY FIX: the original used eval(input()) on raw user input, which
# executes arbitrary code; float() parses the expected numeric input safely.
# (Inputs are now always treated as floats.)
a = float(input())
b = float(input())
c = float(input())
d = float(input())
e = ((a-c)**2+(b-d)**2)**0.5
print("(", a, ",", b, ")")
print("(", c, ",", d, ")")
print("Distance = ", e)
|
x = 2  # simple integer assignment
# The original had a bare `x` expression statement here; at module level it
# evaluates the name and discards the result, so it was removed as dead code.
print('This is a string')
|
#Complete truth table
#1TRUE
#2TRUE
#3TRUE
#4TRUE
#5TRUE
#6TRUE
#7FALSE
#8TRUE |
from tkinter import *
def wait_finish(channel):
    # Busy-wait until the channel reports it is no longer busy.
    # NOTE(review): spins the CPU, and nothing in this file calls it.
    while channel.get_busy():
        pass

def correct():
    # Increment the "correct" counter shown in the left label.
    num_good.set(num_good.get()+1)

def wrong():
    # Increment the "wrong" counter shown in the right label.
    num_bad.set(num_bad.get()+1)

# --- GUI setup ---
app=Tk()
app.title("TVN Game Show")
app.geometry('300x100+200+100')
num_good=IntVar()
num_good.set(0)  # start the "correct" counter at 0; each button press adds 1
num_bad=IntVar()
num_bad.set(0)
lab=Label(app,text='when you are ready, click on the button',height=3)
lab.pack()
# Tk offers many widgets: Label, drop-down list, text box, menu, combo box, dialog box.
lab1=Label(app,textvariable=num_good)
lab1.pack(side='left')
lab2=Label(app,textvariable=num_bad)
lab2.pack(side='right')
b1=Button(app,text="correct",width=10,command=correct)
b1.pack(side='left',padx=10,pady=10)
b2=Button(app,text="wrong",width=10,command=wrong)
b2.pack(side='right',padx=10,pady=10)
app.mainloop()
|
from application.caches.cache import Cache
class CacheManager(Cache):
    """
    Hold a hierarchy of caches, queried in the order they were added.
    Each cache should conform to application.caches.cache.py
    """

    def __init__(self):
        # Ordered list of cache levels; index 0 is queried first.
        self._caches = []

    def add_cache(self, cache):
        """Append `cache` as the next (lower-priority) level."""
        self._caches.append(cache)

    def add(self, key, value, timeout=None):
        """
        Add a value to all caches.

        Args:
            key: key of value to be stored
            value: value to be stored
            timeout: how long cache should keep the value
                NOTE(review): `timeout` is accepted but never forwarded to
                the underlying caches -- confirm whether their add() should
                receive it.
        """
        for level in self._caches:
            level.add(key, value)
        return True

    def get(self, key, numberOfLevels=None):
        """
        Query up to `numberOfLevels` cache levels for `key`; return the first
        hit, or None when no level (within the limit) holds the key.
        """
        for depth, level in enumerate(self._caches):
            # Stop early once the requested number of levels was inspected.
            if numberOfLevels is not None and depth >= numberOfLevels:
                return None
            hit = level.get(key)
            if hit is not None:
                return hit
        # No level held the key.
        return None
# Lazily-created process-wide singleton (see getDefaultCacheManager).
_cache_manager_instance = None
def getDefaultCacheManager():
    """Return the shared CacheManager, creating it (with a LocalCache level)
    on first use."""
    global _cache_manager_instance
    from application.caches.local_cache import LocalCache
    # from application.caches.google_memcache import GoogleMemcache
    if _cache_manager_instance is None:
        _cache_manager_instance = CacheManager()
        local_cache = LocalCache()
        # google_memcache = GoogleMemcache()
        _cache_manager_instance.add_cache(local_cache)
        # _cache_manager_instance.add_cache(google_memcache)
    return _cache_manager_instance
|
def Merge(left, right):
    """Merge two sorted lists into one sorted list.

    Stable with respect to the inputs: on ties the element from `left` is
    taken first.
    """
    merged = []
    i = j = 0
    # Take the smaller head element until one side runs out.
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # Append whatever remains on either side (at most one is non-empty).
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def MergeSort(arr):
    """Return a sorted version of `arr` via recursive merge sort.

    Lists shorter than 4 elements are handled by the built-in sort; note
    that in that base case the input list itself is sorted in place and
    returned, matching the original behaviour.
    """
    if len(arr) < 4:
        arr.sort()
        return arr
    mid = len(arr) // 2
    return Merge(MergeSort(arr[:mid]), MergeSort(arr[mid:]))
# Demo: print the sorted version of a mixed list of positive/negative ints.
print(MergeSort([3,2,1,4,0,-5,4,77,100,99999,-45,6,7,8]))
|
# it's getting harder
# object oriented programming
"""
paradigms:
imperative - statements, loops, functions as subroutines
functional - pure functions, higher-order functions, recursions
oop - objects
"""
# syntax for classes:
class Classname:
    """Teaching example: a minimal class demonstrating a class attribute,
    an __init__ constructor, and plain instance methods."""

    # Shared by the class and every instance (unless shadowed on an instance).
    classattribute = "classattribute"

    def __init__(self, attribute1, attribute2):
        """Store both constructor arguments on the new instance."""
        self.attribute1 = attribute1
        self.attribute2 = attribute2

    def anothermethod(self):
        """An instance method whose only effect is printing its own name."""
        print("anothermethod")

    def usingclassattribute(self):
        """Read the class-level attribute through the instance."""
        return self.classattribute
# Demonstration: two independent instances built from the same class.
var = Classname("argument1", "argument2") # uses the class (blueprint) to define an object and bind it to a variable
var2 = Classname("argument3", "argument4") # the same class (blueprint) can create different objects with different values
# print each instance's own attribute values
print(var.attribute1)
print(var.attribute2)
print(var2.attribute1)
print(var2.attribute2)
print(var.classattribute) # instance of the class accessing classattribute
var.anothermethod() # instance of the class accessing method anothermethod
print(var.usingclassattribute()) # instance of the class accessing method usingclassattribute
# facts:
# objects are created using classes
# classes contain methods (which are functions)
# the class describes what the object will be, but is separate from the object itself (like a blueprint)
# __init__ method = initializer = constructor > creates instance(object) of the class
# instances = objects
# instances of a class have attributes, which are pieces of data associated with them
# in an __init__ method, self.attribute can be used to set the initial value of an instance's attributes
# within a method definition, self refers to the instance calling the method
print("")
class Simpleclass:
    """Second teaching example: one class attribute plus two per-instance
    attributes set by the constructor."""

    feuerblumen = 'awesome'  # class attribute shared by every instance

    def __init__(self, name, mood):
        self.name = name
        self.mood = mood

    def itis(self):
        # end="" keeps the instance's name on the same output line.
        print("itishowitis, ", end="")
        print(self.name)
# Demonstration: two Simpleclass instances sharing the class attribute.
manu = Simpleclass("manu", "exhausted")
print(manu.name)
print("is")
print(manu.mood)
manu.itis()
print(manu.name + "is" + manu.feuerblumen)
print("")
johnny = Simpleclass("johnny", "chillaf")
print(johnny.name)
print("is")
print(johnny.mood)
johnny.itis()
print(johnny.name + "is" + johnny.feuerblumen)
print("")
def firstDict(a, b):
    """Build a one-entry dict mapping key `a` to value `b`."""
    return {a: b}
# Read key/value for the first dict from the user (prompts are in Russian).
a = input("Введите ключ для первого словаря:")
b = input("Введите значение для первого словаря:")
firstDictReady = firstDict(a, b)
def secondDict(c, d):
    """Build a one-entry dict mapping key `c` to value `d`."""
    return {c: d}
# Read key/value for the second dict from the user (prompts are in Russian).
c = input("Введите ключ для второго словаря: ")
d = input("Введите значение для второго словаря: ")
secondDictReady = secondDict(c, d)
def merge_two_dicts(firstDictReady, secondDictReady):
    """Return a new dict containing both dicts' entries.

    Neither input is modified; when both contain the same key, the value
    from `secondDictReady` wins.
    """
    return {**firstDictReady, **secondDictReady}
# Print the merged result of the two user-built dicts.
print(merge_two_dicts(firstDictReady, secondDictReady))
|
# -*- coding: utf-8 -*-
from collections import deque
class Solution:
    def floodFill(self, image, sr, sc, newColor):
        """Flood-fill `image` starting at (sr, sc) with `newColor`.

        Returns a new grid; `image` itself is never modified. Cells are
        recolored iff they are 4-connected to the start cell through cells
        of the start cell's original color.
        """
        rows, cols = len(image), len(image[0])
        # Work on a deep copy so the input grid stays untouched.
        filled = [row[:] for row in image]
        stack = deque([(sr, sc)])
        while stack:
            r, c = stack.pop()
            filled[r][c] = newColor
            # Visit the four axis-aligned neighbours that share the original
            # color and have not been recolored yet.
            for rr, cc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                if (
                    0 <= rr < rows
                    and 0 <= cc < cols
                    and image[rr][cc] == image[r][c]
                    and filled[rr][cc] != newColor
                ):
                    stack.append((rr, cc))
        return filled
if __name__ == "__main__":
    # Smoke test: fill the connected 1-region around the center with 2.
    solution = Solution()
    assert [[2, 2, 2], [2, 2, 0], [2, 0, 1],] == solution.floodFill(
        [
            [1, 1, 1],
            [1, 1, 0],
            [1, 0, 1],
        ],
        1,
        1,
        2,
    )
|
"""Runs Treadmill application register daemon.
"""
# TODO: it no longer registers anything, just refreshes tickets. Need to
# rename.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import signal
import sys
import time
import traceback
import click
from treadmill import context
from treadmill import exc
from treadmill import subproc
from treadmill import supervisor
from treadmill import tickets
from treadmill import utils
from treadmill import zkutils
from treadmill.appcfg import abort as app_abort
from treadmill.appcfg import manifest as app_manifest
# Module-level logger for this daemon.
_LOGGER = logging.getLogger(__name__)
#: Ticket refresh interval: 3 hours.
_TICKETS_REFRESH_INTERVAL = 60 * 60 * 3
def _start_service_sup(container_dir):
    """Safely start services supervisor.

    Sends a 'once' control action to the container's start_container service.

    Raises:
        exc.ContainerSetupError: when the supervisor control call fails.
    """
    try:
        supervisor.control_service(
            os.path.join(container_dir, 'sys', 'start_container'),
            supervisor.ServiceControlAction.once
        )
    except subproc.CalledProcessError:
        raise exc.ContainerSetupError('start_container')
def _get_tickets(manifest, container_dir):
    """Request Kerberos tickets for the app's principals.

    Returns False when the manifest lists no ticket principals (nothing to
    refresh later), True when tickets for every principal were fetched and
    validated in the container's spool directory.

    Raises:
        exc.ContainerSetupError: when the ticket request fails or any fetched
            ticket is missing/expired (with AbortedReason.TICKETS).
    """
    principals = set(manifest.get('tickets', []))
    if not principals:
        return False
    tkts_spool_dir = os.path.join(
        container_dir, 'root', 'var', 'spool', 'tickets')
    try:
        tickets.request_tickets(
            context.GLOBAL.zk.conn,
            manifest['name'],
            tkts_spool_dir,
            principals
        )
    except Exception:
        _LOGGER.exception('Exception processing tickets.')
        raise exc.ContainerSetupError('Get tickets error',
                                      app_abort.AbortedReason.TICKETS)
    # Check that all requested tickets are valid.
    for princ in principals:
        krbcc_file = os.path.join(tkts_spool_dir, princ)
        if not tickets.krbcc_ok(krbcc_file):
            _LOGGER.error('Missing or expired tickets: %s, %s',
                          princ, krbcc_file)
            raise exc.ContainerSetupError(princ,
                                          app_abort.AbortedReason.TICKETS)
        else:
            _LOGGER.info('Ticket ok: %s, %s', princ, krbcc_file)
    return True
def _refresh_tickets(manifest, container_dir):
    """Refreshes the tickets with the given frequency.

    Re-requests tickets for all of the manifest's principals into the
    container's ticket spool directory.
    """
    tkts_spool_dir = os.path.join(container_dir, 'root', 'var', 'spool',
                                  'tickets')
    # we do not abort here as we will make service fetch ticket again
    # after register service is started again
    principals = set(manifest.get('tickets', []))
    tickets.request_tickets(context.GLOBAL.zk.conn,
                            manifest['name'],
                            tkts_spool_dir,
                            principals)
def sigterm_handler(_signo, _stack_frame):
    """Log the term signal and raise SystemExit(0) so cleanup code runs."""
    _LOGGER.info('Got term signal.')
    raise SystemExit(0)
def init():
    """App main.

    Builds and returns the 'presence' click command group with its single
    'register' subcommand.
    """
    @click.group(name='presence')
    def presence_grp():
        """Register container/app presence."""
        # Exit the process if the zookeeper connection is lost.
        context.GLOBAL.zk.conn.add_listener(zkutils.exit_on_lost)

    @presence_grp.command(name='register')
    @click.option('--refresh-interval', type=int,
                  default=_TICKETS_REFRESH_INTERVAL)
    @click.argument('manifest', type=click.Path(exists=True))
    @click.argument('container-dir', type=click.Path(exists=True))
    def register_cmd(refresh_interval, manifest, container_dir):
        """Register container presence."""
        try:
            _LOGGER.info('Configuring sigterm handler.')
            signal.signal(utils.term_signal(), sigterm_handler)
            app = app_manifest.read(manifest)
            # If tickets are not ok, app will be aborted.
            #
            # If tickets acquired successfully, services will start, and
            # tickets will be refreshed after each interval.
            refresh = False
            try:
                refresh = _get_tickets(app, container_dir)
                _start_service_sup(container_dir)
            except exc.ContainerSetupError as err:
                app_abort.abort(
                    container_dir,
                    why=err.reason,
                    payload=traceback.format_exc()
                )
            while True:
                # Need to sleep anyway even if not refreshing tickets.
                time.sleep(refresh_interval)
                if refresh:
                    _refresh_tickets(app, container_dir)
        finally:
            _LOGGER.info('Stopping zookeeper.')
            context.GLOBAL.zk.conn.stop()

    # click keeps its own reference; delete the local name to satisfy linters.
    del register_cmd
    return presence_grp
|
from .models import *
from django.contrib import admin
@admin.register(Film)
class FilmAdmin(admin.ModelAdmin):
    """Admin changelist for films: searchable and ordered by name and URL."""
    list_display = ('film_name', 'url')
    search_fields = ('film_name', 'url')
    ordering = ('film_name', 'url')
@admin.register(Dictribution)
class DictributionAdmin(admin.ModelAdmin):
    """Admin for mailing distributions.

    NOTE(review): the model name 'Dictribution' looks like a typo for
    'Distribution'; it is defined in models.py, so it is kept as-is here.
    """
    list_display = ('name', 'send_time', 'is_send')
    search_fields = ('name',)
    ordering = ('name', 'send_time')
    # Edit form: technical fields grouped separately from the mailing body.
    fieldsets = [
        ('Техническая информация', {
            'fields': ('name', 'send_time', 'is_send')
        }),
        ('Тело рассылки', {
            'fields': (
                'heading_text', 'main_text', 'content_url',
                'button_url', 'button_text')
        })
    ]
@admin.register(Message)
class MessageAdmin(admin.ModelAdmin):
    """Admin for messages; the changelist shows a 30-char preview of the text."""
    def show_text(self, obj):
        # Truncate the message body for the list_display column.
        return obj.text[:30]
    list_display = ('unique_name', 'show_text')
    search_fields = ('unique_name', 'text')
    ordering = ('unique_name',)
|
#prepare wormbase file
from sys import argv
def define_DOID(infile):
    """Parse an OBO-style ontology file and map term ids to names.

    Scans `infile` for "id:" lines and the "name: " line that follows each,
    returning a dict {term_id: term_name}.

    BUG FIX: the original split the name line on every space and kept only
    l[1], truncating multi-word names to their first word; splitting once
    preserves the full name. The file is also opened with a context manager
    so it is closed even if parsing raises.
    """
    dic = {}
    idd = ""
    with open(infile) as f:
        for line in f:
            if line.startswith("id:"):
                # "id: DOID:0001" -> "DOID:0001"
                idd = line.split(" ", 1)[1].strip()
            elif line.startswith("name: "):
                # Keep everything after the first space as the name.
                dic[idd] = line.split(" ", 1)[1].strip()
                idd = ""
    return dic
def read_infile(infile, doid):
    """Parse a tab-separated wormbase association file into gene annotations.

    Args:
        infile: path to the association file; lines starting with "!" or
            "Taxon" are headers and skipped, and only rows whose second
            column is "gene" are used.
        doid: dict mapping DOID ids to names (see define_DOID).

    Returns:
        dict mapping gene id (column 3) to a list of unique
        "<column 8, underscores as spaces> <DOID name for column 10>" strings.

    Improvements over the original: the file is opened with a context
    manager, the repeated annotation expression is computed once, and the
    `startswith(...) == False` comparisons are replaced with idiomatic skips.
    """
    dic = {}
    with open(infile) as f:
        for raw in f:
            # Skip comment/header lines.
            if raw.startswith("!") or raw.startswith("Taxon"):
                continue
            line = raw.split("\t")
            if line[1] != "gene":
                continue
            entry = line[8].replace("_", " ") + " " + doid[line[10]]
            annotations = dic.setdefault(line[3], [])
            if entry not in annotations:
                annotations.append(entry)
    return dic
def writer(dic):
    """Print each key with at least one annotation as "key<TAB>v1;v2;..."."""
    for gene, annotations in dic.items():
        if annotations:
            print(gene + "\t" + ";".join(annotations))
# Entry point: argv[1] = association file, argv[2] = DOID ontology file.
writer(read_infile(argv[1], define_DOID(argv[2])))
|
import tensorflow as tf
import numpy as np
import numpy.random as rnd
import numpy.linalg
import os.path
import scipy.misc
import tkinter as tk
import PIL.Image, PIL.ImageTk
# Trained decoder parameters exported by the training script.
values = np.load('celeba_variables.npz')
sess = tf.InteractiveSession()  # TF1-style interactive session
def random_filelist(batch_size):
    """Return `batch_size` random CelebA-style filenames.

    Names are zero-padded six-digit numbers between 000001.png and
    202599.png, drawn uniformly at random.
    """
    draws = np.random.uniform(1, 202599.99, batch_size).astype(int)
    return np.array(['%06i.png' % n for n in draws])
def nums_to_filelist(index):
    """Convert an iterable of integers into zero-padded CelebA png file names."""
    names = ['%06i.png' % number for number in index]
    return np.array(names)
# def weight_variable(shape):
# initial = tf.truncated_normal(shape, stddev=0.1)
# return tf.Variable(initial)
#
# def bias_variable(shape):
# initial = tf.constant(0.1, shape=shape)
# return tf.Variable(initial)
def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding (spatial size preserved)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def deconv2d(x, W, output_shape):
    """Transposed convolution with stride 2, VALID padding; the caller supplies
    output_shape (used below to upsample 16->32 and 32->64)."""
    return tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, 2, 2, 1], padding='VALID')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2; halves each spatial dimension."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Width of the latent code (output of the encoder's final FC layer).
num_fc2 = 512
# Way back: decoder from latent space to image.
channels = 48  # 8 * 3 RGB planes, so: MUST BE DIVISIBLE BY 3!
# Latent-space statistics estimated during training.
mu = values['mu']
sigma = values['sigma']
# M = np.linalg.cholesky(sigma)
# Eigendecomposition of the latent covariance; the sliders later move the
# code along these eigenvectors.
eigenvals, eigenvects = np.linalg.eigh(sigma)
# y_conv_v = np.reshape(np.array(rnd.multivariate_normal(mu, sigma), dtype=np.float32), [1, num_fc2])
# Placeholder for the latent code fed into the decoder.
y_conv = tf.placeholder(tf.float32, [None, num_fc2])
# Fully connected layer mapping the code to a 16x16 feature map.
W_fc1_r = tf.Variable(values['W_fc1_r_v'])
b_fc1_r = tf.Variable(values['b_fc1_r_v'])
h_fc1_r_ = tf.matmul(y_conv, W_fc1_r)
h_fc1_r = tf.add(h_fc1_r_, b_fc1_r)
h_fc1_r_flat = tf.reshape(h_fc1_r, [-1, 16, 16, channels])
# First transposed convolution: 16x16 -> 32x32.
W_conv2_r = tf.Variable(values['W_conv2_r_v'])
b_conv2_r = tf.Variable(values['b_conv2_r_v'])
output_shape_conv2r = [1, 32, 32, channels]
h_conv2_r = tf.nn.relu(deconv2d(h_fc1_r_flat, W_conv2_r, output_shape_conv2r) + b_conv2_r)  # deconvolution 1
# Second transposed convolution: 32x32 -> 64x64.
W_conv1_r = tf.Variable(values['W_conv1_r_v'])
b_conv1_r = tf.Variable(values['b_conv1_r_v'])
output_shape_conv1r = [1, 64, 64, channels]
h_conv1_r = deconv2d(h_conv2_r, W_conv1_r, output_shape_conv1r) + b_conv1_r  # deconvolution 2
# output_img = tf.nn.softmax(tf.reshape(tf.reduce_mean(h_conv1_r, axis=3, keep_dims=True), [-1]), name='output_img')
# output_img = tf.reshape(tf.reduce_mean(h_conv1_r, axis=3, keep_dims=True), [-1], name='output_img')
# Average the channels//3 replicas of each RGB plane into one 64x64x3 image.
output_img = tf.reshape(h_conv1_r, [1, 64, 64, 3, channels//3])
output_img = tf.reduce_mean(output_img, axis=4, name='output_img')
sess.run(tf.global_variables_initializer())
# GUI state shared by the callbacks below.
image_data = None
image_scaled = None
scale = 1
def compute_image(_=None):
    """Tk callback: decode the current slider settings into a face image.

    Reads the latent sliders, maps them along the covariance eigenvectors
    around the latent mean, runs the decoder graph, and pastes the result
    into the displayed Tk image. The `_` parameter absorbs the value Tk
    passes to Scale callbacks.
    """
    global image_data, image_scaled
    y_start = np.zeros(num_fc2)
    for i in range(num_sliders):
        # Sliders drive the LAST num_sliders eigen-directions (np.linalg.eigh
        # returns eigenvalues in ascending order, so the largest come last);
        # 10000 scales the [-1, 1] slider range to latent magnitude.
        y_start[num_fc2-i-1] = sliders[i].get()*10000
        # y_start[i] = sliders[i].get()*10000
    y_conv_v = np.array([np.matmul(eigenvects, y_start) + mu])
    img_array = sess.run(output_img, feed_dict={y_conv: y_conv_v})
    # Convert the (64, 64, 3) array into the list-of-RGB-tuples form
    # that PIL's putdata expects.
    image_data = []
    for i in range(64):
        for j in range(64):
            image_data.append(tuple(img_array[0, i, j, :]))
    image_unscaled.putdata(image_data)
    image_scaled = image_unscaled.resize((64*scale, 64*scale))
    image_tk.paste(image_scaled)
def scale_image(newscale):
    """Tk callback for the size slider: redisplay the image at zoom `newscale`.

    Parameters:
        newscale: slider value (string or number); coerced to int.
    """
    global scale, image_tk, image_scaled
    scale = int(newscale)
    canvas.config(width=64*scale, height=64*scale)
    image_scaled = image_unscaled.resize((64*scale, 64*scale))
    # A fresh PhotoImage is required because the canvas image size changed.
    image_tk = PIL.ImageTk.PhotoImage(image_scaled)
    canvas.delete('all')
    canvas.create_image(64*scale*0.5, 64*scale*0.5, image=image_tk)
def reset_sliders(_=None):
    """Snap every latent-direction slider back to zero (Tk button callback)."""
    for slider in sliders:
        slider.set(0)
def save_image(_=None):
    """Save the current unscaled image as faceNNNNNN.png, using the first
    file number not already present in the working directory."""
    counter = 0
    while os.path.exists('face%06i.png' % counter):
        counter += 1
    image_unscaled.save('face%06i.png' % counter)
# --- Tk GUI: control buttons and canvas on the left, latent sliders on the right. ---
window = tk.Tk()
left = tk.Frame(window)
right = tk.Frame(window)
left.pack(side='left', anchor='nw')
right.pack(side='right')
menuframe = tk.Frame(left)
menuframe.pack(anchor='nw')
button_reset = tk.Button(menuframe, text='reset sliders', command=reset_sliders)
button_reset.pack(anchor='nw')
button_save = tk.Button(menuframe, text='save image', command=save_image)
button_save.pack(anchor='nw')
# Zoom factor for the displayed image (1x..10x).
slider_size = tk.Scale(menuframe, from_=1, to=10, orient='horizontal', command=scale_image)
slider_size.pack(anchor='nw')
canvas = tk.Canvas(left, width=64, height=64)
canvas.pack()
image_unscaled = PIL.Image.new('RGB', (64*scale, 64*scale))
image_tk = PIL.ImageTk.PhotoImage(image_unscaled)
canvas.create_image(64*scale*0.5, 64*scale*0.5, image=image_tk)
# One slider per latent eigen-direction; moving any slider recomputes the image.
sliders = []
num_sliders = 15
for i in range(num_sliders):
    sliders.append(tk.Scale(right, from_=-1, to=1, orient='horizontal', resolution=0.001, command=compute_image))
    sliders[i].pack()
window.mainloop()
|
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
class RemoveProductFromCart:
    """Page object for the litecart checkout page; can remove a cart item."""

    # Locator of the per-item remove button on the checkout page.
    REMOVE_BUTTON = (By.XPATH, "//button[@name='remove_cart_item' and @type='submit']")

    def __init__(self, driver):
        self.driver = driver
        self.wait = WebDriverWait(driver, 10)

    def open(self):
        """Navigate to the checkout page; returns self for chaining."""
        self.driver.get("http://localhost/litecart/en/checkout")
        return self

    def remove_product(self):
        """Click the remove button of the first cart item; returns self for chaining."""
        self.driver.find_element(*self.REMOVE_BUTTON).click()
        return self
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField, SelectField, IntegerField, DecimalField, DateTimeField
from wtforms.validators import DataRequired, Length,Email, EqualTo, ValidationError
from wtforms.ext.sqlalchemy.fields import QuerySelectField
#from wtforms_components import CountryField
from ..models import Transit, Trucking
class AjouTrucking(FlaskForm):
    """Form for creating a trucking record (origin, destination, date, city)."""
    provenance= StringField('Provenance', validators=[DataRequired("Completer la ville")])
    # Label fixed: this field is the destination, not the provenance
    # (copy-paste slip in the original label).
    destination=StringField('Destination', validators=[DataRequired("Completer la ville")])
    date_envoi=DateTimeField('Date', format='%Y-%m-%d %H:%M:%S', validators=[DataRequired("Completer la date")])
    ville= StringField('Ville', validators=[DataRequired("Completer la ville")])
    submit = SubmitField('Trucking number')
class Datelivraison(FlaskForm):
    """Form to set the delivery date of a trucking record."""
    date_livraison=DateTimeField('Date', format='%Y-%m-%d %H:%M:%S', validators=[DataRequired("Completer la date")])
    submit = SubmitField('Trucking number')
class TransitTrucking(FlaskForm):
    """Form to record a transit step (current location, note, date)."""
    # NOTE(review): the 'Provenance' label on en_transit looks copy-pasted
    # from AjouTrucking — confirm the intended label.
    en_transit=StringField('Provenance', validators=[DataRequired("Completer la ville")])
    ville= StringField('Ville', validators=[DataRequired("Completer la ville")])
    resume= TextAreaField(validators=[DataRequired("Commentaire ")])
    date_envoi=DateTimeField('Date', format='%Y-%m-%d %H:%M:%S', validators=[DataRequired("Completer la date")])
    submit = SubmitField('Trucking number')
class TransitModTrucking(FlaskForm):
    """Form to modify a transit step of an existing trucking record."""
    # NOTE(review): both fields below carry the label 'Provenance' — this
    # looks like a copy-paste slip; confirm the intended labels.
    truckingnumber= StringField('Provenance', validators=[DataRequired("Completer la ville")])
    en_transit=StringField('Provenance', validators=[DataRequired("Completer la ville")])
    ville= StringField('Ville', validators=[DataRequired("Completer la ville")])
    resume= TextAreaField(validators=[DataRequired("Commentaire ")])
    submit = SubmitField('Trucking number')
class MofTrucking(FlaskForm):
    """Form to modify the basic data of a trucking record."""
    # NOTE(review): truckingnumber is labelled 'Provenance' — looks like a
    # copy-paste slip; confirm the intended label.
    truckingnumber= StringField('Provenance', validators=[DataRequired("Completer la ville")])
    provenance= StringField('Provenance', validators=[DataRequired("Completer la ville")])
    destination=StringField('Destination', validators=[DataRequired("Completer la ville")])
    ville= StringField('Ville', validators=[DataRequired("Completer la ville")])
    submit = SubmitField('Trucking number')
|
# Program 5: records Austin Area Whole Foods product data per store.
print('REQ 1: This is program 5 - Alec Jones')
print('REQ 2: This program records Austin Area Whole Foods product data.')
number_of_stores = int(input('REQ 3: Enter the number of store: '))
store_counter = 0
while store_counter < number_of_stores:
    store_name = input('REQ 4: Enter the store name: ')
    store_phone_number = input('REQ 4: Enter the store phone number: ')
    store_address = input('REQ 4: Enter the store address: ')
    veg_counter = 0
    veg_name_list = []
    veg_plu_list = []
    veg_order_quantity_list = []
    # -1 is the sentinel that ends product entry for this store.
    products_to_enter = int(input('REQ 5: Do you have products to enter? (-1 to end) '))
    while products_to_enter != -1:
        veg_name = input('REQ 5: Enter vegetable name: ')
        veg_name_list.append(veg_name)
        veg_plu = input('REQ 5: Enter vegetable PLU: ')
        veg_plu_list.append(veg_plu)
        veg_order_quantity = input('REQ 5: Enter amount to order: ')
        veg_order_quantity_list.append(veg_order_quantity)
        veg_counter += 1
        products_to_enter = int(input('REQ 5: Do you have products to enter? (-1 to end) '))
    print('REQ 6: {}'.format(store_name))
    print('REQ 6: {}'.format(store_phone_number))
    print('REQ 6: {}'.format(store_address))
    counter = 1
    while counter <= veg_counter:
        print('REQ 7: Vegetable Name {}: {}'.format(counter, veg_name_list[counter - 1]))
        print('REQ 7: Vegetable PLU{}: {}'.format(counter, veg_plu_list[counter - 1]))
        print('REQ 7: Amount to Order{}: {}'.format(counter, veg_order_quantity_list[counter - 1]))
        counter += 1
    # BUG FIX: store_counter was never advanced, so the outer loop repeated
    # the first store forever; advance it once per store.
    store_counter += 1
print('REQ 8:')
|
def failed_login(username, pwd, err_msg, login_page):
    """Drive a failed-login attempt through the login page object and verify
    that the expected error message is shown.

    Parameters:
        username: account name typed into the username field.
        pwd: password typed into the password field.
        err_msg: error text expected after the rejected login.
        login_page: page object exposing the input/click/check helpers below.
    """
    login_page.input_username(account=username)
    login_page.input_pwd(pwd=pwd)
    # Dismiss the on-screen keyboard so the login button is reachable
    # (presumably a mobile UI — confirm against the page object).
    login_page.close_keyboard()
    login_page.click_login_button()
    login_page.close_keyboard()
    login_page.check_error_msg(err_msg=err_msg)
|
import time
# Create your tests here.
import time
import copy
class Result(dict):
    """Dict subclass allowing attribute-style read access (r.key == r["key"])."""

    def __getattr__(self, key):
        # __getattr__ must raise AttributeError (not KeyError) for missing
        # names so hasattr(), getattr(..., default) and the copy/pickle
        # machinery's protocol probes behave correctly.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key) from None

    def __deepcopy__(self, memo):
        # Deep-copy the underlying mapping while preserving the Result type.
        return Result(copy.deepcopy(dict(self)))
result = {
"eventid": "11",
"count": 0,
"username": "anonymous",
"water_level": False,
"start_time": 1530182460,
"ts": 1532596798,
"sessionid": 12,
"meta": [
{
"id": 2196,
"meta_source_id": 11,
"meta_source_type": "EventTab",
"meta_type": "event#text",
"meta_key": "endTime",
"meta_value": "{\"content\":1530354933}"
},
{
"id": 2198,
"meta_source_id": 11,
"meta_source_type": "EventTab",
"meta_type": "event#text",
"meta_key": "rankingNum",
"meta_value": "{\"content\":50}"
},
{
"id": 2197,
"meta_source_id": 11,
"meta_source_type": "EventTab",
"meta_type": "event#text",
"meta_key": "startTime",
"meta_value": "{\"content\":1530182160}"
}
],
"coins_quantity": "0.01",
"black_user": False,
"distributed_stock": 9500,
"id": 11,
"desc": "",
"slot": {
"id": 12,
"name": "sess",
"meta": [
{
"id": 2221,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "bgm#audio",
"meta_key": "gameBgm",
"meta_value": "{\"md5\":\"c3e2fe921d4becc7a1da05679c040990/Babyshark.aac\"}"
},
{
"id": 2222,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "bgm#audio",
"meta_key": "lpBgm",
"meta_value": "{\"md5\":\"f7986cc11293bbee5c1e22ca0830df7b/lp_bgm.mp3\"}"
},
{
"id": 2220,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "bgm#audio",
"meta_key": "shakeAudio",
"meta_value": "{\"md5\":\"d6ab8422c3e783a9f1a8691e7742c5dc/shake_voice.mp3\"}"
},
{
"id": 2217,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "bgm#audio",
"meta_key": "timeUpAudio",
"meta_value": "{\"md5\":\"c06120933e1e1ac94dad83a7a228f6e7/Time's up.mp3\"}"
},
{
"id": 2219,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "bgm#image",
"meta_key": "bgmOffBg",
"meta_value": "{\"md5\":\"3ee680c7e696005a9922c6dd3f1ffb6f\"}"
},
{
"id": 2218,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "bgm#image",
"meta_key": "bgmOnBg",
"meta_value": "{\"md5\":\"6c5ae75f3da7429e46f431ce0f79b7d5\"}"
},
{
"id": 2209,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "endPage#image",
"meta_key": "endCoinsBg",
"meta_value": "{\"md5\":\"f1cdec0118af02e2903bc8ab677d0174\"}"
},
{
"id": 2207,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "endPage#image",
"meta_key": "endPageLinkButton",
"meta_value": "{\"md5\":\"3c287f247de3d999579e22b8435203c9\"}"
},
{
"id": 2210,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "endPage#text",
"meta_key": "endedMessageTime",
"meta_value": "{\"content\":\"March 2nd, 19:30\"}"
},
{
"id": 2208,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "endPage#text",
"meta_key": "endedMessageTip",
"meta_value": "{\"content\":\"Oh no! This game has ended. The next game will start on\"}"
},
{
"id": 2205,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "endPage#text",
"meta_key": "endPageLink",
"meta_value": "{\"content\":\"https://www.google.com/search?q=shopee\"}"
},
{
"id": 2215,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "gamePage#image",
"meta_key": "backgroundImage",
"meta_value": "{\"md5\":\"3b0bf8819ef9702c8fa28f6632baff78\"}"
},
{
"id": 2216,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "gamePage#image",
"meta_key": "gameFG",
"meta_value": "{\"md5\":\"6bc1da6373d80f9080fde6d18990b054\"}"
},
{
"id": 2213,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "gamePage#image",
"meta_key": "resultPopBG",
"meta_value": "{\"md5\":\"b95338aa995782bbf138c9f5ddce6667\"}"
},
{
"id": 2212,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "gamePage#image",
"meta_key": "shareBtnBg",
"meta_value": "{\"md5\":\"b5d7bb2bd629e41da0f72375779d9df0\"}"
},
{
"id": 2214,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "gamePage#image",
"meta_key": "timeCountDownBG",
"meta_value": "{\"md5\":\"dc8a28f7591372f6a9ff7f9dc0aad189\"}"
},
{
"id": 2211,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "gamePage#image",
"meta_key": "userGuideBG",
"meta_value": "{\"md5\":\"6c788628fce5fffbbe1c195d18e34b15\"}"
},
{
"id": 2223,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "gamePage#image",
"meta_key": "userGuideFG",
"meta_value": "{\"md5\":\"0c2682deafac1e8a223765f9247e5ea1\"}"
},
{
"id": 2202,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "openPage#element#layout",
"meta_key": "statusTextPosition",
"meta_value": "{\"width\":{\"v\":100,\"u\":\"auto\"},\"height\":{\"v\":100,\"u\":\"auto\"},\"left\":{\"v\":\"25\",\"u\":\"%\"},\"top\":{\"v\":\"60\",\"u\":\"%\"},\"zIndex\":{\"v\":1}}"
},
{
"id": 2204,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "openPage#image",
"meta_key": "fontBg",
"meta_value": "{\"md5\":\"d6b226332e71f51a6fb5dd903104265a\"}"
},
{
"id": 2201,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "openPage#image",
"meta_key": "lpShareBg",
"meta_value": "{\"md5\":\"84ba14d1ed6576afadb1b4de80675fbc\"}"
},
{
"id": 2200,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "openPage#image",
"meta_key": "openBoxBg",
"meta_value": "{\"md5\":\"74a192d375970df22dadecee2dadb942\"}"
},
{
"id": 2206,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "openPage#image",
"meta_key": "openPageBg",
"meta_value": "{\"md5\":\"aafb379e49bc8979a30292f54609054d\"}"
},
{
"id": 2203,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "openPage#image",
"meta_key": "ruleBg",
"meta_value": "{\"md5\":\"158290aba1f6a421091bef3717e88cd6\"}"
},
{
"id": 2199,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "openPage#image",
"meta_key": "startButtonBG",
"meta_value": "{\"md5\":\"2f3c246f0c2d608658a827db4690449b\"}"
},
{
"id": 2235,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "other#color",
"meta_key": "complementaryA",
"meta_value": "{\"color\":\"rgba(99, 189, 117, 1)\"}"
},
{
"id": 2237,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "other#color",
"meta_key": "complementaryB",
"meta_value": "{\"color\":\"rgba(219, 160, 36, 1)\"}"
},
{
"id": 2236,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "other#color",
"meta_key": "complementaryC",
"meta_value": "{\"color\":\"rgba(234, 213, 66, 1)\"}"
},
{
"id": 2234,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "other#color",
"meta_key": "complementaryD",
"meta_value": "{\"color\":\"rgba(255, 255, 255, 1)\"}"
},
{
"id": 2233,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "other#color",
"meta_key": "themeColor",
"meta_value": "{\"color\":\"rgba(5, 78, 54, 1)\"}"
},
{
"id": 2275,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "prize#setting",
"meta_key": "shareLpBtn",
"meta_value": "{\"md5\":\"0b270d9bd4164db05bbfa7564518531f\"}"
},
{
"id": 2273,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "prize#setting",
"meta_key": "sharePopBg",
"meta_value": "{\"md5\":\"f7f443dc5aeace563183876bd0a18fc8\"}"
},
{
"id": 2276,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "prize#setting",
"meta_key": "shareResultBtn",
"meta_value": "{\"md5\":\"6bc30c0e24fd7f07fd3283d1b9a9fca7\"}"
},
{
"id": 2274,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "prize#setting",
"meta_key": "specialMsg",
"meta_value": "{\"show\":false,\"content\":\"\",\"threshold\":null}"
},
{
"id": 2231,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "ranking#image",
"meta_key": "rankBg",
"meta_value": "{\"md5\":\"30c9b7627a9a1a1e107c313cfeaa4d91\"}"
},
{
"id": 2232,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "ranking#image",
"meta_key": "rankBottomBg",
"meta_value": "{\"md5\":\"d26551ce6a4c9f268c204e19591681d8\"}"
},
{
"id": 2230,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "ranking#image",
"meta_key": "rankTopBg",
"meta_value": "{\"md5\":\"022db6ada10aa9c17bf6eaf04b04b3cf\"}"
},
{
"id": 2272,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "sessionBasic#text",
"meta_key": "tc",
"meta_value": "{\"content\":\"tc\"}"
},
{
"id": 2226,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "share#image",
"meta_key": "genernalShareBg",
"meta_value": "{\"md5\":\"e58d313540cc90ebf6a4cf9352cfce6c\"}"
},
{
"id": 2229,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "share#image",
"meta_key": "instagramShareBg",
"meta_value": "{\"md5\":\"a264e080ef9b969cc50515dab48985ca\"}"
},
{
"id": 2228,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "share#image",
"meta_key": "resultShareBg",
"meta_value": "{\"md5\":\"4fcec7b1886d62f672e6369825a4f4c9\"}"
},
{
"id": 2224,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "share#image",
"meta_key": "sharePopupBg",
"meta_value": "{\"md5\":\"e14a5b200317199634cdf7019395cd01\"}"
},
{
"id": 2225,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "share#text",
"meta_key": "genernalShareMessage",
"meta_value": "{\"content\":\"genernal Share Message!\"}"
},
{
"id": 2227,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "share#text",
"meta_key": "instagramShareMessage",
"meta_value": "{\"content\":\"I won {game result} coins from Shopee Raining coins!\"}"
},
{
"id": 2263,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_404_msg",
"meta_value": "{\"content\":\"Page is unavailable at the moment. Please try again later!\",\"origin\":\"Page is unavailable at the moment. Please try again later!\"}"
},
{
"id": 2265,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_404_title",
"meta_value": "{\"content\":\"Oops!\",\"origin\":\"Oops!\"}"
},
{
"id": 2270,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_action_guide",
"meta_value": "{\"content\":\"Shake to continue >\",\"origin\":\"Shake to continue >\"}"
},
{
"id": 2262,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_btn_negative_popup",
"meta_value": "{\"content\":\"OK\",\"origin\":\"OK\"}"
},
{
"id": 2264,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_desc_title",
"meta_value": "{\"content\":\"Rules\",\"origin\":\"Rules\"}"
},
{
"id": 2267,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_download_btn",
"meta_value": "{\"content\":\"Download now\",\"origin\":\"Download now\"}"
},
{
"id": 2266,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_game_time_left",
"meta_value": "{\"content\":\"Time left\",\"origin\":\"Time left\"}"
},
{
"id": 2261,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_header_rank",
"meta_value": "{\"content\":\"Rank\",\"origin\":\"Rank\"}"
},
{
"id": 2268,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_header_username",
"meta_value": "{\"content\":\"Username\",\"origin\":\"Username\"}"
},
{
"id": 2269,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_left_coins",
"meta_value": "{\"content\":\"Coins Left\",\"origin\":\"Coins Left\"}"
},
{
"id": 2271,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_msg_network_error",
"meta_value": "{\"content\":\"We're experiencing network connectivity issues. Please try again later!\",\"origin\":\"We're experiencing network connectivity issues. Please try again later!\"}"
},
{
"id": 2260,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_msg_not_enough_chance",
"meta_value": "{\"content\":\"You've played too many times. Rest your hand and come back at next session!\",\"origin\":\"You've played too many times. Rest your hand and come back at next session!\"}"
},
{
"id": 2259,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_new_user_guide",
"meta_value": "{\"content\":\"Shake your phone in {gametime} seconds,\\\\n the more you shake, the more coins you may get!\",\"origin\":\"Shake your phone in {gametime} seconds,\\\\n the more you shake, the more coins you may get!\"}"
},
{
"id": 2248,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_no_reward_description",
"meta_value": "{\"content\":\"Ah oh~ You did't win any coins.\\\\n Pleasa shake harder next time!\",\"origin\":\"Ah oh~ You did't win any coins.\\\\n Pleasa shake harder next time!\"}"
},
{
"id": 2252,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_paly_now",
"meta_value": "{\"content\":\"Play now\",\"origin\":\"Play now\"}"
},
{
"id": 2254,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_palying_people",
"meta_value": "{\"content\":\"{playing people} People Playing\",\"origin\":\"{playing people} People Playing\"}"
},
{
"id": 2253,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_pc_reminder",
"meta_value": "{\"content\":\"This game can just play on MOBILE side, please download Shopee APP and play on Shopee APP.\",\"origin\":\"This game can just play on MOBILE side, please download Shopee APP and play on Shopee APP.\"}"
},
{
"id": 2257,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_play_again",
"meta_value": "{\"content\":\"Play again\",\"origin\":\"Play again\"}"
},
{
"id": 2258,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_rank_coins",
"meta_value": "{\"content\":\"Coins won\",\"origin\":\"Coins won\"}"
},
{
"id": 2247,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_rank_entrance",
"meta_value": "{\"content\":\"Check Leaderboard Here >\",\"origin\":\"Check Leaderboard Here >\"}"
},
{
"id": 2250,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_shake_guide",
"meta_value": "{\"content\":\"Shake\",\"origin\":\"Shake\"}"
},
{
"id": 2246,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_share_msg",
"meta_value": "{\"content\":\"You got {sharebonus} coins!\",\"origin\":\"You got {sharebonus} coins!\"}"
},
{
"id": 2249,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_share_title",
"meta_value": "{\"content\":\"Thanks for sharing\",\"origin\":\"Thanks for sharing\"}"
},
{
"id": 2241,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_start_reminder",
"meta_value": "{\"content\":\"Start at {session start time}\",\"origin\":\"Start at {session start time}\"}"
},
{
"id": 2245,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_tc_title",
"meta_value": "{\"content\":\"TERMS & CONDITIONS\",\"origin\":\"TERMS & CONDITIONS\"}"
},
{
"id": 2242,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_timeup",
"meta_value": "{\"content\":\"Sorry!\",\"origin\":\"Sorry!\"}"
},
{
"id": 2243,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_title_network_error",
"meta_value": "{\"content\":\"Sorry!\",\"origin\":\"Sorry!\"}"
},
{
"id": 2240,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_title_not_enough_chance",
"meta_value": "{\"content\":\"Oh no!!\",\"origin\":\"Oh no!!\"}"
},
{
"id": 2244,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_total_coins",
"meta_value": "{\"content\":\"Total Left\",\"origin\":\"Total Left\"}"
},
{
"id": 2255,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_upgrade_text",
"meta_value": "{\"content\":\"Please upgrade the version of your browser and the version of your system to play the game\",\"origin\":\"Please upgrade the version of your browser and the version of your system to play the game\"}"
},
{
"id": 2256,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_upgrade_title",
"meta_value": "{\"content\":\"Oops!\",\"origin\":\"Oops!\"}"
},
{
"id": 2251,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_waiting_people",
"meta_value": "{\"content\":\"{waiting people} People Waiting\",\"origin\":\"{waiting people} People Waiting\"}"
},
{
"id": 2239,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_winning_description_plural",
"meta_value": "{\"content\":\"Coins you won\",\"origin\":\"Coins you won\"}"
},
{
"id": 2238,
"meta_source_id": 12,
"meta_source_type": "SlotTab",
"meta_type": "transifykey#fetext",
"meta_key": "t_winning_description_singular",
"meta_value": "{\"content\":\"Coin you won\",\"origin\":\"Coin you won\"}"
}
],
"desc": "desc",
"event": 11,
"event_id": 11,
"chances": [
{
"id": 16,
"name": "1",
"meta": [
{
"id": 2277,
"meta_source_id": 16,
"meta_source_type": "ChanceTab",
"meta_type": "prize#image",
"meta_key": "prizeIcon",
"meta_value": "{\"md5\":\"584abc4b02ba21d98662dd931d9a31f1\"}"
}
],
"desc": "1",
"slot": 12,
"slot_id": 12,
"order_by": 0,
"probability": 1
}
],
"pre_start_time": 1530180660,
"enter_limit": 5,
"ar_noti_title": "aa",
"ar_noti_body": "aa",
"ar_noti_icon": "ecc756e9df534b97f56c5338decf2454",
"ar_noti_url": "https://www.google.com",
"shared_times": 2,
"shared_coins": 2,
"ar_shrd_title": "a",
"ar_shrd_body": "b",
"ar_shrd_icon": "ecc756e9df534b97f56c5338decf2454",
"ar_shrd_url": "https://www.google.com",
"coins_shrd_title": "c",
"coins_shrd_msg": "d",
"start_time": 1530182460,
"end_time": 1530182640,
"game_length_sec": 10,
"total_stock": 10000,
"water_level_stock": 500,
"chance_limit": 9999,
"coins_quantity": "0.01"
},
"name": "rain_event",
"remain_stock": 500,
"coins_left": 500,
"total_stock": 10000,
"userid": -1,
"flag": "last_ended_slot",
"logid": "13973267394254415325967975071710",
"actual_end_time": "1530183230",
"limit": 5,
"end_time": 1530182640
}
# cost_time = 0
# for i in range(10000):
# start_time = time.time()
# ret = {}
# for x in result:
# if x == 'slot':
# continue
# ret[x] = result[x]
#
# ret['slot']={}
#
# for x in result["slot"]:
# if x == 'meta' or x =='chances':
# continue
# ret['slot'][x] = result['slot'][x]
# cost_time += (time.time() - start_time)
#
# print("avarage time1:%s",cost_time)
#
#
# cost_time = 0
# for i in range(10000):
# start_time = time.time()
# res = copy.deepcopy(result)
# del res['slot']['meta']
# del res['slot']['chances']
# cost_time += (time.time() - start_time)
#
# print("avarage time2:%s",cost_time)
# Demonstrate that copy.copy produces an independent top-level dict:
# mutating a key of the copy leaves the original unchanged.
result = {'order': '1', 'score': 'ss'}
ret = copy.copy(result)
ret['order'] = 2
print(ret)
print(result)
|
# import torch
import time
from functools import wraps
def start():
    """Print a simple console marker announcing the pipeline start."""
    print("starting...")
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.models import load_model
from sklearn.model_selection import train_test_split
import os
import shutil
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import glob
from .dataloader import DataLoader
from .cnn_model import Model
from .preprocessing import move_files
from lime import lime_image
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Target image size and batch size used by all generators.
img_width, img_height = 180, 180
BATCH_SIZE = 25
# Original chest-xray dataset layout.
main_dir = "/Users/sebastianblum/GitHub/ML_UseCases/chest_xray/data/"
train_data_dir = f"{main_dir}train/"
validation_data_dir = f"{main_dir}val/"
test_data_dir = f"{main_dir}test/"
# Re-split layout written by preprocessing().
main_dir_2 = "/Users/sebastianblum/GitHub/ML_UseCases/chest_xray/data/new/"
train_data_dir_2 = f"{main_dir_2}train/"
validation_data_dir_2 = f"{main_dir_2}val/"
# this is done as val set is so low
filenames = tf.io.gfile.glob(f"{train_data_dir}*/*")
filenames.extend(tf.io.gfile.glob(f"{validation_data_dir}*/*"))
# re-split filenames to have a new train/test ratio
train_filenames, val_filenames = train_test_split(filenames, test_size=0.2)
def data_count(classtype, filenames):
    """Count files belonging to a class by substring match on the path.

    Parameters:
        classtype: 0 for NORMAL images, 1 for PNEUMONIA images.
        filenames: iterable of file-path strings.

    Returns:
        Number of paths containing the class's directory name.

    Raises:
        ValueError: for any classtype other than 0 or 1 (the original
        silently returned None in that case).
    """
    labels = {0: "NORMAL", 1: "PNEUMONIA"}
    if classtype not in labels:
        raise ValueError("classtype must be 0 or 1, got %r" % (classtype,))
    label = labels[classtype]
    return sum(1 for name in filenames if label in name)
# Report the class balance of the new split.
print(f"training data | Normal : {data_count(0, train_filenames)}")
print(f"training data | PNEUMONIA : {data_count(1, train_filenames)}")
print(f"validation data | Normal : {data_count(0, val_filenames)}")
print(f"validation data | PNEUMONIA : {data_count(1, val_filenames)}")
# have to set weights for training as less normales then pneumonia
# print(train_filenames[0])
TRAIN_IMG_COUNT = len(train_filenames)
VAL_IMG_COUNT = len(val_filenames)
COUNT_NORMAL = data_count(0, train_filenames)
COUNT_PNEUMONIA = data_count(1, train_filenames)
def preprocessing():
    """Copy the re-split train/val file lists into the new directory layout."""
    move_files(train_filenames, train_data_dir_2)
    move_files(val_filenames, validation_data_dir_2)
def create_input():
    """Build the train/val/test data generators and the CNN model.

    Publishes train_generator, validation_generator, test_generator,
    class_weight and cnn as module-level globals: train_model() and
    predict() already reference these names, but previously they were
    only locals of this function, so calling those functions raised
    NameError.
    """
    global train_generator, validation_generator, test_generator
    global class_weight, cnn
    # Image Augmentation (zoom, shear, flip) enlarges the effective training
    # set; validation and test data are only rescaled.
    # done for large datasets. this one could actually be done by loading everything into memory
    datagen_train = ImageDataGenerator(
        rescale=1.0 / 255, zoom_range=0.2, shear_range=0.2, horizontal_flip=True
    )
    datagen_val = ImageDataGenerator(rescale=1.0 / 255)
    datagen_test = ImageDataGenerator(rescale=1.0 / 255)
    train_generator = DataLoader.get_generator(
        datagen_train, train_data_dir_2, img_width, img_height, BATCH_SIZE
    )
    validation_generator = DataLoader.get_generator(
        datagen_val, validation_data_dir_2, img_width, img_height, BATCH_SIZE
    )
    test_generator = DataLoader.get_generator(
        datagen=datagen_test,
        directory=test_data_dir,
        img_width=180,
        img_height=180,
        BATCH_SIZE=25,
    )
    # Show one augmented batch as a visual sanity check.
    image_batch, label_batch = next(iter(train_generator))
    DataLoader.show_batch(image_batch, label_batch, BATCH_SIZE)
    # Class weights compensate for the NORMAL/PNEUMONIA imbalance.
    class_weight = DataLoader.class_weights(
        COUNT_PNEUMONIA, COUNT_NORMAL, TRAIN_IMG_COUNT
    )
    print(f"Weight for class 0: {class_weight.get(0)}")
    print(f"Weight for class 1: {class_weight.get(1)}")
    input_shape = image_batch[1].shape
    print(f"shape: {input_shape}")
    output_shape = 1  # single output unit for binary classification
    cnn = Model.make_model(input_shape, output_shape)
    print(cnn.summary())
def train_model():
    """Compile and fit the CNN for 20 epochs with class weights, plot the
    training metrics, and save the model to disk.

    NOTE(review): `cnn`, `train_generator`, `validation_generator` and
    `class_weight` are created as locals inside create_input(), so they are
    not defined at module scope when this runs — confirm they are published
    as globals (or passed in) before calling this function.
    """
    EPOCHS = 20
    METRICS = [
        "accuracy",
        tf.keras.metrics.Precision(name="precision"),
        tf.keras.metrics.Recall(name="recall"),
    ]
    cnn.compile(loss="binary_crossentropy", optimizer="rmsprop", metrics=METRICS)
    history = cnn.fit(
        train_generator,
        steps_per_epoch=TRAIN_IMG_COUNT // BATCH_SIZE,
        epochs=EPOCHS,
        validation_data=validation_generator,
        validation_steps=VAL_IMG_COUNT // BATCH_SIZE,
        class_weight=class_weight,
    )
    Model.make_graph(history, ["precision", "recall", "accuracy", "loss"])
    cnn.save("chestxray_cnn_model_3.h5")
def predict():
    """Load the saved model, evaluate it on the test set, and print metrics.

    NOTE(review): test_generator must exist at module scope (it is created
    inside create_input()); otherwise this raises NameError — confirm.
    """
    cnn = load_model("chestxray_cnn_model_3.h5")
    loss, acc, prec, rec = cnn.evaluate(test_generator)
    print("loss: {:.2f}".format(loss))
    print("acc: {:.2f}".format(acc))
    print("prec: {:.2f}".format(prec))
    print("rec: {:.2f}".format(rec))
|
# Greatest Common Factor
# Design a function "gcf(x, y)" that returns the greatest common factor
# between x and y using the Euclidean algorithm.
def gcf(x, y):
    """Return the greatest common factor of x and y (Euclidean algorithm).

    BUG FIX: the original second variant looped on `while x`, which
    eventually evaluates `x % 0` and raises ZeroDivisionError; the loop
    must terminate when the *remainder* (y) reaches zero. The original
    also rebound the name `gcf` to the integer result, shadowing the
    function; the result now gets its own name.
    """
    while y:
        x, y = y, x % y
    return x


result = gcf(300, 400)
print('The GCF is', result)
print(result)
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
class plotter:
    """Small helper around matplotlib/pandas for interactive scatter plots.

    Each plotting method blocks on input() so the figure stays on screen
    until the user presses Enter.
    """

    def __init__(self):
        plt.ion()  # interactive mode so show() does not block

    def scatterplot_onevariable(self, xpoints, ypoints, title):
        """Scatter-plot a single (x, y) series with the given title."""
        self.xpoints = np.array(xpoints)
        self.ypoints = np.array(ypoints)
        plt.scatter(self.xpoints, self.ypoints)
        plt.title(title)
        plt.xlabel('x')
        plt.ylabel('y')
        plt.show()
        input()  # FIX: raw_input() does not exist in Python 3

    def scatterplot_3groups(self, list1x, list1y, list2x, list2y, list3x, list3y, groups):
        """Scatter-plot three labelled groups in red/green/blue."""
        g1x = np.array(list1x)
        g1y = np.array(list1y)
        g2x = np.array(list2x)
        g2y = np.array(list2y)
        g3x = np.array(list3x)
        g3y = np.array(list3y)  # BUG FIX: was list1y — group 3 plotted group 1's y-data
        colors = ("red", "green", "blue")
        fig = plt.figure()
        # FIX: the axisbg keyword was removed in matplotlib 2.0; facecolor is
        # its replacement.
        ax = fig.add_subplot(1, 1, 1, facecolor="1.0")
        data = ((g1x, g1y), (g2x, g2y), (g3x, g3y))
        # (Python-2 debug `print` statements removed; they fail to parse in py3.)
        for (x, y), color, group in zip(data, colors, groups):
            ax.scatter(x, y, alpha=0.8, c=color, edgecolors='none', s=30, label=group)
        plt.show()
        plt.legend(loc=2)
        input()
        return

    def pandaplot_3groups(self, listx, listy, listz, title, xlabel):
        """Plot consumption (y) against x, one series per label value in listz."""
        gx = np.array(listx)
        gy = np.array(listy)
        gz = np.array(listz)
        df = pd.DataFrame(dict(x=gx, y=gy, label=gz))
        groups = df.groupby('label')
        fig, ax = plt.subplots()
        ax.margins(0.05)
        for name, group in groups:
            ax.plot(group.x, group.y, marker='o', linestyle='', alpha=0.8, ms=12, label=name)
        ax.legend()
        ax.set_ylabel("consommation [kWh]")
        ax.set_xlabel(xlabel)
        plt.title(title)
        input()
        return
#
#import pandas as pd
#np.random.seed(1974)
#
## Generate Data
#num = 20
#x, y = np.random.random((2, num))
#labels = np.random.choice(['a', 'b', 'c'], num)
#df = pd.DataFrame(dict(x=x, y=y, label=labels))
#
#groups = df.groupby('label')
#
## Plot
#fig, ax = plt.subplots()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
#for name, group in groups:
# ax.plot(group.x, group.y, marker='o', linestyle='', ms=12, label=name)
#ax.legend()
#
#plt.show()
#
## Create data
#N = 60
#g1 = (0.6 + 0.6 * np.random.rand(N), np.random.rand(N))
#g2 = (0.4+0.3 * np.random.rand(N), 0.5*np.random.rand(N))
#g3 = (0.3*np.random.rand(N),0.3*np.random.rand(N))
#
#data = (g1, g2, g3)
#colors = ("red", "green", "blue")
#groups = ("coffee", "tea", "water")
#
## Create plot
#fig = plt.figure()
#ax = fig.add_subplot(1, 1, 1, axisbg="1.0")
#
#for data, color, group in zip(data, colors, groups):
# x, y = data
# ax.scatter(x, y, alpha=0.8, c=color, edgecolors='none', s=30, label=group)
#
#plt.title('Matplot scatter plot')
#plt.legend(loc=2)
#plt.show()
#
|
#!/usr/bin/python
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QThread, QObject, pyqtSignal
from threading import Thread
import threading
from Read import *
import time
import sys
import design
import urllib2
import requests
import json
# class getPostsThread(QThread):
# add_post = pyqtSignal(str)
# def __init__(self):
# QThread.__init__(self)
# def __del__(self):
# self.wait()
# def run(self):
# post = "post"
# self.add_post.emit(post)
# self.sleep(2)
class Threading(design.Ui_MainWindow):
    """Main-window controller that fills the card fields from a worker thread.

    NOTE(review): getPostsThread is commented out at module level, so
    dataCard() currently raises NameError — re-enable that class (or inject
    the thread object) before using this controller.
    """

    def __init__(self, MainWindow):
        super(self.__class__, self).__init__()
        self.setupUi(MainWindow)

    def dataCard(self):
        """Start the background fetch thread and wire its signals to the setters."""
        self.get_thread = getPostsThread()
        self.get_thread.add_uid.connect(self.setuuid)
        self.get_thread.add_memory.connect(self.setmemory)
        self.get_thread.add_mssv.connect(self.setMssv)
        self.get_thread.add_khoa.connect(self.setKhoa)
        self.get_thread.start()

    @QtCore.pyqtSlot()
    def setuuid(self, text):
        self.txt_uuid.setText(text)

    def setmemory(self, text):
        self.txt_memory.setText(text)

    def setMssv(self, text):
        self.txt_mssv.setText(text)

    def setKhoa(self, text):
        self.txt_khoa.setText(text)

    def done(self):
        # BUG FIX: in PyQt5 QMessageBox lives in QtWidgets, not QtGui;
        # QtGui.QMessageBox raised AttributeError at call time.
        # NOTE(review): `self` is a Ui_MainWindow, not a QWidget — confirm it
        # is a valid parent for the message box.
        QtWidgets.QMessageBox.information(self, "Done!", "Done fetching posts!")
class Payload(object):
    """Wrap a JSON document so its top-level keys read as attributes."""

    def __init__(self, j):
        # Parse the document and make the result the instance's attribute dict.
        parsed = json.loads(j)
        self.__dict__ = parsed
class getDataFromServer(threading.Thread):
    """Thread that GETs a JSON payload from the backand.com API and prints it.

    NOTE(review): the `url` passed to the constructor is stored and logged,
    but the hard-coded URL inside run() is what is actually requested —
    confirm which one is intended.
    """

    def __init__(self, name, url):
        threading.Thread.__init__(self)
        self.url = url
        self.name = name
        self.threadID = "1"

    def run(self):
        # FIX: Python-2 print statements converted to print() calls.
        print("Starting request " + self.url + " from " + self.name)
        url = 'https://api.backand.com/1/query/data/Pi'
        headers = {'anonymoustoken': '551e6826-dadf-4851-9335-01bbf8203a7c'}
        r = requests.get(url, headers=headers)
        # FIX: the body was parsed twice (json.loads(r.text) and r.json());
        # one parse is enough.
        a = r.json()
        print("Response request from " + self.name)
        print(type(a))
        print(a)
def main():
    """Spawn three data-fetching threads and wait for all of them to finish."""
    threads = []
    thread1 = getDataFromServer("thread example 1", "https://api.backand.com/1/query/data/Pi")
    thread2 = getDataFromServer("thread example 2 ", "https://api.backand.com/2/query/data/Pi")
    thread3 = getDataFromServer("thread example 3", "https://api.backand.com/3/query/data/Pi")
    thread1.start()
    thread2.start()
    thread3.start()
    # BUG FIX: thread1 was appended three times, so thread2 and thread3 were
    # never joined and the program could exit before they finished.
    threads.append(thread1)
    threads.append(thread2)
    threads.append(thread3)
    # FIX: Python-2 print statements converted to print() calls.
    print("waiting for thread")
    for t in threads:
        t.join()
    print("Exiting Main Thread")


if __name__ == '__main__':
    main()
|
# Read a game's start and end hour (0-23) and print how long it lasted.
InitialTime, EndTime = input().split()
InitialTime = int(InitialTime)
EndTime = int(EndTime)

# Duration on a 24-hour clock; identical start and end means a full day.
# (Replaces the original step-by-step countdown loop with arithmetic —
# same results for every 0-23 input pair.)
duration = (EndTime - InitialTime) % 24
if duration == 0:
    duration = 24
print("O JOGO DUROU {} HORA(S)".format(duration))
def sort_list(sort_by, lst):
    """Return lst sorted in descending order by the element at key/index sort_by."""
    def _key(element):
        return element[sort_by]

    return sorted(lst, key=_key, reverse=True)
|
import os
import sys

# Sum the integer frequency changes listed one per line in the given file.
filename = input("Enter an input file name: ")
if not os.path.isfile("./%s" % filename):
    print("File doesn't exist")
    # BUG FIX: bare `exit` was a no-op expression, so execution fell through
    # to the read loop and crashed with NameError on the unopened file.
    sys.exit(1)

freq = 0
# FIX: `with` guarantees the file is closed; the old `line is not None`
# branch was dead code — file iteration never yields None, it just ends.
with open("./%s" % filename, "r") as f:
    for line in f:
        freq = freq + int(line)

print("Sum of frequency changes: %d" % freq)
from django.shortcuts import render, redirect
from .models import Tools, Rental, Employee
from .forms import ToolForm, EmployeeForm, RentalForm
# Create your views here.
def new_tool(request):
    """Show the tool form; on a valid submission save it and go to the list."""
    form_tool = ToolForm(request.POST or None, request.FILES or None)
    if not form_tool.is_valid():
        # Redisplay the form (with validation errors on a bad POST).
        return render(request, 'forms/form_tools.html', {'form_tool': form_tool})
    form_tool.save()
    return redirect(tools_list)
def tools_list(request):
    """Render every tool, ordered alphabetically by name."""
    all_tools = Tools.objects.all().order_by('name')
    return render(request, 'forms/tools.html', {'tools': all_tools})
def toolList():
    """Return the active tools, ordered alphabetically by name.

    NOTE(review): active is filtered with the string 'True' — verify the
    model field type expects that rather than a boolean.
    """
    return Tools.objects.filter(active='True').order_by('name')
def employeeList():
    """Return the active employees, ordered alphabetically by name.

    NOTE(review): active is filtered with the string 'True' — verify the
    model field type expects that rather than a boolean.
    """
    return Employee.objects.filter(active='True').order_by('name')
def new_employee(request):
    """Show the employee form; on a valid submission save it and go to the list."""
    form_employee = EmployeeForm(request.POST or None, request.FILES or None)
    if not form_employee.is_valid():
        # Redisplay the form (with validation errors on a bad POST).
        return render(request, 'forms/form_employee.html', {'form_employee': form_employee})
    form_employee.save()
    return redirect(employee_list)
def employee_list(request):
    """Render every employee, ordered alphabetically by name."""
    everyone = Employee.objects.all().order_by('name')
    return render(request, 'forms/employee.html', {'employee': everyone})
def new_rental(request):
    """Show the rental form; on a valid submission save it and go to the list."""
    form_rental = RentalForm(request.POST or None, request.FILES or None)
    if not form_rental.is_valid():
        # Redisplay the form (with validation errors on a bad POST).
        return render(request, 'forms/form_rental.html', {'form_rental': form_rental})
    form_rental.save()
    return redirect(rental_list)
def rental_list(request):
    """Render every rental, ordered by the associated tool."""
    rentals = Rental.objects.all().order_by('tools')
    return render(request, 'forms/rental.html', {'rental': rentals})
|
import numpy
import json
import cv2
import numpy as np
import os
import scipy.misc as misc
###############################################################################################
def MergeOverLapping(InDir,SubDir):
    # Overlay every annotation mask found in InDir/<case>/<SubDir>/*.png onto
    # the case's Image.png by zeroing the blue and green channels wherever the
    # mask is set, showing the image after each mask (any key advances).
    for DirName in os.listdir(InDir):
        DirName=InDir+"//"+DirName
        SgDir=DirName+"/"+SubDir+"//"
        if not os.path.isdir(SgDir):
            # Report cases without the requested annotation sub-folder.
            print(SgDir)
            continue
        listfile=[]
        for fl in os.listdir(SgDir):
            if ".png" in fl:
                listfile.append(fl)
        l=len(listfile)
        k=0
        im=cv2.imread(DirName+"//Image.png")
        for i in range(l):
            path1=SgDir+"/"+listfile[i]
            if not os.path.exists(path1):continue
            # Read the mask as single-channel grayscale.
            sg1 = cv2.imread(path1,0)
            # NOTE(review): `1 - sg1` assumes the mask values are 0/1; for a
            # 0/255 mask this underflows uint8 — confirm the mask format.
            im[:,:,0]*=1-sg1
            im[:, :, 1] *= 1 - sg1
            cv2.imshow(listfile[i],im)
            cv2.waitKey()
            cv2.destroyAllWindows()
#####################################################################3333
# SG = cv2.imread(path,0)
# Img = cv2.imread(ImFolder + ImName)
# Img[:, :, 2] *= 1 - SG
# Img[:, :, 1] *= 1 - SG
# Img2 = cv2.imread(ImFolder + ImName)
# Img=np.concatenate([Img,Img2],axis=1)
# Im=cv2.resize(Img,(1000,500))
# cv2.imshow(path,Im)
# cv2.waitKey()
# cv2.destroyAllWindows()
#########################################################################################################################
###########################################################################################################################
# Hard-coded dataset location; adjust before running on another machine.
InDir=r"C:\Users\Sagi\Desktop\NewChemistryDataSet\NewFormat\\"
SubDir=r"Material"
MergeOverLapping(InDir, SubDir)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==================================================
# @Time : 2019-06-20 14:27
# @Author : ryuchen
# @File : router.py
# @Desc :
# ==================================================
from apps.apis.api import app
@app.route('/')
def hello_world():
    """Respond to the site root with a static greeting."""
    greeting = 'Hello, World!'
    return greeting
|
# Problem Statement :
# 6.5 Write code using find() and string slicing (see section 6.10) to extract
# the number at the end of the line below. Convert the extracted value to a
# floating point number and print it out.
# Code is Written By Krishna
# The code below almost works
text = "X-DSPAM-Confidence: 0.8475"
# Locate the separating space and slice from there to the end of the line.
# FIX: the original sliced up to find('5'), which only works because this
# particular number happens to end in '5'; slicing to the end handles any
# trailing number.
ind = text.find(" ")
confidence = float(text[ind:])
print(confidence)
|
# Generated by Django 3.1.4 on 2020-12-15 13:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefines Film.available_in_countries
    # as a ManyToMany to Country.
    # NOTE(review): the related_name keeps the misspelling
    # 'film_aviaible_country'; renaming it would require a new migration and
    # updates to any code doing reverse lookups — confirm before changing.

    dependencies = [
        ('film_app', '0005_auto_20201215_1240'),
    ]

    operations = [
        migrations.AlterField(
            model_name='film',
            name='available_in_countries',
            field=models.ManyToManyField(related_name='film_aviaible_country', to='film_app.Country'),
        ),
    ]
|
#%% [markdown]
# <center>
# <img src="https://habrastorage.org/web/677/8e1/337/6778e1337c3d4b159d7e99df94227cb2.jpg"/>
# ## Специализация "Машинное обучение и анализ данных"
# </center>
# <center>Автор материала: программист-исследователь Mail.ru Group, старший преподаватель Факультета Компьютерных Наук ВШЭ Юрий Кашницкий
#%% [markdown]
# # <center> Capstone проект №1. Идентификация пользователей по посещенным веб-страницам
#
# В этом проекте мы будем решать задачу идентификации пользователя по его поведению в сети Интернет. Это сложная и интересная задача на стыке анализа данных и поведенческой психологии. В качестве примера, компания Яндекс решает задачу идентификации взломщика почтового ящика по его поведению. В двух словах, взломщик будет себя вести не так, как владелец ящика: он может не удалять сообщения сразу по прочтении, как это делал хозяин, он будет по-другому ставить флажки сообщениям и даже по-своему двигать мышкой. Тогда такого злоумышленника можно идентифицировать и "выкинуть" из почтового ящика, предложив хозяину войти по SMS-коду. Этот пилотный проект описан в [статье](https://habrahabr.ru/company/yandex/blog/230583/) на Хабрахабре. Похожие вещи делаются, например, в Google Analytics и описываются в научных статьях, найти можно многое по фразам "Traversal Pattern Mining" и "Sequential Pattern Mining".
#
# <img src='http://i.istockimg.com/file_thumbview_approve/21546327/5/stock-illustration-21546327-identification-de-l-utilisateur.jpg'>
#
# Мы будем решать похожую задачу: по последовательности из нескольких веб-сайтов, посещенных подряд один и тем же человеком, мы будем идентифицировать этого человека. Идея такая: пользователи Интернета по-разному переходят по ссылкам, и это может помогать их идентифицировать (кто-то сначала в почту, потом про футбол почитать, затем новости, контакт, потом наконец – работать, кто-то – сразу работать).
#
# Будем использовать данные из [статьи](http://ceur-ws.org/Vol-1703/paper12.pdf) "A Tool for Classification of Sequential Data". И хотя мы не можем рекомендовать эту статью (описанные методы делеки от state-of-the-art, лучше обращаться к [книге](http://www.charuaggarwal.net/freqbook.pdf) "Frequent Pattern Mining" и последним статьям с ICDM), но данные там собраны аккуратно и представляют интерес.
#
# Имеются данные с прокси-серверов Университета Блеза Паскаля, они имеют очень простой вид. Для каждого пользователя заведен csv-файл с названием user\*\*\*\*.csv (где вместо звездочек – 4 цифры, соответствующие ID пользователя), а в нем посещения сайтов записаны в следующем формате: <br>
#
# <center>*timestamp, посещенный веб-сайт*</center>
#
# Скачать исходные данные можно по ссылке в статье, там же описание.
# Для этого задания хватит данных не по всем 3000 пользователям, а по 10 и 150. [Ссылка](https://yadi.sk/d/3gscKIdN3BCASG) на архив *capstone_user_identification* (~7 Mb, в развернутом виде ~ 60 Mb).
#
# В финальном проекте уже придется столкнуться с тем, что не все операции можно выполнить за разумное время (скажем, перебрать с кросс-валидацией 100 комбинаций параметров случайного леса на этих данных Вы вряд ли сможете), поэтому мы будем использовать параллельно 2 выборки: по 10 пользователям и по 150. Для 10 пользователей будем писать и отлаживать код, для 150 – будет рабочая версия.
#
# Данные устроены следующим образом:
#
# - В каталоге 10users лежат 10 csv-файлов с названием вида "user[USER_ID].csv", где [USER_ID] – ID пользователя;
# - Аналогично для каталога 150users – там 150 файлов;
# - В каталоге 3users – игрушечный пример из 3 файлов, это для отладки кода предобработки, который Вы далее напишете.
#
# На 5 неделе будет задание по [соревнованию](https://inclass.kaggle.com/c/identify-me-if-you-can4) Kaggle Inclass, которое организовано специально под Capstone проект нашей специализации. Соревнование уже открыто и, конечно, желающие могут начать уже сейчас.
#
# # <center>Неделя 1. Подготовка данных к анализу и построению моделей
#
# Первая часть проекта посвящена подготовке данных для дальнейшего описательного анализа и построения прогнозных моделей. Надо будет написать код для предобработки данных (исходно посещенные веб-сайты указаны для каждого пользователя в отдельном файле) и формирования единой обучающей выборки. Также в этой части мы познакомимся с разреженным форматом данных (матрицы `Scipy.sparse`), который хорошо подходит для данной задачи.
#
# **План 1 недели:**
# - Часть 1. Подготовка обучающей выборки
# - Часть 2. Работа с разреженным форматом данных
#%% [markdown]
# <font color='red'>**Задание:**</font> заполните код в этой тетрадке и выберите ответы в [веб-форме](https://docs.google.com/forms/d/e/1FAIpQLSedmwHb4cOI32zKJmEP7RvgEjNoz5GbeYRc83qFXVH82KFgGA/viewform).
#
#%% [markdown]
# **В этой части проекта Вам могут быть полезны видеозаписи следующих лекций 1 и 2 недели курса "Математика и Python для анализа данных":**
# - [Циклы, функции, генераторы, list comprehension](https://www.coursera.org/learn/mathematics-and-python/lecture/Kd7dL/tsikly-funktsii-ghienieratory-list-comprehension)
# - [Чтение данных из файлов](https://www.coursera.org/learn/mathematics-and-python/lecture/8Xvwp/chtieniie-dannykh-iz-failov)
# - [Запись файлов, изменение файлов](https://www.coursera.org/learn/mathematics-and-python/lecture/vde7k/zapis-failov-izmienieniie-failov)
# - [Pandas.DataFrame](https://www.coursera.org/learn/mathematics-and-python/lecture/rcjAW/pandas-data-frame)
# - [Pandas. Индексация и селекция](https://www.coursera.org/learn/mathematics-and-python/lecture/lsXAR/pandas-indieksatsiia-i-sieliektsiia)
#
# **Кроме того, в задании будут использоваться библиотеки Python [`glob`](https://docs.python.org/3/library/glob.html), [`pickle`](https://docs.python.org/2/library/pickle.html) и класс [`csr_matrix`](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.sparse.csr_matrix.html) из `Scipy.sparse`.**
#%% [markdown]
# Наконец, для лучшей воспроизводимости результатов приведем список версий основных используемых в проекте библиотек: NumPy, SciPy, Pandas, Matplotlib, Statsmodels и Scikit-learn. Для этого воспользуемся расширением [watermark](https://github.com/rasbt/watermark).
#%%
# pip install watermark
get_ipython().run_line_magic('load_ext', 'watermark')
#%%
get_ipython().run_line_magic('watermark', '-v -m -p numpy,scipy,pandas,matplotlib,statsmodels,sklearn -g')
#%%
from __future__ import division, print_function
# отключим всякие предупреждения Anaconda
import warnings
warnings.filterwarnings('ignore')
from glob import glob
import os
import pickle
#pip install tqdm
from tqdm import tqdm_notebook
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
#%% [markdown]
# **Посмотрим на один из файлов с данными о посещенных пользователем (номер 31) веб-страницах.**
#%%
# Поменяйте на свой путь к данным
PATH_TO_DATA = '~/capstone_user_identification'
#%%
user31_data = pd.read_csv(os.path.join(PATH_TO_DATA,
'10users/user0031.csv'))
#%%
user31_data.head()
#%% [markdown]
# **Поставим задачу классификации: идентифицировать пользователя по сессии из 10 подряд посещенных сайтов. Объектом в этой задаче будет сессия из 10 сайтов, последовательно посещенных одним и тем же пользователем, признаками – индексы этих 10 сайтов (чуть позже здесь появится "мешок" сайтов, подход Bag of Words). Целевым классом будет id пользователя.**
#%% [markdown]
# ### <center>Пример для иллюстрации</center>
# **Пусть пользователей всего 2, длина сессии – 2 сайта.**
#
# <center>user0001.csv</center>
# <style type="text/css">
# .tg {border-collapse:collapse;border-spacing:0;}
# .tg td{font-family:Arial, sans-serif;font-size:14px;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
# .tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
# .tg .tg-yw4l{vertical-align:top}
# </style>
# <table class="tg">
# <tr>
# <th class="tg-031e">timestamp</th>
# <th class="tg-031e">site</th>
# </tr>
# <tr>
# <td class="tg-031e">00:00:01</td>
# <td class="tg-031e">vk.com</td>
# </tr>
# <tr>
# <td class="tg-yw4l">00:00:11</td>
# <td class="tg-yw4l">google.com</td>
# </tr>
# <tr>
# <td class="tg-031e">00:00:16</td>
# <td class="tg-031e">vk.com</td>
# </tr>
# <tr>
# <td class="tg-031e">00:00:20</td>
# <td class="tg-031e">yandex.ru</td>
# </tr>
# </table>
#
# <center>user0002.csv</center>
# <style type="text/css">
# .tg {border-collapse:collapse;border-spacing:0;}
# .tg td{font-family:Arial, sans-serif;font-size:14px;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
# .tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
# .tg .tg-yw4l{vertical-align:top}
# </style>
# <table class="tg">
# <tr>
# <th class="tg-031e">timestamp</th>
# <th class="tg-031e">site</th>
# </tr>
# <tr>
# <td class="tg-031e">00:00:02</td>
# <td class="tg-031e">yandex.ru</td>
# </tr>
# <tr>
# <td class="tg-yw4l">00:00:14</td>
# <td class="tg-yw4l">google.com</td>
# </tr>
# <tr>
# <td class="tg-031e">00:00:17</td>
# <td class="tg-031e">facebook.com</td>
# </tr>
# <tr>
# <td class="tg-031e">00:00:25</td>
# <td class="tg-031e">yandex.ru</td>
# </tr>
# </table>
#
# Идем по 1 файлу, нумеруем сайты подряд: vk.com – 1, google.com – 2 и т.д. Далее по второму файлу.
#
# Отображение сайтов в их индексы должно получиться таким:
#
# <style type="text/css">
# .tg {border-collapse:collapse;border-spacing:0;}
# .tg td{font-family:Arial, sans-serif;font-size:14px;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
# .tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
# .tg .tg-yw4l{vertical-align:top}
# </style>
# <table class="tg">
# <tr>
# <th class="tg-031e">site</th>
# <th class="tg-yw4l">site_id</th>
# </tr>
# <tr>
# <td class="tg-yw4l">vk.com</td>
# <td class="tg-yw4l">1</td>
# </tr>
# <tr>
# <td class="tg-yw4l">google.com</td>
# <td class="tg-yw4l">2</td>
# </tr>
# <tr>
# <td class="tg-yw4l">yandex.ru</td>
# <td class="tg-yw4l">3</td>
# </tr>
# <tr>
# <td class="tg-yw4l">facebook.com</td>
# <td class="tg-yw4l">4</td>
# </tr>
# </table>
#
# Тогда обучающая выборка будет такой (целевой признак – user_id):
# <style type="text/css">
# .tg {border-collapse:collapse;border-spacing:0;}
# .tg td{font-family:Arial, sans-serif;font-size:14px;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
# .tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
# .tg .tg-s6z2{text-align:center}
# .tg .tg-baqh{text-align:center;vertical-align:top}
# .tg .tg-hgcj{font-weight:bold;text-align:center}
# .tg .tg-amwm{font-weight:bold;text-align:center;vertical-align:top}
# </style>
# <table class="tg">
# <tr>
# <th class="tg-hgcj">session_id</th>
# <th class="tg-hgcj">site1</th>
# <th class="tg-hgcj">site2</th>
# <th class="tg-amwm">user_id</th>
# </tr>
# <tr>
# <td class="tg-s6z2">1</td>
# <td class="tg-s6z2">1</td>
# <td class="tg-s6z2">2</td>
# <td class="tg-baqh">1</td>
# </tr>
# <tr>
# <td class="tg-s6z2">2</td>
# <td class="tg-s6z2">1</td>
# <td class="tg-s6z2">3</td>
# <td class="tg-baqh">1</td>
# </tr>
# <tr>
# <td class="tg-s6z2">3</td>
# <td class="tg-s6z2">3</td>
# <td class="tg-s6z2">2</td>
# <td class="tg-baqh">2</td>
# </tr>
# <tr>
# <td class="tg-s6z2">4</td>
# <td class="tg-s6z2">4</td>
# <td class="tg-s6z2">3</td>
# <td class="tg-baqh">2</td>
# </tr>
# </table>
#
# Здесь 1 объект – это сессия из 2 посещенных сайтов 1-ым пользователем (target=1). Это сайты vk.com и google.com (номер 1 и 2). И так далее, всего 4 сессии. Пока сессии у нас не пересекаются по сайтам, то есть посещение каждого отдельного сайта относится только к одной сессии.
#%% [markdown]
# ## Часть 1. Подготовка обучающей выборки
# Реализуйте функцию *prepare_train_set*, которая принимает на вход путь к каталогу с csv-файлами *path_to_csv_files* и параметр *session_length* – длину сессии, а возвращает 2 объекта:
# - DataFrame, в котором строки соответствуют уникальным сессиям из *session_length* сайтов, *session_length* столбцов – индексам этих *session_length* сайтов и последний столбец – ID пользователя
# - частотный словарь сайтов вида {'site_string': [site_id, site_freq]}, например для недавнего игрушечного примера это будет {'vk.com': (1, 2), 'google.com': (2, 2), 'yandex.ru': (3, 3), 'facebook.com': (4, 1)}
#
# Детали:
# - Смотрите чуть ниже пример вывода, что должна возвращать функция
# - Используйте `glob` (или аналоги) для обхода файлов в каталоге. Для определенности, отсортируйте список файлов лексикографически. Удобно использовать `tqdm_notebook` (или просто `tqdm` в случае python-скрипта) для отслеживания числа выполненных итераций цикла
# - Создайте частотный словарь уникальных сайтов (вида {'site_string': (site_id, site_freq)}) и заполняйте его по ходу чтения файлов. Начните с 1
# - Рекомендуется меньшие индексы давать более часто попадающимся сайтам (приницип наименьшего описания)
# - Не делайте entity recognition, считайте *google.com*, *http://www.google.com* и *www.google.com* разными сайтами (подключить entity recognition можно уже в рамках индивидуальной работы над проектом)
# - Скорее всего в файле число записей не кратно числу *session_length*. Тогда последняя сессия будет короче. Остаток заполняйте нулями. То есть если в файле 24 записи и сессии длины 10, то 3 сессия будет состоять из 4 сайтов, и ей мы сопоставим вектор [*site1_id*, *site2_id*, *site3_id*, *site4_id*, 0, 0, 0, 0, 0, 0, *user_id*]
# - В итоге некоторые сессии могут повторяться – оставьте как есть, не удаляйте дубликаты. Если в двух сессиях все сайты одинаковы, но сессии принадлежат разным пользователям, то тоже оставляйте как есть, это естественная неопределенность в данных
# - Не оставляйте в частотном словаре сайт 0 (уже в конце, когда функция возвращает этот словарь)
# - 150 файлов из *capstone_websites_data/150users/* у меня обработались за 1.7 секунды, но многое, конечно, зависит от реализации функции и от используемого железа. И вообще, первая реализация скорее всего будет не самой эффективной, дальше можно заняться профилированием (особенно если планируете запускать этот код для 3000 пользователей). Также эффективная реализация этой функции поможет нам на следующей неделе.
#%%
def prepare_train_set(path_to_csv_files, session_length=10):
    # Assignment stub ("YOUR CODE HERE"): must return a DataFrame of
    # session_length-site sessions plus user_id, and a site-frequency dict
    # {'site': (site_id, freq)} — see the specification in the cell above.
    ''' ВАШ КОД ЗДЕСЬ '''
#%% [markdown]
# **Примените полученную функцию к игрушечному примеру, убедитесь, что все работает как надо.**
#%%
get_ipython().system('cat $PATH_TO_DATA/3users/user0001.csv')
#%%
get_ipython().system('cat $PATH_TO_DATA/3users/user0002.csv')
#%%
get_ipython().system('cat $PATH_TO_DATA/3users/user0003.csv')
#%%
train_data_toy, site_freq_3users = prepare_train_set(os.path.join(PATH_TO_DATA, '3users'),
session_length=10)
#%%
train_data_toy
#%% [markdown]
# Частоты сайтов (второй элемент кортежа) точно должны быть такими, нумерация может быть любой (первые элементы кортежей могут отличаться).
#%%
site_freq_3users
#%% [markdown]
# Примените полученную функцию к данным по 10 пользователям.
#
# **<font color='red'> Вопрос 1. </font> Сколько уникальных сессий из 10 сайтов в выборке с 10 пользователями?**
#%%
train_data_10users, site_freq_10users = ''' ВАШ КОД ЗДЕСЬ '''
#%% [markdown]
# **<font color='red'> Вопрос 2. </font> Сколько всего уникальных сайтов в выборке из 10 пользователей? **
#%%
''' ВАШ КОД ЗДЕСЬ '''
#%% [markdown]
# Примените полученную функцию к данным по 150 пользователям.
#
# **<font color='red'> Вопрос 3. </font> Сколько уникальных сессий из 10 сайтов в выборке с 150 пользователями?**
#%%
get_ipython().run_cell_magic('time', '', "train_data_150users, site_freq_150users = ''' ВАШ КОД ЗДЕСЬ '''")
#%% [markdown]
# **<font color='red'> Вопрос 4. </font> Сколько всего уникальных сайтов в выборке из 150 пользователей? **
#%%
''' ВАШ КОД ЗДЕСЬ '''
#%% [markdown]
# **<font color='red'> Вопрос 5. </font> Какой из этих сайтов НЕ входит в топ-10 самых популярных сайтов среди посещенных 150 пользователями?**
# - www.google.fr
# - www.youtube.com
# - safebrowsing-cache.google.com
# - www.linkedin.com
#%%
''' ВАШ КОД ЗДЕСЬ '''
#%% [markdown]
# **Для дальнейшего анализа запишем полученные объекты DataFrame в csv-файлы.**
#%%
train_data_10users.to_csv(os.path.join(PATH_TO_DATA,
'train_data_10users.csv'),
index_label='session_id', float_format='%d')
train_data_150users.to_csv(os.path.join(PATH_TO_DATA,
'train_data_150users.csv'),
index_label='session_id', float_format='%d')
#%% [markdown]
# ## Часть 2. Работа с разреженным форматом данных
#%% [markdown]
# Если так подумать, то полученные признаки *site1*, ..., *site10* смысла не имеют как признаки в задаче классификации. А вот если воспользоваться идеей мешка слов из анализа текстов – это другое дело. Создадим новые матрицы, в которых строкам будут соответствовать сессии из 10 сайтов, а столбцам – индексы сайтов. На пересечении строки $i$ и столбца $j$ будет стоять число $n_{ij}$ – cколько раз сайт $j$ встретился в сессии номер $i$. Делать это будем с помощью разреженных матриц Scipy – [csr_matrix](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.sparse.csr_matrix.html). Прочитайте документацию, разберитесь, как использовать разреженные матрицы и создайте такие матрицы для наших данных. Сначала проверьте на игрушечном примере, затем примените для 10 и 150 пользователей.
#
# Обратите внимание, что в коротких сессиях, меньше 10 сайтов, у нас остались нули, так что первый признак (сколько раз попался 0) по смыслу отличен от остальных (сколько раз попался сайт с индексом $i$). Поэтому первый столбец разреженной матрицы надо будет удалить.
#%%
X_toy, y_toy = train_data_toy.iloc[:, :-1].values, train_data_toy.iloc[:, -1].values
#%%
X_toy
#%%
X_sparse_toy = csr_matrix ''' ВАШ КОД ЗДЕСЬ '''
#%% [markdown]
# **Размерность разреженной матрицы должна получиться равной 11, поскольку в игрушечном примере 3 пользователя посетили 11 уникальных сайтов.**
#%%
X_sparse_toy.todense()
#%%
X_10users, y_10users = train_data_10users.iloc[:, :-1].values, train_data_10users.iloc[:, -1].values
X_150users, y_150users = train_data_150users.iloc[:, :-1].values, train_data_150users.iloc[:, -1].values
#%%
X_sparse_10users = ''' ВАШ КОД ЗДЕСЬ '''
X_sparse_150users = ''' ВАШ КОД ЗДЕСЬ '''
#%% [markdown]
# **Сохраним эти разреженные матрицы с помощью [pickle](https://docs.python.org/2/library/pickle.html) (сериализация в Python), также сохраним вектора *y_10users, y_150users* – целевые значения (id пользователя) в выборках из 10 и 150 пользователей. То что названия этих матриц начинаются с X и y, намекает на то, что на этих данных мы будем проверять первые модели классификации.
# Наконец, сохраним также и частотные словари сайтов для 3, 10 и 150 пользователей.**
#%%
# Serialize the sparse matrices, target vectors and site-frequency dicts.
# protocol=2 keeps the pickles loadable from Python 2 as well.
with open(os.path.join(PATH_TO_DATA, 'X_sparse_10users.pkl'), 'wb') as X10_pkl:
    pickle.dump(X_sparse_10users, X10_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'y_10users.pkl'), 'wb') as y10_pkl:
    pickle.dump(y_10users, y10_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'X_sparse_150users.pkl'), 'wb') as X150_pkl:
    pickle.dump(X_sparse_150users, X150_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'y_150users.pkl'), 'wb') as y150_pkl:
    pickle.dump(y_150users, y150_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'site_freq_3users.pkl'), 'wb') as site_freq_3users_pkl:
    pickle.dump(site_freq_3users, site_freq_3users_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'site_freq_10users.pkl'), 'wb') as site_freq_10users_pkl:
    pickle.dump(site_freq_10users, site_freq_10users_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'site_freq_150users.pkl'), 'wb') as site_freq_150users_pkl:
    pickle.dump(site_freq_150users, site_freq_150users_pkl, protocol=2)
#%% [markdown]
# **Чисто для подстраховки проверим, что число столбцов в разреженных матрицах `X_sparse_10users` и `X_sparse_150users` равно ранее посчитанным числам уникальных сайтов для 10 и 150 пользователей соответственно.**
#%%
assert X_sparse_10users.shape[1] == len(site_freq_10users)
#%%
assert X_sparse_150users.shape[1] == len(site_freq_150users)
#%% [markdown]
# На следующей неделе мы еще немного поготовим данные и потестируем первые гипотезы, связанные с нашими наблюдениями.
|
__author__ = 'zhan'
class Node:
    """A named node carrying a mutable distance, usable in ordered containers.

    Supports in-place ``node + number`` and ``<`` / ``>`` comparisons against
    either plain numbers or other Node instances.
    """

    def __init__(self, name, dist):
        self.name = name
        self.distance = dist

    def __repr__(self):
        return "%s(%s)" % (self.name, self.distance)

    def __add__(self, other):
        # NOTE: intentionally mutates self and returns it (in-place add),
        # matching the original behaviour callers may rely on.
        self.distance = self.distance + other
        return self

    @staticmethod
    def _distance_of(other):
        # Accept either a bare number or another Node in comparisons.
        return other.distance if isinstance(other, Node) else other

    def __gt__(self, other):
        # BUG FIX: comparing two Nodes used to compare a number against a
        # Node object, which is a TypeError on Python 3; compare distances.
        return self.distance > self._distance_of(other)

    def __lt__(self, other):
        return self.distance < self._distance_of(other)
if __name__ == '__main__':
    # Demo: build two nodes, swap them, and compare.
    # FIX: Python-2 print statements converted to print() calls (the
    # originals fail to parse under Python 3; output is unchanged on 2).
    v0 = Node('V0', 14)
    v1 = Node('V1', 15)
    v0, v1 = v1, v0
    print(v0)
    print(v0 < v1)
|
import sv2.main as sv2
def main():
    """Console entry point: delegate straight to sv2's main routine."""
    sv2.main()
# Load equilibrium data from a file.
import numpy as np
from DREAM.Settings.LUKEMagneticField import LUKEMagneticField
def isAvailable():
    """
    Report whether this magnetic-field source can be used.

    File-based magnetic fields have no system-specific requirements,
    so this is unconditionally True.
    """
    return True
def getLUKE(file, *args, **kwargs):
    """
    Returns magnetic equilibrium data from the named file.

    :param file: Name of file to load data from.
    """
    mf = LUKEMagneticField(file)
    # Copy the fields of interest off the loaded object into a plain dict.
    fields = ('id', 'Rp', 'Zp', 'psi_apRp', 'theta', 'ptx', 'pty',
              'ptBx', 'ptBy', 'ptBPHI')
    return {name: getattr(mf, name) for name in fields}
|
#!/usr/bin/python3
''' blueprint for state '''
from api.v1.views import app_views
from flask import jsonify, abort, request
from models import storage
from models import State
@app_views.route('/states', methods=["GET"], strict_slashes=False)
@app_views.route('/states/<state_id>', methods=["GET"], strict_slashes=False)
def state(state_id=None):
    '''Retrieve every state, or one state by id (404 when unknown).'''
    if state_id is None:
        # No id given: list all State objects as dicts.
        return jsonify([obj.to_dict()
                        for obj in storage.all("State").values()])
    found = storage.get("State", state_id)
    if found is not None:
        return jsonify(found.to_dict())
    abort(404)
@app_views.route('/states/<s_id>', methods=["DELETE"], strict_slashes=False)
def delete_states(s_id):
    '''Delete the state with the given id; 404 when it does not exist.'''
    target = storage.get("State", s_id)
    if target is None:
        abort(404)
    storage.delete(target)
    # NOTE(review): no storage.save() here -- confirm the storage engine
    # persists the deletion on its own.
    return (jsonify({}))
@app_views.route('/states', methods=["POST"], strict_slashes=False)
def post_states():
    '''Create a new State from a JSON body.

    Returns 400 with {"error": "Not a JSON"} for an unparseable or missing
    body, 400 with {"error": "Missing name"} when "name" is absent, and
    201 with the created state's dict on success.
    '''
    try:
        content = request.get_json()
    except Exception:
        return (jsonify({"error": "Not a JSON"}), 400)
    # get_json() may also return None (e.g. wrong content type) instead of
    # raising; the original then crashed with AttributeError (HTTP 500).
    if content is None:
        return (jsonify({"error": "Not a JSON"}), 400)
    name = content.get("name")
    if name is None:
        return (jsonify({"error": "Missing name"}), 400)
    new_state = State()
    new_state.name = name
    new_state.save()
    return (jsonify(new_state.to_dict()), 201)
@app_views.route('/states/<state_id>', methods=["PUT"], strict_slashes=False)
def update_states(state_id):
    '''Update a state from a JSON body, ignoring id/created_at/updated_at.'''
    try:
        content = request.get_json()
    except Exception:
        return (jsonify({"error": "Not a JSON"}), 400)
    # get_json() returns None for a missing/invalid body instead of raising.
    if content is None:
        return (jsonify({"error": "Not a JSON"}), 400)
    my_state = storage.get("State", state_id)
    if my_state is None:
        abort(404)
    for key, value in content.items():
        # BUG FIX: the original `key != "id" or key != "created_at" or ...`
        # is always True, so the protected fields could be overwritten.
        if key not in ("id", "created_at", "updated_at"):
            setattr(my_state, key, value)
    my_state.save()
    return jsonify(my_state.to_dict())
|
import socket
import threading
# from server to client
# --- protocol message codes: server -> client
S_OTHER_LOGIN = 0x00
S_OTHER_LOGOUT = 0x01
S_REQUEST_CONNECT = 0x03
S_CANCEL_REQUEST = 0x04
S_START_CONNECT = 0x05
S_REJECT_CONNECT = 0x06
S_INVALID_FORMAT = 0xf0
S_NO_AUTH = 0xf1
S_ERROR = 0xf2
# --- protocol message codes: client -> server
R_LOGIN = 0x00
R_LOGOUT = 0x01
R_PING = 0x02
R_REQUEST_CONNECT = 0x03
# BUG FIX: the original line was the bare name `R_CANCEL_REQUEST`, a
# NameError at import time; it mirrors S_CANCEL_REQUEST, so give it the
# same code.
R_CANCEL_REQUEST = 0x04
# --- local / server endpoints
UDP_IP = "127.0.0.1"
UDP_PORT = 9030
SERVER_IP = "127.0.0.1"
SERVER_PORT = 9020
SERVER_ADDRESS = (SERVER_IP, SERVER_PORT)
# --- mutable client state
init = False          # True once the UDP socket is bound
logged_in_as = ""
session_id = None     # session token bytes once logged in
def listen_socket():
    """Background receive loop for datagrams from the server."""
    global session_id
    print("starting socket loop")
    while True:
        packet, sender = sock.recvfrom(65000)
        # NOTE(review): S_LOGIN is not defined anywhere in this module, so
        # this comparison raises NameError on the first datagram -- confirm
        # the intended constant.
        if packet[0] == S_LOGIN:
            session_id = packet[1:5]
            print("signed in as " + packet[5:].decode('ascii'))
        else:
            print("unknown message : " + packet[1:].decode('ascii'))
# Bind the first free UDP port at or above UDP_PORT.
while not init:
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind((UDP_IP, UDP_PORT))
        init = True
    except OSError:
        init = False
        UDP_PORT += 1
# Receive in the background; the main thread drives user input.
socket_thread = threading.Thread(target=listen_socket, args=())
socket_thread.start()
print("starting input loop")
while True:
    command = input("")
    command_type = command[0]
    content = command[2:]
    if command_type == "i" and session_id is None:
        # "i <name>": log in under <name>.
        sock.sendto(bytes([R_LOGIN]) + content.encode('ascii'), SERVER_ADDRESS)
    elif command_type == "o":
        # "o": log out with the current session token.
        sock.sendto(bytes([R_LOGOUT]) + session_id, SERVER_ADDRESS)
        session_id = None
        print("probably logged out")
from analysis import *
fig = plt.figure(figsize=(figWidth, figHeight))
ax = fig.add_axes([0, 0, 1, 1])
# Uniform frame styling on all four spines.
for side in ['top', 'bottom', 'left', 'right']:
    ax.spines[side].set_linewidth(linW1)
    ax.spines[side].set_color(colK1)
plt.style.use('seaborn-paper')
rc('font', **{'family': 'sans-serif', 'sans-serif': ['DejaVu Sans']})
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1, 1))
title_pad = -2
# One curve per moment order q.
for q in range(N_qMom):
    ax.plot(dataR[:], dataMom[:, q], linewidth=linW2,
            marker=markersList[q % 10], markersize=marS4,
            label='q={:3.2f}'.format(qMom[q]))
ax.tick_params(axis='both', which='major', direction='in', colors=colK1,
               labelsize=fsTk3, length=tkL3, width=tkW2, pad=0.8)
# ax.set_xlabel(r' $\mathsf{ q }$', labelpad=0, color=colK1, fontsize=fsLb3)
# ax.set_ylabel(r' $\mathsf{ \tau_q }$', labelpad=0, color=colK1, fontsize=fsLb3)
# ax.legend()
# h_legend = ax.legend(bbox_to_anchor=(0.01, 0.98), ncol=2, loc=2, frameon=False, fontsize=fsLg2, markerscale=1.25, facecolor='none', columnspacing=0.4, borderpad=1.0)
plt.savefig(save_dir + 'moments.pdf', dpi=dpi2, transparent=True,
            bbox_inches='tight')
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 9 13:11:30 2021
@author: anand
"""
# 1: libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# 2: load the data
dataset = pd.read_csv('1000_Companies.csv')
data = dataset.copy(deep=True)

# Extract the independent (X) and dependent (y) variables.
X = data.iloc[:, :-1].values
y = data.iloc[:, 4].values
# print(data.corr())  # pairwise correlation of the columns

# 3: visualise the correlations
sns.heatmap(data.corr())

# 4: encode categorical data -- the State column holds city names as
# strings, so it has to be converted to numeric values here.
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer

labelencoder = LabelEncoder()
X[:, 3] = labelencoder.fit_transform(X[:, 3])
# One-hot encode the (now numeric) State column.
columnTransfer = ColumnTransformer([("State", OneHotEncoder(), [3])],
                                   remainder='passthrough')
X = columnTransfer.fit_transform(X)

# 5: avoid the dummy-variable trap by dropping the first dummy column
X = X[:, 1:]

# 6: split into train and test sets
from sklearn.model_selection import train_test_split
XTrain, XTest, yTrain, yTest = train_test_split(X, y, test_size=0.2,
                                                random_state=0)

# 7: fit a multiple linear regression model on the training set
from sklearn.linear_model import LinearRegression
linearReg = LinearRegression().fit(XTrain, yTrain)

# 8: predict on the test set
yPred = linearReg.predict(XTest)

# 9: coefficients and intercept
print(linearReg.coef_)
print(linearReg.intercept_)

# 10: evaluate the model with R^2
from sklearn.metrics import r2_score
r2Score = r2_score(yTest, yPred)
print(r2Score)
|
### Summary
# This module takes an input as an image, converts the image to a string and determines
# the game state
# -------------------------------------------------
import requests
from modules.config import *
from PIL import Image
import pytesseract #pip3 install pytesseract
import os
import time
# If you are getting pytesseract error, change r"C:\Program Files\Tesseract-OCR\tesseract.exe" to r"C:\Users\USER\AppData\Local\Tesseract-OCR\tesseract.exe" and
# change USER to you computer name
#pytesseract.pytesseract.tesseract_cmd = r"C:\Users\USER\AppData\Local\Tesseract-OCR\tesseract.exe"
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
def processDiscussion(image) -> int:
    """Classify a discussion-screen frame via OCR.

    :param image: screenshot image accepted by pytesseract (the original
        annotated this parameter as ``complex``, which was simply wrong).
    :return: 1 = nothing found, 2 = discussion found, 3 = "voting ended" found
    """
    discussion = keyword_whos_imposter
    voting = keyword_voting_ended
    raw_output = pytesseract.image_to_string(image)
    # Normalise the OCR text into a set of lower-case words.
    out = set(raw_output.strip().strip('\n').strip("\\").strip("/").lower().split(" "))
    if debug_mode:
        print(out)
    if len(out.intersection(discussion)) != 0:  # a discussion-time keyword is on screen
        print("DISCUSSION [UNMUTED]")
        requests.get(f"http://{address}:{port}/discussion")
        return 2
    elif len(out.intersection(voting)) != 0:  # an end-of-voting keyword is on screen
        print("VOTING ENDED [MUTING SOON]")
        return 3
    else:
        return 1
def processEnding(image) -> bool:
    """Detect end-of-round / role-reveal screens via OCR and mute/unmute.

    :param image: screenshot image accepted by pytesseract (the original
        ``complex`` annotation was wrong).
    :return: True when the game ended (defeat/victory), False otherwise.
    """
    delay = 3.5  # seconds between the role reveal and the round starting
    defeat = keyword_defeat
    victory = keyword_victory
    imposter = keyword_imposter
    crewmate = keyword_crewmate
    raw_output = pytesseract.image_to_string(image)
    # Normalise the OCR text into a set of lower-case words.
    out = set(raw_output.strip().strip('\n').strip("\\").strip("/").lower().split(" "))
    if debug_mode:
        print(out)
    if len(out.intersection(defeat)) > 0:
        print("DEFEAT [UNMUTED]")
        requests.get(f"http://{address}:{port}/clear")  # unmute everyone including the dead
        return True
    elif len(out.intersection(victory)) > 0:
        print("VICTORY [UNMUTED]")
        requests.get(f"http://{address}:{port}/clear")  # unmute everyone including the dead
        return True
    elif len(out.intersection(crewmate)) > 0:
        print("YOU GOT CREWMATE [MUTING SOON]")
        time.sleep(delay + delay_start)
        requests.get(f"http://{address}:{port}/task")  # mute
        return False
    elif len(out.intersection(imposter)) > 0:
        print("YOU GOT IMPOSTER [MUTING SOON]")
        time.sleep(delay + delay_start)
        requests.get(f"http://{address}:{port}/task")  # mute
        return False
    else:
        print(".")
        return False
if __name__ == "__main__":
print("[*] Please run start.py: ")
exit() |
# Print the length of an input string.
text = input()
print(len(text))
# Print the characters whose code points run from `start` through `stop`,
# each followed by a single space.
start = int(input())
stop = int(input())
pieces = [f"{chr(code)} " for code in range(start, stop + 1)]
print("".join(pieces))
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.common.exceptions import ElementNotVisibleException
# Accumulates one 16-field record per scraped institute (filled by lookup()).
colleges = []
def init_driver():
    """Create a Chrome driver carrying a 5-second explicit-wait helper."""
    chrome = webdriver.Chrome(executable_path='/path/to/chromedriver')
    chrome.wait = WebDriverWait(chrome, 5)
    return chrome
def lookup(driver):
    """Scrape every institute profile linked from the MCC listing page.

    For each link in the listing table: open the profile (new window),
    wait for the detail table to render, pull 16 text fields, append them
    to the module-level ``colleges`` list and write them to colleges.csv.
    """
    delay = 60  # seconds to wait for a profile page to render
    driver.get('http://www.mcc.nic.in/MCCRes/Institute-Profile')
    time.sleep(1)
    # Every institute name in the listing is an <a> in the 2nd column.
    links = driver.find_elements_by_xpath("""//*/td[2]/span/a""")
    print len(links)
    global colleges
    counter = 0
    total = len(links)
    for link in links:
        counter+=1
        print "Procession link ", counter, " of ", total, "..."
        print "Opening ", link.get_attribute('href')
        try:
            link.click()
        except ElementNotVisibleException:
            # The link is on the "Medical" tab; switch to it and retry.
            driver.find_element_by_xpath("""//*[@id="ctl00_ContentPlaceHolder1_Medical"]/div/span[2]""").click()
            link.click()
        time.sleep(2)
        # The profile opens in a second window; switch focus to it.
        driver.switch_to_window(driver.window_handles[1])
        driver.implicitly_wait(5)
        try:
            # Wait until a late detail cell of the profile table exists.
            element_present = EC.presence_of_element_located((By.XPATH, """//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[7]/td[3]/div"""))
            WebDriverWait(driver, delay).until(element_present)
            print "Page is ready!"
            try:
                # 16 profile fields, addressed by absolute table XPaths.
                college = [
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[2]/td[3]/table/tbody/tr[2]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[2]/td[3]/table/tbody/tr[2]/td[4]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[2]/td[3]/table/tbody/tr[3]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[2]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[3]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[5]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[6]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[7]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[8]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[9]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[10]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[11]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[12]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[13]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[14]/td[3]/div""").text,
                    driver.find_element_by_xpath("""//*/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr[6]/td[3]/table/tbody/tr[15]/td[3]/div""").text
                ]
                print "Saving ", college[1]
                colleges.append(college)
                write_to_file(college)
            except NoSuchElementException:
                print "Error at ", counter
                pass
            except Exception, e:
                # Python 2 syntax; any other scrape failure skips the record.
                print "Error at ", counter
                pass
        except TimeoutException:
            print "Loading took too much time!"
        driver.close()
        print "closed"
        time.sleep(2)
        # Back to the listing window for the next link.
        driver.switch_to_window(driver.window_handles[0])
def write_to_file(data):
    """Append one record to colleges.csv as a quoted, UTF-8 encoded row."""
    row = ",".join([u'"{0}"'.format(field) for field in data]) + '\n'
    with open('colleges.csv', 'a') as f:
        f.write(row.encode('utf8'))
if __name__ == "__main__":
driver = init_driver()
lookup(driver)
time.sleep(5)
driver.quit()
|
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, AveragePooling2D
from keras.regularizers import l2
from keras.layers.normalization import BatchNormalization
import keras
def LeNet():
    """Build and compile a LeNet-5 style CNN for 32x32x3 inputs, 10 classes."""
    model = Sequential()
    # C1: 6 feature maps with a 5x5 receptive field.
    model.add(Conv2D(6, kernel_size=(5, 5), strides=(1, 1), padding='valid',
                     activation='relu', input_shape=(32, 32, 3)))
    # S2: 2x2 average pooling.
    model.add(AveragePooling2D(pool_size=2, strides=2, padding='valid'))
    # C3: 16 feature maps, 5x5.
    model.add(Conv2D(16, kernel_size=(5, 5), strides=(1, 1), padding='valid',
                     activation='relu'))
    # S4: 2x2 average pooling.
    model.add(AveragePooling2D(pool_size=2, strides=2, padding='valid'))
    # Classifier head: flatten, then the classic 120-84-10 dense stack.
    model.add(Flatten())
    for units, act in ((120, 'relu'), (84, 'relu'), (10, 'softmax')):
        model.add(Dense(units, activation=act))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model
# Build (and print a summary of) the network at import time.
classifier = LeNet()
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Config processing and expansion.
import copy
import json
import os
import sys
import yaml
# Local imports
import vm_images
def ProjectUrl(project):
    """Return the Compute Engine v1 API base URL for *project*."""
    api_version = 'v1'
    return 'https://www.googleapis.com/compute/{0}/projects/{1}'.format(
        api_version, project)
def ProjectZoneUrl(project, zone):
    """Return the URL of *zone* within *project*."""
    return '{0}/zones/{1}'.format(ProjectUrl(project), zone)
def ProjectGlobalUrl(project):
    """Return the URL of *project*'s global resource collection."""
    return '{0}/global'.format(ProjectUrl(project))
class ConfigExpander(object):
    """Expands a YAML instance-config file into concrete GCE instance specs.

    Converts short resource names (machineType, network, zone, sourceImage)
    into full API URLs, inlines %file startup scripts, and fans
    numReplicas out into one spec per replica.
    """

    def __init__(self, **kwargs):
        # Stash all keyword settings (expects at least 'project' and 'zone').
        self.__kwargs = {}
        for key, value in kwargs.iteritems():
            self.__kwargs[key] = value

    def __ZoneUrl(self):
        # URL of the default zone supplied at construction time.
        return ProjectZoneUrl(self.__kwargs['project'], self.__kwargs['zone'])

    def __MachineTypeToUrl(self, instance_name):
        # Short machine-type name -> full URL under the default zone.
        return '%(zone_url)s/machineTypes/%(instance_name)s' % {
            'zone_url': self.__ZoneUrl(),
            'instance_name': instance_name,
        }

    def __NetworkToUrl(self, network):
        # TODO(mbrukman): make this an auto-generated globally-unique name?
        return '%(project_url)s/networks/%(network)s' % {
            'project_url': ProjectGlobalUrl(self.__kwargs['project']),
            'network': network,
        }

    def __ZoneToUrl(self, zone):
        # Short zone name -> full URL under the configured project.
        return ProjectZoneUrl(self.__kwargs['project'], zone)

    def ExpandFile(self, file_name):
        """Load *file_name* (YAML) and return the expanded list of specs."""
        with open(file_name) as input_yaml:
            config = yaml.safe_load(input_yaml)
        # Expand the configuration.
        # * convert numReplicas > 1 into multiple specs
        # * convert machineType from a short name to a URL
        # * convert network from a short name to a URL
        # * convert sourceImage from name to URL
        # * convert zone from a name to a URL
        # * read files if specified in startupScript
        expanded_config = []
        for instance in config:
            if 'machineType' in instance:
                machineType = instance['machineType']
                if not (machineType.startswith('http://') or
                        machineType.startswith('https://')):
                    instance['machineType'] = self.__MachineTypeToUrl(machineType)
            if 'metadata' in instance:
                metadata = instance['metadata']
                if 'items' in metadata:
                    items = metadata['items']
                    for item in items:
                        if item['value'].startswith('%file'):
                            # Find the sourced file relative to the config file.
                            file_path = os.path.join(os.path.dirname(file_name), item['value'][6:])
                            if not os.path.exists(file_path):
                                sys.stderr.write('Error: startup script "%s" not found.\n' %
                                                 file_path)
                                sys.exit(1)
                            with open(file_path) as file_input:
                                file_contents = file_input.read()
                            # The limit for startup scripts sent via metadata is 35000 chars:
                            #
                            # "If your startup script is less than 35000 bytes, you could choose
                            # to pass in your startup script as pure metadata, [...]"
                            #
                            # https://developers.google.com/compute/docs/howtos/startupscript#example
                            if len(file_contents) >= 35000:
                                # TODO(mbrukman): write an automatic push-to-CloudStore to make
                                # this easy for the user.
                                sys.stderr.write('Startup script too large (%d); must be < 35000 chars; '
                                                 'please use "startup-script-url" instead.')
                                sys.exit(1)
                            item['value'] = file_contents
            if 'networkInterfaces' in instance:
                networkInterfaces = instance['networkInterfaces']
                for networkIntf in networkInterfaces:
                    network_name = networkIntf['network']
                    if not (network_name.startswith('http://') or
                            network_name.startswith('https://')):
                        networkIntf['network'] = self.__NetworkToUrl(network_name)
            if 'zone' in instance:
                zone = instance['zone']
                if not (zone.startswith('http://') or
                        zone.startswith('https://')):
                    instance['zone'] = self.__ZoneToUrl(zone)
            else:
                # No zone given: fall back to the constructor default.
                instance['zone'] = self.__ZoneUrl()
            if 'disks' in instance:
                for disk in instance['disks']:
                    if 'initializeParams' in disk:
                        initializeParams = disk['initializeParams']
                        # Translate sourceImage base name -> URL, if not already a URL.
                        if 'sourceImage' in initializeParams:
                            sourceImage = initializeParams['sourceImage']
                            if not (sourceImage.startswith('http://') or
                                    sourceImage.startswith('https://')):
                                disk['initializeParams']['sourceImage'] = vm_images.ImageShortNameToUrl(sourceImage)
        # convert numReplicas > 1 into multiple specs, updating the config
        for instance in config:
            numReplicas = 1
            if 'numReplicas' in instance:
                numReplicas = instance['numReplicas']
                del instance['numReplicas']
            for replicaId in range(0, numReplicas):
                replica_copy = copy.deepcopy(instance)
                # Allow the user to have some string substitutions in the name.
                replica_copy['name'] = replica_copy['name'] % {
                    'env_user': os.getenv('USER'),
                }
                # Update the name to '<instance-name>-<replica-id>'.
                replica_copy['name'] = '%s-%d' % (replica_copy['name'], replicaId)
                # Update the PD name to '<instance-name>-<replica-id>-disk-<disk-id>'.
                if 'disks' in replica_copy:
                    disks = replica_copy['disks']
                    for diskId, disk in enumerate(replica_copy['disks']):
                        if 'initializeParams' in disk:
                            initializeParams = disk['initializeParams']
                            if 'diskName' not in initializeParams:
                                initializeParams['diskName'] = '%s-disk-%d' % (replica_copy['name'], diskId)
                        if 'source' in disk:
                            source = disk['source']
                            if not (source.startswith('http://') or
                                    source.startswith('https://')):
                                # Find the right zone for the disk, with fallbacks in the following order:
                                # * specified for the disk explicitly
                                # * specified for the instance explicitly
                                # * specified as a flag to this process
                                zone = None
                                if 'zone' in disk:
                                    zone = disk['zone']
                                elif 'zone' in replica_copy:
                                    zone = replica_copy['zone']
                                else:
                                    zone = self.__kwargs['zone']
                                # Convert zone name to URL, if necessary.
                                if (zone.startswith('http://') or
                                        zone.startswith('https://')):
                                    zone_url = zone
                                else:
                                    zone_url = self.__ZoneToUrl(zone)
                                source_url = '%(zone_url)s/disks/%(disk)s' % {
                                    'zone_url': zone_url,
                                    'disk': source,
                                }
                                # The disk name itself may reference the instance name.
                                source_url = source_url % {
                                    'instance_name': replica_copy['name'],
                                }
                                disk['source'] = source_url
                        if 'deviceName' not in disk:
                            disk['deviceName'] = 'disk-%d' % diskId
                expanded_config.append(replica_copy)
        return expanded_config
def main(argv):
    """Expand the YAML file named in argv[1] and print the result as JSON."""
    if len(argv) < 2:
        sys.stderr.write('Missing YAML file as argument\n')
        sys.exit(1)
    expander = ConfigExpander(project='dummy-project', zone='dummy-zone')
    config = expander.ExpandFile(argv[1])
    # Equivalent to the original `print` statement: JSON plus a newline.
    sys.stdout.write(json.dumps(config, indent=2, separators=(',', ': ')) + '\n')


if __name__ == '__main__':
    main(sys.argv)
|
# hard
# 字节技术营笔试压轴题
# 滑动窗口 + 队列 + 前缀和
# 指路 209
class Solution:
    """LeetCode 862: shortest subarray with sum >= k (negatives allowed)."""

    def shortestSubarray(self, nums, k):
        """Return the length of the shortest non-empty subarray of *nums*
        whose sum is at least *k*, or -1 when none exists.

        Sliding window over prefix sums with a monotonic deque; O(n).
        (The original annotated the signature with the undefined name
        ``List`` and used ``deque``/``math`` without importing them.)
        """
        from collections import deque

        # prefix[i] = sum of nums[:i]
        prefix = [0]
        for v in nums:
            prefix.append(prefix[-1] + v)
        window = deque()      # indices of a strictly increasing run of prefix sums
        best = float('inf')
        for i, cur in enumerate(prefix):
            # A left endpoint that already satisfies the sum can never pair
            # with a later i for a *shorter* answer -- pop it for good.
            while window and cur - prefix[window[0]] >= k:
                best = min(best, i - window.popleft())
            # Keep prefix values increasing: a larger-or-equal earlier
            # prefix is dominated by the current index.
            while window and cur < prefix[window[-1]]:
                window.pop()
            window.append(i)
        return best if best != float('inf') else -1
|
# integer (int)
a = 10
# type(value): print the runtime type of a value
print(type(a))

# floating point (float)
a = 3.14
print(type(a))

# string (str)
a = "songjinwoo"
print(type(a))

# boolean (bool)
a = True
print(type(a))
|
import numpy as np
import pandas as pd
import math
# euclidean distance
def d_euclid(row1, row2):
    """Euclidean distance between two equal-length point sequences."""
    return np.linalg.norm(np.array(row1) - np.array(row2))
class k_means():
    """Plain k-means clustering over a (n_samples, n_features) NumPy array.

    Fixes over the original:
      * centroid initialisation appended a *generator object* instead of
        the chosen rows;
      * the main loop iterated ``range(iter)`` over the builtin instead of
        ``self.iter`` (TypeError);
      * ``check_error`` iterated ``self.k`` (an int) instead of
        ``range(self.k)``;
      * ``np.mean`` lacked ``axis=0``, collapsing each centroid to a scalar.
    """

    def __init__(self, k, dataset, iter):
        # `iter` shadows the builtin, but the parameter name is kept for
        # caller compatibility.
        self.k = k
        self.dataset = dataset.copy()
        self.n_data, self._variables = dataset.shape
        self.iter = iter
        self.clusters = [[] for i in range(self.k)]
        self.centroids = []

    def clustering(self):
        """Run k-means; return an array mapping each row to a cluster id."""
        # Pick k distinct rows of the dataset as the initial centroids.
        cent_ind = np.random.choice(self.n_data, self.k, replace=False)
        self.centroids = [self.dataset[i] for i in cent_ind]
        for _ in range(self.iter):
            # Assign every row to its nearest centroid.
            self.clusters = self.init_cluster(self.centroids)
            # Re-estimate the centroids from the new assignment.
            old_centroids = self.centroids
            self.centroids = self.opt_centroid(self.clusters)
            # Stop early once the centroids no longer move.
            if self.check_error(old_centroids, self.centroids):
                break
        return self.final_clusters(self.clusters)

    def init_cluster(self, centroids):
        """Assign every row to its nearest centroid; return index lists."""
        clusters = [[] for i in range(self.k)]
        for i, sample in enumerate(self.dataset):
            cent_ind = self.nearest_centroid(centroids, sample)
            clusters[cent_ind].append(i)
        return clusters

    def nearest_centroid(self, centroids, sample):
        """Index of the centroid closest (Euclidean) to *sample*."""
        distances = [np.linalg.norm(np.asarray(sample) - np.asarray(center))
                     for center in centroids]
        return np.argmin(distances)

    def opt_centroid(self, clusters):
        """Recompute each centroid as the mean of its assigned rows."""
        new_centroids = np.zeros((self.k, self._variables))
        for i, cluster in enumerate(clusters):
            if cluster:  # an empty cluster keeps a zero centroid
                new_centroids[i] = np.mean(self.dataset[cluster], axis=0)
        return new_centroids

    def check_error(self, old_centroids, centroids):
        """True when no centroid moved between two iterations."""
        error = [np.linalg.norm(np.asarray(old_centroids[i]) -
                                np.asarray(centroids[i]))
                 for i in range(self.k)]
        return sum(error) == 0

    def final_clusters(self, clusters):
        """Flatten the index lists into a per-row array of cluster labels."""
        final = np.empty(self.n_data)
        for n_cluster, cluster in enumerate(clusters):
            for ind in cluster:
                final[ind] = n_cluster
        return final
# file
# create dataset from csv file
# def create_dataset(filename):
# dataset = pd.read_csv(filename, header = 0, index_col = None)
#
|
#!/proj/sot/ska3/flight/bin/python
#############################################################################################
# #
# analyze_sim_data.py: read data from TL files and analyze sim movements #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Jul 19, 2023 #
# #
#############################################################################################
import os
import sys
import re
import math
import string
import random
import time
import Chandra.Time
#
#--- reading directory list
#
#path = '/data/mta/Script/SIM/Scripts/house_keeping/dir_list'
path = '/data/mta4/testSIM/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
# Each dir_list line is "<value> : <name>"; bind <name> at module level.
for ent in data:
    parts = re.split(':', ent)
    name = parts[1].strip()
    value = parts[0].strip()
    # NOTE(review): executes text read from dir_list -- the file must be
    # trusted.
    exec("%s = %s" % (name, value))
sys.path.append("/data/mta4/Script/Python3.10/MTA")
#--- import several functions
#
import mta_common_functions as mcf
#
#--- randomized temporary file name for scratch output
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
tl_dir = exc_dir + '/TL/'
# TSC step positions and the detector location each corresponds to.
simt = [23336, 92905, 75620, -50505, -99612]
tscloc = ["SAFE", "ACIS-I", "ACIS-S", "HRC-I", "HRC-S"]
# FA step positions and their location labels.
simf = [-595, -505, -536, -468, -716, -991, -1048, -545, -455, -486, -418, -666, -941, -998]
faloc = ["INIT1", "INIT2", "ACIS-I", "ACIS-S", "HRC-I", "HRC-S", "HRC-S", "INIT1+", "INIT2+",
         "ACIS-I+", "ACIS-S+", "HRC-I+", "HRC-S+", "HRC-S+"]
#
#--- test limits
#
tsc_test = 9
fa_test = 9
#---------------------------------------------------------------------------------------
#-- run_tl_analysis: run sim analysis function ---
#---------------------------------------------------------------------------------------
def run_tl_analysis():
    """
    run sim analysis function
    input:none
    output: tsc_temps.txt etc (see analyze_sim_data)
    """
#
#--- check whether there are tl data
#
    if os.listdir(tl_dir) != []:
        analyze_sim_data()
#
#--- clean up TL directory
#
        # NOTE(review): cleanup and dedup are assumed to run only when new
        # TL data was processed -- confirm the intended nesting.
        cmd = 'rm -rf ' + tl_dir + '*'
        os.system(cmd)
#
#--- order and removed duplicated entries from tsc_temps.txt
#
        clean_tsc_data()
#---------------------------------------------------------------------------------------
#-- analyze_sim_data: read data from TL files and analyze sim movements --
#---------------------------------------------------------------------------------------
def analyze_sim_data():
"""
read data from TL files and analyze sim movements
input: none, but read from saved TL files
output: <data_dir>/sim_ttabs.out
<data_dir>/sim_summary.out
<data_dir>/tsc_pos.out
<data_dir>/fa_pos.out
<data_dir>/errors.lis
<data_dir>/plotfile.out
<data_dir>/tsc_histogram.out
<data_dir>/limits.txt
<data_dir>/tsc_temps.txt
<data_dir>/tsc_temps2.txt
"""
#
#--- read data from tl files
#
cmd = 'ls ' + tl_dir + '*SIM*tl* > ' + zspace
os.system(cmd)
file_list = mcf.read_data_file(zspace, remove=1)
tldat, deltsc, delfa, ntsc, nfa = read_tl_file(file_list)
#
#--- open the list of lists; just to make it easy to read
#
tsec = tldat[0]
tdays = tldat[1]
dd = tldat[2]
tsc = tldat[3]
tscmove = tldat[4]
fa = tldat[5]
famove = tldat[6]
maxpwm = tldat[7]
tabaxis = tldat[8]
tabno = tldat[9]
tabpos = tldat[10]
motoc = tldat[11]
stall = tldat[12]
tflexa = tldat[13]
tflexb = tldat[14]
tflexc = tldat[15]
ttscmot = tldat[16]
tfamot = tldat[17]
tseaps = tldat[18]
trail = tldat[19]
tseabox = tldat[20]
trpm = tldat[21]
frpm = tldat[22]
tstate = tldat[23]
fstate = tldat[24]
tloc = tldat[25]
tsc_err = tldat[26]
floc = tldat[27]
fa_err = tldat[28]
bus_volts = tldat[29]
#
#--- initialization
#
ttab_line = ''
sum_line = ''
tsc_line = ''
fa_line = ''
err_line = ''
plt_line = ''
th_line = ''
lim_line = ''
ttmp_line = ''
ttmp2_line = ''
lasttsc = tsc[0]
lastfa = fa[0]
lasttabaxis = tabaxis[0]
lasttabno = tabno[0]
lasttabpos = tabpos[0]
tscpos = 0
tscneg = 0
fapos = 0
faneg = 0
tmoves = 0
fmoves = 0
n_tt_sum = 0
n_fa_sum = 0
sumsq_tt_err = 0.0
rms_tt_err = 0.0
sumsq_fa_err = 0.0
rms_fa_err = 0.0
tscsum = 0.0
tsctot = 0.0
fasum = 0.0
fatot = 0.0
last_tsctot = 0.0
last_tscsum = 0.0
last_tmoves = 0.0
last_fatot = 0.0
last_fasum = 0.0
last_fmoves = 0.0
for k in range(1, len(tldat[0])):
#
#--- check errors
#
terror = 'FALSE'
del_sec = tsec[k] - tsec[k-1]
if (del_sec > 33.0) or (del_sec < 32.0):
terror = 'TRUE'
err_line = err_line + "%16s TIME SKIP ERROR - %20s %12.3f %12.3f %12.1f\n" \
% (dd[k], dd[k-1], tsec[k], tsec[k-1], del_sec)
trpm[k] = (60.0/32.8) * (tsc[k] - tsc[k-1]) / 18.0
if (terror == "FALSE") and (abs(trpm[k]) > 3200.0):
err_line = err_line + "%16s TSC RPM ERROR - %10d %10d %10d\n" \
% (dd[k], tsc[k], tsc[k-1], trpm[k])
tsc[k] = lasttsc
trpm[k] = 0.0
frpm[k] = (60.0/32.8) * (fa[k] - fa[k-1]) / 18.0
if (terror == 'FALSE') and (abs(frpm[k]) > 1200.):
err_line = err_line + "%16s FA RPM ERROR - %10d %10d\n" \
% (dd[k], fa[k], frpm[k])
fa[k] = lastfa
frpm[k] = 0.0
#
#--- update tsc movment data
#
if tsc[k-1] != tsc[k]:
tstate[k] = 'MOVE'
if tsc[k] > tsc[k-1]:
tscpos += tsc[k] - lasttsc
tscsum += tsc[k] - lasttsc
tsctot += tsc[k] - lasttsc
else:
tscneg -= tsc[k] - lasttsc
tscsum += tsc[k] - lasttsc
tsctot -= tsc[k] - lasttsc
else:
tstate[k] = 'STOP'
#
#--- update fa movement data
#
if fa[k-1] != fa[k]:
fstate[k] = "MOVE"
if fa[k] > lastfa:
fapos += fa[k] - lastfa
fasum += fa[k] - lastfa
fatot += fa[k] - lastfa
else:
faneg -= fa[k] - lastfa
fasum += fa[k] - lastfa
fatot -= fa[k] - lastfa
else:
fstate[k] = "STOP"
if (fstate[k-1] == 'STOP') and(fstate[k] == 'MOVE'):
fmoves += 1
ptab = 0
if tabaxis[k-1] != tabaxis[k]:
ptab = 1
if tabno[k-1] != tabno[k]:
ptab = 1
if tabpos[k-1] != tabpos[k]:
ptab = 1
#
#--- TSC moved
#
if (tstate[k-1] == 'STOP') and (tstate[k] == 'MOVE'):
start_tsc = tsc[k-1]
tsc_line = tsc_line + "\n%16s %8s %6s %6s %6s %3s %7s %4s %4s %4s \
%10s %10s %4s %3s %3s %6s %5s %2s %6s %6s\n" \
% ("DATE", "TSCPOS", "STATE", "MFLAG", "RPM", \
"PWM", " LOCN", "ERR", " N", " RMS",\
"TOTSTP", "DELSTP", "MVES", "OVC", "STL",\
"TMOT", "AXIS", "NO", "TABPOS", "TRAIL")
tsc_line = tsc_line + "%16s %8d %6s %6s %6d %3d %7s %4d %4s %4s \
%10d %10d \%4d %3d %3d %6.1f\n" \
% (dd[k-1], tsc[k-1], tstate[k-1], tscmove[k-1], \
trpm[k-1], maxpwm[k-1], tloc[k-1], tsc_err[k-1],\
" ", " ", last_tsctot, last_tscsum, \
last_tmoves, motoc[k-1], stall[k-1], ttscmot[k-1])
temp_tsc_start = ttscmot[k]
max_tsc_pwm = maxpwm[k]
tsc_pos_start = tsc[k-1]
met = tdays[k-1] - 204.5
metyr = met / 365.0
ttmp2_line = ttmp2_line + "%16s %10.4f %10.4f %6.1f\n" \
% (dd[k-1], met, metyr, ttscmot[k-1])
#
#--- continue TSC move
#
if tstate[k] == 'MOVE':
tsc_line = tsc_line + "%16s %8d %6s %6s %6d %3d %7s %4d %4s \
%4s %10d %10d %4d %3d %3d %6.1f"\
% (dd[k], tsc[k], tstate[k], tscmove[k], trpm[k], \
maxpwm[k], tloc[k], tsc_err[k], " ", " ", \
tsctot, tscsum, tmoves, motoc[k], stall[k], ttscmot[k])
if (ptab > 0) and (tabaxis[k] == 'TSC'):
tsc_line = tsc_line + "%6s %2d %6d %6.1f\n" \
% (tabaxis[k], tabno[k], tabpos[k], trail[k])
else:
tsc_line = tsc_line + '\n'
if motoc[k] > 0:
lim_line = lim_line + "%16s %8d\n" % (dd[k], motoc[k])
if maxpwm[k-1] > max_tsc_pwm:
max_tsc_pwm = maxpwm[k]
#
#--- TSC stopped
#
if (tstate[k-1] == "MOVE") and (tstate[k] == "STOP"):
if tsc_err[k] < 999:
n_tt_sum = n_tt_sum + 1
sumsq_tt_err = sumsq_tt_err + tsc_err[k]**2
rms_tt_err = math.sqrt(sumsq_tt_err / n_tt_sum)
stop_tsc = tsc[k];
tsc_move_size = stop_tsc - start_tsc
met = tdays[k] - 204.5
metyr = met / 365.0
if maxpwm[k-1] > max_tsc_pwm:
max_tsc_pwm = maxpwm[k]
tsc_pos_end = tsc[k]
tsc_steps_moved = abs(tsc_pos_end - tsc_pos_start)
if tsc_steps_moved > 0:
tmoves += 1
tsc_line = tsc_line + "%16s %8d %6s %6s %6d %3d %7s %4d %4d %4.1f \
%10d %10d %4d %3d %3d %6.1f\n"\
% (dd[k], tsc[k], tstate[k], tscmove[k], trpm[k],\
maxpwm[k], tloc[k], tsc_err[k], n_tt_sum, \
rms_tt_err, tsctot, tscsum, tmoves, motoc[k],\
stall[k], ttscmot[k])
th_line = th_line + "%16s %10d %10d %10d\n" \
% (dd[k], start_tsc, stop_tsc, tsc_move_size)
ttmp2_line = ttmp2_line + "%16s %10.4f %10.4f %6.1f\n" \
% (dd[k], met, metyr, ttscmot[k])
ttmp_line = ttmp_line + "%16s %10.4f %6.1f %6.1f %6d %8d %4d %4d %6.1f\n" \
% (dd[k], metyr, temp_tsc_start, ttscmot[k], max_tsc_pwm,\
tsc_steps_moved, motoc[k], stall[k], bus_volts[k])
plt_line = plt_line + "%16s %10.4f %10.4f %10.4f %6d %10d %6d \
%6.1f %6d %10d %6d %6.1f %10d %10d\n"\
% (dd[k], tdays[k], met, metyr, tmoves, tsctot, n_tt_sum,\
rms_tt_err, fmoves, fatot, n_fa_sum, rms_fa_err,\
tsc[k], fa[k])
#
#--- FA moved
#
if (fstate[k-1] == "STOP") and (fstate[k] == "MOVE"):
fa_line = fa_line + '\n'
fa_line = fa_line + "%16s %8s %6s %6s %6s %3s %7s %4s %4s %4s %10s \
%10s %4s %3s %3s %6s %5s %2s %6s %6s\n"\
% ("DATE", "TSCPOS", "STATE", "MFLAG", "RPM", "PWM", \
" LOCN", "ERR", " N", " RMS", "TOTSTP", "DELSTP",\
"MVES", "OVC", "STL", "TMOT", "AXIS", "NO", "TABPOS", "TRAIL")
fa_line = fa_line + "%16s %8d %6s %6s %6d %3d %7s %4d %4s \
%4s %10d %10d %4d %3d %3d %6.1f\n"\
% (dd[k-1], fa[k-1], fstate[k-1], famove[k-1], frpm[k-1], \
maxpwm[k-1], floc[k-1], fa_err[k-1], " ", " ", \
last_fatot, last_fasum, last_fmoves, motoc[k-1], \
stall[k-1], tfamot[k-1])
#
#--- Continue FA move
#
if fstate[k] == 'MOVE':
fa_line = fa_line + "%16s %8d %6s %6s %6d %3d %7s %4d %4s \
%4s %10d %10d %4d %3d %3d %6.1f"\
% (dd[k], fa[k], fstate[k], famove[k], frpm[k], maxpwm[k],\
floc[k], fa_err[k], " ", " ", fatot, fasum, \
fmoves, motoc[k], stall[k], tfamot[k])
if (ptab > 0) and (tabaxis[k] == "FA" ):
fa_line = fa_line + "%6s %2d %6d %6.1f\n"\
% (tabaxis[k], tabno[k], tabpos[k], trail[k])
else:
fa_line = fa_line + '\n'
#
#--- FA stopped
#
if (fstate[k-1] == "MOVE") and (fstate[k] == "STOP"):
if fa_err[k] < 999:
n_fa_sum = n_fa_sum + 1
sumsq_fa_err = sumsq_fa_err + fa_err[k]**2
rms_fa_err = math.sqrt(sumsq_fa_err / n_fa_sum)
fa_line = fa_line + "%16s %8d %6s %6s %6d %3d %7s %4d %4d \
%4.1f %10d %10d %4d %3d %3d %6.1f\n"\
% (dd[k], fa[k], fstate[k], famove[k], frpm[k], maxpwm[k], floc[k],\
fa_err[k], n_fa_sum, rms_fa_err, fatot, fasum, fmoves, motoc[k],\
stall[k], tfamot[k])
met = tdays[k] - 204.5
metyr = met / 365.0
plt_line = plt_line + "%16s %10.4f %10.4f %10.4f %6d %10d \
%6d %6.1f %6d %10d %6d %6.1f %10d %10d\n"\
% (dd[k], tdays[k], met, metyr, tmoves, tsctot, n_tt_sum,\
rms_tt_err, fmoves, fatot, n_fa_sum, rms_fa_err, tsc[k], fa[k])
if ptab > 0:
if tabaxis[k] == "TSC":
ttab_line = ttab_line + "%16s %6s %2d %6d %6.1f\n"\
% (dd[k], tabaxis[k], tabno[k], tabpos[k], trail[k])
last_tscsum = tscsum
last_tsctot = tsctot
last_fasum = fasum
last_fatot = fatot
last_tmoves = tmoves
last_fmoves = fmoves
lasttsc = tsc[k]
lastfa = fa[k]
lasttabaxis = tabaxis[k]
lasttabno = tabno[k]
lasttabpos = tabpos[k]
init = 1
if ntsc < 1:
ntsc = 1
if nfa < 1:
nfa = 1
#
#--- create summary table
#
tscerr = deltsc / ntsc
faerr = delfa / nfa
sum_line = sum_line + "%24s %16d\n" % ("Total FA moves: ", fmoves)
sum_line = sum_line + "%24s %16d\n" % ("Total TT moves: ", tmoves)
sum_line = sum_line + "%24s %16d\n" % ("Sum of Pos TSC Steps:", tscpos)
sum_line = sum_line + "%24s %16d\n" % ("Sum of Neg TSC Steps:", tscneg)
sum_line = sum_line + "%24s %16d\n" % ("Sum of Pos FA Steps:", fapos)
sum_line = sum_line + "%24s %16d\n" % ("Sum of Pos FA Steps:", faneg)
sum_line = sum_line + "%24s %16.2f\n" % ("Avg Error in TSC Position: ", tscerr)
sum_line = sum_line + "%24s %16.2f\n" % ("Avg Error in FA Position: ", faerr)
#
#--- write out the results
#
l_list = [ttab_line, sum_line, tsc_line, fa_line, err_line, \
plt_line, th_line, lim_line, ttmp_line, ttmp2_line]
o_list = ['sim_ttabs.out', 'sim_summary.out', 'tsc_pos.out', 'fa_pos.out', 'errors.lis',\
'plotfile.out', 'tsc_histogram.out', 'limits.txt', 'tsc_temps.txt', 'tsc_temps2.txt']
f_list = ['w','w','w','w','w','w','w','w','a','a']
for k in range(0, len(o_list)):
out = data_dir + o_list[k]
with open(out, f_list[k]) as fo:
fo.write(l_list[k])
#---------------------------------------------------------------------------------------
#-- read_tl_file: read all tl files and create data table --
#---------------------------------------------------------------------------------------
def read_tl_file(file_list):
    """
    read all tl files and create data table
    input:  file_list --- a list of tl files
        tl file columns:
            0 time       1 3seaid    2 3searset  3 3searomf  4 3searamf
            5 3seaincm   6 3tscmove  7 3tscpos   8 3famove   9 3fapos
            10 3mrmmxmv  11 3smotoc  12 3smotstl 13 3stab2en 14 3ldrtmek
            15 3ldrtno   16 3ldrtpos 17 3faflaat 18 3faflbat 19 3faflcat
            20 3trmtrat  21 3famtrat 22 3fapsat  23 3ttralat 24 3faseaat
            25 3smotpen  26 3smotsel 27 3prmramf 28 3spdmpa  29 3shtren
            30 elbv
    output: tldat --- a list of 30 lists:
            0 tsec          1 tdays (dom)   2 dd (display)  3 tsc
            4 tscmove       5 fa            6 famove        7 maxpwm
            8 tabaxis       9 tabno         10 tabpos       11 motoc
            12 stall        13 tflexa       14 tflexb       15 tflexc
            16 ttscmot      17 tfamot       18 tseaps       19 trail
            20 tseabox      21 trpm (0.0)   22 frpm (0.0)   23 tstate
            24 fstate       25 tloc         26 tsc_err      27 floc
            28 fa_err       29 bus_volts
            deltsc, delfa, ntsc, nfa --- returned as separate values
    """
    #
    #--- initialization
    #
    lsec = 0.0
    deltsc = 0
    delfa = 0
    ntsc = 0
    nfa = 0
    #
    #--- initialize a list to save tl data (30 output columns)
    #
    tldat = []
    for k in range(0, 30):
        tldat.append([])
    for ifile in file_list:
        #
        #--- check whether the data is zipped or not, if it is unzip it
        #
        mc = re.search('gz', ifile)
        if mc is not None:
            cmd = 'gzip -d ' + ifile
            os.system(cmd)
            ifile = ifile.replace('.gz', '')
        data = mcf.read_data_file(ifile)
        #
        #--- skip none data part (first two lines are headers)
        #
        for ent in data[2:]:
            atemp = re.split('\t+', ent)
            tline = atemp[0].strip()
            if tline in ['TIME', '', 'N']:
                continue
            #--- NOTE(review): col 30 (elbv) is still read below; rows with
            #--- exactly 30 fields are silently dropped by the try/except.
            if len(atemp) < 30:
                continue
            #
            #--- convert time into chandra time, day of mission, and display time
            #
            try:
                [stime, dom, atime] = convert_time_format(tline)
            except:
                continue
            sdiff = stime - lsec
            #
            #--- save the data every 32 seconds
            #
            if sdiff > 32.0:
                #
                #--- skip empty data fields
                #
                try:
                    tscmove   = check_value(tldat, atemp, 6, 0)
                    tsc       = check_value(tldat, atemp, 7)
                    famove    = check_value(tldat, atemp, 8, 0)
                    fa        = check_value(tldat, atemp, 9)
                    mrmmxmv   = check_value(tldat, atemp, 10)
                    ldrtmek   = check_value(tldat, atemp, 14, 0)
                    ldrtno    = check_value(tldat, atemp, 15)
                    #--- bug fix: this used to overwrite ldrtno so both
                    #--- tabno and tabpos got the col 16 (3ldrtpos) value
                    ldrtpos   = check_value(tldat, atemp, 16)
                    smotoc    = check_value(tldat, atemp, 11)
                    smotstl   = check_value(tldat, atemp, 12)
                    faflaat   = check_value(tldat, atemp, 17)
                    faflbat   = check_value(tldat, atemp, 18)
                    faflcat   = check_value(tldat, atemp, 19)
                    trmtrat   = check_value(tldat, atemp, 20)
                    famtrat   = check_value(tldat, atemp, 21)
                    fapsat    = check_value(tldat, atemp, 22)
                    ttralat   = check_value(tldat, atemp, 23)
                    faseaat   = check_value(tldat, atemp, 24)
                    bus_volts = check_value(tldat, atemp, 30)
                except:
                    continue
                lsec = stime
                tldat[0].append(stime)
                tldat[1].append(dom)
                tldat[2].append(atime)
                tldat[3].append(tsc)          #--- 3tscpos
                tldat[4].append(tscmove)      #--- 3tscmove
                tldat[5].append(fa)           #--- 3fapos
                tldat[6].append(famove)       #--- 3famove
                tldat[7].append(mrmmxmv)      #--- 3mrmmxmv
                tldat[8].append(ldrtmek)      #--- 3ldrtmek
                tldat[9].append(ldrtno)       #--- 3ldrtno
                tldat[10].append(ldrtpos)     #--- 3ldrtpos
                tldat[11].append(smotoc)      #--- 3smotoc
                tldat[12].append(smotstl)     #--- 3smotstl
                tldat[13].append(faflaat)     #--- 3faflaat
                tldat[14].append(faflbat)     #--- 3faflbat
                tldat[15].append(faflcat)     #--- 3faflcat
                tldat[16].append(trmtrat)     #--- 3trmtrat
                tldat[17].append(famtrat)     #--- 3famtrat
                tldat[18].append(fapsat)      #--- 3fapsat
                tldat[19].append(ttralat)     #--- 3ttralat
                tldat[20].append(faseaat)     #--- 3faseaat
                tldat[21].append(0.0)         #--- trpm placeholder, filled later
                tldat[22].append(0.0)         #--- frpm placeholder, filled later
                tldat[23].append("STOP")      #--- tstate placeholder
                tldat[24].append("STOP")      #--- fstate placeholder
                [tloc, floc, tsc_err, fa_err, deltsc, delfa, ntsc, nfa] \
                        = find_locs(tscmove, tsc, famove, fa, deltsc, delfa, ntsc, nfa)
                tldat[25].append(tloc)
                tldat[26].append(tsc_err)
                tldat[27].append(floc)
                tldat[28].append(fa_err)
                tldat[29].append(bus_volts)
    return tldat, deltsc, delfa, ntsc, nfa
#---------------------------------------------------------------------------------------
#-- check_value: check value and convert to an appropriate type --
#---------------------------------------------------------------------------------------
def check_value(tldata, adata, pos, fv=1):
    """
    check value and convert to an appropriate type. if the value is not good,
    use the last entry value
    input:  tldata  --- a list of lists of data
            adata   --- a list of the current data
            pos     --- a column position of the data to be analyzed
            fv      --- if 1, the value is float, otherwise string
    output: fval    --- the value; False when no usable value exists

    NOTE(review): the fallback ``tldata[pos][-1]`` indexes tldata with the
    TL *column* number while tldata is ordered by *output* position --
    confirm the caller's pos values line up with tldata's layout.
    """
    val = adata[pos]
    if fv == 1:
        try:
            fval = float(val)
        except:
            try:
                #--- fall back to the most recent saved value
                fval = tldata[pos][-1]
            except:
                #--- bug fix: original referenced undefined name FALSE
                fval = False
    else:
        if val == "":
            fval = tldata[pos][-1]
        else:
            try:
                fval = val.strip()
            except:
                fval = False
    return fval
#---------------------------------------------------------------------------------------
#-- find_locs: check the position of the instrument after stopped --
#---------------------------------------------------------------------------------------
def find_locs(tscmove, tsc, famove, fa, deltsc, delfa, ntsc, nfa):
    """
    check the position of the instrument after stopped
    input:  tscmove --- 3tscmove state string
            tsc     --- 3tscpos
            famove  --- 3famove state string
            fa      --- 3fapos
            deltsc  --- running sum of |tsc error|
            delfa   --- running sum of |fa error|
            ntsc    --- running count of tsc matches
            nfa     --- running count of fa matches
    output: [tloc, floc, tsc_err, fa_err, deltsc, delfa, ntsc, nfa]
            tloc    --- label name of tsc location ('---' if no match)
            floc    --- label name of fa location ('---' if no match)
            tsc_err --- tsc position difference from the expected position
            fa_err  --- fa position difference from the expected position
            (999 sentinel means "no match")
    Uses module-level tables simt/tscloc/simf/faloc and thresholds
    tsc_test/fa_test.
    """
    tloc    = '---'
    floc    = '---'
    tsc_err = 999
    fa_err  = 999
    #--- the STOP checks are loop-invariant; hoisted out of the scans
    if tscmove == 'STOP':
        for j in range(0, len(simt)):
            dtsc = abs(tsc - simt[j])
            if dtsc < tsc_test:
                tsc_err = tsc - simt[j]
                deltsc += dtsc
                ntsc   += 1
                tloc    = tscloc[j]
    if famove == 'STOP':
        for j in range(0, len(simf)):
            dfa = abs(fa - simf[j])
            if dfa < fa_test:
                fa_err = fa - simf[j]
                delfa += dfa
                nfa   += 1
                floc   = faloc[j]
    return [tloc, floc, tsc_err, fa_err, deltsc, delfa, ntsc, nfa]
#---------------------------------------------------------------------------------------
#-- convert_time_format: convert time formats from that in TL files --
#---------------------------------------------------------------------------------------
def convert_time_format(tline):
    """
    convert time formats from that in TL files
    input:  tline --- time in TL format ("<yyyy> <ddd> <hh>:<mm>:<ss>.<s>")
    output: stime --- seconds from 1998.1.1
            dom   --- day of mission (fractional)
            atime --- display time <yyyy><ddd>.<hh><mm><ss><ss>
    """
    parts  = re.split(':', tline)
    fields = re.split('\s+', parts[0])
    year = fields[0]
    yday = mcf.add_leading_zero(fields[1], 3)
    hh   = mcf.add_leading_zero(fields[2])
    mm   = mcf.add_leading_zero(parts[1])
    sec_parts = re.split('\.', parts[2])
    ss  = mcf.add_leading_zero(sec_parts[0])
    fsq = sec_parts[1] + '0'
    #
    #--- chandra time (seconds since 1998.1.1)
    #
    ltime = "%s:%s:%s:%s:%s.%s" % (year, yday, hh, mm, ss, fsq)
    stime = Chandra.Time.DateTime(ltime).secs
    #
    #--- display time
    #
    atime = "%s%s.%s%s%s%s" % (year, yday, hh, mm, ss, fsq)
    #
    #--- day of mission, with fractional part from h/m/s and centiseconds
    #
    dom = mcf.ydate_to_dom(year, yday)
    dom += float(hh) / 24.0
    dom += float(mm) / 1440.0
    dom += float(ss) / 86400.0
    dom += float(fsq) / 8640000.0
    return [stime, dom, atime]
#---------------------------------------------------------------------------------------
#-- clean_tsc_data: order and removed duplicated entries --
#---------------------------------------------------------------------------------------
def clean_tsc_data():
    """
    order and remove duplicated entries from tsc_temps.txt
    input:  none, but read from data file tsc_temps.txt
    output: cleaned tsc_temps.txt (header kept, body sorted, duplicates and
            rows without exactly 9 fields dropped)
    """
    ifile = data_dir + 'tsc_temps.txt'
    data  = mcf.read_data_file(ifile)
    #
    #--- first line is the header; sort the body for the dedup pass
    #
    header = data[0]
    kept   = [header]
    seen   = ''
    for row in sorted(data[1:]):
        #--- duplicates are adjacent after sorting
        if row == seen:
            continue
        seen = row
        #
        #--- only rows with the full 9 fields are put back
        #
        if len(re.split('\s+', row)) == 9:
            kept.append(row)
    #
    #--- rewrite the data file
    #
    with open(ifile, 'w') as fo:
        fo.write('\n'.join(kept) + '\n')
#---------------------------------------------------------------------------------------
if __name__ == "__main__":
    #
    #--- Create a lock file and exit strategy in case of race conditions
    #
    name = os.path.basename(__file__).split(".")[0]
    if os.path.isfile(f"/tmp/mta/{name}.lock"):
        sys.exit(f"Lock file exists as /tmp/mta/{name}.lock. Process already running/errored out. Check calling scripts/cronjob/cronlog.")
    else:
        os.system(f"mkdir -p /tmp/mta; touch /tmp/mta/{name}.lock")
    try:
        run_tl_analysis()
    finally:
        #
        #--- Remove lock file even when the run raises, so a crash does not
        #--- leave a stale lock that blocks every subsequent cron run
        #
        os.system(f"rm /tmp/mta/{name}.lock")
#!/usr/bin/env python
from File import File
from LSA import LSA
from Set import Set
from NaiveBayesClassifier import NaiveBayesClassifier
import numpy
import datetime
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
###############################################################################
# Initializing
###############################################################################
# Run the LSA + Naive Bayes experiment: score test accuracy with and
# without the human-provided keywords, repeated over random splits.
f = File()
print("Data imported.")
# Hyperparameters: minimum term frequency, max n-gram length, and the
# fraction of eigenvalue energy kept in the LSA truncation.
MIN_FREQ = 3
MAX_GRAM = 5
P_EIG = 0.95
# Two configurations: no keywords ("") and the file's keyword list.
# NOTE(review): f.keywords is appended as a single element, so the second
# configuration passes the whole collection to manage_keywords -- confirm
# that is what manage_keywords expects.
human_keywords = [""]
human_keywords.append(f.keywords)
for o in range(10):
    y = []
    yerrormin = []
    yerrormax = []
    for h in human_keywords:
        l = LSA(MAX_GRAM, MIN_FREQ, P_EIG, f.x)
        # test_score accumulates across all 50 resampled splits below
        test_score = []
        print("LSA created.")
        ###########################
        # LSA
        aux = l.manage_keywords(h)
        lsa_results = l.train_phrases(aux)
        print("LSA Results computed.")
        for time in range(50):
            sets = Set(lsa_results, f.y, f.x)
            for i in range(len(sets.x_train)):
                ###########################
                ###########################
                # NAIVE BAYES
                naive = NaiveBayesClassifier(alpha=0.01)
                naive.train(numpy.array(sets.x_train[i]), sets.y_train[i])
                test_score.append(naive.test_score(numpy.array(sets.x_test[i]), numpy.array(sets.y_test[i])))
        # Average accuracy plus asymmetric error bars (distance to min/max)
        avg = numpy.round(numpy.average(numpy.array(test_score)), 2)
        y.append(avg)
        min_ = numpy.round(numpy.array(test_score).min(), 2)
        yerrormin.append(numpy.round(avg - min_, 2))
        max_ = numpy.round(numpy.array(test_score).max(), 2)
        yerrormax.append(numpy.round(max_ - avg, 2))
        print("Avg test performance: ", avg)
        print(min_)
        print(max_)
        print('\n'*3)
    print("y = ", y)
    print("yerrormin = ", yerrormin)
    print("yerrormax = ", yerrormax)
|
# -*- coding: utf-8 -*-
from flask import request, Blueprint
from flask import g
import logging
from libs.util import make_json_response
from libs.response_meta import ResponseMeta
from .authorization import require_auth
from models.user import User
# Blueprint holding the device-token bind/unbind routes; mounted by the app.
app = Blueprint('push', __name__)
@app.route("/device/bind", methods=["POST"])
@require_auth
def bind_device_token():
rds = g.rds
appid = request.appid
uid = request.uid
obj = request.get_json(force=True, silent=True, cache=False)
if obj is None:
logging.debug("json decode err:%s", e)
raise ResponseMeta(400, "json decode error")
device_token = obj.get("apns_device_token", "")
pushkit_device_token = obj.get("pushkit_device_token", "")
ng_device_token = obj.get("ng_device_token", "")
xg_device_token = obj.get("xg_device_token", "")
xm_device_token = obj.get("xm_device_token", "")
hw_device_token = obj.get("hw_device_token", "")
gcm_device_token = obj.get("gcm_device_token", "")
jp_device_token = obj.get("jp_device_token", "")
if not device_token and not pushkit_device_token \
and not ng_device_token and not xg_device_token \
and not xm_device_token and not hw_device_token \
and not gcm_device_token and not jp_device_token:
raise ResponseMeta(400, "invalid param")
User.save_user_device_token(rds, appid, uid,
device_token, pushkit_device_token,
ng_device_token, xg_device_token,
xm_device_token, hw_device_token,
gcm_device_token, jp_device_token)
return make_json_response({"success":True}, 200)
@app.route("/device/unbind", methods=["POST"])
@require_auth
def unbind_device_token():
rds = g.rds
appid = request.appid
uid = request.uid
obj = request.get_json(force=True, silent=True, cache=False)
if obj is None:
logging.debug("json decode err:%s", e)
raise ResponseMeta(400, "json decode error")
device_token = obj.get("apns_device_token", "")
pushkit_device_token = obj.get("pushkit_device_token", "")
ng_device_token = obj.get("ng_device_token", "")
xg_device_token = obj.get("xg_device_token", "")
xm_device_token = obj.get("xm_device_token", "")
hw_device_token = obj.get("hw_device_token", "")
gcm_device_token = obj.get("gcm_device_token", "")
jp_device_token = obj.get("jp_device_token", "")
if not device_token and not pushkit_device_token \
and not ng_device_token and not xg_device_token \
and not xm_device_token and not hw_device_token \
and not gcm_device_token and not jp_device_token:
raise ResponseMeta(400, "invalid param")
User.reset_user_device_token(rds, appid, uid,
device_token, pushkit_device_token,
ng_device_token, xg_device_token,
xm_device_token, hw_device_token,
gcm_device_token, jp_device_token)
return make_json_response({"success":True}, 200)
|
import pytest
import responses
from .base import BaseTestCase
from pyyoutube.error import PyYouTubeException
class TestActivitiesResource(BaseTestCase):
    """Tests for the YouTube activities list endpoint."""

    RESOURCE = "activities"

    def test_list(self, helpers, authed_cli):
        # Listing without any filter must raise (channel_id/mine required).
        with pytest.raises(PyYouTubeException):
            authed_cli.activities.list()
        with responses.RequestsMock() as m:
            # Register a canned page-1 response; both list() calls below hit it.
            m.add(
                method="GET",
                url=self.url,
                json=self.load_json(
                    "activities/activities_by_channel_p1.json", helpers
                ),
            )
            res = authed_cli.activities.list(
                parts=["id", "snippet"],
                channel_id="UC_x5XG1OV2P6uZZ5FSM9Ttw",
                max_results=10,
            )
            assert len(res.items) == 10
            assert authed_cli.activities.access_token == "access token"
            # Same endpoint with mine=True instead of a channel id.
            res = authed_cli.activities.list(
                parts=["id", "snippet"], mine=True, max_results=10
            )
            assert res.items[0].snippet.type == "upload"
|
from fastapi import FastAPI, Request
from starlette.responses import RedirectResponse
from pydantic import BaseModel
from typing import Optional
from src.auth import auth
from src.ls import Shortener
class Create(BaseModel):
    """Request body for POST /create: target URL and optional custom name."""
    url: str
    # Explicit default keeps the field optional under both pydantic v1 and
    # v2 (v2 treats a bare Optional annotation as required).
    name: Optional[str] = None
# Interactive docs disabled (docs_url=None); single shared shortener backend.
app = FastAPI(docs_url=None)
st = Shortener()
@app.get("/{linkid}")
async def get_page(linkid: str):
return RedirectResponse(st.get(linkid), status_code=307)
@app.post("/create")
async def create_link(data: Create, request: Request):
auth(request)
return st.create(data.url, data.name) |
# VRAY UTILS
from maya import cmds
from pymel import core as pm
def renderSettings():
    """Switch the scene to V-Ray and apply the baseline sampler/bucket settings."""
    if cmds.getAttr("defaultRenderGlobals.currentRenderer") != "vray":
        cmds.setAttr("defaultRenderGlobals.currentRenderer", "vray", type="string")
    # (attribute, value) pairs applied in order
    for attr, value in (
            ("vraySettings.samplerType", 4),
            ("vraySettings.minShadeRate", 2),
            ("vraySettings.aaFilterType", 1),
            ("vraySettings.dmcMaxSubdivs", 10),
            ("vraySettings.dmcs_useLocalSubdivs", 1),
            ("vraySettings.sys_regsgen_xc", 16),
            ("vraySettings.sys_regsgen_seqtype", 5),
            ("vraySettings.globopt_render_viewport_subdivision", 0)):
        cmds.setAttr(attr, value)
def giSettings():
    """Switch the scene to V-Ray and enable GI with the studio's irradiance-map defaults."""
    if cmds.getAttr("defaultRenderGlobals.currentRenderer") != "vray":
        cmds.setAttr("defaultRenderGlobals.currentRenderer", "vray", type="string")
    # (attribute, value) pairs applied in order
    for attr, value in (
            ("vraySettings.giOn", 1),
            ("vraySettings.primaryEngine", 0),
            ("vraySettings.secondaryEngine", 3),
            ("vraySettings.imap_subdivs", 50),
            ("vraySettings.imap_interpSamples", 20),
            ("vraySettings.subdivs", 1800)):
        cmds.setAttr(attr, value)
def vray_attributes(selected=True, shapes=True, add=True, command=""):
    """Add or remove a V-Ray attribute group on the selection.

    selected -- operate only on selected nodes; otherwise include all
                descendant transforms of each selected root
    shapes   -- operate on the shape nodes rather than the transforms
    add      -- True to add the attribute group, False to remove it
    command  -- name of the V-Ray attribute group (required)
    """
    if not command:
        return
    sel = pm.ls(sl=True, l=True)
    if selected:
        targets = sel
    else:
        # each selection root plus all of its descendant transforms
        targets = set()
        for root in sel:
            targets.add(root)
            for child in pm.listRelatives(root, f=True, allDescendents=True, type="transform"):
                targets.add(child)
    items = get_shapes(targets) if shapes else targets
    if not items:
        return
    state = "ADDED" if add else "REMOVED"
    for node in items:
        cmds.vray("addAttributesFromGroup", node, command, add)
        print("%s.%s %s." % (node, command, state))
# Create a Vray Object ID attribute on shape nodes of selected geometry
# NOTE(review): Python 2 print statements -- this module predates py3 Maya.
def makeVrayObjId():
    shapes = get_shapes()
    print shapes  # debug output
    if not shapes:
        return
    for s in shapes:
        print s  # debug output
        cmds.vray("addAttributesFromGroup", s, "vray_objectID", 1)
# Remove a Vray Object ID attribute on shape nodes of selected geometry
# NOTE(review): redefined again near the end of this module; the later
# definition wins at import time.
def removeVrayObjId():
    shapes = get_shapes()
    print shapes  # debug output
    if not shapes:
        return
    for s in shapes:
        print s  # debug output
        cmds.vray("addAttributesFromGroup", s, "vray_objectID", 0)
# Create Vray Subdivision attributes on shape nodes of selected geometry
def makeVraySubdAttr():
    """Add V-Ray subdivision and subdivision-quality attribute groups to selected shapes."""
    shapes = get_shapes()
    if not shapes:
        return
    for shape in shapes:
        for group in ("vray_subdivision", "vray_subquality"):
            cmds.vray("addAttributesFromGroup", shape, group, 1)
def makeVrayMatId():
    """Add the V-Ray material-ID attribute group to every shader assigned to the selection.

    Walks selection shapes -> shading groups -> shaders, then tags each
    unique shader once.
    """
    shadingGrpSet = set()
    shaderSet = set()
    shapes = get_shapes()
    if not shapes:
        return
    for s in shapes:
        # listConnections returns None when there are no connections
        shadingGrps = cmds.listConnections(s, type='shadingEngine') or []
        for sg in shadingGrps:
            shadingGrpSet.add(sg)
    for sg in shadingGrpSet:
        shaders = cmds.ls(cmds.listConnections(sg), materials=1)
        for sh in shaderSet.union(shaders) and shaders:
            shaderSet.add(sh)
    for shader in shaderSet:
        # bug fix: the original passed the stale loop variable ``s`` here,
        # so the attribute was added to the last shape, not each shader
        cmds.vray("addAttributesFromGroup", shader, "vray_material_id", 1)
def displacementControl():
    """Add the V-Ray displacement attribute group to each selected shape."""
    shapes = get_shapes()
    if not shapes:
        return
    for shape in shapes:
        cmds.vray("addAttributesFromGroup", shape, "vray_displacement", 1)
def single_vop():
    # Create a single shared V-Ray object-properties node for the selection.
    cmds.vray("objectProperties", "add_single")
def primVis():
    """Disable primary visibility (via a render-layer override) on the selection's V-Ray object-properties nodes."""
    selection = cmds.ls(sl=True, l=True)
    for node in getVrayObjProperties(selection):
        attr = "%s.primaryVisibility" % node
        cmds.editRenderLayerAdjustment(attr)
        cmds.setAttr(attr, 0)
def matteSurface():
    """Turn the selection's V-Ray object-properties nodes into matte holdouts on the current render layer."""
    # (attribute, matte value) pairs; overrides registered first, then set
    overrides = (
        ("matteSurface", 1),
        ("receiveGI", 0),
        ("alphaContribution", -1),
        ("shadowBrightness", 0),
        ("reflectionAmount", 0),
        ("refractionAmount", 0),
    )
    selection = cmds.ls(sl=True, l=True)
    for node in getVrayObjProperties(selection):
        for attr, _ in overrides:
            cmds.editRenderLayerAdjustment("%s.%s" % (node, attr))
        for attr, value in overrides:
            cmds.setAttr("%s.%s" % (node, attr), value)
# General Utils
def get_shapes(objs=""):
    """Return the set of non-intermediate shape nodes under *objs*.

    Falls back to the current selection when *objs* is empty; returns None
    when there is nothing to scan (callers test truthiness).
    """
    if not objs:
        objs = pm.ls(sl=1, l=1)
    if not objs:
        return
    collected = set()
    for node in objs:
        rel = pm.listRelatives(node, s=1, ni=1, f=1)
        if rel:
            collected.update(rel)
    return collected
def getVrayObjProperties(objs=[]):
    # Collect the VRayObjectProperties nodes for the given objects: the
    # object itself if it already is one, otherwise any connected ones.
    # Returns None when objs is empty; empty set (after a warning) when
    # none are found.
    # NOTE(review): Python 2 print statement below; mutable default [] is
    # never mutated here so it is safe, if unidiomatic.
    if not objs:
        return
    vops = set()
    for o in objs:
        if cmds.objectType(o) == "VRayObjectProperties":
            vops.add(o)
            print "success"  # debug output
            continue
        vop = cmds.listConnections(o, type="VRayObjectProperties")
        if not vop:
            continue
        for v in vop:
            vops.add(v)
    if not vops:
        cmds.warning("Vray Object Properties node not found")
    return vops
from maya import cmds
# NOTE(review): duplicate definition -- removeVrayObjId is already defined
# earlier in this module with the same body (minus the "print shapes").
# This later definition is the one in effect after import.
def removeVrayObjId():
    shapes = get_shapes()
    if not shapes:
        return
    for s in shapes:
        print s  # debug output
        cmds.vray("addAttributesFromGroup", s, "vray_objectID", 0)
def removeSubDiv():
    # Remove the V-Ray subdivision and subdivision-quality attribute groups
    # from each selected shape (inverse of makeVraySubdAttr).
    shapes = get_shapes()
    if not shapes:
        return
    for s in shapes:
        print s  # debug output
        cmds.vray("addAttributesFromGroup", s, "vray_subdivision", 0)
        cmds.vray("addAttributesFromGroup", s, "vray_subquality", 0)
def addOpenSubdiv():
    # Add the V-Ray OpenSubdiv attribute group to each selected shape.
    shapes = get_shapes()
    if not shapes:
        return
    for s in shapes:
        print s  # debug output
        cmds.vray("addAttributesFromGroup", s, "vray_opensubdiv", 1)
|
# https://codechalleng.es/oauth/complete/github/&response_type=code&state=dcYOJU1M0IK33oiiO6ekdg3OogeuCKPU
# https://codechalleng.es/oauth/login/github/?next=
import argparse
import os
from IPython.display import HTML
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def create_parser():
    """Build the CLI argument parser.

    Bug fix: the original registered two options under ``-a`` (--auth and
    --action), which makes argparse raise ArgumentError at startup.
    ``--action`` is now long-form only; args.auth/args.action are unchanged.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--auth", help="Authentication method: userid/github")
    parser.add_argument("-u", "--userid", help="User id")
    parser.add_argument("-p", "--password", help="password")
    parser.add_argument("--action", help="action - get, test, run, submit")
    return parser
def get_url(auth):
    """Return the login URL for the given authentication method.

    auth -- 'userid' or 'github'; anything else returns None.
    (Removed the dead ``url = ""`` local from the original.)
    """
    base_url = "https://codechalleng.es"
    if auth == 'userid':
        return f"{base_url}/login/"
    if auth == 'github':
        return f"{base_url}/oauth/login/github/?next="
    return None
def login(url, userid, password):
    """Open a headless Chrome session, log in at *url*, and return the driver.

    Bug fixes vs. the original:
    - it referenced undefined names LOGIN_URL / USERNAME / PASSWORD instead
      of the url/userid/password parameters (NameError at runtime);
    - it contained a second, hard-coded credential submission (a security
      leak) -- removed;
    - it never returned the driver even though the caller assigns it.
    """
    # Set options to be headless, ..
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-dev-shm-usage')
    driver = webdriver.Chrome(executable_path=r'/usr/bin/chromedriver', options=options)
    driver.get(url)
    driver.find_element_by_name('username').send_keys(userid)
    driver.find_element_by_name('password').send_keys(password + Keys.RETURN)
    return driver
def get_bite(driver, bite_no):
    """TODO: fetch the given bite's page using *driver* (not implemented)."""
    pass
def test_bite(driver, bite_no):
    """TODO: run the bite's tests using *driver* (not implemented)."""
    pass
def run_bite(driver, bite_no):
    """TODO: run the bite's code using *driver* (not implemented)."""
    pass
def submit_bite(driver, bite_no):
    """TODO: submit the bite's solution using *driver* (not implemented)."""
    pass
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
driver = login(get_url(args.auth), args.userid, args.password)
|
from base import *
import clsTestService
from general import General
from logger import writeToLog
from editEntryPage import EditEntryPage
import enums
from selenium.webdriver.common.keys import Keys
class Home(Base):
    """Page object for the site's home page (navigation and playlist checks)."""
    driver = None
    clsCommon = None

    def __init__(self, clsCommon, driver):
        self.driver = driver
        self.clsCommon = clsCommon
    #=============================================================================================================
    #Home page locators:
    # PLAYLIST / ENTRY_NAME placeholders are substituted at call time.
    #=============================================================================================================
    HOME_LINK = ('id', 'menu-Home-btn')
    HOME_PLAYLIST = ('xpath', "//a[@class='clickable-header' and contains(text(),'PLAYLIST')]")
    HOME_PLAYLIST_ENTRY = ('xpath', '//img[contains(@alt,"ENTRY_NAME")]/ancestor::div[@class="photo-group featured_wrapper"]')
    HOME_CAROUSEL_ENTRY = ('xpath', "//h1[@class='home__carousel-entry-title entryTitle tight' and contains(text(),'ENTRY_NAME')]")
    HOME_CAROUSEL_ENTRY_OLD_UI = ('xpath', "//img[@alt='ENTRY_NAME']")
    #=============================================================================================================
    # @Author: Inbar Willman / Michal Zomper
    # This method navigate to home page
    def navigateToHomePage(self, forceNavigate=False):
        # Returns True on success, False on any failure (logged).
        if forceNavigate == False:
            # Check if we are already in home page
            if self.verifyUrl(localSettings.LOCAL_SETTINGS_TEST_BASE_URL , False, 3) == True:
                return True
        # navigate to home page
        if self.clsCommon.base.navigate(localSettings.LOCAL_SETTINGS_TEST_BASE_URL) == False:
            writeToLog("INFO","FAILED navigate to home page")
            return False
        if self.verifyUrl(localSettings.LOCAL_SETTINGS_TEST_BASE_URL , False, 5) == False:
            writeToLog("INFO","FAILED verify that home page display")
            return False
        return True

    # @Author: Inbar Willman
    def checkEntryInHomePlaylist(self, playlist, entryName):
        # Verify that the named entry appears inside the named home-page
        # playlist. Returns True/False; failures are logged.
        if self.navigateToHomePage() == False:
            writeToLog("INFO","FAILED to navigate to home page")
            return False
        playlist_list = (self.HOME_PLAYLIST[0], self.HOME_PLAYLIST[1].replace('PLAYLIST', playlist))
        if self.is_visible(playlist_list):
            tmp_entry = (self.HOME_PLAYLIST_ENTRY[0], self.HOME_PLAYLIST_ENTRY[1].replace('ENTRY_NAME', entryName))
            if self.is_visible(tmp_entry) == False:
                writeToLog("INFO","FAILED to find entry in " + playlist + " List")
                return False
            return True
        writeToLog("INFO","FAILED to find " + playlist + " List")
        return False

    # @Author: Michal Zomper
    def verifyEntyNameAndThumbnailInHomePagePlaylist(self, entryName, expectedQrResult, cropLeft, croTop, cropRight, cropBottom):
        # Verify the entry is present in a home-page playlist and that its
        # thumbnail's embedded QR code decodes to the expected value.
        # Crop args define the screenshot region handed to the QR resolver.
        tmp_entry = (self.HOME_PLAYLIST_ENTRY[0], self.HOME_PLAYLIST_ENTRY[1].replace('ENTRY_NAME', entryName))
        if self.is_visible(tmp_entry) == False:
            writeToLog("INFO","FAILED to find entry '" + entryName + "' in playlist")
            return False
        qrResult = self.clsCommon.qrcode.getScreenshotAndResolveCustomImageQrCode(cropLeft, croTop, cropRight, cropBottom, tmp_entry)
        if qrResult != str(expectedQrResult):
            writeToLog("INFO","FAILED entry thumbnail is '" + str(qrResult) + "' but need to be '" + str(expectedQrResult) + "'")
            return False
        writeToLog("INFO","Success, entry'" + entryName + "' was verified")
        return True

    def verifyEntryInHomePageCarousel(self, entryName, expectedQrResult, cropLeft, croTop, cropRight, cropBottom):
        # Same QR-based thumbnail verification for the home-page carousel;
        # the locator differs between the old and new UI.
        if localSettings.LOCAL_SETTINGS_IS_NEW_UI == False:
            tmpEntryName = (self.HOME_CAROUSEL_ENTRY_OLD_UI[0], self.HOME_CAROUSEL_ENTRY_OLD_UI[1].replace('ENTRY_NAME', entryName) + "/ancestor::div[@class='carmain']")
        else:
            tmpEntryName = (self.HOME_CAROUSEL_ENTRY[0], self.HOME_CAROUSEL_ENTRY[1].replace('ENTRY_NAME', entryName) + "/ancestor::div[@class='thumbnail-info__container']")
        if self.is_visible(tmpEntryName) == False:
            writeToLog("INFO","FAILED to find entry '" + entryName + "' in home page carousel playlist")
            return False
        qrResult = self.clsCommon.qrcode.getScreenshotAndResolveCustomImageQrCode(cropLeft, croTop, cropRight, cropBottom, tmpEntryName)
        if qrResult != str(expectedQrResult):
            writeToLog("INFO","FAILED entry thumbnail is '" + str(qrResult) + "' but need to be '" + str(expectedQrResult) + "'")
            return False
        writeToLog("INFO","Success, entry'" + entryName + "' in home page carousel was verified")
        return True
from operator import itemgetter
from random import *
import sys
import time
import resource
class GSat:
m = 0 # variables
n = 0 # clauses
clauses = set() # set of clauses
dicClauses = {}
attempt = []
bestImproves = []
unsatisfiedClauses = set()
def __init__( self, filename ):
file = open( filename, "r" )
finish = False
read = False
clause = []
while not finish:
line = file.readline()
if not line:
finish = True
else:
line = line.split()
if len( line ) > 0:
if line[0] == 'p':
self.n = int( line[2] )
self.m = int( line[3] )
read = True
elif read:
for i in xrange( len( line ) ):
if line[i] == '0':
self.clauses.add( tuple( clause ) )
clause = []
else:
clause.append( line[i] )
if len( self.clauses ) < self.m:
self.clauses.add( tuple( clause ) )
file.close()
for clause in self.clauses:
for literal in clause:
literal = abs( int( literal ) )
if literal not in self.dicClauses:
self.dicClauses[literal] = set()
self.dicClauses[literal].add( tuple( clause ) )
def run( self, tries ):
maxFlips = 3 * self.n # number of search's restarts
maxTries = tries # amount of time to spend looking for an assignment
seed = time.time()
it = 0
for i in xrange( 3 ):
print 'Try', i + 1
variable = None
seed += 10000
# generates randomly truth assignment
self.attempt = self.generateAttempt( self.n + 1, seed )
for j in xrange( 2 ):
it += 1
if self.satisfies( variable ):
return ( self.attempt, j, it )
start = time.time()
variable = self.getVariable( variable )
print "time", time.time() - start
self.attempt[variable] = self.invValue( self.attempt[variable] )
return ( 'Insatisfied' )
def generateAttempt( self, n, s ):
seed( s )
result = []
for i in xrange( n ):
result.append( randint( 0, 1 ) )
return result
def invValue( self, v ):
if v == 0:
return 1
else:
return 0
    def getVariable( self, v ):
        """Pick the variable whose flip repairs the most broken clauses.

        When v is None (first call of a try) every variable 1..n is scored by
        flipping it, counting how many of its clauses go from broken to
        satisfied, and flipping it back; the per-variable scores are stored in
        self.bestImproves.  On later calls only the variables appearing in the
        clauses of the previously flipped variable v are re-scored.  Ties are
        broken in favour of the highest-numbered variable (>= comparison).
        """
        if v is None:
            self.bestImproves.append( -1 )
            #print self.attempt
            clausesChanged = 0
            for variable in xrange( 1, self.n + 1 ):
                clausesToEvaluate = self.dicClauses[variable]
                #print len( clausesToEvaluate )
                # score = broken clauses now minus broken clauses after a flip
                brokenClauses = self.countUnsatisfiedClauses( clausesToEvaluate )
                self.attempt[variable] = self.invValue( self.attempt[variable] )
                brokenClausesFlipped = self.countUnsatisfiedClauses( clausesToEvaluate )
                self.attempt[variable] = self.invValue( self.attempt[variable] )
                noBroken = brokenClauses - brokenClausesFlipped
                self.bestImproves.append( noBroken )
                #print variable, noBroken
                if noBroken >= clausesChanged:
                    clausesChanged = noBroken
                    bestVariable = variable
        else:
            clausesChanged = 0
            # only re-score variables sharing a clause with the last flip
            for clause in self.dicClauses[v]:
                for variable in clause:
                    variable = abs( int( variable ) )
                    clausesToEvaluate = self.dicClauses[variable]
                    #print len( clausesToEvaluate )
                    brokenClauses = self.countUnsatisfiedClauses( clausesToEvaluate )
                    self.attempt[variable] = self.invValue( self.attempt[variable] )
                    brokenClausesFlipped = self.countUnsatisfiedClauses( clausesToEvaluate )
                    self.attempt[variable] = self.invValue( self.attempt[variable] )
                    noBroken = brokenClauses - brokenClausesFlipped
                    self.bestImproves[variable] = noBroken
                    #print variable, noBroken
                    if noBroken >= clausesChanged:
                        clausesChanged = noBroken
                        bestVariable = variable
        print self.bestImproves
        #print bestVariable
        return bestVariable
        # NOTE(review): the string below is unreachable dead code (an old
        # tie-breaking implementation) kept verbatim; consider deleting it.
        '''mx = 0
        px = []
        #print vec
        #idxs, srd = zip( *sorted( enumerate( vec ), key=itemgetter( 1 ), reverse=True ) )
        #tam = int( len( vec )*0.02 )
        #print srd[:tam], idxs[:tam]
        #change = self.reservoir_sampling( list( srd[:tam] ), list( idxs[:tam] ), 1 )
        #print self.qsort( vec )
        for i in xrange( 1, len( vec ) ):
            if vec[i] > mx:
                mx = vec[i]
                px = []
                px.append( i )
            elif vec[i] == mx:
                px.append( i )
        #print self.reservoir_sampling( px, 5 )
        if len( px ) == 1:
            return px[0]
        else:
            return choice( px )'''
        #return change[0]
def countUnsatisfiedClauses( self, clauses ):
count = 0
for clause in clauses:
if not self.isSatisfiedClause( clause ):
count += 1
return count
def isSatisfiedClause( self, clause ):
clauseValue = 0
for variable in clause:
ab = abs( int( variable ) )
if int( variable ) < 0:
value = self.invValue( self.attempt[ab] )
else:
value = self.attempt[ab]
clauseValue += value
return clauseValue > 0
    def satisfies( self, variable ):
        """Check whether the current assignment satisfies every clause.

        When *variable* is given, only the clauses containing it are
        re-evaluated and self.unsatisfiedClauses is updated incrementally;
        otherwise the full clause set is rescanned from scratch.
        Returns True when no unsatisfied clause remains.
        """
        if variable:
            if variable in self.dicClauses:
                for clause in self.dicClauses[variable]:
                    if self.isSatisfiedClause( clause ):
                        if clause in self.unsatisfiedClauses:
                            self.unsatisfiedClauses.remove( clause )
                    else:
                        if clause not in self.unsatisfiedClauses:
                            self.unsatisfiedClauses.add( clause )
        else:
            # full rescan: rebuild the unsatisfied-clause set from scratch
            self.unsatisfiedClauses.clear()
            for clause in self.clauses:
                if not self.isSatisfiedClause( clause ):
                    self.unsatisfiedClauses.add( clause )
        # NOTE(review): the trailing '|' below is a file-concatenation
        # artifact in this dump, preserved verbatim
        return len( self.unsatisfiedClauses ) == 0 |
# coding: utf-8
"""
Created by Alex Wang
On 2017-08-31
"""
import time
import requests
import json
import logging
import urllib.request
import traceback
from PIL import Image
# silence per-request debug chatter from the HTTP stack
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
def download_image_to_memory_with_retry(img_url, retry=5):
    """
    Download an image into memory, retrying up to *retry* times.
    :param img_url: URL of the image
    :param retry: maximum number of attempts
    :return: PIL Image on success, None if every attempt failed
    """
    image = None
    attempt = 0
    while image is None and attempt < retry:
        image = download_image_to_memory(img_url)
        if image is None:
            attempt += 1
            print('download image {} failed, retry:{}'.format(img_url, attempt))
            time.sleep(0.2)
    return image
def download_image_to_memory(img_url):
    """
    Download an image into memory as a PIL Image.
    :param img_url: URL of the image
    :return: PIL.Image on success, None on any failure
    """
    try:
        response = requests.get(img_url, timeout=600, stream=True)
        # bug fix: check the HTTP status BEFORE decoding the body - the
        # original called Image.open() first, so an error page was decoded
        # (or raised) before the .ok check could ever return None
        if not response.ok:
            return None
        response.raw.decode_content = True
        image = Image.open(response.raw)
        return image
    except Exception:
        print('download failed:', img_url)
        traceback.print_exc()
        return None
def image_download_with_retry(img_url, save_path, retry=5):
    """
    Download the image at *img_url* to *save_path*, retrying up to *retry* times.
    :param img_url: image URL
    :param save_path: destination file path
    :param retry: maximum number of attempts
    :return: True on success, False if every attempt failed
    """
    succeed = False
    attempt = 0
    while not succeed and attempt < retry:
        succeed = image_download(img_url, save_path)
        if not succeed:
            attempt += 1
            print('download image {} failed, retry:{}'.format(img_url, attempt))
            time.sleep(0.2)
    return succeed
def image_download(url, save_path):
    """
    Download the image at *url* and save it to *save_path*.
    :param url: image URL
    :param save_path: destination file path
    :return: True on success, False if the HTTP request failed
    """
    response = requests.get(url, timeout=60, stream=True)
    # bug fix: check the response before opening the target file, so a
    # failed request no longer creates/truncates an empty file at save_path
    if not response.ok:
        return False
    with open(save_path, 'wb') as handle:
        for block in response.iter_content(1024):
            if not block:
                break
            handle.write(block)
    return True
def send_get(url, params=None):
    """
    Send a GET request; *params* is a dict of query parameters.
    :param url: target URL
    :param params: optional dict of query-string parameters
    :return: the requests.Response (access status_code, headers, content, e.g. r.content)
    """
    return requests.get(url, params=params)
def send_get_content(url, params=None):
    """
    Send a GET request; *params* is a dict of query parameters.
    :param url: target URL
    :param params: optional dict of query-string parameters
    :return: the response body decoded as UTF-8 text
    """
    response = requests.get(url, params=params)
    return response.content.decode('UTF-8')
def send_post(url, params=None):
    """
    Send a POST request; *params* is a dict sent as form data.
    :param url: target URL
    :param params: optional dict of form fields
    :return: the requests.Response (access status_code, headers, content, e.g. r.content)
    """
    return requests.post(url, data=params)
def send_post_content(url, params=None):
    """
    Send a POST request; *params* is a dict sent as form data.
    :param url: target URL
    :param params: optional dict of form fields
    :return: the response body decoded as UTF-8 text
    """
    response = requests.post(url, data=params)
    return response.content.decode('UTF-8')
def download_video(video_url, save_path):
    """
    Download the video at *video_url* and save it to *save_path*.

    python2 equivalent:
        def download_video(video_url, save_path):
            rsp = urllib2.urlopen(video_url)
            with open(save_path, 'wb') as f:
                f.write(rsp.read())
    :param video_url: source URL of the video
    :param save_path: destination file path, e.g. xxx.mp4
    :return: None
    """
    urllib.request.urlretrieve(video_url, save_path)
if __name__ == "__main__":
    # image_download('http://dmr.nosdn.127.net/v-20170826-6b05cdaa733282703f729b5afcc65759.jpg','E://temp/docduplicate/image/v-20170826-6b05cdaa733282703f729b5afcc65759.jpg')
    # params = {'picUrl': 'http://img1.gtimg.com/20/2015/201558/20155894_980x1200_281.jpg'}
    # response = send_post_content('http://nlp.service.163.org/cv-api-logo/watermarker_detect', params)
    # print(response)
    # manual smoke test: clickbait-title ("title cheat") detection service
    title_cheat_url = 'http://nlp.service.163.org/dl-nlp-news/titlecheat_detect_article'
    params = {"title": "孙悟空被压五指山,菩提法师为啥不救他?背后原因吓人", "category": "人文"}
    response = send_post_content(title_cheat_url, params)
    print(response)
    response_json = json.loads(response)
    print(response_json['body']['finalMark'])
    # manual smoke test: vulgarity-scoring service
    sansu_url = 'http://nlp.service.163.org/news-api/vulgarity_quant_article'
    params = {"title": "美国怪兽车大赛 一名车手的表演燃爆全场", "category": "搞笑", 'docid': 'VCSAT9DI8', 'content': '',
              'source': '总有刁民想害朕'}
    response = send_post_content(sansu_url, params)
    print(response)
    response_json = json.loads(response)
    # NOTE(review): 'serverity' is the key actually returned by the service
    # (sic) - do not "fix" the spelling without checking the API
    print(response_json['body']['serverity'])
|
from itsdangerous import URLSafeTimedSerializer
from flask.sessions import SecureCookieSessionInterface
import os
from dotenv import load_dotenv
load_dotenv()
# variables from .env used to build database URI for psycopg2
POSTGRES_PORT = os.getenv("POSTGRES_PORT")
POSTGRES_USER = os.getenv("POSTGRES_USER")
POSTGRES_PW = os.getenv("POSTGRES_PW")
POSTGRES_DB = os.getenv("POSTGRES_DB")
# prefer the container hostname (set when running under docker-compose);
# fall back to the local hostname when POSTGRES_CONTAINER is not defined
try:
    POSTGRES_HOSTNAME = os.environ['POSTGRES_CONTAINER']
except KeyError:
    POSTGRES_HOSTNAME = os.getenv("POSTGRES_LOCAL")
# SQLAlchemy/psycopg2 connection string assembled from the pieces above
DB_URI = 'postgresql+psycopg2://{user}:{pw}@{hostname}:{port}/{db}'.format(
    user=POSTGRES_USER, pw=POSTGRES_PW, hostname=POSTGRES_HOSTNAME, port=POSTGRES_PORT, db=POSTGRES_DB)
class Config(object):
    """
    Configuration class that sets global variables for data access/connections.

    :var: SECRET_KEY: Secret key, used to encrypt cookies
    :var: SQLALCHEMY_TRACK_MODIFICATIONS: SQLAlchemy variable, set to False
    :var: GOOGLE_OAUTH_CLIENT_ID: The Client ID for Google authentication, registered in the Google console
    :var: GOOGLE_OAUTH_CLIENT_SECRET: The client secret key, registered in the Google console
    """
    SECRET_KEY = os.getenv("FLASK_SECRET_KEY")
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    GOOGLE_OAUTH_CLIENT_ID = os.getenv("GOOGLE_OAUTH_CLIENT_ID")
    GOOGLE_OAUTH_CLIENT_SECRET = os.getenv("GOOGLE_OAUTH_CLIENT_SECRET")
class SimpleSecureCookieSessionInterface(SecureCookieSessionInterface):
    """Session interface that signs/verifies Flask session cookies from a bare
    secret key instead of requiring a Flask app instance."""

    # Override method
    # Take secret_key instead of an instance of a Flask app
    def get_signing_serializer(self, secret_key):
        """Build the serializer used to sign and verify session cookies.

        Returns None when *secret_key* is falsy (mirrors Flask's behaviour).
        """
        if not secret_key:
            return None
        signer_kwargs = dict(
            key_derivation=self.key_derivation,
            digest_method=self.digest_method
        )
        return URLSafeTimedSerializer(secret_key, salt=self.salt,
                                      serializer=self.serializer,
                                      signer_kwargs=signer_kwargs)

    def decodeFlaskCookie(self, secret_key, cookieValue):
        """Decode (and verify the signature of) a Flask session cookie value."""
        # fix: use self instead of instantiating a fresh
        # SimpleSecureCookieSessionInterface - the extra object was wasteful
        # and ignored any subclass overrides
        signingSerializer = self.get_signing_serializer(secret_key)
        return signingSerializer.loads(cookieValue)

    # Keep in mind that flask uses unicode strings for the
    # dictionary keys
    def encodeFlaskCookie(self, secret_key, cookieDict):
        """Sign and encode *cookieDict* as a Flask session cookie value."""
        signingSerializer = self.get_signing_serializer(secret_key)
        return signingSerializer.dumps(cookieDict)
|
import tensorflow as tf
#import keras.backend as K
import tensorflow.keras.backend as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, RepeatVector, TimeDistributed, ConvLSTM2D, Activation, BatchNormalization, Flatten, Reshape
def tilted_loss(q, y, f):
    """Pinball (tilted/quantile) loss at quantile q over channel 0 of y and f."""
    err = y[:, :, :, 0, 0] - f[:, :, :, 0, 0]
    return K.mean(K.maximum(q * err, (q - 1) * err))
def mse_loss(y, f):
    """Mean squared error over channel 0 of y and f, averaged on the last axis."""
    diff = y[:, :, :, 0, 0] - f[:, :, :, 0, 0]
    return K.mean(K.square(diff), axis=-1)
## Tilted loss for both mean and quantiles
def joint_tilted_loss(quantiles, y, f):
    """MSE on the mean channel (index 0) plus a pinball-loss term for each
    quantile, read from channels 1..len(quantiles)."""
    total = K.mean(K.square(y[:, :, :, 0, 0] - f[:, :, :, 0, 0]), axis=-1)
    for idx, q in enumerate(quantiles):
        chan = idx + 1
        err = y[:, :, :, 0, chan] - f[:, :, :, 0, chan]
        total += K.mean(K.maximum(q * err, (q - 1) * err))
    return total
## Encoder-decoder convolutional LSTM for jointly estimating quantiles and mean predictions.
def joint_convLstm(num_filters, kernel_length, input_timesteps, num_links, output_timesteps, quantiles, prob, loss):
    """Build and compile the joint mean+quantile encoder-decoder ConvLSTM.

    Output has len(quantiles) + 1 units per timestep/link: the mean plus one
    channel per quantile.  Compiled with the given loss and the nadam optimizer.
    """
    net = Sequential()
    net.add(BatchNormalization(input_shape=(input_timesteps, num_links, 1, 1), name='batch_norm_0'))
    # encoder: collapse the input sequence into one feature map
    net.add(ConvLSTM2D(filters=num_filters, kernel_size=(kernel_length, 1),
                       padding='same', return_sequences=False, name='conv_lstm_1'))
    net.add(Dropout(prob, name='dropout_1'))
    net.add(BatchNormalization(name='batch_norm_1'))
    # bridge: repeat the encoded state once per output timestep
    net.add(Flatten())
    net.add(RepeatVector(output_timesteps))
    net.add(Reshape((output_timesteps, num_links, 1, num_filters)))
    # decoder: emit a full output sequence
    net.add(ConvLSTM2D(filters=num_filters, kernel_size=(kernel_length, 1),
                       padding='same', return_sequences=True, name='conv_lstm_2'))
    net.add(Dropout(prob, name='dropout_2'))
    net.add(TimeDistributed(Dense(units=len(quantiles) + 1, name='dense_1')))
    net.compile(loss=loss, optimizer='nadam')
    return net
## Encoder-decoder LSTM for mean
def convLstm(num_filters, kernel_length, input_timesteps, num_links, output_timesteps, prob, loss):
    """Build and compile the mean-only encoder-decoder ConvLSTM (one output
    unit per timestep/link), using the given loss and the nadam optimizer."""
    net = Sequential()
    net.add(BatchNormalization(input_shape=(input_timesteps, num_links, 1, 1), name='batch_norm_0'))
    # encoder: collapse the input sequence into one feature map
    net.add(ConvLSTM2D(filters=num_filters, kernel_size=(kernel_length, 1),
                       padding='same', return_sequences=False, name='conv_lstm_1'))
    net.add(Dropout(prob, name='dropout_1'))
    net.add(BatchNormalization(name='batch_norm_1'))
    # bridge: repeat the encoded state once per output timestep
    net.add(Flatten())
    net.add(RepeatVector(output_timesteps))
    net.add(Reshape((output_timesteps, num_links, 1, num_filters)))
    # decoder: emit a full output sequence
    net.add(ConvLSTM2D(filters=num_filters, kernel_size=(kernel_length, 1),
                       padding='same', return_sequences=True, name='conv_lstm_2'))
    net.add(Dropout(prob, name='dropout_2'))
    net.add(TimeDistributed(Dense(units=1, name='dense_1')))
    net.compile(loss=loss, optimizer='nadam')
    return net
|
from time import sleep
from threading import Lock
from math import floor
import random
from random import Random
import copy
from shared.ServiceManager import ServiceManager
from shared.utils import get_rnd, get_time, get_rnd_seed
from shared.utils import has_acs
from shared.LogParser import LogParser
from shared.utils import secs_to_datetime, date_to_string
if has_acs:
import ACS__POA
from ACS import CBDescIn
from Acspy.Clients.SimpleClient import PySimpleClient
import sb
# import jsonAcs
# ------------------------------------------------------------------
class MockSched(ServiceManager):
"""scheduler interface class, used by SchedulerACS
Only a single active instance is allowed to exist
"""
lock = Lock()
# ------------------------------------------------------------------
    def __init__(self, base_config, service_name, interrupt_sig):
        """Initialise the mock scheduler service.

        Bails out early when ACS is not available (development sanity check);
        otherwise connects to the ACS ArraySupervisor, sets the random-block
        generation parameters, seeds the RNG and starts the worker threads.
        """
        self.class_name = self.__class__.__name__
        service_name = (service_name if service_name is not None else self.class_name)
        super().__init__(service_name=service_name)
        self.log = LogParser(base_config=base_config, title=__name__)
        self.log.info([['y', ' - MockSched - ']])
        self.base_config = base_config
        # ------------------------------------------------------------------
        # sanity check for development only
        # ------------------------------------------------------------------
        if not has_acs:
            self.log.info([['wr', (' - no ACS !?! -') * 5]])
            return
        # ------------------------------------------------------------------
        self.site_type = self.base_config.site_type
        self.clock_sim = self.base_config.clock_sim
        self.inst_data = self.base_config.inst_data
        self.service_name = service_name
        self.interrupt_sig = interrupt_sig
        self.tel_ids = self.inst_data.get_inst_ids()
        # NOTE(review): dead debug toggle - the first assignment is
        # immediately overwritten, so debug is always False
        self.debug = True
        self.debug = False
        # local bookkeeping of generated scheduling/observation blocks
        self.cycle_blocks = []
        self.acs_blocks = dict()
        self.acs_blocks['blocks'] = dict()
        self.acs_blocks['metadata'] = dict()
        self.acs_blocks['active'] = []
        self.n_nights = -1
        self.active_sched_block = 0
        self.n_sched_subs = 0
        self.name_prefix = get_rnd(n_digits=6, out_type=str)
        self.client = PySimpleClient()
        # self.supervisor = self.client.getComponent('ArraySupervisor')
        self.supervisor = self.client.getDynamicComponent(
            'ArraySupervisor',
            'IDL:cta/centralctrl/ArraySupervisor:1.0',
            None,
            None,
        )
        self.log.info([['y', ' - MockSched - '], ['p', 'got supervisor!']])
        self.script_name = 'gui_ACS_sched_blocks_script_0'
        # self.script_name = 'wabbitSeason'
        # knobs for the random scheduling-block generator
        self.max_n_cycles = 100
        self.min_n_sched_block = 2
        self.max_n_sched_block = 5
        self.min_n_obs_block = 2
        self.max_n_obs_block = 6
        self.min_n_tel_block = 4
        self.max_n_free_tels = 5
        self.base_obs_block_duration_sec = 1800 # 30 minutes
        self.az_min_max = [0, 360]
        self.zen_min_max_tel = [0, 70]
        self.zen_min_max_pnt = [0, 20]
        # sleep duration for thread loops
        self.loop_sleep_sec = 1
        # minimal real-time delay between randomisations (once every self.loop_act_rate sec)
        self.loop_act_rate = max(int(3 / self.loop_sleep_sec), 1)
        rnd_seed = get_rnd_seed()
        # rnd_seed = 10987268332
        self.rnd_gen = Random(rnd_seed)
        # make sure this is the only active instance
        self.init_active_instance()
        self.setup_threads()
        return
# ------------------------------------------------------------------
def setup_threads(self):
self.add_thread(target=self.loop_main)
return
# ------------------------------------------------------------------
    def get_blocks(self):
        """Refresh the supervisor-side state and return a deep copy of the
        block bookkeeping; both steps run under the class lock."""
        with MockSched.lock:
            self.set_active_sched_blocks(has_lock=True)
            return copy.deepcopy(self.acs_blocks)
# ------------------------------------------------------------------
    def cancel_sched_blocks(self, sched_block_id):
        """Asynchronously cancel one scheduling block via the ACS supervisor.

        A no-op void ACS callback is registered so the cancel call can
        complete asynchronously; working()/done() intentionally do nothing.
        """
        class MyVoid(ACS__POA.CBvoid):
            def working(self, completion, desc):
                # print 'bbbbbbbbbb Callback working',sched_block_id
                return
            def done(self, completion, desc):
                # print 'bbbbbbbbbbbbb Callback done',sched_block_id
                return
        desc = CBDescIn(0, 0, 0)
        # desc = CBDescIn(0L, 0L, 0L)
        cb = MyVoid()
        self.supervisor.cancelSchedulingBlock(
            sched_block_id, self.client.activateOffShoot(cb), desc
        )
        return
# ------------------------------------------------------------------
    def cancel_zombie_sched_blocks(self, sched_block_ids=None):
        """Cancel supervisor-side scheduling blocks we no longer track.

        When no explicit id list is given, asks the supervisor for its active
        blocks and cancels every one absent from self.acs_blocks['blocks'].

        NOTE(review): gevent is used here (and in submit_block_cycle) but is
        not imported in this file's visible import section - confirm it is
        provided elsewhere or add the import.
        """
        if sched_block_ids is None:
            try:
                active = self.supervisor.listSchedulingBlocks()
            except Exception as e:
                self.log.debug([['b', '- Exception - MockSched.listSchedulingBlocks: '],
                                ['r', e]])
                active = []
            # zombies: active on the supervisor but unknown to local bookkeeping
            sched_block_ids = [x for x in active if x not in self.acs_blocks['blocks']]
        if len(sched_block_ids) == 0:
            return
        self.log.debug([['r', ' - MockSched.cancel_zombie_sched_blocks() ...'],
                        ['y', sched_block_ids]])
        for sched_block_id_now in sched_block_ids:
            gevent.spawn(self.cancel_sched_blocks, sched_block_id_now)
        return
# ------------------------------------------------------------------
    def init_block_cycle(self):
        """Build a fresh multi-cycle set of random scheduling blocks for one night.

        Discards previously tracked blocks (cancelling supervisor-side
        zombies), then fills self.cycle_blocks with cycles of randomly
        generated scheduling blocks, each holding several observation blocks,
        until the simulated night is exhausted.  Per-block metadata is
        registered in self.acs_blocks.

        NOTE(review): random.sample() below draws from the module-level RNG,
        not the seeded self.rnd_gen - this partially defeats reproducible
        seeding; confirm before changing.
        """
        with MockSched.lock:
            self.log.info([['p', ' - MockSched.init_block_cycle() ...']])
            debug_tmp = not True
            current_blocks = self.acs_blocks['blocks'].keys()
            if len(current_blocks) > 0:
                self.log.info([['r', '- will discard sched blocks: '],
                               ['b', current_blocks]])
            # cancel sched_blocks which should have expired
            self.cancel_zombie_sched_blocks()
            # init local bookkeeping objects
            self.cycle_blocks = []
            self.acs_blocks = dict()
            self.acs_blocks['blocks'] = dict()
            self.acs_blocks['metadata'] = dict()
            self.acs_blocks['active'] = []
            overhead_sec = 30
            is_cycle_done = False
            n_cycle_now = 0
            self.n_nights = self.clock_sim.get_n_nights()
            night_start_sec = self.clock_sim.get_night_start_sec()
            night_end_sec = self.clock_sim.get_night_end_sec()
            night_duration_sec = self.clock_sim.get_night_duration_sec()
            tot_sched_duration_sec = night_start_sec
            max_block_duration_sec = night_end_sec - self.base_obs_block_duration_sec
            if debug_tmp:
                print('---------------------------------------------------------------')
                print('night_duration_sec: ', night_duration_sec)
            while True:
                # stop once the night is filled, the cycle budget is spent,
                # or the previous cycle already ran past the end of the night
                can_break = not ((tot_sched_duration_sec < max_block_duration_sec) and
                                 (n_cycle_now < self.max_n_cycles) and
                                 (not is_cycle_done))
                if can_break:
                    break
                base_name = (
                    self.name_prefix + '_' + str(self.n_nights) + '_' + str(n_cycle_now)
                    + '_'
                )
                n_cycle_now += 1
                tel_ids_now = copy.deepcopy(self.tel_ids)
                n_tels = len(tel_ids_now)
                n_sched_blocks = min(
                    floor(n_tels / self.min_n_tel_block), self.max_n_sched_block
                )
                n_sched_blocks = max(
                    self.rnd_gen.randint(1, n_sched_blocks), self.min_n_sched_block
                )
                # n_sched_blocks = 50
                # n_sched_blocks = 3
                if debug_tmp:
                    print('------------------------------------------------------------')
                    print(
                        '- n_cycle_now',
                        n_cycle_now,
                        'tot_sched_duration_sec / percentage:',
                        tot_sched_duration_sec,
                        ((tot_sched_duration_sec - night_start_sec)
                         / float(night_duration_sec)),
                    )
                # generate random target/pointing ids
                target_pos = dict()
                # blockTrgs = dict()
                # blockTrgPnt = dict()
                # n_trgs = self.rnd_gen.randint(1, n_sched_blocks)
                # for n_trg_try in range(n_sched_blocks):
                #     n_trg_now = self.rnd_gen.randint(0, n_trgs-1)
                #     if n_trg_now not in blockTrgs:
                #         blockTrgs[n_trg_now] = [ n_trg_try ]
                #     else:
                #         blockTrgs[n_trg_now].append(n_trg_try)
                #     blockTrgPnt[n_trg_try] = { 'n_trg':n_trg_now, 'n_pnt':len(blockTrgs[n_trg_now])-1 }
                cycle_blocks_now = []
                sched_block_duration_sec = []
                # ------------------------------------------------------------------
                # one scheduling block per iteration, splitting the telescopes
                # ------------------------------------------------------------------
                for n_sched_block_now in range(n_sched_blocks):
                    sched_block_id = 'sched_block_' + base_name + str(n_sched_block_now)
                    # temporary hack to avoid SubArraySequencer issue
                    sched_block_id = sched_block_id.replace('_', '')
                    if n_sched_block_now < n_sched_blocks - 1:
                        n_tel_now = max(
                            self.min_n_tel_block,
                            len(tel_ids_now) - n_sched_blocks
                        )
                        n_tel_now = self.rnd_gen.randint(self.min_n_tel_block, n_tel_now)
                        n_tel_now = min(n_tel_now, len(tel_ids_now))
                    else:
                        n_tel_now = len(tel_ids_now) - \
                            self.rnd_gen.randint(0, self.max_n_free_tels)
                        if n_tel_now < self.min_n_tel_block:
                            continue
                    # NOTE(review): module-level random, not self.rnd_gen
                    sched_tel_ids = random.sample(tel_ids_now, n_tel_now)
                    tel_ids_now = [x for x in tel_ids_now if x not in sched_tel_ids]
                    sub_arr = []
                    for sched_tel_id_now in sched_tel_ids:
                        # telescope type inferred from the first letter of its id
                        if sched_tel_id_now[0] == 'S':
                            tel_type = sb.SST
                        elif sched_tel_id_now[0] == 'M':
                            tel_type = sb.MST
                        else:
                            tel_type = sb.LST
                        sub_arr += [sb.Telescope(sched_tel_id_now, tel_type)]
                    # sub_arr = [sb.Telescope('L_0', sb.LST), sb.Telescope(
                    #     'L0', sb.LST), sb.Telescope('T0', sb.LST)]
                    # sub_arr = [sb.Telescope('T1', sb.LST), sb.Telescope(
                    #     'T1', sb.MST), sb.Telescope('T1', sb.SST)]
                    # print sub_arr
                    sched_conf = sb.Configuration(
                        sb.InstrumentConfiguration(
                            sb.PointingMode(2, sb.Divergent(2)), sb.Subarray([], sub_arr)
                        ), 'camera', 'rta'
                    )
                    n_obs_blocks = self.rnd_gen.randint(
                        self.min_n_obs_block, self.max_n_obs_block
                    )
                    # n_trg = blockTrgPnt[n_sched_block_now]['n_trg']
                    # n_pnt = blockTrgPnt[n_sched_block_now]['n_pnt']
                    n_trg = n_sched_block_now
                    if n_trg not in target_pos:
                        delta_az = self.az_min_max[1] - self.az_min_max[0]
                        delta_zen = self.zen_min_max_tel[1] - self.zen_min_max_tel[0]
                        target_pos[n_trg] = [
                            (self.rnd_gen.random() * delta_az + self.az_min_max[0]),
                            (self.rnd_gen.random() * delta_zen + self.zen_min_max_tel[0])
                        ]
                    target_id = 'target_' + str(n_trg)
                    if debug_tmp:
                        print(
                            ' -- n_sched_block_now / n_tel_now:', n_sched_block_now,
                            n_tel_now, '-------', sched_block_id
                        )
                    obs_blocks = []
                    tot_obs_block_duration_sec = 0
                    block_duration_sec = tot_sched_duration_sec
                    for n_obs_now in range(n_obs_blocks):
                        obs_block_id = (
                            'obs_block_' + base_name + str(n_sched_block_now) + '_'
                            + str(n_obs_now)
                        )
                        # temporary hack to avoid SubArraySequencer issue
                        obs_block_id = obs_block_id.replace('_', '')
                        rnd = self.rnd_gen.random()
                        base_obs_block_duration_sec = self.base_obs_block_duration_sec
                        # randomly shorten a fraction of the obs blocks
                        # if rnd < 0.1:
                        #     base_obs_block_duration_sec /= 3
                        # elif rnd < 0.3:
                        #     base_obs_block_duration_sec /= 2
                        if rnd < 0.05:
                            base_obs_block_duration_sec /= 1.8
                        elif rnd < 0.3:
                            base_obs_block_duration_sec /= 1.5
                        elif rnd < 0.5:
                            base_obs_block_duration_sec /= 1.1
                        base_obs_block_duration_sec = int(base_obs_block_duration_sec)
                        planed_block_end_sec = (
                            block_duration_sec + base_obs_block_duration_sec
                        )
                        is_cycle_done = (planed_block_end_sec > night_end_sec)
                        if is_cycle_done:
                            if debug_tmp:
                                print(
                                    ' - is_cycle_done - n_obs_now',
                                    '/ night_start_sec / duration:',
                                    n_obs_now,
                                    block_duration_sec,
                                    base_obs_block_duration_sec,
                                )
                            break
                        # integrated time for all obs blocks within this sched block
                        tot_obs_block_duration_sec += base_obs_block_duration_sec
                        # correct for the fact that the config/finish stages take time and
                        # are not scaled
                        speed_factor = self.clock_sim.get_speed_factor()
                        scaled_duration = ((base_obs_block_duration_sec / speed_factor)
                                           - overhead_sec)
                        # scaled_duration = (base_obs_block_duration_sec) / speed_factor
                        scaled_duration = max(1, scaled_duration)
                        # pointing = target position plus a small random offset,
                        # wrapped back into the azimuth range
                        point_pos = copy.deepcopy(target_pos[n_trg])
                        point_pos[0] += (self.rnd_gen.random() - 0.5) * 10
                        point_pos[1] += (self.rnd_gen.random() - 0.5) * 10
                        if point_pos[0] > self.az_min_max[1]:
                            point_pos[0] -= 360
                        elif point_pos[0] < self.az_min_max[0]:
                            point_pos[0] += 360
                        if debug_tmp:
                            print(
                                ' --- n_obs_now / night_start_sec / duration / scaled_duration:',
                                n_obs_now, block_duration_sec,
                                base_obs_block_duration_sec, scaled_duration, '-------',
                                obs_block_id
                            )
                        obs_coords = sb.Coordinates(
                            2,
                            sb.HorizontalCoordinates(
                                target_pos[n_trg][1], target_pos[n_trg][0]
                            )
                        )
                        obs_mode = sb.ObservingMode(
                            sb.Slewing(1), sb.ObservingType(2, sb.GridSurvey(1, 1, 1)), []
                        )
                        obs_source = sb.Source(
                            target_id, sb.placeholder, sb.High, sb.RegionOfInterest(100),
                            obs_mode, obs_coords
                        )
                        obs_conds = sb.ObservingConditions(
                            1, scaled_duration, 1, sb.Quality(1, 1, 1),
                            sb.Weather(1, 1, 1, 1)
                        )
                        obs_block = sb.ObservationBlock(
                            obs_block_id, obs_source, obs_conds, self.script_name, 0
                        )
                        # metadata of the observing block
                        _n_sched_blocks = len(self.acs_blocks['blocks'].keys())
                        _n_obs_blocks = len(obs_blocks)
                        metadata = {
                            'n_sched':
                                _n_sched_blocks,
                            'n_obs':
                                _n_obs_blocks,
                            'block_name':
                                (str(_n_sched_blocks) + ' (' + str(_n_obs_blocks) + ')'),
                        }
                        # temporary way to store meta-data - should be replaced by
                        # global coordinate access function
                        self.acs_blocks['metadata'][obs_block_id] = {
                            'metadata': metadata,
                            'timestamp': get_time('msec'),
                            'point_pos': point_pos,
                            'duration': base_obs_block_duration_sec,
                            'scaled_duration': scaled_duration,
                            'start_time_sec_plan': block_duration_sec,
                            'start_time_sec_exe': None,
                            'status': sb.OB_PENDING,
                            'phases': []
                        }
                        obs_blocks += [obs_block]
                        block_duration_sec += base_obs_block_duration_sec
                    if len(obs_blocks) > 0:
                        sched_block = sb.SchedulingBlock(
                            sched_block_id,
                            sb.Proposal('proposalId'),
                            sched_conf,
                            obs_blocks,
                        )
                        cycle_blocks_now.append(sched_block)
                        self.acs_blocks['blocks'][sched_block_id] = {
                            'timestamp': get_time('msec'),
                            'state': 'wait',
                            'sched_block': sched_block
                        }
                    # list of duration of all sched blocks within this cycle
                    sched_block_duration_sec += [tot_obs_block_duration_sec]
                # the maximal duration of all blocks within this cycle
                tot_sched_duration_sec += max(sched_block_duration_sec)
                if len(cycle_blocks_now) > 0:
                    self.cycle_blocks.append(cycle_blocks_now)
            # print '-----',len(self.cycle_blocks)
        return
# ------------------------------------------------------------------
# move one from wait to run
# ------------------------------------------------------------------
    def set_active_sched_blocks(self, has_lock=False):
        """Synchronise local bookkeeping with the supervisor's view.

        Queries the supervisor for active scheduling blocks, updates each
        tracked block's state ('run' / 'done'), copies per-obs-block status
        and phases into the metadata, records execution start times, and -
        when any obs block started late - delays the planned start times of
        all future obs blocks via delay_sched_blocks().

        :param has_lock: set True when the caller already holds MockSched.lock.
        """
        # self.log.info([['c', ' - starting MockSched.set_active_sched_blocks ...']])
        metadata = self.acs_blocks['metadata']
        blocks = self.acs_blocks['blocks']
        def set_blocks():
            # actual work; wrapped so the lock handling below stays trivial
            try:
                active = self.supervisor.listSchedulingBlocks()
                # print('xxxxxxx active: ', active)
            except Exception as e:
                self.log.debug([
                    ['b', '- Exception - MockSched.set_active_sched_blocks: '],
                    ['r', e],
                ])
                active = []
            obs_block_delays = dict()
            self.acs_blocks['active'] = []
            for sched_block_id in blocks:
                obs_block_delays[sched_block_id] = None
                obs_blocks_status = None
                if sched_block_id in active:
                    try:
                        obs_blocks_status = self.supervisor.getSbOperationStatus(
                            sched_block_id
                        )
                        obs_blocks_status = obs_blocks_status.ob_statuses
                        # print(
                        #     'xxxxxxx obs_blocks_status: ', sched_block_id,
                        #     (obs_blocks_status is not None)
                        # )
                    except Exception as e:
                        self.log.debug([
                            ['b', '- Exception - MockSched.getSbOperationStatus: '],
                            ['r', e],
                        ])
                # ------------------------------------------------------------------
                # block is active on the supervisor -> mark it running and
                # mirror the per-obs-block status into our metadata
                # ------------------------------------------------------------------
                if obs_blocks_status is not None:
                    self.acs_blocks['active'].append(sched_block_id)
                    blocks[sched_block_id]['state'] = 'run'
                    # sched_block_status = self.supervisor.getSchedulingBlockStatus(
                    #     sched_block_id)
                    # sched_block_op_status = self.supervisor.getSbOperationStatus(
                    #     sched_block_id)
                    # self.log.info([
                    #     ['y', ' - active_scheduling_blocks - ', sched_block_id, ' '], [
                    #         'g', active, '-> '], ['y', sched_block_op_status, ' ']
                    # ])
                    # ------------------------------------------------------------------
                    # all state definitions:
                    # enum SchedulingBlockStatus {
                    #     SB_WAITING, SB_PENDING, SB_RUNNING, SB_SUCCEEDED,
                    #     SB_TRUNCATED, SB_CANCELED, SB_FAILED, SB_ABORTED
                    # };
                    # enum ObservationBlockStatus {
                    #     OB_PENDING, OB_RUNNING, OB_SUCCEEDED, OB_CANCELED,
                    #     OB_TRUNCATED, OB_FAILED
                    # };
                    # ------------------------------------------------------------------
                    for obs_block_now in obs_blocks_status:
                        obs_block_id = obs_block_now.id
                        # print(
                        #     '????', obs_block_id, (obs_block_id in metadata),
                        #     obs_block_now.status.status.status, obs_block_now.phases
                        # )
                        if obs_block_id not in metadata:
                            continue
                        # obs_block_now.status -> sb.ObservationBlockStatusDetail
                        metadata[obs_block_id]['status'] = (
                            obs_block_now.status.status.status
                        )
                        metadata[obs_block_id]['phases'] = obs_block_now.phases
                        # first time we see the block leave OB_PENDING:
                        # record the executed start time and any delay
                        if ((metadata[obs_block_id]['start_time_sec_exe'] is None)
                                and (metadata[obs_block_id]['status'] != sb.OB_PENDING)):
                            time_now_sec = self.clock_sim.get_time_now_sec()
                            metadata[obs_block_id]['start_time_sec_exe'] = time_now_sec
                            time_dif = (
                                time_now_sec
                                - metadata[obs_block_id]['start_time_sec_plan']
                            )
                            if time_dif > 0:
                                obs_block_delays[sched_block_id] = time_dif
                            self.log.info([
                                ['y', '- obs block is now running: '],
                                ['c', sched_block_id, '/ '],
                                ['r', obs_block_id, '\n'],
                                [
                                    'g',
                                    ((' ' * 20) + '--> planned/executed start:'),
                                    metadata[obs_block_id]['start_time_sec_plan'],
                                    '/',
                                    time_now_sec,
                                ],
                                [
                                    'p',
                                    ' duration:',
                                    metadata[obs_block_id]['duration'],
                                    '/',
                                    metadata[obs_block_id]['scaled_duration'],
                                ],
                            ])
                # ------------------------------------------------------------------
                # block dropped off the supervisor while marked running ->
                # it finished; roll a random final status for each obs block
                # ------------------------------------------------------------------
                if ((obs_blocks_status is None)
                        and (blocks[sched_block_id]['state'] == 'run')):
                    blocks[sched_block_id]['state'] = 'done'
                    obs_blocks = blocks[sched_block_id]['sched_block'].observation_blocks
                    for obs_block in obs_blocks:
                        obs_block_id = obs_block.id
                        metadata[obs_block_id]['phases'] = []
                        rnd_now = self.rnd_gen.random()
                        if rnd_now < 0.05:
                            metadata[obs_block_id]['status'] = sb.OB_CANCELED
                        elif rnd_now < 0.1:
                            metadata[obs_block_id]['status'] = sb.OB_FAILED
                        else:
                            metadata[obs_block_id]['status'] = sb.OB_SUCCEEDED
                    self.log.info([
                        ['b', '- sched block is done '],
                        ['r', sched_block_id],
                        ['g', ' - current-time: '],
                        [
                            'g',
                            date_to_string(
                                secs_to_datetime(self.clock_sim.get_time_now_sec()),
                                date_string=None,
                            )
                        ],
                        ['y', ' ', self.clock_sim.get_time_now_sec()],
                    ])
            # ------------------------------------------------------------------
            # adjust the start time of all future OBs --- this will NOT
            # take care of OBs of currently active SB, which may overshoot
            # the end of the night
            # ------------------------------------------------------------------
            time_difs = [x for x in obs_block_delays.values() if x is not None]
            if len(time_difs) > 0:
                # adjust the start time of all future OBs in the currently active SBs
                active_sched_block = len(self.cycle_blocks)
                for sched_block_id in self.acs_blocks['active']:
                    if obs_block_delays[sched_block_id] is None:
                        continue
                    updated_obs_blocks = []
                    sched_block = blocks[sched_block_id]['sched_block']
                    for obs_block in sched_block.observation_blocks:
                        obs_block_id = obs_block.id
                        if metadata[obs_block_id]['start_time_sec_exe'] is None:
                            updated_obs_blocks += [[
                                obs_block_id, obs_block_delays[sched_block_id]
                            ]]
                            metadata[obs_block_id]['start_time_sec_plan'] += (
                                obs_block_delays[sched_block_id]
                            )
                    if len(updated_obs_blocks) > 0:
                        self.log.info([
                            [
                                'b',
                                ' -+- updating start_time_sec_plan of',
                                sched_block_id,
                                '',
                            ],
                            ['y', updated_obs_blocks],
                        ])
                    for n_cycle_now in range(len(self.cycle_blocks)):
                        if sched_block in self.cycle_blocks[n_cycle_now]:
                            active_sched_block = n_cycle_now
                # adjust the start time of all OBs in future SBs
                self.log.info([['g', '- will delay future SBs ...']])
                self.delay_sched_blocks(
                    active_sched_block=(active_sched_block + 1), time_dif=max(time_difs)
                )
            # ------------------------------------------------------------------
            # ????????????????????????????????????????????????????????????????
            # how do i get the status of a ob which has: OB_SUCCEEDED, OB_CANCELED,
            # OB_TRUNCATED, OB_FAILED ????????????????
            # ????????????????????????????????????????????????????????????????
            # ------------------------------------------------------------------
            return
        if has_lock:
            set_blocks()
        else:
            with MockSched.lock:
                set_blocks()
        return
# ------------------------------------------------------------------
# update the estimated starting times of all blocks based on the
# current state of execution clip the list if any block ends up
# lasting after the night is supposed to end
# ------------------------------------------------------------------
    def delay_sched_blocks(self, active_sched_block=None, time_dif=None):
        """Shift the planned start times of all future obs blocks by the
        observed delay, cancelling any SB that would overshoot the night.

        :param active_sched_block: index of the cycle about to be submitted
                                   (defaults to self.active_sched_block)
        :param time_dif: delay in seconds to apply; when None it is derived
                         from the first obs block of each SB in that cycle
        """
        self.log.info([['c', ' - starting MockSched.delay_sched_blocks ...']])
        if active_sched_block is None:
            active_sched_block = self.active_sched_block
        if active_sched_block >= len(self.cycle_blocks) - 1:
            return
        # get the maximal difference between planned and current time for each
        # OB of each SB in the cycle which is about to be submitted
        if time_dif is None:
            time_dif_max = -1
            for sched_block in self.cycle_blocks[active_sched_block]:
                obs_block_id = sched_block.observation_blocks[0].id
                start_time_sec = (
                    self.acs_blocks['metadata'][obs_block_id]['start_time_sec_plan']
                )
                time_dif_now = self.clock_sim.get_time_now_sec() - start_time_sec
                time_dif_max = max(time_dif_max, time_dif_now)
        else:
            time_dif_max = time_dif
        # if any of the OBs is late, adjust the planned start time of all OBs in all SBs
        # and remove any OBs which will overshoot the end of the night
        if time_dif_max > 0:
            self.log.info([
                ['r', '- updating start_time_sec_plan by: '],
                ['b', time_dif_max],
            ])
            # perform the adjustment for all future SBs in all cycles
            for n_cycle_now in range(active_sched_block, len(self.cycle_blocks)):
                sched_block_overs = []
                for sched_block in self.cycle_blocks[n_cycle_now]:
                    self.log.info([
                        ['b', ' -+- updating ', sched_block.id, ''],
                        ['y', [x.id for x in sched_block.observation_blocks]],
                    ])
                    # adjust the start time of all OBs in this SB
                    for obs_block in sched_block.observation_blocks:
                        obs_block_id = obs_block.id
                        metadata = self.acs_blocks['metadata'][obs_block_id]
                        metadata['start_time_sec_plan'] += time_dif_max
                        end_time_sec = (
                            metadata['start_time_sec_plan'] + metadata['duration']
                        )
                        # a simplistic approach - cancel any SB if at least one of
                        # its OBs overshoots the end of the night
                        if ((end_time_sec > self.clock_sim.get_night_end_sec())
                                and (sched_block not in sched_block_overs)):
                            sched_block_overs.append(sched_block)
                # remove all overshooting SBs from the cycle and update the bookkeeping
                for sched_block in sched_block_overs:
                    self.log.info([
                        ['r', '- cancelling all OBs from future cycle for SB '],
                        ['p', sched_block.id],
                    ])
                    for obs_block in sched_block.observation_blocks:
                        metadata = self.acs_blocks['metadata'][obs_block.id]
                        metadata['status'] = sb.OB_CANCELED
                    self.acs_blocks['blocks'][sched_block.id]['state'] = 'cancel'
                    self.cycle_blocks[n_cycle_now].remove(sched_block)
            # remove any cycle which has no SBs left in it
            self.cycle_blocks = [x for x in self.cycle_blocks if len(x) > 0]
        return
# ------------------------------------------------------------------
# move one from wait to run
# ------------------------------------------------------------------
def submit_block_cycle(self):
    """Submit the next cycle of scheduling blocks (SBs) to the supervisor.

    Re-initialises the night's block cycles when the simulated night has
    rolled over or all cycles have been consumed, shifts the planned start
    times of future SBs to absorb accumulated delays, then spawns one
    greenlet per waiting SB in the currently active cycle.
    """
    self.log.info([['g', ' - starting MockSched.submit_block_cycle ...']])
    # a new simulated night has begun since the last submission
    has_reset_night = (self.n_nights < self.clock_sim.get_n_nights())
    if has_reset_night:
        self.log.info([[
            'p',
            ' - has_reset_night - will cancel all ',
            'running blocks and reset cycles ...',
        ]])
    # regenerate the cycle list when it is exhausted or the night reset
    if self.active_sched_block >= len(self.cycle_blocks) or has_reset_night:
        self.n_sched_subs = 0
        self.active_sched_block = 0
        self.init_block_cycle()
    # update the start time of all future SBs (may drop SBs that would
    # overshoot the end of the night)
    with MockSched.lock:
        self.delay_sched_blocks()
    # check if the delay handling has removed all executable blocks;
    # NOTE(review): this recursion assumes init_block_cycle() eventually
    # produces a non-empty cycle list — confirm it cannot recurse forever
    if self.active_sched_block >= len(self.cycle_blocks):
        self.submit_block_cycle()
        return
    # ------------------------------------------------------------------
    # submit new blocks
    # ------------------------------------------------------------------
    with MockSched.lock:
        self.n_nights = self.clock_sim.get_n_nights()
        self.n_sched_subs = 0
        for sched_block in self.cycle_blocks[self.active_sched_block]:
            if self.acs_blocks['blocks'][sched_block.id]['state'] == 'wait':
                # increase the self.n_sched_subs counter BEFORE spawning the
                # thread, so the in-flight count is never under-reported
                self.n_sched_subs += 1
                # spawn a new SB submission (will wait for its start time to
                # arrive before actually submitting)
                gevent.spawn(self.submit_one_block, sched_block)
        self.set_active_sched_blocks(has_lock=True)
        self.active_sched_block += 1
    return
# ------------------------------------------------------------------
def submit_one_block(self, sched_block):
    """Wait for ``sched_block``'s planned start time, then submit it.

    Runs inside its own greenlet (spawned by ``submit_block_cycle``).
    Polls the simulated clock every 0.5 s until the planned start time of
    the SB's first OB arrives, submits the SB to the supervisor, logs the
    planned OB time windows, and finally decrements the in-flight
    submission counter ``self.n_sched_subs``.
    """
    with MockSched.lock:
        # the planned start time of the whole SB is taken from its first OB
        obs_block_id = sched_block.observation_blocks[0].id
    n_sub_tries = 0
    while True:
        # abort if the simulated night rolled over while we were waiting;
        # NOTE(review): returns without decrementing self.n_sched_subs —
        # presumably relies on submit_block_cycle resetting the counter on
        # night reset; confirm
        has_reset_night = (self.n_nights < self.clock_sim.get_n_nights())
        if (has_reset_night):
            return
        block_meta = self.acs_blocks['metadata']
        start_time_sec = block_meta[obs_block_id]['start_time_sec_plan']
        # negative while we are still ahead of the planned start time
        time_dif = self.clock_sim.get_time_now_sec() - start_time_sec
        if time_dif >= 0:
            break
        # log only every 10th poll to keep the output readable
        if n_sub_tries % 10 == 0:
            self.log.info([['b', '- waiting to submit '], ['g', sched_block.id],
                           ['b', ' - remaining time: ', time_dif]])
        n_sub_tries += 1
        sleep(0.5)
    try:
        self.supervisor.putSchedulingBlock(sched_block)
        do_msg = True
        if do_msg:
            block_meta = self.acs_blocks['metadata']

            # helpers to format the planned time window of a single OB
            def block_start_time_sec(x):
                return int(floor(block_meta[x.id]['start_time_sec_plan']))

            def block_end_time_sec(x):
                return int(
                    block_meta[x.id]['start_time_sec_plan']
                    + block_meta[x.id]['duration']
                )

            def block_start_time(x):
                return date_to_string(
                    secs_to_datetime(block_start_time_sec(x)),
                    date_string=None,
                )

            def block_end_time(x):
                return date_to_string(
                    secs_to_datetime(block_end_time_sec(x)),
                    date_string=None,
                )

            # per-OB summary: "<id> --> <n_sched> (<n_obs>)" plus the
            # [start, end] window in seconds ...
            block_times_0 = [(
                ((str(x.id) + ' --> ') +
                 (str(block_meta[x.id]['metadata']['n_sched']) + ' (') +
                 (str(block_meta[x.id]['metadata']['n_obs']) + ')')),
                [block_start_time_sec(x),
                 block_end_time_sec(x)],
            ) for x in sched_block.observation_blocks]
            # ... and the same window as formatted date strings
            block_times_1 = [([block_start_time(x),
                               block_end_time(x)], )
                             for x in sched_block.observation_blocks]
            self.log.info([
                ['y', '- submitted sched block: '],
                ['p', sched_block.id, ' '],
                ['g', block_times_0, ' ... '],
                ['y', block_times_1],
            ])
    except Exception as e:
        self.log.debug([
            ['b', '- Exception - MockSched.putSchedulingBlock: '],
            ['r', '\n' + (' ' * 25), e],
        ])
    # as the last action in this thread, update the self.n_sched_subs counter
    self.n_sched_subs -= 1
    return
# ------------------------------------------------------------------
# move one from wait to run
# ------------------------------------------------------------------
def check_sched_blocks(self):
    """Log the supervisor-reported status and phases of every active SB."""
    self.log.debug([['b', ' - starting MockSched.check_sched_blocks ...']])
    with MockSched.lock:
        # refresh the bookkeeping of active SBs while holding the lock
        self.set_active_sched_blocks(has_lock=True)
        for sched_block_id in self.acs_blocks['active']:
            sb_status = self.supervisor.getSchedulingBlockStatus(sched_block_id)
            sb_op_status = self.supervisor.getSbOperationStatus(sched_block_id)
            self.log.debug([
                ['y', ' - active_scheduling_blocks - '],
                ['g', self.acs_blocks['active'], '-> '],
                ['y', sb_status, ' '],
            ])
            # dump every phase of every OB belonging to this SB
            for ob_status in sb_op_status.ob_statuses:
                for phase in ob_status.phases:
                    self.log.debug([
                        [
                            'y',
                            ' -- phases - ',
                            sched_block_id,
                            ' ',
                            ob_status.id,
                            ' ',
                            ob_status.status,
                            ' ',
                        ],
                        [
                            'g',
                            phase.heartbeat_counter,
                            ' ',
                            phase.name,
                            ' ',
                            phase.status,
                            ' ',
                            phase.progress_message,
                        ],
                    ])
    return
# ------------------------------------------------------------------
def loop_main(self):
    """Main scheduler loop.

    Submits the first block cycle, then wakes every ``loop_sleep_sec``
    while ``can_loop()`` holds; every ``loop_act_rate`` iterations it
    refreshes the active-SB bookkeeping and, once nothing is active and no
    submissions are in flight, submits the next block cycle.
    """
    self.log.info([['g', ' - starting MockSched.loop_main ...']])
    self.submit_block_cycle()
    # FIX: this warning was accidentally printed three times in a row;
    # emitting it once carries the same information
    print(' -- MockSched.loop_main has not been verified, since no acs ...')
    n_loop = 0
    while self.can_loop():
        n_loop += 1
        sleep(self.loop_sleep_sec)
        # only act every loop_act_rate-th iteration
        if n_loop % self.loop_act_rate != 0:
            continue
        self.set_active_sched_blocks()
        # when all SBs are done and no submissions are pending, move on
        if len(self.acs_blocks['active']) == 0 and self.n_sched_subs == 0:
            self.submit_block_cycle()
        # self.check_sched_blocks()
    return
# NOTE(review): intentionally-dead branch — never executes; it only documents
# that the large commented-out section below is the pre-20200812 code
if False:
    print(
        'the following is the code before 20200812, upgrades not tested sonce no acs available...'
    )
# import gevent
# from gevent import sleep
# try:
# from gevent.coros import BoundedSemaphore
# except:
# from gevent.lock import BoundedSemaphore
# from math import floor
# import random
# from random import Random
# import copy
# from shared.utils import get_rnd, get_time, get_rnd_seed
# from shared.utils import has_acs
# from shared.LogParser import LogParser
# from shared.utils import secs_to_datetime, date_to_string
# if has_acs:
# import ACS__POA
# from ACS import CBDescIn
# from Acspy.Clients.SimpleClient import PySimpleClient
# import sb
# # import jsonAcs
# # ------------------------------------------------------------------
# #
# # ------------------------------------------------------------------
# class MockSched():
# has_active = False
# # privat lock for this widget type
# lock = BoundedSemaphore(1)
# # ------------------------------------------------------------------
# #
# # ------------------------------------------------------------------
# def __init__(self, base_config):
# self.log = LogParser(base_config=base_config, title=__name__)
# self.log.info([['y', ' - MockSched - ']])
# self.base_config = base_config
# # ------------------------------------------------------------------
# # sanity check for development only
# # ------------------------------------------------------------------
# if not has_acs:
# self.log.info([['wr', (' - no ACS !?! -') * 5]])
# return
# # ------------------------------------------------------------------
# if MockSched.has_active:
# self.log.info([['wr', (' - has_active -') * 5]])
# return
# MockSched.has_active = True
# # ------------------------------------------------------------------
# self.site_type = self.base_config.site_type
# self.clock_sim = self.base_config.clock_sim
# self.inst_data = self.base_config.inst_data
# self.tel_ids = self.inst_data.get_inst_ids()
# self.debug = True
# self.debug = False
# self.cycle_blocks = []
# self.acs_blocks = dict()
# self.acs_blocks['blocks'] = dict()
# self.acs_blocks['metadata'] = dict()
# self.acs_blocks['active'] = []
# self.n_nights = -1
# self.active_sched_block = 0
# self.n_sched_subs = 0
# self.name_prefix = get_rnd(n_digits=6, out_type=str)
# self.client = PySimpleClient()
# # self.supervisor = self.client.getComponent('ArraySupervisor')
# self.supervisor = self.client.getDynamicComponent(
# 'ArraySupervisor',
# 'IDL:cta/centralctrl/ArraySupervisor:1.0',
# None,
# None,
# )
# self.log.info([['y', ' - MockSched - '], ['p', 'got supervisor!']])
# self.script_name = 'gui_ACS_sched_blocks_script_0'
# # self.script_name = 'wabbitSeason'
# self.max_n_cycles = 100
# self.min_n_sched_block = 2
# self.max_n_sched_block = 5
# self.min_n_obs_block = 2
# self.max_n_obs_block = 6
# self.min_n_tel_block = 4
# self.max_n_free_tels = 5
# self.base_obs_block_duration_sec = 1800 # 30 minutes
# self.az_min_max = [0, 360]
# self.zen_min_max_tel = [0, 70]
# self.zen_min_max_pnt = [0, 20]
# self.loop_sleep = 3
# rnd_seed = get_rnd_seed()
# # rnd_seed = 10987268332
# self.rnd_gen = Random(rnd_seed)
# gevent.spawn(self.loop)
# return
# # ------------------------------------------------------------------
# #
# # ------------------------------------------------------------------
# def get_blocks(self):
# with MockSched.lock:
# self.set_active_sched_blocks(has_lock=True)
# return copy.deepcopy(self.acs_blocks)
# # ------------------------------------------------------------------
# #
# # ------------------------------------------------------------------
# def cancel_sched_blocks(self, sched_block_id):
# class MyVoid(ACS__POA.CBvoid):
# def working(self, completion, desc):
# # print 'bbbbbbbbbb Callback working',sched_block_id
# return
# def done(self, completion, desc):
# # print 'bbbbbbbbbbbbb Callback done',sched_block_id
# return
# desc = CBDescIn(0, 0, 0)
# # desc = CBDescIn(0L, 0L, 0L)
# cb = MyVoid()
# self.supervisor.cancelSchedulingBlock(
# sched_block_id, self.client.activateOffShoot(cb), desc
# )
# return
# # ------------------------------------------------------------------
# #
# # ------------------------------------------------------------------
# def cancel_zombie_sched_blocks(self, sched_block_ids=None):
# if sched_block_ids is None:
# try:
# active = self.supervisor.listSchedulingBlocks()
# except Exception as e:
# self.log.debug([['b', '- Exception - MockSched.listSchedulingBlocks: '],
# ['r', e]])
# active = []
# sched_block_ids = [x for x in active if x not in self.acs_blocks['blocks']]
# if len(sched_block_ids) == 0:
# return
# self.log.debug([['r', ' - MockSched.cancel_zombie_sched_blocks() ...'],
# ['y', sched_block_ids]])
# for sched_block_id_now in sched_block_ids:
# gevent.spawn(self.cancel_sched_blocks, sched_block_id_now)
# return
# # ------------------------------------------------------------------
# #
# # ------------------------------------------------------------------
# def init_block_cycle(self):
# with MockSched.lock:
# self.log.info([['p', ' - MockSched.init_block_cycle() ...']])
# debug_tmp = not True
# current_blocks = self.acs_blocks['blocks'].keys()
# if len(current_blocks) > 0:
# self.log.info([['r', '- will discard sched blocks: '],
# ['b', current_blocks]])
# # cancel sched_blocks which should have expired
# self.cancel_zombie_sched_blocks()
# # init local bookkeeping objects
# self.cycle_blocks = []
# self.acs_blocks = dict()
# self.acs_blocks['blocks'] = dict()
# self.acs_blocks['metadata'] = dict()
# self.acs_blocks['active'] = []
# overhead_sec = 30
# is_cycle_done = False
# n_cycle_now = 0
# self.n_nights = self.clock_sim.get_n_nights()
# night_start_sec = self.clock_sim.get_night_start_sec()
# night_end_sec = self.clock_sim.get_night_end_sec()
# night_duration_sec = self.clock_sim.get_night_duration_sec()
# tot_sched_duration_sec = night_start_sec
# max_block_duration_sec = night_end_sec - self.base_obs_block_duration_sec
# if debug_tmp:
# print('---------------------------------------------------------------')
# print('night_duration_sec: ', night_duration_sec)
# while True:
# can_break = not ((tot_sched_duration_sec < max_block_duration_sec) and
# (n_cycle_now < self.max_n_cycles) and
# (not is_cycle_done))
# if can_break:
# break
# base_name = (
# self.name_prefix + '_' + str(self.n_nights) + '_' + str(n_cycle_now)
# + '_'
# )
# n_cycle_now += 1
# tel_ids_now = copy.deepcopy(self.tel_ids)
# n_tels = len(tel_ids_now)
# n_sched_blocks = min(
# floor(n_tels / self.min_n_tel_block), self.max_n_sched_block
# )
# n_sched_blocks = max(
# self.rnd_gen.randint(1, n_sched_blocks), self.min_n_sched_block
# )
# # n_sched_blocks = 50
# # n_sched_blocks = 3
# if debug_tmp:
# print('------------------------------------------------------------')
# print(
# '- n_cycle_now',
# n_cycle_now,
# 'tot_sched_duration_sec / percentage:',
# tot_sched_duration_sec,
# ((tot_sched_duration_sec - night_start_sec)
# / float(night_duration_sec)),
# )
# # generate random target/pointing ids
# target_pos = dict()
# # blockTrgs = dict()
# # blockTrgPnt = dict()
# # n_trgs = self.rnd_gen.randint(1, n_sched_blocks)
# # for n_trg_try in range(n_sched_blocks):
# # n_trg_now = self.rnd_gen.randint(0, n_trgs-1)
# # if n_trg_now not in blockTrgs:
# # blockTrgs[n_trg_now] = [ n_trg_try ]
# # else:
# # blockTrgs[n_trg_now].append(n_trg_try)
# # blockTrgPnt[n_trg_try] = { 'n_trg':n_trg_now, 'n_pnt':len(blockTrgs[n_trg_now])-1 }
# cycle_blocks_now = []
# sched_block_duration_sec = []
# # ------------------------------------------------------------------
# #
# # ------------------------------------------------------------------
# for n_sched_block_now in range(n_sched_blocks):
# sched_block_id = 'sched_block_' + base_name + str(n_sched_block_now)
# # temporary hack to avoid SubArraySequencer issue
# sched_block_id = sched_block_id.replace('_', '')
# if n_sched_block_now < n_sched_blocks - 1:
# n_tel_now = max(
# self.min_n_tel_block,
# len(tel_ids_now) - n_sched_blocks
# )
# n_tel_now = self.rnd_gen.randint(self.min_n_tel_block, n_tel_now)
# n_tel_now = min(n_tel_now, len(tel_ids_now))
# else:
# n_tel_now = len(tel_ids_now) - \
# self.rnd_gen.randint(0, self.max_n_free_tels)
# if n_tel_now < self.min_n_tel_block:
# continue
# sched_tel_ids = random.sample(tel_ids_now, n_tel_now)
# tel_ids_now = [x for x in tel_ids_now if x not in sched_tel_ids]
# sub_arr = []
# for sched_tel_id_now in sched_tel_ids:
# # tel_type = sb.SST if sched_tel_id_now[0] == 'S' else sb.MST if sched_tel_id_now[0] == 'M' else sb.LST
# if sched_tel_id_now[0] == 'S':
# tel_type = sb.SST
# elif sched_tel_id_now[0] == 'M':
# tel_type = sb.MST
# else:
# tel_type = sb.LST
# sub_arr += [sb.Telescope(sched_tel_id_now, tel_type)]
# # sub_arr = [sb.Telescope('L_0', sb.LST), sb.Telescope(
# # 'L0', sb.LST), sb.Telescope('T0', sb.LST)]
# # sub_arr = [sb.Telescope('T1', sb.LST), sb.Telescope(
# # 'T1', sb.MST), sb.Telescope('T1', sb.SST)]
# # print sub_arr
# sched_conf = sb.Configuration(
# sb.InstrumentConfiguration(
# sb.PointingMode(2, sb.Divergent(2)), sb.Subarray([], sub_arr)
# ), 'camera', 'rta'
# )
# n_obs_blocks = self.rnd_gen.randint(
# self.min_n_obs_block, self.max_n_obs_block
# )
# # n_trg = blockTrgPnt[n_sched_block_now]['n_trg']
# # n_pnt = blockTrgPnt[n_sched_block_now]['n_pnt']
# n_trg = n_sched_block_now
# if n_trg not in target_pos:
# delta_az = self.az_min_max[1] - self.az_min_max[0]
# delta_zen = self.zen_min_max_tel[1] - self.zen_min_max_tel[0]
# target_pos[n_trg] = [
# (self.rnd_gen.random() * delta_az + self.az_min_max[0]),
# (self.rnd_gen.random() * delta_zen + self.zen_min_max_tel[0])
# ]
# target_id = 'target_' + str(n_trg)
# if debug_tmp:
# print(
# ' -- n_sched_block_now / n_tel_now:', n_sched_block_now,
# n_tel_now, '-------', sched_block_id
# )
# obs_blocks = []
# tot_obs_block_duration_sec = 0
# block_duration_sec = tot_sched_duration_sec
# for n_obs_now in range(n_obs_blocks):
# obs_block_id = (
# 'obs_block_' + base_name + str(n_sched_block_now) + '_'
# + str(n_obs_now)
# )
# # temporary hack to avoid SubArraySequencer issue
# obs_block_id = obs_block_id.replace('_', '')
# rnd = self.rnd_gen.random()
# base_obs_block_duration_sec = self.base_obs_block_duration_sec
# # if rnd < 0.1:
# # base_obs_block_duration_sec /= 3
# # elif rnd < 0.3:
# # base_obs_block_duration_sec /= 2
# if rnd < 0.05:
# base_obs_block_duration_sec /= 1.8
# elif rnd < 0.3:
# base_obs_block_duration_sec /= 1.5
# elif rnd < 0.5:
# base_obs_block_duration_sec /= 1.1
# base_obs_block_duration_sec = int(base_obs_block_duration_sec)
# planed_block_end_sec = (
# block_duration_sec + base_obs_block_duration_sec
# )
# is_cycle_done = (planed_block_end_sec > night_end_sec)
# if is_cycle_done:
# if debug_tmp:
# print(
# ' - is_cycle_done - n_obs_now',
# '/ night_start_sec / duration:',
# n_obs_now,
# block_duration_sec,
# base_obs_block_duration_sec,
# )
# break
# # integrated time for all obs blocks within this sched block
# tot_obs_block_duration_sec += base_obs_block_duration_sec
# # correct for the fact that the config/finish stages take time and
# # are not scaled
# speed_factor = self.clock_sim.get_speed_factor()
# scaled_duration = ((base_obs_block_duration_sec / speed_factor)
# - overhead_sec)
# # scaled_duration = (base_obs_block_duration_sec) / speed_factor
# scaled_duration = max(1, scaled_duration)
# point_pos = copy.deepcopy(target_pos[n_trg])
# point_pos[0] += (self.rnd_gen.random() - 0.5) * 10
# point_pos[1] += (self.rnd_gen.random() - 0.5) * 10
# if point_pos[0] > self.az_min_max[1]:
# point_pos[0] -= 360
# elif point_pos[0] < self.az_min_max[0]:
# point_pos[0] += 360
# if debug_tmp:
# print(
# ' --- n_obs_now / night_start_sec / duration / scaled_duration:',
# n_obs_now, block_duration_sec,
# base_obs_block_duration_sec, scaled_duration, '-------',
# obs_block_id
# )
# obs_coords = sb.Coordinates(
# 2,
# sb.HorizontalCoordinates(
# target_pos[n_trg][1], target_pos[n_trg][0]
# )
# )
# obs_mode = sb.ObservingMode(
# sb.Slewing(1), sb.ObservingType(2, sb.GridSurvey(1, 1, 1)), []
# )
# obs_source = sb.Source(
# target_id, sb.placeholder, sb.High, sb.RegionOfInterest(100),
# obs_mode, obs_coords
# )
# obs_conds = sb.ObservingConditions(
# 1, scaled_duration, 1, sb.Quality(1, 1, 1),
# sb.Weather(1, 1, 1, 1)
# )
# obs_block = sb.ObservationBlock(
# obs_block_id, obs_source, obs_conds, self.script_name, 0
# )
# # metadata of the observing block
# _n_sched_blocks = len(self.acs_blocks['blocks'].keys())
# _n_obs_blocks = len(obs_blocks)
# metadata = {
# 'n_sched':
# _n_sched_blocks,
# 'n_obs':
# _n_obs_blocks,
# 'block_name':
# (str(_n_sched_blocks) + ' (' + str(_n_obs_blocks) + ')'),
# }
# # temporary way to store meta-data - should be replaced by
# # global coordinate access function
# self.acs_blocks['metadata'][obs_block_id] = {
# 'metadata': metadata,
# 'timestamp': get_time('msec'),
# 'point_pos': point_pos,
# 'duration': base_obs_block_duration_sec,
# 'scaled_duration': scaled_duration,
# 'start_time_sec_plan': block_duration_sec,
# 'start_time_sec_exe': None,
# 'status': sb.OB_PENDING,
# 'phases': []
# }
# obs_blocks += [obs_block]
# block_duration_sec += base_obs_block_duration_sec
# if len(obs_blocks) > 0:
# sched_block = sb.SchedulingBlock(
# sched_block_id,
# sb.Proposal('proposalId'),
# sched_conf,
# obs_blocks,
# )
# cycle_blocks_now.append(sched_block)
# self.acs_blocks['blocks'][sched_block_id] = {
# 'timestamp': get_time('msec'),
# 'state': 'wait',
# 'sched_block': sched_block
# }
# # list of duration of all sched blocks within this cycle
# sched_block_duration_sec += [tot_obs_block_duration_sec]
# # the maximal duration of all blocks within this cycle
# tot_sched_duration_sec += max(sched_block_duration_sec)
# if len(cycle_blocks_now) > 0:
# self.cycle_blocks.append(cycle_blocks_now)
# # print '-----',len(self.cycle_blocks)
# return
# # ------------------------------------------------------------------
# # move one from wait to run
# # ------------------------------------------------------------------
# def set_active_sched_blocks(self, has_lock=False):
# # self.log.info([['c', ' - starting MockSched.set_active_sched_blocks ...']])
# metadata = self.acs_blocks['metadata']
# blocks = self.acs_blocks['blocks']
# def set_blocks():
# try:
# active = self.supervisor.listSchedulingBlocks()
# # print('xxxxxxx active: ', active)
# except Exception as e:
# self.log.debug([
# ['b', '- Exception - MockSched.set_active_sched_blocks: '],
# ['r', e],
# ])
# active = []
# obs_block_delays = dict()
# self.acs_blocks['active'] = []
# for sched_block_id in blocks:
# obs_block_delays[sched_block_id] = None
# obs_blocks_status = None
# if sched_block_id in active:
# try:
# obs_blocks_status = self.supervisor.getSbOperationStatus(
# sched_block_id
# )
# obs_blocks_status = obs_blocks_status.ob_statuses
# # print(
# # 'xxxxxxx obs_blocks_status: ', sched_block_id,
# # (obs_blocks_status is not None)
# # )
# except Exception as e:
# self.log.debug([
# ['b', '- Exception - MockSched.getSbOperationStatus: '],
# ['r', e],
# ])
# # ------------------------------------------------------------------
# #
# # ------------------------------------------------------------------
# if obs_blocks_status is not None:
# self.acs_blocks['active'].append(sched_block_id)
# blocks[sched_block_id]['state'] = 'run'
# # sched_block_status = self.supervisor.getSchedulingBlockStatus(
# # sched_block_id)
# # sched_block_op_status = self.supervisor.getSbOperationStatus(
# # sched_block_id)
# # self.log.info([
# # ['y', ' - active_scheduling_blocks - ', sched_block_id, ' '], [
# # 'g', active, '-> '], ['y', sched_block_op_status, ' ']
# # ])
# # ------------------------------------------------------------------
# # all state definitions:
# # enum SchedulingBlockStatus {
# # SB_WAITING, SB_PENDING, SB_RUNNING, SB_SUCCEEDED,
# # SB_TRUNCATED, SB_CANCELED, SB_FAILED, SB_ABORTED
# # };
# # enum ObservationBlockStatus {
# # OB_PENDING, OB_RUNNING, OB_SUCCEEDED, OB_CANCELED,
# # OB_TRUNCATED, OB_FAILED
# # };
# # ------------------------------------------------------------------
# for obs_block_now in obs_blocks_status:
# obs_block_id = obs_block_now.id
# # print(
# # '????', obs_block_id, (obs_block_id in metadata),
# # obs_block_now.status.status.status, obs_block_now.phases
# # )
# if obs_block_id not in metadata:
# continue
# # obs_block_now.status -> sb.ObservationBlockStatusDetail
# metadata[obs_block_id]['status'] = (
# obs_block_now.status.status.status
# )
# metadata[obs_block_id]['phases'] = obs_block_now.phases
# if ((metadata[obs_block_id]['start_time_sec_exe'] is None)
# and (metadata[obs_block_id]['status'] != sb.OB_PENDING)):
# time_now_sec = self.clock_sim.get_time_now_sec()
# metadata[obs_block_id]['start_time_sec_exe'] = time_now_sec
# time_dif = (
# time_now_sec
# - metadata[obs_block_id]['start_time_sec_plan']
# )
# if time_dif > 0:
# obs_block_delays[sched_block_id] = time_dif
# self.log.info([
# ['y', '- obs block is now running: '],
# ['c', sched_block_id, '/ '],
# ['r', obs_block_id, '\n'],
# [
# 'g',
# ((' ' * 20) + '--> planned/executed start:'),
# metadata[obs_block_id]['start_time_sec_plan'],
# '/',
# time_now_sec,
# ],
# [
# 'p',
# ' duration:',
# metadata[obs_block_id]['duration'],
# '/',
# metadata[obs_block_id]['scaled_duration'],
# ],
# ])
# # ------------------------------------------------------------------
# #
# # ------------------------------------------------------------------
# if ((obs_blocks_status is None)
# and (blocks[sched_block_id]['state'] == 'run')):
# blocks[sched_block_id]['state'] = 'done'
# obs_blocks = blocks[sched_block_id]['sched_block'].observation_blocks
# for obs_block in obs_blocks:
# obs_block_id = obs_block.id
# metadata[obs_block_id]['phases'] = []
# rnd_now = self.rnd_gen.random()
# if rnd_now < 0.05:
# metadata[obs_block_id]['status'] = sb.OB_CANCELED
# elif rnd_now < 0.1:
# metadata[obs_block_id]['status'] = sb.OB_FAILED
# else:
# metadata[obs_block_id]['status'] = sb.OB_SUCCEEDED
# self.log.info([
# ['b', '- sched block is done '],
# ['r', sched_block_id],
# ['g', ' - current-time: '],
# [
# 'g',
# date_to_string(
# secs_to_datetime(self.clock_sim.get_time_now_sec()),
# date_string=None,
# )
# ],
# ['y', ' ', self.clock_sim.get_time_now_sec()],
# ])
# # ------------------------------------------------------------------
# # adjust the start time of all future OBs --- this will NOT
# # take care of OBs of currently active SB, which may overshoot
# # the end of the night
# # ------------------------------------------------------------------
# time_difs = [x for x in obs_block_delays.values() if x is not None]
# if len(time_difs) > 0:
# # adjust the start time of all future OBs in the currently active SBs
# active_sched_block = len(self.cycle_blocks)
# for sched_block_id in self.acs_blocks['active']:
# if obs_block_delays[sched_block_id] is None:
# continue
# updated_obs_blocks = []
# sched_block = blocks[sched_block_id]['sched_block']
# for obs_block in sched_block.observation_blocks:
# obs_block_id = obs_block.id
# if metadata[obs_block_id]['start_time_sec_exe'] is None:
# updated_obs_blocks += [[
# obs_block_id, obs_block_delays[sched_block_id]
# ]]
# metadata[obs_block_id]['start_time_sec_plan'] += (
# obs_block_delays[sched_block_id]
# )
# if len(updated_obs_blocks) > 0:
# self.log.info([
# [
# 'b',
# ' -+- updating start_time_sec_plan of',
# sched_block_id,
# '',
# ],
# ['y', updated_obs_blocks],
# ])
# for n_cycle_now in range(len(self.cycle_blocks)):
# if sched_block in self.cycle_blocks[n_cycle_now]:
# active_sched_block = n_cycle_now
# # adjust the start time of all OBs in future SBs
# self.log.info([['g', '- will delay future SBs ...']])
# self.delay_sched_blocks(
# active_sched_block=(active_sched_block + 1), time_dif=max(time_difs)
# )
# # ------------------------------------------------------------------
# # ????????????????????????????????????????????????????????????????
# # how do i get the status of a ob which has: OB_SUCCEEDED, OB_CANCELED,
# # OB_TRUNCATED, OB_FAILED ????????????????
# # ????????????????????????????????????????????????????????????????
# # ------------------------------------------------------------------
# return
# if has_lock:
# set_blocks()
# else:
# with MockSched.lock:
# set_blocks()
# return
# # ------------------------------------------------------------------
# # update the estimated starting times of all blocks based on the
# # current state of execution clip the list if any block ends up
# # lasting after the night is supposed to end
# # ------------------------------------------------------------------
# def delay_sched_blocks(self, active_sched_block=None, time_dif=None):
# self.log.info([['c', ' - starting MockSched.delay_sched_blocks ...']])
# if active_sched_block is None:
# active_sched_block = self.active_sched_block
# if active_sched_block >= len(self.cycle_blocks) - 1:
# return
# # get the maximal difference between planned and current time for each
# # OB of each SB in the cycle which will is about to be submitted
# if time_dif is None:
# time_dif_max = -1
# for sched_block in self.cycle_blocks[active_sched_block]:
# obs_block_id = sched_block.observation_blocks[0].id
# start_time_sec = (
# self.acs_blocks['metadata'][obs_block_id]['start_time_sec_plan']
# )
# time_dif_now = self.clock_sim.get_time_now_sec() - start_time_sec
# time_dif_max = max(time_dif_max, time_dif_now)
# else:
# time_dif_max = time_dif
# # if any of the OBs is late, adjust the planned start time of all OBs in all SBs
# # and remove any OBs which will overshoot the end of the night
# if time_dif_max > 0:
# self.log.info([
# ['r', '- updating start_time_sec_plan by: '],
# ['b', time_dif_max],
# ])
# # perform the adjustment for all future SBs in all cycles
# for n_cycle_now in range(active_sched_block, len(self.cycle_blocks)):
# sched_block_overs = []
# for sched_block in self.cycle_blocks[n_cycle_now]:
# self.log.info([
# ['b', ' -+- updating ', sched_block.id, ''],
# ['y', [x.id for x in sched_block.observation_blocks]],
# ])
# # adjust the start time of all OBs in this SB
# for obs_block in sched_block.observation_blocks:
# obs_block_id = obs_block.id
# metadata = self.acs_blocks['metadata'][obs_block_id]
# metadata['start_time_sec_plan'] += time_dif_max
# end_time_sec = (
# metadata['start_time_sec_plan'] + metadata['duration']
# )
# # a simplistic approach - cancel any SB if at least one of
# # its OBs overshoots the end of the night
# if ((end_time_sec > self.clock_sim.get_night_end_sec())
# and (sched_block not in sched_block_overs)):
# sched_block_overs.append(sched_block)
# # remove all overshooting SBs from the cycle and apdate the bookkeeping
# for sched_block in sched_block_overs:
# self.log.info([
# ['r', '- cancelling all OBs from future cycle for SB '],
# ['p', sched_block.id],
# ])
# for obs_block in sched_block.observation_blocks:
# metadata = self.acs_blocks['metadata'][obs_block.id]
# metadata['status'] = sb.OB_CANCELED
# self.acs_blocks['blocks'][sched_block.id]['state'] = 'cancel'
# self.cycle_blocks[n_cycle_now].remove(sched_block)
# # remove any cycle which has no SBs letf in it
# self.cycle_blocks = [x for x in self.cycle_blocks if len(x) > 0]
# return
# # ------------------------------------------------------------------
# # move one from wait to run
# # ------------------------------------------------------------------
# def submit_block_cycle(self):
# self.log.info([['g', ' - starting MockSched.submit_block_cycle ...']])
# has_reset_night = (self.n_nights < self.clock_sim.get_n_nights())
# if has_reset_night:
# self.log.info([[
# 'p',
# ' - has_reset_night - will cancel all ',
# 'running blocks and reset cycles ...',
# ]])
# if self.active_sched_block >= len(self.cycle_blocks) or has_reset_night:
# self.n_sched_subs = 0
# self.active_sched_block = 0
# self.init_block_cycle()
# # update the start time of all fufute SBs
# with MockSched.lock:
# self.delay_sched_blocks()
# # check if has removed all executable blocks
# if self.active_sched_block >= len(self.cycle_blocks):
# self.submit_block_cycle()
# return
# # ------------------------------------------------------------------
# # submit new blocks
# # ------------------------------------------------------------------
# with MockSched.lock:
# self.n_nights = self.clock_sim.get_n_nights()
# self.n_sched_subs = 0
# for sched_block in self.cycle_blocks[self.active_sched_block]:
# if self.acs_blocks['blocks'][sched_block.id]['state'] == 'wait':
# # increase the self.n_sched_subs counter BEFORE spawning the thread
# self.n_sched_subs += 1
# # spawn a new SB submision (will wait for its start time to
# # arrive before actually submitting)
# gevent.spawn(self.submit_one_block, sched_block)
# self.set_active_sched_blocks(has_lock=True)
# self.active_sched_block += 1
# return
# # ------------------------------------------------------------------
# #
# # ------------------------------------------------------------------
# def submit_one_block(self, sched_block):
# with MockSched.lock:
# obs_block_id = sched_block.observation_blocks[0].id
# n_sub_tries = 0
# while True:
# has_reset_night = (self.n_nights < self.clock_sim.get_n_nights())
# if (has_reset_night):
# return
# block_meta = self.acs_blocks['metadata']
# start_time_sec = block_meta[obs_block_id]['start_time_sec_plan']
# time_dif = self.clock_sim.get_time_now_sec() - start_time_sec
# if time_dif >= 0:
# break
# if n_sub_tries % 10 == 0:
# self.log.info([['b', '- waiting to submit '], ['g', sched_block.id],
# ['b', ' - remaining time: ', time_dif]])
# n_sub_tries += 1
# sleep(0.5)
# try:
# self.supervisor.putSchedulingBlock(sched_block)
# do_msg = True
# if do_msg:
# block_meta = self.acs_blocks['metadata']
# def block_start_time_sec(x):
# return int(floor(block_meta[x.id]['start_time_sec_plan']))
# def block_end_time_sec(x):
# return int(
# block_meta[x.id]['start_time_sec_plan']
# + block_meta[x.id]['duration']
# )
# def block_start_time(x):
# return date_to_string(
# secs_to_datetime(block_start_time_sec(x)),
# date_string=None,
# )
# def block_end_time(x):
# return date_to_string(
# secs_to_datetime(block_end_time_sec(x)),
# date_string=None,
# )
# block_times_0 = [(
# ((str(x.id) + ' --> ') +
# (str(block_meta[x.id]['metadata']['n_sched']) + ' (') +
# (str(block_meta[x.id]['metadata']['n_obs']) + ')')),
# [block_start_time_sec(x),
# block_end_time_sec(x)],
# ) for x in sched_block.observation_blocks]
# block_times_1 = [([block_start_time(x),
# block_end_time(x)], )
# for x in sched_block.observation_blocks]
# self.log.info([
# ['y', '- submitted sched block: '],
# ['p', sched_block.id, ' '],
# ['g', block_times_0, ' ... '],
# ['y', block_times_1],
# ])
# except Exception as e:
# self.log.debug([
# ['b', '- Exception - MockSched.putSchedulingBlock: '],
# ['r', '\n' + (' ' * 25), e],
# ])
# # as the last action in this thread, update the self.n_sched_subs counter
# self.n_sched_subs -= 1
# return
# # ------------------------------------------------------------------
# # move one from wait to run
# # ------------------------------------------------------------------
# def check_sched_blocks(self):
# self.log.debug([['b', ' - starting MockSched.check_sched_blocks ...']])
# with MockSched.lock:
# self.set_active_sched_blocks(has_lock=True)
# for block_name in self.acs_blocks['active']:
# status = self.supervisor.getSchedulingBlockStatus(block_name)
# opstatus = self.supervisor.getSbOperationStatus(block_name)
# self.log.debug([
# ['y', ' - active_scheduling_blocks - '],
# ['g', self.acs_blocks['active'], '-> '],
# ['y', status, ' '],
# ])
# for nob in range(len(opstatus.ob_statuses)):
# phases = opstatus.ob_statuses[nob].phases
# for p in phases:
# self.log.debug([
# [
# 'y',
# ' -- phases - ',
# block_name,
# ' ',
# opstatus.ob_statuses[nob].id,
# ' ',
# opstatus.ob_statuses[nob].status,
# ' ',
# ],
# [
# 'g',
# p.heartbeat_counter,
# ' ',
# p.name,
# ' ',
# p.status,
# ' ',
# p.progress_message,
# ],
# ])
# return
# # ------------------------------------------------------------------
# #
# # ------------------------------------------------------------------
# def loop(self):
# self.log.info([['g', ' - starting MockSched.loop ...']])
# self.submit_block_cycle()
# while True:
# self.set_active_sched_blocks()
# # print('------------- glob-active: ', self.acs_blocks['active'])
# if len(self.acs_blocks['active']) == 0 and self.n_sched_subs == 0:
# self.submit_block_cycle()
# # self.check_sched_blocks()
# sleep(self.loop_sleep)
# return
|
from django.db import models
from datetime import datetime
class Tools(models.Model):
    """A tool that can be lent out to an employee."""
    name = models.CharField(max_length=20)
    active = models.BooleanField(default=True)

    def __str__(self):
        """Show the tool by name in the admin and in listings."""
        return self.name
class Employee(models.Model):
    """An employee who may borrow tools."""
    name = models.CharField(max_length=60)
    active = models.BooleanField(default=True)

    def __str__(self):
        """Show the employee by name in the admin and in listings."""
        return self.name
class Rental(models.Model):
    """A single loan of a tool to an employee."""
    tools = models.ForeignKey(Tools, on_delete=models.CASCADE)
    employee = models.ForeignKey(Employee, on_delete=models.CASCADE)
    description = models.CharField(max_length=255)
    # Fix: pass the *callable* (no parentheses) so the timestamp is computed
    # per-row at save time; `default=datetime.now()` froze one timestamp at
    # module import / migration time.
    date_of_rent = models.DateTimeField('Date of rent', default=datetime.now)
    date_of_return = models.DateTimeField('Date of return')

    def __str__(self):
        # Fix: __str__ must return a string; the original returned the
        # related Tools instance, which raises TypeError under str().
        return str(self.tools)
|
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
class FashionModel:
    """End-to-end Fashion-MNIST demo: load the dataset, train a small dense
    classifier, and visualize predictions (follows the TensorFlow Keras
    classification tutorial structure)."""

    def __init__(self):
        # Load Fashion-MNIST: 60k train / 10k test 28x28 grayscale images
        # with integer labels 0-9.
        self.fashion_mnist = keras.datasets.fashion_mnist
        (self.train_images, self.train_labels), (self.test_images, self.test_labels) = self.fashion_mnist.load_data()
        # Human-readable class names indexed by label value.
        self.class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                            'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
        self.model = None  # built lazily by create_model()

    def execute(self):
        """Run the full demo pipeline."""
        # ------------ dataset info -------------
        self.show_dataset()
        # ------------ build & train the model --------------
        self.create_model()
        # ----------- predict ---------------
        predictions = self.predict_image()
        # Check predictions against the test-set labels.
        self.subplot_test(predictions)
        # Single-image sanity check.
        self.one_test()

    def show_dataset(self):
        """Print dataset shapes/labels and display the first 25 training images."""
        print('-------Train set spec ----------')
        print('훈련이미지 :', self.train_images.shape)
        print('훈련이미지 수 :', len(self.train_labels))
        print('훈련이미지 라벨:', self.train_labels)
        print('-------Test set spec ----------')
        print('테스트이미지 :', self.test_images.shape)
        print('테스트이미지 수:', len(self.test_labels))
        print('테스트이미지 라벨:', self.test_labels)
        plt.figure(figsize=(10, 10))
        for i in range(25):
            plt.subplot(5, 5, i + 1)
            plt.xticks([])
            plt.yticks([])
            plt.grid(False)
            plt.imshow(self.train_images[i], cmap=plt.cm.binary)
            plt.xlabel(self.class_names[self.train_labels[i]])
        plt.show()

    def create_model(self):
        """Build, compile, train (5 epochs) and evaluate a small dense network."""
        # NOTE(review): pixel values are left in [0, 255]; the reference
        # tutorial scales by /255.0 before training — confirm this is intended.
        self.model = keras.Sequential([
            keras.layers.Flatten(input_shape=(28, 28)),
            keras.layers.Dense(128, activation='relu'),
            keras.layers.Dense(10, activation='softmax')
        ])
        self.model.compile(optimizer='adam',
                           loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])
        self.model.fit(self.train_images, self.train_labels, epochs=5)
        test_loss, test_acc = self.model.evaluate(self.test_images, self.test_labels, verbose=2)
        print('\n테스트 정확도:', test_acc)

    def predict_image(self):
        """Return softmax predictions for the whole test set and print the first one."""
        predictions = self.model.predict(self.test_images)
        print('예측값 :', predictions[0])
        print('가장 신뢰도가 높은 레이블 :', np.argmax(predictions[0]))
        return predictions

    def subplot_test(self, predictions):
        """Plot the first prediction, then a 5x3 grid of predictions
        (correct in blue, wrong in red)."""
        print('테스트 데이터 :', self.test_labels[0])
        # method slot
        i = 0
        plt.figure(figsize=(6, 3))
        plt.subplot(1, 2, 1)
        self.plot_image(i, predictions, self.test_labels, self.test_images)
        plt.subplot(1, 2, 2)
        self.plot_value_array(i, predictions, self.test_labels)
        plt.show()
        # Show the first X test images with predicted and true labels.
        # Correct predictions in blue, wrong ones in red.
        num_rows = 5
        num_cols = 3
        num_images = num_rows * num_cols
        plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
        for i in range(num_images):
            plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
            self.plot_image(i, predictions, self.test_labels, self.test_images)
            plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
            self.plot_value_array(i, predictions, self.test_labels)
        plt.show()

    def one_test(self):
        """Predict a single test image (batched) and plot its class scores."""
        # Pick one image from the test set.
        img = self.test_images[0]
        print(img.shape)
        # Keras models expect a batch dimension even for one image.
        img = (np.expand_dims(img, 0))
        print(img.shape)
        predictions_single = self.model.predict(img)
        print(predictions_single)
        self.plot_value_array(0, predictions_single, self.test_labels)
        _ = plt.xticks(range(10), self.class_names, rotation=45)
        print(np.argmax(predictions_single[0]))

    def plot_image(self, i, predictions_array, true_label, img):
        """Draw test image i with its predicted label (blue if correct, red if not)."""
        predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
        plt.grid(False)
        plt.xticks([])
        plt.yticks([])
        plt.imshow(img, cmap=plt.cm.binary)
        predicted_label = np.argmax(predictions_array)
        if predicted_label == true_label:
            color = 'blue'
        else:
            color = 'red'
        plt.xlabel("{} {:2.0f}% ({})".format(self.class_names[predicted_label],
                                             100 * np.max(predictions_array),
                                             self.class_names[true_label]),
                   color=color)

    def plot_value_array(self, i, predictions_array, true_label):
        """Bar-plot the 10 class scores for sample i; highlight predicted (red)
        and true (blue) classes."""
        predictions_array, true_label = predictions_array[i], true_label[i]
        plt.grid(False)
        plt.xticks([])
        plt.yticks([])
        thisplot = plt.bar(range(10), predictions_array, color="#777777")
        plt.ylim([0, 1])
        predicted_label = np.argmax(predictions_array)
        thisplot[predicted_label].set_color('red')
        thisplot[true_label].set_color('blue')
if __name__ == '__main__':
    # Run the full demo when executed as a script.
    f = FashionModel()
    f.execute()
import random
# Guessing game: guess the random 1-9 number each round; type "exit" to quit.
score = 0
loops = 0
while True:
    number = random.randint(1, 9)
    print("Your pick:")
    kek = input()
    if kek == "exit":
        print("Score: " + str(score))
        break
    print("The number is: " + str(number))
    # Fix: reject non-numeric input instead of crashing with ValueError.
    try:
        guess = int(kek)
    except ValueError:
        print("Please enter a number from 1 to 9, or 'exit'")
        continue
    if guess == number:
        print("Nice")
        score += 1
    elif guess < number:
        print("Too low")
    else:
        print("Too high")
    loops += 1
# Fix: guard the average against ZeroDivisionError when the user exits
# before completing any round.
if loops:
    print("Avg: " + str(score / loops))
|
import scipy.io as sio
import os
import cPickle as pickle
import numpy as np
import cv2
def read_mat(mat_path, cache_dir='/tmp'):
    """Read a .mat annotation file from the IMDB-WIKI dataset and convert it
    to Python objects, caching the result as a pickle.

    The cache lives at `<cache_dir>/imdb_wiki.pth` and is reused on later calls.
    Returns (image_paths, face_locations, genders, ages, face_scores,
    second_face_scores).
    """
    cache_file = os.path.join(cache_dir, 'imdb_wiki.pth')
    if not os.path.isfile(cache_file):
        # Fix: the original had a bare `print` statement with the message as
        # an orphaned string literal on the next line (a py2->py3 conversion
        # artifact) — it printed a blank line and discarded the message.
        print("generating cache_file")
        mat = sio.loadmat(mat_path)
        imdb = mat['imdb'][0][0]
        image_paths = imdb['full_path'][0]
        image_paths = [full_path[0] for full_path in image_paths]
        face_locations = imdb['face_location'][0]
        genders = imdb['gender'][0]
        # Gender is 0/1; anything outside that range (NaN etc.) becomes -1.
        genders = [int(gender) if -1 < gender < 3 else -1 for gender in genders]
        dob = imdb['dob'][0]
        photo_taken = imdb['photo_taken'][0]
        # NOTE(review): precedence gives photo_taken - (dob/365) + 1, assuming
        # dob is a MATLAB serial date in days and photo_taken a year — confirm
        # this matches the intended age formula for the dataset.
        ages = np.array(photo_taken - dob / 365 + 1, np.uint8)
        face_scores = imdb['face_score'][0]
        second_face_scores = imdb['second_face_score'][0]
        # Fix: close the cache file deterministically (original leaked it).
        with open(cache_file, 'wb') as fh:
            pickle.dump([image_paths, face_locations, genders, ages,
                         face_scores, second_face_scores], fh)
    else:
        print("read from cache_file")
        with open(cache_file, 'rb') as fh:
            image_paths, face_locations, genders, ages, face_scores, second_face_scores = pickle.load(fh)
    print("read mat OK")
    return image_paths, face_locations, genders, ages, face_scores, second_face_scores
def crop_image(mat_path, input_dir, output_dir, expand_rate=0, max_size=600):
    """Crop every annotated face from the IMDB-WIKI images.

    Face boxes may be grown by `expand_rate` (fraction of width/height per
    side) and the crop is down-scaled so neither side exceeds `max_size`
    pixels. Crops are written under `output_dir`, mirroring the dataset's
    relative paths.
    """
    image_paths, face_locations, genders, ages, face_score, second_face_score = read_mat(mat_path)
    for image_path, loc in zip(image_paths, face_locations):
        in_path = os.path.join(input_dir, image_path)
        out_path = os.path.join(output_dir, image_path)
        out_dir = os.path.split(out_path)[0]
        if not os.path.exists(out_dir):
            print("make direction %s" % out_dir)
            os.makedirs(out_dir)
        im = cv2.imread(in_path)
        # loc appears to be [x0, y0, x1, y1] in pixels given the slicing
        # below — TODO confirm against the dataset documentation.
        loc = loc[0].astype(np.int32)
        h = loc[3] - loc[1]
        w = loc[2] - loc[0]
        if expand_rate > 0:
            # Grow the box on every side, then clamp to the image bounds.
            loc[1] -= h * expand_rate
            loc[3] += h * expand_rate
            loc[0] -= w * expand_rate
            loc[2] += w * expand_rate
            loc = np.maximum(0, loc)
            loc[3] = np.minimum(im.shape[0], loc[3])
            loc[2] = np.minimum(im.shape[1], loc[2])
        # loc=loc.astype(np.int32)
        im = im[loc[1]:loc[3], loc[0]:loc[2]]
        h = loc[3] - loc[1]
        w = loc[2] - loc[0]
        if w > max_size or h > max_size:
            if w != h:
                pass
            print("resize picture %s" % image_path)
            # Uniform scale so the longer side fits within max_size.
            resize_factor = np.minimum(1. * max_size / w, 1. * max_size / h)
            im = cv2.resize(im, (int(w * resize_factor), int(h * resize_factor)))
        cv2.imwrite(out_path, im)
def generate_caffe_txt_age(mat_path, output_path, age_range,
                           cache_dir='/tmp', ignore_second_face=False, test_ratio=0.2):
    # read mat file then generate train.txt and test.txt for age estimation training.
    # `age_range` is a list containing age range like (0,2) and (32,40).
    image_paths, face_locations, genders, ages, face_scores, second_face_scores = read_mat(mat_path, cache_dir)
    # generate classes: each sample is first assigned to the range whose
    # midpoint is nearest, then overridden by an exact containment match
    # when one exists.
    ages_mid = np.array([(age[0] + age[1]) / 2 for age in age_range])
    classes = []
    for age in ages:
        classes.append(np.argmin(np.abs(ages_mid - age)))
        for idx, r in enumerate(age_range):
            if r[0] <= age <= r[1]:
                classes[-1] = idx
                break
    # Random train/test split of sample indices.
    shuffle_idx = np.arange(len(image_paths))
    np.random.shuffle(shuffle_idx)
    train_idx = shuffle_idx[int(len(shuffle_idx) * test_ratio):]
    test_idx = shuffle_idx[:int(len(shuffle_idx) * test_ratio)]
    with open(os.path.join(output_path, 'age_train.txt'), 'w') as trainf:
        for idx in train_idx:
            if ignore_second_face:
                # Skip images that likely contain a prominent second face.
                if second_face_scores[idx] > 1.5:
                    continue
            trainf.write("%s %d\n" % (image_paths[idx], classes[idx]))
    with open(os.path.join(output_path, 'age_test.txt'), 'w') as testf:
        for idx in test_idx:
            if ignore_second_face:
                if second_face_scores[idx] > 1.5:
                    continue
            testf.write("%s %d\n" % (image_paths[idx], classes[idx]))
def generate_caffe_txt_gender(mat_path, output_path, cache_dir='/tmp', test_ratio=0.2):
    """Read the .mat annotations, then write gender_train.txt and
    gender_test.txt ("<image_path> <gender>" per line) for gender training.
    Samples with unknown gender (-1) are skipped."""
    annotations = read_mat(mat_path, cache_dir)
    image_paths, genders = annotations[0], annotations[2]
    # Shuffle sample indices and split off the test fraction.
    order = np.arange(len(image_paths))
    np.random.shuffle(order)
    split = int(len(order) * test_ratio)
    test_idx, train_idx = order[:split], order[split:]

    def _dump(path, indices):
        # One "<path> <label>" line per usable sample.
        with open(path, 'w') as fh:
            for k in indices:
                if genders[k] == -1: continue
                fh.write("%s %d\n" % (image_paths[k], genders[k]))

    _dump(os.path.join(output_path, 'gender_train.txt'), train_idx)
    _dump(os.path.join(output_path, 'gender_test.txt'), test_idx)
if __name__ == '__main__':
    import argparse
    # CLI entry point: parse the .mat path and warm the conversion cache.
    parser = argparse.ArgumentParser()
    parser.add_argument('mat_path', help='.mat file path in IMDB-WIKI datasets', type=str)
    args = parser.parse_args()
    read_mat(args.mat_path)
|
from pyspark import SparkConf, SparkContext
from pyspark.sql.session import SparkSession
class SparkBase:
    """Owns a SparkContext/SparkSession pair and hands out Kafka
    structured-streaming readers. Usable as a context manager so the
    context/session are stopped on exit."""

    def __init__(self):
        self.__sc = SparkContext()
        self.__sc.setLogLevel("WARN")
        # Hive support enabled so downstream queries can reach Hive tables.
        self.__spark = SparkSession.builder.enableHiveSupport().getOrCreate()

    def get_spark_context(self):
        # Return both the low-level context and the SQL session.
        return self.__sc, self.__spark

    def get_applicationId(self):
        # YARN/Spark application id of the underlying context.
        return self.__sc.applicationId

    def get_structured_streaming_kafka(self, bootstrap_servers, topic, max_offsets_per_trigger=None, security_protocol=None, sasl_mechanism=None, starting_offsets="earliest"):
        '''
        Build a structured-streaming Kafka source.
        :param bootstrap_servers: str Kafka broker address(es)
        :param topic: str topic to subscribe to
        :param max_offsets_per_trigger: optional per-trigger offset cap (rate limit)
        :param security_protocol: str authentication protocol (enables the SASL branch)
        :param sasl_mechanism: str authentication mechanism
        :param starting_offsets: offsets to start from when no checkpoint exists
        :return: configured reader

        NOTE(review): this returns the reader with options set but never calls
        .load(), so callers appear to be expected to call .load() themselves —
        confirm that is the intended contract.
        '''
        if security_protocol:
            return self.__spark.readStream.format("kafka") \
                .option("kafka.bootstrap.servers", bootstrap_servers) \
                .option("kafka.security.protocol", security_protocol) \
                .option("kafka.sasl.mechanism", sasl_mechanism) \
                .option("subscribe", topic) \
                .option("failOnDataLoss", "true") \
                .option("startingOffsets", starting_offsets) \
                .option("includeTimestamp", True) \
                .option("maxOffsetsPerTrigger", max_offsets_per_trigger)
        else:
            return self.__spark.readStream.format("kafka") \
                .option("kafka.bootstrap.servers", bootstrap_servers) \
                .option("subscribe", topic) \
                .option("failOnDataLoss", "true") \
                .option("startingOffsets", starting_offsets) \
                .option("includeTimestamp", True) \
                .option("maxOffsetsPerTrigger", max_offsets_per_trigger)

    def close(self):
        # Stop both handles; safe to call once when shutting down.
        self.__sc.stop()
        self.__spark.stop()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 8/23/2017 3:28 PM
# @Author : Winnichen
# @File : __init__.py.py
import os
# Ensure the log directory exists before the log writer below opens files in it.
if not os.path.isdir("logs"):
    os.mkdir("logs")
# Imported for its side effect of configuring log writers (expects ./logs to exist).
import utils.log_writer
|
from flask import Flask, render_template, request
import requests
import random
app = Flask(__name__)
@app.route('/')
def home():
    """Serve the landing page (templates/home.html)."""
    return render_template('home.html')
@app.route('/home.html')
def home2():
    """Serve the landing page under its explicit /home.html path as well."""
    return render_template('home.html')
@app.route('/service_intro.html')
def service_intro():
    """Serve the service introduction page (templates/service_intro.html)."""
    return render_template('service_intro.html')
# render_template 사용해보기
# == 사용자에게 보여줄 데이터를 HTML에 담기
# @app.route('/service.html')
# def service():
# # HTML 반환해주기
# # 반드시 templates 폴더 안에 위치해야합니다.
# # render_template 불러와주기
# menu_db = [
# 'BBQ 황금 올리브치킨', 'BHC 뿌링클', '네네치킨 오리엔탈파닭', '교촌치킨 레드콤보', '페리카나 양념치킨', '굽네치킨 고추바사삭', '호식이두마리치킨 매운간장치킨', 'BHC 맛초킹',
# '파파존스 수퍼파파스', '도미노 베스트콰트로', '피자스쿨 고구마피자', '피자에땅 달피자', ]
# ans = random.choice(menu_db)
# return render_template('service.html', random_menu=ans)
@app.route('/service.html')
def service():
    """Pick a random menu item and render it together with its photo."""
    menu_db = ['후라이드치킨', '갈릭파스타']
    menu_photo = {'후라이드치킨' : '../static/images/fried_chicken.png', '갈릭파스타' : '../static/images/garlic_pasta.png',}
    picked = random.choice(menu_db)
    return render_template('service.html', random_menu=picked, random_menu_photo=menu_photo[picked])
# 파일 수정 시, 자동으로 반영해주는 코드
# 서버 껐다킬 필요 없음.
# 이제부터 python app.py로 서버 실행!
if __name__ == '__main__':
    # debug=True enables the auto-reloader so code changes apply without
    # restarting the server.
    app.run(debug=True)
import json
from apps.Utils.formresponse import formScssResp
from apps.Utils.message_constants import LOGGEDOUT_SCSS_MSG
def create_user(userObj):
    """Respond with a canned logged-out success payload.

    NOTE(review): despite the name, this does not persist a user yet —
    it only builds the success response envelope.
    """
    return formScssResp("000", LOGGEDOUT_SCSS_MSG, "logoutResp", {})
|
import numpy as np
import matplotlib.pyplot as plt
import ROOT
from ROOT import gROOT
from math import *
from array import array
from scipy import stats
import random
def findEdge(n):
    """Locate the rising and falling edge bins of a histogram.

    The average count over the non-empty bins is computed first; the edges
    are the first bin (scanning from each end) whose count plus a 3-sigma
    Poisson fluctuation (3*sqrt(count)) exceeds that average.

    Returns [start, end] bin indices.
    """
    # Fix: converted py2 `print` statements to print() calls (the originals
    # are syntax errors under Python 3); renamed `sum` so the builtin is not
    # shadowed; guarded the all-empty case that previously raised
    # ZeroDivisionError.
    N = len(n)
    total = 0.0
    filled = 0
    for i in range(N):
        if n[i] != 0:
            total += float(n[i])
            filled += 1
    if filled == 0:
        # No events at all: no edge to find.
        return [0, 0]
    truAve = total / filled
    start = 0
    end = 0
    print("number of events: " + str(total))
    print("number of events per bin " + str(truAve))
    for i in range(N):
        t = n[i] + 3 * sqrt(n[i])
        start = i
        if t > truAve:
            break
    for i in range(N - 1, start, -1):
        t = n[i] + 3 * sqrt(n[i])
        end = i
        if t > truAve:
            break
    return [start, end]
def randList(n, size=None):
    """Return `size` distinct random integers drawn from [0, n-1].

    `size` defaults to 10% of n. Returns an empty list when more distinct
    values are requested than exist.
    """
    # Fix: converted the py2 `print` statement (py3 syntax error) and replaced
    # the hand-rolled rejection-sampling loop with random.sample, which
    # already guarantees distinct values.
    if size is None:
        size = int(.1 * n)
    if size > n:
        print("size to big empyt list")
        return []
    return random.sample(range(n), size)
# Bootstrap study (Python 2 / PyROOT): read muon hits from a ROOT tree,
# repeatedly subsample them, and compare the linear-regression slope of
# position-vs-time against two histogram-edge slope estimates, collecting
# the percentage bias of each estimator.
# inFile=ROOT.TFile("proc_cry_0.root")
# inFile=ROOT.TFile("proc_cry_total.root")
inFile=ROOT.TFile("proc_cry_total9.root")
# inFile=ROOT.TFile("proc_cry_total3.root")
tree=ROOT.TTree()
tree=inFile.Get("photon_Data")
# y=np.zeros(1,dtype=float)
# Branch buffers: ROOT writes each entry's values into these one-element arrays.
x=array("f",[0])
y=array("f",[0])
z=array("f",[0])
t=array("f",[0])
pid=array("i",[0])
tree.SetBranchAddress("x",x)
tree.SetBranchAddress("y",y)
tree.SetBranchAddress("z",z)
tree.SetBranchAddress("t",t)
tree.SetBranchAddress("pid",pid)
bias_est=[]
bias_hi=[]
bias_lo=[]
slopeList=[]
estList=[]
entries=int(tree.GetEntries())
px=[]
py=[]
pz=[]
tim=[]
cellLen=950
# Select muon hits (|pid| == 13) inside the fiducial volume.
for i in xrange(0,entries):
    tree.GetEntry(i)
    # if abs(t[0])!=0:
    if abs(t[0])!=0 and abs(x[0])<(60.0) and abs(z[0])<(5.0) and abs(y[0])<(500.0)and abs(pid[0])==13:
        px.append(x[0])
        py.append(y[0])
        pz.append(z[0])
        tim.append(t[0])
print "number of muon events "+str(len(tim))
# Bootstrap loop: each pass draws a random 10% subsample of the hits.
for q in xrange(0,1000):
    print q
    rl= randList(len(tim))
    samTime=[]
    samPos=[]
    for v in rl:
        samTime.append(tim[v])
        samPos.append(py[v])
    time=np.array(samTime)
    pos=np.array(samPos)
    # Reference slope from a straight least-squares fit of position vs time.
    slope, intercept, r_value, p_value, std_err = stats.linregress(time,pos)
    # print "events: "+str(len(time))
    dy=sqrt(12*pos.var())
    dt=sqrt(12*time.var())
    # print "b-a "+str(dt)
    # print "dy "+str(dy)
    # Variance-based slope estimate (uniform-distribution width from variance).
    est=-cellLen/dt
    # print "intercept "+str(intercept)
    # print "r "+str(r_value)
    # print "p "+str(p_value)
    # print "std "+str(std_err)
    ############scatter###########
    # plt.plot(time,pos,'.')
    # plt.show()
    #############time hist***********
    n, bins, patches=plt.hist(time,bins=61)
    # plt.clf()
    # plt.plot(time,pos,'.')
    # print np.array(n).mean()
    # print n
    # Edge-detection slope estimates from the time histogram's outer and
    # inner edge bins.
    ival= findEdge(n)
    # print "ival "+str(ival)
    # print n[ival[0]]
    # print n[ival[1]]
    dt_dumb_lo= bins[ival[1]]-bins[ival[0]]
    if dt_dumb_lo!=0:
        slope_dumb_lo=-cellLen/dt_dumb_lo
    else:
        continue
    # print "dt dumb lo "+str(dt_dumb_lo)
    # print "slope dume lo "+str(slope_dumb_lo)
    dt_dumb_hi= bins[ival[1]-1]-bins[ival[0]+1]
    if dt_dumb_hi!=0:
        slope_dumb_hi=-cellLen/dt_dumb_hi
    else:
        continue
    # print "slope "+str(slope)
    # print "slope est "+str(est)
    # print "dt dumb hi "+str(dt_dumb_hi)
    # print "slope dume hi "+str(slope_dumb_hi)
    # Percentage bias of each estimator relative to the regression slope.
    best=100*(slope-est)/slope
    blo=100*(slope-slope_dumb_lo)/slope
    bhi=100*(slope-slope_dumb_hi)/slope
    # Interactive outlier inspection — disabled via `and False`.
    if (abs(blo)>200 or abs(bhi) >200 or abs(best) >200) and False :
        print "slope "+ str(slope)
        print "est "+ str(est)
        print "lo slope "+ str(slope_dumb_lo)
        print "hi slope "+ str(slope_dumb_hi)
        print "lo bias "+ str(blo)
        print "hi bias "+ str(bhi)
        print "est bias"+ str(best)
        print n
        print ival
        print "first bin "+str(n[ival[0]])
        print "second bin "+str(n[ival[1]])
        print "time0 lo "+str(bins[ival[0]])
        print "time1 lo "+str(bins[ival[1]])
        print "time0 hi "+str(bins[ival[0]+1])
        print "time1 hi "+str(bins[ival[1]-1])
        # plt.show()
        a=1
        # Manual rebinning loop — also disabled via `and False`.
        while(a>0 and False):
            lo=input("enter first bin")
            hi=input("enter second bin")
            dt_dumb_lo= bins[hi]-bins[lo]
            dt_dumb_hi= bins[hi-1]-bins[lo+1]
            slope_dumb_lo=-cellLen/dt_dumb_lo
            slope_dumb_hi=-cellLen/dt_dumb_hi
            blo=100*(slope-slope_dumb_lo)/slope
            bhi=100*(slope-slope_dumb_hi)/slope
            print "lo "+ str(blo)
            print "hi "+ str(bhi)
            print "est "+ str(best)
            a=input("enter neg value to cont")
    bias_est.append(best)
    bias_lo.append(blo)
    bias_hi.append(bhi)
    slopeList.append(slope)
    estList.append(est)
    plt.clf()
# Summary statistics over all bootstrap passes.
print "est mean " + str(np.array(bias_est).mean())
print "est std " +str(np.array(bias_est).std())
print "lo mean " +str(np.array(bias_lo).mean())
print "lo std " +str(np.array(bias_lo).std())
print "hi mean " +str(np.array(bias_hi).mean())
print "hi std" +str(np.array(bias_hi).std())
print "slope mean " +str(np.array(slopeList).mean())
print "slope std" +str(np.array(slopeList).std())
print "est mean " +str(np.array(estList).mean())
print "est std" +str(np.array(estList).std())
print "sample size "+str(.1*len(time))
# Bias/slope distributions.
plt.clf()
plt.hist(bias_lo)
plt.title("long dt est")
plt.show()
plt.clf()
plt.hist(bias_hi)
plt.title("short dt est")
plt.show()
plt.clf()
plt.hist(bias_est)
plt.title("Variance est dt")
plt.show()
plt.hist(slopeList)
plt.title("Slope")
plt.show()
plt.clf()
plt.hist(estList)
plt.title("Estimated Slope")
plt.show()
|
import email
import imaplib
import json
import os
from bs4 import BeautifulSoup
class IMAPPuller(object):
    """Pulls unseen Google+ notification emails over IMAP and extracts
    (mentioned-user Google ID, sender page ID) pairs from their HTML bodies."""

    def __init__(self, logger, db, config_path):
        """
        @type db: Data
        @type logger: Logger
        """
        self.logger = logger
        self.db = db
        try:
            # Connection settings (host/port/credentials, dummy flag) live in
            # imap-pull.json under config_path.
            f = open(os.path.join(config_path, 'imap-pull.json'))
            self.config = json.load(f)
            self.logger.debug('IMAP module config: {0}'.format(self.config))
        except Exception as e:
            self.logger.error('Failed to initialize IMAP module: {0}'.format(e))

    def is_dummy(self):
        # Treats a missing/invalid config as dummy (inactive) by default.
        try:
            return bool(self.config['dummy'])
        except:
            pass
        return True

    def fetch(self):
        """
        Pulls and parses emails to extract Google ID of original sharer and first mentioned person ID
        @return: list of tuples (gid, page_id)
        """
        # dummy accounts must not proceed
        if self.config['dummy']:
            return
        imap_server = imaplib.IMAP4_SSL(self.config['host'], self.config['port'])
        imap_server.login(self.config['login'], self.config['password'])
        # Only the dedicated 'Plus' mailbox is scanned, and only UNSEEN mail.
        imap_server.select('Plus')
        status, email_ids = imap_server.uid('search', None, '(UNSEEN)')
        pages = []
        for e_id in email_ids[0].split():
            _, response = imap_server.uid('fetch', e_id, '(RFC822)')
            # parse method will append pages if parse is successful
            if not response or not response[0] or not response[0][1]:
                self.logger.info('IMAP WARN: Invalid response for message {0}'.format(e_id))
                continue
            self.parse(response[0][1], pages)
        imap_server.close()
        return pages

    def parse(self, msg, pages):
        """
        @param msg: email message
        @return: a tuple (gid, page_id) where gid is a Google User ID mentioned in the
                 email and page_id is a Google Plus ID of a sender of the message
        """
        e = email.message_from_string(msg)
        self.logger.info('IMAP parsing message: {0}, From {1}'.format(e.get('Subject'), e.get('From')))
        # The sender's page id is carried in a custom header.
        page_id = e.get('X-Sender-ID')
        if not page_id:
            self.logger.warning('Google ID not found in message {0}'.format(e.as_string()))
            return None
        # Walk MIME parts looking for the HTML body containing the profile link.
        for part in e.walk():
            ct = part.get_params('Content-Type')
            if ct and ('text/html', '') in ct:
                html = part.get_payload(decode=True)
                if html:
                    soup = BeautifulSoup(html)
                    # The first 'proflink' anchor carries the mentioned user's oid.
                    a = soup.find('a', class_='proflink')
                    if a:
                        gid = a.get('oid')
                        self.logger.info('IMAP message parse success: gid:{0}, page:{1}'.format(gid, page_id))
                        pages.append((gid, page_id))
                        return
        self.logger.info('WARN: IMAP message parse found no page:user links')
from backpack.core.derivatives.elementwise import ElementwiseDerivatives
class TanhDerivatives(ElementwiseDerivatives):
    """Elementwise derivatives of tanh, written in terms of the cached
    activation output t = tanh(x): f'(x) = 1 - t^2, f''(x) = -2 t (1 - t^2)."""

    def hessian_is_zero(self):
        # tanh is nonlinear, so its second derivative does not vanish.
        return False

    def df(self, module, g_inp, g_out):
        t = module.output
        return 1.0 - t ** 2

    def d2f(self, module, g_inp, g_out):
        t = module.output
        return -2.0 * t * (1.0 - t ** 2)
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
# Demo of TF1 queue-based input pipelines: tf.train.batch vs batch_join with
# enqueue_many True/False.
tensor_list = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]]
tensor_list2 = [[[1, 2, 3, 4]], [[5, 6, 7, 8]], [[9, 10, 11, 12]], [[13, 14, 15, 16]], [[17, 18, 19, 20]]]
with tf.Session() as sess:
    for i in np.arange(1):
        print ('##################### %d'%i)
        # enqueue_many=False treats each list element as one example;
        # enqueue_many=True treats the leading dimension as separate examples.
        x1 = tf.train.batch(tensor_list, batch_size=2, enqueue_many=False, capacity=1)
        x2 = tf.train.batch(tensor_list, batch_size=2, enqueue_many=True, capacity=1)
        # batch_join merges multiple input pipelines (one per list element).
        y1 = tf.train.batch_join(tensor_list, batch_size=3, enqueue_many=False, capacity=1)
        y2 = tf.train.batch_join(tensor_list2, batch_size=25, enqueue_many=True, capacity=1, allow_smaller_final_batch=False)
        # Queue runners must be started before sess.run on the batch ops,
        # otherwise the run blocks forever on empty queues.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        print("x1 batch:"+"-"*10)
        x1_batch = sess.run(x1)
        print(x1_batch)
        print("x2 batch:"+"-"*10)
        print(sess.run(x2))
        print("y1 batch:"+"-"*10)
        print(sess.run(y1))
        print("y2 batch:"+"-"*10)
        print(sess.run(y2))
        print("-"*10)
        coord.request_stop()
        coord.join(threads)
|
"""
Index Multiplier
Return the sum of all items in a list, where each
item is multiplied by its index (zero-based).
For empty lists, return 0.
Examples:
index_multiplier([1, 2, 3, 4, 5]) ➞ 40
# (1*0 + 2*1 + 3*2 + 4*3 + 5*4)
index_multiplier([-3, 0, 8, -6]) ➞ -2
# (-3*0 + 0*1 + 8*2 + -6*3)
Notes
All items in the list will be integers.
"""
def index_multiplier(lst):
    """Return the sum of each item multiplied by its zero-based index.

    Empty lists yield 0.
    """
    # Generator expression: no need to materialize an intermediate list
    # before summing.
    return sum(i * v for i, v in enumerate(lst))
|
# N-> This was from Glassdoor. Was a tech screen.
# Given input of host names of the pattern:
# ENV-DEPLOYMENT_ID-APP_NAME-INDEX.DOMAIN
# Report a tab separated table of
# ENV<TAB>APP_NAME<TAB>DEPLOYMENT_ID<TAB>COUNT_OF_HOSTS
#
# COUNT_OF_HOSTS is the number of hostnames of the same ENV, APP_NAME, and DEPLOYMENT_ID
#
# input = '''
# Prod-20160502-app-02.glassdoor.local
# Qa-20181001-app-02.glassdoor.local
# Qa-20181001-app-04.glassdoor.local
# Qa-20181002-myapp-01.glassdoor.local
# '''
#
# Expected Output
# Prod app 20160502 1
# Qa app 20181001 2
# Qa myapp 20181002 1
import re
# This function takes some raw input files and outputs both a more formatted string as well
# as grouping entries that have the same "ENV" and "APP_NAME" entry.
def parse_log_file(input_string):
    """Count hosts per (ENV, APP_NAME, DEPLOYMENT_ID).

    Hostnames look like ENV-DEPLOYMENT_ID-APP_NAME-INDEX.DOMAIN.
    Prints each ENV as it is seen and the final counts, and returns a dict
    mapping "ENV APP_NAME DEPLOYMENT_ID" -> host count.
    """
    counts_dict = {}
    for line in input_string.splitlines():
        # Tolerate blank lines in the raw input.
        if not line.strip():
            continue
        # Split the entry into the components we care about.
        env, dep_id, app_name = re.split(r'[-.]', line)[:3]
        print(env)
        # Bug fix: the original tested membership with an *unseparated* key
        # ("Qa20181001app") while storing space-separated keys, so the lookup
        # never matched and every count stayed stuck at 1. Build the key once
        # and use it for both lookup and storage (ordered ENV APP DEPLOYMENT
        # to match the expected report format).
        key = "%s %s %s" % (env, app_name, dep_id)
        counts_dict[key] = counts_dict.get(key, 0) + 1
    print(str(counts_dict))
    return counts_dict
# Sample input matching the format described in the header comment above.
example_log = '''Prod-20160502-app-02.glassdoor.local
Qa-20181001-app-02.glassdoor.local
Qa-20181001-app-04.glassdoor.local
Qa-20181002-myapp-01.glassdoor.local'''
parse_log_file(example_log)
|
def solution(s, skip, index):
    """Caesar-style shift: advance each character of `s` forward through the
    lowercase alphabet until `index` characters NOT listed in `skip` have
    been passed, wrapping from 'z' back to 'a'."""
    blocked = {ord(ch) for ch in skip}
    shifted = []
    for ch in s:
        code = ord(ch)
        remaining = index
        while remaining:
            code += 1
            if code > 122:
                # Wrap past 'z' back to 'a'.
                code = (code % 123) + 97
            if code not in blocked:
                remaining -= 1
        shifted.append(chr(code))
    return ''.join(shifted)
def ahaa(toisto):
    """Print the word "ahaa" `toisto` times, one per line."""
    for _ in range(toisto):
        print("ahaa")
def säkeistö(line1, line2):
    """Print one verse of the song twice: line1, two "ahaa"s, line1, line2,
    three "ahaa"s, then a blank separator line."""
    for _ in range(2):
        print(line1)
        ahaa(2)
        print(line1)
        print(line2)
        ahaa(3)
        print()
def main():
    """Print all four verses of the frog song."""
    verses = [
        ("saku sammakko kosiomatkallaan", "hän lauleli kauniita laulujaan"),
        ("hän hillevi hiiren tavatessaan", "pyysi mukanaan tulemaan pappilaan"),
        ("mikset kultasein kosinut aikanaan", "minut matias myyrälle naitetaan"),
        ("sulle matias sovi ei laisinkaan", "sillä multaa on myyrällä varpaissaan"),
    ]
    for line1, line2 in verses:
        säkeistö(line1, line2)

main()
|
import unittest
from orgAnalyzer import OrgTable
from orgAnalyzer import OrgParser
from utils import CacheLocation
class TestOrgAnalyzer(unittest.TestCase):
    """Unit tests for OrgParser table detection/parsing and OrgTable
    construction from a plain nested list."""
    # Fix: removed a stray bare `None` expression statement that sat at the
    # top of the class body and did nothing.

    def testOrgParserIdentifyOrgTables(self):
        # A 3-column table embedded between two prose lines.
        source = ["some text sdfsdf", "| Header | X | Header Z |", "|------+----+------|", "| Content | 1 | Content Z |", "Some other text"]
        target = (CacheLocation(1,0), CacheLocation(3,27))
        result = OrgParser.identifyOrgTable(source, CacheLocation.getZeroCacheLocation())
        self.assertEqual(result, target)

    def testOrgParserIdentifyOrgTables2cols(self):
        source = ["some text sdfsdf", "| Header | X |", "|------+----|", "| Content | 1 |", "Some other text"]
        target = (CacheLocation(1,0), CacheLocation(3,15))
        result = OrgParser.identifyOrgTable(source, CacheLocation.getZeroCacheLocation())
        self.assertEqual(result, target)

    def testOrgParserIdentifyOrgTablesTrailingSpaces(self):
        # Trailing whitespace after a row must not shift the end location.
        source = ["some text sdfsdf", "| Header | X | ", "|------+----|", "| Content | 1 |", "Some other text"]
        target = (CacheLocation(1,0), CacheLocation(3,15))
        result = OrgParser.identifyOrgTable(source, CacheLocation.getZeroCacheLocation())
        self.assertEqual(result, target)

    def testOrgParserIdentifyOrgTables3rows(self):
        source = ["some text sdfsdf", "| Header | X |", "|------+----|", "| Content | 1 |", "| Third | 2 |", "Some other text"]
        target = (CacheLocation(1,0), CacheLocation(4,13))
        result = OrgParser.identifyOrgTable(source, CacheLocation.getZeroCacheLocation())
        self.assertEqual(result, target)

    def testOrgParserIdentifyOrgTablesNext(self):
        # Starting the scan at the table's own first line must still find it.
        source = ["some text sdfsdf", "| Header | X | Header Z |", "|------+----+------|", "| Content | 1 | Content Z |", "Some other text"]
        target = (CacheLocation(1,0), CacheLocation(3,27))
        result = OrgParser.identifyOrgTable(source, CacheLocation(1,0))
        self.assertEqual(result, target)

    def testParseOrgTable(self):
        # Cell access is (column, row); row 0 is the header.
        source = ["| Header | X | Header Z |", "|------+----+------|", "| Content | 1 | Content Z |", "| 2Content | 21 | 2Content Z |"]
        result = OrgParser.parse(source)
        self.assertEqual(result.getCols(0), 3)
        self.assertEqual(result.getRows(), 3)
        self.assertEqual(result.getColumnContent(0,0), "Header")
        self.assertEqual(result.getColumnContent(0,1), "Content")
        self.assertEqual(result.getColumnContent(0,2), "2Content")
        self.assertEqual(result.getColumnContent(1,0), "X")
        self.assertEqual(result.getColumnContent(1,1), "1")
        self.assertEqual(result.getColumnContent(1,2), "21")
        self.assertEqual(result.getColumnContent(2,0), "Header Z")
        self.assertEqual(result.getColumnContent(2,1), "Content Z")
        self.assertEqual(result.getColumnContent(2,2), "2Content Z")

    def testParseOrgTable2Cols(self):
        source = ["| Header | X |", "|------+-----|", "| Content | 1 |", "| 2Content | 21 |"]
        result = OrgParser.parse(source)
        self.assertEqual(result.getCols(0), 2)
        self.assertEqual(result.getRows(), 3)
        self.assertEqual(result.getColumnContent(0,0), "Header")
        self.assertEqual(result.getColumnContent(0,1), "Content")
        self.assertEqual(result.getColumnContent(0,2), "2Content")
        self.assertEqual(result.getColumnContent(1,0), "X")
        self.assertEqual(result.getColumnContent(1,1), "1")
        self.assertEqual(result.getColumnContent(1,2), "21")

    def testOrgTableFromTabe(self):
        # Build an OrgTable directly from a nested list (rows of cells).
        vTable = [["h"+str(i) for i in range(4)], ["c1"+str(i) for i in range(4)], ["c2"+str(i) for i in range(4)]]
        vOrgTable = OrgTable.constructFromTable(vTable)
        self.assertEqual(vOrgTable.getCols(0), 4)
        self.assertEqual(vOrgTable.getRows(), 3)
        self.assertEqual(vOrgTable.getColumnContent(0,0), "h0")
        self.assertEqual(vOrgTable.getColumnContent(1,0), "h1")
        self.assertEqual(vOrgTable.getColumnContent(2,0), "h2")
        self.assertEqual(vOrgTable.getColumnContent(0,1), "c10")
        self.assertEqual(vOrgTable.getColumnContent(1,1), "c11")
        self.assertEqual(vOrgTable.getColumnContent(2,1), "c12")
        self.assertEqual(vOrgTable.getColumnContent(0,2), "c20")
        self.assertEqual(vOrgTable.getColumnContent(1,2), "c21")
        self.assertEqual(vOrgTable.getColumnContent(2,2), "c22")
|
# Create class Icon
class Icon:
    """A sprite positioned on a fixed-size grid."""

    # Class-level defaults; instances override x/y in __init__.
    x = 0
    y = 0
    step = 22  # pixels per grid cell

    def __init__(self, x, y):
        # Convert grid coordinates to pixel coordinates.
        self.x, self.y = x * self.step, y * self.step

    def draw(self, surface, image):
        """Blit `image` onto `surface` at this icon's pixel position."""
        surface.blit(image, (self.x, self.y))
# Supported payment method identifiers.
PAYPAL = 'paypal'
GROUPON = 'groupon'
# All payment methods accepted at checkout.
METHODS = [PAYPAL, GROUPON]
|
from server import ma
from custommodels import User, Checkpoint, Report, Invoice, Scan
class UserSchema(ma.ModelSchema):
    """Marshmallow serializer auto-generated from the User model."""
    class Meta:
        model = User
        # all fields public
class CheckpointSchema(ma.ModelSchema):
    """Marshmallow serializer auto-generated from the Checkpoint model."""
    class Meta:
        model = Checkpoint
        # all fields public
class ReportSchema(ma.ModelSchema):
    """Marshmallow serializer auto-generated from the Report model."""
    class Meta:
        model = Report
        # all fields public
class ScanSchema(ma.ModelSchema):
    """Marshmallow serializer auto-generated from the Scan model."""
    class Meta:
        model = Scan
        # all fields public
class InvoiceSchema(ma.ModelSchema):
    """Marshmallow serializer auto-generated from the Invoice model."""
    class Meta:
        model = Invoice
        # all fields public
|
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.contrib.loader.processor import MapCompose
from scrapy.item import Item, Field
import re
class UserItem(Item):
    """Scraped forum user record (id, name, role, and account stats)."""
    # define the fields for your item here like:
    # name = Field()
    uid=Field()
    username=Field()
    role=Field()
    gradepoint=Field()
    permission=Field()
    credit=Field()
class LoginItem(Item):
    """Credentials posted to the site's login form."""
    username=Field()
    password=Field()
|
import time, csv
begin = time.time()

# Uppercase 500 text files in place, logging cumulative elapsed time to
# output.csv after every 100 files.
# Fix: the CSV file was never closed (leaked handle, rows potentially
# unflushed); also removed a redundant f.close() inside the with-block and
# reuse one time.time() reading per checkpoint so the printed value matches
# the row that is written.
with open('output.csv', 'w', newline='') as csvfile:
    fieldnames = ['no_of_files', 'time_taken_sec']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    for filenum in range(1, 501):
        # 'r+' lets us read the content, rewind, and overwrite the same handle.
        with open("./files/" + str(filenum) + ".txt", 'r+') as f:
            out = f.read()
            f.seek(0)
            f.write(out.upper())
        if filenum % 100 == 0:
            elapsed = time.time() - begin
            print(elapsed)
            writer.writerow({
                "no_of_files": filenum,
                "time_taken_sec": elapsed,
            })
import time
import argparse
import logging
import telepot
from telepot.loop import MessageLoop
# from utils import Utils
from view import View
from flask import Flask
# Minimal Telegram bot bootstrap: a telepot MessageLoop polls in a background
# thread while a Flask app keeps the main process alive.
app = Flask(__name__)
# def setup_logger(logfile, level):
#     logger.setLevel(logging.DEBUG)
#
#     fh = logging.FileHandler(logfile)
#     fh.setLevel(logging.ERROR)
#
#     ch = logging.StreamHandler()
#     ch.setLevel(level)
#
# parser = argparse.ArgumentParser(description='Some initial bot.')
# parser.add_argument('--config',
#                     type=str,
#                     help='A config fole with required params',
#                     required=True)
# args = parser.parse_args()
#
# logger = logging.getLogger('telegramm_bot')
# uti = Utils(args.config)
# TOKEN = uti.get_token()
# SECURITY NOTE(review): the bot token is hard-coded here; it should be
# loaded from config/environment (see the commented-out Utils path above)
# and this token should be revoked/rotated.
TOKEN = '415996818:AAFUiHtSM8Y7nkpSflHzfT9zPvpPxrk5jwk'
bot = telepot.Bot(TOKEN)
# View.root_handle receives every incoming update.
viwer = View(bot)
MessageLoop(bot, viwer.root_handle).run_as_thread()
print('I am ready to work ...')
# while 1:
#     time.sleep(10)
if __name__ == '__main__':
    # Flask keeps the main thread alive while the bot thread polls.
    app.run()
|
def countMaxActivities(arr):
    """Greedy activity selection: return the maximum number of
    non-overlapping activities.

    `arr` is a list of [start, end] pairs that MUST already be sorted by
    end time (as done by the caller below).
    """
    # Fix: the original returned 1 for an empty schedule.
    if not arr:
        return 0
    i, j = 1, 0
    activities = 1
    n = len(arr)
    while i < n:
        # Skip activities that start before the last selected one finishes.
        while i < n and arr[i][0] < arr[j][1]:
            i += 1
        if i < n:
            activities += 1
            j = i
        i += 1
    return activities
# Example schedule; must be sorted by finish time before the greedy count.
arr = [[5, 9], [1, 2], [3, 4], [0, 6],[5, 7], [8, 9]]
arr.sort(key = lambda x: x[1])
print(arr)
# print(countMaxActivities(arr))
import pandas as pd
import requests
import os.path
from time import sleep
# Subreddit JSON listing endpoints and the shared CSV basename
# (prefixed per-subreddit inside scrape()).
py = 'https://www.reddit.com/r/python.json'
r = 'https://www.reddit.com/r/Rlanguage.json'
posts_csv = 'posts.csv'
def scrape(url, csv):
    """Scrape up to 4 pages of a subreddit JSON listing into a CSV.

    Args:
        url: subreddit JSON endpoint (e.g. .../r/python.json).
        csv: base CSV filename; prefixed per subreddit.

    Side effects: creates or appends to the CSV on disk; sleeps 2s
    between requests to stay polite.
    """
    posts = []
    after = None
    csv = f'python_{csv}' if 'python' in url else f'r_{csv}'
    for i in range(4):
        # Reddit paginates with an 'after' cursor: append it on the second
        # page, replace the query string on later pages.
        if after and i == 1:
            url = f"{url}?after={after}"
        elif i > 1:
            url = f"{url[:url.index('?')]}?after={after}"
        res = requests.get(url, headers={'User-agent': 'Chrome'})
        print(url)
        if res.status_code != 200:
            print(f'Status Error: {res.status_code}')
            break
        res_dict = res.json()['data']['children']
        if os.path.isfile(csv):
            df = pd.read_csv(csv)
            # BUG FIX: `x in df['id']` tests the Series *index labels*,
            # not the values; materialise the ids into a set so membership
            # checks the actual data (and is O(1) per post).
            known_ids = set(df['id'])
            for post in res_dict:
                if (
                    post['data']['id'] not in known_ids
                    and post['data']['selftext']
                    and not post['data']['stickied']
                ):
                    posts.append(post['data'])
            new_df = pd.DataFrame(posts)
            # sort=False (the original passed 0) — keep column order stable.
            pd.concat([df, new_df], axis=0, sort=False).to_csv(csv, index=False)
        else:
            for post in res_dict:
                if post['data']['selftext'] and not post['data']['stickied']:
                    posts.append(post['data'])
            pd.DataFrame(posts).to_csv(csv, index=False)
        after = res.json()['data']['after']
        sleep(2)
# Entry point: scrape both subreddits (network I/O; writes the two CSVs).
scrape(py, posts_csv)
scrape(r, posts_csv)
|
import pymongo
from retrying import retry
import datetime
# import pymysql
# from Micro_Logger import deal_log
# from mysql_fun import insert
class MongoDB_Store():
    """Persist parallel key/value lists into a MongoDB collection."""

    @retry(stop_max_attempt_number=3, wait_exponential_multiplier=1000, wait_exponential_max=10000)
    def store_data_list(self, url, db_name, col_name, data_list, result_list):
        """Insert one {'key', 'value'} document per (data, result) pair.

        Args:
            url: MongoDB connection string.
            db_name / col_name: target database and collection names.
            data_list / result_list: parallel lists; element i of each
                forms one document.

        Exceptions propagate so the @retry decorator can re-run the call
        (up to 3 attempts with exponential backoff).
        """
        print(datetime.datetime.now())
        # Context manager closes the client even when an insert fails —
        # the original leaked one connection per call.
        with pymongo.MongoClient(url) as client:
            col = client[db_name][col_name]
            for key, value in zip(data_list, result_list):
                # insert_one replaces Collection.insert, which was
                # deprecated and then removed in PyMongo 4.
                col.insert_one({"key": key, "value": value})
class MySql_Store(object):
    """Stub MySQL writer (not implemented yet)."""
    def insert(self, sql):
        """Placeholder: ignores `sql` and prints a debug marker."""
        print("1")
if __name__ == "__main__":
    # Smoke test: push four key/value pairs into a local MongoDB instance.
    url = "mongodb://root:123456@127.0.0.1:27017"
    db_name = "iii"
    col_name = "iii"
    data_list = [1, 2, 2, 4]
    result_list = [1, 2, 2, 4]
    store = MongoDB_Store()
    store.store_data_list(url, db_name, col_name, data_list, result_list)
import json
import os
import re
import pyrebase as pyrebase
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.wait import WebDriverWait
# Credentials and driver location are supplied via the environment.
FACEBOOK_EMAIL = os.getenv('FACEBOOK_EMAIL')
FACEBOOK_PASS = os.getenv('FACEBOOK_PASSWORD')
CHROME_PATH = os.getenv('CHROME_PATH')

url_main = 'https://www.facebook.com/'
url_page = 'https://www.facebook.com/groups/kenyapolitforum/'

# Matches the DOM ids Facebook assigns to recently-joined member rows.
members_id_expression = re.compile('recently_joined_[0-9]+')

# One dict per scraped member; filled by extract_data(), read by save_data().
return_data = []
def load_pyre():
    """Initialise pyrebase from environment config; return a database handle."""
    firebase_config = {
        'apiKey': os.getenv('apiKey'),
        'authDomain': os.getenv('authDomain'),
        'databaseURL': os.getenv('databaseURL'),
        'storageBucket': os.getenv('storageBucket'),
    }
    app = pyrebase.initialize_app(firebase_config)
    return app.database()
def get_driver():
    """Build a maximised Chrome driver with browser notifications blocked."""
    options = webdriver.ChromeOptions()
    # Pref value 2 == "block" for the notifications content setting.
    options.add_experimental_option(
        "prefs", {"profile.default_content_setting_values.notifications": 2}
    )
    chrome = webdriver.Chrome(executable_path=CHROME_PATH, options=options)
    chrome.maximize_window()
    return chrome
# Module-level driver shared by all scraping helpers below
# (launches Chrome as an import-time side effect).
driver = get_driver()
# login to facebook
def login_to_facebook():
    """Log the shared driver into Facebook with env-provided credentials."""
    login_driver = driver
    login_driver.get(url_main)
    # find_element(By.ID, ...) replaces find_element_by_id, which was
    # removed in Selenium 4 (By is imported at the top of this file).
    login_driver.find_element(By.ID, 'email').send_keys(FACEBOOK_EMAIL)
    login_driver.find_element(By.ID, 'pass').send_keys(FACEBOOK_PASS)
    login_driver.find_element(By.ID, 'loginbutton').click()
def extract_data(soup):
    """Append name / joining / personal info for every member row in `soup`
    to the module-level `return_data` list."""
    for member in soup.find_all('div', id=members_id_expression):
        info = member.find_all('div', class_='_60rj')
        return_data.append({
            'name': member.find('div', class_='_60ri').find('a').text,
            'Joining Info': info[0].text,
            'Personal Info': info[1].text,
        })
def get_details():
    """Log in, open the group page, scroll the member list, and harvest it.

    Scrolls until more than 100 recently-joined member rows are present,
    then delegates parsing to extract_data(). Uses the module-level driver.
    """
    login_to_facebook()
    driver.get(url_page)
    WebDriverWait(driver, 30).until(ec.presence_of_all_elements_located((By.ID, 'mainContainer')))
    # click members button
    # NOTE(review): XPath is tied to a generated id (u_0_u) and will break
    # when Facebook changes its markup — confirm before relying on it.
    members_button = driver.find_element_by_xpath('//*[@id="u_0_u"]/div[3]/a/span[1]')
    driver.execute_script('arguments[0].click();', members_button)
    WebDriverWait(driver, 30).until(ec.presence_of_all_elements_located((By.ID, 'groupsMemberSection_recently_joined')))
    # scroll to bottom part
    # NOTE(review): loops forever if the list never exceeds 100 entries —
    # consider a retry cap.
    while True:
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        WebDriverWait(driver, 30).until(
            ec.presence_of_all_elements_located((By.CLASS_NAME, 'expandedList')))
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        members = soup.find_all('div', id=members_id_expression)
        if len(members) > 100:
            extract_data(soup)
            break
def save_data():
    """Write every scraped member record to Firebase, keyed by member name."""
    db = load_pyre()
    # Re-chain child() per record: pyrebase builds the path incrementally,
    # so the chain must be rebuilt for every request.
    for record in return_data:
        db.child('/Group_Members').child(record['name']).set(record)
# Entry point: scrape, persist, and always shut the browser down.
try:
    get_details()
    save_data()
except Exception as e:
    # Top-level boundary: report and fall through to cleanup.
    print(e)
finally:
    # quit() ends the whole WebDriver session; the original's close() only
    # closed the current window and left the chromedriver process running.
    driver.quit()
|
# Find 5 models: select the 5 models with the smallest rate of change.
# Using cv_diff_rate: pick the 5 stocks whose per-group sum of |cv_diff_rate| is lowest.
import pandas as pd
def sum_of_abs_cv_diff_rate(Dataframe):
    """Return the sum of |cv_diff_rate| over all rows of `Dataframe`."""
    return Dataframe["cv_diff_rate"].abs().sum()
def find_stable_rate_stocks(stocks, n=15):
    """Return the `n` stocks with the smallest volatility scores.

    Args:
        stocks: mapping of stock name -> summed |cv_diff_rate|.
        n: maximum number of stocks to return.

    Returns:
        dict of at most `n` (name, score) pairs, ascending by score
        (ties keep insertion order, matching the original's repeated
        min() behaviour).

    Fixes two defects in the original: it destructively deleted entries
    from the caller's dict, and it raised ValueError (min() on an empty
    dict) when n exceeded the number of entries.
    """
    ranked = heapq.nsmallest(n, stocks.items(), key=lambda item: item[1])
    return dict(ranked)
if __name__ == "__main__":
    history = pd.read_csv("stock_history_add.csv", encoding="euc-kr")
    # stock name -> sum of |cv_diff_rate| over that stock's rows
    volatility = dict()
    for stock_name, rows in history.groupby("stockname"):
        rows = rows.copy()  # avoid SettingWithCopyWarning
        # Only score stocks with a reasonably long trading history.
        if len(rows.index) > 100:
            volatility[stock_name] = sum_of_abs_cv_diff_rate(rows)
    stable = find_stable_rate_stocks(volatility)
    for stock_name in stable:
        print(stock_name, stable[stock_name])
|
from mlxtend.regressor import LinearRegression
import pandas as pd
from sklearn.metrics import mean_squared_error
import math
from sklearn.model_selection import cross_val_score as cvsc
from sklearn import linear_model
# Candidate sample sizes; only set_sizes[data_size] rows are actually read.
set_sizes = [100,500,1000,5000,10000,50000,100000,500000,1000000,5000000,10000000,50000000,100000000]
# Column names for the NYC taxi trip-duration training CSV.
column_names = ["id","vendor_id","pickup_datetime","dropoff_datetime","passenger_count","pickup_longitude","pickup_latitude"
,"dropoff_longitude","dropoff_latitude","store_and_fwd_flag","trip_duration","Short_or_long"]
"""Read in dataset"""
# Load the first set_sizes[data_size] rows; usecols drops the dropoff
# datetime and label columns. Hard-coded local Windows path.
data_size = 0
dataframe = pd.read_csv("C:\\Users\\bboyd\\Downloads\\train\\train.csv",
sep=',',header=0,names=column_names,index_col=0,usecols=[0,1,2,3,4,5,6,7,8,10,11] ,nrows = set_sizes[data_size])
# Target: trip duration; features: vendor, passenger count, coordinates.
Y = dataframe["trip_duration"]
X = dataframe[["vendor_id","passenger_count","pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]]
# 70/30 head/tail split — chronological, no shuffling.
X_train = X.head(int(set_sizes[data_size]*0.7))
X_test = X.tail(int(set_sizes[data_size]*0.3))
Y_train = Y.head(int(set_sizes[data_size]*0.7))
Y_test = Y.tail(int(set_sizes[data_size]*0.3))
# mlxtend normal-equation linear regression (closed form; no minibatches).
ne_lr = LinearRegression(minibatches=None)
Y2 = pd.to_numeric(Y, downcast='float')  # NOTE(review): computed but never used below
print("here",type ((Y2)))
print(type(Y_train))
# mlxtend expects a numeric float target array.
ne_lr.fit(X_train, pd.to_numeric(Y_train, downcast='float'))
print(ne_lr)
y_pred = ne_lr.predict(X_test)
# Report MSE and RMSE on the held-out tail.
res = mean_squared_error(Y_test,y_pred)
#res = scoring(y_target=Y_test, y_predicted=y_pred, metric='rmse')
print("results: ", res)
print("root", math.sqrt(res))
# Baseline scikit-learn linear regression for comparison.
lin = linear_model.LinearRegression()
lin.fit(X_train, Y_train)
#lin.cross_val_predict(X,Y,cv=10,method="mean_squared_error")
# BUG FIX: cross_val_score returns one score per fold (10 values), not
# per-sample predictions, so the original mean_squared_error(Y_test, cv)
# raised a length-mismatch ValueError. Score with negated MSE directly
# and report the mean across folds instead.
cv = cvsc(lin, X, Y, cv=10, scoring='neg_mean_squared_error')
print(cv)
res = -cv.mean()
print("res", res)
|
import unicodedata
from nltk.corpus import stopwords
from nltk import bigrams
import string
import re
import operator
from collections import Counter
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
# Each line: text \t ? \t positive_score \t negative_score (tab-separated).
# with-block closes the handle — the original leaked it until exit.
with open('tweetsTuristas+results.txt', 'r', encoding='utf8') as f:
    tweets = list(f)
textos = [str(i.split('\t')[0]) for i in tweets]
positivas = [int(i.split('\t')[2]) for i in tweets]
negativas = [int(i.split('\t')[3]) for i in tweets]
# (text, positive, negative) tuples, sorted most-negative first.
tweets = zip(textos, positivas, negativas)
tweets = sorted(tweets, key=lambda x: x[2], reverse=True)
### Histogram plot
# Tally each tweet once under its dominant sentiment score
# (0 when the positive and negative magnitudes tie).
histogram = {}
for tweet in tweets:
    if abs(tweet[1]) > abs(tweet[2]):
        histogram[tweet[1]] = histogram.get(tweet[1], 0) + 1
    elif abs(tweet[1]) < abs(tweet[2]):
        # BUG FIX: the original read histogram.get(tweet[1], 0) here, so
        # negative-dominant tweets were counted from the wrong bucket.
        histogram[tweet[2]] = histogram.get(tweet[2], 0) + 1
    else:
        histogram[0] = histogram.get(0, 0) + 1
print(histogram)
N = len(histogram.keys())
sentValues = sorted(histogram.keys())
counts = [histogram[key] for key in sentValues]
ind = np.arange(N)
width = 0.4
plt.style.use('custom538')  # project-local matplotlib style sheet
fig, ax = plt.subplots()
fig.set_size_inches(8, 8)
rect = ax.bar(ind, counts, width)
ax.set_title("Strongest sentiment")
ax.set_ylabel("Tweets")
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(sentValues)
plt.savefig('sentAnalisis.png')
#plt.show()
### Most positive tweets
# Dominantly positive tweets with |positive| >= 3; with-block closes the
# file (the original leaked the handle).
with open('positiveTweets.txt', 'w', encoding='utf8') as f:
    for tweet in tweets:
        if abs(tweet[1]) > abs(tweet[2]) and abs(tweet[1]) >= 3:
            f.write('\t'.join([tweet[0], str(tweet[1]), str(tweet[2])]) + '\n')
### Most negative tweets
# Dominantly negative tweets with |negative| >= 3; with-block closes the
# file (the original leaked the handle).
with open('negativeTweets.txt', 'w', encoding='utf8') as f:
    for tweet in tweets:
        if abs(tweet[1]) < abs(tweet[2]) and abs(tweet[2]) >= 3:
            f.write('\t'.join([tweet[0], str(tweet[1]), str(tweet[2])]) + '\n')
### Most common terms
emoticons_str = r"""
(?:
[:=;] # Eyes
[oO\-]? # Nose (optional)
[D\)\]\(\]/\\OpP] # Mouth
)"""
regex_str = [
emoticons_str,
r'<[^>]+>', # HTML tags
r'(?:@[\w_]+)', # @-mentions
r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs
r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
# FIX: the "words with - and '" alternative appeared twice in the
# original; alternation takes the first match, so the duplicate was
# redundant and has been removed.
r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
r'(?:[\w_]+)', # other words
r'(?:\S)' # anything else
# Missing: words with accents, and some issues with the letter N-tilde
]
tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^'+emoticons_str+'$', re.VERBOSE | re.IGNORECASE)
def tokenize(s):
    """Split `s` into tokens using the module-level tokens_re pattern."""
    return tokens_re.findall(s)
def preprocess(s, lowercase=False):
    """Tokenize `s`; optionally lower-case every token except emoticons."""
    tokens = tokenize(s)
    if not lowercase:
        return tokens
    return [tok if emoticon_re.search(tok) else tok.lower() for tok in tokens]
def remove_diacritics(text):
    """Strip all combining marks (non-spacing marks) from `text`.

    For example "Héllô" becomes "Hello" — useful for comparing strings
    in an accent-insensitive fashion.
    """
    decomposed = unicodedata.normalize("NFKD", text)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != "Mn"]
    return "".join(kept)
def mostFrequentTerms(tweets, nTerms):
    """Return the `nTerms` most common tokens across `tweets`.

    Tokens come from preprocess() applied to diacritic-stripped text;
    English/Spanish stopwords (in any casing) and punctuation are excluded.

    Args:
        tweets: iterable of tweet text strings.
        nTerms: number of (term, count) pairs to return.
    """
    # Punctuation signs
    punctuation = list(string.punctuation) + ['¿', '¡', '…']
    # Build the stopword set once per language, covering title/upper/lower
    # casings. A set gives O(1) membership per token; the original built a
    # huge list (with punctuation appended twice) and scanned it linearly.
    stop = set(punctuation)
    for lang in ('english', 'spanish'):
        for w in stopwords.words(lang):
            stop.update((w, w.title(), w.upper(), w.lower()))
    # Most frequent terms
    count_all = Counter()
    for tweet in tweets:
        terms_stop = [term for term in preprocess(remove_diacritics(tweet))
                      if term not in stop]
        count_all.update(terms_stop)
    return count_all.most_common(nTerms)
# Partition tweet texts by dominant sentiment (magnitude >= 3 only).
posTweets = list()
negTweets = list()
for tweet in tweets:
    if abs(tweet[1]) > abs(tweet[2]) and abs(tweet[1]) >= 3:
        posTweets += [tweet[0]]
    elif abs(tweet[1]) < abs(tweet[2]) and abs(tweet[2]) >= 3:
        negTweets += [tweet[0]]
# Side-by-side bar charts: top-10 terms per polarity.
freqTerms = mostFrequentTerms(posTweets, 10)
N = len(freqTerms)
terms = [str(i[0]) for i in freqTerms]
counts = [int(i[1]) for i in freqTerms]
ind = np.arange(N)
width = 0.4
plt.style.use('custom538')  # project-local matplotlib style sheet
fig, ((ax1, ax2)) = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(12, 6)
rect1 = ax1.bar(ind, counts, width)
ax1.set_title("Most frequent terms in positive tweets")
ax1.set_ylabel("Occurrences")
ax1.set_xticks(ind+width/2)
ax1.set_xticklabels(terms, rotation=30, ha='right')
freqTerms = mostFrequentTerms(negTweets, 10)
terms = [str(i[0]) for i in freqTerms]
counts = [int(i[1]) for i in freqTerms]
# NOTE(review): `ind` is reused from the positive plot; if the negative set
# yields fewer than 10 distinct terms the lengths mismatch and bar() raises.
rect2 = ax2.bar(ind, counts, width)
ax2.set_title("Most frequent terms in negative tweets")
ax2.set_ylabel("Occurrences")
ax2.set_xticks(ind+width/2)
ax2.set_xticklabels(terms, rotation=30, ha='right')
plt.savefig('commonTerms.png')
#plt.show()
'''
fw = open('tweetsAnalizados.txt', 'w', encoding='utf8')
for tweet in tweets:
splTweet = tweet.split('\t')
fw.write(splTweet[0] + '\t' + splTweet[2] + '\t' + splTweet[3] + '\n')
''' |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.