#!/usr/bin/env python3
from subprocess import Popen, DEVNULL
import os
import pandas
import time
import matplotlib.pyplot as plt
DSTAT_FNAME = "dstat.csv"
if os.path.exists(DSTAT_FNAME):
os.remove(DSTAT_FNAME) #dstat appends by default
dstat = Popen(["dstat", "--output="+DSTAT_FNAME], stdout=DEVNULL)
print("Dstat initialized.")
time.sleep(20) # run for 20 seconds
dstat.kill()
dstat_file = pandas.read_csv(DSTAT_FNAME, header=5)
print(len(dstat_file))
plt.plot(range(0,len(dstat_file)), dstat_file['recv'] / (1024*1024), label="Network receive")
plt.plot(range(0, len(dstat_file)), dstat_file['send'] / (1024*1024), label="Network send")
plt.xlabel("Time in s")
plt.ylabel("Data sent/received over network in MB")
plt.legend()
plt.show()
|
def solve(n, a):
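    # Computes sum_{i=1}^{n} i * a**i by accumulating successive powers of a.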
total = 0
currA = a
for i in range(1, n + 1):
total += i * currA
currA *= a
return total
if __name__ == '__main__':
    print(solve(3, 3))
    print(solve(4, 4))
    print(solve(150, 15))
|
from flask import render_template, session, redirect, url_for, request, \
    current_app, flash, abort
from . import main
from .forms import PostForm, CommentForm
from .. import db
from ..models import User, Post, Comment
from ..decorators import admin_required
from flask_login import current_user
@main.route('/')
def index():
page = request.args.get('page', 1, type=int)
pagination = Post.query.order_by(Post.timestamp.desc()).paginate(page, per_page=current_app.config['GRITY_POSTS_PER_PAGE'], error_out=False)
posts = pagination.items
return render_template('index.html', posts=posts, pagination=pagination)
@main.route('/archives')
def archives():
user = User.query.filter_by(is_administrator=True).first()
if user is None:
abort(404)
posts = user.posts.order_by(Post.timestamp.desc()).all()
return render_template('archives.html', posts=posts)
@main.route('/write', methods=['GET', 'POST'])
@admin_required
def write():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data, summary=form.summary.data,
content=form.content.data, author_id= \
current_user._get_current_object().id)
db.session.add(post)
return redirect(url_for('.index'))
return render_template('write.html', form=form)
@main.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
post = Post.query.get_or_404(id)
form = CommentForm()
if form.validate_on_submit():
comment = Comment(username=form.name.data, email=form.email.data,
content=form.content.data, post=post)
db.session.add(comment)
flash('Your comment has been published.')
return redirect(url_for('.post', id=post.id))
comments = post.comments.order_by(Comment.timestamp.asc()).all()
return render_template('post.html', posts=[post], form=form, comments=comments)
@main.route('/comment/moderate/<id>')
@admin_required
def comment_moderate(id):
comment = Comment.query.get_or_404(id)
    comment.disabled = not comment.disabled
db.session.add(comment)
return redirect(url_for('.post', id=comment.post_id))
@main.route('/edit/<int:id>', methods=['GET', 'POST'])
@admin_required
def edit(id):
post = Post.query.get_or_404(id)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.summary = form.summary.data
post.content = form.content.data
db.session.add(post)
flash('The post has been updated.')
return redirect(url_for('.post', id=post.id))
form.title.data = post.title
form.summary.data = post.summary
form.content.data = post.content
return render_template('edit_post.html', form=form)
|
from ps.routes.api.v1.psone_router import endpoints
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Portforwarded'
db.create_table(u'portforwarding_portforwarded', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('server_host', self.gf('django.db.models.fields.related.ForeignKey')(related_name='portstoforward', to=orm['servers.Server'])),
('server_to', self.gf('django.db.models.fields.related.ForeignKey')(related_name='portsforwared', to=orm['servers.Server'])),
('port_from', self.gf('django.db.models.fields.IntegerField')()),
('port_to', self.gf('django.db.models.fields.IntegerField')()),
('protocol', self.gf('django.db.models.fields.CharField')(max_length=7)),
))
db.send_create_signal(u'portforwarding', ['Portforwarded'])
def backwards(self, orm):
# Deleting model 'Portforwarded'
db.delete_table(u'portforwarding_portforwarded')
models = {
u'portforwarding.portforwarded': {
'Meta': {'object_name': 'Portforwarded'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'port_from': ('django.db.models.fields.IntegerField', [], {}),
'port_to': ('django.db.models.fields.IntegerField', [], {}),
'protocol': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'server_host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'portstoforward'", 'to': u"orm['servers.Server']"}),
'server_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'portsforwared'", 'to': u"orm['servers.Server']"})
},
u'servers.server': {
'Meta': {'object_name': 'Server'},
'external_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_vm': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'keymanger_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ssh_connection_string': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vm_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['servers.Server']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['portforwarding']
|
import asyncio
import fractions
import logging
import threading
import time
import av
from av import AudioFrame, VideoFrame
from ..mediastreams import AUDIO_PTIME, MediaStreamError, MediaStreamTrack
logger = logging.getLogger('media')
REAL_TIME_FORMATS = [
'alsa',
'android_camera',
'avfoundation',
'bktr',
'decklink',
'dshow',
'fbdev',
'gdigrab',
'iec61883',
'jack',
'kmsgrab',
'openal',
'oss',
'pulse',
'sndio',
'rtsp',
'v4l2',
'vfwcap',
'x11grab',
]
async def blackhole_consume(track):
while True:
try:
await track.recv()
except MediaStreamError:
return
class MediaBlackhole:
"""
A media sink that consumes and discards all media.
"""
def __init__(self):
self.__tracks = {}
def addTrack(self, track):
"""
Add a track whose media should be discarded.
:param: track: An :class:`aiortc.AudioStreamTrack` or :class:`aiortc.VideoStreamTrack`.
"""
if track not in self.__tracks:
self.__tracks[track] = None
async def start(self):
"""
Start discarding media.
"""
for track, task in self.__tracks.items():
if task is None:
self.__tracks[track] = asyncio.ensure_future(blackhole_consume(track))
async def stop(self):
"""
Stop discarding media.
"""
for task in self.__tracks.values():
if task is not None:
task.cancel()
self.__tracks = {}
def player_worker(loop, container, streams, audio_track, video_track, quit_event,
throttle_playback):
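    # Worker thread body: decode frames from the input container, fix up their
    # timestamps, and hand them to the audio/video track queues on the asyncio
    # loop, optionally throttling decoding to roughly real-time playback speed.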
audio_fifo = av.AudioFifo()
audio_format_name = 's16'
audio_layout_name = 'stereo'
audio_sample_rate = 48000
audio_samples = 0
audio_samples_per_frame = int(audio_sample_rate * AUDIO_PTIME)
audio_resampler = av.AudioResampler(
format=audio_format_name,
layout=audio_layout_name,
rate=audio_sample_rate)
video_first_pts = None
frame_time = None
start_time = time.time()
while not quit_event.is_set():
try:
frame = next(container.decode(*streams))
except (av.AVError, StopIteration):
if audio_track:
asyncio.run_coroutine_threadsafe(audio_track._queue.put(None), loop)
if video_track:
asyncio.run_coroutine_threadsafe(video_track._queue.put(None), loop)
break
# read up to 1 second ahead
if throttle_playback:
elapsed_time = (time.time() - start_time)
if frame_time and frame_time > elapsed_time + 1:
time.sleep(0.1)
if isinstance(frame, AudioFrame) and audio_track:
if (frame.format.name != audio_format_name or
frame.layout.name != audio_layout_name or
frame.sample_rate != audio_sample_rate):
frame.pts = None
frame = audio_resampler.resample(frame)
# fix timestamps
frame.pts = audio_samples
frame.time_base = fractions.Fraction(1, audio_sample_rate)
audio_samples += frame.samples
audio_fifo.write(frame)
while True:
frame = audio_fifo.read(audio_samples_per_frame)
if frame:
frame_time = frame.time
asyncio.run_coroutine_threadsafe(audio_track._queue.put(frame), loop)
else:
break
elif isinstance(frame, VideoFrame) and video_track:
if frame.pts is None: # pragma: no cover
logger.warning('Skipping video frame with no pts')
continue
# video from a webcam doesn't start at pts 0, cancel out offset
if video_first_pts is None:
video_first_pts = frame.pts
frame.pts -= video_first_pts
frame_time = frame.time
asyncio.run_coroutine_threadsafe(video_track._queue.put(frame), loop)
class PlayerStreamTrack(MediaStreamTrack):
def __init__(self, player, kind):
super().__init__()
self.kind = kind
self._player = player
self._queue = asyncio.Queue()
self._start = None
async def recv(self):
if self.readyState != 'live':
raise MediaStreamError
self._player._start(self)
frame = await self._queue.get()
if frame is None:
self.stop()
raise MediaStreamError
frame_time = frame.time
# control playback rate
if self._player._throttle_playback and frame_time is not None:
if self._start is None:
self._start = time.time() - frame_time
else:
wait = self._start + frame_time - time.time()
await asyncio.sleep(wait)
return frame
def stop(self):
super().stop()
if self._player is not None:
self._player._stop(self)
self._player = None
class MediaPlayer:
"""
A media source that reads audio and/or video from a file.
Examples:
.. code-block:: python
# Open a video file.
player = MediaPlayer('/path/to/some.mp4')
# Open an HTTP stream.
player = MediaPlayer(
'http://download.tsi.telecom-paristech.fr/'
'gpac/dataset/dash/uhd/mux_sources/hevcds_720p30_2M.mp4')
# Open webcam on Linux.
player = MediaPlayer('/dev/video0', format='v4l2', options={
'video_size': '640x480'
})
# Open webcam on OS X.
player = MediaPlayer('default:none', format='avfoundation', options={
'video_size': '640x480'
})
:param: file: The path to a file, or a file-like object.
    :param: format: The format to use, defaults to autodetect.
:param: options: Additional options to pass to FFmpeg.
"""
def __init__(self, file, format=None, options={}):
self.__container = av.open(file=file, format=format, mode='r', options=options)
self.__thread = None
self.__thread_quit = None
# examine streams
self.__started = set()
self.__streams = []
self.__audio = None
self.__video = None
for stream in self.__container.streams:
if stream.type == 'audio' and not self.__audio:
self.__audio = PlayerStreamTrack(self, kind='audio')
self.__streams.append(stream)
elif stream.type == 'video' and not self.__video:
self.__video = PlayerStreamTrack(self, kind='video')
self.__streams.append(stream)
# check whether we need to throttle playback
container_format = set(self.__container.format.name.split(','))
self._throttle_playback = not container_format.intersection(REAL_TIME_FORMATS)
@property
def audio(self):
"""
An :class:`aiortc.AudioStreamTrack` instance if the file contains audio.
"""
return self.__audio
@property
def video(self):
"""
A :class:`aiortc.VideoStreamTrack` instance if the file contains video.
"""
return self.__video
def _start(self, track):
self.__started.add(track)
if self.__thread is None:
self.__log_debug('Starting worker thread')
self.__thread_quit = threading.Event()
self.__thread = threading.Thread(
name='media-player',
target=player_worker,
args=(
asyncio.get_event_loop(),
self.__container, self.__streams,
self.__audio, self.__video,
self.__thread_quit,
self._throttle_playback))
self.__thread.start()
def _stop(self, track):
self.__started.discard(track)
if not self.__started and self.__thread is not None:
self.__log_debug('Stopping worker thread')
self.__thread_quit.set()
self.__thread.join()
self.__thread = None
def __log_debug(self, msg, *args):
logger.debug('player(%s) ' + msg, self.__container.name, *args)
class MediaRecorderContext:
def __init__(self, stream):
self.stream = stream
self.task = None
class MediaRecorder:
"""
A media sink that writes audio and/or video to a file.
Examples:
.. code-block:: python
# Write to a video file.
        recorder = MediaRecorder('/path/to/file.mp4')
        # Write to a set of images.
        recorder = MediaRecorder('/path/to/file-%3d.png')
:param: file: The path to a file, or a file-like object.
    :param: format: The format to use, defaults to autodetect.
:param: options: Additional options to pass to FFmpeg.
"""
def __init__(self, file, format=None, options={}):
self.__container = av.open(file=file, format=format, mode='w', options=options)
self.__tracks = {}
def addTrack(self, track):
"""
Add a track to be recorded.
:param: track: An :class:`aiortc.AudioStreamTrack` or :class:`aiortc.VideoStreamTrack`.
"""
if track.kind == 'audio':
if self.__container.format.name == 'wav':
codec_name = 'pcm_s16le'
elif self.__container.format.name == 'mp3':
codec_name = 'mp3'
else:
codec_name = 'aac'
stream = self.__container.add_stream(codec_name)
else:
if self.__container.format.name == 'image2':
stream = self.__container.add_stream('png', rate=30)
stream.pix_fmt = 'rgb24'
else:
stream = self.__container.add_stream('libx264', rate=30)
stream.pix_fmt = 'yuv420p'
self.__tracks[track] = MediaRecorderContext(stream)
async def start(self):
"""
Start recording.
"""
for track, context in self.__tracks.items():
if context.task is None:
context.task = asyncio.ensure_future(self.__run_track(track, context))
async def stop(self):
"""
Stop recording.
"""
if self.__container:
for track, context in self.__tracks.items():
if context.task is not None:
context.task.cancel()
context.task = None
for packet in context.stream.encode(None):
self.__container.mux(packet)
self.__tracks = {}
if self.__container:
self.__container.close()
self.__container = None
async def __run_track(self, track, context):
while True:
try:
frame = await track.recv()
except MediaStreamError:
return
for packet in context.stream.encode(frame):
self.__container.mux(packet)
|
# Imports
import click
import numpy as np
from tqdm import tqdm
def encode(to_encode, unique_to_index, train_ord):
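    # Map each value in `to_encode` to the ordinal code of the matching training
    # value; values never seen in training fall back to the out-of-vocabulary
    # code len(unique_to_index).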
to_ret = np.zeros(len(to_encode), dtype=np.int64)
p_bar = tqdm(np.ndenumerate(to_encode), desc='Encoding', total=len(to_encode))
for idx, obj in p_bar:
try:
to_ret[idx] = train_ord[unique_to_index[obj]]
except KeyError:
to_ret[idx] = len(unique_to_index)
return to_ret
@click.command()
@click.option('--dataset-dir', required=True)
def preproc_ord_norm(dataset_dir):
# Load data
data = np.load(f'{dataset_dir}/deltas_split.npz')
train = data['train']
dev = data['dev']
test = data['test']
# Ordinal encoding
train_unique, train_index, train_ord = np.unique(train, return_index=True, return_inverse=True)
unique_to_index = dict(np.stack((train_unique, train_index)).T)
dev_ord = encode(dev, unique_to_index, train_ord)
test_ord = encode(test, unique_to_index, train_ord)
# Normalization
train_norm = train_ord / len(train_unique)
dev_norm = dev_ord / len(train_unique)
test_norm = test_ord / len(train_unique)
# Store
np.savez_compressed(
f'{dataset_dir}/deltas_ord_norm.npz',
train_ord=train_ord,
dev_ord=dev_ord,
test_ord=test_ord,
train_norm=train_norm.astype(np.float32),
dev_norm=dev_norm.astype(np.float32),
test_norm=test_norm.astype(np.float32),
train_unique=train_unique,
)
if __name__ == '__main__':
preproc_ord_norm() # pylint: disable=no-value-for-parameter
|
from __future__ import print_function
import sys, argparse, os.path, fnmatch
import xlrd
from openpyxl import Workbook
def create_csvs( filename ) :
workbook = xlrd.open_workbook( filename )
wb = Workbook()
worksheet_all = wb.create_sheet()
worksheet_all.title = "All"
sheet_count = 1
row_count_all = 1 # different libraries use different start values...
while sheet_count <= 81 :
sheet_name = "Sheet" + str(sheet_count)
worksheet = None
try:
worksheet = workbook.sheet_by_name( sheet_name )
except xlrd.biffh.XLRDError:
pass
if worksheet is None :
print( "Can't find worksheet '" + sheet_name )
#
# Open CSV writer
#
if worksheet is not None :
            for row in range( 0, worksheet.nrows ):
                for col in range( 0, worksheet.ncols ):
print( row_count_all, row, col )
value = worksheet.cell_value( row, col )
#type = worksheet.cell_type(row, col )
#print( type, value )
cell_to = worksheet_all.cell( row_count_all, col+1 )
cell_to.value = value
row_count_all += 1
print ("Extracted " + str(worksheet.nrows) + " rows from worksheet " + sheet_name )
sheet_count += 1
wb.save( "combined.xlsx")
if __name__ == "__main__":
create_csvs( "md-aihs.xlsx" )
|
login_query = \
"""
SELECT * FROM user_info
WHERE user_email=(%s) AND user_password=(%s)
"""
user_details_query = \
"""
SELECT * FROM user_info
WHERE user_id=(%s)
"""
admin_details = \
"""
SELECT * FROM admin
WHERE user_id=(%s)
"""
prof_details = \
"""
SELECT * FROM professor
WHERE user_id=(%s)
"""
student_details = \
"""
SELECT * FROM student
WHERE user_id=(%s)
"""
university_details_query = \
"""
SELECT * FROM university
WHERE university_id=(%s)
"""
registered_courses_student = \
"""
SELECT course_id, course_name FROM Course
WHERE course_id in
(
SELECT course_id FROM registered
WHERE user_id=(%s)
)
AND
is_active=(%s)
"""
update_course_status = \
"""
"""
courses_prof = \
"""
SELECT course_id, course_name FROM Course
WHERE course_id in
(
SELECT course_id FROM course_prof
WHERE user_id=(%s)
)
AND
is_active=(%s)
"""
courses_ta = \
"""
SELECT course_id, course_name FROM Course
WHERE course_id in
(
SELECT course_id FROM assists
WHERE user_id=(%s)
)
AND
is_active=(%s)
"""
prof_of_course = \
"""
SELECT * FROM course_prof
WHERE course_id=(%s)
AND user_id=(%s)
"""
ta_of_course = \
"""
SELECT * FROM assists
WHERE course_id=(%s)
AND user_id=(%s)
"""
student_of_course = \
"""
SELECT * FROM registered
WHERE course_id=(%s)
AND user_id=(%s)
"""
prof_of_course_details = \
"""
SELECT * FROM (
SELECT * FROM course_prof
WHERE course_id=(%s)) P
JOIN (SELECT * FROM user_info) U
ON (P.user_id=U.user_id)
"""
ta_of_course_details = \
"""
SELECT * FROM (
SELECT * FROM assists
WHERE course_id=(%s)) T
JOIN (SELECT * FROM user_info) U
ON (T.user_id=U.user_id)
"""
student_of_course_details = \
"""
SELECT * FROM (
SELECT * FROM registered
WHERE course_id=(%s)) S
JOIN (SELECT * FROM user_info) U
ON (S.user_id=U.user_id)
"""
get_course_details = \
"""
SELECT
course_id, course_name, user_description,
course_credits, course_year, course_semester,
venue_id, course_time_slot, is_course_online, is_active, university_id
FROM Course
WHERE course_id=(%s)
"""
get_course_details_all = \
"""
SELECT * FROM Course
WHERE university_id=(%s)
"""
get_student_details_all = \
"""
SELECT * FROM
(SELECT * FROM Student) S JOIN (
SELECT * FROM user_info
WHERE university_id=(%s)
) U ON (S.user_id=U.user_id)
"""
get_professor_details_all = \
"""
SELECT * FROM
(SELECT * FROM Professor) S JOIN (
SELECT * FROM user_info
WHERE university_id=(%s)
) U ON (S.user_id=U.user_id)
"""
get_student_details_all_not_registered_and_not_ta_in_course = \
"""
SELECT * FROM
(SELECT * FROM Student WHERE user_id NOT IN (SELECT user_id FROM registered WHERE course_id=(%s)) AND user_id NOT IN (SELECT user_id FROM assists WHERE course_id=(%s))) S JOIN (
SELECT * FROM user_info
WHERE university_id=(%s)
) U ON (S.user_id=U.user_id)
"""
get_professor_details_all_not_in_course = \
"""
SELECT * FROM
(SELECT * FROM Professor WHERE user_id NOT IN (SELECT user_id FROM course_prof WHERE course_id=(%s))) S JOIN (
SELECT * FROM user_info
WHERE university_id=(%s)
) U ON (S.user_id=U.user_id)
"""
get_venue_details_all = \
"""
SELECT * FROM venue
WHERE university_id=(%s)
"""
get_course_assignments = \
"""
SELECT
assignment_id, assignment_name,
TO_CHAR(assignment_posted_date, 'DD MON, HH:MI') assignment_posted_date,
TO_CHAR(assignment_due_date, 'DD MON, HH:MI') assignment_due_date,
total_marks
FROM Assignment
WHERE course_id=(%s)
"""
get_assignment_data = \
"""
SELECT * FROM Assignment
WHERE assignment_id=(%s)
"""
get_venue = \
"""
SELECT * FROM Venue
WHERE university_id=(%s)
"""
get_venue_by_venue_id = \
"""
SELECT * FROM Venue
WHERE venue_id=(%s)
"""
get_assignment_submission_data = \
"""
SELECT * FROM(
SELECT * FROM Submits
WHERE assignment_id = (%s)
AND user_id = (%s)
) S JOIN (
SELECT * FROM Assignment
) A
ON S.assignment_id=A.assignment_id
"""
get_assignment_submission_statistics = \
"""
SELECT MAX(S.marks_obtained) AS max, MIN(S.marks_obtained) AS min, AVG(S.marks_obtained) AS avg FROM(
(SELECT * FROM Registered WHERE
course_id IN (
SELECT course_id FROM Assignment
WHERE assignment_id = (%s))) R JOIN (SELECT * from submits WHERE assignment_id = (%s)) S ON (R.user_id=S.user_id)
)
"""
get_assignment_submission_data_all = \
"""
SELECT *, R.user_id as user_id FROM(
SELECT * FROM Registered WHERE
course_id IN (
SELECT course_id FROM Assignment
WHERE assignment_id = (%s)
)) R JOIN (
SELECT * FROM user_info
) U
ON R.user_id=U.user_id LEFT JOIN (
SELECT * FROM submits
WHERE assignment_id=(%s)
) S ON (R.user_id=S.user_id)
"""
create_assignment_query = \
"""
INSERT INTO Assignment VALUES ((%s), (%s), (%s), (%s), (%s), (%s));
"""
create_admin_query = \
"""
INSERT INTO user_info VALUES ((%s), (%s), (%s), (%s), (%s), (%s));
INSERT INTO admin VALUES ((%s), (%s));
"""
create_uni_query = \
"""
INSERT INTO university VALUES ((%s), (%s), (%s), (%s), (%s), (%s), (%s));
"""
create_course_query = \
"""
INSERT INTO course VALUES ((%s), (%s), (%s), (%s), (%s), (%s), (%s), (%s), (%s), (%s), (%s))
"""
create_student_query = \
"""
INSERT INTO user_info VALUES ((%s), (%s), (%s), (%s), (%s), (%s));
INSERT INTO student VALUES ((%s), (%s), (%s));
"""
create_professor_query = \
"""
INSERT INTO user_info VALUES ((%s), (%s), (%s), (%s), (%s), (%s));
INSERT INTO professor VALUES ((%s), (%s));
"""
create_venue_query = \
"""
INSERT INTO venue VALUES ((%s), (%s), (%s), (%s));
"""
delete_assignment_query = \
"""
DELETE FROM Assignment WHERE assignment_id=(%s);
"""
delete_course_query = \
"""
DELETE FROM Course WHERE course_id=(%s);
"""
delete_university_query = \
"""
DELETE FROM university WHERE university_id=(%s);
"""
delete_user_query = \
"""
DELETE FROM user_info WHERE user_id=(%s);
"""
delete_venue_query = \
"""
DELETE FROM venue WHERE venue_id=(%s);
"""
submit_assignment_material_first = \
"""
INSERT INTO Submits VALUES ((%s), (%s), (%s));
"""
submit_assignment_material_update = \
"""
UPDATE Submits SET
submitted_material = (%s)
WHERE assignment_id = (%s)
AND user_id = (%s);
"""
give_assignment_marks_first = \
"""
INSERT INTO Submits VALUES ((%s), (%s), (%s), (%s));
"""
add_course_profs = \
"""
INSERT INTO course_prof VALUES ((%s), (%s));
"""
add_course_students = \
"""
INSERT INTO registered VALUES ((%s), (%s));
"""
add_course_tas = \
"""
INSERT INTO assists VALUES ((%s), (%s));
"""
remove_course_profs = \
"""
DELETE FROM course_prof WHERE course_id=(%s) AND user_id= (%s);
"""
remove_course_students = \
"""
DELETE FROM registered WHERE course_id=(%s) AND user_id= (%s);
"""
remove_course_tas = \
"""
DELETE FROM assists WHERE course_id=(%s) AND user_id= (%s);
"""
give_assignment_marks_update = \
"""
UPDATE Submits SET
marks_obtained = (%s)
WHERE assignment_id = (%s)
AND user_id = (%s);
"""
update_user_query = \
"""
UPDATE user_info SET
(user_first_name, user_last_name, user_password) = ((%s), (%s), (%s))
WHERE user_id = (%s);
"""
update_course_query = \
"""
UPDATE course SET
(course_name, user_description, course_credits, is_active, course_year, course_semester, venue_id, course_time_slot, is_course_online)=((%s), (%s), (%s), (%s), (%s), (%s), (%s), (%s), (%s))
WHERE course_id=(%s);
"""
update_university_query = \
"""
UPDATE university SET
(university_city, university_state, university_address, university_pincode, university_establishment_date) = ((%s), (%s), (%s), (%s), (%s))
WHERE university_id = (%s);
"""
update_venue_query = \
"""
UPDATE venue SET
(venue_building, venue_class_number)=((%s), (%s))
WHERE venue_id=(%s);
"""
update_assignment_query = \
"""
UPDATE assignment SET
(assignment_name, assignment_posted_date, assignment_due_date, total_marks)=((%s), (%s), (%s), (%s))
WHERE assignment_id=(%s);
"""
get_current_credits_per_sem = \
"""
SELECT SUM(course_credits) AS credits_already_registered FROM course WHERE (course_semester, course_year) in
(SELECT course_semester, course_year FROM course
WHERE course_id='418757') AND course_id IN (SELECT course_id FROM registered WHERE user_id='23980')
"""
|
def fatorial(número, show=False):
    """
    --> Computes the factorial of a number
    :param número: the number whose factorial is computed
    :param show: if True, shows the step-by-step calculation
    :return: the factorial of número
    """
    resultado = 1
    if show:
        if número in (0, 1):
            print(f"{número}! = 1")
        else:
            print(f"{número}! = ", end='')
            for c in range(número, 0, -1):
                resultado *= c
                if c > 1:
                    print(f"{c} x ", end='')
                else:
                    print(f"{c} = {resultado}")
    else:
        print(f"{número}! = ", end='')
        for c in range(número, 0, -1):
            resultado *= c
        print(resultado)
    return resultado
fatorial(int(input("Number for factorial: ")), show=True)
|
from abc import ABC, abstractmethod
import numpy as np
class StochasticProcess(ABC):
""" ABC for stochastic process generators """
def __init__(self, t_init, x_init, random_state):
self.rs = np.random.RandomState(random_state)
self.x = np.copy(x_init)
self.t = t_init
def sample(self):
"""
Draw next sample
"""
self.t += 1
return self._sample()
@abstractmethod
def _sample(self):
""" Implementation """
class GaussianWhiteNoiseProcess(StochasticProcess):
""" Generate Gaussian white noise samples """
def __init__(self, mu, sigma, random_state=None):
"""
Params
======
mu (float or array): process mean
sigma (float or array): process std_dev
random_state (None, int, array_like, RandomState): (optional) random
state
"""
self.mu = np.array(mu)
self.sigma = np.array(sigma)
super().__init__(t_init=0, x_init=mu, random_state=random_state)
def _sample(self):
"""Draw next sample"""
self.x = self.rs.normal(self.mu, self.sigma)
return np.copy(self.x)
class OUProcess(StochasticProcess):
""" Generate samples from an OU process"""
def __init__(self, x_inf, time_const, std_dev,
x_init=None, random_state=None):
"""
Params
======
x_inf (float or ndarray): Value to mean revert to. Also determines
dimensions of noise (multi-dimensional
process always uncorrelated)
time_const (float): Mean reversion time constant, i.e. 1/theta,
determines length of auto-correlation of process
std_dev (float): Long-term process standard deviation,
i.e. std_dev = sigma / sqrt(2*theta)
x_init (float or ndarray): (optional) current value of process.
Defaults to x_inf.
random_state (None, int, array_like, RandomState): (optional)
random state
"""
if x_init is None:
x_init = x_inf
super().__init__(0, x_init, random_state)
self.x_inf = x_inf
self.time_const = time_const
if isinstance(std_dev, (int, float)):
std_dev_const = std_dev
std_dev = lambda t: std_dev_const # allow for time-dependency
self.std_dev = std_dev
def _sample(self):
"""
Draw next sample
"""
theta = 1. / self.time_const
sigma = self.std_dev(self.t) * np.sqrt(2. * theta)
dw = self.rs.normal(size=self.x.shape)
dx = - theta * (self.x - self.x_inf) + sigma * dw
self.x += dx
return np.copy(self.x)
class Scrambler(ABC):
""" ABC for classes that scramble actions by adding random noise """
@abstractmethod
def __call__(self, actions):
""" Implement to scramble actions """
class AdditiveNoiseScrambler(Scrambler):
"""
Class that adds a stochastic process to (continuous-valued) action
vectors and then clips output between `lb` and `ub`.
"""
def __init__(self, process, lb=-1., ub=1.):
self.process = process
self.lb = lb
self.ub = ub
def __call__(self, actions):
actions += self.process.sample()
actions = actions.clip(self.lb, self.ub)
return actions
def _required_shape(self, num_agents, action_size):
return (num_agents, action_size)
class OUScrambler(AdditiveNoiseScrambler):
def __init__(self, num_agents, action_size, time_const, std_dev, lb=-1.,
ub=1., random_state=None):
x_inf = np.zeros(self._required_shape(num_agents, action_size))
process = OUProcess(x_inf, time_const, std_dev,
random_state=random_state)
super().__init__(process, lb, ub)
class GaussianWhiteNoiseScrambler(AdditiveNoiseScrambler):
def __init__(self, num_agents, action_size, std_dev, lb=-1., ub=1.,
random_state=None):
shape = self._required_shape(num_agents, action_size)
mu = np.zeros(shape)
sigma = std_dev * np.ones(shape)
process = GaussianWhiteNoiseProcess(mu, sigma, random_state)
super().__init__(process, lb, ub)
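# Minimal usage sketch (values are illustrative only): add OU exploration noise
# to a batch of continuous actions and clip the result back into [-1, 1].
if __name__ == '__main__':
    scrambler = OUScrambler(num_agents=2, action_size=4, time_const=10.0,
                            std_dev=0.2, random_state=0)
    actions = np.zeros((2, 4))
    print(scrambler(actions))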
|
# This sample tests the alternative syntax for unions as
# documented in PEP 604.
from typing import Callable, Generic, TypeVar, Union
def foo2(a: int | str):
if isinstance(a, int):
return 1
else:
return 2
B = bytes | None | Callable[[], None]
A = int | str | B
def foo3(a: A) -> B:
if a == 3 or a is None:
return b""
elif not isinstance(a, (int, str, bytes)):
a()
def foo4(A: "int | str"):
return 1
T = TypeVar("T")
def foo5(a: str):
def helper(value: T) -> T | None:
...
class Baz(Generic[T]):
qux: T | None
reveal_type(helper(a), expected_text="str | None")
reveal_type(Baz[str].qux, expected_text="str | None")
T = TypeVar("T")
TT = TypeVar("TT", bound=type)
def decorator1(value: type[T]) -> type[T]:
...
def decorator2(value: TT) -> TT:
...
class ClassA:
class ClassA_A:
pass
@decorator1
class ClassA_B:
pass
@decorator2
class ClassA_C:
pass
a_or_str: "ClassA.ClassA_A | str"
b_or_str: "ClassA.ClassA_B | str"
b_or_str_Union: Union[ClassA.ClassA_B, str]
c_or_str: "ClassA.ClassA_C | str"
|
from kivy.app import App
from controller.game import Game
from controller.actor import Local, AI
class GameApp(App):
def build(self):
game = Game()
game.actors = [
Local(game=game, name='player1'),
AI(game=game, name='player2'),
]
view = game.actors[0].view
game.setup()
return view
if __name__ == '__main__':
GameApp().run()
|
from sys import byteorder
class Datagram:
def __init__(self):
self.head = bytes(10)
self.payload = bytes(0)
self.EOP = bytes(4)
def set_head(
self,
message_type,
message_id,
num_payloads,
payload_index,
payload_size,
error_type,
restart_index
):
self.head = (
int(message_type).to_bytes(1, byteorder) +
int(message_id).to_bytes(1, byteorder) +
int(num_payloads).to_bytes(2, byteorder) +
int(payload_index).to_bytes(2, byteorder) +
int(payload_size).to_bytes(1, byteorder) +
int(error_type).to_bytes(1, byteorder) +
int(restart_index).to_bytes(2, byteorder)
)
def set_payload(self, payload):
self.payload = payload
def set_EOP(self, eop=0):
self.EOP = (eop).to_bytes(4, byteorder)
def get_datagram(self):
return self.head+self.payload+self.EOP
"""
HEAD:
{
Message type: 1 Byte
Message id: 1 Byte
Num payloads: 2 Bytes
Payload index: 2 Bytes
Payload size: 1 Byte
Error type: 1 Byte
Where to Restart: 2 Bytes
}
"""
|
from aoc2015.day15 import (Ingredient, parse, cookie_vals, partition,
highest_score)
def test_parse():
a = ('Butterscotch: capacity -1, durability -2, flavor 6, '
'texture 3, calories 8')
assert parse(a) == Ingredient('Butterscotch', -1, -2, 6, 3, 8)
def test_cookie_vals():
ingredients = [Ingredient('Butterscotch', -1, -2, 6, 3, 8),
                   Ingredient('Cinnamon', 2, 3, -2, -1, 3)]
quantities = (44, 56)
assert cookie_vals(ingredients, quantities) == (62842880, 520)
def test_partition():
p2_2 = list(partition(2, 2))
assert p2_2 == [(1, 1)]
p10_2 = list(partition(10, 2))
assert p10_2 == [(9, 1),
(8, 2),
(7, 3),
(6, 4),
(5, 5)]
p10_2_7 = list(partition(10, 2, maximum=7))
assert p10_2_7 == [(7, 3),
(6, 4),
(5, 5)]
p4_2 = list(partition(4, 2))
assert p4_2 == [(3, 1), (2, 2)]
p5_2 = list(partition(5, 2))
assert p5_2 == [(4, 1), (3, 2)]
p3_3 = list(partition(3, 3))
assert p3_3 == [(1, 1, 1)]
p4_3 = list(partition(4, 3))
assert p4_3 == [(2, 1, 1)]
p5_3 = list(partition(5, 3))
assert p5_3 == [(3, 1, 1), (2, 2, 1)]
p10_4 = list(partition(10, 4))
assert p10_4 == [(7, 1, 1, 1),
(6, 2, 1, 1),
(5, 3, 1, 1),
(5, 2, 2, 1),
(4, 4, 1, 1),
(4, 3, 2, 1),
(4, 2, 2, 2),
(3, 3, 3, 1),
(3, 3, 2, 2)]
def test_highest_score():
ingredients = [Ingredient('Butterscotch', -1, -2, 6, 3, 8),
                   Ingredient('Cinnamon', 2, 3, -2, -1, 3)]
assert highest_score(ingredients, 100) == 62842880
assert highest_score(ingredients, 100, target=500) == 57600000
|
from django.core.management.base import BaseCommand, CommandError
from udc.models import Concept
class Command(BaseCommand):
args = '<filename>'
help = 'Updates UDC concepts'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError(u'Please specify udc.rdf file path')
source_file = args[0]
try:
Concept.load(source_file)
        except IOError as e:
raise CommandError(e)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-08-27 15:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0007_auto_20170825_2201'),
]
operations = [
migrations.AlterModelOptions(
name='user',
options={'ordering': ['-score']},
),
migrations.AddField(
model_name='user',
name='score',
field=models.FloatField(default=0),
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2022 Milan Ondrašovič <milan.ondrasovic@gmail.com>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torchvision import datasets, transforms
def make_transform(cfg, *, is_train=True):
transform = []
if is_train:
color_jitter_transform = transforms.ColorJitter(
brightness=cfg.DATASET.AUG.BRIGHTNESS,
contrast=cfg.DATASET.AUG.CONTRAST,
saturation=cfg.DATASET.AUG.SATURATION,
hue=cfg.DATASET.AUG.HUE
)
transform.append(color_jitter_transform)
horizontal_flip = cfg.DATASET.AUG.HORIZONTAL_FLIP
if horizontal_flip:
horizontal_flip_transform = transforms.RandomHorizontalFlip(
horizontal_flip
)
transform.append(horizontal_flip_transform)
resize_transform = transforms.Resize(
size=cfg.DATASET.AUG.SIZE, interpolation=Image.BICUBIC
)
transform.append(resize_transform)
to_tensor_transform = transforms.ToTensor()
transform.append(to_tensor_transform)
normalize_transform = transforms.Normalize(
mean=cfg.DATASET.AUG.NORM_MEAN, std=cfg.DATASET.AUG.NORM_STD
)
transform.append(normalize_transform)
transform = transforms.Compose(transform)
return transform
def make_dataset(cfg, transform, *, is_train=True):
dataset = datasets.CIFAR100(
root=cfg.DATASET.ROOT_PATH,
train=is_train,
transform=transform,
download=cfg.DATASET.DOWNLOAD
)
return dataset
def make_data_loader(cfg, dataset, sampler=None, *, is_train=True):
    # If a sampler is provided, shuffle must not be set (the two are mutually exclusive).
shuffle = is_train if sampler is None else None
pin_memory = torch.cuda.is_available()
data_loader = DataLoader(
dataset=dataset,
sampler=sampler,
batch_size=cfg.DATA_LOADER.BATCH_SIZE,
shuffle=shuffle,
num_workers=cfg.DATA_LOADER.N_WORKERS,
pin_memory=pin_memory,
drop_last=True
)
return data_loader
def make_data_loader_pack(cfg, *, is_train=True, is_distributed=False):
transform = make_transform(cfg, is_train=is_train)
dataset = make_dataset(cfg, transform, is_train=is_train)
if is_distributed:
sampler = DistributedSampler(dataset, shuffle=is_train)
else:
sampler = None
data_loader = make_data_loader(cfg, dataset, sampler, is_train=is_train)
return data_loader
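# Minimal config sketch (assumes a yacs-style CfgNode; the concrete config class
# used by this project is not shown here, and the values are illustrative only):
# the keys below are exactly the ones read by make_transform / make_dataset /
# make_data_loader above.
# from yacs.config import CfgNode as CN
# cfg = CN()
# cfg.DATASET = CN()
# cfg.DATASET.ROOT_PATH = './data'
# cfg.DATASET.DOWNLOAD = True
# cfg.DATASET.AUG = CN()
# cfg.DATASET.AUG.BRIGHTNESS = 0.4
# cfg.DATASET.AUG.CONTRAST = 0.4
# cfg.DATASET.AUG.SATURATION = 0.4
# cfg.DATASET.AUG.HUE = 0.1
# cfg.DATASET.AUG.HORIZONTAL_FLIP = 0.5
# cfg.DATASET.AUG.SIZE = 32
# cfg.DATASET.AUG.NORM_MEAN = [0.5071, 0.4865, 0.4409]
# cfg.DATASET.AUG.NORM_STD = [0.2673, 0.2564, 0.2762]
# cfg.DATA_LOADER = CN()
# cfg.DATA_LOADER.BATCH_SIZE = 128
# cfg.DATA_LOADER.N_WORKERS = 4
# train_loader = make_data_loader_pack(cfg, is_train=True)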
|
#!/usr/bin/env python
"""
This code holds the solution for part 1 of day 8 of the Advent of Code for 2017.
"""
import sys
def check_register(register):
if register not in registers:
registers[register] = 0
def perform_check(register, test, test_value):
register_value = registers[register]
if test == "<":
return register_value < test_value
if test == ">":
return register_value > test_value
if test == "==":
return register_value == test_value
if test == "!=":
return register_value != test_value
if test == "<=":
return register_value <= test_value
if test == ">=":
return register_value >= test_value
return False
def perform_instruction(register, instruction, value):
if instruction == "inc":
registers[register] += value
if instruction == "dec":
registers[register] -= value
def process_line(line):
parts = line.split(" ")
check_register(parts[0])
instruction = parts[1]
amount = int(parts[2])
check_register(parts[4])
test = parts[5]
test_value = int(parts[6])
if perform_check(parts[4], test, test_value):
perform_instruction(parts[0], instruction, amount)
registers = {}
with open("test_input.txt", "r") as f:
for line in f.readlines():
process_line(line)
max = 0
first = True
for r in registers:
if first:
max = registers[r]
first = False
else:
if registers[r] > max:
max = registers[r]
if max != 1:
    print("Incorrect test value seen.")
    sys.exit(-1)
print("Test passed.")
# If we get here then all of our tests passed.
registers = {}
with open("input.txt", "r") as f:
for line in f.readlines():
process_line(line)
max = 0
first = True
for r in registers:
if first:
max = registers[r]
first = False
else:
if registers[r] > max:
max = registers[r]
print "Max value: {0}".format(max)
|
import sys
import typing
def action_sanitize():
''' Make action suitable for use as a Pose Library
'''
pass
def apply_pose(pose_index: int = -1):
''' Apply specified Pose Library pose to the rig
:param pose_index: Pose, Index of the pose to apply (-2 for no change to pose, -1 for poselib active pose)
:type pose_index: int
'''
pass
def browse_interactive(pose_index: int = -1):
''' Interactively browse poses in 3D-View
:param pose_index: Pose, Index of the pose to apply (-2 for no change to pose, -1 for poselib active pose)
:type pose_index: int
'''
pass
def new():
''' Add New Pose Library to active Object
'''
pass
def pose_add(frame: int = 1, name: str = "Pose"):
''' Add the current Pose to the active Pose Library
:param frame: Frame, Frame to store pose on
:type frame: int
:param name: Pose Name, Name of newly added Pose
:type name: str
'''
pass
def pose_move(pose: typing.Union[str, int] = '',
direction: typing.Union[str, int] = 'UP'):
''' Move the pose up or down in the active Pose Library
:param pose: Pose, The pose to move
:type pose: typing.Union[str, int]
:param direction: Direction, Direction to move the chosen pose towards
:type direction: typing.Union[str, int]
'''
pass
def pose_remove(pose: typing.Union[str, int] = ''):
''' Remove nth pose from the active Pose Library
:param pose: Pose, The pose to remove
:type pose: typing.Union[str, int]
'''
pass
def pose_rename(name: str = "RenamedPose", pose: typing.Union[str, int] = ''):
''' Rename specified pose from the active Pose Library
:param name: New Pose Name, New name for pose
:type name: str
:param pose: Pose, The pose to rename
:type pose: typing.Union[str, int]
'''
pass
def unlink():
''' Remove Pose Library from active Object
'''
pass
|
"""
Implements the template loading functionality.
Loads template files and replaces all placeholders.
"""
from pathlib import Path
import logging
import typing
import re
import json
import yaml # type: ignore
import toml # type: ignore
LOGGER = logging.getLogger(__name__)
class BadFormatError(json.JSONDecodeError, yaml.YAMLError, toml.TomlDecodeError):
"""
Common error class for decoding errors of the supported file types.
"""
def load_dict(path: Path, placeholder_marker_left: str = '${', placeholder_marker_right: str = '}',
safe=True, **replacements: typing.Union[str, bool, int, float]) -> dict:
"""
Loads a template file from multiple possible formats while replacing placeholders.
:param path: Path to the template file
:param placeholder_marker_left: Left char/string that indicates a beginning placeholder
:param placeholder_marker_right: Right char/string that indicates an ending placeholder
:param safe: If True, placeholders that have no fitting replacement are ignored,
else an error is raised
:param replacements: Replacement of placeholders specified as keyword-args
:return: Python dict-representation of the loaded template.
"""
data = load(path, placeholder_marker_left, placeholder_marker_right, safe, **replacements)
if isinstance(data, list):
LOGGER.error('Expected to find a dictionary-style formatted file!')
raise TypeError
return data
def load_list(path: Path, placeholder_marker_left: str = '${', placeholder_marker_right: str = '}',
safe=True, **replacements: typing.Union[str, bool, int, float]) -> list:
"""
Loads a template file from multiple possible formats while replacing placeholders.
:param path: Path to the template file
:param placeholder_marker_left: Left char/string that indicates a beginning placeholder
:param placeholder_marker_right: Right char/string that indicates an ending placeholder
:param safe: If True, placeholders that have no fitting replacement are ignored,
else an error is raised
:param replacements: Replacement of placeholders specified as keyword-args
:return: Python list-representation of the loaded template.
"""
if path.suffix == '.toml':
LOGGER.error('Toml files do not support list-only content!')
raise NotImplementedError
data = load(path, placeholder_marker_left, placeholder_marker_right, safe, **replacements)
if isinstance(data, dict):
        LOGGER.error('Expected to find a list-style formatted file!')
raise TypeError
return data
def load(path: Path, placeholder_marker_left: str = '${', placeholder_marker_right: str = '}',
safe=True, **replacements: typing.Union[str, bool, int, float]) \
-> typing.Union[list, dict]:
"""
Loads a template file from multiple possible formats while replacing placeholders.
:param path: Path to the template file
:param placeholder_marker_left: Left char/string that indicates a beginning placeholder
:param placeholder_marker_right: Right char/string that indicates an ending placeholder
:param safe: If True, placeholders that have no fitting replacement are ignored,
else an error is raised
:param replacements: Replacement of placeholders specified as keyword-args
:return: Python representation of the loaded template.
"""
try:
        with open(path) as template:
            text = template.read()
except OSError as error:
LOGGER.error('Could not read from file %s', path)
LOGGER.error(
'Make sure the file exists and you have the appropriate rights to read from it')
raise error
if replacements:
# pylint: disable=C0321
def replacer(match): return func_replacer(match, safe, **replacements)
pattern = build_pattern(placeholder_marker_left, placeholder_marker_right)
text = re.sub(pattern, replacer, text)
try:
if path.suffix == '.json':
data = json.loads(text)
elif path.suffix == '.yaml':
data = yaml.safe_load(text)
elif path.suffix == '.toml':
data = toml.loads(text)
else:
raise NotImplementedError('Cannot handle templates of Type %s' % path.suffix)
return data
except (json.JSONDecodeError, yaml.YAMLError, toml.TomlDecodeError) as error:
LOGGER.error('Could not decode file %s', path)
        LOGGER.error('Make sure it is formatted according to the %s standard', path.suffix[1:])
        # BadFormatError uses JSONDecodeError's (msg, doc, pos) constructor signature
        raise BadFormatError(str(error), text, 0) from error
def build_pattern(placeholder_marker_left: str, placeholder_marker_right: str) -> re.Pattern[str]:
"""
Creates the pattern used to match the placeholders.
:param placeholder_marker_left: Marks the beginning of placeholders - internally
is escaped with re.escape
:param placeholder_marker_right: Marks the ending of placeholders - internally
is escaped with re.escape
:return: Created pattern, that matches the placeholders as specified and optionally
surrounding quotes with the following groups:
# group 'prefix_quote': optional leading quote
# group 'placeholder': everything in between left and right marker (smallest possible
match only)
        # group 'suffix_quote': optional trailing quote
"""
left_marker = re.escape(placeholder_marker_left)
right_marker = re.escape(placeholder_marker_right)
prefix_quote = r'(?P<prefix_quote>"?)'
suffix_quote = r'(?P<suffix_quote>"?)'
placeholder = r'(?P<placeholder>.+?)'
# regex matches everything between left and right marker, optionally in between quotes
# see https://regex101.com/r/zEwq7N/1
return re.compile(prefix_quote + left_marker + placeholder + right_marker + suffix_quote)
def func_replacer(match: re.Match, safe: bool,
**replacements: typing.Union[str, bool, int, float]) -> str:
"""
Replaces given match with desired replacement. Preserves format of placeholders inside longer
strings while transforming placeholders to replacement format when possible.
:param match: Match in which the placeholder is to be replaced
:param safe: If true, missing replacements for existing placeholders are ignored,
if not raises a KeyError
:param replacements: Keyword-style replacements in the format <placeholder='replacement'>
:return: Text with performed replacement
"""
prefix_quote = match.group('prefix_quote')
placeholder = match.group('placeholder')
suffix_quote = match.group('suffix_quote')
try:
# replace correctly so strings are not broken but non-strings are not converted to strings
if prefix_quote and not suffix_quote:
return prefix_quote + str(replacements[placeholder])
if suffix_quote and not prefix_quote:
return str(replacements[placeholder]) + suffix_quote
if prefix_quote and suffix_quote:
if not isinstance(replacements[placeholder], str):
return str(replacements[placeholder])
return prefix_quote + str(replacements[placeholder]) + suffix_quote
return str(replacements[placeholder])
except KeyError as error:
if safe:
return match.group(0)
raise KeyError(f'Missing replacement for placeholder {placeholder}!') from error
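if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): write a small JSON template with
    # ${...} placeholders and load it with replacements. Quoted non-string
    # replacements (here, the int 3) lose their surrounding quotes.
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        template_path = Path(tmp_dir) / 'config.json'
        template_path.write_text('{"greeting": "Hello ${name}", "retries": "${retries}"}')
        print(load_dict(template_path, name='world', retries=3))
        # -> {'greeting': 'Hello world', 'retries': 3}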
|
# Rotating Cube, by Al Sweigart al@inventwithpython.com
import math, time, random, sys, os
if len(sys.argv) == 3:
# Set size based on command line arguments:
WIDTH = int(sys.argv[1])
HEIGHT = int(sys.argv[2])
else:
WIDTH, HEIGHT = 80, 50
DEFAULT_SCALEX = (WIDTH - 4) // 8
DEFAULT_SCALEY = (HEIGHT - 4) // 4 # Text cells are twice as tall as they are wide, so set scaley accordingly.
DEFAULT_TRANSLATEX = (WIDTH - 4) // 2
DEFAULT_TRANSLATEY = (HEIGHT - 4) // 2
def line(x1, y1, x2, y2):
"""Returns a generator that produces all of the points in a line
between `x1`, `y1` and `x2`, `y2`. Uses the Bresenham line algorithm."""
isSteep = abs(y2-y1) > abs(x2-x1)
if isSteep:
x1, y1 = y1, x1
x2, y2 = y2, x2
isReversed = x1 > x2
if isReversed:
x1, x2 = x2, x1
y1, y2 = y2, y1
deltax = x2 - x1
deltay = abs(y2-y1)
error = int(deltax / 2)
y = y2
ystep = None
if y1 < y2:
ystep = 1
else:
ystep = -1
for x in range(x2, x1 - 1, -1):
if isSteep:
yield (y, x)
else:
yield (x, y)
error -= deltay
if error <= 0:
y -= ystep
error += deltax
else:
deltax = x2 - x1
deltay = abs(y2-y1)
error = int(deltax / 2)
y = y1
ystep = None
if y1 < y2:
ystep = 1
else:
ystep = -1
for x in range(x1, x2 + 1):
if isSteep:
yield (y, x)
else:
yield (x, y)
error -= deltay
if error < 0:
y += ystep
error += deltax
def rotateXYZ(x, y, z, ax, ay, az):
# NOTE: Rotates around the origin (0, 0, 0)
# Rotate along x axis:
rotatedX = x
rotatedY = (y * math.cos(ax)) - (z * math.sin(ax))
rotatedZ = (y * math.sin(ax)) + (z * math.cos(ax))
x, y, z = rotatedX, rotatedY, rotatedZ
# Rotate along y axis:
rotatedX = (z * math.sin(ay)) + (x * math.cos(ay))
rotatedY = y
rotatedZ = (z * math.cos(ay)) - (x * math.sin(ay))
x, y, z = rotatedX, rotatedY, rotatedZ
# Rotate along z axis:
rotatedX = (x * math.cos(az)) - (y * math.sin(az))
rotatedY = (x * math.sin(az)) + (y * math.cos(az))
rotatedZ = z
return (rotatedX, rotatedY, rotatedZ)
def transformPoint(point1, point2, scalex=None, scaley=None, translatex=None, translatey=None):
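    # Project two 3D points' (x, y) coordinates onto screen space by scaling and
    # translating, returning the line segment endpoints packed as (x1, y1, x2, y2)
    # ready to be passed to line().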
if scalex is None:
scalex = DEFAULT_SCALEX
if scaley is None:
scaley = DEFAULT_SCALEY
if translatex is None:
translatex = DEFAULT_TRANSLATEX
if translatey is None:
translatey = DEFAULT_TRANSLATEY
return (int(point1[0] * scalex + translatex),
int(point1[1] * scaley + translatey),
int(point2[0] * scalex + translatex),
int(point2[1] * scaley + translatey))
# Set up the points of the cube:
'''
Cube points:
0+-----+1
/ /|
/ / | -y
2+-----+3 | |
| 4+ | +5 +-- +x
| | / /
| |/ +z
6+-----+7
'''
points = [[-1, -1, -1],
[ 1, -1, -1],
[-1, -1, 1],
[ 1, -1, 1],
[-1, 1, -1],
[ 1, 1, -1],
[-1, 1, 1],
[ 1, 1, 1]]
rotatedPoints = [None] * len(points)
rx = ry = rz = 0 # Rotation amounts for each axis.
try:
while True:
# Rotate the cube:
rx += 0.03 + random.randint(1, 20) / 100
ry += 0.08 + random.randint(1, 20) / 100
rz += 0.13 + random.randint(1, 20) / 100
for i in range(len(points)):
rotatedPoints[i] = rotateXYZ(*points[i], rx, ry, rz)
# Get the points of the cube lines:
cubePoints = []
cubePoints.extend(line(*transformPoint(rotatedPoints[0], rotatedPoints[1])))
cubePoints.extend(line(*transformPoint(rotatedPoints[1], rotatedPoints[3])))
cubePoints.extend(line(*transformPoint(rotatedPoints[3], rotatedPoints[2])))
cubePoints.extend(line(*transformPoint(rotatedPoints[2], rotatedPoints[0])))
cubePoints.extend(line(*transformPoint(rotatedPoints[0], rotatedPoints[4])))
cubePoints.extend(line(*transformPoint(rotatedPoints[1], rotatedPoints[5])))
cubePoints.extend(line(*transformPoint(rotatedPoints[2], rotatedPoints[6])))
cubePoints.extend(line(*transformPoint(rotatedPoints[3], rotatedPoints[7])))
cubePoints.extend(line(*transformPoint(rotatedPoints[4], rotatedPoints[5])))
cubePoints.extend(line(*transformPoint(rotatedPoints[5], rotatedPoints[7])))
cubePoints.extend(line(*transformPoint(rotatedPoints[7], rotatedPoints[6])))
cubePoints.extend(line(*transformPoint(rotatedPoints[6], rotatedPoints[4])))
cubePoints = tuple(frozenset(cubePoints)) # Get rid of duplicate points.
# Draw the cube:
for y in range(0, HEIGHT, 2):
for x in range(WIDTH):
if (x, y) in cubePoints and (x, y + 1) in cubePoints:
print(chr(9608), end='', flush=False) # Draw full block.
elif (x, y) in cubePoints and (x, y + 1) not in cubePoints:
print(chr(9600), end='', flush=False) # Draw top half of block.
elif not (x, y) in cubePoints and (x, y + 1) in cubePoints:
print(chr(9604), end='', flush=False) # Draw bottom half of block.
else:
print(' ', end='', flush=False) # Draw empty space.
print(flush=False)
print('Press Ctrl-C or Ctrl-D to quit.', end='', flush=True)
time.sleep(0.1) # Pause for a bit.
# Erase the screen:
if sys.platform == 'win32':
os.system('cls')
else:
os.system('clear')
except KeyboardInterrupt:
pass # When Ctrl-C is pressed, stop looping.
|
from pyvod.channel_collections import MovieTrailers, FeatureFilmsPicfixer, \
FeatureFilms
query = "grindhouse"
print("Searching Feature Films Picfixer for: ", query, "\n")
for movie, score in FeatureFilmsPicfixer.search(query):
print(movie, score)
print("\nSearching Feature Films for: ", query, "\n")
for movie, score in FeatureFilms.search(query):
print(movie, score)
print(movie.data["tags"])
query = "Frankenstein"
print("\nSearching Media Trailers for: ", query, "\n")
for movie, score in MovieTrailers.search(query):
print(movie, score)
|
"""Test the Basic ICN Layer implementation"""
import multiprocessing
import time
import unittest
from PiCN.Layers.ICNLayer import BasicICNLayer
from PiCN.Layers.ICNLayer.ContentStore import ContentStoreMemoryExact
from PiCN.Layers.ICNLayer.ForwardingInformationBase import ForwardingInformationBaseMemoryPrefix
from PiCN.Layers.ICNLayer.PendingInterestTable import PendingInterstTableMemoryExact
from PiCN.Packets import Name, Interest, Content, Nack, NackReason
from PiCN.Processes import PiCNSyncDataStructFactory
class test_BasicICNLayer(unittest.TestCase):
"""Test the Basic ICN Layer implementation"""
def setUp(self):
#setup icn_layer
self.icn_layer = BasicICNLayer(log_level=255)
synced_data_struct_factory = PiCNSyncDataStructFactory()
synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
synced_data_struct_factory.register("fib", ForwardingInformationBaseMemoryPrefix)
synced_data_struct_factory.register("pit", PendingInterstTableMemoryExact)
synced_data_struct_factory.create_manager()
cs = synced_data_struct_factory.manager.cs()
fib = synced_data_struct_factory.manager.fib()
pit = synced_data_struct_factory.manager.pit()
cs.set_cs_timeout(2)
pit.set_pit_timeout(2)
pit.set_pit_retransmits(2)
self.icn_layer.cs = cs
self.icn_layer.fib = fib
self.icn_layer.pit = pit
#setup queues icn_routing layer
self.queue1_icn_routing_up = multiprocessing.Queue()
self.queue1_icn_routing_down = multiprocessing.Queue()
#add queues to ICN layer
self.icn_layer.queue_from_lower = self.queue1_icn_routing_up
self.icn_layer.queue_to_lower = self.queue1_icn_routing_down
def tearDown(self):
self.icn_layer.stop_process()
def test_ICNLayer_interest_forward_basic(self):
"""Test ICN layer with no CS and PIT entry"""
self.icn_layer.start_process()
to_faceid = 1
from_faceid = 2
#Add entry to the fib
name = Name("/test/data")
interest = Interest("/test/data")
self.icn_layer.fib.add_fib_entry(name, [to_faceid], static=True)
#forward entry
self.queue1_icn_routing_up.put([from_faceid, interest])
try:
faceid, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
#check output
self.assertEqual(faceid, to_faceid)
self.assertEqual(data, interest)
#check data structures
self.assertEqual(self.icn_layer.cs.get_container_size(), 0)
self.assertEqual(self.icn_layer.fib.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).faceid, [to_faceid])
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).name, name)
self.assertEqual(self.icn_layer.pit.find_pit_entry(name).faceids[0], from_faceid)
self.assertEqual(self.icn_layer.pit.find_pit_entry(name).name, name)
def test_ICNLayer_interest_forward_longest_match(self):
"""Test ICN layer with no CS and no PIT entry and longest match"""
self.icn_layer.start_process()
to_face_id = 1
from_face_id = 2
#Add entry to the fib
name = Name("/test")
interest = Interest("/test/data")
self.icn_layer.fib.add_fib_entry(name, [to_face_id], static=True)
#forward entry
self.queue1_icn_routing_up.put([from_face_id, interest])
try:
face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
#check output
self.assertEqual(face_id, to_face_id)
self.assertEqual(data, interest)
#check data structures
self.assertEqual(self.icn_layer.cs.get_container_size(), 0)
self.assertEqual(self.icn_layer.fib.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).faceid, [to_face_id])
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).name, name)
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest.name).faceids[0], from_face_id)
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest.name).name, interest.name)
def test_ICNLayer_interest_forward_deduplication(self):
"""Test ICN layer with no CS and no PIT entry and deduplication"""
self.icn_layer.start_process()
to_face_id = 1
from_face_id_1 = 2
from_face_id_2 = 3
# Add entry to the fib
name = Name("/test")
interest1 = Interest("/test/data")
interest2 = Interest("/test/data")
self.icn_layer.fib.add_fib_entry(name, [to_face_id])
# forward entry
self.queue1_icn_routing_up.put([from_face_id_1, interest1])
try:
face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
self.queue1_icn_routing_up.put([from_face_id_2, interest2], block=True)
self.assertTrue(self.queue1_icn_routing_down.empty())
time.sleep(3)
# check output
self.assertEqual(face_id, to_face_id)
self.assertEqual(data, interest1)
time.sleep(0.3) # sleep required, since there is no blocking get before the checks
# check data structures
self.assertEqual(self.icn_layer.cs.get_container_size(), 0)
self.assertEqual(self.icn_layer.fib.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).faceid, [to_face_id])
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).name, name)
self.assertEqual(len(self.icn_layer.pit.find_pit_entry(interest1.name).faceids), 2)
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest1.name).faceids, [from_face_id_1, from_face_id_2])
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest1.name).name, interest1.name)
def test_ICNLayer_interest_forward_content_match(self):
"""Test ICN layer with CS entry matching"""
self.icn_layer.start_process()
from_face_id = 2
interest = Interest("/test/data")
#add content
content = Content("/test/data")
self.icn_layer.cs.add_content_object(content)
#request content
self.queue1_icn_routing_up.put([from_face_id, interest])
#get content
try:
face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data, content)
self.assertEqual(face_id, from_face_id)
def test_ICNLayer_interest_forward_content_no_match(self):
"""Test ICN layer with CS entry no match"""
self.icn_layer.start_process()
to_face_id = 1
from_face_id = 2
interest = Interest("/test/data/bla")
name = Name("/test/data")
self.icn_layer.fib.add_fib_entry(name, [to_face_id], static=True)
#add content
content = Content("/test/data")
self.icn_layer.cs.add_content_object(content)
#request content
self.queue1_icn_routing_up.put([from_face_id, interest])
#get data from fib
try:
face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
        self.assertEqual(data, interest)
        self.assertEqual(face_id, to_face_id)
self.assertTrue(self.queue1_icn_routing_up.empty())
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest.name).name, interest.name)
def test_ICNLayer_content_no_pit(self):
"""Test receiving a content object with no PIT entry"""
self.icn_layer.start_process()
from_face_id = 2
content = Content("/test/data")
self.queue1_icn_routing_up.put([from_face_id, content])
self.assertTrue(self.queue1_icn_routing_down.empty())
def test_ICNLayer_content_pit(self):
"""Test receiving a content object with PIT entry"""
self.icn_layer.start_process()
content_in_face_id = 1
from_face_id = 2
name = Name("/test/data")
content = Content("/test/data")
self.icn_layer.pit.add_pit_entry(name, from_face_id, content_in_face_id, None, None)
self.queue1_icn_routing_up.put([content_in_face_id, content])
try:
face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
self.assertEqual(face_id, from_face_id)
self.assertEqual(data, content)
def test_ICNLayer_content_two_pit_entries(self):
"""Test receiving a content object with two PIT entries"""
self.icn_layer.start_process()
content_in_face_id = 1
from_face_id_1 = 2
from_face_id_2 = 3
name = Name("/test/data")
content = Content("/test/data")
self.icn_layer.pit.add_pit_entry(name, from_face_id_1, content_in_face_id, None, False)
self.icn_layer.pit.add_pit_entry(name, from_face_id_2, content_in_face_id, None, False)
self.queue1_icn_routing_up.put([content_in_face_id, content])
try:
face_id_1, data1 = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
self.assertEqual(face_id_1, from_face_id_1)
self.assertEqual(data1, content)
face_id_2, data2 = self.queue1_icn_routing_down.get()
self.assertEqual(face_id_2, from_face_id_2)
self.assertEqual(data2, content)
def test_ICNLayer_ageing_pit(self):
"""Test PIT ageing"""
self.icn_layer.start_process()
from_face_id_1 = 1
to_face_id = 2
name = Name("/test/data")
interest = Interest(name)
self.icn_layer.fib.add_fib_entry(name, [to_face_id])
self.icn_layer.pit.add_pit_entry(name, from_face_id_1, to_face_id, interest, False)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.find_pit_entry(name).name, name)
# test retransmit 1
self.icn_layer.ageing()
time.sleep(0.1)
self.assertFalse(self.icn_layer.queue_to_lower.empty())
try:
rface_id, rinterest = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(rface_id, to_face_id)
self.assertEqual(rinterest, interest)
# test retransmit 2
self.icn_layer.ageing()
time.sleep(0.1)
self.assertFalse(self.icn_layer.queue_to_lower.empty())
try:
rface_id, rinterest = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(rface_id, to_face_id)
self.assertEqual(rinterest, interest)
# Wait for timeout
time.sleep(2)
# test retransmit 3 to get number of retransmit
self.icn_layer.ageing()
time.sleep(0.1)
self.assertFalse(self.icn_layer.queue_to_lower.empty())
try:
rface_id, rinterest = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(rface_id, to_face_id)
self.assertEqual(rinterest, interest)
# test remove pit entry
self.icn_layer.ageing()
# nack = self.icn_layer.queue_to_lower.get(timeout=8.0) # invalid, no PIT Timeout Nack anymore
# self.assertEqual(nack, [1, Nack(rinterest.name, NackReason.PIT_TIMEOUT, rinterest)])
self.assertTrue(self.icn_layer.queue_to_lower.empty())
self.assertEqual(self.icn_layer.pit.get_container_size(), 0)
def test_ICNLayer_ageing_cs(self):
"""Test CS ageing and static entries"""
self.icn_layer.start_process()
name1 = Name("/test/data")
content1 = Content(name1, "HelloWorld")
name2 = Name("/data/test")
content2 = Content(name2, "Goodbye")
self.icn_layer.cs.add_content_object(content1)
self.icn_layer.cs.add_content_object(content2, static=True)
self.assertEqual(self.icn_layer.cs.get_container_size(), 2)
self.assertEqual(self.icn_layer.cs.find_content_object(name1).content, content1)
self.assertEqual(self.icn_layer.cs.find_content_object(name2).content, content2)
#Test aging 1
self.icn_layer.ageing()
self.assertEqual(self.icn_layer.cs.get_container_size(), 2)
self.assertEqual(self.icn_layer.cs.find_content_object(name1).content, content1)
self.assertEqual(self.icn_layer.cs.find_content_object(name2).content, content2)
time.sleep(2)
# Test aging 2
self.icn_layer.ageing()
self.assertEqual(self.icn_layer.cs.get_container_size(), 1)
self.assertEqual(self.icn_layer.cs.find_content_object(name2).content, content2)
def test_ICNLayer_content_from_app_layer_no_pit(self):
"""get content from app layer when there is no pit entry available"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer.start_process()
n = Name("/test/data")
c = Content(n, "HelloWorld")
self.icn_layer.queue_from_higher.put([0, c])
time.sleep(1)
self.assertTrue(self.queue1_icn_routing_down.empty())
def test_ICNLayer_content_from_app_layer(self):
"""get content from app layer when there is a pit entry available"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer.start_process()
face_id = 1
n = Name("/test/data")
self.icn_layer.pit.add_pit_entry(n, face_id, -1)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
c = Content(n, "HelloWorld")
self.icn_layer.queue_from_higher.put([0, c])
try:
data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data, [face_id, c])
def test_ICNLayer_content_to_app_layer_no_pit(self):
"""get content to app layer no pit"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer.start_process()
from_face_id = 1
n = Name("/test/data")
c = Content(n, "HelloWorld")
self.icn_layer.queue_from_lower.put([from_face_id, c])
time.sleep(1)
self.assertTrue(self.icn_layer.queue_to_higher.empty())
def test_ICNLayer_content_to_app_layer(self):
"""get content to app layer"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer.start_process()
face_id = -1
from_face_id = 1
n = Name("/test/data")
self.icn_layer.pit.add_pit_entry(n, face_id, -1, interest=None, local_app=True)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
c = Content(n, "HelloWorld")
self.icn_layer.queue_from_lower.put([from_face_id, c])
try:
data = self.icn_layer.queue_to_higher.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data, [1, c])
def test_ICNLayer_interest_from_app_layer_no_pit(self):
"""Test sending and interest message from APP with no PIT entry"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer._interest_to_app=True
self.icn_layer.start_process()
face_id = 1
n = Name("/test/data")
i = Interest(n)
self.icn_layer.fib.add_fib_entry(n, [face_id], True)
self.icn_layer.queue_from_higher.put([0, i])
try:
to_faceid, data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(to_faceid, face_id)
self.assertEqual(i, data)
self.assertEqual(self.icn_layer.pit.find_pit_entry(n).interest, i)
self.assertTrue(self.icn_layer.pit.find_pit_entry(n).local_app[0])
def test_ICNLayer_interest_from_app_layer_pit(self):
"""Test sending and interest message from APP with a PIT entry --> interest not for higher layer"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer._interest_to_app=True
self.icn_layer.start_process()
face_id = 1
from_face_id = 2
n = Name("/test/data")
i = Interest(n)
self.icn_layer.fib.add_fib_entry(n, [face_id], True)
self.icn_layer.pit.add_pit_entry(n, from_face_id, face_id, i, local_app=False)
self.assertFalse(self.icn_layer.pit.find_pit_entry(n).local_app[0])
self.icn_layer.queue_from_higher.put([0, i])
try:
to_face_id, data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(to_face_id, face_id)
self.assertEqual(i, data)
self.assertEqual(self.icn_layer.pit.find_pit_entry(n).interest, i)
self.assertFalse(self.icn_layer.pit.find_pit_entry(n).local_app[0]) #Just forward, not from local app
def test_ICNLayer_interest_to_app_layer_no_pit(self):
"""Test sending and interest message from APP with no PIT entry"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer._interest_to_app = True
self.icn_layer.start_process()
face_id = 1
from_face_id = 2
n = Name("/test/data")
i = Interest(n)
self.icn_layer.fib.add_fib_entry(n, [face_id], True)
self.icn_layer.queue_from_lower.put([from_face_id, i])
try:
data = self.icn_layer.queue_to_higher.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data[1], i)
self.assertEqual(self.icn_layer.pit.find_pit_entry(n).interest, i)
def test_ICNLayer_interest_to_app_layer_pit(self):
"""Test sending and interest message from APP with a PIT entry"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer._interest_to_app = True
self.icn_layer.start_process()
face_id = [1]
from_face_id = 2
n = Name("/test/data")
i = Interest(n)
self.icn_layer.fib.add_fib_entry(n, face_id, True)
self.icn_layer.pit.add_pit_entry(n, from_face_id, face_id[0], i, local_app=False)
self.icn_layer.queue_from_lower.put([from_face_id, i])
time.sleep(1)
self.assertTrue(self.icn_layer.queue_to_higher.empty()) #--> deduplication by pit entry
def test_ICNLayer_interest_to_app_layer_cs(self):
"""Test sending and interest message from APP with a CS entry"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer._interest_to_app = True
self.icn_layer.start_process()
face_id = 1
from_face_id = 2
n = Name("/test/data")
i = Interest(n)
c = Content(n, "Hello World")
self.icn_layer.fib.add_fib_entry(n, [face_id], True)
self.icn_layer.cs.add_content_object(c)
self.icn_layer.queue_from_lower.put([from_face_id, i])
try:
to_face_id, data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(to_face_id, from_face_id)
self.assertEqual(data, c)
self.assertTrue(self.icn_layer.queue_to_higher.empty()) # --> was answered by using Content from cache
def test_ICNLayer_issue_nack_no_content_no_fib_from_lower(self):
"""Test if ICN Layer issues Nack if no content and no fib entry is available from lower"""
self.icn_layer.start_process()
interest = Interest("/test/data")
nack = Nack(interest.name, NackReason.NO_ROUTE, interest=interest)
self.icn_layer.queue_from_lower.put([1, interest])
try:
data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
fid = data[0]
packet = data[1]
self.assertEqual(fid, 1)
self.assertEqual(packet, nack)
def test_ICNLayer_issue_nack_no_content_no_fib_from_higher(self):
"""Test if ICN Layer issues Nack if no content and no fib entry is available from higher"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer.start_process()
interest = Interest("/test/data")
nack = Nack(interest.name, NackReason.NO_ROUTE, interest=interest)
self.icn_layer.queue_from_higher.put([1, interest])
try:
data = self.icn_layer.queue_to_higher.get(timeout=2.0)
except:
self.fail()
fid = data[0]
packet = data[1]
self.assertEqual(fid, 1)
self.assertEqual(packet, nack)
#TODO CHECK
def test_ICNLayer_handling_nack_no_fib(self):
"""Test if ICN Layer handles a Nack correctly if no fib entry is available"""
self.icn_layer.start_process()
n1 = Name("/test/data")
i1 = Interest(n1)
fid_1 = 1
nack_1 = Nack(n1, NackReason.NO_ROUTE, interest=i1)
self.icn_layer.pit.add_pit_entry(n1, fid_1, -1, i1, False)
self.icn_layer.queue_from_lower.put([2, nack_1])
try:
data = self.icn_layer.queue_to_lower.get(timeout=2.0)
print(data)
except:
self.fail()
self.assertEqual(data[0], fid_1)
self.assertEqual(data[1], nack_1)
#TODO Fix the error
def test_ICNLayer_handling_nack_next_fib(self):
"""Test if ICN Layer handles a Nack correctly if further fib entry is available"""
self.icn_layer.start_process()
n1 = Name("/test/data/d1")
i1 = Interest(n1)
from_fid = 1
to_fib1 = 2
to_fib2 = 3
to_fib3 = 4
nack_1 = Nack(n1, NackReason.NO_ROUTE, interest=i1)
self.icn_layer.pit.add_pit_entry(n1, from_fid, to_fib3, i1, None)
self.icn_layer.pit.add_used_fib_face(n1, [to_fib3])
self.icn_layer.fib.add_fib_entry(Name("/test"), [to_fib1])
self.icn_layer.fib.add_fib_entry(Name("/test/data"), [to_fib2])
self.icn_layer.fib.add_fib_entry(Name("/test/data/d1"), [to_fib3]) #assuming this entry was used first and is active when nack arrives
self.icn_layer.queue_from_lower.put([to_fib3, nack_1])
try:
data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data[0], to_fib2)
self.assertEqual(data[1], i1)
#check testing second path
self.icn_layer.queue_from_lower.put([to_fib2, nack_1])
try:
data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data[0], to_fib1)
self.assertEqual(data[1], i1)
#check no path left
self.icn_layer.queue_from_lower.put([to_fib1, nack_1])
try:
data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data[0], from_fid)
self.assertEqual(data[1], nack_1)
def test_multicast_and_nack_handling(self):
"""Test if a multicast works, and if the nack counter for the multicast works"""
i1 = Interest("/test/data")
n1 = Nack(i1.name, NackReason.NO_CONTENT, i1)
self.icn_layer.start_process()
self.icn_layer.fib.add_fib_entry(i1.name, [2,3,4])
self.icn_layer.queue_from_lower.put([1, i1])
d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
self.assertEqual([2, i1], d1)
self.assertTrue(self.icn_layer.queue_to_lower.empty())
self.icn_layer.queue_from_lower.put([2, n1])
d2 = self.icn_layer.queue_to_lower.get(timeout=4.0)
print(d2)
self.assertEqual([3, i1], d2)
self.assertTrue(self.icn_layer.queue_to_lower.empty())
self.icn_layer.queue_from_lower.put([3, n1])
try:
d3 = self.icn_layer.queue_to_lower.get(timeout=4.0)
except:
self.fail()
print(d3)
self.assertEqual([4, i1], d3)
##
#
#
# self.assertTrue(self.icn_layer.queue_to_lower.empty())
#
# self.icn_layer.queue_from_lower.put([2, n1])
# d3 = self.icn_layer.queue_to_lower.get(timeout=2.0)
# self.assertEqual([1, n1], d3)
def test_communicating_vessels_forwarding_strategy(self):
"""This function test the whole idea of forwarding strategy with multiple PIT entries
and multiple FIB entries with multiple matches in PIT and FIB and also it checks the NACK handler in case of one face was nacked in a fib entry or all faces
of a fib entry was nacked"""
ab_name = Name("/a/b")
i1 = Interest("/a/b/x")
i2 = Interest("/a/b/y")
i3 = Interest("/a/b/z")
i4 = Interest("/a/b/w")
i5 = Interest("/a/b/k")
i6 = Interest("/x/y")
i7 = Interest("/x")
i8 = Interest("/m/n")
i9 = Interest("/o/p")
n1 = Nack(i1.name, NackReason.NO_CONTENT, i1)
self.icn_layer.start_process()
self.icn_layer.fib.add_fib_entry(ab_name, [1, 2, 3])
        # the fib table also has several other entries whose faces overlap with the faces of the ab_name entry
self.icn_layer.fib.add_fib_entry(i6.name, [1, 4])
self.icn_layer.fib.add_fib_entry(i7.name, [2, 3])
self.icn_layer.fib.add_fib_entry(i8.name, [1, 5])
self.icn_layer.fib.add_fib_entry(i9.name, [5, 3])
#send the first interest ("/a/b/x") with no matching PIT entry. The outgoing face should be "1"
#as it is the first Interest and there is no pending Interests to affect the decision
self.icn_layer.queue_from_lower.put([10, i1])
d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
pit_entry = self.icn_layer.pit.find_pit_entry(i1.name)
self.assertEqual([1], pit_entry.outgoing_faces)
        #send the second Interest ("/a/b/y") while the PIT entry for ("/a/b/x") has not yet been removed from the PIT
        #The interest should be sent to face "2", as the occupancy of face "1" is higher than that of faces "2" and "3"
self.icn_layer.queue_from_lower.put([10, i2])
d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
pit_entry = self.icn_layer.pit.find_pit_entry(i2.name)
self.assertEqual([2], pit_entry.outgoing_faces)
        #send a bunch of Interests: ("/x/y"), ("/x"), ("/m/n"), ("/o/p"). Because there is no matching PIT entry for any of them,
        # each one is sent to the first available face of its FIB entry. So i6 is sent to 1, i7 to 2, ...
self.icn_layer.queue_from_lower.put([10, i6])
d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
self.icn_layer.queue_from_lower.put([10, i7])
d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
self.icn_layer.queue_from_lower.put([10, i8])
d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
self.icn_layer.queue_from_lower.put([10, i9])
d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
        # send the third Interest i3 ("/a/b/z") while the PIT entries for ("/a/b/x") and ("/a/b/y") have not yet been removed from the PIT
        # The interest should be sent to face "3", as the occupancy of faces "1" and "2" is higher than that of "3"
self.icn_layer.queue_from_lower.put([10, i3])
d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
pit_entry = self.icn_layer.pit.find_pit_entry(i3.name)
self.assertEqual([3], pit_entry.outgoing_faces)
        # now send an interest i4 ("/a/b/w") while the previous three matching interests still exist in the PIT. All faces have equal occupancy, so it is sent to the first one.
self.icn_layer.queue_from_lower.put([10, i4])
d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
pit_entry = self.icn_layer.pit.find_pit_entry(i4.name)
self.assertEqual([1], pit_entry.outgoing_faces)
# we are removing i4 and i1 from the PIT table
self.icn_layer.pit.remove_pit_entry(i4.name)
self.icn_layer.pit.remove_pit_entry(i1.name)
        # send i5 while i2 and i3 still exist in the PIT and match i5. The interest is sent to face 1, because i2 went to 2 and i3 to 3, so faces 2 and 3 have higher occupancy than 1
self.icn_layer.queue_from_lower.put([10, i5])
d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
pit_entry = self.icn_layer.pit.find_pit_entry(i5.name)
self.assertEqual([1], pit_entry.outgoing_faces)
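# A minimal, standalone sketch of the occupancy-based ("communicating vessels")
# face selection exercised by the test above. This is illustrative only and is NOT
# the implementation under test: it assumes occupancy is simply the number of
# pending interests currently forwarded over each candidate FIB face.
def choose_outgoing_face(fib_faces, pending_outgoing_faces):
    """Return the FIB face with the lowest occupancy (ties -> first face listed)."""
    occupancy = {face: 0 for face in fib_faces}
    for face in pending_outgoing_faces:
        if face in occupancy:
            occupancy[face] += 1
    return min(fib_faces, key=lambda face: occupancy[face])

# Mirrors the assertions above: with FIB faces [1, 2, 3] and interests already
# pending on faces 1 and 2, the next interest goes out on face 3.
assert choose_outgoing_face([1, 2, 3], [1, 2]) == 3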
|
from datetime import datetime
import hashlib
class ServerHelper(object):
def __init__(self):
pass
@staticmethod
def get_secret(args: list):
m = hashlib.md5()
message = "".join(args)
m.update(message.encode())
return m.hexdigest()
    def check_secret(self, secret, args):
        return self.get_secret(args) == secret
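# Illustrative usage (not part of the original class): the secret is just the MD5
# hex digest of the concatenated argument strings, so check_secret() recomputes and
# compares it. MD5 is not collision-resistant, so this is only a lightweight
# integrity/obfuscation check rather than real authentication.
if __name__ == "__main__":
    helper = ServerHelper()
    token = ServerHelper.get_secret(["user", "2021-01-01"])
    assert helper.check_secret(token, ["user", "2021-01-01"])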
|
class AuthenticationFailed(Exception):
"""Custom exception for authentication errors on web API level (i.e. when
hitting a Flask server endpoint).
"""
def __init__(self, error, status_code):
self.error = error
self.status_code = status_code
# Custom exceptions to be raised in GraphQL resolver functions
# cf. https://github.com/mirumee/ariadne/issues/339#issuecomment-604380881
# Re-use error codes proposed by Apollo-Server
# cf. https://www.apollographql.com/docs/apollo-server/data/errors/#error-codes
class UnknownResource(Exception):
extensions = {
"code": "INTERNAL_SERVER_ERROR",
"description": "This resource is not known",
}
class Forbidden(Exception):
def __init__(self, resource, value, user, *args, **kwargs):
self.extensions = {
"code": "FORBIDDEN",
"description": "You don't have access to the resource "
f"{resource}={value}",
"user": user,
}
super().__init__(*args, **kwargs)
class RequestedResourceNotFound(Exception):
extensions = {
"code": "BAD_USER_INPUT",
"description": "The requested resource does not exist in the database.",
}
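# Illustrative sketch (not part of the original module) of how these exceptions
# might be raised from a GraphQL resolver. The resolver, the in-memory `rooms`
# store and the `current_user` argument are hypothetical; following the Ariadne
# pattern linked above, an error formatter can then copy each exception's
# `extensions` dict into the GraphQL error response.
def resolve_room(obj, info, room_id, rooms=None, current_user=None):
    rooms = rooms or {}  # hypothetical store: {room_id: {"owner": <username>}}
    if room_id not in rooms:
        raise RequestedResourceNotFound()
    if current_user is None or rooms[room_id]["owner"] != current_user:
        raise Forbidden("room", room_id, current_user)
    return rooms[room_id]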
|
import matplotlib.pyplot as plt
def plot_test_result(test):
    # sort by epoch number so the curve is plotted in training order
    sorted_x = sorted(test, key=lambda kv: kv[0])
    x = [k[0] for k in sorted_x]
    y = [k[1] for k in sorted_x]
print(sorted_x)
sorted_k = sorted(test, key=lambda kv: kv[1])
print(sorted_k)
plt.plot(x, y)
plt.xlabel('epoch num')
plt.ylabel('domain mAP')
plt.title('test_result')
plt.show()
test = [(0, 0.49189779300424646), (1, 0.5138904387583882), (2, 0.536334691286777), (4, 0.5645421289016331), (3, 0.5658359516156337), (5, 0.5707727279143228), (6, 0.5729809018775144), (8, 0.5871151999965678), (7, 0.5907337688146128), (9, 0.5968492444634648), (10, 0.6080814978271548), (11, 0.6241844130311393), (12, 0.6284854817365658), (13, 0.6302649758341037), (16, 0.6397864472691053), (15, 0.6399975535300045), (18, 0.6405742179835137), (17, 0.6486698072446467), (14, 0.6534752561987729), (22, 0.6661780391057547), (42, 0.6673500216433375), (33, 0.6674027973738966), (23, 0.6674991527421578), (48, 0.6685768313347468), (19, 0.6690256113493928), (31, 0.6701044096424472), (47, 0.6702893382117315), (21, 0.6721327301262641), (34, 0.6722607267611632), (39, 0.6731835836014719), (28, 0.6732898732535876), (29, 0.6738632378669075), (27, 0.6738644463650251), (26, 0.6747462409127245), (45, 0.675343059268252), (46, 0.6754493009323576), (35, 0.6762345281109167), (20, 0.6762805875922873), (30, 0.6767393006679027), (44, 0.6768907584798227), (37, 0.677008915501073), (24, 0.6778242119082827), (49, 0.6787143534135133), (38, 0.6791027570047017), (36, 0.6795088227915387), (40, 0.6800957925286214), (25, 0.6817012226743485), (32, 0.6823109049155595), (43, 0.6844323451182417), (41, 0.6874981196941213)]
plot_test_result(test)
|
import torch
import numpy as np
def get_pad_mask(inputs):
"""Used to zero embeddings corresponding to [PAD] tokens before pooling BERT embeddings"""
inputs = inputs.tolist()
mask = np.ones_like(inputs)
for i in range(len(inputs)):
for j in range(len(inputs[i])):
if inputs[i][j] == 0:
mask[i][j] = 0
return torch.tensor(mask, dtype=torch.float)
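# A vectorized equivalent (illustrative, assuming `inputs` is a 2-D tensor of token
# ids where 0 is the [PAD] id): comparing against 0 directly avoids the Python-level
# double loop above and keeps the mask on the same device as the input.
def get_pad_mask_vectorized(inputs: torch.Tensor) -> torch.Tensor:
    return (inputs != 0).float()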
|
from flask import Flask
app = Flask(__name__)
@app.route('/users/<string:username>')
def hello_world(username='MyName'):
return("Hello {}!".format(username))
if __name__ == '__main__':
app.run()
|
import os
__all__ = [
'list_all_py_files',
]
_excludes = [
'tensorlayer/db.py',
]
def _list_py_files(root):
for root, _dirs, files in os.walk(root):
if root.find('third_party') != -1:
continue
for file in files:
if file.endswith('.py'):
yield os.path.join(root, file)
def list_all_py_files():
dirs = ['tensorlayer', 'tests', 'example']
for d in dirs:
for filename in _list_py_files(d):
if filename not in _excludes:
yield filename
|
from django.urls import path
from shiny_sheep.chat.api.views import RoomCreateView, RoomView
urlpatterns = [
path('', RoomCreateView.as_view()),
path('/<int:pk>/', RoomView.as_view()),
]
|
class Solution:
def makeLargestSpecial(self, S: str) -> str:
count = i = 0
res = []
for j, v in enumerate(S):
count = count + 1 if v=='1' else count - 1
if count == 0:
res.append('1' + self.makeLargestSpecial(S[i + 1:j]) + '0')
i = j + 1
return ''.join(sorted(res)[::-1])
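# Illustrative usage (LeetCode 761, "Special Binary String"): a special string has
# equal counts of '1' and '0', and every prefix has at least as many '1's as '0's.
# The recursion splits S into top-level special blocks of the form '1' + inner + '0',
# recursively maximizes each inner part, then concatenates the blocks in descending
# lexicographic order.
if __name__ == "__main__":
    assert Solution().makeLargestSpecial("11011000") == "11100100"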
|
#############################################################
##### Simulates a pseudo-Premier League season
##### Scoring controlled by 3 ratings per team
#####   Home team advantage limited to getting the first scoring chance in each interval
#############################################################
import sys
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
# random.seed(a=3) # For testing reproducibility
from operator import itemgetter
from scipy.optimize import curve_fit
##### Set up arrays for stats, etc.
# Fill all the initial ratings for teams, and their names
nteams = 20
teams = ("Powderdamp", "Loudemouth", "Borussia Albion", "Chumley United", "Aberabbit",
"Hilary", "Unicorn Castle", "Neverborough", "Baconbury", "Sputterstead Village",
"Moncester City", "Spleenwich", "Blokeham City", "Blokeham FC", "Geordie Abbey",
"Umbridge", "Kiddingshire Cold Pudding", "Bangerford", "North Chop United",
"Hopechester Munters")
abbrs = ("POW", "LOU", "BA", "CU", "ABE",
"HIL", "UC", "NEV", "BAC", "SV",
"MON", "SPL", "BC", "BFC", "GA",
"UMB", "KCP", "BAN", "NCU", "HM")
realt = ("ARS", "BOU", "BHA", "BUR", "CAR",
"CHE", "CP", "EVE", "FUL", "HUD",
"LC", "LIV", "MC", "MU", "NEW",
"SOU", "TOT", "WAT", "WHU", "WOL")
tcount = list(range(nteams))
tcolor1 = ("#C81B17", "#EF0107", "#0057B8", "#6C1D45", "#0070B5",
"#034694", "#1B458F", "#003399", "#CC0000", "#0E63AD",
"#003090", "#C8102E", "#6CABDD", "#DA291C", "#241F20",
"#130C0E", "#132257", "#FBEE23", "#7A263A", "#FDB913")
tcolor2 = ("#EFDBB2", "#DB0007", "#EEEEEE", "#99D6EA", "#D11524",
"#D1D3D4", "#C4122E", "#003399", "#000000", "#FFFFFF",
"#FDBE11", "#FFFFFF", "#1C2C5B", "#FBE122", "#41B6E6",
"#D71920", "#FFFFFF", "#ED2127", "#1BB1E7", "#231F20")
orate = (1, 2, 2, 3, 3, 1, 2, 3, 3, 3,
2, 1, 1, 2, 2, 2, 1, 2, 3, 2)
drate = (1, 3, 3, 3, 3, 1, 2, 1, 2, 3,
2, 1, 1, 1, 2, 3, 1, 3, 2, 3)
krate = (3, 2, 2, 1, 3, 2, 2, 2, 2, 3,
2, 2, 2, 1, 2, 2, 2, 3, 2, 2)
# Probabilities for scoring
shotp = (0.33, 0.37, 0.42, 0.49, 0.59) # index is (drate - orate + 2)
targp = (0.29, 0.33, 0.38) # index is (3 - orate)
keepp = (0.24, 0.31, 0.36) # index is (krate - 1)
pertmag = 0.06 # Allows for drift over time
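# Worked reading of the lookup tables above (illustrative): a strong attack
# (orate=1) against a weak defence (drate=3) uses shot index drate - orate + 2 = 4,
# i.e. a 0.59 chance of a shot per interval, while a strong keeper (krate=1)
# concedes an on-target shot with probability keepp[0] = 0.24.
assert shotp[3 - 1 + 2] == 0.59
assert keepp[1 - 1] == 0.24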
# Function fit thru probabilities: y = a - (b/k)*(1 - exp(-k*x))
def func_epl(x, a, b, k):
return a - b/k * (1 - np.exp(-k * x))
xdata = (0.0, 1.0, 2.0, 3.0, 4.0)
popt, pcov = curve_fit(func_epl, xdata, shotp)
co_shot = popt # a, b and k to best match shotp
xdata = (0.0, 1.0, 2.0)
popt, pcov = curve_fit(func_epl, xdata, targp)
co_targ = popt # a, b and k to best match targp
xdata = (0.0, 1.0, 2.0)
popt, pcov = curve_fit(func_epl, xdata, keepp)
co_keep = popt # a, b and k to best match keepp
#print(popt)
#bestfit = [0] * 3
#for i in range(0,3):
# bestfit[i] = func_epl(i, *popt)
#print(bestfit)
#sys.exit()
# Set up perturbations to team ratings that can evolve
# in a random Brownian manner thru the season
opert = [0] * 20
dpert = [0] * 20
kpert = [0] * 20
##### Generate schedule array
# Create empty arrays to house schedule
weeks = (nteams-1) * 2
games = int(nteams/2)
week_home = [[0] * games for i in range(weeks)]
week_away = [[0] * games for i in range(weeks)]
# Set up first week of pattern
week_home[0] = list(range(0, games))
week_away[0] = list(range(nteams-1, games-1, -1))
#print("Week: 1")
#print(week_home[0])
#print(week_away[0])
#print(" --- ")
# Loop thru weeks, keep team 0 in same slot, rotate others thru
for week in range(0, weeks-1):
# print(" ".join(["Week:",str(week + 2)]))
# Clear out the week's lists of teams
new_home, new_away = [], []
# new_away = []
# Set up the teams in each game by moving slots from previous week
# Game #1
new_home.append(week_home[week][0])
new_away.append(week_home[week][1])
# Games #2-#9
for game in range(1, games-1):
new_home.append(week_home[week][game+1])
new_away.append(week_away[week][game-1])
# Game #10
new_home.append(week_away[week][9])
new_away.append(week_away[week][8])
# print(new_home)
# print(new_away)
# print(" --- ")
# Place new week's pairings into the schedule matrix
week_home[week+1] = new_home
week_away[week+1] = new_away
# print(week_home)
# Invert home and away slots every other week
for week in range(0, weeks):
if week % 2 == 0:
new_away = week_home[week]
week_home[week] = week_away[week]
week_away[week] = new_away
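# Illustrative sanity check of the schedule built above (not invoked by the
# simulation): every pair of teams should meet exactly twice over the 38 weeks,
# and every team should appear in exactly one fixture each week.
def _check_schedule():
    from collections import Counter
    meetings = Counter()
    for w in range(weeks):
        assert sorted(week_home[w] + week_away[w]) == list(range(nteams))
        for g in range(games):
            meetings[frozenset((week_home[w][g], week_away[w][g]))] += 1
    assert len(meetings) == nteams * (nteams - 1) // 2
    assert all(count == 2 for count in meetings.values())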
##### Keep track of stats and standings
# Set up table
table_w = [0] * nteams
table_d = [0] * nteams
table_l = [0] * nteams
table_gf = [0] * nteams
table_ga = [0] * nteams
table_gd = [0] * nteams
table_pts = [0] * nteams
table_rank = [0] * nteams
# Keep track of evolution of stats through season for each team
weekly_rank = [[0] * nteams for i in range(weeks)]
weekly_pts = [[0] * nteams for i in range(weeks)]
last_five = [""] * nteams
################################################################
# Game play
# 30 3-minute intervals (15 per half)
# Each interval has a shot probability (shotp)
# If there is a shot, it has an on-target prob (targp)
# If shot is on goal, it has prob to get past the keeper (keepp)
print(" ")
# Loop through weeks
for week in range(0, weeks):
print("Week: ",int(week+1))
# Update perturbed ratings
ocafe = [sum(i) for i in zip(orate,opert)]
dcafe = [sum(i) for i in zip(drate,dpert)]
kcafe = [sum(i) for i in zip(krate,kpert)]
# Loop through games
# Shuffle sequence for fun
gseq = [i for i in range(games)]
random.shuffle(gseq)
for game in range(0, games):
# Set up bookkeeping for game
home_team = week_home[week][gseq[game]]
away_team = week_away[week][gseq[game]]
# home_shotp = shotp[int(drate[away_team]-orate[home_team]+2)]
home_shotp = func_epl(dcafe[away_team]-ocafe[home_team]+2, *co_shot)
# home_targp = targp[int(3-orate[home_team])]
home_targp = func_epl(3-ocafe[home_team], *co_targ)
# home_keepp = keepp[krate[away_team]-1]
home_keepp = func_epl(kcafe[away_team]-1, *co_keep)
# away_shotp = shotp[int(drate[home_team]-orate[away_team]+2)]
away_shotp = func_epl(dcafe[home_team]-ocafe[away_team]+2, *co_shot)
# away_targp = targp[int(3-orate[away_team])]
away_targp = func_epl(3-ocafe[away_team], *co_targ)
# away_keepp = keepp[krate[home_team]-1]
away_keepp = func_epl(kcafe[home_team]-1, *co_keep)
home_goals_string = abbrs[home_team] + ":"
away_goals_string = abbrs[away_team] + ":"
home_shotc, home_targc, home_score = 0, 0, 0
away_shotc, away_targc, away_score = 0, 0, 0
print(" ")
print("Game",int(game+1),"-",teams[home_team],"vs",teams[away_team])
# Loop through the time intervals
for interval in range(0, 30):
cheer = 0
# Does home team score? (Having first chance is home field advantage)
if (random.random() < home_shotp):
home_shotc += 1
if (random.random() < home_targp):
home_targc += 1
if (random.random() < home_keepp):
home_score += 1
cheer = 1
goal_time = int(random.uniform(0,3))+interval*3+1
home_goals_string += " " + str(goal_time) + "'"
# print(f"{teams[home_team]} @ {goal_time}'")
# If not, does away team score?
if (random.random() < away_shotp) and (cheer == 0):
away_shotc += 1
if (random.random() < away_targp):
away_targc += 1
if (random.random() < away_keepp):
away_score += 1
goal_time = int(random.uniform(0,3))+interval*3+1
away_goals_string += " " + str(goal_time) + "'"
# print(f"{teams[away_team]} @ {goal_time}'")
print("FT:",teams[home_team],home_score,"-",away_score,teams[away_team])
if (home_score > 0):
if (away_score > 0):
print("> " + home_goals_string + "; " + away_goals_string)
else:
print("> " + home_goals_string)
else:
if (away_score > 0):
print("> " + away_goals_string)
else:
print("> ")
##### Match over - update table stats
if (home_score > away_score):
table_w[home_team] += 1
last_five[home_team] = "W" + last_five[home_team]
if (len(last_five[home_team]) > 5):
last_five[home_team] = last_five[home_team][:5]
table_l[away_team] += 1
last_five[away_team] = "L" + last_five[away_team]
if (len(last_five[away_team]) > 5):
last_five[away_team] = last_five[away_team][:5]
elif (home_score < away_score):
table_w[away_team] += 1
last_five[away_team] = "W" + last_five[away_team]
if (len(last_five[away_team]) > 5):
last_five[away_team] = last_five[away_team][:5]
table_l[home_team] += 1
last_five[home_team] = "L" + last_five[home_team]
if (len(last_five[home_team]) > 5):
last_five[home_team] = last_five[home_team][:5]
else:
table_d[home_team] += 1
last_five[home_team] = "D" + last_five[home_team]
if (len(last_five[home_team]) > 5):
last_five[home_team] = last_five[home_team][:5]
table_d[away_team] += 1
last_five[away_team] = "D" + last_five[away_team]
if (len(last_five[away_team]) > 5):
last_five[away_team] = last_five[away_team][:5]
table_gf[home_team] += home_score
table_ga[away_team] += home_score
table_gf[away_team] += away_score
table_ga[home_team] += away_score
table_gd[home_team] = table_gf[home_team] - table_ga[home_team]
table_gd[away_team] = table_gf[away_team] - table_ga[away_team]
table_pts[home_team] = 3 * table_w[home_team] + table_d[home_team]
table_pts[away_team] = 3 * table_w[away_team] + table_d[away_team]
print(" ")
print("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-")
#############################################################
    ##### Week over - update the table and print it
table_tup = zip(teams,table_w,table_d,table_l,table_pts,table_gd,last_five,tcount)
    # Two-pass stable sort: by goal difference first, then by points (points primary, GD as tiebreaker)
s = sorted(table_tup, key=itemgetter(5), reverse=True)
table_sort = sorted(s, key=itemgetter(4), reverse=True)
t_team,t_win,t_draw,t_loss,t_pts,t_gd,t_l5,t_tc = zip(*table_sort)
# Print table
print(" ")
print(" Team W D L Pts Dif")
for x in range(0, nteams):
print("{0:2} {1:25} {2:2d} {3:2d} {4:2d} {5:3d} {6:+3d} {7}".format(x+1,t_team[x],t_win[x],t_draw[x],t_loss[x],t_pts[x],t_gd[x], t_l5[x]))
weekly_rank[week][t_tc[x]] = x+1
weekly_pts[week][t_tc[x]] = t_pts[x]
print(" ")
# print(weekly_rank[week])
# dummy = input(" ")
# Update perturbations to team ratings
for x in range(0, nteams):
opert[x] += random.uniform(-pertmag,pertmag)
dpert[x] += random.uniform(-pertmag,pertmag)
kpert[x] += random.uniform(-pertmag,pertmag)
#
# sys.exit()
print(" ")
##### Plot a graph of season's week-by-week team performance
pmax = max(weekly_pts[37])
vtint = 0.05 * (pmax - 25)
plt.xlim((0, 38.5))
plt.ylim((-2, pmax+4))
plt.tick_params(axis='y',left='on',right='on',labelleft='on',labelright='on')
plt.xticks([1, 5, 10, 15, 20, 25, 30, 35, 38])
plt.vlines(19.5,-2,pmax+4,colors="#404040")
for t in range(0, nteams):
pseries = [x[t] for x in weekly_pts]
df=pd.DataFrame({'x': range(1,39), 'y': pseries })
plt.plot('x','y',data=df,color=tcolor2[t],linestyle='-',linewidth=3)
plt.plot('x','y',data=df,color=tcolor1[t],linestyle='--',linewidth=2)
plt.text(3.26,pmax-0.04-vtint*(weekly_rank[37][t]-1),abbrs[t],verticalalignment='center',horizontalalignment='right',fontsize=11,color=tcolor2[t],weight='normal')
plt.text(3.3,pmax-vtint*(weekly_rank[37][t]-1),abbrs[t],verticalalignment='center',horizontalalignment='right',fontsize=11,color=tcolor1[t],weight='normal')
plt.text(6.96,pmax-0.04-vtint*(weekly_rank[37][t]-1),table_pts[t],verticalalignment='center',horizontalalignment='left',fontsize=11,color=tcolor2[t],weight='normal')
plt.text(7.0,pmax-vtint*(weekly_rank[37][t]-1),table_pts[t],verticalalignment='center',horizontalalignment='left',fontsize=11,color=tcolor1[t],weight='normal')
plt.plot([4.00,6.33],[pmax-vtint*(weekly_rank[37][t]-1),pmax-vtint*(weekly_rank[37][t]-1)],color=tcolor2[t],linestyle='-',linewidth=3)
plt.plot([4.00,6.33],[pmax-vtint*(weekly_rank[37][t]-1),pmax-vtint*(weekly_rank[37][t]-1)],color=tcolor1[t],linestyle='--',linewidth=2)
plt.ylabel('Points')
plt.xlabel('Week')
plt.title('Week-by-Week Progression')
# plt.legend()
# plt.figure(figsize=(11,8.5))
plt.show()
|
#!/usr/bin/env python3
# Copyright (C) 2019 Canonical Ltd.
import nagios_plugin3
import yaml
from subprocess import check_output
snap_resources = ['kubectl', 'kubelet', 'kube-proxy']
def check_snaps_installed():
"""Confirm the snaps are installed, raise an error if not"""
for snap_name in snap_resources:
cmd = ['snap', 'list', snap_name]
try:
check_output(cmd).decode('UTF-8')
except Exception:
msg = '{} snap is not installed'.format(snap_name)
raise nagios_plugin3.CriticalError(msg)
def check_node(node):
checks = [{'name': 'MemoryPressure',
'expected': 'False',
'type': 'warn',
'error': 'Memory Pressure'},
{'name': 'DiskPressure',
'expected': 'False',
'type': 'warn',
'error': 'Disk Pressure'},
{'name': 'PIDPressure',
'expected': 'False',
'type': 'warn',
'error': 'PID Pressure'},
{'name': 'Ready',
'expected': 'True',
'type': 'error',
'error': 'Node Not Ready'}]
msg = []
error = False
for check in checks:
# find the status that matches
for s in node['status']['conditions']:
if s['type'] == check['name']:
# does it match expectations? If not, toss it on the list
# of errors so we don't show the first issue, but all.
if s['status'].lower() != check['expected'].lower():
msg.append(check['error'])
if check['type'] == 'error':
error = True
else:
break
else:
err_msg = 'Unable to find status for {}'.format(check['error'])
raise nagios_plugin3.CriticalError(err_msg)
    if msg:
        if error:
            raise nagios_plugin3.CriticalError(', '.join(msg))
        else:
            raise nagios_plugin3.WarnError(', '.join(msg))
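# Illustrative shape (assuming standard `kubectl get no -o yaml` output) of a node
# mapping as consumed by check_node(); only status.conditions is inspected here.
_example_node = {
    'metadata': {'name': 'juju-node-0'},
    'status': {'conditions': [
        {'type': 'MemoryPressure', 'status': 'False'},
        {'type': 'DiskPressure', 'status': 'False'},
        {'type': 'PIDPressure', 'status': 'False'},
        {'type': 'Ready', 'status': 'True'},
    ]},
}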
def verify_node_registered_and_ready():
try:
cmd = "/snap/bin/kubectl --kubeconfig /var/lib/nagios/.kube/config" \
" get no -o=yaml"
        y = yaml.safe_load(check_output(cmd.split()))
except Exception:
raise nagios_plugin3.CriticalError("Unable to run kubectl "
"and parse output")
for node in y['items']:
if node['metadata']['name'] == '{{node_name}}':
check_node(node)
return
else:
raise nagios_plugin3.CriticalError("Unable to find "
"node registered on API server")
def main():
nagios_plugin3.try_check(check_snaps_installed)
nagios_plugin3.try_check(verify_node_registered_and_ready)
print("OK - No memory, disk, or PID pressure. Registered with API server")
if __name__ == "__main__":
main()
|
#! /usr/bin/python
import sys
import common
def init_file(prefix):
if not prefix:
return
out_fd = open(prefix + ".h", "wt")
out_fd.write("#ifndef " + "_" + common.cmn_trans_underline(prefix).upper() + "_" + "\n")
out_fd.write("#define " + "_" + common.cmn_trans_underline(prefix).upper() + "_" + "\n")
out_fd.write("\n")
out_fd.write("#include <stdio.h>\n")
out_fd.write("\n")
out_fd.close()
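if __name__ == "__main__":
    # Illustrative invocation (assuming common.cmn_trans_underline simply normalizes
    # the prefix to an underscore-separated token): this writes foo_bar.h whose first
    # lines are "#ifndef _FOO_BAR_", "#define _FOO_BAR_" and "#include <stdio.h>".
    # The matching #endif is presumably emitted by a later generation step not shown here.
    init_file("foo_bar")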
|
import torch
from torch import nn, optim
import torchsolver as ts
class Discriminator(nn.Module):
def __init__(self, in_c=784, channels=(200,), leaky=0.02, layer_norm=nn.LayerNorm):
super(Discriminator, self).__init__()
layers = []
for out_c in channels:
layers.append(nn.Linear(in_c, out_c))
layers.append(nn.LeakyReLU(leaky))
if layer_norm is not None:
layers.append(layer_norm(out_c))
in_c = out_c
self.layers = nn.Sequential(*layers)
self.fc = nn.Linear(in_c, 1)
def forward(self, x):
x = self.layers(x)
x = self.fc(x)
return torch.sigmoid(x)
class Generator(nn.Module):
def __init__(self, z_dim=100, out_c=784, channels=(200,), leaky=0.02, layer_norm=nn.LayerNorm):
super(Generator, self).__init__()
layers = []
for in_c in channels:
layers.append(nn.Linear(z_dim, in_c))
layers.append(nn.LeakyReLU(leaky))
if layer_norm is not None:
layers.append(layer_norm(in_c))
z_dim = in_c
self.layers = nn.Sequential(*layers)
self.fc = nn.Linear(z_dim, out_c)
def forward(self, x):
x = self.layers(x)
x = self.fc(x)
return torch.tanh(x)
class GANNet(ts.GANModule):
def __init__(self, z_dim=100, out_c=784, d_channels=(200,), g_channels=(200,), **kwargs):
super(GANNet, self).__init__(**kwargs)
self.z_dim = z_dim
self.g_net = Generator(z_dim=z_dim, out_c=out_c, channels=g_channels)
self.d_net = Discriminator(in_c=out_c, channels=d_channels)
self.loss = nn.BCELoss()
self.g_optimizer = optim.Adam(self.g_net.parameters())
self.d_optimizer = optim.Adam(self.d_net.parameters())
def forward_d(self, img, label):
N = img.size(0)
real_label = torch.ones(N, 1, device=self.device)
fake_label = torch.zeros(N, 1, device=self.device)
# compute loss of real_img
real_out = self.d_net(img.flatten(1))
loss_real = self.loss(real_out, real_label)
real_score = (real_out > 0.5).float()
# compute loss of fake_img
z = torch.randn(N, self.z_dim, device=self.device)
fake_img = self.g_net(z)
fake_out = self.d_net(fake_img)
loss_fake = self.loss(fake_out, fake_label)
fake_score = (fake_out < 0.5).float()
d_loss = loss_real + loss_fake
d_score = torch.cat([real_score, fake_score], dim=0).mean()
return d_loss, {"d_loss": float(d_loss), "d_score": float(d_score)}
def forward_g(self, img, label):
N = img.size(0)
real_label = torch.ones(N, 1, device=self.device)
# compute loss of fake_img
z = torch.randn(N, self.z_dim, device=self.device)
fake_img = self.g_net(z)
fake_out = self.d_net(fake_img)
g_loss = self.loss(fake_out, real_label)
g_score = (fake_out > 0.5).float().mean()
return g_loss, {"g_loss": float(g_loss), "g_score": float(g_score)}
@torch.no_grad()
def sample(self, n, img_size=(1, 28, 28)):
z = torch.randn(n, self.z_dim, device=self.device)
img = self.g_net(z)
img = (img + 1) / 2.
img = torch.clamp(img, 0, 1)
return img.view(img.size(0), *img_size)
def val_epoch(self, epoch, *args):
img = self.sample(32)
self.logger.add_images("val/sample", img, global_step=epoch)
self.logger.flush()
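# Quick shape check (illustrative, not part of the original module): with the
# defaults above, the generator maps a batch of latent vectors (N, 100) to
# flattened images (N, 784) in [-1, 1], and the discriminator maps (N, 784) to
# probabilities of shape (N, 1).
if __name__ == "__main__":
    z = torch.randn(4, 100)
    fake = Generator()(z)
    prob = Discriminator()(fake)
    assert fake.shape == (4, 784) and prob.shape == (4, 1)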
|
'''
afhsb_csv.py creates the CSV files filled_00to13.csv, filled_13to17.csv and simple_DMISID_FY2018.csv,
which will later be used to create MySQL data tables.
Several intermediate files will be created, including:
00to13.pickle 13to17.pickle 00to13.csv 13to17.csv
Required source files:
ili_1_2000_5_2013_new.sas7bdat and ili_1_2013_11_2017_new.sas7bdat under SOURCE_DIR
country_codes.csv and DMISID_FY2018.csv under TARGET_DIR
All intermediate files and final csv files will be stored in TARGET_DIR
'''
import csv
import os
import sas7bdat
import pickle
import epiweeks as epi
DATAPATH = '/home/automation/afhsb_data'
SOURCE_DIR = DATAPATH
TARGET_DIR = DATAPATH
INVALID_DMISIDS = set()
def get_flu_cat(dx):
    # Map a diagnosis code (ICD-9 numeric or ICD-10 alphanumeric) to a flu category:
    # 1 = influenza-specific codes, 2 = pneumonia-range codes, 3 = broader respiratory/ILI codes,
    # None = not flu-related or empty.
    if (len(dx) == 0): return None
dx = dx.capitalize()
if (dx.isnumeric()):
for prefix in ["487", "488"]:
if (dx.startswith(prefix)): return 1
for i in range(0, 7):
prefix = str(480 + i)
if (dx.startswith(prefix)): return 2
for i in range(0, 7):
prefix = str(460 + i)
if (dx.startswith(prefix)): return 3
for prefix in ["07999", "3829", "7806", "7862"]:
if (dx.startswith(prefix)): return 3
elif (dx[0].isalpha() and dx[1:].isnumeric()):
for prefix in ["J09", "J10", "J11"]:
if (dx.startswith(prefix)): return 1
for i in range(12, 19):
prefix = "J{}".format(i)
if (dx.startswith(prefix)): return 2
for i in range(0, 7):
prefix = "J0{}".format(i)
if (dx.startswith(prefix)): return 3
for i in range(20, 23):
prefix = "J{}".format(i)
if (dx.startswith(prefix)): return 3
for prefix in ["J40", "R05", "H669", "R509", "B9789"]:
if (dx.startswith(prefix)): return 3
else:
return None
def aggregate_data(sourcefile, targetfile):
reader = sas7bdat.SAS7BDAT(os.path.join(SOURCE_DIR, sourcefile), skip_header=True)
# map column names to column indices
COL2IDX = {column.name.decode('utf-8'): column.col_id for column in reader.columns}
def get_field(row, column): return row[COL2IDX[column]]
def row2flu(row):
for i in range(1, 9):
dx = get_field(row, "dx{}".format(i))
flu_cat = get_flu_cat(dx)
if (flu_cat != None): return flu_cat
return 0
def row2epiweek(row):
date = get_field(row, 'd_event')
year, month, day = date.year, date.month, date.day
week_tuple = epi.Week.fromdate(year, month, day).weektuple()
year, week_num = week_tuple[0], week_tuple[1]
return year, week_num
results_dict = dict()
for r, row in enumerate(reader):
# if (r >= 1000000): break
if (get_field(row, 'type') != "Outpt"): continue
year, week_num = row2epiweek(row)
dmisid = get_field(row, 'DMISID')
flu_cat = row2flu(row)
key_list = [year, week_num, dmisid, flu_cat]
curr_dict = results_dict
for i, key in enumerate(key_list):
if (i == len(key_list) - 1):
if (not key in curr_dict): curr_dict[key] = 0
curr_dict[key] += 1
else:
if (not key in curr_dict): curr_dict[key] = dict()
curr_dict = curr_dict[key]
results_path = os.path.join(TARGET_DIR, targetfile)
with open(results_path, 'wb') as f:
pickle.dump(results_dict, f, pickle.HIGHEST_PROTOCOL)
return
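# Illustrative shape (invented numbers) of the nested dict pickled by aggregate_data():
# year -> week -> dmisid -> flu category -> outpatient visit count.
_example_results = {2009: {40: {'0124': {0: 17, 1: 3}}}}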
################# Functions for geographical information ####################
def get_country_mapping():
filename = "country_codes.csv"
mapping = dict()
with open(os.path.join(TARGET_DIR, filename), "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
alpha2 = row['alpha-2']
alpha3 = row['alpha-3']
mapping[alpha2] = alpha3
return mapping
def format_dmisid_csv(filename, target_name):
src_path = os.path.join(TARGET_DIR, "{}.csv".format(filename))
dst_path = os.path.join(TARGET_DIR, target_name)
src_csv = open(src_path, "r", encoding='utf-8-sig')
reader = csv.DictReader(src_csv)
dst_csv = open(dst_path, "w")
fieldnames = ['dmisid', 'country', 'state', 'zip5']
writer = csv.DictWriter(dst_csv, fieldnames=fieldnames)
writer.writeheader()
country_mapping = get_country_mapping()
for row in reader:
country2 = row['Facility ISO Country Code']
if (country2 == ""): country3 = ""
elif (not country2 in country_mapping):
for key in row.keys(): print(key, row[key])
continue
else:
country3 = country_mapping[country2]
new_row = {'dmisid': row['DMIS ID'],
'country': country3,
'state': row['Facility State Code'],
'zip5': row['Facility 5-Digit ZIP Code']}
writer.writerow(new_row)
def dmisid():
filename = 'DMISID_FY2018'
target_name = "simple_DMISID_FY2018.csv"
format_dmisid_csv(filename, target_name)
cen2states = {'cen1': {'CT', 'ME', 'MA', 'NH', 'RI', 'VT'},
'cen2': {'NJ', 'NY', 'PA'},
'cen3': {'IL', 'IN', 'MI', 'OH', 'WI'},
'cen4': {'IA', 'KS', 'MN', 'MO', 'NE', 'ND', 'SD'},
'cen5': {'DE', 'DC', 'FL', 'GA', 'MD', 'NC', 'SC', 'VA', 'WV'},
'cen6': {'AL', 'KY', 'MS', 'TN'},
'cen7': {'AR', 'LA', 'OK', 'TX'},
'cen8': {'AZ', 'CO', 'ID', 'MT', 'NV', 'NM', 'UT', 'WY'},
'cen9': {'AK', 'CA', 'HI', 'OR', 'WA'}}
hhs2states = {'hhs1': {'VT', 'CT', 'ME', 'MA', 'NH', 'RI'},
'hhs2': {'NJ', 'NY'},
'hhs3': {'DE', 'DC', 'MD', 'PA', 'VA', 'WV'},
'hhs4': {'AL', 'FL', 'GA', 'KY', 'MS', 'NC', 'TN', 'SC'},
'hhs5': {'IL', 'IN', 'MI', 'MN', 'OH', 'WI'},
'hhs6': {'AR', 'LA', 'NM', 'OK', 'TX'},
'hhs7': {'IA', 'KS', 'MO', 'NE'},
'hhs8': {'CO', 'MT', 'ND', 'SD', 'UT', 'WY'},
'hhs9': {'AZ', 'CA', 'HI', 'NV'},
'hhs10': {'AK', 'ID', 'OR', 'WA'}}
def state2region(D):
results = dict()
for region in D.keys():
states = D[region]
for state in states:
assert(not state in results)
results[state] = region
return results
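# Illustrative check (not part of the original pipeline) of the inversion done by
# state2region(): each state maps back to the region whose set contains it.
assert state2region(hhs2states)['CA'] == 'hhs9'
assert state2region(cen2states)['CA'] == 'cen9'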
def state2region_csv():
to_hhs = state2region(hhs2states)
to_cen = state2region(cen2states)
states = to_hhs.keys()
target_name = "state2region.csv"
fieldnames = ['state', 'hhs', 'cen']
with open(target_name, "w") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for state in states:
content = {"state": state, "hhs": to_hhs[state], "cen": to_cen[state]}
writer.writerow(content)
################# Functions for geographical information ####################
######################### Functions for AFHSB data ##########################
def write_afhsb_csv(period):
flu_mapping = {0: "ili-flu3", 1: "flu1", 2:"flu2-flu1", 3: "flu3-flu2"}
results_dict = pickle.load(open(os.path.join(TARGET_DIR, "{}.pickle".format(period)), 'rb'))
fieldnames = ["id", "epiweek", "dmisid", "flu_type", "visit_sum"]
with open(os.path.join(TARGET_DIR, "{}.csv".format(period)), 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
i = 0
for year in sorted(results_dict.keys()):
year_dict = results_dict[year]
for week in sorted(year_dict.keys()):
week_dict = year_dict[week]
for dmisid in sorted(week_dict.keys()):
dmisid_dict = week_dict[dmisid]
for flu in sorted(dmisid_dict.keys()):
visit_sum = dmisid_dict[flu]
i += 1
epiweek = int("{}{:02d}".format(year, week))
flu_type = flu_mapping[flu]
row = {"epiweek": epiweek, "dmisid": None if (not dmisid.isnumeric()) else dmisid,
"flu_type": flu_type, "visit_sum": visit_sum, "id": i}
writer.writerow(row)
if (i % 100000 == 0): print(row)
def dmisid_start_time_from_file(filename):
starttime_record = dict()
with open(filename, 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
dmisid = row['dmisid']
epiweek = int(row['epiweek'])
if (not dmisid in starttime_record):
starttime_record[dmisid] = epiweek
else:
starttime_record[dmisid] = min(epiweek, starttime_record[dmisid])
return starttime_record
def dmisid_start_time():
record1 = dmisid_start_time_from_file(os.path.join(TARGET_DIR, "00to13.csv"))
record2 = dmisid_start_time_from_file(os.path.join(TARGET_DIR, "13to17.csv"))
record = record1
for dmisid, epiweek in record2.items():
if (dmisid in record):
record[dmisid] = min(record[dmisid], epiweek)
else:
record[dmisid] = epiweek
return record
def fillin_zero_to_csv(period, dmisid_start_record):
src_path = os.path.join(TARGET_DIR, "{}.csv".format(period))
dst_path = os.path.join(TARGET_DIR, "filled_{}.csv".format(period))
# Load data into a dictionary
src_csv = open(src_path, "r")
reader = csv.DictReader(src_csv)
results_dict = dict() # epiweek -> dmisid -> flu_type: visit_sum
for i, row in enumerate(reader):
epiweek = int(row['epiweek'])
dmisid = row['dmisid']
flu_type = row['flu_type']
visit_sum = row['visit_sum']
if (not epiweek in results_dict):
results_dict[epiweek] = dict()
week_dict = results_dict[epiweek]
if (not dmisid in week_dict):
week_dict[dmisid] = dict()
dmisid_dict = week_dict[dmisid]
dmisid_dict[flu_type] = visit_sum
# Fill in zero count records
dmisid_group = dmisid_start_record.keys()
flutype_group = ["ili-flu3", "flu1", "flu2-flu1", "flu3-flu2"]
for epiweek in results_dict.keys():
week_dict = results_dict[epiweek]
for dmisid in dmisid_group:
start_week = dmisid_start_record[dmisid]
if (start_week > epiweek): continue
if (not dmisid in week_dict):
week_dict[dmisid] = dict()
dmisid_dict = week_dict[dmisid]
for flutype in flutype_group:
if (not flutype in dmisid_dict):
dmisid_dict[flutype] = 0
# Write to csv files
dst_csv = open(dst_path, "w")
fieldnames = ["id", "epiweek", "dmisid", "flu_type", "visit_sum"]
writer = csv.DictWriter(dst_csv, fieldnames=fieldnames)
writer.writeheader()
i = 1
for epiweek in results_dict:
for dmisid in results_dict[epiweek]:
for flutype in results_dict[epiweek][dmisid]:
visit_sum = results_dict[epiweek][dmisid][flutype]
row = {"id": i, "epiweek": epiweek, "dmisid": dmisid,
"flu_type": flutype, "visit_sum": visit_sum}
writer.writerow(row)
if (i % 100000 == 0):
print(row)
i += 1
print("Wrote {} rows".format(i))
######################### Functions for AFHSB data ##########################
def main():
# Build tables containing geographical information
state2region_csv()
dmisid()
# Aggregate raw data into pickle files
aggregate_data("ili_1_2000_5_2013_new.sas7bdat", "00to13.pickle")
aggregate_data("ili_1_2013_11_2017_new.sas7bdat", "13to17.pickle")
# write pickle content to csv files
write_afhsb_csv("00to13")
write_afhsb_csv("13to17")
# Fill in zero count records
dmisid_start_record = dmisid_start_time()
fillin_zero_to_csv("00to13", dmisid_start_record)
fillin_zero_to_csv("13to17", dmisid_start_record)
if __name__ == '__main__':
main()
|
import os
import numpy as np
import cv2
from Lane import Line
import parameter
import helper
import matplotlib.pyplot as plt
from moviepy.editor import VideoFileClip
def find_lane_pixels(binary_warped):
global LeftLane, RightLane
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
if not LeftLane.detected or not RightLane.detected:
print("using hist method")
# Take a histogram of the bottom half of the image
size = binary_warped.shape
histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
# Create an output image to draw on and visualize the result
        midpoint = int(histogram.shape[0] // 2)
        # Set height of windows - based on nwindows above and image shape
        window_height = int(binary_warped.shape[0] // parameter.nwindows)
# Visualize the resulting histogram
#plt.plot(histogram)
#plt.show()
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(parameter.nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window + 1) * window_height
win_y_high = binary_warped.shape[0] - window * window_height
if not LeftLane.detected:
win_xleft_low = leftx_current - parameter.margin
win_xleft_high = leftx_current + parameter.margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_xleft_low, win_y_low),
(win_xleft_high, win_y_high), (0, 255, 0), 2)
# Identify the nonzero pixels in x and y within the window ###
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
if len(good_left_inds) > parameter.minpix:
                    leftx_current = int(np.mean(nonzerox[good_left_inds]))
if not RightLane.detected:
win_xright_low = rightx_current - parameter.margin
win_xright_high = rightx_current + parameter.margin
cv2.rectangle(out_img, (win_xright_low, win_y_low),
(win_xright_high, win_y_high), (0, 255, 0), 2)
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
right_lane_inds.append(good_right_inds)
if len(good_right_inds) > parameter.minpix:
                    rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
left_lane_inds = np.concatenate(left_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
try:
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
if LeftLane.detected:
print("leftlane detected")
left_fit = LeftLane.best_fit
# Set the area of search based on activated x-values #
# within the +/- margin of our polynomial function #
left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy +
left_fit[2] - parameter.margin))
& (nonzerox < (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy +
left_fit[2] + parameter.margin)))
if RightLane.detected:
print("rightlane detected")
right_fit = RightLane.best_fit
right_lane_inds = ((nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy +
right_fit[2] - parameter.margin))
& (nonzerox < (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy +
right_fit[2] + parameter.margin)))
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
LeftLane.allx = leftx
LeftLane.ally = lefty
return leftx, lefty, rightx, righty, out_img
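# Illustrative note (not part of the pipeline): when a lane was already detected,
# the search above keeps only pixels whose x lies within +/- parameter.margin of
# the previous second-order fit x = A*y**2 + B*y + C evaluated at each pixel's y.
# A minimal standalone version of that band test:
def _within_fit_band(x, y, fit, margin):
    fitted_x = fit[0] * y ** 2 + fit[1] * y + fit[2]
    return (x > fitted_x - margin) & (x < fitted_x + margin)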
def fit_polynomial(binary_warped):
# Find our lane pixels first
leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)
# Fit a second order polynomial to each using `np.polyfit` ###
left_fit = None
right_fit = None
if len(leftx) > 0 and len(lefty) > 0:
left_fit = np.polyfit(lefty, leftx, 2)
if len(rightx) > 0 and len(righty) > 0:
right_fit = np.polyfit(righty, rightx, 2)
LeftLane.update_best_fit(left_fit)
RightLane.update_best_fit(right_fit)
if LeftLane.best_fit is None or RightLane.best_fit is None:
print("retry to find lane pixels")
# Find our lane pixels first
leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)
        # Fit a second order polynomial to each using `np.polyfit`, guarding against empty pixel sets
        if len(leftx) > 0 and len(lefty) > 0:
            left_fit = np.polyfit(lefty, leftx, 2)
        if len(rightx) > 0 and len(righty) > 0:
            right_fit = np.polyfit(righty, rightx, 2)
        LeftLane.update_best_fit(left_fit)
        RightLane.update_best_fit(right_fit)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
try:
left_fitx = LeftLane.best_fit[0] * ploty ** 2 + LeftLane.best_fit[1] * ploty + LeftLane.best_fit[2]
right_fitx = RightLane.best_fit[0] * ploty ** 2 + RightLane.best_fit[1] * ploty + RightLane.best_fit[2]
except TypeError:
        # Avoids an error if `left_fit` and `right_fit` are still None or incorrect
print('The function failed to fit a line!')
left_fitx = 1 * ploty ** 2 + 1 * ploty
right_fitx = 1 * ploty ** 2 + 1 * ploty
# # Visualization #
# # Colors in the left and right lane regions
# window_img = np.zeros_like(out_img)
# out_img[lefty, leftx] = [255, 0, 0]
# out_img[righty, rightx] = [0, 0, 255]
#
# # Plots the left and right polynomials on the lane lines
# plt.plot(left_fitx, ploty, color='yellow')
# plt.plot(right_fitx, ploty, color='yellow')
#
# # Visualization for search from prior#
# # Generate a polygon to illustrate the search window area
# # And recast the x and y points into usable format for cv2.fillPoly()
# left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - parameter.margin, ploty]))])
# left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + parameter.margin,
# ploty])))])
# left_line_pts = np.hstack((left_line_window1, left_line_window2))
# right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - parameter.margin, ploty]))])
# right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + parameter.margin,
# ploty])))])
# right_line_pts = np.hstack((right_line_window1, right_line_window2))
#
# # Draw the lane onto the warped blank image
# cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
# cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
# result = cv2.addWeighted(out_img, 1, window_img, 0.0, 0)
#
#
#
# # Plot the polynomial lines onto the image
# plt.plot(left_fitx, ploty, color='yellow')
# plt.plot(right_fitx, ploty, color='yellow')
# plt.imshow(out_img)
# plt.show()
# # End visualization for search from prior #
# todo plausibility check
#LeftLane.detected = True
#RightLane.detected = True
return left_fitx, right_fitx, ploty
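# --- Illustrative sketch (not called by the pipeline above) -----------------
# fit_polynomial() fits x = A*y**2 + B*y + C to the detected lane pixels, i.e.
# x as a function of y, because lanes are close to vertical in the warped view.
# A minimal standalone example of that fit/evaluation step; the sample points
# below are made up purely for illustration.
def _demo_second_order_fit():
    ys = np.array([0, 180, 360, 540, 719], dtype=np.float64)
    xs = np.array([300, 310, 330, 360, 400], dtype=np.float64)
    fit = np.polyfit(ys, xs, 2)  # coefficients [A, B, C]
    ploty = np.linspace(0, 719, 720)
    fitx = fit[0] * ploty ** 2 + fit[1] * ploty + fit[2]
    return fit, fitx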
def create_combined_binary(img, s_thresh=(170, 255)):
global count
img = np.copy(img)
r_channel = img[:, :, 0]
# Convert to HLS color space and separate the V channel
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
l_channel = hls[:, :, 1]
s_channel = hls[:, :, 2]
imgHLS = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
yellow_low = np.array([0, 100, 100])
yellow_high = np.array([50, 255, 255])
yellow_binary_output = np.zeros((img.shape[0], img.shape[1]))
yellow_binary_output[(imgHLS[:, :, 0] >= yellow_low[0]) & (imgHLS[:, :, 0] <= yellow_high[0]) & (imgHLS[:, :, 1] >= yellow_low[1]) & (
imgHLS[:, :, 1] <= yellow_high[1]) & (imgHLS[:, :, 2] >= yellow_low[2]) & (
imgHLS[:, :, 2] <= yellow_high[2])] = 1
    ksize = 11
gradx = helper.abs_sobel_thresh(l_channel, orient='x', sobel_kernel=ksize, thresh=(2, 50))
grady = helper.abs_sobel_thresh(l_channel, orient='y', sobel_kernel=ksize, thresh=(2, 50))
mag_binary = helper.mag_thresh(l_channel, sobel_kernel=ksize, mag_thresh=(20, 100))
dir_binary = helper.dir_threshold(l_channel, sobel_kernel=ksize, thresh=(0.5, 1.0))
# Threshold color channel
s_binary2 = np.zeros_like(s_channel)
s_binary2[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
r_binarymax = np.zeros_like(r_channel)
r_binarymax[(r_channel >= 220) & (r_channel <= 255)] = 1
r_binarymin = np.zeros_like(r_channel)
r_binarymin[(r_channel >= 10) & (r_channel <= 255)] = 1
combined = np.zeros_like(dir_binary)
combined2 = np.zeros_like(dir_binary)
combined[(((gradx == 1) & (grady == 1) & (dir_binary == 1) & (mag_binary == 1) & (r_binarymin == 1) & (s_binary2 == 1)) |
(r_binarymax == 1) | (yellow_binary_output == 1))] = 255
combined2[((gradx == 1) & (grady == 1) & (dir_binary == 1) & (mag_binary == 1) & (r_binarymin == 1)& (s_binary2 == 1))] = 255
# combined[(((gradx == 1) & (grady == 1) & (dir_binary == 1)) | ((mag_binary == 1) & (dir_binary == 1))) | (
# (s_binary2 == 1) | (r_binarymax == 1))] = 255
# combined[(((gradx == 1) & (grady == 1) & (dir_binary == 1) & (r_binary2 == 1))
# | ((s_binary2 == 1) & (dir_binary == 1)))] = 255
#cv2.imshow("r2", combined2)
#cv2.waitKey(0)
# Stack each channel
# binary_combined = np.dstack((combined, combined, combined)) * 255
#cv2.imwrite("output_images_project/r_binarymax" + str(count) + ".jpg", r_binarymax*255)
#cv2.imwrite("output_images_project/r_channel" + str(count) +".jpg", r_channel)
#cv2.imwrite("output_images_project/s_channel" + str(count) +".jpg", s_channel)
#cv2.imwrite("output_images_project/l_channel" + str(count) +".jpg", l_channel)
return combined
def process_image(image):
global mtx, dist, M, Minv, count
print("processing: "+str(count)+"\n")
undist = cv2.undistort(image, mtx, dist, None, mtx)
binary_combined = create_combined_binary(undist)
combined = np.zeros_like(binary_combined).astype(np.uint8)
binary_combined2 = np.dstack((binary_combined* 255, binary_combined* 255, binary_combined* 255)).astype(np.uint8)
imshape = image.shape
p1x = 0
p1y = int(imshape[0])
p2x = int(imshape[1] / 2)
p2y = int(0.55 * imshape[0])
p3x = imshape[1]
p3y = imshape[0]
vertices = np.array([[(p1x, p1y), (p2x, p2y), (p3x, p3y)]], dtype=np.int32)
binary_combined_roi = helper.region_of_interest(binary_combined, vertices)
warped = cv2.warpPerspective(binary_combined_roi, M, image.shape[1::-1], flags=cv2.INTER_LINEAR)
left_fitx, right_fitx, ploty = fit_polynomial(warped)
#cv2.imshow("warped", warped*255)
#cv2.waitKey()
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
warped3 = np.dstack((warped, warped, warped)).astype(np.uint8)
warped3_result = cv2.addWeighted(warped3, 1, color_warp, 0.3, 0)
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (warped.shape[1], warped.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
count = count +1
cv2.imwrite("output_images_project/binary" + str(count) +".jpg", binary_combined*255)
#cv2.imwrite("output_images_project/binary_warped" + str(count) +".jpg", warped*255)
center_offset = (((LeftLane.best_fit[0] * 720 ** 2 + LeftLane.best_fit[1] * 720 + LeftLane.best_fit[2]) + (
RightLane.best_fit[0] * 720 ** 2 + RightLane.best_fit[1] * 720 + RightLane.best_fit[2])) / 2 - 640) * parameter.xm_per_pix
# Create merged output image
    ## This layout was introduced in https://chatbotslife.com/advanced-lane-line-project-7635ddca1960
img_out = np.zeros((576, 1280, 3), dtype=np.uint8)
img_out[0:576, 0:1024, :] = cv2.resize(result, (1024, 576))
# combined binary image
img_out[0:288, 1024:1280, 0] = cv2.resize(binary_combined * 255, (256, 288))
img_out[0:288, 1024:1280, 1] = cv2.resize(binary_combined * 255, (256, 288))
img_out[0:288, 1024:1280, 2] = cv2.resize(binary_combined * 255, (256, 288))
# warped bird eye view
img_out[310:576, 1024:1280, :] = cv2.resize(warped3_result, (256, 266))
# Write curvature and center in image
TextLeft = "Left curv: " + str(int(LeftLane.radius_of_curvature)) + " m"
TextRight = "Right curv: " + str(int(RightLane.radius_of_curvature)) + " m"
TextCenter = "Center offset: " + str(round(center_offset, 2)) + "m"
fontScale = 1
thickness = 2
fontFace = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img_out, TextLeft, (130, 40), fontFace, fontScale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)
cv2.putText(img_out, TextRight, (130, 70), fontFace, fontScale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)
cv2.putText(img_out, TextCenter, (130, 100), fontFace, fontScale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)
return img_out
mtx = []
dist = []
M = []
Minv = []
LeftLane = Line()
RightLane = Line()
center_offset = 0
count = 0
# main function for processing videos
def main():
global mtx, dist, M, Minv
mtx, dist = helper.calibrate_camera()
M, Minv = helper.calculate_warp_parameter()
output = 'test_videos_output/project_video_v2.mp4'
clip1 = VideoFileClip("project_video.mp4")#.subclip(22, 30)
white_clip = clip1.fl_image(process_image)
white_clip.write_videofile(output, audio=False)
# #main function for processing test images
# def main():
# global mtx, dist, M, Minv
# mtx, dist = calibrate_camera()
# M, Minv = calculate_warp_parameter()
#
# test_images = os.listdir("test_images/")
# for fname in test_images:
# print(fname)
# img1 = cv2.imread("test_images/"+fname)
# undist = cv2.undistort(img1, mtx, dist, None, mtx)
# cv2.imwrite("output_images_test/" + fname + "_undist.jpg", undist)
# undist_warped = cv2.warpPerspective(undist, M, img1.shape[1::-1], flags=cv2.INTER_LINEAR)
# cv2.imwrite("output_images_test/" + fname + "_undist_warped.jpg", undist_warped)
# binary_combined = create_combined_binary(undist)
# cv2.imwrite("output_images_test/" + fname + "_binary.jpg", binary_combined)
#
# binary_combined_warped = cv2.warpPerspective(binary_combined, M, img1.shape[1::-1], flags=cv2.INTER_LINEAR)
# cv2.imwrite("output_images_test/" + fname + "_binary_warped.jpg", binary_combined_warped)
#
# left_fitx, right_fitx, ploty = fit_polynomial(binary_combined_warped)
#
# warp_zero = np.zeros_like(binary_combined_warped).astype(np.uint8)
# color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
#
# # Recast the x and y points into usable format for cv2.fillPoly()
# pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
# pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
# pts = np.hstack((pts_left, pts_right))
# # Draw the lane onto the warped blank image
# cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
#
# # Warp the blank back to original image space using inverse perspective matrix (Minv)
# newwarp = cv2.warpPerspective(color_warp, Minv, (binary_combined_warped.shape[1], binary_combined_warped.shape[0]))
# # Combine the result with the original image
# result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
#
# plt.imshow(binary_combined_warped)
# plt.show()
# cv2.imwrite("output_images_test/" + fname + "_final.jpg", result)
# # cv2.imshow("afterwarp", result3)
if __name__ == "__main__":
main()
|
import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
try:
dataFileName = sys.argv[1]
except IndexError:
print("USAGE: python plotEnergies.py 'filename'")
sys.exit(0)
HFEnergy3 = 3.161921401722216
HFEnergy6 = 20.71924844033019
numParticles = \
    int(dataFileName[dataFileName.find('N')+1:dataFileName.find('E')-1])
hfenergyFound = False
if (numParticles == 2):
    HFEnergy = HFEnergy3
    hfenergyFound = True
elif (numParticles == 6):
    HFEnergy = HFEnergy6
    hfenergyFound = True
else:
    hfenergyFound = False
data = np.loadtxt(dataFileName, dtype=np.float64)
data[:,1] = np.sqrt(data[:,1])
n = len(data[:,0])
x = np.arange(0,n)
fig = plt.figure()
if (hfenergyFound):
yline = np.zeros(n)
yline.fill(HFEnergy)
plt.plot(x, yline, 'r--', label="HF Energy")
msize = 1.0
ax = fig.add_subplot(111)
plt.errorbar(x, data[:,0], yerr=data[:,1], fmt='bo', markersize=msize, label="VMC Energy")
plt.fill_between(x, data[:,0]-data[:,1], data[:,0]+data[:,1])
plt.xlim(0,n)
plt.xlabel('Iteration')
plt.ylabel('$E_0[a.u]$')
plt.legend(loc='best')
minSub = 80
maxSub = 120
inset_axes(ax, width="50%", height=1.0, loc='right')
plt.errorbar(x[minSub:maxSub], data[minSub:maxSub,0],
yerr=data[minSub:maxSub,1], fmt='bo', markersize=msize, label="VMC "
"Energy")
if (hfenergyFound):
    plt.plot(x[minSub:maxSub], yline[minSub:maxSub], 'r--', label="HF Energy")
plt.show()
|
"""Test script for Time Predictor Network (TPN).
Once you have trained your model with train_time.py, you can use this script to test the model.
It will load a saved model from --checkpoints_dir and print out the results.
It first creates model and dataset given the option. It will hard-code some parameters.
It then runs inference for --num_test images and prints out the results.
Example (you need to train models first):
    Test a TimePredictorNetwork model:
python test_time.py --dataroot #DATASET_LOCATION# --name #EXP_NAME# --model time_predictor --netD time_input --direction AtoB
See options/base_options.py and options/test_options.py for more test options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import os
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
#from util.visualizer import save_images
from util import html
import torch
def predict_time(opt=None, dataset=None, model=None):
    if dataset is None:
        data_loader = CreateDataLoader(opt)  # create a data loader given opt.dataset_mode and other options
        dataset = data_loader.load_data()
    if model is None:
        model = create_model(opt)  # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
# Create matrix to hold predictions:
predictions = torch.zeros(min(opt.num_test, len(dataset)))
true_times = torch.zeros(len(predictions))
for i, data in enumerate(dataset):
if i >= opt.num_test: # only apply our model to opt.num_test images.
break
model.set_input(data) # unpack data from data loader
model.test() # run inference
predictions[i] = torch.mean(model.prediction).item()
true_times[i] = model.true_time
L1 = torch.nn.L1Loss()
MSE = torch.nn.MSELoss()
loss_l1 = L1(predictions, true_times)
loss_mse = MSE(predictions, true_times)
print("Loss for {} set: L1: {}, MSE: {}".format(opt.phase, loss_l1, loss_mse))
return predictions, true_times
if __name__ == '__main__':
opt = TestOptions().parse() # get test options
# hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 0
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
predictions, true_times = predict_time(opt)
for i, (pred, true_t) in enumerate(zip(predictions, true_times)):
print("Image {}: Predicted {}, True time {}".format(i, pred, true_t))
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import tensorflow as tf
tf.get_logger().setLevel('INFO')
tf.compat.v1.enable_eager_execution()
import argparse
import matplotlib.pyplot as plt
from ptfrenderer import utils
from ptfrenderer import camera
from ptfrenderer import uv
_shape = utils._shape
_broadcast = utils._broadcast
# inputs
parser = argparse.ArgumentParser()
parser.add_argument('-save_dir', default=None)
parser.add_argument('-res', default=128, type=int)
parser.add_argument('-meshres', default=128, type=int)
parser.add_argument('-nviews', default=1000, type=int)
parser.add_argument('-margin', default=1., type=float)
parser.add_argument('-blur_mesh', default=2.0, type=float)
parser.add_argument('-batch_size', default=100, type=int)
parser.add_argument('-epochs', default=15, type=int)
parser.add_argument('-lrate', default=0.01, type=float)
args = parser.parse_args()
# model
rgbres = args.res
meshres = args.meshres
bcgres = args.res*3
# rendering
imres = args.res
angle_range = [[-60., -90., 0.], [60., 90., 0.]]
fow = 10.*np.pi/180.
margin = args.margin
# visualize result
grid_size = [5, 7]
def load_im(path):
im = np.asarray(plt.imread(path))
im = im.astype(np.float32) / 255.
return im
def save_im(im, path):
im = np.array(tf.cast(tf.clip_by_value(im, 0., 1.)*255., tf.uint8))
plt.imsave(path, im)
return
class Scene(tf.keras.Model):
def __init__(self, xyz, rgb, bcg, meshres, imres, angle_range, fow, margin, blur_mesh=2.0, isgt=False):
super(Scene, self).__init__()
self.xyz = tf.Variable(xyz)
self.rgb = tf.Variable(rgb)
self.bcg = tf.Variable(bcg)
self.vertuv, self.triangles = uv.sphere([meshres, meshres])
self.imsize = tf.constant([imres, imres])
self.angle_range = tf.constant(angle_range) * np.pi/180.
self.fow = tf.constant(fow)
self.margin = tf.constant(margin)
self.blur_mesh = blur_mesh
self.isgt = isgt
def get_xyz(self):
if self.isgt:
return self.xyz
xyz = utils.gaussian_blur(self.xyz, self.blur_mesh)
# blurring is necessary because of the uv sampling and stability of training
return xyz
def get_cam_params(self, angles):
K, T = camera.look_at_origin(self.fow)
angles_cam = angles * (self.angle_range[1]-self.angle_range[0]) + self.angle_range[0]
R = camera.euler(angles_cam)
R, T = _broadcast(R, T)
P = tf.matmul(camera.hom(T,'P'), camera.hom(R,'R'))[:,:3,:]
return K, P
def get_bcg_cropped(self, angles):
        # roughly simulates a background at infinity
bcg, angles = _broadcast(self.bcg, angles)
B, H, W, _ = _shape(bcg)
rh = tf.cast(imres, tf.float32) / tf.cast(H, tf.float32)
rw = tf.cast(imres, tf.float32) / tf.cast(W, tf.float32)
y1 = (1.-rh)*angles[:,0]
x1 = (1.-rw)*angles[:,1]
boxes = tf.stack([y1, x1, y1+rh, x1+rw], -1)
bcg = tf.image.crop_and_resize(bcg, boxes, tf.range(B), self.imsize)
bcg = tf.pad(bcg, [[0,0],[0,0],[0,0],[0,1]], constant_values=1.)
return bcg
def call(self, angles, isnorm=False):
        # renders the scene from the specified angles
xyz = self.get_xyz()
def l2_normalize(ima):
im = tf.math.l2_normalize(ima[...,:-1], -1)
a = ima[...,-1:]
ima = tf.concat([(im+1.)/2., a], -1)
return ima
if isnorm:
attr = uv.normals(xyz)
bcg = 1.
margin = 0.
postfun = l2_normalize
else:
attr = self.rgb
bcg = self.get_bcg_cropped(angles)
margin = self.margin
postfun = lambda x: x
K, P = self.get_cam_params(angles)
ima = uv.render(xyz, attr, self.vertuv, self.triangles,
K, P, self.imsize, bcg=bcg, margin=margin)
ima = postfun(ima)
return ima[...,:3] # ignore alpha
# ground truth scene
def deformed_sphere():
xyz = uv.sphere_init([rgbres, rgbres])
x = xyz[:,:,:,0]; y = -xyz[:,:,:,1]; z = -xyz[:,:,:,2]
xt = tf.minimum(tf.maximum(2*x,-2),2) + 0.5*tf.sin(x) + 0.2*tf.cos(5*y) + 0.5*z*z
yt = 2*y + 0.5*tf.sin(x+2*y) + 0.2*tf.cos(5*z) - 0.3*x*z
zt = 2*z + 0.5*tf.sin(z-3*x) + 0.2*tf.cos(15*y+6*z) + 0.3*y*x
xyz = 0.25 * tf.stack([xt,-yt,-zt],axis=-1)
return xyz
xyz = deformed_sphere()
rgb = load_im("assets/fish.jpg")
rgb = tf.image.resize(rgb[tf.newaxis,...,:3], [rgbres, rgbres])
bcg = load_im("assets/lake.jpg")
bcg = tf.image.resize(bcg[tf.newaxis,...,:3], [bcgres, bcgres])
ground_truth = Scene(xyz, rgb, bcg, meshres, imres, angle_range, fow, margin, isgt=True)
# ground truth data
print("Generate training data ...")
angles_train = tf.random.uniform([args.nviews, 3])
# ima_train = ground_truth(angles_train) # this might not fit into memory
ima_train = tf.concat([ground_truth(angles_train[i:i+1]) for i in range(args.nviews)], 0)
print("done.")
# estimated scene
xyz = 0.5 * uv.sphere_init([rgbres, rgbres])
rgb = 0.5 + tf.zeros([1, rgbres, rgbres, 3])
bcg = 0.5 + tf.zeros([1, bcgres, bcgres, 3])
model = Scene(xyz, rgb, bcg, meshres, imres, angle_range, fow, margin, blur_mesh=args.blur_mesh)
def loss_l2(ypred, y):
return tf.reduce_mean((ypred-y)**2, axis=[1,2,3])
print("Train model ...")
print("image resolution: {} x {}".format(imres, imres))
print("number of vertices: {}".format(_shape(model.vertuv)[-2]))
print("number of triangles: {}".format(_shape(model.triangles)[-2]))
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=args.lrate, beta_1=0.9, beta_2=0.999),
loss=loss_l2)
model.fit(x=angles_train, y=ima_train, batch_size=args.batch_size, epochs=args.epochs)
print("done.")
print("Evaluate model ...")
angles_test = utils.grid_angles(grid_size)
im_test = tf.concat([ground_truth(angles_test[i:i+1]) for i in range(tf.reduce_prod(grid_size))], 0)
model.evaluate(angles_test, im_test, verbose=2)
print("done.")
print("Visualize results ...")
im_test = utils.stack_images(im_test, grid_size)
norm_test = tf.concat([ground_truth(angles_test[i:i+1], isnorm=True) for i in range(tf.reduce_prod(grid_size))], 0)
norm_test = utils.stack_images(norm_test, grid_size)
im_pred = tf.concat([model(angles_test[i:i+1]) for i in range(tf.reduce_prod(grid_size))], 0)
im_pred = utils.stack_images(im_pred, grid_size)
norm_pred = tf.concat([model(angles_test[i:i+1], isnorm=True) for i in range(tf.reduce_prod(grid_size))], 0)
norm_pred = utils.stack_images(norm_pred, grid_size)
if args.save_dir is None:
plt.figure(1); plt.imshow(im_pred, interpolation='nearest')
plt.figure(2); plt.imshow(norm_pred, interpolation='nearest')
plt.figure(3); plt.imshow(im_test, interpolation='nearest')
plt.figure(4); plt.imshow(norm_test, interpolation='nearest')
plt.show()
else:
save_dir = os.path.join(args.save_dir, "imres={} margin={}".format(imres, margin))
os.makedirs(save_dir, exist_ok=True)
save_im(im_pred, os.path.join(save_dir, 'im_pred.png'))
save_im(norm_pred, os.path.join(save_dir, 'norm_pred.png'))
save_im(im_test, os.path.join(save_dir, 'im_test.png'))
save_im(norm_test, os.path.join(save_dir, 'norm_test.png'))
print("done.")
|
import itertools
import json
from unittest.mock import patch
from tornado import testing, web
from jsonrpcclient.clients.tornado_client import TornadoClient
from jsonrpcclient.request import Request
class EchoHandler(web.RequestHandler):
def data_received(self, chunk):
pass
def post(self):
self.finish(
{
"id": 1,
"jsonrpc": "2.0",
"result": [1, [2], {"3": 4, "5": True, "6": None}],
}
)
class FailureHandler(web.RequestHandler):
def data_received(self, chunk):
pass
def post(self):
request = json.loads(self.request.body.decode())
raise web.HTTPError(request["params"]["code"])
class Test(testing.AsyncHTTPTestCase):
def setup_method(self, *_):
# Patch Request.id_generator to ensure the id is always 1
Request.id_generator = itertools.count(1)
def get_app(self):
return web.Application([("/echo", EchoHandler), ("/fail", FailureHandler)])
@patch("jsonrpcclient.client.request_log")
@patch("jsonrpcclient.client.response_log")
@testing.gen_test
def test_request(self, *_):
response = yield TornadoClient(self.get_url("/echo")).request(
"foo", 1, [2], {"3": 4, "5": True, "6": None}
)
assert response.data.result == [1, [2], {"3": 4, "6": None, "5": True}]
@patch("jsonrpcclient.client.request_log")
@patch("jsonrpcclient.client.response_log")
@testing.gen_test
def test_custom_headers(self, *_):
response = yield TornadoClient(self.get_url("/echo")).send(
Request("foo", 1, [2], {"3": 4, "5": True, "6": None}),
headers={"foo": "bar"},
)
assert response.data.result == [1, [2], {"3": 4, "6": None, "5": True}]
|
from csaf_f16.models.waypoint import *
def model_init(model):
"""load trained model"""
model.parameters['auto'] = None
def get_auto(model, f16_state):
if model.auto is None:
model.parameters['auto'] = WaypointAutopilot(model.waypoints, airspeed_callable=model.airspeed)
return model.auto
def model_output(model, time_t, state_controller, input_f16):
# NOTE: not using llc
input_f16 = input_f16[:13] + [0.0, 0.0, 0.0] + input_f16[13:] + [0.0, 0.0, 0.0]
auto = get_auto(model, input_f16)
return auto.get_u_ref(time_t, input_f16)[:4]
def model_state_update(model, time_t, state_controller, input_f16):
# NOTE: not using llc
input_f16 = input_f16[:13] + [0.0, 0.0, 0.0] + input_f16[13:] + [0.0, 0.0, 0.0]
auto = get_auto(model, input_f16)
return [auto.advance_discrete_mode(time_t, input_f16)]
|
from typing import List
class Solution:
def oddCells(self, n: int, m: int, indices: List[List[int]]) -> int:
matrix = [[0 for i in range(m)] for i in range(n)]
for i in range(len(indices)):
for j in range(n):
matrix[j][indices[i][1]] += 1
for j in range(m):
matrix[indices[i][0]][j] += 1
count = 0
for i in range(n):
for j in range(m):
if matrix[i][j] % 2 != 0:
count += 1
return count
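# Example (from the LeetCode 1252 statement):
#   Solution().oddCells(2, 3, [[0, 1], [1, 1]]) == 6
# After both increments the matrix is [[1, 3, 1], [1, 3, 1]], so all 6 cells are odd.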
|
from django.urls import path
from . import views
urlpatterns = [
path('hands', views.HandListCreateApiView.as_view(), name='hands_view'),
]
|
from pypinyin import pinyin as to_pinyin
from pypinyin import Style as style
tone_styles = [style.TONE, style.TONE2, style.TONE3]
def char_to_pinyin(char: str, tone_style: int) -> str:
"""Converts a single character to pinyin
# TODO support heteronyms?
Parameters
----------
char : String
A single chinese character
tone_style : int
        an integer representing the tone style to use: 0 is "zhōng" (TONE), 1 is "zho1ng" (TONE2), 2 is "zhong1" (TONE3)
Returns
-------
String
The pinyin representing the single chinese character
"""
# Is created as a list of lists, so return as just string
return to_pinyin(char, style=tone_styles[tone_style], heteronyms=False)[0][0]
def chars_to_pinyin(chars: str, tone_style: int, as_list=True) -> [str]:
"""Converts a series characters in a single str into a list of pinyin representing those characters
Parameters
----------
chars : str
A string representing a series of characters
tone_style : int
The tone style to use in the pinyin
as_list : bool, optional
If the result should be returned as a list , or as a space separated string
Returns
-------
[str]
        A list of pinyin strings, or a single space-separated string when as_list is False
"""
chars_list = to_pinyin(chars, style=tone_styles[tone_style])
chars_list = [char[0] for char in chars_list if char[0] != " "]
if as_list:
return chars_list
# Return as space separated sentence
return " ".join(chars_list)
|
from netlds.models import *
from netlds.generative import *
from netlds.inference import *
from data.sim_data import build_model
import os
try:
# set simulation parameters
num_time_pts = 20
dim_obs = 50
dim_latent = 2
obs_noise = 'poisson'
results_dir = '/home/mattw/results/tmp/' # for storing simulation params
# build simulation
model = build_model(
num_time_pts, dim_obs, dim_latent, num_layers=2, np_seed=1,
obs_noise=obs_noise)
# checkpoint model so we can restore parameters and sample
checkpoint_file = results_dir + 'true_model.ckpt'
model.checkpoint_model(checkpoint_file=checkpoint_file, save_filepath=True)
y, z = model.sample(num_samples=128, seed=123)
# specify inference network for approximate posterior
inf_network = SmoothingLDS
inf_network_params = {
'dim_input': dim_obs,
'dim_latent': dim_latent,
'num_mc_samples': 4,
'num_time_pts': num_time_pts}
# specify probabilistic model
gen_model = FLDS
noise_dist = obs_noise
gen_model_params = {
'dim_obs': dim_obs,
'dim_latent': dim_latent,
'num_time_pts': num_time_pts,
'noise_dist': noise_dist,
'nn_params': [{'units': 15}, {'units': 15}, {}], # 3 layer nn to output
'gen_params': None}
# initialize model
    model = LDSModel(
inf_network=inf_network, inf_network_params=inf_network_params,
gen_model=gen_model, gen_model_params=gen_model_params,
np_seed=1, tf_seed=1)
# set optimization parameters
adam = {'learning_rate': 1e-3}
opt_params = {
'learning_alg': 'adam',
'adam': adam,
'epochs_training': 10, # max iterations
'epochs_display': None, # output to notebook
'epochs_ckpt': None, # checkpoint model parameters (inf=last epoch)
'epochs_summary': None, # output to tensorboard
'batch_size': 4,
'use_gpu': True,
'run_diagnostics': False} # memory/compute time in tensorboard
data_dict = {'observations': y, 'inf_input': y}
model.train(data=data_dict, opt_params=opt_params)
os.remove(checkpoint_file)
print('test successful')
except Exception as err:
    print('test error:', err)
|
from trapcards import JustMathGenius
a = [1,2,3]
b = [1,1,1]
print(JustMathGenius.weightedAvg(a,b))
|
from collections import defaultdict
import sys
mapped = defaultdict(int)
with open(sys.argv[1], 'r') as samfile:
for line in samfile:
if (line[0] != '@') & (line.split('\t')[2] != '*'):
mapped[line.split('\t')[0]] = 1
print(len(mapped))
seqs = defaultdict(str)
outfile = open(sys.argv[3], 'w')
with open(sys.argv[2], 'r') as fastafile:
for line in fastafile:
if line[0] == '>':
curRead = line.split(' ')[0][1:]
elif (mapped[curRead] != 1):
outfile.write(">" + curRead + '\n' + line + '\n')
outfile.close()
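# Usage sketch (argument order inferred from the sys.argv reads above; the script
# name is illustrative):
#   python extract_unmapped.py <alignments.sam> <reads.fasta> <unmapped_out.fasta>
# Read names with a reference hit in the SAM file are collected first, then every
# FASTA record whose name was never seen as mapped is written to the output file.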
|
# ------------------------------
# 497. Random Point in Non-overlapping Rectangles
#
# Description:
# Given a list of non-overlapping axis-aligned rectangles rects, write a function pick which
# randomly and uniformily picks an integer point in the space covered by the rectangles.
#
# Note:
#
# An integer point is a point that has integer coordinates.
# A point on the perimeter of a rectangle is included in the space covered by the rectangles.
# ith rectangle = rects[i] = [x1,y1,x2,y2], where [x1, y1] are the integer coordinates of the
# bottom-left corner, and [x2, y2] are the integer coordinates of the top-right corner.
# length and width of each rectangle does not exceed 2000.
# 1 <= rects.length <= 100
# pick return a point as an array of integer coordinates [p_x, p_y]
# pick is called at most 10000 times.
#
# Example 1:
# Input:
# ["Solution","pick","pick","pick"]
# [[[[1,1,5,5]]],[],[],[]]
# Output:
# [null,[4,1],[4,1],[3,3]]
#
# Example 2:
# Input:
# ["Solution","pick","pick","pick","pick","pick"]
# [[[[-2,-2,-1,-1],[1,0,3,0]]],[],[],[],[],[]]
# Output:
# [null,[-1,-2],[2,0],[-2,-1],[3,0],[-2,-2]]
# Explanation of Input Syntax:
#
# The input is two lists: the subroutines called and their arguments. Solution's constructor
# has one argument, the array of rectangles rects. pick has no arguments. Arguments are always
# wrapped with a list, even if there aren't any.
#
# Version: 1.0
# 10/25/19 by Jianfa
# ------------------------------
import bisect
import random


class Solution(object):
def __init__(self, rects):
"""
:type rects: List[List[int]]
"""
self.rects = rects
        self.range = [0] # Start with 0 so pick() can bisect the cumulative areas directly
self.summ = 0
for rect in rects:
self.summ += (rect[2] - rect[0] + 1) * (rect[3] - rect[1] + 1) # Keep adding new result to make a new range
self.range.append(self.summ)
def pick(self):
"""
:rtype: List[int]
"""
n = random.randint(0, self.summ - 1)
i = bisect.bisect(self.range, n)
        rect = self.rects[i-1] # self.range starts with 0, so i starts at 1 and i - 1 is the matching rect index
        n -= self.range[i-1] # Subtract the cumulative area of all preceding rectangles
x = rect[0] + n % (rect[2] - rect[0] + 1)
        y = rect[1] + n // (rect[2] - rect[0] + 1)
return [x, y]
# Your Solution object will be instantiated and called as such:
# obj = Solution(rects)
# param_1 = obj.pick()
# Used for testing
if __name__ == "__main__":
    test = Solution([[1, 1, 5, 5]])
    print(test.pick())
# ------------------------------
# Summary:
# Idea from https://leetcode.com/problems/random-point-in-non-overlapping-rectangles/discuss/154147/Python-weighted-probability-solution/160274
# First populate the sum of each rectangle to a range list, then randomly pick a number
# from this range.
# Manage to get x, y from just one-time random
#
# O(n) time, O(n) space
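# --- Illustrative sketch of the same idea, independent of the class above ---
# Build the cumulative-area list once, map one uniform integer to a rectangle
# with bisect, then to a point inside it with divmod. The helper name and the
# rectangles you pass in are illustrative only.
def _demo_weighted_pick(rects):
    prefix = [0]
    for x1, y1, x2, y2 in rects:
        prefix.append(prefix[-1] + (x2 - x1 + 1) * (y2 - y1 + 1))
    n = random.randint(0, prefix[-1] - 1)
    i = bisect.bisect(prefix, n) - 1
    x1, y1, x2, y2 = rects[i]
    dy, dx = divmod(n - prefix[i], x2 - x1 + 1)
    return [x1 + dx, y1 + dy]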
|
'''
Created on Mar 26, 2015
@author: jpenamar
'''
import unittest
from mytree import Tree
class TreeTestCase(unittest.TestCase):
"""Test the methods of the class Tree."""
def setUp(self):
self.tree = Tree('D', 'D data')
self.tree.insert('Q', 'Q data') # Insert in right node.
self.tree.insert('A', 'A data') # Insert in left node.
def test_init(self):
"""Verify attributes are initialized as expected."""
t = Tree('D', 'D data')
self.assertEqual(t.key, 'D')
self.assertEqual(t.data, 'D data')
self.assertTrue(t.left == t.right == None)
def test_insert(self):
"""Verify subtrees are inserted as expected."""
t = self.tree
# Test right subtree.
self.assertEqual(t.right.key, 'Q')
self.assertEqual(t.right.data, 'Q data')
# Test left subtree.
self.assertEqual(t.left.key, 'A')
self.assertEqual(t.left.data, 'A data')
# Test duplicates.
self.assertRaises(ValueError, t.insert, 'Q', 'Q data')
self.assertRaises(ValueError, t.insert, 'A', 'A data')
def test_walk(self):
"""Verify keys, data are returned from tree in order, sorted by key."""
t = self.tree.walk()
for kv in [('A', 'A data'), ('D', 'D data'), ('Q', 'Q data')]:
self.assertEqual(kv, next(t))
def test_find(self):
"""Verify find() returns the correct data or raises KeyError."""
t = self.tree
self.assertEqual(t.find('Q'), 'Q data')
self.assertEqual(t.find('A'), 'A data')
self.assertRaises(KeyError, t.find, 'I')
if __name__ == '__main__':
unittest.main()
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import os.path
import sys
import unittest
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
try:
imp.find_module("mojom")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove("pylib"), "pylib"))
from mojom.generate import data
from mojom.generate import module as mojom
class DataTest(unittest.TestCase):
def testStructDataConversion(self):
"""Tests that a struct can be converted from data."""
module = mojom.Module('test_module', 'test_namespace')
struct_data = {
'name': 'SomeStruct',
'enums': [],
'constants': [],
'fields': [
{'name': 'field1', 'kind': 'i32'},
{'name': 'field2', 'kind': 'i32', 'ordinal': 10},
{'name': 'field3', 'kind': 'i32', 'default': 15}]}
struct = data.StructFromData(module, struct_data)
struct.fields = map(lambda field:
data.FieldFromData(module, field, struct), struct.fields_data)
self.assertEquals(struct_data, data.StructToData(struct))
def testUnionDataConversion(self):
"""Tests that a union can be converted from data."""
module = mojom.Module('test_module', 'test_namespace')
union_data = {
'name': 'SomeUnion',
'fields': [
{'name': 'field1', 'kind': 'i32'},
{'name': 'field2', 'kind': 'i32', 'ordinal': 10}]}
union = data.UnionFromData(module, union_data)
union.fields = map(lambda field:
data.FieldFromData(module, field, union), union.fields_data)
self.assertEquals(union_data, data.UnionToData(union))
def testImportFromDataNoMissingImports(self):
"""Tests that unions, structs, interfaces and enums are imported."""
module = mojom.Module('test_module', 'test_namespace')
imported_module = mojom.Module('import_module', 'import_namespace')
#TODO(azani): Init values in module.py.
#TODO(azani): Test that values are imported.
imported_module.values = {}
imported_data = {'module' : imported_module}
struct = mojom.Struct('TestStruct', module=module)
imported_module.kinds[struct.spec] = struct
union = mojom.Union('TestUnion', module=module)
imported_module.kinds[union.spec] = union
interface = mojom.Interface('TestInterface', module=module)
imported_module.kinds[interface.spec] = interface
enum = mojom.Enum('TestEnum', module=module)
imported_module.kinds[enum.spec] = enum
data.ImportFromData(module, imported_data)
# Test that the kind was imported.
self.assertIn(struct.spec, module.kinds)
self.assertEquals(struct.name, module.kinds[struct.spec].name)
self.assertIn(union.spec, module.kinds)
self.assertEquals(union.name, module.kinds[union.spec].name)
self.assertIn(interface.spec, module.kinds)
self.assertEquals(interface.name, module.kinds[interface.spec].name)
self.assertIn(enum.spec, module.kinds)
self.assertEquals(enum.name, module.kinds[enum.spec].name)
# Test that the imported kind is a copy and not the original.
self.assertIsNot(struct, module.kinds[struct.spec])
self.assertIsNot(union, module.kinds[union.spec])
self.assertIsNot(interface, module.kinds[interface.spec])
self.assertIsNot(enum, module.kinds[enum.spec])
def testImportFromDataNoExtraneousImports(self):
"""Tests that arrays, maps and interface requests are not imported."""
module = mojom.Module('test_module', 'test_namespace')
imported_module = mojom.Module('import_module', 'import_namespace')
#TODO(azani): Init values in module.py.
imported_module.values = {}
imported_data = {'module' : imported_module}
array = mojom.Array(mojom.INT16, length=20)
imported_module.kinds[array.spec] = array
map_kind = mojom.Map(mojom.INT16, mojom.INT16)
imported_module.kinds[map_kind.spec] = map_kind
interface = mojom.Interface('TestInterface', module=module)
imported_module.kinds[interface.spec] = interface
interface_req = mojom.InterfaceRequest(interface)
imported_module.kinds[interface_req.spec] = interface_req
data.ImportFromData(module, imported_data)
self.assertNotIn(array.spec, module.kinds)
self.assertNotIn(map_kind.spec, module.kinds)
self.assertNotIn(interface_req.spec, module.kinds)
def testNonInterfaceAsInterfaceRequest(self):
"""Tests that a non-interface cannot be used for interface requests."""
module = mojom.Module('test_module', 'test_namespace')
interface = mojom.Interface('TestInterface', module=module)
method_dict = {
'name': 'Foo',
'parameters': [{'name': 'foo', 'kind': 'r:i32'}],
}
with self.assertRaises(Exception) as e:
data.MethodFromData(module, method_dict, interface)
self.assertEquals(e.exception.__str__(),
'Interface request requires \'i32\' to be an interface.')
|
import numpy
import torch
def image_to_text(captions, images, npts=None, verbose=False):
"""
Images->Text (Image Annotation)
Images: (5N, K) matrix of images
Captions: (5N, K) matrix of captions
"""
    if npts is None:
npts = images.size()[0] / 5
npts = int(npts)
ranks = numpy.zeros(npts)
for index in range(npts):
# Get query image
im = images[5 * index].unsqueeze(0)
# Compute scores
d = torch.mm(im, captions.t())
d_sorted, inds = torch.sort(d, descending=True)
inds = inds.data.squeeze(0).cpu().numpy()
# Score
rank = 1e20
# find the highest ranking
for i in range(5*index, 5*index + 5, 1):
tmp = numpy.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
# Compute metrics
r1 = 100.0 * len(numpy.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(numpy.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(numpy.where(ranks < 10)[0]) / len(ranks)
medr = numpy.floor(numpy.median(ranks)) + 1
if verbose:
print(" * Image to text scores: R@1: %.1f, R@5: %.1f, R@10: %.1f, Medr: %.1f" % (r1, r5, r10, medr))
return r1+r5+r10, (r1, r5, r10, medr)
def text_to_image(captions, images, npts=None, verbose=False):
    if npts is None:
npts = images.size()[0] / 5
npts = int(npts)
ims = torch.cat([images[i].unsqueeze(0) for i in range(0, len(images), 5)])
ranks = numpy.zeros(5 * npts)
for index in range(npts):
# Get query captions
queries = captions[5*index : 5*index + 5]
# Compute scores
d = torch.mm(queries, ims.t())
for i in range(d.size()[0]):
d_sorted, inds = torch.sort(d[i], descending=True)
inds = inds.data.squeeze(0).cpu().numpy()
ranks[5 * index + i] = numpy.where(inds == index)[0][0]
# Compute metrics
r1 = 100.0 * len(numpy.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(numpy.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(numpy.where(ranks < 10)[0]) / len(ranks)
medr = numpy.floor(numpy.median(ranks)) + 1
if verbose:
print(" * Text to image scores: R@1: %.1f, R@5: %.1f, R@10: %.1f, Medr: %.1f" % (r1, r5, r10, medr))
return r1+r5+r10, (r1, r5, r10, medr)
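if __name__ == "__main__":
    # Minimal smoke-test sketch (illustrative only): 4 "images" with 5 captions each,
    # embedding size 8, scored by dot product exactly as in the functions above.
    torch.manual_seed(0)
    images = torch.randn(20, 8)
    captions = torch.randn(20, 8)
    image_to_text(captions, images, verbose=True)
    text_to_image(captions, images, verbose=True)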
|
num = int(input('Enter a 4-digit number: '))
centena = num // 100
if centena % 4 == 0:
    print(f'({centena}) is a multiple of 4')
else:
    print(f'({centena}) is not a multiple of 4')
|
#
from pyclics.api import Clics # flake8: noqa
__version__ = "3.0.3.dev0"
|
def add_binary(a, b):
    return bin(int(a, 2) + int(b, 2))[2:]  # add two binary strings, return the binary sum as a string
|
"""
Copyright 2021 Max-Planck-Gesellschaft
Code author: Jan Achterhold, jan.achterhold@tuebingen.mpg.de
Embodied Vision Group, Max Planck Institute for Intelligent Systems, Tübingen
This source code is licensed under the MIT license found in the
LICENSE.md file in the root directory of this source tree or at
https://opensource.org/licenses/MIT.
"""
import itertools
COMMENT_PREFIX = "cr"
# named_config, optional: env_name
ENV_NAMES = {
"asq1": ["action_squash", "action_squash_1"],
"asq1_noisefree": ["action_squash", "action_squash_1_noisefree"],
"asq2": ["action_squash", "action_squash_2"],
"asq2_noisefree": ["action_squash", "action_squash_2_noisefree"],
"mountaincar": ["mountaincar", "mountaincar"],
"pendulum_bd": ["pendulum_bd", "pendulum_quadrantactionfactorar2bd"],
}
NP_KL_WEIGHT = [
5,
]
SEED = [1, 2, 3]
POSWEIGHTS = [False, "relu"]
commands = []
for (
environment_name,
np_kl_weight,
pos_weights,
seed,
) in itertools.product(ENV_NAMES.keys(), NP_KL_WEIGHT, POSWEIGHTS, SEED):
if environment_name.startswith("asq") and not pos_weights:
continue
pos_weights_cfg = {False: "no_positive_weights", "relu": ""}[pos_weights]
pos_weights_str = {False: "False", "relu": "relu"}[pos_weights]
command = (
f"python -m context_exploration.train_model "
f"--comment={COMMENT_PREFIX}_"
f"s{seed}_"
f"{environment_name}_"
f"posweights_{pos_weights_str}_"
f"npklw{np_kl_weight} "
f"with "
f"{ENV_NAMES[environment_name][0]}_cfg "
f"env_name={ENV_NAMES[environment_name][1]} "
f"{pos_weights_cfg} "
f"kl_np_weight={np_kl_weight} "
f"seed={seed}\n"
)
commands.append(command)
# write commands to job file
with open(f"jobs_training_{COMMENT_PREFIX}.txt", "w") as handle:
handle.writelines(commands)
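# For reference, the first combination that is not skipped ("asq1", kl weight 5,
# relu weights, seed 1) produces roughly the following line (spacing approximate):
#   python -m context_exploration.train_model --comment=cr_s1_asq1_posweights_relu_npklw5 \
#       with action_squash_cfg env_name=action_squash_1 kl_np_weight=5 seed=1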
|
# -*-coding:utf-8-*-
import random
import requests
import re
import time
import pandas as pd
import xlrd
from xlutils.copy import copy
from lxml import etree # xpath
user_agent = [
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
"Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
"UCWEB7.0.2.37/28/999",
"NOKIA5700/ UCWEB7.0.2.37/28/999",
"Openwave/ UCWEB7.0.2.37/28/999",
"Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
"Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
]
headers = {
'Referer': 'https://music.163.com',
'User-Agent': random.choice(user_agent)
}
def get_page(url, num):  # num is the page index passed from __main__
index = 0
res = requests.get(url, headers=headers)
data = re.findall('<a title="(.*?)" href="/playlist\?id=(\d+)" class="msk"></a>', res.text)
for item in data:
index = get_songs(item, index)
return index
def get_songs(data, index):
playlist_url = 'https://music.163.com/playlist?id=%s' % data[1]
res = requests.get(playlist_url, headers=headers)
obj_nodes = etree.HTML(res.text)
node_texts = obj_nodes.xpath('//div[@id="song-list-pre-cache"]//a/@href')
if len(node_texts) > 0:
for nodeText in node_texts:
re.findall(r'id=(\d+)', nodeText)
if 'id=' in nodeText:
temp_text = nodeText.split('id=')
if temp_text[1] not in list:
download_mp3(data, temp_text[1], index)
index += 1
return index
def get_music_info(song_id, index, num):
music_url = 'https://music.163.com/song?id=%s' % song_id
res = requests.get(music_url, headers=headers)
obj_nodes = etree.HTML(res.text)
img = obj_nodes.xpath('//img/@data-src')
title = obj_nodes.xpath('//em[@class="f-ff2"]/text()')
author = obj_nodes.xpath('//div[@class="cnt"]//span/@title')
lrc = obj_nodes.xpath('//div[@id="lyric-content"]/text()')
    print('Current id: %s, the song is %s - %s, its cover image is %s' % (song_id, author, title, img))
xlsx = xlrd.open_workbook(r'../excel/music.xls', formatting_info=True)
wb = copy(xlsx)
ws = wb.get_sheet(num)
ws.write(index, 0, song_id)
ws.write(index, 1, title)
ws.write(index, 2, author)
ws.write(index, 3, img)
ws.write(index, 4, lrc)
wb.save("fomatting.xlsx")
def read_excel():
dfxm = pd.read_excel(r'songId.xlsx')
df = dfxm['song_id'].astype(str)
list = []
for i in df:
list.append(i)
return list
def download_mp3(data, song_id, index):
download_url = "http://music.163.com/song/media/outer/url?id=%s" % song_id
try:
print("正在下载%s:%s下的歌曲:%s,第%s首歌" % (data[1], data[0], song_id, index))
with open('G:\\MP3\\yueyu\\' + song_id + '.mp3', 'wb') as f:
f.write(requests.get(download_url, headers=headers).content)
except FileNotFoundError:
pass
except OSError:
pass
if __name__ == '__main__':
startTime = time.time()
limit = 35
for num in range(0, 38):
offset = num * limit
hot_url = 'https://music.163.com/discover/playlist/?order=粤语&cat=全部&limit=%s&offset=%s' % (limit, offset)
get_page(hot_url, num)
    print("Crawling finished successfully, elapsed time: %s" % (time.time() - startTime))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import cPickle
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
# from model.faster_rcnn.faster_rcnn_cascade import _fasterRCNN
from model.rpn.bbox_transform import clip_boxes
from model.nms.nms_wrapper import nms
from model.co_nms.co_nms_wrapper import co_nms
# from model.fast_rcnn.nms_wrapper import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.network import save_net, load_net, vis_detections, vis_det_att, vis_relations, vis_gt_relations, eval_relations_recall
from model.repn.bbox_transform import combine_box_pairs
def vis_dets(img, im_info, rois, bbox_pred, obj_cls_prob, imdb):
pdb.set_trace()
im2show = img.data.permute(2, 3, 1, 0).squeeze().cpu().numpy()
im2show += cfg.PIXEL_MEANS
thresh = 0.01
boxes = rois[:, :, 1:5]
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4)
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
obj_scores = obj_cls_prob.squeeze()
pred_boxes = pred_boxes.squeeze()
for j in xrange(1, len(imdb._classes)):
inds = torch.nonzero(obj_scores[:,j] > thresh).view(-1)
# if there is det
if inds.numel() > 0:
obj_cls_scores = obj_scores[:,j][inds]
_, order = torch.sort(obj_cls_scores, 0, True)
cls_boxes = pred_boxes[inds, :]
cls_dets = torch.cat((cls_boxes, obj_cls_scores), 1)
cls_dets = cls_dets[order]
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
im2show = vis_detections(im2show, imdb._classes[j], cls_dets.cpu().numpy(), 0.2)
# save image to disk
cv2.imwrite("detections.jpg", im2show)
|
from QCGym.hamiltonians.int_cross_resonance import InteractiveCrossResonance
from QCGym.fidelities.trace_fidelity import TraceFidelity
import gym
import numpy as np
from gym import spaces
from scipy.linalg import expm
import logging
logger = logging.getLogger(__name__)
CNOT = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
# We use natural units
H_CROSS = 1
class InteractiveEnv(gym.Env):
"""
Interactive Gym Environment for Quantum Control.
Parameters
----------
max_timesteps : int
How many nanoseconds to run for
target : ndarray of shape(4,4)
Target Unitary we want to acheive
hamiltonian : Hamiltonian
The Hamiltonian to be used
fidelity : Fidelity
Fidelity to be used to calculate reward
dt : double
Timestep_size/mesh
"""
metadata = {'render.modes': ['human']}
observation_space = spaces.Tuple((spaces.Box(low=-np.inf, high=np.inf, shape=(4, 4)),
spaces.Box(
low=-np.inf, high=np.inf, shape=(4, 4)),
spaces.Discrete(1000)))
reward_range = (float(0), float(1))
def __init__(self, max_timesteps=30, target=CNOT, hamiltonian=InteractiveCrossResonance(), fidelity=TraceFidelity(), dt=0.1):
self.max_timesteps = max_timesteps
self.target = target
self.hamiltonian = hamiltonian
self.fidelity = fidelity
self.dt = dt
self.U = np.eye(4)
self.name = f"IntEnv-{max_timesteps}-{CNOT}-{hamiltonian}-{fidelity}-{dt}"
logger.info(self.name)
self.action_space = self.hamiltonian.action_space
self.actions_so_far = []
def __str__(self):
return self.name
def step(self, action):
"""
Take one action for one timestep
Parameters
----------
action : ActionSpace
Object in Hamiltonian.action_space
Returns
-------
observation : ndarray(shape=(4,4)),ndarray(shape=(4,4)),int
Real part of state, Imag part of state, Number of timesteps done
reward : double
Returns fidelity on final timestep, zero otherwise
done : boolean
Returns true of episode is finished
info : dict
Additional debugging Info
"""
self.actions_so_far.append(action)
logger.info(f"Action#{len(self.actions_so_far)}={action}")
done = len(self.actions_so_far) == self.max_timesteps
H = np.sum(self.hamiltonian(action, done=done), axis=0)
if not np.all(H == np.conjugate(H).T):
logger.error(
f"{H} is not Hermitian with actions as {np.array(self.actions_so_far)}")
self.U = expm(-1j*self.dt*H/H_CROSS) @ self.U
if not np.allclose(np.matmul(self.U, np.conjugate(self.U.T)), np.eye(4)):
logger.error(
f"Unitary Invalid-Difference is{np.matmul(self.U,self.U.T)-np.eye(4)}")
if not np.isclose(np.abs(np.linalg.det(self.U)), 1):
logger.error(f"Det Invalid-{np.abs(np.linalg.det(self.U))}")
if done:
reward = self.fidelity(self.U, self.target)
else:
reward = 0
return (np.real(self.U), np.imag(self.U), len(self.actions_so_far)), reward, done, {}
def reset(self):
self.actions_so_far = []
self.U = np.eye(4)
self.hamiltonian.reset()
logger.info("IntEnv Reset")
return (np.eye(4), np.zeros((4, 4)), 0)
def render(self, mode='human'):
pass
def close(self):
pass
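if __name__ == "__main__":
    # Minimal random-rollout sketch (illustrative; assumes the default constructor
    # arguments and that the hamiltonian's action_space supports sample()).
    env = InteractiveEnv(max_timesteps=5)
    obs = env.reset()
    done = False
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())
    print("final fidelity reward:", reward)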
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from symfit import Parameter, Variable, Fit, GradientModel
from symfit.distributions import Gaussian
palette = sns.color_palette()
x = Variable('x')
y = Variable('y')
A = Parameter('A')
sig = Parameter(name='sig', value=1.4, min=1.0, max=2.0)
x0 = Parameter(name='x0', value=15.0, min=0.0)
# Gaussian distribution
model = GradientModel({y: A*Gaussian(x, x0, sig)})
# Sample 10000 points from a N(15.0, 1.5) distribution
np.random.seed(seed=123456789)
sample = np.random.normal(loc=15.0, scale=1.5, size=(10000,))
ydata, bin_edges = np.histogram(sample, 100)
xdata = (bin_edges[1:] + bin_edges[:-1])/2
fit = Fit(model, xdata, ydata)
fit_result = fit.execute()
y, = model(x=xdata, **fit_result.params)
sns.regplot(xdata, ydata, fit_reg=False)
plt.plot(xdata, y, color=palette[2])
plt.ylim(0, 400)
plt.show()
|
import argparse
from app import db
from app.models import Node, Post
all_posts = Post.query.all()
all_nodes = Node.query.all()
for i in all_posts:
db.session.delete(i)
db.session.commit()
for i in all_nodes:
db.session.delete(i)
db.session.commit()
# for u in users:
# if u.email == 'None':
# db.session.delete(u)
# db.session.commit()
# elif u.email == args.email:
# db.session.delete(u)
# db.session.commit()
#
# users = User.query.all()
# print users
#
#
#
# for p in posts:
# print user.username, ':', p.message
|
from pyneuroml.neuron import export_to_neuroml2
import sys
import os
os.chdir("../NEURON/test")
sys.path.append(".")
export_to_neuroml2("load_l23.hoc",
"../NeuroML2/L23_morph.cell.nml",
includeBiophysicalProperties=False,
known_rev_potentials={"na":60,"k":-90,"ca":140})
|
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from .crf import CRF
class BiLSTMCRF(nn.Module):
def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim, pre_word_embed=None, num_rnn_layers=1):
super(BiLSTMCRF, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.tag_to_ix = tag_to_ix
self.word_embedding = nn.Embedding(vocab_size, embedding_dim)
if pre_word_embed is not None:
self.pre_word_embed = True
self.word_embedding.weight = nn.Parameter(torch.FloatTensor(pre_word_embed))
self.bilstm = nn.LSTM(embedding_dim, hidden_dim // 2, num_layers=num_rnn_layers,
bidirectional=True, batch_first=True)
self.crf = CRF(hidden_dim, self.tag_to_ix)
def __build_features(self, sentences):
masks = sentences.gt(0)
embeds = self.word_embedding(sentences.long())
seq_length = masks.sum(1)
sorted_seq_length, perm_idx = seq_length.sort(descending=True)
embeds = embeds[perm_idx, :]
pack_sequence = pack_padded_sequence(embeds, lengths=sorted_seq_length, batch_first=True)
packed_output, _ = self.bilstm(pack_sequence)
lstm_out, _ = pad_packed_sequence(packed_output, batch_first=True)
_, unperm_idx = perm_idx.sort()
lstm_out = lstm_out[unperm_idx, :]
return lstm_out, masks
def loss(self, xs, tags):
features, masks = self.__build_features(xs)
loss = self.crf.loss(features, tags, masks=masks)
return loss
def forward(self, xs):
# Get the emission scores from the BiLSTM
features, masks = self.__build_features(xs)
scores, tag_seq, probs = self.crf(features, masks)
return scores, tag_seq, probs
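if __name__ == "__main__":
    # Minimal smoke-test sketch (illustrative; assumes the local CRF module accepts an
    # arbitrary tag_to_ix dict). Token index 0 is treated as padding by __build_features.
    tag_to_ix = {"<pad>": 0, "O": 1, "B": 2, "I": 3}
    model = BiLSTMCRF(vocab_size=50, tag_to_ix=tag_to_ix, embedding_dim=16, hidden_dim=32)
    xs = torch.tensor([[4, 7, 9, 0, 0], [3, 8, 0, 0, 0]])
    tags = torch.tensor([[1, 2, 3, 0, 0], [1, 1, 0, 0, 0]])
    print("loss:", model.loss(xs, tags))
    print("decode:", model(xs))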
|
import numpy as np
import logging
import torch
def seq_to_array(fp, max_len=15):
with open(fp) as f:
data = [x for x in f.read().split("\n") if x != ""]
new_list = []
for seq in data:
arr = seq.split(",")
new_list.append(np.array([int(w) for w in arr] + [0] * (max_len - len(arr))))
return np.vstack(new_list)
def seq_match(r, s):
l_r = len(r)
l_s = len(s)
for i in range(l_r - l_s + 1):
if np.dot(r[i:i+l_s] - s, s) == 0:
return True
return False
class Agent:
def __init__(self):
pass
    def human_label(self, X):
        # Label each row with 1 if it contains the pattern [1, 0, 5] as a contiguous subsequence
        labels = np.zeros(X.shape[0])
        for i, r in enumerate(X):
            if seq_match(r, np.array([1, 0, 5])):
                labels[i] = 1
        return labels
if __name__ == "__main__":
# print(seq_to_array("sequences-1-test.txt"))
r = np.array([5, 7, 3, 4, 6, 3, 2, 5, 2, 3, 1, 2, 3, 4])
    s = np.array([0, 2, 3])
    print(seq_match(r, s))
|
import tensorflow as tf
class DCGAN2(object):
def __init__(self, name, data_shape,
noise_shape, discriminator, generator):
self.name = name
if self.name is not None:
with tf.variable_scope(self.name) as scope:
self._build_graph(data_shape, noise_shape, discriminator, generator)
else:
self._build_graph(data_shape, noise_shape, discriminator, generator)
self._init_loss()
self._init_trainable_vars()
self._init_update_ops()
def _build_graph(self, data_shape, noise_shape,
discriminator, generator):
self.noise = tf.placeholder(tf.float32, [None] + noise_shape)
self.gen_train_flag = tf.placeholder(tf.bool)
self.data = tf.placeholder(tf.float32, [None] + data_shape)
self.label = tf.placeholder(tf.float32, [None] + [1, 1, 1])
self.disc_train_flag = tf.placeholder(tf.bool)
self.gen = generator('gen', self.noise, self.gen_train_flag)
self.disc = discriminator('disc', self.data, self.label, self.disc_train_flag)
self.gen_raw_out = self.gen.outputs[0]
self.disc_raw_out = self.disc.outputs[0]
def _init_loss(self):
self.loss_gen = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.disc_raw_out, labels=tf.ones_like(self.disc_raw_out)))
self.loss_disc = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.disc_raw_out, labels=self.label))
def _init_trainable_vars(self):
if not self.name:
self.vars_disc = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.disc.name)
self.vars_gen = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.gen.name)
else:
self.vars_disc = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
self.name + '/' + self.disc.name)
self.vars_gen = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
self.name + '/' + self.gen.name)
def _init_update_ops(self):
if not self.name:
            self.ops_disc = tf.get_collection(tf.GraphKeys.UPDATE_OPS, self.disc.name)
self.ops_gen = tf.get_collection(tf.GraphKeys.UPDATE_OPS, self.gen.name)
else:
            self.ops_disc = tf.get_collection(tf.GraphKeys.UPDATE_OPS, self.name + '/' + self.disc.name)
self.ops_gen = tf.get_collection(tf.GraphKeys.UPDATE_OPS, self.name + '/' + self.gen.name)
self.ops_all = self.ops_disc + self.ops_gen
|
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
class ProcesssData(object):
def __init__(self, config):
self.batch_size = config["model"]["batch_size"]
self.data = input_data.read_data_sets(config["global"]["train_data_file_path"], one_hot=True)
def __call__(self, batch_size):
batch_imgs, y = self.data.train.next_batch(batch_size)
size = 28
channel = 1
        batch_imgs = np.reshape(batch_imgs, (batch_size, size, size, channel))
return batch_imgs, y
def data2fig(self, samples):
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
size = 32
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(size,size), cmap='Greys_r')
return fig
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
import mock
import unittest
from ebcli.core import fileoperations
from ebcli.core.ebcore import EB
from ebcli.core.ebpcore import EBP
from ebcli.controllers.platform import initialize
from ebcli.lib import aws
from ebcli.objects.platform import PlatformVersion
class TestInitialize(unittest.TestCase):
platform = PlatformVersion(
'arn:aws:elasticbeanstalk:us-west-2::platform/PHP 7.1 running on 64bit Amazon Linux/2.6.5'
)
def setUp(self):
self.root_dir = os.getcwd()
if not os.path.exists('testDir'):
os.mkdir('testDir')
aws.set_region(None)
os.chdir('testDir')
def tearDown(self):
os.chdir(self.root_dir)
shutil.rmtree('testDir', ignore_errors=True)
class TestEBPlatform(TestInitialize):
def test_init__attempt_to_init_inside_application_workspace(self):
fileoperations.create_config_file(
'my-application',
'us-west-2',
'php',
)
app = EB(argv=['platform', 'init'])
app.setup()
with self.assertRaises(EnvironmentError) as context_manager:
app.run()
self.assertEqual(
'This directory is already initialized with an application workspace.',
str(context_manager.exception)
)
@mock.patch('ebcli.controllers.platform.initialize.platformops.set_workspace_to_latest')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.write_keyname')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.touch_config_folder')
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_region_from_inputs')
@mock.patch('ebcli.controllers.platform.initialize.commonops.set_up_credentials')
@mock.patch('ebcli.controllers.platform.initialize.aws.set_region')
@mock.patch('ebcli.controllers.platform.initialize.initializeops.setup')
@mock.patch('ebcli.controllers.platform.initialize.get_platform_name_and_version')
def test_init__non_interactive_mode(
self,
get_platform_name_and_version_mock,
setup_mock,
set_region_mock,
set_up_credentials_mock,
get_region_from_inputs_mock,
touch_config_folder_mock,
write_keyname_mock,
set_workspace_to_latest_mock
):
get_region_from_inputs_mock.return_value = 'us-west-2'
get_platform_name_and_version_mock.return_value = ('my-custom-platform', None)
set_up_credentials_mock.return_value = 'us-west-2'
app = EB(argv=['platform', 'init', 'my-custom-platform'])
app.setup()
app.run()
set_region_mock.assert_has_calls([mock.call(None), mock.call('us-west-2')])
set_up_credentials_mock.assert_called_once_with(None, 'us-west-2', False)
setup_mock.assert_called_once_with(
'Custom Platform Builder',
'us-west-2',
None,
platform_name='my-custom-platform',
platform_version=None,
workspace_type='Platform'
)
touch_config_folder_mock.assert_called_once_with()
write_keyname_mock.assert_called_once_with(None)
set_workspace_to_latest_mock.assert_called_once_with()
@mock.patch('ebcli.controllers.platform.initialize.platformops.set_workspace_to_latest')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.write_keyname')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.touch_config_folder')
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_region_from_inputs')
@mock.patch('ebcli.controllers.platform.initialize.commonops.set_up_credentials')
@mock.patch('ebcli.controllers.platform.initialize.aws.set_region')
@mock.patch('ebcli.controllers.platform.initialize.initializeops.setup')
@mock.patch('ebcli.controllers.platform.initialize.get_platform_name_and_version')
def test_init__non_interactive_mode__keyname_specified(
self,
get_platform_name_and_version_mock,
setup_mock,
set_region_mock,
set_up_credentials_mock,
get_region_from_inputs_mock,
touch_config_folder_mock,
write_keyname_mock,
set_workspace_to_latest_mock
):
get_region_from_inputs_mock.return_value = 'us-west-2'
get_platform_name_and_version_mock.return_value = ('my-custom-platform', '1.0.3')
set_up_credentials_mock.return_value = 'us-west-2'
app = EB(argv=['platform', 'init', 'my-custom-platform', '-k', 'keyname'])
app.setup()
app.run()
set_region_mock.assert_has_calls([mock.call(None), mock.call('us-west-2')])
set_up_credentials_mock.assert_called_once_with(None, 'us-west-2', False)
setup_mock.assert_called_once_with(
'Custom Platform Builder',
'us-west-2',
None,
platform_name='my-custom-platform',
platform_version='1.0.3',
workspace_type='Platform'
)
touch_config_folder_mock.assert_called_once_with()
write_keyname_mock.assert_called_once_with('keyname')
set_workspace_to_latest_mock.assert_not_called()
@mock.patch('ebcli.controllers.platform.initialize.platformops.set_workspace_to_latest')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.write_keyname')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.touch_config_folder')
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_region')
@mock.patch('ebcli.controllers.platform.initialize.commonops.set_up_credentials')
@mock.patch('ebcli.controllers.platform.initialize.aws.set_region')
@mock.patch('ebcli.controllers.platform.initialize.initializeops.setup')
@mock.patch('ebcli.controllers.platform.initialize.get_platform_name_and_version')
@mock.patch('ebcli.controllers.platform.initialize.get_keyname')
def test_init__force_interactive_mode_by_not_specifying_the_platform(
self,
get_keyname_mock,
get_platform_name_and_version_mock,
setup_mock,
set_region_mock,
set_up_credentials_mock,
get_region_mock,
touch_config_folder_mock,
write_keyname_mock,
set_workspace_to_latest_mock
):
get_region_mock.return_value = 'us-west-2'
get_platform_name_and_version_mock.return_value = ('my-custom-platform', '1.0.3')
set_up_credentials_mock.return_value = 'us-west-2'
get_keyname_mock.return_value = 'keyname'
app = EB(argv=['platform', 'init'])
app.setup()
app.run()
set_region_mock.assert_has_calls([mock.call(None), mock.call('us-west-2')])
set_up_credentials_mock.assert_called_once_with(None, 'us-west-2', True)
setup_mock.assert_called_once_with(
'Custom Platform Builder',
'us-west-2',
None,
platform_name='my-custom-platform',
platform_version='1.0.3',
workspace_type='Platform'
)
get_region_mock.assert_called_once_with(None, True)
touch_config_folder_mock.assert_called_once_with()
write_keyname_mock.assert_called_once_with('keyname')
set_workspace_to_latest_mock.assert_not_called()
@mock.patch('ebcli.controllers.platform.initialize.platformops.set_workspace_to_latest')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.write_keyname')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.touch_config_folder')
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_region')
@mock.patch('ebcli.controllers.platform.initialize.commonops.set_up_credentials')
@mock.patch('ebcli.controllers.platform.initialize.aws.set_region')
@mock.patch('ebcli.controllers.platform.initialize.initializeops.setup')
@mock.patch('ebcli.controllers.platform.initialize.get_platform_name_and_version')
@mock.patch('ebcli.controllers.platform.initialize.get_keyname')
def test_init__force_interactive_mode_by_passing_interactive_argument(
self,
get_keyname_mock,
get_platform_name_and_version_mock,
setup_mock,
set_region_mock,
set_up_credentials_mock,
get_region_mock,
touch_config_folder_mock,
write_keyname_mock,
set_workspace_to_latest_mock
):
get_region_mock.return_value = 'us-west-2'
get_platform_name_and_version_mock.return_value = ('my-custom-platform', '1.0.3')
set_up_credentials_mock.return_value = 'us-west-2'
get_keyname_mock.return_value = 'keyname'
app = EB(argv=['platform', 'init', 'my-custom-platform', '-i'])
app.setup()
app.run()
set_region_mock.assert_has_calls([mock.call(None), mock.call('us-west-2')])
set_up_credentials_mock.assert_called_once_with(None, 'us-west-2', True)
setup_mock.assert_called_once_with(
'Custom Platform Builder',
'us-west-2',
None,
platform_name='my-custom-platform',
platform_version='1.0.3',
workspace_type='Platform'
)
get_region_mock.assert_called_once_with(None, True)
touch_config_folder_mock.assert_called_once_with()
write_keyname_mock.assert_called_once_with('keyname')
set_workspace_to_latest_mock.assert_not_called()
@mock.patch('ebcli.controllers.platform.initialize.platformops.set_workspace_to_latest')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.write_keyname')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.touch_config_folder')
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_region')
@mock.patch('ebcli.controllers.platform.initialize.commonops.set_up_credentials')
@mock.patch('ebcli.controllers.platform.initialize.aws.set_region')
@mock.patch('ebcli.controllers.platform.initialize.initializeops.setup')
@mock.patch('ebcli.controllers.platform.initialize.get_platform_name_and_version')
@mock.patch('ebcli.controllers.platform.initialize.get_keyname')
def test_init__force_interactive_mode_by_passing_interactive_argument_and_omitting_platform_argument(
self,
get_keyname_mock,
get_platform_name_and_version_mock,
setup_mock,
set_region_mock,
set_up_credentials_mock,
get_region_mock,
touch_config_folder_mock,
write_keyname_mock,
set_workspace_to_latest_mock
):
get_region_mock.return_value = 'us-west-2'
get_platform_name_and_version_mock.return_value = ('my-custom-platform', '1.0.3')
set_up_credentials_mock.return_value = 'us-west-2'
get_keyname_mock.return_value = 'keyname'
app = EB(argv=['platform', 'init', '-i'])
app.setup()
app.run()
set_region_mock.assert_has_calls([mock.call(None), mock.call('us-west-2')])
set_up_credentials_mock.assert_called_once_with(None, 'us-west-2', True)
setup_mock.assert_called_once_with(
'Custom Platform Builder',
'us-west-2',
None,
platform_name='my-custom-platform',
platform_version='1.0.3',
workspace_type='Platform'
)
get_region_mock.assert_called_once_with(None, True)
touch_config_folder_mock.assert_called_once_with()
write_keyname_mock.assert_called_once_with('keyname')
set_workspace_to_latest_mock.assert_not_called()
@mock.patch('ebcli.controllers.platform.initialize.platformops.set_workspace_to_latest')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.write_keyname')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.touch_config_folder')
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_region')
@mock.patch('ebcli.controllers.platform.initialize.commonops.set_up_credentials')
@mock.patch('ebcli.controllers.platform.initialize.aws.set_region')
@mock.patch('ebcli.controllers.platform.initialize.initializeops.setup')
@mock.patch('ebcli.controllers.platform.initialize.get_platform_name_and_version')
@mock.patch('ebcli.controllers.platform.initialize.get_keyname')
def test_init__interactive_mode__pass_keyname_in_interactive(
self,
get_keyname_mock,
get_platform_name_and_version_mock,
setup_mock,
set_region_mock,
set_up_credentials_mock,
get_region_mock,
touch_config_folder_mock,
write_keyname_mock,
set_workspace_to_latest_mock
):
get_region_mock.return_value = 'us-west-2'
get_platform_name_and_version_mock.return_value = ('my-custom-platform', '1.0.3')
set_up_credentials_mock.return_value = 'us-west-2'
app = EB(argv=['platform', 'init', '-k', 'keyname'])
app.setup()
app.run()
set_region_mock.assert_has_calls([mock.call(None), mock.call('us-west-2')])
set_up_credentials_mock.assert_called_once_with(None, 'us-west-2', True)
setup_mock.assert_called_once_with(
'Custom Platform Builder',
'us-west-2',
None,
platform_name='my-custom-platform',
platform_version='1.0.3',
workspace_type='Platform'
)
get_region_mock.assert_called_once_with(None, True)
touch_config_folder_mock.assert_called_once_with()
write_keyname_mock.assert_called_once_with('keyname')
set_workspace_to_latest_mock.assert_not_called()
get_keyname_mock.assert_not_called()
class TestEBP(TestInitialize):
def test_init__attempt_to_init_inside_application_workspace(self):
fileoperations.create_config_file(
'my-application',
'us-west-2',
'php',
)
app = EB(argv=['platform', 'init'])
app.setup()
with self.assertRaises(EnvironmentError) as context_manager:
app.run()
self.assertEqual(
'This directory is already initialized with an application workspace.',
str(context_manager.exception)
)
@mock.patch('ebcli.controllers.platform.initialize.platformops.set_workspace_to_latest')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.write_keyname')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.touch_config_folder')
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_region_from_inputs')
@mock.patch('ebcli.controllers.platform.initialize.commonops.set_up_credentials')
@mock.patch('ebcli.controllers.platform.initialize.aws.set_region')
@mock.patch('ebcli.controllers.platform.initialize.initializeops.setup')
@mock.patch('ebcli.controllers.platform.initialize.get_platform_name_and_version')
def test_init__non_interactive_mode(
self,
get_platform_name_and_version_mock,
setup_mock,
set_region_mock,
set_up_credentials_mock,
get_region_from_inputs_mock,
touch_config_folder_mock,
write_keyname_mock,
set_workspace_to_latest_mock
):
get_region_from_inputs_mock.return_value = 'us-west-2'
get_platform_name_and_version_mock.return_value = ('my-custom-platform', None)
set_up_credentials_mock.return_value = 'us-west-2'
app = EBP(argv=['init', 'my-custom-platform'])
app.setup()
app.run()
set_region_mock.assert_has_calls([mock.call(None), mock.call('us-west-2')])
set_up_credentials_mock.assert_called_once_with(None, 'us-west-2', False)
setup_mock.assert_called_once_with(
'Custom Platform Builder',
'us-west-2',
None,
platform_name='my-custom-platform',
platform_version=None,
workspace_type='Platform'
)
touch_config_folder_mock.assert_called_once_with()
write_keyname_mock.assert_called_once_with(None)
set_workspace_to_latest_mock.assert_called_once_with()
@mock.patch('ebcli.controllers.platform.initialize.platformops.set_workspace_to_latest')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.write_keyname')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.touch_config_folder')
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_region_from_inputs')
@mock.patch('ebcli.controllers.platform.initialize.commonops.set_up_credentials')
@mock.patch('ebcli.controllers.platform.initialize.aws.set_region')
@mock.patch('ebcli.controllers.platform.initialize.initializeops.setup')
@mock.patch('ebcli.controllers.platform.initialize.get_platform_name_and_version')
def test_init__non_interactive_mode__keyname_specified(
self,
get_platform_name_and_version_mock,
setup_mock,
set_region_mock,
set_up_credentials_mock,
get_region_from_inputs_mock,
touch_config_folder_mock,
write_keyname_mock,
set_workspace_to_latest_mock
):
get_region_from_inputs_mock.return_value = 'us-west-2'
get_platform_name_and_version_mock.return_value = ('my-custom-platform', '1.0.3')
set_up_credentials_mock.return_value = 'us-west-2'
app = EBP(argv=['init', 'my-custom-platform', '-k', 'keyname'])
app.setup()
app.run()
set_region_mock.assert_has_calls([mock.call(None), mock.call('us-west-2')])
set_up_credentials_mock.assert_called_once_with(None, 'us-west-2', False)
setup_mock.assert_called_once_with(
'Custom Platform Builder',
'us-west-2',
None,
platform_name='my-custom-platform',
platform_version='1.0.3',
workspace_type='Platform'
)
touch_config_folder_mock.assert_called_once_with()
write_keyname_mock.assert_called_once_with('keyname')
set_workspace_to_latest_mock.assert_not_called()
@mock.patch('ebcli.controllers.platform.initialize.platformops.set_workspace_to_latest')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.write_keyname')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.touch_config_folder')
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_region')
@mock.patch('ebcli.controllers.platform.initialize.commonops.set_up_credentials')
@mock.patch('ebcli.controllers.platform.initialize.aws.set_region')
@mock.patch('ebcli.controllers.platform.initialize.initializeops.setup')
@mock.patch('ebcli.controllers.platform.initialize.get_platform_name_and_version')
@mock.patch('ebcli.controllers.platform.initialize.get_keyname')
def test_init__force_interactive_mode_by_not_specifying_the_platform(
self,
get_keyname_mock,
get_platform_name_and_version_mock,
setup_mock,
set_region_mock,
set_up_credentials_mock,
get_region_mock,
touch_config_folder_mock,
write_keyname_mock,
set_workspace_to_latest_mock
):
get_region_mock.return_value = 'us-west-2'
get_platform_name_and_version_mock.return_value = ('my-custom-platform', '1.0.3')
set_up_credentials_mock.return_value = 'us-west-2'
get_keyname_mock.return_value = 'keyname'
app = EBP(argv=['init'])
app.setup()
app.run()
set_region_mock.assert_has_calls([mock.call(None), mock.call('us-west-2')])
set_up_credentials_mock.assert_called_once_with(None, 'us-west-2', True)
setup_mock.assert_called_once_with(
'Custom Platform Builder',
'us-west-2',
None,
platform_name='my-custom-platform',
platform_version='1.0.3',
workspace_type='Platform'
)
get_region_mock.assert_called_once_with(None, True)
touch_config_folder_mock.assert_called_once_with()
write_keyname_mock.assert_called_once_with('keyname')
set_workspace_to_latest_mock.assert_not_called()
@mock.patch('ebcli.controllers.platform.initialize.platformops.set_workspace_to_latest')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.write_keyname')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.touch_config_folder')
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_region')
@mock.patch('ebcli.controllers.platform.initialize.commonops.set_up_credentials')
@mock.patch('ebcli.controllers.platform.initialize.aws.set_region')
@mock.patch('ebcli.controllers.platform.initialize.initializeops.setup')
@mock.patch('ebcli.controllers.platform.initialize.get_platform_name_and_version')
@mock.patch('ebcli.controllers.platform.initialize.get_keyname')
def test_init__force_interactive_mode_by_passing_interactive_argument(
self,
get_keyname_mock,
get_platform_name_and_version_mock,
setup_mock,
set_region_mock,
set_up_credentials_mock,
get_region_mock,
touch_config_folder_mock,
write_keyname_mock,
set_workspace_to_latest_mock
):
get_region_mock.return_value = 'us-west-2'
get_platform_name_and_version_mock.return_value = ('my-custom-platform', '1.0.3')
set_up_credentials_mock.return_value = 'us-west-2'
get_keyname_mock.return_value = 'keyname'
app = EBP(argv=['init', 'my-custom-platform', '-i'])
app.setup()
app.run()
set_region_mock.assert_has_calls([mock.call(None), mock.call('us-west-2')])
set_up_credentials_mock.assert_called_once_with(None, 'us-west-2', True)
setup_mock.assert_called_once_with(
'Custom Platform Builder',
'us-west-2',
None,
platform_name='my-custom-platform',
platform_version='1.0.3',
workspace_type='Platform'
)
get_region_mock.assert_called_once_with(None, True)
touch_config_folder_mock.assert_called_once_with()
write_keyname_mock.assert_called_once_with('keyname')
set_workspace_to_latest_mock.assert_not_called()
@mock.patch('ebcli.controllers.platform.initialize.platformops.set_workspace_to_latest')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.write_keyname')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.touch_config_folder')
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_region')
@mock.patch('ebcli.controllers.platform.initialize.commonops.set_up_credentials')
@mock.patch('ebcli.controllers.platform.initialize.aws.set_region')
@mock.patch('ebcli.controllers.platform.initialize.initializeops.setup')
@mock.patch('ebcli.controllers.platform.initialize.get_platform_name_and_version')
@mock.patch('ebcli.controllers.platform.initialize.get_keyname')
def test_init__force_interactive_mode_by_passing_interactive_argument_and_omitting_platform_argument(
self,
get_keyname_mock,
get_platform_name_and_version_mock,
setup_mock,
set_region_mock,
set_up_credentials_mock,
get_region_mock,
touch_config_folder_mock,
write_keyname_mock,
set_workspace_to_latest_mock
):
get_region_mock.return_value = 'us-west-2'
get_platform_name_and_version_mock.return_value = ('my-custom-platform', '1.0.3')
set_up_credentials_mock.return_value = 'us-west-2'
get_keyname_mock.return_value = 'keyname'
app = EBP(argv=['init', '-i'])
app.setup()
app.run()
set_region_mock.assert_has_calls([mock.call(None), mock.call('us-west-2')])
set_up_credentials_mock.assert_called_once_with(None, 'us-west-2', True)
setup_mock.assert_called_once_with(
'Custom Platform Builder',
'us-west-2',
None,
platform_name='my-custom-platform',
platform_version='1.0.3',
workspace_type='Platform'
)
get_region_mock.assert_called_once_with(None, True)
touch_config_folder_mock.assert_called_once_with()
write_keyname_mock.assert_called_once_with('keyname')
set_workspace_to_latest_mock.assert_not_called()
@mock.patch('ebcli.controllers.platform.initialize.platformops.set_workspace_to_latest')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.write_keyname')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.touch_config_folder')
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_region')
@mock.patch('ebcli.controllers.platform.initialize.commonops.set_up_credentials')
@mock.patch('ebcli.controllers.platform.initialize.aws.set_region')
@mock.patch('ebcli.controllers.platform.initialize.initializeops.setup')
@mock.patch('ebcli.controllers.platform.initialize.get_platform_name_and_version')
@mock.patch('ebcli.controllers.platform.initialize.get_keyname')
def test_init__interactive_mode__pass_keyname_in_interactive(
self,
get_keyname_mock,
get_platform_name_and_version_mock,
setup_mock,
set_region_mock,
set_up_credentials_mock,
get_region_mock,
touch_config_folder_mock,
write_keyname_mock,
set_workspace_to_latest_mock
):
get_region_mock.return_value = 'us-west-2'
get_platform_name_and_version_mock.return_value = ('my-custom-platform', '1.0.3')
set_up_credentials_mock.return_value = 'us-west-2'
app = EBP(argv=['init', '-k', 'keyname'])
app.setup()
app.run()
set_region_mock.assert_has_calls([mock.call(None), mock.call('us-west-2')])
set_up_credentials_mock.assert_called_once_with(None, 'us-west-2', True)
setup_mock.assert_called_once_with(
'Custom Platform Builder',
'us-west-2',
None,
platform_name='my-custom-platform',
platform_version='1.0.3',
workspace_type='Platform'
)
get_region_mock.assert_called_once_with(None, True)
touch_config_folder_mock.assert_called_once_with()
write_keyname_mock.assert_called_once_with('keyname')
set_workspace_to_latest_mock.assert_not_called()
get_keyname_mock.assert_not_called()
class TestGenericPlatformInitController(unittest.TestCase):
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_default_keyname')
@mock.patch('ebcli.controllers.platform.initialize.sshops.prompt_for_ec2_keyname')
def test_get_keyname__found_default_keyname(
self,
prompt_for_ec2_keyname_mock,
get_default_keyname_mock
):
get_default_keyname_mock.return_value = 'keyname'
self.assertEqual(
'keyname',
initialize.get_keyname()
)
prompt_for_ec2_keyname_mock.assert_not_called()
@mock.patch('ebcli.controllers.platform.initialize.commonops.get_default_keyname')
@mock.patch('ebcli.controllers.platform.initialize.sshops.prompt_for_ec2_keyname')
def test_get_keyname__could_not_find_default_keyname(
self,
prompt_for_ec2_keyname_mock,
get_default_keyname_mock
):
get_default_keyname_mock.return_value = None
prompt_for_ec2_keyname_mock.return_value = 'keyname'
self.assertEqual(
'keyname',
initialize.get_keyname()
)
prompt_for_ec2_keyname_mock.assert_called_once_with(
message='Would you like to be able to log into your platform packer environment?'
)
@mock.patch('ebcli.controllers.platform.initialize.platformops.get_platform_name_and_version_interactive')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.get_platform_name')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.get_platform_version')
def test_get_platform_name_and_version__platform_name_specified__non_interactive_flow(
self,
get_platform_version_mock,
get_platform_name_mock,
get_platform_name_and_version_interactive_mock
):
self.assertEqual(
('my-custom-platform', None),
initialize.get_platform_name_and_version('my-custom-platform')
)
get_platform_version_mock.assert_not_called()
get_platform_name_mock.assert_not_called()
get_platform_name_and_version_interactive_mock.assert_not_called()
@mock.patch('ebcli.controllers.platform.initialize.platformops.get_platform_name_and_version_interactive')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.get_platform_name')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.get_platform_version')
def test_get_platform_name_and_version__platform_name_not_specified__force_interactive_flow__default_platform_found(
self,
get_platform_version_mock,
get_platform_name_mock,
get_platform_name_and_version_interactive_mock
):
get_platform_name_mock.return_value = 'my-custom-platform'
get_platform_version_mock.return_value = '1.0.3'
self.assertEqual(
('my-custom-platform', '1.0.3'),
initialize.get_platform_name_and_version(None)
)
get_platform_name_mock.assert_called_once_with(default=None)
get_platform_version_mock.assert_called_once_with()
get_platform_name_and_version_interactive_mock.assert_not_called()
@mock.patch('ebcli.controllers.platform.initialize.platformops.get_platform_name_and_version_interactive')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.get_platform_name')
@mock.patch('ebcli.controllers.platform.initialize.fileoperations.get_platform_version')
def test_get_platform_name_and_version__platform_name_not_specified__default_platform_not_found__customer_prompted_for_paltform_name(
self,
get_platform_version_mock,
get_platform_name_mock,
get_platform_name_and_version_interactive_mock
):
get_platform_name_mock.side_effect = initialize.NotInitializedError
get_platform_name_and_version_interactive_mock.return_value = ('my-custom-platform', '1.0.3')
self.assertEqual(
('my-custom-platform', '1.0.3'),
initialize.get_platform_name_and_version(None)
)
get_platform_name_mock.assert_called_once_with(default=None)
get_platform_version_mock.assert_not_called()
get_platform_name_and_version_interactive_mock.assert_called_once_with()
|
from pprint import pprint
import logging
from .shared import get_model
from hubit import clear_hubit_cache
# logging.basicConfig(level=logging.INFO)
def model_0():
"""Send entire car object to worker"""
print(f"\n***MODEL 0***")
hmodel = get_model("model0.yml")
query = ["cars[0].price", "cars[1].price", "cars[2].price"]
response = hmodel.get(query, use_multi_processing=False)
print(response)
print(hmodel.get_results().as_dict())
def model_1():
"""Run model 1 and illustrate worker-level caching
input.yml shows that cars[0] and cars[2] are identical so the effect
of worker caching on the execution time can be seen. The execution time
with worker caching is expected to be ~2/3 of the execution time without
    worker caching (the calculation for one car can be reused).
"""
print(f"\n***MODEL 1***")
hmodel = get_model("model1.yml")
query = ["cars[0].price", "cars[1].price", "cars[2].price"]
# With worker caching
hmodel.set_component_caching(True)
response = hmodel.get(query, use_multi_processing=False)
# Without worker caching
hmodel.set_component_caching(False)
response = hmodel.get(query, use_multi_processing=False)
results = hmodel.get_results()
print("results", results)
results_dict = results.as_dict()
print("results_dict", results_dict)
results_inflated = results.inflate()
print("results_inflated", results_inflated)
print(response)
elapsed_times = hmodel.log().get_all("elapsed_time")
print(f"Time WITH worker caching: {elapsed_times[1]:.1f} s. ")
print(f"Time WITHOUT worker caching: {elapsed_times[0]:.1f} s. ")
print(hmodel.log())
def model_2():
"""Run model 2 and illustrate model-level caching"""
print(f"\n***MODEL 2***")
model_caching_mode = "after_execution"
# model_caching_mode = "incremental"
# model_caching_mode = "never"
clear_hubit_cache()
hmodel = get_model("model2.yml")
hmodel.set_model_caching(model_caching_mode)
query = [
"cars[:].parts[:].price", # price for all components for all cars
"cars[:].price", # price for all cars
]
response = hmodel.get(query, use_results="cached")
response = hmodel.get(query, use_results="cached")
pprint(response)
elapsed_times = hmodel.log().get_all("elapsed_time")
print(f"\nTime WITHOUT cached results on model: {elapsed_times[1]:.1f} s.")
print(f"Time WITH cached results on model: {elapsed_times[0]:.1f} s.")
print(hmodel.log())
def model_3():
"""Run model 3"""
print(f"\n***MODEL 3***")
hmodel = get_model("model3.yml")
query = ["cars[:].price"] # price for all cars
response = hmodel.get(query)
print(f"{response}")
print(hmodel.log())
def model_2_component_cache():
    """Run model 2 and illustrate component-level caching"""
print(f"\n***MODEL 2***")
use_multi_processing = False
hmodel = get_model("model2.yml")
query = [
"cars[:].parts[:].price", # price for all components for all cars
"cars[:].price", # price for all cars
]
component_caching_levels = False, True
for component_caching in component_caching_levels:
hmodel.set_component_caching(component_caching)
hmodel.get(query, use_multi_processing=use_multi_processing)
elapsed_times = reversed(hmodel.log().get_all("elapsed_time"))
for elapsed_time, component_caching in zip(elapsed_times, component_caching_levels):
print(f"Component caching is {component_caching}: {elapsed_time:.1f} s.")
print(hmodel.log())
model_0()
model_1()
model_2()
model_3()
model_2_component_cache()
|
import tensorrt as trt
import torch
from torch2trt_dynamic.module_test import add_module_test
from torch2trt_dynamic.plugins import create_adaptivepool_plugin
from torch2trt_dynamic.torch2trt_dynamic import (get_arg, tensorrt_converter,
trt_)
@tensorrt_converter('torch.nn.functional.adaptive_max_pool2d')
def convert_adaptive_max_pool2d(ctx):
input = ctx.method_args[0]
output_size = get_arg(ctx, 'output_size', pos=1, default=0)
output = ctx.method_return
input_trt = trt_(ctx.network, input)
if isinstance(output_size, int):
output_size = (output_size, output_size)
output_size = tuple([-1 if not o else o for o in output_size])
if output_size[0] == 1 and output_size[1] == 1:
# use reduce as max pool2d
shape_length = len(input.shape)
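        # TensorRT reduce axes form a bitmask over dimensions; setting the bits
        # for the last two dimensions selects the spatial axes (H and W for NCHW).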
axes = (1 << (shape_length - 1)) + (1 << (shape_length - 2))
keepdim = True
layer = ctx.network.add_reduce(input_trt, trt.ReduceOperation.MAX,
axes, keepdim)
output._trt = layer.get_output(0)
else:
plugin = create_adaptivepool_plugin(
'adaptive_max_pool2d_' + str(id(input)),
output_size=output_size,
pooling_type=trt.PoolingType.MAX)
layer = ctx.network.add_plugin_v2(inputs=[input_trt], plugin=plugin)
output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_adaptive_max_pool2d_1x1():
return torch.nn.AdaptiveMaxPool2d((1, 1))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_adaptive_max_pool2d_2x2():
return torch.nn.AdaptiveMaxPool2d((2, 2))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_adaptive_max_pool2d_3x3():
return torch.nn.AdaptiveMaxPool2d((3, 3))
|
#!/usr/bin/env python3
# coding:utf-8
profit = int(input("Please enter current month profit: "))
bonus = 0
gear1 = 100000 * 0.1 # Tier 1: maximum bonus for profit up to 100k
gear2 = gear1 + ( 200000-100000) * 0.075 # Tier 2: maximum bonus for profit up to 200k
gear3 = gear2 + ( 400000-200000) * 0.05 # Tier 3: maximum bonus for profit up to 400k
gear4 = gear3 + ( 600000-400000) * 0.03 # Tier 4: maximum bonus for profit up to 600k
gear5 = gear4 + (1000000-600000) * 0.015 # Tier 5: maximum bonus for profit up to 1,000k
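# Worked example of the tiers above: a profit of 150,000 falls in the second
# tier, so bonus = gear1 + (150000 - 100000) * 0.075 = 10000 + 3750 = 13750.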
if profit > 1000000:
bonus += gear5 + (profit-1000000) * 0.01
elif profit > 600000:
bonus += gear4 + (profit- 600000) * 0.015
elif profit > 400000:
bonus += gear3 + (profit- 400000) * 0.03
elif profit > 200000:
bonus += gear2 + (profit- 200000) * 0.05
elif profit > 100000:
bonus += gear1 + (profit- 100000) * 0.075
elif profit > 0:
    bonus += profit * 0.1
else:
print("Input error, please input again.")
print("\nbonus = {}".format(bonus))
|
# Python - 3.6.0
Test.it('Basic tests')
Test.assert_equals(solve([3, 4, 4, 3, 6, 3]), [4, 6, 3])
Test.assert_equals(solve([1, 2, 1, 2, 1, 2, 3]), [1, 2, 3])
Test.assert_equals(solve([1, 2, 3, 4]), [1, 2, 3, 4])
Test.assert_equals(solve([1, 1, 4, 5, 1, 2, 1]), [4, 5, 2, 1])
|
from django.apps import AppConfig
class TractebelConfig(AppConfig):
name = 'tractebel'
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Implementation of the RNN based DrQA reader."""
import ipdb
import torch
import torch.nn as nn
from torch.nn import functional as F
from . import layers
#import layers
# ------------------------------------------------------------------------------
# Network
# ------------------------------------------------------------------------------
class RnnDocReader(nn.Module):
RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN}
def __init__(self, args, normalize=True):
super(RnnDocReader, self).__init__()
# Store config
self.args = args
self.char_embedding = nn.Embedding(args.char_vocab_size,
50,
padding_idx=0)
self.pos_embedding = nn.Embedding(args.pos_vocab_size, 50, padding_idx=0)
self.ner_embedding = nn.Embedding(args.ner_vocab_size, 20, padding_idx=0)
args.char_emb = 50
self.emb_rnn = nn.GRU(args.char_emb,
args.char_emb,
batch_first=True,
bidirectional=True)
# Word embeddings (+1 for padding)
self.embedding = nn.Embedding(args.vocab_size,
args.embedding_dim,
padding_idx=0)
self.embedding.weight.requires_grad=False
self.emb_attn = layers.SeqAttnMatch(args.embedding_dim)
# Input size to RNN: word emb + question emb + manual features
# RNN document encoder
pdim = args.embedding_dim * 2 + 50 * 2 + 50 + 20
qdim = args.embedding_dim + 50 * 2 + 50 + 20
#self.emb_hw = layers.Highway( 2, dim, gate_bias=-2)
self.enc_rnn_p = layers.StackedBRNN(
input_size=pdim,
hidden_size=128,
num_layers=2,
dropout_rate=0,
dropout_output=False,
concat_layers=True,
rnn_type=nn.GRU,
padding=True,
)
self.enc_rnn_q = layers.StackedBRNN(
input_size=qdim,
hidden_size=128,
num_layers=2,
dropout_rate=0,
dropout_output=False,
concat_layers=True,
rnn_type=nn.GRU,
padding=True,
)
self.enc_rnn_qu = layers.StackedBRNN(
input_size=512,
hidden_size=128,
num_layers=1,
dropout_rate=0.2,
dropout_output=True,
concat_layers=False,
rnn_type=nn.GRU,
padding=True,
)
full_size = 300 + 100 + 256 + 256
hidden_size = 256 * 3
num_level = 3
self.full_attn = layers.FullAttention(full_size, hidden_size, num_level)
self.enc_rnn_fusion = layers.StackedBRNN(
input_size=256 * 5,
hidden_size=128,
num_layers=1,
dropout_rate=0.2,
dropout_output=True,
concat_layers=False,
rnn_type=nn.GRU,
padding=True,
)
full_size = 300 + 100 + 256 * 2 + 256 * 3 + 256
hidden_size = 256
num_level = 1
self.self_full_attn = layers.FullAttention(full_size, hidden_size, num_level)
self.enc_rnn_final = layers.StackedBRNN(
input_size=256 * 2,
hidden_size=128,
num_layers=1,
dropout_rate=0.2,
dropout_output=True,
concat_layers=False,
rnn_type=nn.GRU,
padding=True,
)
self.q_agg = layers.LinearSeqAttn(256)
self.start_attn = layers.BilinearSeqAttn(256)
self.end_attn = layers.BilinearSeqAttn(256)
def forward( self, x1, x1_pos, x1_ner, x1_c, x1_mask, x2, x2_pos, x2_ner, x2_c, x2_mask, x1_f ):
"""
Args:
x1 = document word indices [batch * len_d]
x1_p = document pos indices [batch * len_d]
x1_e = document ner indices [batch * len_d]
x1_c = document char indices [batch * len_d * word_len]
x1_f = document features [batch * len_d * num_features]
x1_mask = document padding mask [batch * len_d]
x2 = question word indices [batch * len_q]
x2_p = question pos indices [batch * len_q]
x2_e = question ner indices [batch * len_q]
x2_c = question char indices [batch * len_q * word_len]
x2_mask = question padding mask [batch * len_q]
"""
# Embed char of doc and question
b, sl, wl = x1_c.size()
x1_c_emb = self.char_embedding(x1_c.view(b * sl, wl))
_ , x1_c_emb = self.emb_rnn(x1_c_emb)
x1_c_emb = torch.cat(list(x1_c_emb), dim=1).view(b, sl, -1)
b, sl, wl = x2_c.size()
x2_c_emb = self.char_embedding(x2_c.view(b*sl, wl))
_, x2_c_emb = self.emb_rnn(x2_c_emb)
x2_c_emb = torch.cat(list(x2_c_emb), dim=1).view(b, sl, -1)
# Embed both document and question
x1_emb = self.embedding(x1)
x2_emb = self.embedding(x2)
# Embed pos and ner
x1_pos_emb = self.pos_embedding(x1_pos)
x2_pos_emb = self.pos_embedding(x2_pos)
x1_ner_emb = self.ner_embedding(x1_ner)
x2_ner_emb = self.ner_embedding(x2_ner)
x1_attn_emb = self.emb_attn(x1_emb, x2_emb, x2_mask)
# concatenate the embedding from char and word
x1 = torch.cat([x1_c_emb, x1_emb, x1_pos_emb, x1_ner_emb, x1_attn_emb], dim=2)
x2 = torch.cat([x2_c_emb, x2_emb, x2_pos_emb, x2_ner_emb], dim=2)
# Drop word
#x1 = F.dropout2d(x1.unsqueeze(3), p=0.1, training=self.training).squeeze(3)
#x2 = F.dropout2d(x2.unsqueeze(3), p=0.1, training=self.training).squeeze(3)
# Drop some dimension in word dim
x1 = layers.dropout( x1, p=0.2, training=self.training)
x2 = layers.dropout( x2, p=0.2, training=self.training)
# preprocess
x1_h = self.enc_rnn_p(x1, x1_mask)
x2_h = self.enc_rnn_q(x2, x2_mask)
x1_h = layers.dropout( x1_h, p=0.2, training=self.training)
x2_h = layers.dropout( x2_h, p=0.2, training=self.training)
# preprocess
x2_u = self.enc_rnn_qu(x2_h, x2_mask)
x2_u = layers.dropout(x2_u, p=0.2, training=self.training)
# inter-attention
x1_HoW = torch.cat([x1_emb, x1_c_emb, x1_h], dim=2)
x2_HoW = torch.cat([x2_emb, x2_c_emb, x2_h], dim=2)
# dropout
x1_HoW = layers.dropout(x1_HoW, p=0.2, training=self.training)
x2_HoW = layers.dropout(x2_HoW, p=0.2, training=self.training)
x2_value = torch.cat([x2_h, x2_u], dim=2)
x1_attn = self.full_attn(x1_HoW, x2_HoW, x2_value, x2_mask)
# preprocess
x1_v = self.enc_rnn_fusion(torch.cat([x1_h,x1_attn], dim=2), x1_mask)
# intra-attention
X1_HoW_self = torch.cat([x1_emb, x1_c_emb, x1_h, x1_attn, x1_v], dim=2)
x1_attn_self = self.self_full_attn(X1_HoW_self, X1_HoW_self, x1_v, x1_mask)
x1_u = self.enc_rnn_final(torch.cat([x1_v, x1_attn_self], dim=2), x1_mask)
x2_vec = self.q_agg(x2_u,x2_mask)
start_scores = self.start_attn(x1_u, x2_vec, x1_mask)
end_scores = self.end_attn(x1_u, x2_vec, x1_mask)
return start_scores, end_scores
|
from turtle import *
def turtle_controller(do, val):
do = do.upper()
if do == 'F':
forward(val)
elif do == 'B':
backward(val)
elif do == 'R':
right(val)
elif do == 'L':
left(val)
elif do == 'U':
penup()
elif do == 'D':
pendown()
elif do == 'N':
reset()
else:
print('Command unrecognized:', do)
def string_artist(program):
cmd_list = program.split('-')
for command in cmd_list:
cmd_len = len(command)
if cmd_len == 0:
continue
cmd_type = command[0]
num = 0
if cmd_len > 1:
num_string = command[1:]
num = int(num_string)
print(command, ':', cmd_type, num)
turtle_controller(cmd_type, num)
instructions = '''Enter a program for the turtle:
e.g. N-F100-R45-F100-L45-D-F100-R90-B50
N = New drawing
U/D = Pen up/down
F100 = Forward 100
B50 = Backwards 50
R90 = Right turn 90 Deg
L45 = Left turn 45 Deg'''
screen = getscreen()
while True:
t_program = screen.textinput('Drawing Machine', instructions)
print(t_program)
    if t_program is None or t_program.upper() == 'END':
quit()
string_artist(t_program)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2017 Brant Faircloth || http://faircloth-lab.org/
All rights reserved.
This code is distributed under a 3-clause BSD license. Please see
LICENSE.txt for more information.
Created on 17 March 2017 15:15 CDT (-0500)
"""
import pdb
import sys
import gzip
import argparse
from Bio import SeqIO
from collections import Counter
def get_args():
parser = argparse.ArgumentParser(description='Parse fastqs from input based on tag sequence')
parser.add_argument('--input', nargs='?', default=sys.stdin)
parser.add_argument('--tag', required = True, dest = 'tag')
parser.add_argument('--output', nargs='?', default=sys.stdout)
return parser.parse_args()
def main():
args = get_args()
count = 0
with gzip.open(args.output, 'wb') as outfile:
with gzip.open(args.input, 'rb') as infile:
fastqs = SeqIO.parse(infile, 'fastq')
            for read in fastqs:
                read_tag = read.description.split(' ')[1].split(":")[-1]
                if read_tag == args.tag:
                    outfile.write(read.format('fastq'))
                    count += 1
                    if count % 1000000 == 0:
                        print(count)
#pdb.set_trace()
if __name__ == '__main__':
main()
|
from datetime import timedelta
import logging
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import STATE_UNAVAILABLE
from .entity import PanasonicBaseEntity
from .const import (
DOMAIN,
DEVICE_TYPE_AC,
DATA_CLIENT,
DATA_COORDINATOR,
DEVICE_CLASS_SWITCH,
LABEL_NANOE,
LABEL_ECONAVI,
LABEL_BUZZER,
LABEL_TURBO,
LABEL_CLIMATE_DRYER,
LABEL_CLIMATE_SLEEP,
LABEL_CLIMATE_CLEAN,
ICON_NANOE,
ICON_ECONAVI,
ICON_BUZZER,
ICON_TURBO,
ICON_SLEEP,
ICON_DRYER,
ICON_CLEAN,
)
_LOGGER = logging.getLogger(__package__)
async def async_setup_entry(hass, entry, async_add_entities) -> bool:
client = hass.data[DOMAIN][entry.entry_id][DATA_CLIENT]
coordinator = hass.data[DOMAIN][entry.entry_id][DATA_COORDINATOR]
devices = coordinator.data
commands = client.get_commands()
switches = []
for index, device in enumerate(devices):
device_type = int(device.get("DeviceType"))
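        # Pick this device's command list: the first entry whose ModelType
        # matches, then its JSON[0]["list"] payload.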
current_device_commands = [
command
for command in commands
if command["ModelType"] == device.get("ModelType")
][0]["JSON"][0]["list"]
command_types = list(
map(lambda c: c["CommandType"].lower(), current_device_commands)
)
if device_type == DEVICE_TYPE_AC:
if "0x08" in command_types:
switches.append(
PanasonicACNanoe(
coordinator,
index,
client,
device,
)
)
if "0x1b" in command_types:
switches.append(
PanasonicACEconavi(
coordinator,
index,
client,
device,
)
)
if "0x1e" in command_types:
switches.append(
PanasonicACBuzzer(
coordinator,
index,
client,
device,
)
)
if "0x1a" in command_types:
switches.append(
PanasonicACTurbo(
coordinator,
index,
client,
device,
)
)
if "0x05" in command_types:
switches.append(
PanasonicACSleepMode(
coordinator,
index,
client,
device,
)
)
if "0x17" in command_types:
switches.append(
PanasonicACDryer(
coordinator,
index,
client,
device,
)
)
if "0x18" in command_types:
switches.append(
PanasonicACSelfClean(
coordinator,
index,
client,
device,
)
)
async_add_entities(switches, True)
return True
class PanasonicACNanoe(PanasonicBaseEntity, SwitchEntity):
""" Panasonic AC nanoe switch """
@property
def available(self) -> bool:
status = self.coordinator.data[self.index]["status"]
_is_on_status = bool(int(status.get("0x00", 0)))
return _is_on_status
@property
def label(self):
return f"{self.nickname} {LABEL_NANOE}"
@property
def icon(self) -> str:
return ICON_NANOE
@property
def device_class(self) -> str:
return DEVICE_CLASS_SWITCH
@property
def is_on(self) -> int:
status = self.coordinator.data[self.index]["status"]
_nanoe_status = status.get("0x08")
        if _nanoe_status is None:
return STATE_UNAVAILABLE
_is_on = bool(int(_nanoe_status))
_LOGGER.debug(f"[{self.label}] is_on: {_is_on}")
return _is_on
async def async_turn_on(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning on nanoe")
await self.client.set_command(self.auth, 136, 1)
await self.coordinator.async_request_refresh()
async def async_turn_off(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning off nanoe")
await self.client.set_command(self.auth, 136, 0)
await self.coordinator.async_request_refresh()
class PanasonicACEconavi(PanasonicBaseEntity, SwitchEntity):
""" Panasonic AC ECONAVI switch """
@property
def available(self) -> bool:
status = self.coordinator.data[self.index]["status"]
_is_on_status = bool(int(status.get("0x00", 0)))
return _is_on_status
@property
def label(self):
return f"{self.nickname} {LABEL_ECONAVI}"
@property
def icon(self) -> str:
return ICON_ECONAVI
@property
def device_class(self) -> str:
return DEVICE_CLASS_SWITCH
@property
def is_on(self) -> int:
status = self.coordinator.data[self.index]["status"]
        _econavi_status = status.get("0x1B")
        if _econavi_status is None:
            return STATE_UNAVAILABLE
        _is_on = bool(int(_econavi_status))
_LOGGER.debug(f"[{self.label}] is_on: {_is_on}")
return _is_on
async def async_turn_on(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning on ECONAVI")
await self.client.set_command(self.auth, 155, 1)
await self.coordinator.async_request_refresh()
async def async_turn_off(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning off ECONAVI")
await self.client.set_command(self.auth, 155, 0)
await self.coordinator.async_request_refresh()
class PanasonicACBuzzer(PanasonicBaseEntity, SwitchEntity):
""" Panasonic AC buzzer switch """
@property
def available(self) -> bool:
status = self.coordinator.data[self.index]["status"]
_is_on_status = bool(int(status.get("0x00", 0)))
return _is_on_status
@property
def label(self):
return f"{self.nickname} {LABEL_BUZZER}"
@property
def icon(self) -> str:
return ICON_BUZZER
@property
def device_class(self) -> str:
return DEVICE_CLASS_SWITCH
@property
def is_on(self) -> int:
status = self.coordinator.data[self.index]["status"]
_buzzer_status = status.get("0x1E")
        if _buzzer_status is None:
return STATE_UNAVAILABLE
_is_on = not bool(int(_buzzer_status))
_LOGGER.debug(f"[{self.label}] is_on: {_is_on}")
return _is_on
async def async_turn_on(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning on buzzer")
await self.client.set_command(self.auth, 30, 0)
await self.coordinator.async_request_refresh()
async def async_turn_off(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning off buzzer")
await self.client.set_command(self.auth, 30, 1)
await self.coordinator.async_request_refresh()
class PanasonicACTurbo(PanasonicBaseEntity, SwitchEntity):
""" Panasonic AC turbo switch """
@property
def available(self) -> bool:
status = self.coordinator.data[self.index]["status"]
_is_on_status = bool(int(status.get("0x00", 0)))
return _is_on_status
@property
def label(self):
return f"{self.nickname} {LABEL_TURBO}"
@property
def icon(self) -> str:
return ICON_TURBO
@property
def device_class(self) -> str:
return DEVICE_CLASS_SWITCH
@property
def is_on(self) -> int:
status = self.coordinator.data[self.index]["status"]
_turbo_status = status.get("0x1A", None)
        if _turbo_status is None:
return STATE_UNAVAILABLE
_is_on = bool(int(_turbo_status))
_LOGGER.debug(f"[{self.label}] is_on: {_is_on}")
return _is_on
async def async_turn_on(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning on turbo mode")
await self.client.set_command(self.auth, 154, 1)
await self.coordinator.async_request_refresh()
async def async_turn_off(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning off turbo mode")
await self.client.set_command(self.auth, 154, 0)
await self.coordinator.async_request_refresh()
class PanasonicACSleepMode(PanasonicBaseEntity, SwitchEntity):
""" Panasonic AC sleep mode switch """
@property
def available(self) -> bool:
status = self.coordinator.data[self.index]["status"]
_is_on_status = bool(int(status.get("0x00", 0)))
return _is_on_status
@property
def label(self):
return f"{self.nickname} {LABEL_CLIMATE_SLEEP}"
@property
def icon(self) -> str:
return ICON_SLEEP
@property
def device_class(self) -> str:
return DEVICE_CLASS_SWITCH
@property
def is_on(self) -> int:
status = self.coordinator.data[self.index]["status"]
_sleep_mode_status = status.get("0x05")
        if _sleep_mode_status is None:
return STATE_UNAVAILABLE
_is_on = bool(int(_sleep_mode_status))
_LOGGER.debug(f"[{self.label}] is_on: {_is_on}")
return _is_on
async def async_turn_on(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning on sleep mode")
await self.client.set_command(self.auth, 5, 1)
await self.coordinator.async_request_refresh()
async def async_turn_off(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning off sleep mode")
await self.client.set_command(self.auth, 5, 0)
await self.coordinator.async_request_refresh()
class PanasonicACDryer(PanasonicBaseEntity, SwitchEntity):
""" Panasonic AC dryer switch """
@property
def available(self) -> bool:
status = self.coordinator.data[self.index]["status"]
_is_on_status = bool(int(status.get("0x00", 0)))
return _is_on_status
@property
def label(self):
return f"{self.nickname} {LABEL_CLIMATE_DRYER}"
@property
def icon(self) -> str:
return ICON_DRYER
@property
def device_class(self) -> str:
return DEVICE_CLASS_SWITCH
@property
def is_on(self) -> int:
status = self.coordinator.data[self.index]["status"]
_dryer_status = status.get("0x17")
        if _dryer_status is None:
return STATE_UNAVAILABLE
_is_on = bool(int(_dryer_status))
_LOGGER.debug(f"[{self.label}] is_on: {_is_on}")
return _is_on
async def async_turn_on(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning on dryer")
await self.client.set_command(self.auth, 23, 1)
await self.coordinator.async_request_refresh()
async def async_turn_off(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning off dryer")
await self.client.set_command(self.auth, 23, 0)
await self.coordinator.async_request_refresh()
class PanasonicACSelfClean(PanasonicBaseEntity, SwitchEntity):
""" Panasonic AC self clean switch """
@property
def available(self) -> bool:
status = self.coordinator.data[self.index]["status"]
_is_on_status = bool(int(status.get("0x00", 0)))
return _is_on_status
@property
def label(self):
return f"{self.nickname} {LABEL_CLIMATE_CLEAN}"
@property
def icon(self) -> str:
return ICON_CLEAN
@property
def device_class(self) -> str:
return DEVICE_CLASS_SWITCH
@property
def is_on(self) -> int:
status = self.coordinator.data[self.index]["status"]
_self_clean_status = status.get("0x18")
        if _self_clean_status is None:
return STATE_UNAVAILABLE
_is_on = bool(int(_self_clean_status))
_LOGGER.debug(f"[{self.label}] is_on: {_is_on}")
return _is_on
async def async_turn_on(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning on self clean")
await self.client.set_command(self.auth, 24, 1)
await self.coordinator.async_request_refresh()
async def async_turn_off(self) -> None:
_LOGGER.debug(f"[{self.label}] Turning off self clean")
await self.client.set_command(self.auth, 24, 0)
await self.coordinator.async_request_refresh()
|
'''
File name : rdf_FZUplace.py
Author : Jinwook Jung
Created on : Wed 14 Aug 2019 12:08:23 AM EDT
Last modified : 2020-03-30 22:54:26
Description :
'''
import subprocess, os, sys, random, yaml, time
from subprocess import Popen, PIPE, CalledProcessError
# FIXME
sys.path.insert(0, '../../../src')  # add the directory containing stage.py
from stage import *
def run(config, stage_dir, prev_out_dir, user_parms, write_run_scripts):
print("-"*79)
print("Running FZUplace...")
print("-"*79)
print("Job directory: {}".format(stage_dir))
print("Previous stage outputs: {}".format(prev_out_dir))
fzu = FZUplaceRunner(config, stage_dir, prev_out_dir, user_parms)
fzu.write_run_scripts()
if not write_run_scripts:
fzu.run()
print("Done.")
print("")
class FZUplaceRunner(Stage):
def __init__(self, config, stage_dir, prev_out_dir, user_parms):
super().__init__(config, stage_dir, prev_out_dir, user_parms)
self.lef_mod = "{}/merged_padded_spacing.lef".format(self.lib_dir)
# FIXME
self.bookshelf = "bookshelf.aux"
def write_run_scripts(self):
cmds = list()
# FZUplace assumes bookshelf is stored at the directory under run directory.
cmd = "mkdir {}/bookshelf".format(self.stage_dir)
cmds.append(cmd)
# Write bookshelf
cmd = "cd {}/bookshelf && {}/src/util/bookshelf_writer".format(self.stage_dir, self.rdf_path)
cmd += " --lef {}".format(self.lef_mod)
cmd += " --def {}".format(self.in_def)
cmd += " --bookshelf bookshelf"
cmds.append(cmd)
# Run
cmd = "cd {} && {}/bin/global_place/FZUplace/FZUplace".format(self.stage_dir, self.rdf_path)
cmd += " -aux bookshelf/{}".format(self.bookshelf)
if "target_density" in self.user_parms.keys():
cmd += " -util {}".format(float(self.user_parms["target_density"]))
cmd += " -nolegal -nodetail"
cmds.append(cmd)
# Write DEF
cmd = "cd {} && {}/src/util/place_updater".format(self.stage_dir, self.rdf_path)
cmd += " --lef {}".format(self.lef_mod)
cmd += " --def {}".format(self.in_def)
cmd += " --pl {}/output/bookshelf/bookshelf.pl".format(self.stage_dir)
cmds.append(cmd)
# Copy
cmd = "ln -s {0}/out.def {0}/out/{1}.def" \
.format(self.stage_dir, self.design_name)
cmds.append(cmd)
# Copy previous verilog file
cmd = "ln -s {0}/{1}.v {2}/out/{1}.v" \
.format(self.prev_out_dir, self.design_name, self.stage_dir)
cmds.append(cmd)
self.create_run_script_template()
with open("{}/run.sh".format(self.stage_dir), 'a') as f:
[f.write("{}\n".format(_)) for _ in cmds]
def run(self):
print("Hello FZUplace...")
self._write_bookshelf()
self._run_fzu_place()
self._write_def()
self._copy_final_output()
def _write_bookshelf(self):
# FZUplace assumes bookshelf is stored at the directory under run directory.
os.mkdir("{}/bookshelf".format(self.stage_dir))
cmd = "cd {}/bookshelf && {}/src/util/lefdef_util/bookshelf_writer/bookshelf_writer".format(self.stage_dir, self.rdf_path)
cmd += " --lef {}".format(self.lef_mod)
cmd += " --def {}".format(self.in_def)
cmd += " --bookshelf bookshelf"
print(cmd)
with open("{}/out/{}.log".format(self.stage_dir, self.design_name), 'a') as f:
f.write("\n")
f.write("# Command: {}\n".format(cmd))
f.write("\n")
run_shell_cmd(cmd, f)
def _run_fzu_place(self):
cmd = "cd {} && {}/bin/global_place/FZUplace/FZUplace".format(self.stage_dir, self.rdf_path)
cmd += " -aux bookshelf/{}".format(self.bookshelf)
if "target_density" in self.user_parms.keys():
cmd += " -util {}".format(float(self.user_parms["target_density"]))
cmd += " -nolegal -nodetail"
print(cmd)
with open("{}/out/{}.log".format(self.stage_dir, self.design_name), 'a') as f:
f.write("\n")
f.write("# Command: {}\n".format(cmd))
f.write("\n")
run_shell_cmd(cmd, f)
def _write_def(self):
cmd = "cd {} && {}/src/util/lefdef_util/place_updater/place_updater".format(self.stage_dir, self.rdf_path)
cmd += " --lef {}".format(self.lef_mod)
cmd += " --def {}".format(self.in_def)
cmd += " --pl {}/output/bookshelf/bookshelf.pl".format(self.stage_dir)
print(cmd)
with open("{}/out/{}.log".format(self.stage_dir, self.design_name), 'a') as f:
f.write("\n")
f.write("# Command: {}\n".format(cmd))
f.write("\n")
run_shell_cmd(cmd, f)
def _copy_final_output(self):
cmd = "ln -s {0}/out.def {0}/out/{1}.def" \
.format(self.stage_dir, self.design_name)
run_shell_cmd(cmd)
# Copy previous verilog file
cmd = "ln -s {0}/{1}.v {2}/out/{1}.v" \
.format(self.prev_out_dir, self.design_name, self.stage_dir)
run_shell_cmd(cmd)
|
__doc__ = """
Main module for running BRISE configuration balancing."""
import itertools
import datetime
import socket
from sys import argv
from warnings import filterwarnings
filterwarnings("ignore") # disable warnings for demonstration.
from WSClient import WSClient
from model.model_selection import get_model
from repeater.repeater_selection import get_repeater
from tools.initial_config import initialize_config
from tools.features_tools import split_features_and_labels
from tools.write_results import write_results
from selection.selection_algorithms import get_selector
def run(io=None):
time_started = datetime.datetime.now()
    # argv holds the run parameters for main - used for configuration
global_config, task_config = initialize_config(argv)
# Generate whole search space for regression.
search_space = list(itertools.product(*task_config["DomainDescription"]["AllConfigurations"]))
if io:
# APPI_QUEUE.put({"global_config": global_config, "task": task_config})
temp = {"global_config": global_config, "task": task_config}
io.emit('main_config', temp)
# Creating instance of selector based on selection type and
# task data for further uniformly distributed data points generation.
selector = get_selector(selection_algorithm_config=task_config["SelectionAlgorithm"],
search_space=task_config["DomainDescription"]["AllConfigurations"])
# Instantiate client for Worker Service, establish connection.
WS = WSClient(task_config=task_config,
ws_addr=global_config["WorkerService"]["Address"],
logfile='%s%s_WSClient.csv' % (global_config['results_storage'], task_config["ExperimentsConfiguration"]["FileToRead"]))
    # Create a runner that repeats each task measurement to smooth out fluctuations.
repeater = get_repeater("default", WS, task_config["ExperimentsConfiguration"])
print("Measuring default configuration that we will used in regression to evaluate solution... ")
default_result = repeater.measure_task([task_config["DomainDescription"]["DefaultConfiguration"]], io) #change it to switch inside and devide to
default_features, default_value = split_features_and_labels(default_result, task_config["ModelCreation"]["FeaturesLabelsStructure"])
print(default_value)
if io:
temp = {'conf': default_features, "result": default_value}
io.emit('default conf', temp)
# APPI_QUEUE.put({"default configuration": {'configuration': default_features, "result": default_value}})
print("Measuring initial number experiments, while it is no sense in trying to create model"
"\n(because there is no data)...")
initial_task = [selector.get_next_point() for x in range(task_config["SelectionAlgorithm"]["NumberOfInitialExperiments"])]
repeater = get_repeater(repeater_type=task_config["ExperimentsConfiguration"]["RepeaterDecisionFunction"],
WS=WS, experiments_configuration=task_config["ExperimentsConfiguration"])
results = repeater.measure_task(initial_task, io, default_point=default_result[0])
features, labels = split_features_and_labels(results, task_config["ModelCreation"]["FeaturesLabelsStructure"])
print("Results got. Building model..")
# The main effort does here.
# 1. Loading and building model.
# 2. If model built - validation of model.
# 3. If model is valid - prediction solution and verification it by measuring.
# 4. If solution is OK - reporting and terminating. If not - add it to all data set and go to 1.
# 5. Get new point from selection algorithm, measure it, check if termination needed and go to 1.
#
finish = False
while not finish:
model = get_model(model_creation_config=task_config["ModelCreation"],
log_file_name="%s%s%s_model.txt" % (global_config['results_storage'],
task_config["ExperimentsConfiguration"]["FileToRead"],
task_config["ModelCreation"]["ModelType"]),
features=features,
labels=labels)
model_built = model.build_model(score_min=task_config["ModelCreation"]["MinimumAccuracy"])
if model_built:
model_validated = model.validate_model(io=io, search_space=search_space)
if model_validated:
predicted_labels, predicted_features = model.predict_solution(io=io, search_space=search_space)
print("Predicted solution features:%s, labels:%s." %(str(predicted_features), str(predicted_labels)))
validated_labels, finish = model.validate_solution(io=io, task_config=task_config["ModelCreation"],
repeater=repeater,
default_value=default_value,
predicted_features=predicted_features)
features += [predicted_features]
labels += [validated_labels]
if finish:
optimal_result, optimal_config = model.get_result(repeater, features, labels, io=io)
write_results(global_config, task_config, time_started, features, labels,
repeater.performed_measurements, optimal_config, optimal_result, default_features, default_value)
return optimal_result, optimal_config
else:
continue
print("New data point needed to continue process of balancing. "
"Number of data points retrieved from the selection algorithm: %s" % str(selector.numOfGeneratedPoints))
print('='*120)
cur_task = [selector.get_next_point() for x in range(task_config["SelectionAlgorithm"]["Step"])]
results = repeater.measure_task(cur_task, io=io, default_point=default_result[0])
new_feature, new_label = split_features_and_labels(results, task_config["ModelCreation"]["FeaturesLabelsStructure"])
features += new_feature
labels += new_label
        # If BRISE cannot finish its work properly - terminate it with the best measured result.
if len(features) > len(search_space):
print("Unable to finish normally, terminating with best of measured results.")
optimal_result, optimal_config = model.get_result(repeater, features, labels, io=io)
write_results(global_config, task_config, time_started, features, labels,
repeater.performed_measurements, optimal_config, optimal_result, default_features,
default_value)
return optimal_result, optimal_config
if __name__ == "__main__":
run()
|
from . tga import *
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset')
router.register('profile', views.UserProfileViewSet)
router.register('feed', views.UserProfileFeedViewSet)
# register(url prefix, viewset, base_name used for retrieving URLs in our router)
# The API Root page will only show routes registered via DefaultRouter().
# base_name only needs to be specified if the ViewSet doesn't define a queryset;
# DRF derives the basename from the queryset otherwise.
urlpatterns = [
path('hello-view/', views.HelloApiView.as_view()),
path('login/', views.UserLoginApiView.as_view()),
path('', include(router.urls))
]
|
PAIRWISE_DRIVER_EXCEPTIONS = {
'verstappen': 'max_verstappen',
'magnussen': 'kevin_magnussen'
}
def initials_for_driver(name):
new_name = ''
    if name and len(name) > 2:
new_name = name[0:3].upper()
return new_name
def rename_drivers(name):
new_name = name.lower()
new_name = PAIRWISE_DRIVER_EXCEPTIONS.get(
new_name, new_name)
return new_name
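# Illustrative usage sketch (not part of the original module): exercises the two
# helpers above on a couple of sample driver names.
if __name__ == '__main__':
    assert rename_drivers('Verstappen') == 'max_verstappen'  # mapped via PAIRWISE_DRIVER_EXCEPTIONS
    assert rename_drivers('Hamilton') == 'hamilton'          # unmapped names are simply lower-cased
    assert initials_for_driver('hamilton') == 'HAM'          # first three letters, upper-cased
    assert initials_for_driver('') == ''                     # short or empty names yield ''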
|
from django.contrib.auth.models import AbstractUser
from social.invites.models import Invite
from . import constants
class User(AbstractUser):
"""A custom user for extension"""
@property
def remaining_connections(self):
"""Get the number of remaining available connections."""
return (
constants.MAX_USER_CONNECTIONS
- Invite.objects.filter(from_user=self)
.exclude(status=Invite.InviteStatus.ACCEPTED)
.count()
)
|
r"""The real valued VGG model. Adapted from
https://github.com/kuangliu/pytorch-cifar/blob/master/models/vgg.py
https://github.com/anokland/local-loss/blob/master/train.py
"""
import torch
# var-dropout
from cplxmodule.nn.relevance import LinearVD
from cplxmodule.nn.relevance import Conv2dVD
# automatic relevance determination
from cplxmodule.nn.relevance import LinearARD
from cplxmodule.nn.relevance import Conv2dARD
from cplxmodule.nn.masked import Conv2dMasked, LinearMasked
from ....musicnet.models.real.base import Flatten
cfg = {
# 'VGG6a' : [ 128, 'M', 256, 'M', 512, 'M', 512 ],
# 'VGG6b' : [ 128, 'M', 256, 'M', 512, 'M', 512, 'M' ],
'VGG8' : [ 64, 'M', 128, 'M', 256, 'M', 512, 'M', 512, 'M'],
# 'VGG8a' : [ 128, 256, 'M', 256, 512, 'M', 512, 'M', 512 ],
# 'VGG8b' : [ 128, 256, 'M', 256, 512, 'M', 512, 'M', 512, 'M' ],
# 'VGG11b' : [ 128, 128, 128, 256, 'M', 256, 512, 'M', 512, 512, 'M', 512, 'M' ],
'VGG11' : [ 64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13' : [ 64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16' : [ 64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19' : [ 64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG(torch.nn.Module):
Linear = torch.nn.Linear
Conv2d = torch.nn.Conv2d
def __new__(cls, vgg_name='VGG16', n_outputs=10, n_channels=3, double=False):
layers = []
for x in cfg[vgg_name]:
if x == 'M':
layers.append(torch.nn.MaxPool2d(kernel_size=2, stride=2))
else:
x = (x * 2) if double else x
layers.extend([
cls.Conv2d(n_channels, x, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(x),
torch.nn.ReLU(),
])
n_channels = x
return torch.nn.Sequential(
*layers,
Flatten(-3, -1),
cls.Linear(1024 if double else 512, n_outputs)
)
class VGGVD(VGG):
Linear = LinearVD
Conv2d = Conv2dVD
class VGGARD(VGG):
Linear = LinearARD
Conv2d = Conv2dARD
class VGGMasked(VGG):
Linear = LinearMasked
Conv2d = Conv2dMasked
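# Minimal usage sketch (an assumption, not part of the original file): builds the
# real-valued VGG16 and runs a dummy CIFAR-sized batch through it; the 512-feature
# classifier above implies 32x32 inputs after five 2x2 max-pools.
if __name__ == '__main__':
    model = VGG('VGG16', n_outputs=10, n_channels=3)
    x = torch.randn(2, 3, 32, 32)   # two dummy RGB images
    print(model(x).shape)           # expected: torch.Size([2, 10])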
|
import sqlite3
conn = sqlite3.connect('ej.db')
c = conn.cursor()
f = open("data.txt","w") ##crear archivo de texto para el regresor logístico
for row in c.execute('SELECT ancho, alto, clase FROM features'): # mostrar base de datos
print(row)
f.write(' '.join(str(s) for s in row) + '\n') #escribimos el archivo de texto con los datos de la bd
conn.close()
f.close()
|
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
def unet(pretrained_weights = None, input_size = (256,256, 1)):
inputs = Input(input_size)
#keras.layers.Conv2D(filters, kernel_size, strides=(1, 1),
# padding='valid', data_format=None,
# dilation_rate=(1, 1), activation=None,
# use_bias=True, kernel_initializer='glorot_uniform',
# bias_initializer='zeros', kernel_regularizer=None,
# bias_regularizer=None, activity_regularizer=None,
# kernel_constraint=None, bias_constraint=None)
    # Extraction block 1
conv1 = Conv2D(64, 3 , activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
pool1 = MaxPooling2D(pool_size = (2,2))(conv1)
    # Extraction block 2
conv2 = Conv2D(128, 3 , activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
pool2 = MaxPooling2D(pool_size = (2,2))(conv2)
    # Extraction block 3
conv3 = Conv2D(256, 3 , activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
pool3 = MaxPooling2D(pool_size = (2,2))(conv3)
    # Extraction block 4
conv4 = Conv2D(512, 3 , activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size = (2,2))(drop4)
    # Extraction block 5
conv5 = Conv2D(1024, 3 , activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
drop5 = Dropout(0.5)(conv5)
    # Expansion block 1
up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4, up6], axis = 3)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
    # Expansion block 2
up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3, up7], axis = 3)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
    # Expansion block 3
up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2, up8], axis = 3)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
    # Expansion block 4
up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1, up9], axis = 3)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
    # Expansion block 5: final 1x1 output convolution
    conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)
    # Build the model
    model = Model(inputs = inputs, outputs = conv10)
model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])
model.summary()
if(pretrained_weights):
model.load_weights(pretrained_weights)
return model
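# Illustrative usage sketch (assumption: the default 256x256x1 grayscale input size
# and no pretrained weights available); only checks the model's declared output shape.
if __name__ == '__main__':
    model = unet()
    print(model.output_shape)   # expected: (None, 256, 256, 1)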
|
import globals
from globals import get_soup, job_insert
from job import Job
# Upward Bound House
organization = "Upward Bound House"
url = 'https://upwardboundhouse.org/about-us/careers/'
organization_id= 60
def run(url):
soup = get_soup(url)
jobs_div = soup.find('h1', text='Careers').parent
job_class= Job(organization, "")
job_class.organization_id= organization_id
insert_count= 0
for job_listing in jobs_div.find_all('a'):
job_class.title = job_listing.text
job_class.info_link = job_listing['href']
insert_count+= job_insert(job_class)
return insert_count
|
# Copyright (C) 2020 Red Hat, Inc.
# SPDX-License-Identifier: MIT
#
# pylint: disable=invalid-name,missing-function-docstring
"""double-quote-strings-if-needed test cases.
"""
from . import common as C
class TestCase(C.YamlLintTestCase):
"""
double-quote-strings-if-needed test cases.
"""
tcase = "double-quote-strings-if-needed"
# vim:sw=4:ts=4:et:
|
import sys
import os
from Algorithmia.errors import AlgorithmException
import Algorithmia
# look in ../ BEFORE trying to import Algorithmia. If you append to the
# path, you will load the version installed on the computer.
sys.path = ['../'] + sys.path
import unittest
if sys.version_info.major >= 3:
class AlgoDummyTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = Algorithmia.client(api_address="http://localhost:8080", api_key="simabcd123")
def test_call_customCert(self):
result = self.client.algo('util/echo').pipe(bytearray('foo', 'utf-8'))
self.assertEquals('binary', result.metadata.content_type)
self.assertEquals(bytearray('foo', 'utf-8'), result.result)
def test_normal_call(self):
result = self.client.algo('util/echo').pipe("foo")
self.assertEquals("text", result.metadata.content_type)
self.assertEquals("foo", result.result)
def test_dict_call(self):
result = self.client.algo('util/echo').pipe({"foo": "bar"})
self.assertEquals("json", result.metadata.content_type)
self.assertEquals({"foo": "bar"}, result.result)
def test_text_unicode(self):
telephone = u"\u260E"
# Unicode input to pipe()
result1 = self.client.algo('util/Echo').pipe(telephone)
self.assertEquals('text', result1.metadata.content_type)
self.assertEquals(telephone, result1.result)
# Unicode return in .result
result2 = self.client.algo('util/Echo').pipe(result1.result)
self.assertEquals('text', result2.metadata.content_type)
self.assertEquals(telephone, result2.result)
def test_get_build_by_id(self):
result = self.client.algo("J_bragg/Echo").get_build("1a392e2c-b09f-4bae-a616-56c0830ac8e5")
self.assertTrue(result.build_id is not None)
def test_get_build_logs(self):
result = self.client.algo("J_bragg/Echo").get_build_logs("1a392e2c-b09f-4bae-a616-56c0830ac8e5")
self.assertTrue(result.logs is not None)
def test_get_scm_status(self):
result = self.client.algo("J_bragg/Echo").get_scm_status()
self.assertTrue(result.scm_connection_status is not None)
def test_exception_ipa_algo(self):
try:
result = self.client.algo('zeryx/raise_exception').pipe("")
except AlgorithmException as e:
self.assertEqual(e.message, "This is an exception")
else:
class AlgoTest(unittest.TestCase):
def setUp(self):
self.client = Algorithmia.client()
def test_call_customCert(self):
open("./test.pem", 'w')
c = Algorithmia.client(ca_cert="./test.pem")
result = c.algo('util/Echo').pipe(bytearray('foo', 'utf-8'))
self.assertEquals('binary', result.metadata.content_type)
self.assertEquals(bytearray('foo', 'utf-8'), result.result)
try:
os.remove("./test.pem")
except OSError as e:
print(e)
def test_call_binary(self):
result = self.client.algo('util/Echo').pipe(bytearray('foo', 'utf-8'))
self.assertEquals('binary', result.metadata.content_type)
self.assertEquals(bytearray('foo', 'utf-8'), result.result)
def test_text_unicode(self):
telephone = u"\u260E"
# Unicode input to pipe()
result1 = self.client.algo('util/Echo').pipe(telephone)
self.assertEquals('text', result1.metadata.content_type)
self.assertEquals(telephone, result1.result)
# Unicode return in .result
result2 = self.client.algo('util/Echo').pipe(result1.result)
self.assertEquals('text', result2.metadata.content_type)
self.assertEquals(telephone, result2.result)
def test_get_build_by_id(self):
result = self.client.algo("J_bragg/Echo").get_build("1a392e2c-b09f-4bae-a616-56c0830ac8e5")
self.assertTrue(result.build_id is not None)
def test_get_build_logs(self):
result = self.client.algo("J_bragg/Echo").get_build_logs("1a392e2c-b09f-4bae-a616-56c0830ac8e5")
self.assertTrue(result.logs is not None)
def test_get_scm_status(self):
result = self.client.algo("J_bragg/Echo").get_scm_status()
self.assertTrue(result.scm_connection_status is not None)
def test_exception_ipa_algo(self):
try:
result = self.client.algo('zeryx/raise_exception').pipe("")
except AlgorithmException as e:
self.assertEqual(e.message, "This is an exception")
if __name__ == '__main__':
unittest.main()
|
#Report Memory
'''
Reset reason: RTC WDT reset
uPY stack: 19456 bytes
uPY heap: 512256/6240/506016 bytes (in SPIRAM using malloc)
'''
import gc
import micropython
#gc.collect()
micropython.mem_info()
print('-----------------------------')
print('Initial free: {} allocated: {}'.format(gc.mem_free(), gc.mem_alloc()))
micropython.mem_info()
dir(gc)
dir(micropython)
|
'''
DATASETS AND DATALOADERS
AUTHOR - shyamgupta196
PYTHON - 3.8.3
'''
# ON HOLD
import torch
from torch.utils.data import DataLoader,Dataset
from torchvision import datasets
from torchvision.io import read_image
from torchvision.transforms import ToTensor, Lambda
import matplotlib.pyplot as plt
from pytorch4 import Transform
import torch.nn as nn
from torchvision import transforms
# Transforms = nn.Sequential(
# transforms.CenterCrop(10),
# transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
# )
training_data = datasets.FashionMNIST(
root="../data",
train=True,
download=True,
transform=Transform
)
test_data = datasets.FashionMNIST(
root="../data",
train=False,
download=True,
transform=ToTensor()
)
labels_map = {
0: "T-Shirt",
1: "Trouser",
2: "Pullover",
3: "Dress",
4: "Coat",
5: "Sandal",
6: "Shirt",
7: "Sneaker",
8: "Bag",
9: "Ankle Boot",
}
figure = plt.figure(figsize=(8, 8))
cols, rows = 3, 3
for i in range(1, cols * rows + 1):
sample_idx = torch.randint(len(training_data), size=(1,)).item()
img, label = training_data[sample_idx]
figure.add_subplot(rows, cols, i)
plt.title(labels_map[label])
plt.axis("off")
plt.imshow(img, cmap="gray")
plt.show()
import pandas as pd
import os
# class CustomDataSet(Dataset):
# def __init__(self,annotations,root_dir,transform):
# self.data = pd.read_csv(annotations)
# self.root_dir = root_dir
# self.transform = transform
# def __len__(self):
# return len(self.data)
# def __getitem__(self,idx):
# image_path = os.path.join(self.root_dir,self.data.iloc[idx,0])
# image = read_image(image_path)
# label = self.data.iloc[idx,1]
# if transform:
# image = self.transform(image)
# sample = {'image':image,'label':labels_map.get(label)}
# return image,label
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
# Display image and label.
train_features, train_labels = next(iter(train_dataloader))
print(f"Feature batch shape: {train_features.size()}")
print(f"Labels batch shape: {train_labels.size()}")
img = train_features[10].squeeze()
# I MAPPED THE LABELS ADDITIONALLY
label = labels_map.get(int(train_labels[10]))
plt.imshow(img, cmap="gray")
plt.show()
print(f"Label: {label}")
|
from __future__ import unicode_literals
import re
def camelToSnake(s):
"""
https://gist.github.com/jaytaylor/3660565
Is it ironic that this function is written in camel case, yet it
converts to snake case? hmm..
"""
_underscorer1 = re.compile(r'(.)([A-Z][a-z]+)')
_underscorer2 = re.compile('([a-z0-9])([A-Z])')
subbed = _underscorer1.sub(r'\1_\2', s)
return _underscorer2.sub(r'\1_\2', subbed).lower()
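# Quick sanity checks (illustrative only; not part of the original module).
if __name__ == '__main__':
    assert camelToSnake('snakesOnAPlane') == 'snakes_on_a_plane'
    assert camelToSnake('SnakesOnAPlane') == 'snakes_on_a_plane'
    assert camelToSnake('IPhoneHysteria') == 'i_phone_hysteria'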
|
import logging
from pathlib import Path
from typing import Any, Dict, Mapping, Optional, cast
log = logging.getLogger(__name__)
try:
import message_data
except ImportError:
log.warning("message_data is not installed or cannot be imported")
MESSAGE_DATA_PATH: Optional[Path] = None
else: # pragma: no cover (needs message_data)
# Root directory of the message_data repository.
MESSAGE_DATA_PATH = Path(message_data.__file__).parents[1]
# Directory containing message_ix_models.__init__
MESSAGE_MODELS_PATH = Path(__file__).parents[1]
#: Package data already loaded with :func:`load_package_data`.
PACKAGE_DATA: Dict[str, Any] = dict()
#: Data already loaded with :func:`load_private_data`.
PRIVATE_DATA: Dict[str, Any] = dict()
def _load(
var: Dict, base_path: Path, *parts: str, default_suffix: Optional[str] = None
) -> Any:
"""Helper for :func:`.load_package_data` and :func:`.load_private_data`."""
key = " ".join(parts)
if key in var:
log.debug(f"{repr(key)} already loaded; skip")
return var[key]
path = _make_path(base_path, *parts, default_suffix=default_suffix)
if path.suffix == ".yaml":
import yaml
with open(path, encoding="utf-8") as f:
var[key] = yaml.safe_load(f)
else:
raise ValueError(path.suffix)
return var[key]
def _make_path(
base_path: Path, *parts: str, default_suffix: Optional[str] = None
) -> Path:
p = base_path.joinpath(*parts)
return p.with_suffix(p.suffix or default_suffix) if default_suffix else p
def load_package_data(*parts: str, suffix: Optional[str] = ".yaml") -> Any:
"""Load a :mod:`message_ix_models` package data file and return its contents.
Data is re-used if already loaded.
Example
-------
The single call:
>>> info = load_package_data("node", "R11")
1. loads the metadata file :file:`data/node/R11.yaml`, parsing its contents,
2. stores those values at ``PACKAGE_DATA["node R11"]`` for use by other code, and
3. returns the loaded values.
Parameters
----------
parts : iterable of str
Used to construct a path under :file:`message_ix_models/data/`.
suffix : str, optional
        File name suffix, including the ".", e.g. :file:`.yaml`.
Returns
-------
dict
Configuration values that were loaded.
"""
return _load(
PACKAGE_DATA,
MESSAGE_MODELS_PATH / "data",
*parts,
default_suffix=suffix,
)
def load_private_data(*parts: str) -> Mapping: # pragma: no cover (needs message_data)
"""Load a private data file from :mod:`message_data` and return its contents.
    Analogous to :func:`load_package_data`, but for non-public data.
Parameters
----------
parts : iterable of str
Used to construct a path under :file:`data/` in the :mod:`message_data`
repository.
Returns
-------
dict
Configuration values that were loaded.
Raises
------
RuntimeError
if :mod:`message_data` is not installed.
"""
if MESSAGE_DATA_PATH is None:
raise RuntimeError("message_data is not installed")
return _load(PRIVATE_DATA, MESSAGE_DATA_PATH / "data", *parts)
def package_data_path(*parts) -> Path:
"""Construct a path to a file under :file:`message_ix_models/data/`."""
return _make_path(MESSAGE_MODELS_PATH / "data", *parts)
def private_data_path(*parts) -> Path: # pragma: no cover (needs message_data)
"""Construct a path to a file under :file:`data/` in :mod:`message_data`."""
return _make_path(cast(Path, MESSAGE_DATA_PATH) / "data", *parts)
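# Illustrative sketch (not part of the original module): shows how the path helpers
# compose locations without reading any files; "/tmp" and the "node"/"R11" parts are
# placeholder values.
if __name__ == "__main__":  # pragma: no cover
    print(_make_path(Path("/tmp"), "node", "R11", default_suffix=".yaml"))  # -> /tmp/node/R11.yaml
    print(package_data_path("node", "R11.yaml"))  # a path under message_ix_models/data/node/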
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
from knack.help_files import helps
helps['stack-hci'] = '''
type: group
short-summary: Manage Azure Stack HCI
'''
helps['stack-hci arc-setting'] = """
type: group
short-summary: Manage arc setting with stack hci
"""
helps['stack-hci arc-setting list'] = """
type: command
short-summary: "Get ArcSetting resources of HCI Cluster."
examples:
- name: List ArcSetting resources by HCI Cluster
text: |-
az stack-hci arc-setting list --cluster-name "myCluster" --resource-group "test-rg"
"""
helps['stack-hci arc-setting show'] = """
type: command
short-summary: "Get ArcSetting resource details of HCI Cluster."
examples:
- name: Get ArcSetting
text: |-
az stack-hci arc-setting show --name "default" --cluster-name "myCluster" --resource-group "test-rg"
"""
helps['stack-hci arc-setting create'] = """
type: command
short-summary: "Create ArcSetting for HCI cluster."
examples:
- name: Create ArcSetting
text: |-
az stack-hci arc-setting create --name "default" --cluster-name "myCluster" --resource-group "test-rg"
"""
helps['stack-hci arc-setting delete'] = """
type: command
short-summary: "Delete ArcSetting resource details of HCI Cluster."
examples:
- name: Delete ArcSetting
text: |-
az stack-hci arc-setting delete --name "default" --cluster-name "myCluster" --resource-group "test-rg"
"""
helps['stack-hci arc-setting wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the stack-hci arc-setting is met.
examples:
- name: Pause executing next line of CLI script until the stack-hci arc-setting is successfully deleted.
text: |-
az stack-hci arc-setting wait --name "default" --cluster-name "myCluster" --resource-group "test-rg" \
--deleted
"""
helps['stack-hci cluster'] = """
type: group
short-summary: Manage cluster with stack hci
"""
helps['stack-hci cluster list'] = """
type: command
short-summary: "List all HCI clusters in a resource group. And List all HCI clusters in a subscription."
examples:
- name: List clusters in a given resource group
text: |-
az stack-hci cluster list --resource-group "test-rg"
- name: List clusters in a given subscription
text: |-
az stack-hci cluster list
"""
helps['stack-hci cluster show'] = """
type: command
short-summary: "Get HCI cluster."
examples:
- name: Get cluster
text: |-
az stack-hci cluster show --name "myCluster" --resource-group "test-rg"
"""
helps['stack-hci cluster create'] = """
type: command
short-summary: "Create an HCI cluster."
parameters:
- name: --desired-properties
short-summary: "Desired properties of the cluster."
long-summary: |
Usage: --desired-properties windows-server-subscription=XX diagnostic-level=XX
windows-server-subscription: Desired state of Windows Server Subscription.
diagnostic-level: Desired level of diagnostic data emitted by the cluster.
examples:
- name: Create cluster
text: |-
az stack-hci cluster create --location "East US" --aad-client-id "24a6e53d-04e5-44d2-b7cc-1b732a847dfc" \
--aad-tenant-id "7e589cc1-a8b6-4dff-91bd-5ec0fa18db94" --endpoint "https://98294836-31be-4668-aeae-698667faf99b.waconaz\
ure.com" --name "myCluster" --resource-group "test-rg"
"""
helps['stack-hci cluster update'] = """
type: command
short-summary: "Update an HCI cluster."
parameters:
- name: --desired-properties
short-summary: "Desired properties of the cluster."
long-summary: |
Usage: --desired-properties windows-server-subscription=XX diagnostic-level=XX
windows-server-subscription: Desired state of Windows Server Subscription.
diagnostic-level: Desired level of diagnostic data emitted by the cluster.
examples:
- name: Update cluster
text: |-
az stack-hci cluster update --endpoint "https://98294836-31be-4668-aeae-698667faf99b.waconazure.com" \
--desired-properties diagnostic-level="Basic" windows-server-subscription="Enabled" --tags tag1="value1" tag2="value2" \
--name "myCluster" --resource-group "test-rg"
"""
helps['stack-hci cluster delete'] = """
type: command
short-summary: "Delete an HCI cluster."
examples:
- name: Delete cluster
text: |-
az stack-hci cluster delete --name "myCluster" --resource-group "test-rg"
"""
helps['stack-hci extension'] = """
type: group
short-summary: Manage extension with stack hci
"""
helps['stack-hci extension list'] = """
type: command
short-summary: "List all Extensions under ArcSetting resource."
examples:
- name: List Extensions under ArcSetting resource
text: |-
az stack-hci extension list --arc-setting-name "default" --cluster-name "myCluster" --resource-group \
"test-rg"
"""
helps['stack-hci extension show'] = """
type: command
short-summary: "Get particular Arc Extension of HCI Cluster."
examples:
- name: Get ArcSettings Extension
text: |-
az stack-hci extension show --arc-setting-name "default" --cluster-name "myCluster" --name \
"MicrosoftMonitoringAgent" --resource-group "test-rg"
"""
helps['stack-hci extension create'] = """
type: command
short-summary: "Create Extension for HCI cluster."
examples:
- name: Create Arc Extension
text: |-
az stack-hci extension create --arc-setting-name "default" --cluster-name "myCluster" --type \
"MicrosoftMonitoringAgent" --protected-settings "{\\"workspaceKey\\":\\"xx\\"}" --publisher "Microsoft.Compute" \
--settings "{\\"workspaceId\\":\\"xx\\"}" --type-handler-version "1.10" --name "MicrosoftMonitoringAgent" \
--resource-group "test-rg"
"""
helps['stack-hci extension delete'] = """
type: command
short-summary: "Delete particular Arc Extension of HCI Cluster."
examples:
- name: Delete Arc Extension
text: |-
az stack-hci extension delete --arc-setting-name "default" --cluster-name "myCluster" --name \
"MicrosoftMonitoringAgent" --resource-group "test-rg"
"""
helps['stack-hci extension wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the stack-hci extension is met.
examples:
- name: Pause executing next line of CLI script until the stack-hci extension is successfully created.
text: |-
az stack-hci extension wait --arc-setting-name "default" --cluster-name "myCluster" --name \
"MicrosoftMonitoringAgent" --resource-group "test-rg" --created
- name: Pause executing next line of CLI script until the stack-hci extension is successfully updated.
text: |-
az stack-hci extension wait --arc-setting-name "default" --cluster-name "myCluster" --name \
"MicrosoftMonitoringAgent" --resource-group "test-rg" --updated
- name: Pause executing next line of CLI script until the stack-hci extension is successfully deleted.
text: |-
az stack-hci extension wait --arc-setting-name "default" --cluster-name "myCluster" --name \
"MicrosoftMonitoringAgent" --resource-group "test-rg" --deleted
"""
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-30 14:30
from __future__ import unicode_literals
import enum
from django.db import migrations
import enumfields.fields
class TrxType(enum.Enum):
FINALIZED = 0
PENDING = 1
CANCELLATION = 2
class TrxStatus(enum.Enum):
PENDING = 0
FINALIZED = 1
REJECTED = 2
CANCELED = 3
class Migration(migrations.Migration):
dependencies = [
('wallet', '0005_auto_20160309_1722'),
]
operations = [
migrations.AlterField(
model_name='wallettransaction',
name='trx_status',
field=enumfields.fields.EnumIntegerField(default=1, enum=TrxStatus),
),
migrations.AlterField(
model_name='wallettransaction',
name='trx_type',
field=enumfields.fields.EnumIntegerField(default=0, enum=TrxType),
),
]
|
# Setup CA Firebase Project, and then create a mapping of the codes to their profile details.
# Ideally we should only need the users thingy
from firebase_admin import firestore
import json
from dateutil.parser import parse
from time import time
import firebase_admin
from firebase_admin import credentials
import pathlib
this_folder = pathlib.Path(__file__).parent.absolute()
parent_folder = this_folder.parent
FIREBASE_CA_SA_KEY = this_folder / 'serviceAccountKeyCA.json'
print('[INFO] Authenticating CA...')
ca_cred = credentials.Certificate(FIREBASE_CA_SA_KEY)
ca_app = firebase_admin.initialize_app(ca_cred, name='CA')
ca_client = firestore.client(ca_app)
import re
REGEX = re.compile('[\n\r\t,]')
def escape(string):
if isinstance(string, str):
a = REGEX.sub('_', string)
else:
a = str(string)
a = a.encode('ascii', 'ignore')
a = a.decode()
return a
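# Illustrative check (not in the original script): escape() replaces CSV-breaking
# characters with underscores and strips non-ASCII so each record stays on one line.
assert escape('Hello,\tWorld\n') == 'Hello__World_'
assert escape(42) == '42'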
cas = {}
print('[INFO] Writing JSON Document...')
with open(parent_folder / f'cas.json', 'w', encoding="utf-8") as f:
print('[INFO] Fetching Registrations...')
snapshots = list(ca_client.collection('users').get())
    print('[INFO] Registrations Fetched')
for snapshot in snapshots:
cas[snapshot.id] = snapshot.to_dict()
f.write(json.dumps(cas, indent=4))
# f.write(json.dumps(k))
print('[INFO] Finished Writing JSON Document')
cac = {}
with open(parent_folder / f'cas-registrations.json', 'w', encoding="utf-8") as f:
snapshots = list(ca_client.collection('ca_code').get())
for snapshot in snapshots:
cac[snapshot.id] = snapshot.to_dict()
f.write(json.dumps(cac, indent=4))
_LINE = "user_id, ca_code, first_name, last_name, phone, city, college_name, address, country, about, tasks_completed, registrations"
with open(parent_folder / f'cas.csv', 'w', encoding="utf-8") as f:
f.write(_LINE + '\n')
for [key, value] in cas.items():
code = key[:7]
if code in cac:
registrations = cac[code]['number_of_regis']
else:
registrations = 0
count = 0
for [_key, _value] in value['task_complition_data'].items():
if _value:
count += 1
f.write(f"{key},{key[:7]}, {escape(value['first_name'])}, {escape(value['last_name'])}, {escape(value['phone'])}, {escape(value['city'])}, {escape(value['college_name'])}, {escape(value['address'])}, {value['country']}, {escape(value['about'])}, {count}, {registrations}\n")
# We can now convert this to a csv file.
# IDK if we should have the task thingy. I am going to just use the
|
#some code
codeChanged = True
|
def test_placeholder():
# Sample test added to make pytest happy :)
pass
|
#
# wayne_django_rest copyright © 2020 - all rights reserved
# Created at: 26/10/2020
# By: mauromarini
# License: MIT
# Repository: https://github.com/marinimau/wayne_django_rest
# Credits: @marinimau (https://github.com/marinimau)
#
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import gettext_lazy as _
# ----------------------------------------------------------------------------------------------------------------------
# Config
#
# This model is used to store the user preferences for his client.
# The model has the following attributes:
#
# - Country : enum (store the current country)
# - Language : enum (store the language to use in the client app)
# - UIMode : enum (store the theme to use in the client app)
# ----------------------------------------------------------------------------------------------------------------------
class Config(models.Model):
class Country(models.TextChoices):
IT = 'IT', _('Italy')
US = 'US', _('United States')
FR = 'FR', _('France')
class Language(models.TextChoices):
        IT = 'IT', _('Italian')
EN = 'EN', _('English')
class UIMode(models.TextChoices):
L = 'L', _('Light')
D = 'D', _('Dark')
A = 'A', _('Auto')
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='config', primary_key=True)
country = models.CharField(null=False, max_length=2, choices=Country.choices, default=Country.IT)
language = models.CharField(null=False, max_length=2, choices=Language.choices, default=Language.EN)
ui_pref = models.CharField(null=False, max_length=1, choices=UIMode.choices, default=UIMode.A)
|
import logging
import sys
from time import sleep
root = logging.getLogger(__name__)
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
log_format = '%(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
root.addHandler(handler)
while True:
    root.info("Writes to stderr by default")
    root.warning("Watch out")
    root.error("Too late")
    root.critical("Everything is broken")
    sleep(1)
|