content stringlengths 5 1.05M |
|---|
###############################################################################
### Games support module for LiveWires using pygame.
###
### $Revision: 1.7 $ -- $Date: 2001/10/27 17:43:51 $
###############################################################################
# Copyright Richard Crook, Gareth McCaughan, Rhodri James, Neil Turton
# and Paul Wright. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither name of Scripture Union nor LiveWires nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SCRIPTURE UNION
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
###############################################################################
# Modified by Michael Dawson
# 5/24/05
#
# Restructured Classes
# - created a single Sprite class (no multiple inheritance)
# - added properties to classes (getter and setter methods still available)
# - added Question class to get user keyboard input
# - added Mouse class for mouse input
# - added Keyboard class for keyboard input
# - added Music class to access music channel
#
# Revised Animation Class
# - now receives only one list of images to animate
# - images now displayed in order, from first frame to last (not the reverse)
# - n_repeats represents number of cycles to display (not number of frames)
#
# "Americanized" Spelling
# - 'colour' is now 'color'
###############################################################################
import pygame, pygame.image, pygame.mixer, pygame.font, pygame.transform
import pygame.draw
from pygame.locals import *
pygame.init()
###############################################################################
## Error classes ##############################################################
###############################################################################
class GamesError(Exception):
    """Base exception raised for errors in the games module."""
###############################################################################
## Mouse class ################################################################
###############################################################################
class Mouse(object):
    """
    Polling interface to the pygame mouse: position, per-axis access,
    pointer visibility, and button state.
    """
    #------Properties--------#
    ## position
    def get_position(self):
        """Return the (x, y) position of the mouse pointer."""
        return pygame.mouse.get_pos()
    def set_position(self, new_position):
        """Warp the mouse pointer to the given (x, y) position."""
        pygame.mouse.set_pos(new_position)
    position = property(get_position, set_position)
    ## x
    def get_x(self):
        return pygame.mouse.get_pos()[0]
    def set_x(self, new_x):
        current_y = pygame.mouse.get_pos()[1]
        pygame.mouse.set_pos( (new_x, current_y) )
    x = property(get_x, set_x)
    ## y
    def get_y(self):
        return pygame.mouse.get_pos()[1]
    def set_y(self, new_y):
        # BUG FIX: the original assigned 'current_mouse_x' but then used the
        # undefined name 'current_x', raising NameError on every call.
        current_x = pygame.mouse.get_pos()[0]
        pygame.mouse.set_pos( (current_x, new_y) )
    y = property(get_y, set_y)
    ## is visible (write-only property)
    def set_is_visible(self, new_visibility):
        pygame.mouse.set_visible(new_visibility)
    is_visible = property(fset = set_is_visible)
    def is_pressed(self, button_number):
        """Return True if the given mouse button (0 = left) is held down."""
        return pygame.mouse.get_pressed()[button_number] == 1
###############################################################################
## Keyboard class #############################################################
###############################################################################
class Keyboard(object):
    """Polling interface to the keyboard state."""
    def is_pressed(self, key):
        """Return True if the given pygame key constant is currently held."""
        state = pygame.key.get_pressed()
        return state[key] == 1
###############################################################################
## Music class ################################################################
###############################################################################
class Music(object):
    """Thin wrapper around pygame's single background-music channel."""
    def load(self, filename):
        """Load a music file for playback."""
        pygame.mixer.music.load(filename)
    def play(self, loop=0):
        """Start playback; `loop` is the number of extra repetitions."""
        pygame.mixer.music.play(loop)
    def fadeout(self, millisec):
        """Fade the music out over `millisec` milliseconds."""
        pygame.mixer.music.fadeout(millisec)
    def stop(self):
        """Stop playback immediately."""
        pygame.mixer.music.stop()
###############################################################################
## Screen class ###############################################################
###############################################################################
##
## The Screen object represents the playing area. Since we can have
## only one screen under pygame, it's just a handy container for stuff
##
###############################################################################
class Screen(object):
    """
    The playing area. pygame supports only one display surface, so only one
    Screen may exist per program; it owns the list of objects in play, the
    background surface, and the frame-rate-limited main loop.
    """
    initialized = 0  # class-level flag guarding against a second Screen

    def __init__ (self, width=640, height=480, fps=50):
        # Bomb if you try this more than once
        if Screen.initialized:
            # BUG FIX: corrected typo "more than on" in the error message.
            raise GamesError("Cannot have more than one Screen object")
        Screen.initialized = 1
        # Create the pygame display
        self._display = pygame.display.set_mode ((width, height))
        self._width = width
        self._height = height
        self._background = self._display.convert()
        # Initialize a list of objects in play
        self._objects = []
        # Initialize list of dirty rectangles to be repainted
        self._dirtyrects = []
        # Time when we should draw the next frame
        self._next_tick = 0
        # Frames per second screen will be updated
        self._fps = fps

    #------Properties--------#
    ## width
    def get_width(self):
        return self._width
    width = property(get_width)
    ## height
    def get_height(self):
        return self._height
    height = property(get_height)
    ## fps
    def get_fps(self):
        return self._fps
    fps = property(get_fps)
    ## background
    def get_background(self):
        return self._background
    def set_background(self, new_background):
        """
        Set the background to the surface provided. Note that the
        surface should not have transparency set, or weird things
        will happen.
        """
        # Tile the provided surface across the whole play area.
        self._background = pygame.Surface((self._width, self._height))
        for x in range(0, self._width, new_background.get_width()):
            for y in range(0, self._height, new_background.get_height()):
                self._background.blit(new_background, (x, y))
        self._display.blit(self._background, (0,0))
        pygame.display.update()
    background = property(get_background, set_background)
    ## all objects
    def get_all_objects(self):
        """
        Returns a list of all the Sprites on the Screen.
        """
        return self._objects[:]
    all_objects = property(get_all_objects)
    ## event_grab
    def get_event_grab(self):
        return pygame.event.get_grab()
    def set_event_grab(self, new_status):
        pygame.event.set_grab(new_status)
    event_grab = property(get_event_grab, set_event_grab)

    def tick(self):
        """
        If you override the tick method in a subclass of the Screen
        class, you can specify actions which are carried out every
        tick.
        """
        pass

    def keypress(self, key):
        """
        If you override the keypress method, you will be able to
        handle individual keypresses instead of dealing with the
        keys held down as in the standard library
        """
        pass

    def handle_events(self):
        """
        If you override this method in a subclass of the Screen
        class, you can specify how to handle different kinds of
        events. However you must handle the quit condition!
        """
        events = pygame.event.get()
        for event in events:
            if event.type == QUIT:
                self.quit()
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    self.quit()
                else:
                    self.keypress(event.key)

    def quit(self):
        """
        Calling this method will stop the main loop from running and
        make the graphics window disappear.
        """
        self._exit = 1

    def clear(self):
        """
        Destroy all objects on this Screen.
        """
        # Iterate a copy: destroy() mutates self._objects via remove().
        for obj in self._objects[:]:
            obj.destroy()
        self._objects = []

    def _update_display(self):
        """
        Get the actual display in sync with reality.
        """
        pygame.display.update(self._dirtyrects)
        self._dirtyrects = []

    def mainloop(self):
        """
        Run the pygame main loop. This will animate the objects on the
        screen and call their tick methods every tick.
        """
        self._exit = 0
        while not self._exit:
            self._wait_frame()
            for obj in self._objects:
                obj._erase()
            # Take a copy of the _objects list as it may get changed in place.
            for obj in self._objects[:]:
                if obj._tickable:
                    obj._tick()
            self.tick()
            for obj in self._objects:
                obj._draw()
            self._update_display()
            pygame.display.flip()
            self.handle_events()
        # Throw away any pending events.
        pygame.event.get()

    def _wait_frame (self):
        "Wait for the correct fps time to expire"
        this_tick = pygame.time.get_ticks()
        if this_tick < self._next_tick:
            pygame.time.delay(int(self._next_tick+0.5) - this_tick)
        self._next_tick = this_tick + (1000./self._fps)

    def overlapping_objects(self, rectangle):
        """
        Return list of all sprites which overlap given rectangle.
        """
        rect = pygame.Rect (rectangle)
        rect_list = []
        for obj in self._objects:
            rect_list.append (obj._rect)
        indices = rect.collidelistall (rect_list)
        over_objects = []
        for index in indices:
            # Only collideable sprites participate in overlap queries.
            if (self._objects[index]).is_collideable:
                over_objects.append (self._objects [index])
        return over_objects

    def _elevate(self, it, above=None):
        """
        Elevates an object to the top of the stack, or above the specified
        object.
        """
        # This makes sure we're always in a consistent state.
        objects = self._objects[:]
        # Remove the object from the list.
        objects.remove(it)
        if above is None:
            # Put it on top (the end).
            objects.append(it)
        else:
            # Put the object after <above>.
            idx = 1+objects.index(above)
            objects[idx:idx]=[it]
        # Install the new list.
        self._objects = objects

    def _lower(self, it, below=None):
        """
        Lower an object to the bottom of the stack, or below the specified
        object.
        """
        # BUG FIX: the parameter used to be named 'object' while the body
        # referenced the undefined name 'it', so every call raised NameError.
        # The parameter is now 'it', matching _elevate.
        objects = self._objects[:]
        objects.remove(it)
        if below is None:
            # Put the object at the beginning (bottom) of the list.
            self._objects = [it]+objects
        else:
            # Put the object before (below) the specified object.
            idx = objects.index(below)
            objects[idx:idx]=[it]
            self._objects = objects

    def add(self, sprite):
        """Register a sprite so the main loop draws and ticks it."""
        self._objects.append(sprite)

    def remove(self, sprite):
        """Remove a sprite from play; harmless if it is already gone."""
        try:
            self._objects.remove(sprite)
        except ValueError:
            # Already done it: happens in some games, not an error.
            pass

    def blit_and_dirty (self, source_surf, dest_pos):
        """
        You probably won't need to use this method in your own programs,
        as |Sprite| and its sub-classes know how to draw themselves on
        the screen. You'd need to use method if you wanted to draw an
        image on the screen which wasn't an |Sprite|.
        This method blits (draws, taking account of transparency) the
        given source surface |source_surf| to the screen at the position
        given by |dest_pos|.
        It then remembers the place where the surface was drawn as
        ``dirty''. This means that when the display is updated on the
        next tick, this part of it will be redrawn.
        """
        rect = self._display.blit(source_surf, dest_pos)
        self._dirtyrects.append(rect)

    def blit_background(self, rect):
        """
        This method draws the background over the given rectangle, and
        marks that rectangle as ``dirty'' (see the |blit_and_dirty|
        method for what that means). It's used to erase an object before
        moving it. You shouldn't need to call it yourself.
        """
        rect = self._display.blit(self._background, rect, rect)
        self._dirtyrects.append(rect)
###############################################################################
## Sprite class ###############################################################
###############################################################################
## ##
## Sprite represents a graphical object on the screen. Sprites ##
## can be moved, rotated, deleted, and maybe have other things done to them. ##
## ##
###############################################################################
class Sprite(object):
    """
    A graphical object on the screen, positioned by its centre point.

    Sprites can be moved, rotated, elevated/lowered in the draw stack and
    destroyed. Position, velocity, edges, angle and image are exposed both
    as get_/set_ methods and as properties.
    """
    def __init__(self, image, angle=0,
                 x=0, y=0,
                 top=None, bottom=None, left=None, right=None,
                 dx=0, dy=0,
                 interval=1, is_collideable=True):
        # A Screen must exist first: drawing/erasing goes through the
        # module-level 'screen' created by init().
        if not Screen.initialized:
            raise GamesError("Screen object must be intialized before any Sprite object")
        self._surface = image
        self._orig_surface = image # Surface before any rotation
        self._rect = self._surface.get_rect()
        self.position = (x, y)
        # Optional edge anchors override the centre position on that axis.
        if top != None:
            self.top = top
        if bottom != None:
            self.bottom = bottom
        if left != None:
            self.left = left
        if right != None:
            self.right = right
        self.velocity = (dx, dy)
        self._angle = angle % 360
        if self._angle != 0:
            self._rotate()
        self.is_collideable = is_collideable
        # _interval: number of ticks between calls to self.tick().
        self._interval = interval
        self._tickable = 1
        self._next = 0
        # _gone is set once destroy() has run, so __del__ won't repeat it.
        self._gone = 0

    def __del__(self):
        # NOTE(review): relies on the module-level 'screen' global existing;
        # presumably init() was called — confirm for direct-Screen users.
        if screen and not self._gone:
            self.destroy()

    def _draw(self):
        """
        Draw object on screen by blitting the image onto the screen.
        """
        screen.blit_and_dirty(self._surface, self._rect)

    def _erase(self):
        """
        Erase object from screen by blitting the background over where
        it was.
        """
        screen.blit_background(self._rect)

    def _replace(self, new_surface):
        # Swap the displayed surface while keeping the centre position.
        x, y = self.position
        self._surface = new_surface
        self._rect = self._surface.get_rect()
        self.position = (x, y)

    def _rotate(self):
        # Always rotate from the unrotated original to avoid cumulative
        # resampling artefacts.
        self._replace(pygame.transform.rotate(self._orig_surface, -self._angle))

    def _tick(self):
        # Called by the Screen main loop every frame; fires self.tick()
        # every _interval frames, then applies velocity and update().
        self._next = self._next + 1
        if self._next >= self._interval:
            self._next = 0
            self.tick()
        if self._dx or self._dy:
            self.position = ( (self._x + self._dx), (self._y + self._dy) )
        self.update()

    def start (self):
        # Resume ticking (and reset the interval counter).
        self._tickable = 1
        self._next = 0

    def stop (self):
        # Suspend ticking; the sprite stays on screen.
        self._tickable = 0

    def update(self):
        # Hook: called every frame after movement; override in subclasses.
        pass

    def tick(self):
        # Hook: called every _interval frames; override in subclasses.
        pass

    def overlaps(self, other):
        """Return True if both sprites are collideable and their rects overlap."""
        if not self.is_collideable or not other.is_collideable:
            return False
        else:
            return self._rect.colliderect(other._rect)

    def elevate(self, above=None):
        """
        Elevate an object to the top of the stack, or above the specified
        object.
        """
        screen._elevate(self, above)

    def lower(self, below=None):
        """
        Lower an object to the bottom of the stack, or below the specified
        object.
        """
        screen._lower(self, below)

    def destroy(self):
        """
        Erase object from screen and remove it from the list of objects
        maintained by games module.
        """
        self._erase()
        screen.remove(self)
        self._gone = 1

    #------Properties--------#
    ## x (horizontal centre)
    def get_x(self):
        return self._x
    def set_x(self, new_x):
        self._x = new_x
        self._rect.centerx = int(self._x)
    x = property(get_x, set_x)
    ## y (vertical centre)
    def get_y(self):
        return self._y
    def set_y(self, new_y):
        self._y = new_y
        self._rect.centery = int(self._y)
    y = property(get_y, set_y)
    ## position — (x, y) of the sprite's centre
    def get_position(self):
        return ( (self.x, self.y) )
    def set_position(self, new_position):
        self.x, self.y = new_position
    position = property(get_position, set_position)
    ## dx (horizontal velocity, pixels per tick)
    def get_dx(self):
        return self._dx
    def set_dx(self, new_dx):
        self._dx = new_dx
    dx = property(get_dx, set_dx)
    ## dy (vertical velocity, pixels per tick)
    def get_dy(self):
        return self._dy
    def set_dy(self, new_dy):
        self._dy = new_dy
    dy = property(get_dy, set_dy)
    ## velocity — (dx, dy)
    def get_velocity(self):
        return ( (self.dx, self.dy) )
    def set_velocity (self, new_velocity):
        self.dx, self.dy = new_velocity
    velocity = property(get_velocity, set_velocity)
    ## left edge; setting it re-derives the centre from the rect
    def get_left(self):
        return self._rect.left
    def set_left(self, new_left):
        self._rect.left = new_left
        self._x = self._rect.centerx
    left = property(get_left, set_left)
    ## right edge
    def get_right(self):
        return self._rect.right
    def set_right(self, new_right):
        self._rect.right = new_right
        self._x = self._rect.centerx
    right = property(get_right, set_right)
    ## top edge
    def get_top(self):
        return self._rect.top
    def set_top(self, new_top):
        self._rect.top = new_top
        self._y = self._rect.centery
    top = property(get_top, set_top)
    ## bottom edge
    def get_bottom(self):
        return self._rect.bottom
    def set_bottom(self, new_bottom):
        self._rect.bottom = new_bottom
        self._y = self._rect.centery
    bottom = property(get_bottom, set_bottom)
    ## angle in degrees, normalised to [0, 360)
    def get_angle(self):
        return self._angle
    def set_angle(self, new_angle):
        self._angle = new_angle % 360
        self._rotate()
    angle = property(get_angle, set_angle)
    ## image — the unrotated source surface
    def get_image(self):
        return self._orig_surface
    def set_image(self, new_image):
        self._orig_surface = new_image
        if self._angle != 0:
            self._rotate()
        else:
            self._replace(new_image)
    image = property(get_image, set_image)
    ## height of the (possibly rotated) displayed surface
    def get_height(self):
        return self._surface.get_height()
    height = property(get_height)
    ## width of the (possibly rotated) displayed surface
    def get_width(self):
        return self._surface.get_width()
    width = property(get_width)
    ## is_collideable — whether this sprite takes part in overlap tests
    def get_is_collideable(self):
        return self._is_collideable
    def set_is_collideable(self, new_status):
        self._is_collideable = new_status
    is_collideable = property(get_is_collideable, set_is_collideable)
    ## overlapping_sprites — collideable sprites whose rects touch this one
    def get_overlapping_sprites(self):
        overlapping = screen.overlapping_objects(self._rect)
        if self in overlapping:
            overlapping.remove(self)
        return overlapping
    overlapping_sprites = property(get_overlapping_sprites)
    ## interval — ticks between calls to self.tick()
    def get_interval(self):
        return self._interval
    def set_interval(self, new_interval):
        self._interval = new_interval
    interval = property(get_interval, set_interval)
class Text(Sprite):
    """
    A Sprite displaying an alphanumeric value rendered in the given
    font size and color.
    """
    def __init__(self, value, size, color, angle=0,
                 x=0, y=0,
                 top=None, bottom=None, left=None, right=None,
                 dx=0, dy=0,
                 interval=1, is_collideable=True):
        self._size = size
        self._color = color
        self._value = value
        self._font = pygame.font.Font(None, self._size)
        Sprite.__init__(self, self._create_surface(), angle,
                        x, y,
                        top, bottom, left, right,
                        dx, dy,
                        interval, is_collideable)

    def _create_surface(self):
        # Render the current value, antialiased, in the current color.
        return self._font.render(str(self._value), 1, self._color)

    #------Properties--------#
    ## value — re-rendered only when it actually changes
    def get_value(self):
        return self._value
    def set_value(self, new_value):
        if new_value != self._value:
            self._value = new_value
            self.image = self._create_surface()
    value = property(get_value, set_value)
    ## color — re-rendered only when it actually changes
    def get_color(self):
        return self._color
    def set_color(self, new_color):
        if new_color != self._color:
            self._color = new_color
            self.image = self._create_surface()
    color = property(get_color, set_color)
    ## size — rebuilds the font, then re-renders
    def get_size(self):
        return self._size
    def set_size(self, new_size):
        if new_size != self._size:
            self._size = new_size
            self._font = pygame.font.Font(None, self._size)
            self.image = self._create_surface()
    size = property(get_size, set_size)
class Question(Text):
    """
    Text that polls the keyboard each tick and invokes the action paired
    with whichever response key is pressed.
    """
    def __init__(self, value, size, color, angle=0,
                 x=0, y=0,
                 top=None, bottom=None, left=None, right=None,
                 dx=0, dy=0,
                 interval=1, is_collideable=True, responses=()):
        # responses: sequence of (key_constant, callable) pairs.
        Text.__init__(self, value, size, color, angle,
                      x, y,
                      top, bottom, left, right,
                      dx, dy,
                      interval, is_collideable)
        self.responses = responses

    def tick(self):
        # Fire the action of every response whose key is currently down.
        for response_key, response_action in self.responses:
            if keyboard.is_pressed(response_key):
                response_action()
class Message(Text):
    """
    Text that lives for `lifetime` ticks, then runs `after_death` (if
    given) and removes itself from the screen.
    """
    def __init__(self, value, size, color, angle=0,
                 x=0, y=0,
                 top=None, bottom=None, left=None, right=None,
                 dx=0, dy=0,
                 lifetime=0, is_collideable=True, after_death=None):
        # The lifetime is implemented as the sprite's tick interval, so
        # tick() fires exactly once, `lifetime` frames after creation.
        Text.__init__(self, value, size, color, angle,
                      x, y,
                      top, bottom, left, right,
                      dx, dy,
                      lifetime, is_collideable)
        self._after_death = after_death

    def tick(self):
        # Lifetime expired: run the callback, then self-destruct.
        if self._after_death:
            self._after_death()
        self.stop()
        self.destroy()
class Animation(Sprite):
    """
    An image that changes every repeat_interval ticks.
    The n_repeats parameter is the number of complete animation cycles to show.
    If n_repeats <= 0, the animation will repeat forever.
    You can give list of filenames or list of images.
    """
    def __init__(self, images, angle=0,
                 x=0, y=0,
                 top=None, bottom=None, left=None, right=None,
                 dx=0, dy=0,
                 repeat_interval=1, n_repeats=0, is_collideable=True):
        # Accept filenames as well as already-loaded surfaces.
        if images and isinstance(images[0], str):
            images = load_animation(images)
        self.images = images
        if self.images == []:
            raise GamesError("An animation with no images is illegal.")
        # Internally n_repeats counts down individual frames; -1 = forever.
        self.n_repeats = n_repeats or -1
        if self.n_repeats > 0:
            self.n_repeats = (self.n_repeats * len(self.images))
        first_image = self.next_image()
        # BUG FIX: the original fetched first_image and then discarded it by
        # calling next_image() a second time in the Sprite constructor —
        # skipping frame 0 and consuming one extra repeat count.
        Sprite.__init__(self, first_image, angle,
                        x, y,
                        top, bottom, left, right,
                        dx, dy,
                        repeat_interval, is_collideable)

    def next_image(self):
        """Return the next frame, rotating the image list; None when done."""
        if self.n_repeats==0: return None
        if self.n_repeats>0: self.n_repeats -= 1
        new_image = self.images[0]
        self.images = self.images[1:] + [self.images[0]]
        return new_image

    def tick(self):
        # Advance one frame; destroy the sprite once all cycles are shown.
        new_image = self.next_image()
        if new_image is None:
            self.destroy()
        else:
            self.image = new_image
###############################################################################
## Utility Functions
###############################################################################
def load_image(filename, transparent=True):
    """Loads an image, prepares it for play. Returns a pygame.Surface object
    which you can give as the "image" parameter to Sprite.
    filename -- the filename of the image to load
    transparent -- whether the background of the image should be transparent.
                   Defaults to true.
                   The background color is taken as the color of the pixel
                   at (0,0) in the image.
    """
    try:
        surface = pygame.image.load(filename)
    except pygame.error:
        raise GamesError( 'Could not load image "%s" %s'%(filename, pygame.get_error()) )
    if transparent:
        # Use the top-left pixel as the colorkey; RLEACCEL speeds up blits.
        corner = surface.get_at((0, 0))
        surface.set_colorkey(corner, RLEACCEL)
    # convert() matches the display's pixel format for fast blitting.
    return surface.convert()
def scale_image(image, x_scale, y_scale=None):
    """Return `image` scaled by the given factors.

    y_scale defaults to x_scale (uniform scaling).
    """
    if y_scale is None: y_scale = x_scale
    (x_size, y_size) = image.get_size()
    # BUG FIX: pygame.transform.scale requires integer pixel sizes; with a
    # fractional scale factor the original passed floats and raised TypeError.
    new_x_size = int(x_size * x_scale)
    new_y_size = int(y_size * y_scale)
    return pygame.transform.scale(image, (new_x_size, new_y_size))
def load_animation(filenames, transparent=1):
    """
    Load a sequence of image files, returning the list of surfaces
    needed by the Animation constructor.
    """
    def _load_frame(name):
        try:
            surface = pygame.image.load(name)
        except pygame.error:
            raise GamesError( 'Could not load animation frame "%s": %s' % (name, pygame.get_error()) )
        if transparent:
            # Top-left pixel becomes the transparent colorkey.
            surface.set_colorkey(surface.get_at((0,0)), RLEACCEL)
        return surface.convert()
    return [_load_frame(name) for name in filenames]
def load_sound(filename):
    """
    Load a sound file, returning a pygame.mixer.Sound object.
    """
    return pygame.mixer.Sound(filename)
###############################################################################
## Initialization Function
###############################################################################
def init(screen_width = 640, screen_height = 480, fps = 50):
    """
    Create the module-level screen, mouse, keyboard and music objects
    used throughout this module. Must be called before creating Sprites.
    """
    # BUG FIX: mouse/keyboard/music were plain locals and were discarded on
    # return; they must be module globals (Question.tick reads 'keyboard',
    # and user code expects games.mouse / games.music to exist).
    global screen, mouse, keyboard, music
    screen = Screen(screen_width, screen_height, fps)
    mouse = Mouse()
    keyboard = Keyboard()
    music = Music()
|
# -*- coding: utf-8 -*-
#Git for team work
'''
This is HW2 for CS225
Team Member:
Zhu Zhongbo
Yang Zhaohua
Guan Zimu
Xie Tian
'''
import os
# Which files each shortcut name stages before committing.
_FILE_SETS = {
    "all": ["main.py", "Git.py", "pylist.py", "dlinkedlist.py", "runtime.jpg"],
    "dl": ["dlinkedlist.py"],
    "py": ["pylist.py"],
    "main": ["main.py"],
}

def upload(function = "all"):
    """
    Stage the file set named by `function`, commit with message "updated",
    and push to origin/master. Unknown names do nothing (as before).
    """
    files = _FILE_SETS.get(function)
    if files is None:
        return
    # Chain the git commands exactly as the original shell one-liner did.
    command = " && ".join(
        ["git add %s" % name for name in files]
        + ['git commit -m"updated"', "git push -u origin master"]
    )
    os.system(command)
    return

if __name__ == "__main__":
    upload("all")
|
from setuptools import setup, find_packages
setup(
    name='SmartShopper',
    version='1.0',
    description='SE project: group 22',
    # BUG FIX: the author line was a syntax error ('author="...": ,').
    author="CSC510 - Group 22, Chandrahas Reddy Mandapati, Harini Bharata, "
           "Sri Pallavi Damuluri, Niraj Lavani, sandesh A S",
    author_email="sgaladha@ncsu.edu",
    packages=find_packages(),
    tests_require=['pytest'],
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        # BUG FIX: "2- Pre-Alpha" is not a valid trove classifier.
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "Topic :: SE Fall 21 Project",
    ],
    license='MIT',
    # BUG FIX: 'python' is not an installable PyPI distribution.
    install_requires=['pytest'],
)
|
# Copyright (c) 2021.
# The copyright lies with Timo Hirsch-Hoffmann, the further use is only permitted with reference to source
import urllib.request
from RiotGames.API.RiotApi import RiotApi
class Match(RiotApi):
    """Wrapper for the Riot Games match-v4 API endpoints."""
    __timeline_by_match_id_url: str = "https://{}.api.riotgames.com/lol/match/v4/timelines/by-match/{}?api_key={}"

    def __init__(self, apikey: str):
        """
        :param apikey: Riot API key used for all requests.
        """
        super().__init__(apikey)
        self.__super = super()

    def by_id(self, match_id: int, region: str):
        """
        Special Function still in development
        https://developer.riotgames.com/apis#match-v4/GET_getMatchlist
        TODO
        :param match_id:
        :param region:
        :return:
        """
        pass

    def matchlist_by_account_id(self, account_id: str, begin_time: int = None, end_time: int = None,
                                begin_index: int = None, end_index: int = None, champions: list = None,
                                queue: list = None, season: list = None):
        """
        Special Function still in development
        https://developer.riotgames.com/apis#match-v4/GET_getMatchlist
        TODO
        format url
        :param account_id:
            encrypted account id
        :param begin_time:
        :param end_time:
        :param begin_index:
        :param end_index:
        :param champions:
        :param queue:
        :param season:
        :return:
        """
        pass

    def timeline_by_match_id(self, match_id: int, region: str) -> dict:
        """
        Fetch the timeline of a match.
        :param match_id: numeric match id
        :param region: platform routing value (e.g. "euw1")
        :return: decoded JSON timeline as a dict
        """
        import json  # local import to leave the module's import block unchanged
        url = self.__timeline_by_match_id_url.format(region, match_id, super()._get_key())
        # SECURITY/BUG FIX: the response was parsed with eval(), which runs
        # arbitrary code from the network and cannot parse JSON
        # true/false/null. Use json.loads, and close the connection via a
        # context manager instead of leaking it.
        with urllib.request.urlopen(url) as response:
            return json.loads(response.read().decode())
|
import sys
from utils import timex
from covid19.lk_vax_centers.expand import expand
from covid19.lk_vax_centers.expand_i18n import expand_i18n
from covid19.lk_vax_centers.finalize import finalize
from covid19.lk_vax_centers.parse_pdf import parse_pdf
from covid19.lk_vax_centers.scrape_pdf import scrape_pdf
from covid19.lk_vax_centers.summarise import summarise
if __name__ == '__main__':
    # Daily pipeline: download today's vaccination-centre PDF, then parse,
    # expand, translate, summarise and finalize the extracted data.
    date_id = timex.get_date_id()
    # Abort with a non-zero exit code if the PDF could not be scraped.
    if not scrape_pdf(date_id):
        sys.exit(-1)
    parse_pdf(date_id)
    expand(date_id)
    expand_i18n(date_id)
    summarise(date_id)
    finalize(date_id)
    sys.exit(0)
|
import pycassa
from pycassa.types import *
from pycassa.system_manager import *
def setup(keyspace):
    """Completely destroy and recreate the sample keyspace for this app."""
    schema_mgr = Schema(keyspace)
    schema_mgr.create_keyspace()
    schema_mgr.create_column_families()
    schema_mgr.close()
class Schema(object):
    """Creates the Cassandra keyspace and column families for this app."""
    def __init__(self, keyspace, **kwargs):
        # kwargs are forwarded to pycassa's SystemManager (e.g. server address).
        self.keyspace = keyspace
        self.sys = SystemManager(**kwargs)
    def create_keyspace(self):
        # Drop any existing keyspace first; ignore "does not exist" errors.
        try:
            self.sys.drop_keyspace(self.keyspace)
        except pycassa.cassandra.c10.ttypes.InvalidRequestException:
            pass
        self.sys.create_keyspace(self.keyspace,
                                 strategy_options={'replication_factor': '1'})
    def create_column_families(self):
        # Entity CFs plus the denormalized index CFs used for listings.
        self.create_lists_cf()
        self.create_threads_cf()
        self.create_msgs_cf()
        self.create_list_threads_cf()
        self.create_list_msgs_cf()
        self.create_thread_msgs_cf()
    def close(self):
        # Release the SystemManager connection.
        self.sys.close()
    def create_lists_cf(self):
        self.sys.create_column_family(self.keyspace, 'lists',
                                      key_validation_class=UTF8_TYPE,
                                      comparator_type=UTF8_TYPE)
        self.alter_columns('lists', name=UTF8_TYPE)
    def create_threads_cf(self):
        self.sys.create_column_family(self.keyspace, 'threads',
                                      key_validation_class=UTF8_TYPE,
                                      comparator_type=UTF8_TYPE)
        self.alter_columns('threads', list_key=UTF8_TYPE, title=UTF8_TYPE,
                           message_updated_at=DATE_TYPE)
    def create_msgs_cf(self):
        # Messages are keyed by TimeUUID so keys sort by creation time.
        self.sys.create_column_family(self.keyspace, 'messages',
                                      key_validation_class=TimeUUIDType,
                                      comparator_type=UTF8_TYPE)
        self.alter_columns('messages',
                           list_key=UTF8_TYPE, thread_key=UTF8_TYPE,
                           title=UTF8_TYPE,
                           created_at=DATE_TYPE, updated_at=DATE_TYPE)
    def create_list_threads_cf(self):
        # Index CF: newest-first composite comparator (reversed date, key).
        self.sys.create_column_family(self.keyspace, 'list_threads',
                                      key_validation_class=UTF8_TYPE,
                                      comparator_type=CompositeType(
                                          DateType(reversed=True),UTF8_TYPE))
    def create_list_msgs_cf(self):
        self.sys.create_column_family(self.keyspace, 'list_messages',
                                      key_validation_class=UTF8_TYPE,
                                      comparator_type=CompositeType(
                                          DateType(reversed=True),TimeUUIDType()))
    def create_thread_msgs_cf(self):
        self.sys.create_column_family(self.keyspace, 'thread_messages',
                                      key_validation_class=UTF8_TYPE,
                                      comparator_type=CompositeType(
                                          DateType(reversed=True),TimeUUIDType()))
    def alter_columns(self, cf, **columns):
        # Apply per-column validators to an existing column family.
        for name in columns:
            self.sys.alter_column(self.keyspace, cf, name, columns[name])
|
"""
Config for accessing database.
DB_TYPE: Type of database to connect to. Options: MYSQL, SQLITE, MSSQL (Only for Decision Tree Classifier as of now)
"""
DB_TYPE = "DB_TYPE"
DB_PATH = "DB_PATH"
DB_HOST = "DB_HOST"
DB_USER = "DB_USER"
DB_PW = "DB_PW"
DB_NAME = "DB_NAME"
DB_PORT = DB_PORT
"""
These fields are required for each connection type:
MySQL:
DB_TYPE = "DB_TYPE"
DB_PATH = "DB_PATH"
DB_HOST = "DB_HOST"
DB_USER = "DB_USER"
DB_PW = "DB_PW"
DB_NAME = "DB_NAME"
DB_PORT = DB_PORT
SQLite:
DB_TYPE = "DB_TYPE"
DB_PATH = "DB_PATH"
DB_NAME = "DB_NAME"
MSSQL:
DB_TYPE = "DB_TYPE"
DB_HOST = "DB_HOST"
DB_NAME = "DB_NAME"
""" |
def from_bin(filename):
    """Read a binary file as a list of 16-bit little-endian words."""
    memory = []
    with open(filename, 'rb') as fh:
        while True:
            chunk = fh.read(2)
            if not chunk:
                break
            memory.append(int.from_bytes(chunk, 'little'))
    return memory
|
# Demo: print a nested list (recursively, per Imprimir_Listas.repetir).
from Imprimir_Listas import repetir
repetir(["hola", 52, ["Perros", 65, [16,23]], "gatos"], True)
|
# Counts something over even digit-lengths up to len(n) — presumably the
# number of even-length values with a certain digit-symmetry property that
# are <= n (looks like an even-length palindrome count); TODO confirm
# against the original problem statement.
n = int(input())
length = len(str(n))
ans = 0
for i in range(length, -1, -1):
    # Only even, non-zero lengths contribute.
    if i%2 != 0 or i == 0:
        continue
    elif i==length:
        # Split n into its high half `a` and low half `b`.
        b = n%(10**(i//2))
        a = (n-b)//(10**(i//2))
        # Count candidates whose high half is strictly below `a` ...
        ans += (a-(10**(i//2-1)))
        # ... plus one more when mirroring `a` stays <= n.
        if a <= b:
            ans += 1
    else:
        # Shorter even lengths contribute all 9 * 10**(i/2 - 1) values.
        ans += (10**(i//2))*0.9
print(int(ans))
import os
import time
import copy
import logging
import warnings
import os.path as osp
import numpy as np
import tensorflow as tf
import scipy.sparse as sp
from functools import partial
from tensorflow.keras.utils import Sequence
from tensorflow.python.keras import callbacks as callbacks_module
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.callbacks import History
from tensorflow.python.keras.utils.generic_utils import Progbar
from graphgallery.nn.models import BaseModel
from graphgallery.nn.models import training
from graphgallery.nn.functions import softmax
from graphgallery.data.io import makedirs_from_filename
from graphgallery.data import BaseGraph
from graphgallery.transforms import asintarr
from graphgallery.utils.raise_error import raise_if_kwargs
from graphgallery import POSTFIX, intx
# Ignore warnings:
# UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
# This is caused by `tf.gather` and it will be solved in future tensorflow version.
warnings.filterwarnings(
    'ignore', '.*Converting sparse IndexedSlices to a dense Tensor of unknown shape.*')
class SemiSupervisedModel(BaseModel):
    def __init__(self, *graph, device='cpu:0', seed=None, name=None, **kwargs):
        """Initialize the model and bind backend-specific step functions.

        When self.kind == "T" the TensorFlow step implementations are used
        (partially applied with the chosen device); otherwise the PyTorch
        implementations are bound directly.
        """
        super().__init__(*graph, device=device, seed=seed, name=name, **kwargs)
        if self.kind == "T":
            self.train_step_fn = partial(training.train_step_tf, device=self.device)
            self.test_step_fn = partial(training.test_step_tf, device=self.device)
            self.predict_step_fn = partial(training.predict_step_tf, device=self.device)
        else:
            self.train_step_fn = training.train_step_torch
            self.test_step_fn = training.test_step_torch
            self.predict_step_fn = training.predict_step_torch
    def process(self, *graph, **kwargs):
        """pre-process for the input graph, including manipulations
        on adjacency matrix and attribute matrix, and finally convert
        them into tensor (optional).

        Note:
        ----------
        This method will call the method 'process_step'
        and it must be implemented for processing the graph.

        Parameters:
        ----------
        graph: An instance of `graphgallery.data.Graph` or a tuple (list) of inputs.
            A sparse, attributed, labeled graph.
        kwargs: other custom keyword parameters.
        """
        if len(graph) > 0:
            # Unwrap a single positional argument, which may be a BaseGraph,
            # a dict of named inputs, or a tuple/list of positional inputs.
            if len(graph) == 1:
                graph, = graph
                if isinstance(graph, BaseGraph):
                    self.graph = graph
                elif isinstance(graph, dict):
                    self.graph.set_inputs(**graph)
            else:
                self.graph.set_inputs(*graph)
        return self.process_step()
def process_step(self):
raise NotImplementedError
def build(self):
"""Build the model using custom hyperparameters.
Note:
----------
This method must be called before training/testing/predicting.
Use `model.build()`. The following `Parameters` are only commonly used
Parameters, and other model-specific Parameters are not introduced as follows.
Parameters:
----------
hiddens: `list` of integer or integer scalar
The number of hidden units of model. Note: the last hidden unit (`n_classes`)
aren't necessary to specified and it will be automatically added in the last
layer.
activations: `list` of string or string
The activation function of model. Note: the last activation function (`softmax`)
aren't necessary to specified and it will be automatically specified in the
final output.
dropout: float scalar
Dropout rate for the hidden outputs.
l2_norm: float scalar
L2 normalize parameters for the hidden layers. (only used in the hidden layers)
lr: float scalar
Learning rate for the training model.
use_bias: bool
Whether to use bias in the hidden layers.
"""
raise NotImplementedError
def build_from_model(self, model):
"""Build the model using custom model.
Note:
----------
This method must be called before training/testing/predicting.
Use `model.build_from_model(model)` where the input `model` is
a TensorFlow model or PyTorch Model.
Parameters:
----------
model: a TensorFlow model or PyTorch Model
"""
# TODO: check for the input model
if self.kind == "T":
with tf.device(self.device):
self.model = model
else:
self.model = model.to(self.device)
def train(self, idx_train, idx_val=None,
epochs=200, early_stopping=None,
verbose=0, save_best=True, weight_path=None, as_model=False,
monitor='val_acc', early_stop_metric='val_loss', callbacks=None, **kwargs):
"""Train the model for the input `idx_train` of nodes or `sequence`.
Note:
----------
You must compile your model before training/testing/predicting. Use `model.build()`.
Parameters:
----------
idx_train: Numpy array-like, `list`, Integer scalar or `graphgallery.Sequence`
The index of nodes (or sequence) that will be used during training.
idx_val: Numpy array-like, `list`, Integer scalar or
`graphgallery.Sequence`, optional
The index of nodes (or sequence) that will be used for validation.
(default :obj: `None`, i.e., do not use validation during training)
epochs: Positive integer
The number of epochs of training.(default :obj: `200`)
early_stopping: Positive integer or None
The number of early stopping patience during training. (default :obj: `None`,
i.e., do not use early stopping during training)
verbose: int in {0, 1, 2, 3, 4}
'verbose=0': not verbose;
'verbose=1': Progbar (one line, detailed);
'verbose=2': Progbar (one line, omitted);
'verbose=3': Progbar (multi line, detailed);
'verbose=4': Progbar (multi line, omitted);
(default :obj: 0)
save_best: bool
Whether to save the best weights (accuracy of loss depend on `monitor`)
of training or validation (depend on `validation` is `False` or `True`).
(default :bool: `True`)
weight_path: String or None
The path of saved weights/model. (default :obj: `None`, i.e.,
`./log/{self.name}_weights`)
as_model: bool
Whether to save the whole model or weights only, if `True`, the `self.custom_objects`
must be speficied if you are using custom `layer` or `loss` and so on.
monitor: String
One of (val_loss, val_acc, loss, acc), it determines which metric will be
used for `save_best`. (default :obj: `val_acc`)
early_stop_metric: String
One of (val_loss, val_acc, loss, acc), it determines which metric will be
used for early stopping. (default :obj: `val_loss`)
callbacks: tensorflow.keras.callbacks. (default :obj: `None`)
kwargs: other keyword Parameters.
Return:
----------
A `tf.keras.callbacks.History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
"""
raise_if_kwargs(kwargs)
if not (isinstance(verbose, int) and 0<=verbose<=4):
raise ValueError("'verbose=0': not verbose"
"'verbose=1': Progbar(one line, detailed), "
"'verbose=2': Progbar(one line, omitted), "
"'verbose=3': Progbar(multi line, detailed), "
"'verbose=4': Progbar(multi line, omitted), "
f"but got {verbose}")
model = self.model
# Check if model has been built
if model is None:
raise RuntimeError(
'You must compile your model before training/testing/predicting. Use `model.build()`.')
if isinstance(idx_train, Sequence):
train_data = idx_train
else:
idx_train = asintarr(idx_train)
train_data = self.train_sequence(idx_train)
self.idx_train = idx_train
validation = idx_val is not None
if validation:
if isinstance(idx_val, Sequence):
val_data = idx_val
else:
idx_val = asintarr(idx_val)
val_data = self.test_sequence(idx_val)
self.idx_val = idx_val
else:
monitor = 'acc' if monitor[:3] == 'val' else monitor
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(callbacks)
history = History()
callbacks.append(history)
if early_stopping:
es_callback = EarlyStopping(monitor=early_stop_metric,
patience=early_stopping,
mode='auto',
verbose=kwargs.pop('es_verbose', 1))
callbacks.append(es_callback)
if save_best:
if not weight_path:
weight_path = self.weight_path
else:
self.weight_path = weight_path
makedirs_from_filename(weight_path)
if not weight_path.endswith(POSTFIX):
weight_path = weight_path + POSTFIX
mc_callback = ModelCheckpoint(weight_path,
monitor=monitor,
save_best_only=True,
save_weights_only=not as_model,
verbose=0)
callbacks.append(mc_callback)
callbacks.set_model(model)
model.stop_training = False
callbacks.on_train_begin()
if verbose:
stateful_metrics = {"acc", 'loss', 'val_acc', 'val_loss', 'time'}
if verbose <=2:
progbar = Progbar(target=epochs, verbose=verbose, stateful_metrics=stateful_metrics)
print("Training...")
begin_time = time.perf_counter()
for epoch in range(epochs):
if verbose > 2:
progbar = Progbar(target=len(train_data), verbose=verbose - 2, stateful_metrics=stateful_metrics)
callbacks.on_epoch_begin(epoch)
callbacks.on_train_batch_begin(0)
loss, accuracy = self.train_step(train_data)
training_logs = {'loss': loss, 'acc': accuracy}
if validation:
val_loss, val_accuracy = self.test_step(val_data)
training_logs.update(
{'val_loss': val_loss, 'val_acc': val_accuracy})
val_data.on_epoch_end()
callbacks.on_train_batch_end(len(train_data), training_logs)
callbacks.on_epoch_end(epoch, training_logs)
train_data.on_epoch_end()
if verbose:
time_passed = time.perf_counter() - begin_time
training_logs.update({'time': time_passed})
if verbose > 2:
print(f"Epoch {epoch+1}/{epochs}")
progbar.update(len(train_data), training_logs.items())
else:
progbar.update(epoch + 1, training_logs.items())
if model.stop_training:
break
callbacks.on_train_end()
if save_best:
self.load(weight_path, as_model=as_model)
self.remove_weights()
return history
def test(self, index, verbose=1):
"""
Test the output accuracy for the `index` of nodes or `sequence`.
Note:
----------
You must compile your model before training/testing/predicting.
Use `model.build()`.
Parameters:
----------
index: Numpy array-like, `list`, Integer scalar or `graphgallery.Sequence`
The index of nodes (or sequence) that will be tested.
Return:
----------
loss: Float scalar
Output loss of forward propagation.
accuracy: Float scalar
Output accuracy of prediction.
"""
if not self.model:
raise RuntimeError(
'You must compile your model before training/testing/predicting. Use `model.build()`.')
if isinstance(index, Sequence):
test_data = index
else:
index = asintarr(index)
test_data = self.test_sequence(index)
self.idx_test = index
if verbose:
print("Testing...")
stateful_metrics = {"test_acc", 'test_loss', 'time'}
progbar = Progbar(target=len(test_data), verbose=verbose, stateful_metrics=stateful_metrics)
begin_time = time.perf_counter()
loss, accuracy = self.test_step(test_data)
time_passed = time.perf_counter() - begin_time
progbar.update(len(test_data), [('test_loss', loss), ('test_acc', accuracy), ('time', time_passed)])
return loss, accuracy
def train_step(self, sequence):
"""
Forward propagation for the input `sequence`. This method will be called
in `train`. If you want to specify your custom data during training/testing/predicting,
you can implement a subclass of `graphgallery.Sequence`, which is iterable
and yields `inputs` and `labels` in each iteration.
Note:
----------
You must compile your model before training/testing/predicting.
Use `model.build()`.
Parameters:
----------
sequence: `graphgallery.Sequence`
The input `sequence`.
Return:
----------
loss: Float scalar
Output loss of forward propagation.
accuracy: Float scalar
Output accuracy of prediction.
"""
return self.train_step_fn(self.model, sequence)
def test_step(self, sequence):
"""
Forward propagation for the input `sequence`. This method will be called
in `test`. If you want to specify your custom data during training/testing/predicting,
you can implement a subclass of `graphgallery.Sequence`, which is iterable
and yields `inputs` and `labels` in each iteration.
Note:
----------
You must compile your model before training/testing/predicting.
Use `model.build()`.
Parameters:
----------
sequence: `graphgallery.Sequence`
The input `sequence`.
Return:
----------
loss: Float scalar
Output loss of forward propagation.
accuracy: Float scalar
Output accuracy of prediction.
"""
return self.test_step_fn(self.model, sequence)
def predict(self, index=None, return_prob=True):
"""
Predict the output probability for the input node index.
Note:
----------
You must compile your model before training/testing/predicting.
Use `model.build()`.
Parameters:
----------
index: Numpy 1D array, optional.
The indices of nodes to predict.
if None, predict the all nodes.
return_prob: bool.
whether to return the probability of prediction.
Return:
----------
The predicted probability of each class for each node,
shape (n_nodes, n_classes).
"""
if not self.model:
raise RuntimeError(
'You must compile your model before training/testing/predicting. Use `model.build()`.')
if index is None:
index = np.arange(self.graph.n_nodes, dtype=intx())
else:
index = asintarr(index)
sequence = self.predict_sequence(index)
logit = self.predict_step(sequence)
if return_prob:
logit = softmax(logit)
return logit
def predict_step(self, sequence):
return self.predict_step_fn(self.model, sequence)
def train_sequence(self, index):
"""
Construct the training sequence for the `index` of nodes.
Parameters:
----------
index: Numpy array-like, `list` or integer scalar
The index of nodes used in training.
Return:
----------
sequence: The sequence of `graphgallery.Sequence` for the nodes.
"""
raise NotImplementedError
def test_sequence(self, index):
"""
Construct the testing sequence for the `index` of nodes.
Note:
----------
If not implemented, this method will call `train_sequence` automatically.
Parameters:
----------
index: Numpy array-like, `list` or integer scalar
The index of nodes used in testing.
Return:
----------
sequence: The sequence of `graphgallery.Sequence` for the nodes.
"""
return self.train_sequence(index)
def predict_sequence(self, index):
"""
Construct the prediction sequence for the `index` of nodes.
Note:
----------
If not implemented, this method will call `test_sequence` automatically.
Parameters:
----------
index: Numpy array-like, `list` or integer scalar
The index of nodes used in prediction.
Return:
----------
The sequence of `graphgallery.Sequence` for the nodes.
"""
return self.test_sequence(index)
def _test_predict(self, index):
logit = self.predict(index)
predict_class = logit.argmax(1)
labels = self.graph.labels[index]
return (predict_class == labels).mean()
def reset_weights(self):
# TODO: add torch support
"""reset the model to the first time.
"""
model = self.model
if self.backup is None:
raise RuntimeError("You must store the `backup` before `reset_weights`."
"`backup` will be automatically stored when the model is built.")
for w, wb in zip(model.weights, self.backup):
w.assign(wb)
def reset_optimizer(self):
# TODO: add torch support
model = self.model
if hasattr(model, 'optimizer'):
for var in model.optimizer.variables():
var.assign(tf.zeros_like(var))
def reset_lr(self, value):
# TODO: add torch support
model = self.model
if not hasattr(model, 'optimizer'):
raise RuntimeError("The model has not attribute `optimizer`!")
model.optimizer.learning_rate.assign(value)
def remove_weights(self):
filepath = self.weight_path
if not filepath.endswith(POSTFIX):
filepath = filepath + POSTFIX
if osp.exists(filepath):
os.remove(filepath)
|
"""
*Buffer-Point*
"""
class BufferPoint:
    """Empty placeholder class; defines no behavior or state."""
    pass
|
"""lib/netbox/ansible.py"""
class NetBoxToAnsible:
    """Main NetBox to Ansible class.

    Translates a dump of raw NetBox API objects (``netbox_data``, a dict of
    lists keyed by object type) into the flattened dict structure expected by
    the Ansible NetBox modules (``ansible_data``). Nested NetBox objects
    (site, tenant, vrf, ...) are collapsed to their ``name``/``label`` only
    when present; ``None`` values are passed through unchanged.
    """

    def __init__(self, netbox_data):
        # Raw NetBox export (dict of object-type -> list of records).
        self.netbox_data = netbox_data
        # Accumulated Ansible-ready translations, filled by the methods below.
        self.ansible_data = {}

    def data(self):
        """Translate NetBox data to Ansible constructs"""
        # DCIM
        self.dcim_translations()
        # Tenancy
        self.tenancy_translations()
        # IPAM
        self.ipam_translations()
        # Virtualization
        self.virtualization_translations()
        # Circuits
        self.circuits_translations()
        # Secrets
        self.secrets_translations()
        # Extras
        self.extras_translations()

        return self.ansible_data

    def dcim_translations(self):
        """Translate DCIM related info"""
        self.regions()
        self.sites()
        self.rack_roles()
        self.rack_groups()
        self.racks()
        self.manufacturers()
        self.platforms()
        self.device_types()
        self.device_roles()
        self.devices()
        self.interfaces()
        self.inventory_items()

    def tenancy_translations(self):
        """Translate tenancy related info"""
        self.tenant_groups()
        self.tenants()

    def ipam_translations(self):
        """Translate IPAM related info"""
        self.roles()
        self.vlan_groups()
        self.vlans()
        self.vrfs()
        self.rirs()
        self.aggs()
        self.prefixes()
        self.ip_addresses()

    def virtualization_translations(self):
        """Translate virtualization related info"""
        self.cluster_groups()
        self.cluster_types()
        self.clusters()
        self.virtual_machines()
        self.virtual_interfaces()

    def circuits_translations(self):
        """Translate circuit related info (not implemented yet)"""
        # self.providers()
        # self.circuit_types()
        # self.circuits()

    def extras_translations(self):
        """Translate extras related info (not implemented yet)"""
        # self.config_contexts()

    def secrets_translations(self):
        """Translate secrets related info (not implemented yet)"""
        # self.secret_roles()
        # self.secrets()

    def roles(self):
        """Extract NetBox roles"""
        netbox_ipam_roles = []
        for role in self.netbox_data['netbox_ipam_roles']:
            data = role['data']
            role_info = {
                'data': {'name': data['name'], 'weight': data['weight']},
                'state': role['state']}
            netbox_ipam_roles.append(role_info)
        self.ansible_data['netbox_ipam_roles'] = netbox_ipam_roles

    def vlan_groups(self):
        """Extract NetBox VLAN groups"""
        netbox_vlan_groups = []
        for group in self.netbox_data['netbox_vlan_groups']:
            data = group['data']
            # Update site with name only if defined
            if data['site'] is not None:
                data['site'] = data['site']['name']
            group_info = {
                'data': {'name': data['name'], 'site': data['site']},
                'state': group['state']}
            netbox_vlan_groups.append(group_info)
        self.ansible_data['netbox_vlan_groups'] = netbox_vlan_groups

    def vlans(self):
        """Extract NetBox VLANs"""
        netbox_vlans = []
        for vlan in self.netbox_data['netbox_vlans']:
            data = vlan['data']
            # Update site with name only if defined
            if data['site'] is not None:
                data['site'] = data['site']['name']
            vlan_info = {
                'data': {'name': data['name'], 'site': data['site']},
                'state': vlan['state']}
            netbox_vlans.append(vlan_info)
        self.ansible_data['netbox_vlans'] = netbox_vlans

    def vrfs(self):
        """Extract NetBox VRFs"""
        netbox_vrfs = []
        for vrf in self.netbox_data['netbox_vrfs']:
            data = vrf['data']
            # Update tenant with name only if defined
            if data['tenant'] is not None:
                data['tenant'] = data['tenant']['name']
            vrf_info = {
                'data': {'name': data['name'], 'rd': data['rd'],
                         'enforce_unique': data['enforce_unique'],
                         'description': data['description'],
                         'tags': data['tags'],
                         'custom_fields': data['custom_fields'],
                         'tenant': data['tenant']},
                'state': vrf['state']}
            netbox_vrfs.append(vrf_info)
        self.ansible_data['netbox_vrfs'] = netbox_vrfs

    def rirs(self):
        """Extract NetBox RIRs"""
        netbox_rirs = []
        for rir in self.netbox_data['netbox_rirs']:
            data = rir['data']
            rir_info = {
                'data': {'name': data['name'],
                         'is_private': data['is_private']},
                'state': rir['state']}
            netbox_rirs.append(rir_info)
        self.ansible_data['netbox_rirs'] = netbox_rirs

    def aggs(self):
        """Extract NetBox aggregates"""
        netbox_aggregates = []
        for agg in self.netbox_data['netbox_aggregates']:
            data = agg['data']
            # Update RIR with name only if defined
            if data['rir'] is not None:
                data['rir'] = data['rir']['name']
            agg_info = {
                'data': {'custom_fields': data['custom_fields'],
                         'description': data['description'],
                         'prefix': data['prefix'],
                         'rir': data['rir'],
                         'tags': data['tags']},
                'state': agg['state']}
            netbox_aggregates.append(agg_info)
        self.ansible_data['netbox_aggregates'] = netbox_aggregates

    def prefixes(self):
        """Extract NetBox prefixes"""
        netbox_prefixes = []
        for prefix in self.netbox_data['netbox_prefixes']:
            data = prefix['data']
            # Update role with name only if defined
            if data['role'] is not None:
                data['role'] = data['role']['name']
            # Update site with name only if defined
            if data['site'] is not None:
                data['site'] = data['site']['name']
            # Update tenant with name only if defined
            if data['tenant'] is not None:
                data['tenant'] = data['tenant']['name']
            # Update vrf with name only if defined
            if data['vrf'] is not None:
                data['vrf'] = data['vrf']['name']
            prefix_info = {
                'data': {'custom_fields': data['custom_fields'],
                         'description': data['description'],
                         'family': data['family']['value'],
                         'is_pool': data['is_pool'],
                         'prefix': data['prefix'],
                         'site': data['site'],
                         'status': data['status']['label'],
                         'prefix_role': data['role'],
                         'tags': data['tags'],
                         'tenant': data['tenant'],
                         'vlan': data['vlan'],
                         'vrf': data['vrf']
                         }, 'state': prefix['state']}
            netbox_prefixes.append(prefix_info)
        self.ansible_data['netbox_prefixes'] = netbox_prefixes

    def ip_addresses(self):
        """Extract NetBox IP addresses"""
        netbox_ip_addresses = []
        for address in self.netbox_data['netbox_ip_addresses']:
            data = address['data']
            # Update interface with name and device
            if data['interface'] is not None:
                interface = data['interface']
                data['interface'] = {
                    'name': interface['name']
                }
                # `device` may be None for VM interfaces; a TypeError on the
                # subscript simply means there is no device to record.
                try:
                    data['interface']['device'] = interface['device']['name']
                except TypeError:
                    pass
                if interface['virtual_machine'] is not None:
                    data['interface']['virtual_machine'] = interface[
                        'virtual_machine']['name']
            # Update nat_inside
            if data['nat_inside'] is not None:
                data['nat_inside'] = {
                    'address': data['nat_inside']['address'],
                    'vrf': data['nat_inside']['vrf']
                }
            # Update tenant with name only if defined
            if data['tenant'] is not None:
                data['tenant'] = data['tenant']['name']
            # Update vrf with name only if defined
            if data['vrf'] is not None:
                data['vrf'] = data['vrf']['name']
            address_info = {'data': {'address': data['address'],
                                     'custom_fields': data['custom_fields'],
                                     'description': data['description'],
                                     'family': data['family']['value'],
                                     'interface': data['interface'],
                                     'nat_inside': data['nat_inside'],
                                     'status': data['status']['label'],
                                     'tags': data['tags'],
                                     'tenant': data['tenant'],
                                     'vrf': data['vrf']},
                            'state': address['state']}
            if data['role'] is not None:
                address_info['data']['role'] = data['role']['label']
            netbox_ip_addresses.append(address_info)
        self.ansible_data['netbox_ip_addresses'] = netbox_ip_addresses

    def tenant_groups(self):
        """Extract NetBox tenant groups"""
        netbox_tenant_groups = []
        for group in self.netbox_data['netbox_tenant_groups']:
            data = group['data']
            group_info = {
                'data': {'name': data['name']}, 'state': group['state']}
            netbox_tenant_groups.append(group_info)
        self.ansible_data['netbox_tenant_groups'] = netbox_tenant_groups

    def tenants(self):
        """Extract NetBox tenants"""
        netbox_tenants = []
        for tenant in self.netbox_data['netbox_tenants']:
            data = tenant['data']
            # Update group with name only if defined
            if data['group'] is not None:
                data['group'] = data['group']['name']
            tenant_info = {
                'data': {'description': data['description'],
                         'comments': data['comments'],
                         'custom_fields': data['custom_fields'],
                         'name': data['name'],
                         'slug': data['slug'],
                         'tenant_group': data['group'],
                         'tags': data['tags']},
                'state': tenant['state']}
            netbox_tenants.append(tenant_info)
        self.ansible_data['netbox_tenants'] = netbox_tenants

    def regions(self):
        """Extract NetBox regions"""
        netbox_regions = []
        for region in self.netbox_data['netbox_regions']:
            data = region['data']
            # Update parent region with name only if defined
            if data['parent'] is not None:
                data['parent'] = data['parent']['name']
            region_info = {
                'data': {'name': data['name'],
                         'parent_region': data['parent']},
                'state': region['state']}
            netbox_regions.append(region_info)
        self.ansible_data['netbox_regions'] = netbox_regions

    def sites(self):
        """Extract NetBox sites"""
        netbox_sites = []
        for site in self.netbox_data['netbox_sites']:
            data = site['data']
            # Update region with name only if defined
            if data['region'] is not None:
                data['region'] = data['region']['name']
            # Update tenant with name only if defined
            if data['tenant'] is not None:
                data['tenant'] = data['tenant']['name']
            site_info = {
                'data': {'asn': data['asn'],
                         'comments': data['comments'],
                         'contact_name': data['contact_name'],
                         'contact_phone': data['contact_phone'],
                         'contact_email': data['contact_email'],
                         'custom_fields': data['custom_fields'],
                         'description': data['description'],
                         'facility': data['facility'],
                         'latitude': data['latitude'],
                         'longitude': data['longitude'],
                         'name': data['name'],
                         'physical_address': data['physical_address'],
                         'shipping_address': data['shipping_address'],
                         'slug': data['slug'],
                         'region': data['region'],
                         'status': data['status']['label'],
                         'tags': data['tags'],
                         'tenant': data['tenant'],
                         'time_zone': data['time_zone'],
                         }, 'state': site['state']}
            netbox_sites.append(site_info)
        self.ansible_data['netbox_sites'] = netbox_sites

    def rack_roles(self):
        """Extract NetBox rack roles"""
        netbox_rack_roles = []
        for role in self.netbox_data['netbox_rack_roles']:
            data = role['data']
            role_info = {'data': {'name': data['name'],
                                  'color': data['color']},
                         'state': role['state']}
            netbox_rack_roles.append(role_info)
        self.ansible_data['netbox_rack_roles'] = netbox_rack_roles

    def rack_groups(self):
        """Extract NetBox rack groups"""
        netbox_rack_groups = []
        for group in self.netbox_data['netbox_rack_groups']:
            data = group['data']
            # Update site with name only if defined
            if data['site'] is not None:
                data['site'] = data['site']['name']
            group_info = {
                'data': {'name': data['name'], 'site': data['site']},
                'state': group['state']}
            netbox_rack_groups.append(group_info)
        self.ansible_data['netbox_rack_groups'] = netbox_rack_groups

    def racks(self):
        """Extract NetBox racks"""
        netbox_racks = []
        for rack in self.netbox_data['netbox_racks']:
            data = rack['data']
            # Update site with name only if defined
            if data['site'] is not None:
                data['site'] = data['site']['name']
            # Update rack group with name only if defined
            if data['group'] is not None:
                data['group'] = data['group']['name']
            # Update tenant with name only if defined
            if data['tenant'] is not None:
                data['tenant'] = data['tenant']['name']
            # Update type with label only if defined
            if data['type'] is not None:
                data['type'] = data['type']['label']
            # Update width with value only if defined
            if data['width'] is not None:
                data['width'] = data['width']['value']
            rack_info = {
                'data': {'asset_tag': data['asset_tag'],
                         'comments': data['comments'],
                         'custom_fields': data['custom_fields'],
                         'desc_units': data['desc_units'],
                         'name': data['name'],
                         'facility_id': data['facility_id'],
                         'outer_depth': data['outer_depth'],
                         'outer_width': data['outer_width'],
                         'rack_group': data['group'],
                         'rack_role': data['role'],
                         'serial': data['serial'],
                         'site': data['site'],
                         'status': data['status']['label'],
                         'tags': data['tags'],
                         'tenant': data['tenant'],
                         'type': data['type'],
                         'u_height': data['u_height'],
                         'width': data['width']
                         }, 'state': rack['state']}
            if data['outer_unit'] is not None:
                rack_info['data']['outer_unit'] = data['outer_unit']
            netbox_racks.append(rack_info)
        self.ansible_data['netbox_racks'] = netbox_racks

    def manufacturers(self):
        """Extract NetBox manufacturers"""
        netbox_manufacturers = []
        for manufacturer in self.netbox_data['netbox_manufacturers']:
            data = manufacturer['data']
            manufacturer_info = {'data': {'name': data['name']},
                                 'state': manufacturer['state']}
            netbox_manufacturers.append(manufacturer_info)
        self.ansible_data['netbox_manufacturers'] = netbox_manufacturers

    def platforms(self):
        """Extract NetBox platforms"""
        netbox_platforms = []
        for platform in self.netbox_data['netbox_platforms']:
            data = platform['data']
            # Update manufacturer with name only if defined
            if data['manufacturer'] is not None:
                data['manufacturer'] = data['manufacturer']['name']
            platform_info = {'data': {'manufacturer': data['manufacturer'],
                                      'name': data['name'],
                                      'napalm_driver': data['napalm_driver'],
                                      'napalm_args': data['napalm_args']},
                             'state': platform['state']}
            netbox_platforms.append(platform_info)
        self.ansible_data['netbox_platforms'] = netbox_platforms

    def device_types(self):
        """Extract NetBox device types"""
        netbox_device_types = []
        for device_type in self.netbox_data['netbox_device_types']:
            data = device_type['data']
            # Update manufacturer with name only if defined
            if data['manufacturer'] is not None:
                data['manufacturer'] = data['manufacturer']['name']
            device_type_info = {
                'data': {
                    'comments': data['comments'],
                    'custom_fields': data['custom_fields'],
                    'is_full_depth': data['is_full_depth'],
                    'manufacturer': data['manufacturer'],
                    'model': data['model'],
                    'part_number': data['part_number'],
                    'slug': data['slug'],
                    'tags': data['tags'],
                    'u_height': data['u_height']
                },
                'state': device_type['state']}
            if data['subdevice_role'] is not None:
                device_type_info['data']['subdevice_role'] = data[
                    'subdevice_role']['label']
            netbox_device_types.append(device_type_info)
        self.ansible_data['netbox_device_types'] = netbox_device_types

    def device_roles(self):
        """Extract NetBox device roles"""
        netbox_device_roles = []
        for role in self.netbox_data['netbox_device_roles']:
            data = role['data']
            role_info = {'data': {
                'name': data['name'],
                'color': data['color'],
                'vm_role': data['vm_role']
            }, 'state': role['state']}
            netbox_device_roles.append(role_info)
        self.ansible_data['netbox_device_roles'] = netbox_device_roles

    def devices(self):
        """Extract NetBox devices"""
        netbox_devices = []
        for device in self.netbox_data['netbox_devices']:
            data = device['data']
            device_info = {'data': {
                'name': data['name'],
                'platform': data['platform'],
                'serial': data['serial'],
                'asset_tag': data['asset_tag'],
                'position': data['position'],
                'status': data['status']['label'],
                'comments': data['comments'],
                'tags': data['tags'],
                'custom_fields': data['custom_fields']
            }, 'state': device['state']}
            # Update cluster with name only if defined
            if data['cluster'] is not None:
                device_info['data']['cluster'] = data['cluster']['name']
            # Update device_role with name only if defined
            if data['device_role'] is not None:
                device_info['data']['device_role'] = data['device_role'][
                    'name']
            # Update device_type with name only if defined
            if data['device_type'] is not None:
                device_info['data']['device_type'] = data['device_type'][
                    'model']
            # Update face with label only if defined
            if data['face'] is not None:
                device_info['data']['face'] = data['face']['label']
            # Update rack with name only if defined
            if data['rack'] is not None:
                device_info['data']['rack'] = data['rack']['name']
            # Update site with name only if defined
            if data['site'] is not None:
                device_info['data']['site'] = data['site']['name']
            # Update tenant with name only if defined
            if data['tenant'] is not None:
                device_info['data']['tenant'] = data['tenant']['name']
            netbox_devices.append(device_info)
        self.ansible_data['netbox_devices'] = netbox_devices

    def interfaces(self):
        """Extract NetBox interfaces"""
        netbox_device_interfaces = []
        for interface in self.netbox_data['netbox_device_interfaces']:
            data = interface['data']
            # This is related to https://github.com/netbox-community/ansible_modules/issues/193
            # (older NetBox versions use `form_factor` instead of `type`).
            form_factor = data.get('form_factor')
            int_type = data.get('type')
            if int_type is not None:
                data['type'] = data['type']['label']
            elif form_factor is not None:
                data['type'] = data['form_factor']['label']
            if data['mode'] is not None:
                data['mode'] = data['mode']['label']
            interface_info = {'data': {
                'description': data['description'],
                'device': data['device']['name'],
                'enabled': data['enabled'],
                'type': data['type'],
                'lag': data['lag'],
                'mac_address': data['mac_address'],
                'mgmt_only': data['mgmt_only'],
                'mode': data['mode'],
                'mtu': data['mtu'],
                'name': data['name'],
                'tagged_vlans': data['tagged_vlans'],
                'tags': data['tags'],
                'untagged_vlan': data['untagged_vlan']
            }, 'state': interface['state']}
            netbox_device_interfaces.append(interface_info)
        self.ansible_data[
            'netbox_device_interfaces'] = netbox_device_interfaces

    def inventory_items(self):
        """Extract NetBox inventory items"""
        netbox_inventory_items = []
        for item in self.netbox_data['netbox_inventory_items']:
            data = item['data']
            # Update manufacturer with name only if defined
            if data['manufacturer'] is not None:
                data['manufacturer'] = data['manufacturer']['name']
            item_info = {
                'data': {'device': data['device']['name'],
                         'name': data['name'],
                         'part_id': data['part_id'],
                         'manufacturer': data['manufacturer'],
                         'serial': data['serial'],
                         'asset_tag': data['asset_tag'],
                         'description': data['description'],
                         'tags': data['tags']
                         }, 'state': item['state']}
            netbox_inventory_items.append(item_info)
        self.ansible_data['netbox_inventory_items'] = netbox_inventory_items

    def cluster_groups(self):
        """Extract NetBox cluster groups"""
        netbox_cluster_groups = []
        for group in self.netbox_data['netbox_cluster_groups']:
            data = group['data']
            group_info = {'data': {'name': data['name']},
                          'state': group['state']}
            netbox_cluster_groups.append(group_info)
        self.ansible_data['netbox_cluster_groups'] = netbox_cluster_groups

    def cluster_types(self):
        """Extract NetBox cluster types"""
        netbox_cluster_types = []
        for cluster_type in self.netbox_data['netbox_cluster_types']:
            data = cluster_type['data']
            cluster_type_info = {'data': {'name': data['name']},
                                 'state': cluster_type['state']}
            netbox_cluster_types.append(cluster_type_info)
        self.ansible_data['netbox_cluster_types'] = netbox_cluster_types

    def clusters(self):
        """Extract NetBox clusters"""
        netbox_clusters = []
        for cluster in self.netbox_data['netbox_clusters']:
            data = cluster['data']
            # Update site with name only if defined
            if data['site'] is not None:
                data['site'] = data['site']['name']
            # FIX: group and type may be None — guard before dereferencing,
            # consistent with every other optional field in this class
            # (previously `data['group']['name']` raised TypeError for
            # clusters without a group/type).
            if data['group'] is not None:
                data['group'] = data['group']['name']
            if data['type'] is not None:
                data['type'] = data['type']['name']
            cluster_info = {'data': {'comments': data['comments'],
                                     'custom_fields': data['custom_fields'],
                                     'name': data['name'],
                                     'cluster_group': data['group'],
                                     'cluster_type': data['type'],
                                     'site': data['site'],
                                     'tags': data['tags']},
                            'state': cluster['state']}
            netbox_clusters.append(cluster_info)
        self.ansible_data['netbox_clusters'] = netbox_clusters

    def virtual_machines(self):
        """Extract NetBox virtual machines"""
        netbox_virtual_machines = []
        for virtual_machine in self.netbox_data['netbox_virtual_machines']:
            data = virtual_machine['data']
            # FIX: platform may be None — guard before dereferencing,
            # consistent with the other optional fields in this class
            # (previously `data['platform']['name']` raised TypeError for
            # VMs without a platform).
            if data['platform'] is not None:
                data['platform'] = data['platform']['name']
            vm_info = {'data': {'disk': data['disk'],
                                'memory': data['memory'],
                                'name': data['name'],
                                'platform': data['platform'],
                                'site': data['site'],
                                'vcpus': data['vcpus'],
                                'status': data['status']['label'],
                                'tags': data['tags'],
                                'custom_fields': data['custom_fields']
                                },
                       'state': virtual_machine['state']}
            # Update cluster with name only if defined
            if data['cluster'] is not None:
                vm_info['data']['cluster'] = data['cluster']['name']
            # Update virtual_machine_role with name only if defined
            if data['role'] is not None:
                vm_info['data']['virtual_machine_role'] = data['role']['name']
            # Update site with name only if defined
            if data['site'] is not None:
                vm_info['data']['site'] = data['site']['name']
            # Update tenant with name only if defined
            if data['tenant'] is not None:
                vm_info['data']['tenant'] = data['tenant']['name']
            netbox_virtual_machines.append(vm_info)
        self.ansible_data['netbox_virtual_machines'] = netbox_virtual_machines

    def virtual_interfaces(self):
        """Extract NetBox virtual interfaces"""
        netbox_virtual_interfaces = []
        for interface in self.netbox_data['netbox_virtual_interfaces']:
            data = interface['data']
            # Update form_factor with label only if defined
            if data['form_factor'] is not None:
                data['form_factor'] = data['form_factor']['label']
            # Update mode with label only if defined
            if data['mode'] is not None:
                data['mode'] = data['mode']['label']
            interface_info = {'data': {
                'description': data['description'],
                'enabled': data['enabled'],
                'mac_address': data['mac_address'],
                'mode': data['mode'],
                'mtu': data['mtu'],
                'name': data['name'],
                'tagged_vlans': data['tagged_vlans'],
                'tags': data['tags'],
                'untagged_vlan': data['untagged_vlan'],
                'virtual_machine': data['virtual_machine']['name']
            }, 'state': interface['state']}
            netbox_virtual_interfaces.append(interface_info)
        self.ansible_data[
            'netbox_virtual_interfaces'] = netbox_virtual_interfaces
|
from __future__ import print_function
from tweaker.tweaker import DatabaseTweaker
from config import config
import sys
# Setup
# Input CSV listing resources whose URLs failed the link check.
csv_file = "resources/broken-resources/emlo-url-check-all-errors.csv"
# Name of the CSV column holding the resource id.
id_name = 'id'
skip_first_row = False
debugging = False
# Row cap applied only when debugging is on (see main()).
restrict = 500
# Known-broken URL prefixes; rows whose resource URL starts with one of
# these are candidates for rewriting in row_process().
errors = [
    #'https://databank.ora.ox.ac.uk/',
    'http://cofk2.bodleian.ox.ac.uk/',
    'https://cofk2.bodleian.ox.ac.uk/',
    'http://sers018.sers.ox.ac.uk/'
]
def row_process(tweaker, row):
    """Rewrite one resource's broken URL to its emlo.bodleian.ox.ac.uk form.

    Looks up the resource for this CSV row; when its URL starts with one of
    the known-broken prefixes, replaces the matching prefix with the EMLO
    work URL and appends a tracking suffix, then updates the resource.
    Unmatched broken URLs are reported but left unchanged.
    """
    resource = tweaker.get_resource_from_resource_id(row[id_name])
    if not resource:
        return
    # (prefix to strip) -> tracking suffix appended to the rewritten URL
    rewrites = [
        ('http://sers018.sers.ox.ac.uk/history/cofk/union.php?iwork_id=',
         '?previous=sers018-union'),
        ('http://sers018.sers.ox.ac.uk/history/cofk/selden_end.php?iwork_id=',
         '?previous=sers018-selden'),
        ('https://cofk2.bodleian.ox.ac.uk/interface/union.php?iwork_id=',
         '?previous=cofk2-https'),
        ('http://cofk2.bodleian.ox.ac.uk/interface/union.php?iwork_id=',
         '?previous=cofk2-http'),
    ]
    for error in errors:
        if resource['resource_url'].startswith(error):
            old_url = resource['resource_url']
            new_url = None
            for prefix, suffix in rewrites:
                if old_url.startswith(prefix):
                    new_url = old_url.replace(
                        prefix, 'http://emlo.bodleian.ox.ac.uk/w/') + suffix
            if new_url:
                tweaker.update_resource(resource['resource_id'], {
                    'resource_url': new_url
                })
            else:
                print ("NOT CHANGED: " + old_url )
def main() :
    """Connect to the database, process every CSV row, then commit/report.

    When debugging is on, no commit prompt is shown (commit=False) and the
    row set is truncated to `restrict` rows.
    """
    tweaker = DatabaseTweaker.tweaker_from_connection( config["dbname"], config["host"], config["port"], config["user"], config["password"] )
    tweaker.set_debug( debugging )

    if debugging:
        print( "Debug ON so no commit" )
        commit = False
    else :
        commit = do_commit()

    csv_rows = tweaker.get_csv_data( csv_file )
    if debugging:
        print( "Restricting rows to just", restrict)
        csv_rows = csv_rows[:restrict]

    # BUG FIX: the old loop did `continue` before decrementing its countdown
    # counter, so with skip_first_row=True the "first row" test stayed true
    # forever and EVERY row was skipped.  enumerate() makes the first-row
    # test explicit and cannot misbehave.
    for index, csv_row in enumerate(csv_rows):
        if index == 0 and skip_first_row:
            continue
        row_process( tweaker, csv_row )

    print()
    tweaker.print_audit()
    tweaker.commit_changes(commit)
    print( "Fini" )
def do_commit() :
    """Interactively ask whether changes should be committed; return bool.

    NOTE: uses raw_input, so this script targets Python 2 (consistent with
    the `from __future__ import print_function` at the top of the file).
    Anything other than exactly "y" means no commit.
    """
    commit = ( raw_input("Commit changes to database (y/n): ") == "y")
    if commit:
        print( "COMMITTING changes to database." )
    else:
        print( "NOT committing changes to database." )
    return commit
# Script entry point.
if __name__ == '__main__':
    print( "Starting...")
    main()
    print( "...Finished")
|
import requests

# numbersapi.com returns JSON (instead of plain text) when the `json`
# query parameter is supplied.
raw_url = "http://numbersapi.com/{}/math"
params = {
    'json': True,
}

# For each number in the input file, ask the API whether it has a real math
# fact ('found') and record Interesting/Boring accordingly.  Output is
# opened in append mode so repeated runs accumulate.
with open('data/step03.txt') as in_f, open('output/step03.txt', 'a') as out_f:
    for line in in_f:
        number = line.strip()
        res = requests.get(raw_url.format(number), params=params)
        # 'found' is False when the API falls back to a generic placeholder.
        # (The original built an unused `result_list` each iteration; removed.)
        result = 'Interesting' if res.json()['found'] else 'Boring'
        out_f.write(result + '\n')
|
from typing import Optional
import attr
from .action import Action
from .block import Block
@attr.dataclass(slots=True)
class Field:
    """Field on Attachment.

    A titled key/value entry rendered inside a Slack attachment;
    ``short`` presumably maps to Slack's half-width field flag —
    confirm against the Slack attachment API docs.
    """
    # Heading shown for the field.
    title: str
    # Text content of the field.
    value: str
    # Whether the field is flagged as "short".
    short: bool
@attr.dataclass(slots=True)
class Attachment:
    """Slack Attachment.

    Every field is optional and defaults to None, except ``fields`` which
    defaults to a fresh empty list via attr.Factory (a plain ``[]`` default
    would be shared between instances).
    """
    fallback: Optional[str] = None
    color: Optional[str] = None
    pretext: Optional[str] = None
    author_name: Optional[str] = None
    author_link: Optional[str] = None
    author_icon: Optional[str] = None
    title: Optional[str] = None
    title_link: Optional[str] = None
    text: Optional[str] = None
    # Block Kit blocks attached to this attachment, if any.
    blocks: Optional[list[Block]] = None
    # Per-instance mutable default — see class docstring.
    fields: list[Field] = attr.Factory(list)
    actions: Optional[list[Action]] = None
    image_url: Optional[str] = None
    thumb_url: Optional[str] = None
    footer: Optional[str] = None
    footer_icon: Optional[str] = None
    # Message timestamp (epoch seconds).
    ts: Optional[int] = None
    callback_id: Optional[str] = None
|
import numpy as np
def dynamic_slicing(array, slices):
    """Dynamic slicing of an array with arbitrary number of dimensions.

    Slices must match the number of dimensions.  Each per-axis spec is one
    of:
      * None          -> keep the whole axis (equivalent to ':')
      * int i         -> select index i and drop the axis (i may be negative)
      * [start, stop] -> slice(start, stop), axis kept

    Returns the sliced array with all integer-selected axes squeezed away.
    """
    slc = [slice(None)] * array.ndim
    axis_squeeze = []
    for axis in range(array.ndim):
        spec = slices[axis]
        if spec is None:
            slc[axis] = slice(None)
        elif isinstance(spec, int):
            # slice(i, i+1) keeps the axis with length 1 so it can be
            # squeezed afterwards.  BUG FIX: for i == -1, i+1 == 0 produced
            # an *empty* slice and squeeze() then failed, so negative
            # indices (promised by the docstring) were broken; use None
            # ("to the end") as the stop in that case.
            stop = spec + 1 if spec != -1 else None
            slc[axis] = slice(spec, stop)
            # Track the post-squeeze axis position (earlier squeezes shift
            # later axes left).
            axis_squeeze.append(axis - len(axis_squeeze))
        else:
            slc[axis] = slice(spec[0], spec[1])
    sliced_array = array[tuple(slc)]
    for axis in axis_squeeze:
        sliced_array = sliced_array.squeeze(axis)
    return sliced_array
# Demo: selecting a single element (1, 3, 4) via dynamic_slicing squeezes
# away every integer-indexed axis, matching direct integer indexing below
# (both yield a 0-d result).
my_arr = np.zeros((100, 100, 100))
new_arr = dynamic_slicing(my_arr, [1, 3, 4])
print(new_arr.shape)
print(new_arr)
new_arr = my_arr[1, 3, 4]
print(new_arr.shape)
print(new_arr)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"dygraph transformer layers"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import json
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard
from model.transformer_encoder import EncoderLayer, PrePostProcessLayer
class BertConfig(object):
    """Loads a BERT model configuration from a JSON file and exposes it
    via dict-style item access."""

    def __init__(self, config_path):
        # Parse eagerly so a bad path or malformed file fails at
        # construction time.
        self._config_dict = self._parse(config_path)

    def _parse(self, config_path):
        """Return the parsed JSON config; raise IOError on any failure."""
        try:
            with open(config_path) as json_file:
                return json.load(json_file)
        except Exception:
            raise IOError("Error in parsing bert model config file '%s'" %
                          config_path)

    def __getitem__(self, key):
        return self._config_dict[key]

    def print_config(self):
        """Print every config entry, sorted by key, then a separator."""
        for arg, value in sorted(six.iteritems(self._config_dict)):
            print('%s: %s' % (arg, value))
        print('------------------------------------------------')
class BertModelLayer(Layer):
    """
    BERT encoder as a Paddle dygraph Layer.

    Embeds token, position and sentence-type ids, sums the three
    embeddings, applies pre-processing (PrePostProcessLayer) and runs the
    stacked transformer encoder.  When ``return_pooled_out`` is True,
    forward() additionally returns the pooled first-token feature.
    """
    def __init__(self, config, return_pooled_out=True, use_fp16=False):
        super(BertModelLayer, self).__init__()
        # Model dimensions, all read from the BertConfig mapping.
        self._emb_size = config['hidden_size']
        self._n_layer = config['num_hidden_layers']
        self._n_head = config['num_attention_heads']
        self._voc_size = config['vocab_size']
        self._max_position_seq_len = config['max_position_embeddings']
        self._sent_types = config['type_vocab_size']
        self._hidden_act = config['hidden_act']
        self._prepostprocess_dropout = config['hidden_dropout_prob']
        self._attention_dropout = config['attention_probs_dropout_prob']
        self.return_pooled_out = return_pooled_out
        # Parameter names are fixed so checkpoints trained with the static
        # graph version can be loaded by name.
        self._word_emb_name = "word_embedding"
        self._pos_emb_name = "pos_embedding"
        self._sent_emb_name = "sent_embedding"
        self._dtype = "float16" if use_fp16 else "float32"
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=config['initializer_range'])
        # Token / position / sentence-type embedding tables.
        self._src_emb = Embedding(
            size=[self._voc_size, self._emb_size],
            param_attr=fluid.ParamAttr(
                name=self._word_emb_name, initializer=self._param_initializer),
            dtype=self._dtype)
        self._pos_emb = Embedding(
            size=[self._max_position_seq_len, self._emb_size],
            param_attr=fluid.ParamAttr(
                name=self._pos_emb_name, initializer=self._param_initializer),
            dtype=self._dtype)
        self._sent_emb = Embedding(
            size=[self._sent_types, self._emb_size],
            param_attr=fluid.ParamAttr(
                name=self._sent_emb_name, initializer=self._param_initializer),
            dtype=self._dtype)
        # Pooler: tanh projection applied to the first token's encoding.
        self.pooled_fc = Linear(
            input_dim=self._emb_size,
            output_dim=self._emb_size,
            param_attr=fluid.ParamAttr(
                name="pooled_fc.w_0", initializer=self._param_initializer),
            bias_attr="pooled_fc.b_0",
            act="tanh")
        # "nd" presumably selects layer-norm + dropout pre-processing —
        # confirm against PrePostProcessLayer's cmd handling.
        self.pre_process_layer = PrePostProcessLayer(
            "nd", self._emb_size, self._prepostprocess_dropout, "")
        self._encoder = EncoderLayer(
            hidden_act=self._hidden_act,
            n_layer=self._n_layer,
            n_head=self._n_head,
            d_key=self._emb_size // self._n_head,
            d_value=self._emb_size // self._n_head,
            d_model=self._emb_size,
            d_inner_hid=self._emb_size * 4,
            prepostprocess_dropout=self._prepostprocess_dropout,
            attention_dropout=self._attention_dropout,
            relu_dropout=0,
            preprocess_cmd="",
            postprocess_cmd="dan",
            param_initializer=self._param_initializer)
    def forward(self, src_ids, position_ids, sentence_ids, input_mask):
        """
        Encode a batch of token ids.

        Returns ``enc_output`` (sequence encodings), plus
        ``next_sent_feat`` (pooled first-token feature) when
        ``self.return_pooled_out`` is True.
        """
        # Sum of word, position and sentence-type embeddings.
        src_emb = self._src_emb(src_ids)
        pos_emb = self._pos_emb(position_ids)
        sent_emb = self._sent_emb(sentence_ids)
        emb_out = src_emb + pos_emb
        emb_out = emb_out + sent_emb
        emb_out = self.pre_process_layer(emb_out)
        # Build the pairwise attention mask from the padding mask, scale so
        # masked positions get a large negative bias before softmax.
        self_attn_mask = fluid.layers.matmul(
            x=input_mask, y=input_mask, transpose_y=True)
        self_attn_mask = fluid.layers.scale(
            x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)
        # Replicate the mask once per attention head.
        n_head_self_attn_mask = fluid.layers.stack(
            x=[self_attn_mask] * self._n_head, axis=1)
        # The mask is a constant; no gradient flows through it.
        n_head_self_attn_mask.stop_gradient = True
        enc_output = self._encoder(emb_out, n_head_self_attn_mask)
        if not self.return_pooled_out:
            return enc_output
        # Pool: take position 0 of every sequence and project with tanh.
        next_sent_feat = fluid.layers.slice(
            input=enc_output, axes=[1], starts=[0], ends=[1])
        next_sent_feat = self.pooled_fc(next_sent_feat)
        next_sent_feat = fluid.layers.reshape(
            next_sent_feat, shape=[-1, self._emb_size])
        return enc_output, next_sent_feat
class PretrainModelLayer(Layer):
    """
    BERT pre-training head on top of BertModelLayer.

    Computes the masked-LM loss (optionally weight-tied to the word
    embedding table) and the next-sentence-prediction loss, returning
    their sum as the training loss.
    """
    def __init__(self,
                 config,
                 return_pooled_out=True,
                 weight_sharing=True,
                 use_fp16=False):
        super(PretrainModelLayer, self).__init__()
        self.config = config
        self._voc_size = config['vocab_size']
        self._emb_size = config['hidden_size']
        self._hidden_act = config['hidden_act']
        self._prepostprocess_dropout = config['hidden_dropout_prob']
        self._word_emb_name = "word_embedding"
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=config['initializer_range'])
        # When True, the masked-LM output projection reuses the word
        # embedding matrix instead of a separate Linear layer.
        self._weight_sharing = weight_sharing
        self.use_fp16 = use_fp16
        self._dtype = "float16" if use_fp16 else "float32"
        # Pooled output is required for the next-sentence head.
        self.bert_layer = BertModelLayer(
            config=self.config, return_pooled_out=True, use_fp16=self.use_fp16)
        self.pre_process_layer = PrePostProcessLayer(
            "n", self._emb_size, self._prepostprocess_dropout, "pre_encoder")
        # Masked-LM transform (name kept for checkpoint compatibility).
        self.pooled_fc = Linear(
            input_dim=self._emb_size,
            output_dim=self._emb_size,
            param_attr=fluid.ParamAttr(
                name="mask_lm_trans_fc.w_0",
                initializer=self._param_initializer),
            bias_attr="mask_lm_trans_fc.b_0",
            act="tanh")
        # Output bias, used by both the tied and untied projection paths.
        self.mask_lm_out_bias_attr = fluid.ParamAttr(
            name="mask_lm_out_fc.b_0",
            initializer=fluid.initializer.Constant(value=0.0))
        if not self._weight_sharing:
            self.out_fc = Linear(
                input_dim=self._emb_size,
                output_dim=self._voc_size,
                param_attr=fluid.ParamAttr(
                    name="mask_lm_out_fc.w_0",
                    initializer=self._param_initializer),
                bias_attr=self.mask_lm_out_bias_attr)
        else:
            # Only the bias is created here; the weights come from the
            # embedding table at forward time.
            self.fc_create_params = self.create_parameter(
                shape=[self._voc_size],
                dtype=self._dtype,
                attr=self.mask_lm_out_bias_attr,
                is_bias=True)
        # Next-sentence-prediction binary classifier.
        self.next_sent_fc = Linear(
            input_dim=self._emb_size,
            output_dim=2,
            param_attr=fluid.ParamAttr(
                name="next_sent_fc.w_0", initializer=self._param_initializer),
            bias_attr="next_sent_fc.b_0")
    def forward(self, src_ids, position_ids, sentence_ids, input_mask,
                mask_label, mask_pos, labels):
        """
        Compute (next_sent_acc, mean_mask_lm_loss, total_loss) for a batch.

        mask_pos indexes into the flattened (batch*seq, emb) encodings to
        gather the masked positions; mask_label / labels are the MLM and
        next-sentence targets.
        """
        mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')
        enc_output, next_sent_feat = self.bert_layer(src_ids, position_ids,
                                                     sentence_ids, input_mask)
        # Flatten so gather() can pick out the masked token encodings.
        reshaped_emb_out = fluid.layers.reshape(
            x=enc_output, shape=[-1, self._emb_size])
        mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)
        mask_trans_feat = self.pooled_fc(mask_feat)
        # NOTE(review): this call passes 4 positional args while
        # BertModelLayer calls pre_process_layer with a single tensor —
        # confirm PrePostProcessLayer supports both signatures.
        mask_trans_feat = self.pre_process_layer(None, mask_trans_feat, "n",
                                                 self._prepostprocess_dropout)
        if self._weight_sharing:
            # Tied projection: logits = feat @ word_embedding^T + bias.
            fc_out = fluid.layers.matmul(
                x=mask_trans_feat,
                y=self.bert_layer._src_emb._w,
                transpose_y=True)
            fc_out += self.fc_create_params
        else:
            fc_out = self.out_fc(mask_trans_feat)
        mask_lm_loss = fluid.layers.softmax_with_cross_entropy(
            logits=fc_out, label=mask_label)
        mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)
        next_sent_fc_out = self.next_sent_fc(next_sent_feat)
        next_sent_loss, next_sent_softmax = fluid.layers.softmax_with_cross_entropy(
            logits=next_sent_fc_out, label=labels, return_softmax=True)
        next_sent_acc = fluid.layers.accuracy(
            input=next_sent_softmax, label=labels)
        mean_next_sent_loss = fluid.layers.mean(next_sent_loss)
        # Total pre-training loss = NSP loss + MLM loss.
        loss = mean_next_sent_loss + mean_mask_lm_loss
        return next_sent_acc, mean_mask_lm_loss, loss
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobRequest(Model):
    """JobRequest.

    Autogenerated msrest model describing an IoT Hub job request; do not
    hand-edit the attribute map, it drives (de)serialization.

    :param job_id: Job identifier
    :type job_id: str
    :param type: Required.
     The type of job to execute. Possible values include: 'unknown', 'export',
     'import', 'backup', 'readDeviceProperties', 'writeDeviceProperties',
     'updateDeviceConfiguration', 'rebootDevice', 'factoryResetDevice',
     'firmwareUpdate', 'scheduleDeviceMethod', 'scheduleUpdateTwin',
     'restoreFromBackup', 'failoverDataCopy'
    :type type: str or ~service20180630.models.enum
    :param cloud_to_device_method: Required if jobType is cloudToDeviceMethod.
     The method type and parameters.
    :type cloud_to_device_method: ~service20180630.models.CloudToDeviceMethod
    :param update_twin:
    :type update_twin: ~service20180630.models.Twin
    :param query_condition: Required if jobType is updateTwin or
     cloudToDeviceMethod.
     Condition for device query to get devices to execute the job on
    :type query_condition: str
    :param start_time: ISO 8601 date time to start the job
    :type start_time: datetime
    :param max_execution_time_in_seconds: Max execution time in seconds (ttl
     duration)
    :type max_execution_time_in_seconds: long
    """
    # Maps Python attribute -> wire-format key and msrest type tag.
    _attribute_map = {
        "job_id": {"key": "jobId", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "cloud_to_device_method": {"key": "cloudToDeviceMethod", "type": "CloudToDeviceMethod"},
        "update_twin": {"key": "updateTwin", "type": "Twin"},
        "query_condition": {"key": "queryCondition", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "max_execution_time_in_seconds": {"key": "maxExecutionTimeInSeconds", "type": "long"},
    }
    def __init__(
        self,
        job_id=None,
        type=None,
        cloud_to_device_method=None,
        update_twin=None,
        query_condition=None,
        start_time=None,
        max_execution_time_in_seconds=None,
    ):
        super(JobRequest, self).__init__()
        self.job_id = job_id
        # `type` intentionally shadows the builtin: the name is dictated by
        # the service contract (autogenerated code).
        self.type = type
        self.cloud_to_device_method = cloud_to_device_method
        self.update_twin = update_twin
        self.query_condition = query_condition
        self.start_time = start_time
        self.max_execution_time_in_seconds = max_execution_time_in_seconds
|
from odoo import api,fields,models
class TransfertDomain(models.Model):
    """Odoo model holding a single integer used as a domain generator.

    NOTE(review): the technical name 'tansfert.domain' looks misspelled
    ('transfert'), but renaming an Odoo model breaks stored data and XML
    references, so it is left unchanged here.
    """
    _name = 'tansfert.domain'
    generate_domain = fields.Integer(string="Generator", default=0)
from datetime import datetime
from users.models import User
from django.utils import timezone
from django.contrib import messages
from django.http import JsonResponse
from reminders.models import Reminder
from django.forms import formset_factory
from django.views.generic import TemplateView
from main.utilities import convert_time_delta
from django.shortcuts import render, redirect
from django.core.exceptions import ValidationError
from django.contrib.auth.decorators import login_required
from reminders.forms import ReminderCreationForm, ReminderUpdateForm
from events.models import Event, EventParticipant, OptionalMeetingDates
from events.forms import (
EventCreationForm,
EventUpdateForm,
OptionalMeetingDateForm,
ParticipantForm,
BaseOptionalMeetingDateFormSet,
BaseParticipantFormSet
)
# Named URL patterns used for redirects throughout these views.
HOME_PAGE = 'home'
LOGIN_PAGE = 'login'
@login_required(login_url=LOGIN_PAGE)
def create_event(request, day=None, month=None, year=None):
    """Create an event: GET renders the form, POST validates and saves.

    day/month/year, when all given, pre-fill the start date with today's
    current time.  A reminder is persisted only when the reminder form has
    a date_time; its message is derived from the delta to the event start.
    On invalid POST data, falls through to re-render with the bound forms.
    """
    if request.method == 'POST':
        event_form = EventCreationForm(request.POST, user_id=request.user)
        reminder_form = ReminderCreationForm(request.POST)
        if event_form.is_valid() and reminder_form.is_valid():
            event = event_form.save()
            # Fetch the creator's participant row (presumably created by
            # EventCreationForm.save() — confirm in the form) so the
            # reminder can attach to it.
            participant = EventParticipant.objects.get(user_id=request.user, event_id=event)
            reminder = reminder_form.save(commit=False)
            if reminder.date_time:
                reminder.participant_id = participant
                reminder.messages = convert_time_delta(event.date_time_start - reminder.date_time)
                reminder.save()
            return redirect(HOME_PAGE)
    else:
        initial_state = None
        if day and month and year:
            current_time = datetime.now().time()
            initial_state = {
                'date_time_start': datetime(
                    int(year), int(month), int(day), current_time.hour, current_time.minute
                ).strftime("%Y-%m-%dT%H:%M")
            }
        event_form = EventCreationForm(user_id=request.user, initial=initial_state)
        reminder_form = ReminderCreationForm()
    return render(request, 'events/create_event.html',
                  {'event_form': event_form, 'reminder_form': reminder_form, 'title': 'Create Event', 'event_id': None})
@login_required(login_url=LOGIN_PAGE)
def update_event(request, event_id):
    """Update an existing event and its (optional) reminder.

    Clearing the reminder's date_time on POST deletes the stored reminder.

    NOTE(review): only Reminder.DoesNotExist is caught below; if the
    EventParticipant lookup fails, EventParticipant.DoesNotExist
    propagates — confirm whether that is intended.
    """
    event_instance = Event.objects.get(id=event_id)
    try:
        participant = EventParticipant.objects.get(user_id=request.user, event_id=event_instance)
        reminder_instance = Reminder.objects.get(participant_id=participant)
    except Reminder.DoesNotExist:
        reminder_instance = None
    if request.method == 'POST':
        event_form = EventUpdateForm(request.POST, user_id=request.user, instance=event_instance)
        reminder_form = ReminderUpdateForm(request.POST, instance=reminder_instance)
        if event_form.is_valid() and reminder_form.is_valid():
            event = event_form.save()
            if reminder_form.instance.date_time:
                reminder = reminder_form.save(commit=False)
                reminder.messages = convert_time_delta(event.date_time_start - reminder.date_time)
                reminder.participant_id = participant
                reminder.save()
            else:
                # Reminder time cleared by the user: remove the reminder.
                if reminder_instance:
                    reminder_instance.delete()
            return redirect(HOME_PAGE)
    else:
        reminder_form = ReminderUpdateForm(instance=reminder_instance)
        event_form = EventUpdateForm(user_id=request.user, instance=event_instance)
    return render(request, 'events/create_event.html',
                  {'event_form': event_form, 'reminder_form': reminder_form, 'title': 'Update Event',
                   'event_id': event_id})
class CreateMeetingView(TemplateView):
    """Create a meeting: an event plus optional alternative dates and
    invited participants, entered through two formsets.

    On any validation failure the already-saved event is deleted again so
    no partial meeting is left in the database.
    """
    template_name = "meetings/create_meeting.html"
    def __init__(self, **kwargs) -> None:
        # Bound form/formset instances are created per-request in get/post.
        self.create_event_form = None
        self.formset_meeting_data = []
        self.formset_participant_data = []
        self.optional_meetings_formset = None
        self.meeting_participants_formset = None
        # Formset classes (factories) are built once per view instance.
        self.OptionalMeetingDateFormSet = formset_factory(
            OptionalMeetingDateForm, formset=BaseOptionalMeetingDateFormSet,
            max_num=10, extra=0
        )
        self.MeetingParticipantsFormset = formset_factory(
            ParticipantForm, formset=BaseParticipantFormSet,
            max_num=10, extra=0
        )
        super().__init__(**kwargs)
    def dispatch(self, request, *args, **kwargs):
        # Class-based equivalent of @login_required.
        if not request.user.is_authenticated:
            return redirect(LOGIN_PAGE)
        return super(CreateMeetingView, self).dispatch(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        """Expose the forms plus the raw formset data used to re-render
        user input after a validation failure."""
        context = super().get_context_data(**kwargs)
        context = {
            'create_event_form': self.create_event_form,
            'optional_meetings_formset': self.optional_meetings_formset,
            'meeting_participants_formset': self.meeting_participants_formset,
            'formset_meeting_data': self.formset_meeting_data,
            'total_meeting_forms': len(self.formset_meeting_data),
            'formset_participant_data': self.formset_participant_data,
            'total_participant_forms': len(self.formset_participant_data),
            'title': 'Create meeting'
        }
        return context
    def get(self, request, day=None, month=None, year=None):
        """Render empty forms; day/month/year pre-fill the start date."""
        initial_state = None
        if day and month and year:
            current_time = datetime.now().time()
            initial_state = {
                'date_time_start': datetime(
                    int(year), int(month), int(day), current_time.hour, current_time.minute
                ).strftime("%Y-%m-%dT%H:%M")
            }
        self.create_event_form = EventCreationForm(user_id=request.user, initial=initial_state)
        self.optional_meetings_formset = self.OptionalMeetingDateFormSet(prefix='optional_meetings')
        self.meeting_participants_formset = self.MeetingParticipantsFormset(prefix='participants', user_id=request.user)
        return super().get(request)
    def post(self, request, day=None, month=None, year=None):
        """Validate event + both formsets; save everything or roll back."""
        self.create_event_form = EventCreationForm(request.POST, user_id=request.user)
        self.optional_meetings_formset = self.OptionalMeetingDateFormSet(request.POST, prefix='optional_meetings')
        self.meeting_participants_formset = self.MeetingParticipantsFormset(
            request.POST, prefix='participants', user_id=request.user
        )
        is_valid_formsets = False
        if self.create_event_form.is_valid():
            # The event is saved first; the check_* helpers delete it again
            # if any later validation step fails.
            event_instance = self.create_event_form.save()
            event_creator = EventParticipant.objects.get(event_id=event_instance, user_id=request.user, is_creator=True)
            if self.check_optional_meeting_dates_formset(
                    request, event_instance, event_creator, self.optional_meetings_formset):
                if self.check_participant_formset(request, event_instance, self.meeting_participants_formset):
                    # all the forms are valid and all the data saved in the DB
                    is_valid_formsets = True
                    # NOTE(review): literal 'home' instead of HOME_PAGE —
                    # same target, but inconsistent with the other views.
                    return redirect('home')
        if not is_valid_formsets:
            # getting all the data that the user entered in the forms
            self.formset_meeting_data = self.get_formset_meeting_date(self.optional_meetings_formset)
            self.formset_participant_data = self.get_formset_participant_date(request)
        return self.render_to_response(self.get_context_data())
    @staticmethod
    def adding_event_creator(form, event_participant):
        """ adding event creator to each optional meeting date
            and saving this optional date in the DB """
        if not form.cleaned_data.get('date_time_start') and not form.cleaned_data.get('date_time_end'):
            # if the current optional meeting form is empty
            return
        instance = form.save(commit=False)
        instance.event_creator_id = event_participant
        instance.save()
    @staticmethod
    def check_dates_constraint(form, event_instance, request):
        """ checking if the event dates are the same as one of the optional meeting dates
            and if the chosen event dates are not in the past.
            Returns True when a constraint is VIOLATED. """
        form_start_time = form.cleaned_data.get('date_time_start')
        form_end_time = form.cleaned_data.get('date_time_end')
        if(event_instance.date_time_start, event_instance.date_time_end) == (form_start_time, form_end_time):
            messages.warning(request, "The optional meeting dates should be different")
            return True
        if event_instance.date_time_start < timezone.now() or event_instance.date_time_end < timezone.now():
            messages.warning(request, "Optional meeting dates cannot be in the past")
            return True
        return False
    @staticmethod
    def get_formset_meeting_date(formset):
        """Serialize the optional-date formset back into plain dicts so the
        template can re-populate the inputs after a failed POST."""
        result = []
        for index, form in enumerate(formset):
            form_data = {'id': 0, 'date_time_end': None, 'date_time_start': None}
            form_data['id'] = index
            if form.cleaned_data.get('date_time_end'):
                form_data['date_time_end'] = form.cleaned_data.get('date_time_end').strftime("%Y-%m-%dT%H:%M")
            if form.cleaned_data.get('date_time_start'):
                form_data['date_time_start'] = form.cleaned_data.get('date_time_start').strftime("%Y-%m-%dT%H:%M")
            result.append(form_data)
        return result
    @staticmethod
    def get_formset_participant_date(request):
        """Pull the participant emails straight from the raw POST keys.

        NOTE(review): the [4:-1] slice presumably skips the formset
        management-form keys and a trailing key — confirm it matches the
        participants formset's exact field layout.
        """
        result = []
        keys = list(filter(lambda x: "participants-" in x, request.POST.keys()))[4:-1]
        for index, key in enumerate(keys):
            form_data = {}
            form_data['id'] = index
            form_data['email'] = request.POST[key]
            result.append(form_data)
        return result
    def saving_all_optional_meeting_dates(self, event_creator, event_instance, optional_meetings_formset):
        """Persist every non-empty optional date plus the event's own
        date range as an additional candidate date."""
        _ = list(map(lambda form: self.adding_event_creator(form, event_creator), optional_meetings_formset))
        # add the event time to the optional meeting dates
        OptionalMeetingDates(
            event_creator_id=event_creator,
            date_time_start=event_instance.date_time_start,
            date_time_end=event_instance.date_time_end).save()
    def check_optional_meeting_dates_formset(self, request, event_instance, event_creator, optional_meetings_formset):
        """Validate and save the optional dates; delete the event and
        return False on any failure."""
        if optional_meetings_formset.is_valid():
            is_meeting_formset_invalid = False
            for form in optional_meetings_formset:
                is_meeting_formset_invalid = self.check_dates_constraint(form, event_instance, request)
                if is_meeting_formset_invalid:
                    break
            if is_meeting_formset_invalid:
                event_instance.delete()
                return False
            else:
                self.saving_all_optional_meeting_dates(event_creator, event_instance, optional_meetings_formset)
        else:
            event_instance.delete()
            return False
        return True
    @staticmethod
    def check_participant_formset(request, event_instance, meeting_participants_formset):
        """Create an EventParticipant per valid email; delete the event and
        return False when a user is unknown or a form is invalid.
        Duplicate participant emails are silently ignored."""
        if meeting_participants_formset.is_valid():
            for form in meeting_participants_formset:
                if form.is_valid():
                    try:
                        participant_email = form.cleaned_data.get('participant_email')
                        if participant_email:
                            user_instance = User.objects.get(email=participant_email)
                            participant = EventParticipant(
                                event_id=event_instance, user_id=user_instance, is_creator=False
                            )
                            participant.save()
                    except User.DoesNotExist:
                        messages.warning(request, f"There is no user with the email: {participant_email}")
                        event_instance.delete()
                        return False
                    except ValidationError:  # duplication of the same participant email
                        pass
                else:
                    event_instance.delete()
                    return False
        return True
@login_required(login_url=LOGIN_PAGE)
def delete_event(request, event_id):
    """Delete an event, but only when the requesting user is its creator.

    Returns a JSON result flag ("success"/"fail") for the AJAX caller;
    non-creators simply get "fail" (the DoesNotExist lookup is the check).
    """
    user = request.user
    event_instance = Event.objects.get(id=event_id)
    try:
        EventParticipant.objects.get(event_id=event_instance, user_id=user, is_creator=True)
        event_instance.delete()
        return JsonResponse({"result": "success"}, safe=False)
    except EventParticipant.DoesNotExist:
        return JsonResponse({"result": "fail"}, safe=False)
|
import doctest
import unittest
from future.moves import sys
import snips_nlu.dataset
import snips_nlu.result
# Modules whose docstring examples are executed as doctests.
doctest_modules = [
    snips_nlu.dataset.entity,
    snips_nlu.dataset.intent,
    snips_nlu.dataset.dataset,
    snips_nlu.result
]

# Collect one DocTestSuite per module into a single suite, run it, and
# exit non-zero so CI fails when any doctest does.
suite = unittest.TestSuite()
suite.addTests(doctest.DocTestSuite(mod) for mod in doctest_modules)
outcome = unittest.TextTestRunner().run(suite)
if not outcome.wasSuccessful():
    sys.exit(1)
|
from base_notifier import BaseNotifier
from slacker import Slacker, Error
class SlackNotifier(BaseNotifier):
    """Notifier that posts pull-request messages to Slack channels and
    members via the slacker client."""
    def __init__(self, template, debug):
        super(SlackNotifier, self).__init__(template, debug)
    def notify(self, message, config):
        """Send *message* to every configured channel ('#') and member ('@').

        Slacker errors are logged, not raised, so one bad target does not
        abort the whole notification run.
        """
        slack_token = config['slack_api_token']
        channels = config["notify_slack_channels"] or []
        members = config["notify_slack_members"] or []
        user = config["post_as_user"]
        super(SlackNotifier, self).notify(message, config)
        self.logger.debug(message)
        slack = Slacker(slack_token)
        try:
            self.post_to_slack(channels, message, slack, user, '#')
            self.post_to_slack(members, message, slack, user, '@')
        except Error as error:
            self.logger.error(str(error))
    def post_to_slack(self, notification_targets, message, slack, user, target_symbol):
        """Post *message* to each target, prefixed with '#' or '@'.

        NOTE(review): icon_emoji=':bell' is missing its trailing colon
        (Slack expects ':bell:'), and icon_url is also given an emoji
        string rather than a URL — confirm against the chat.postMessage
        API before changing, since this is live behaviour.
        """
        for target in notification_targets:
            self.logger.info("Sending notification to " + target)
            response = slack.chat.post_message(target_symbol + target,
                                               message,
                                               user,
                                               icon_emoji=':bell',
                                               icon_url=':bell')
            self.logger.debug(response)
            self.__log_notification_response(response, target)
    def format(self, initial_message, pull_requests, owner):
        """Render the notification text through the Jinja template."""
        self.logger.info("Formatting the text for slack as required")
        args = super(SlackNotifier, self).format(initial_message, pull_requests, owner)
        template = self.get_jinja_template()
        return template.render(args)  # this is where to put args to the template renderer
    def __log_notification_response(self, response, recipient):
        # slacker responses expose .successful / .error.
        if response.successful:
            self.logger.info("Notification sent to " + recipient)
        else:
            self.logger.error(response.error)
|
from figcow import cow

# Render "Testing" as figcow ASCII art and print it.
s = cow("Testing")
print(s)
|
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: nil; -*-
### BEGIN LICENSE
# Copyright (C) 2010 Kevin Mehall <km@kevinmehall.net>
# Copyright (C) 2012 Christopher Eby <kreed@kreed.org>
#This program is free software: you can redistribute it and/or modify it
#under the terms of the GNU General Public License version 3, as published
#by the Free Software Foundation.
#
#This program is distributed in the hope that it will be useful, but
#WITHOUT ANY WARRANTY; without even the implied warranties of
#MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
#PURPOSE. See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along
#with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
#import blowfish
from blowfish import Blowfish
import pandora_keys
import simplejson as json
import logging
import time
import urllib
import urllib2
# This is an implementation of the Pandora JSON API using Android partner
# credentials.
# See http://pan-do-ra-api.wikia.com/wiki/Json/5 for API documentation.
# Pandora JSON API v5 constants.  The RPC URL is scheme-less; json_call()
# prepends 'http'/'https' per request.
PROTOCOL_VERSION = '5'
RPC_URL = "://tuner.pandora.com/services/json/?"
# Android partner credentials (publicly known, see the pan-do-ra-api wiki).
DEVICE_MODEL = 'android-generic'
PARTNER_USERNAME = 'android'
PARTNER_PASSWORD = 'AC7IBG09A3DTSYM4R41UJWL07VLN8JI7'
HTTP_TIMEOUT = 30
AUDIO_FORMAT = 'highQuality'
USER_AGENT = 'pithos'

# Track rating values used by the API.
RATE_BAN = 'ban'
RATE_LOVE = 'love'
RATE_NONE = None

# Fault codes returned in the 'code' field of failed responses.
API_ERROR_API_VERSION_NOT_SUPPORTED = 11
API_ERROR_INSUFFICIENT_CONNECTIVITY = 13
API_ERROR_READ_ONLY_MODE = 1000
API_ERROR_INVALID_AUTH_TOKEN = 1001
API_ERROR_INVALID_LOGIN = 1002

# Playlists are considered stale after 3 hours.
PLAYLIST_VALIDITY_TIME = 60*60*3

# Debug leftovers, kept intentionally disabled.
#print Blowfish
#print blowfish.__file__
class PandoraError(IOError):
    """Base error for Pandora API failures.

    :param message: short human-readable summary (also used as str(e))
    :param status:  Pandora fault code, when known
    :param submsg:  longer explanation suitable for display to the user
    """
    def __init__(self, message, status=None, submsg=None):
        # BUG FIX: pass the message to IOError.__init__; previously the
        # base initializer was never called, so str(e) was always empty.
        super(PandoraError, self).__init__(message)
        self.status = status
        self.message = message
        self.submsg = submsg
# Specialised Pandora failures; callers catch these to distinguish
# auth-token expiry, network problems, protocol-version mismatch and
# timeouts from generic API errors.
class PandoraAuthTokenInvalid(PandoraError): pass
class PandoraNetError(PandoraError): pass
class PandoraAPIVersionError(PandoraError): pass
class PandoraTimeout(PandoraNetError): pass
# Cipher for encrypting outgoing request bodies (partner "out" keys).
blowfish_encode = Blowfish(pandora_keys.out_key_p, pandora_keys.out_key_s)
def pad(s, l):
    """Right-pad *s* with NUL bytes to length *l*.

    Strings already *l* characters or longer are returned unchanged,
    matching the original ``s + "\\0" * (l - len(s))`` behaviour.
    """
    return s.ljust(l, "\0")
def pandora_encrypt(s):
    # Encrypt in NUL-padded 8-byte blocks and hex-encode each encrypted
    # block.  Python 2 only: uses xrange and str.encode('hex').
    return "".join([blowfish_encode.encrypt(pad(s[i:i+8], 8)).encode('hex') for i in xrange(0, len(s), 8)])
# Cipher for decrypting data coming back from Pandora ("in" keys).
blowfish_decode = Blowfish(pandora_keys.in_key_p, pandora_keys.in_key_s)
def pandora_decrypt(s):
    # Hex-decode 16-character chunks (8 encrypted bytes each), decrypt,
    # and strip the trailing \x08 padding.  Python 2 only: uses xrange
    # and str.decode('hex').
    return "".join([blowfish_decode.decrypt(pad(s[i:i+16].decode('hex'), 8)) for i in xrange(0, len(s), 16)]).rstrip('\x08')
class Pandora(object):
    def __init__(self):
        # Default to no proxy and the module-level audio format.  Auth
        # state (partnerId, userId, tokens, time_offset, opener) is
        # presumably established by a later connect/login call — it is set
        # outside this visible chunk.
        self.set_proxy(None)
        self.set_audio_format(AUDIO_FORMAT)
def json_call(self, method, args={}, https=False, blowfish=True):
url_arg_strings = []
if self.partnerId:
url_arg_strings.append('partner_id=%s'%self.partnerId)
if self.userId:
url_arg_strings.append('user_id=%s'%self.userId)
if self.userAuthToken:
url_arg_strings.append('auth_token=%s'%urllib.quote_plus(self.userAuthToken))
elif self.partnerAuthToken:
url_arg_strings.append('auth_token=%s'%urllib.quote_plus(self.partnerAuthToken))
url_arg_strings.append('method=%s'%method)
protocol = 'https' if https else 'http'
url = protocol + RPC_URL + '&'.join(url_arg_strings)
if self.time_offset:
args['syncTime'] = int(time.time()+self.time_offset)
if self.userAuthToken:
args['userAuthToken'] = self.userAuthToken
elif self.partnerAuthToken:
args['partnerAuthToken'] = self.partnerAuthToken
data = json.dumps(args)
logging.debug(url)
logging.debug(data)
if blowfish:
data = pandora_encrypt(data)
req = urllib2.Request(url, data, {'User-agent': USER_AGENT, 'Content-type': 'text/plain'})
response = self.opener.open(req)
text = response.read()
tree = json.loads(text)
if tree['stat'] == 'fail':
code = tree['code']
msg = tree['message']
logging.error('fault code: ' + str(code) + ' message: ' + msg)
if code == API_ERROR_INVALID_AUTH_TOKEN:
raise PandoraAuthTokenInvalid(msg)
elif code == API_ERROR_API_VERSION_NOT_SUPPORTED:
raise PandoraAPIVersionError(msg)
elif code == API_ERROR_INSUFFICIENT_CONNECTIVITY:
raise PandoraError("Out of sync", code,
submsg="Correct your system's clock. If the problem persists, a Pithos update may be required")
elif code == API_ERROR_READ_ONLY_MODE:
raise PandoraError("Pandora maintenance", code,
submsg="Pandora is in read-only mode as it is performing maintenance. Try again later.")
elif code == API_ERROR_INVALID_LOGIN:
raise PandoraError("Login Error", code, submsg="Invalid username or password")
else:
raise PandoraError("Pandora returned an error", code, "%s (code %d)"%(msg, code))
if 'result' in tree:
return tree['result']
def set_audio_format(self, fmt):
self.audio_format = fmt
def set_proxy(self, proxy):
if proxy:
proxy_handler = urllib2.ProxyHandler({'http': proxy})
self.opener = urllib2.build_opener(proxy_handler)
else:
self.opener = urllib2.build_opener()
def connect(self, user, password):
self.partnerId = self.userId = self.partnerAuthToken = self.userAuthToken = self.time_offset = None
partner = self.json_call('auth.partnerLogin', {'deviceModel': DEVICE_MODEL, 'username': PARTNER_USERNAME, 'password': PARTNER_PASSWORD, 'version': PROTOCOL_VERSION}, https=True, blowfish=False)
self.partnerId = partner['partnerId']
self.partnerAuthToken = partner['partnerAuthToken']
pandora_time = int(pandora_decrypt(partner['syncTime'])[4:14])
self.time_offset = pandora_time - time.time()
logging.info("Time offset is %s", self.time_offset)
user = self.json_call('auth.userLogin', {'username': user, 'password': password, 'loginType': 'user'}, https=True)
self.userId = user['userId']
self.userAuthToken = user['userAuthToken']
self.get_stations(self)
def get_stations(self, *ignore):
stations = self.json_call('user.getStationList')['stations']
self.quickMixStationIds = None
self.stations = [Station(self, i) for i in stations]
if self.quickMixStationIds:
for i in self.stations:
if i.id in self.quickMixStationIds:
i.useQuickMix = True
return stations
def save_quick_mix(self):
stationIds = []
for i in self.stations:
if i.useQuickMix:
stationIds.append(i.id)
self.json_call('user.setQuickMix', {'quickMixStationIds': stationIds})
def search(self, query):
results = self.json_call('music.search', {'searchText': query})
l = [SearchResult('artist', i) for i in results['artists']]
l += [SearchResult('song', i) for i in results['songs']]
l.sort(key=lambda i: i.score, reverse=True)
return l
def add_station_by_music_id(self, musicid):
d = self.json_call('station.createStation', {'musicToken': musicid})
station = Station(self, d)
self.stations.append(station)
return station
def get_station_by_id(self, id):
for i in self.stations:
if i.id == id:
return i
def add_feedback(self, trackToken, rating):
logging.info("pandora: addFeedback")
rating_bool = True if rating == RATE_LOVE else False
feedback = self.json_call('station.addFeedback', {'trackToken': trackToken, 'isPositive': rating_bool})
return feedback['feedbackId']
def delete_feedback(self, stationToken, feedbackId):
self.json_call('station.deleteFeedback', {'feedbackId': feedbackId, 'stationToken': stationToken})
class Station(object):
    """One Pandora station, built from a station dict returned by the API."""

    def __init__(self, pandora, d):
        self.pandora = pandora
        self.id = d['stationId']
        self.idToken = d['stationToken']
        # Shared stations cannot be modified until "transformed" (see below).
        self.isCreator = not d['isShared']
        self.isQuickMix = d['isQuickMix']
        self.name = d['stationName']
        self.useQuickMix = False
        if self.isQuickMix:
            # Publish the QuickMix membership list onto the client so
            # Pandora.get_stations can flag the member stations.
            self.pandora.quickMixStationIds = d.get('quickMixStationIds', [])

    def transformIfShared(self):
        # Take ownership of a shared station so it can be renamed/rated.
        if not self.isCreator:
            logging.info("pandora: transforming station")
            self.pandora.json_call('station.transformSharedStation', {'stationToken': self.idToken})
            self.isCreator = True

    def get_playlist(self):
        """Fetch the next playlist fragment; returns Song objects (ads skipped)."""
        logging.info("pandora: Get Playlist")
        playlist = self.pandora.json_call('station.getPlaylist', {'stationToken': self.idToken, 'additionalAudioUrl': 'HTTP_64_AACPLUS_ADTS,HTTP_128_MP3,HTTP_192_MP3'}, https=True)
        songs = []
        for i in playlist['items']:
            if 'songName' in i: # check for ads
                songs.append(Song(self.pandora, i))
        return songs

    #property
    # NOTE(review): the decorator above is commented out, so info_url is a
    # plain method here even though Song uses @property for similar
    # accessors -- confirm whether callers expect an attribute or a call.
    def info_url(self):
        return 'http://www.pandora.com/stations/'+self.idToken

    def rename(self, new_name):
        # No-op when the name is unchanged.
        if new_name != self.name:
            self.transformIfShared()
            logging.info("pandora: Renaming station")
            self.pandora.json_call('station.renameStation', {'stationToken': self.idToken, 'stationName': new_name})
            self.name = new_name

    def delete(self):
        logging.info("pandora: Deleting Station")
        self.pandora.json_call('station.deleteStation', {'stationToken': self.idToken})
class Song(object):
    """One playable track from a station playlist fragment."""

    def __init__(self, pandora, d):
        self.pandora = pandora
        self.album = d['albumName']
        self.artist = d['artistName']
        # Pick the URL matching the client's configured audio quality.
        self.audioUrl = d['audioUrlMap'][self.pandora.audio_format]['audioUrl']
        self.fileGain = d['trackGain']
        self.trackToken = d['trackToken']
        self.rating = RATE_LOVE if d['songRating'] == 1 else RATE_NONE # banned songs won't play, so we don't care about them
        self.stationId = d['stationId']
        self.title = d['songName']
        self.songDetailURL = d['songDetailUrl']
        self.albumDetailURL = d['albumDetailUrl']
        self.artRadio = d['albumArtUrl']

        # Playback/bookkeeping state maintained by the client.
        self.tired=False
        self.message=''
        self.start_time = None
        self.finished = False
        # When this playlist fragment was fetched; see is_still_valid().
        self.playlist_time = time.time()
        # Cached feedbackId from the last rating, needed to un-rate.
        self.feedbackId = None

    @property
    def station(self):
        return self.pandora.get_station_by_id(self.stationId)

    def rate(self, rating):
        """Set the song's rating, translating to add/delete feedback calls."""
        if self.rating != rating:
            self.station.transformIfShared()
            if rating == RATE_NONE:
                if not self.feedbackId:
                    # We need a feedbackId, get one by re-rating the song. We
                    # could also get one by calling station.getStation, but
                    # that requires transferring a lot of data (all feedback,
                    # seeds, etc for the station).
                    opposite = RATE_BAN if self.rating == RATE_LOVE else RATE_LOVE
                    self.feedbackId = self.pandora.add_feedback(self.trackToken, opposite)
                self.pandora.delete_feedback(self.station.idToken, self.feedbackId)
            else:
                self.feedbackId = self.pandora.add_feedback(self.trackToken, rating)
            self.rating = rating

    def set_tired(self):
        # Only send the sleep request once per song.
        if not self.tired:
            self.pandora.json_call('user.sleepSong', {'trackToken': self.trackToken})
            self.tired = True

    def bookmark(self):
        self.pandora.json_call('bookmark.addSongBookmark', {'trackToken': self.trackToken})

    def bookmark_artist(self):
        self.pandora.json_call('bookmark.addArtistBookmark', {'trackToken': self.trackToken})

    @property
    def rating_str(self):
        # The rating constants double as display strings ('love'/'ban'/None).
        return self.rating

    def is_still_valid(self):
        # Playlist audio URLs expire; see PLAYLIST_VALIDITY_TIME.
        return (time.time() - self.playlist_time) < PLAYLIST_VALIDITY_TIME
class SearchResult(object):
    """One artist or song result returned by Pandora's music.search call."""

    def __init__(self, resultType, d):
        self.resultType = resultType
        self.score, self.musicId = d['score'], d['musicToken']
        if resultType == 'artist':
            self.name = d['artistName']
        elif resultType == 'song':
            self.title, self.artist = d['songName'], d['artistName']
|
def clean_indentation(element, level=0, spaces_per_level=2):
    """
    Recursively add newlines/spaces to .text/.tail (in place) so the tree
    serializes pretty-printed.  Adapted from
    http://effbot.org/zone/element-lib.htm#prettyprint

    Fix vs. previous version: the last-child tail adjustment now runs
    *after* the loop (affecting only the final child), so sibling elements
    keep their own indentation level instead of all being dedented to the
    parent's level.
    """
    i = "\n" + level*spaces_per_level*" "
    if len(element):
        if not element.text or not element.text.strip():
            element.text = i + spaces_per_level*" "
        if not element.tail or not element.tail.strip():
            element.tail = i
        for sub_element in element:
            clean_indentation(sub_element, level+1, spaces_per_level)
        # Dedent only the last child's tail so the parent's closing tag
        # lines up with its opening tag.
        if not sub_element.tail or not sub_element.tail.strip():
            sub_element.tail = i
    else:
        if level and (not element.tail or not element.tail.strip()):
            element.tail = i
def get_text(elem, name, default=None):
    """Retrieve the text of an attribute or subelement.

    Parameters
    ----------
    elem : xml.etree.ElementTree.Element
        Element from which to search
    name : str
        Name of attribute/subelement
    default : object
        Value returned when no matching attribute/subelement exists

    Returns
    -------
    str
        Text of attribute or subelement
    """
    try:
        # Attributes take precedence over subelements of the same name.
        return elem.attrib[name]
    except KeyError:
        child = elem.find(name)
        if child is None:
            return default
        return child.text
|
import numpy as np
def percent(num, denom):
    """Return num/denom as a percentage string rounded to one decimal, e.g. '50.0%'."""
    return f'{np.round(num / denom * 100, 1)}%'
def evaluation(df):
    """Print retrieval statistics for a document DataFrame.

    Expects columns 'pdf', 'xml', 'html', 'plain' (1 = retrieved) and
    'content_text' (empty/None/NaN = nothing retrieved, 'ABS:...' = abstract
    only, anything else = document content).

    Fixes vs. previous version: `== None` -> `is None`, removed a
    placeholder-free f-string, corrected the user-visible "fomat" typo and
    several comment typos.
    """
    pdf_count = 0
    html_count = 0
    xml_count = 0
    plain_count = 0
    abstract_count = 0
    content_count = 0
    tag_count = 0
    any_count = 0
    for index, row in df.iterrows():
        # Tally each format that was retrieved for this document.
        if row['pdf'] == 1:
            pdf_count += 1
        if row['xml'] == 1:
            xml_count += 1
        if row['html'] == 1:
            html_count += 1
        if row['plain'] == 1:
            plain_count += 1
        # `row.content_text != row.content_text` is the NaN check.
        if row.content_text == '' or row.content_text is None or row.content_text != row.content_text:
            pass
        elif row.content_text[:4] == 'ABS:':
            # Full text not retrieved, but the abstract was found.
            abstract_count += 1
        else:
            # Retrieved document content (any format).
            content_count += 1
        if row['xml'] == 1 or row['html'] == 1:
            # At least one tagged version is available.
            tag_count += 1
        if row['xml'] == 1 or row['html'] == 1 or row['pdf'] == 1 or row['plain'] == 1:
            # At least one format is available.
            any_count += 1
    # Print the summary.
    print('Here is the performance thus far:')
    print(f'PDF:{pdf_count} = {percent(pdf_count, len(df))}')
    print(f'XML:{xml_count} = {percent(xml_count, len(df))}')
    print(f'HTML:{html_count} = {percent(html_count, len(df))}')
    print(f'Plain Text:{plain_count} = {percent(plain_count, len(df))}')
    print(f'\nWe have at least one format for {any_count} articles = {percent(any_count, len(df))}')
    print(f'\nWe have a tagged version for {tag_count} articles = {percent(tag_count, len(df))}')
    print(f'\nWe have only the abstract for {abstract_count} articles = {percent(abstract_count, len(df))}')
    print(f'\nWe have a content text for {content_count} articles = {percent(content_count, len(df))}')
#!/usr/bin/env python
# encoding: utf-8
import click
import io
import sys
import yaml
import storage
import datadiff
import hashlib
import methods
# TODO pattern to factor out:
# option to choose something from named dictionary
# TODO pattern to factor out:
# input/output file
# (flags: filename defaulting to -, allow overwrite, binary/text mode)
@click.command()
@click.option("--data-dir",
              help="Input directory containing datawatch data.")
@click.option("--include-unchanged/--no-include-unchanged",
              default=False, show_default=True, type=bool,
              help="Perform reduction even when nothing has changed from the previous version.")
@click.option("--omit-data/--no-omit-data",
              default=False, show_default=True, type=bool,
              help="Omit the actual data from the output.")
@click.option("--extra-info/--no-extra-info",
              default=False, show_default=True, type=bool,
              help="Add some extra descriptive metadata in the output.")
@click.option("--select-key", multiple=True,
              help="Select only a specific set of keys.")
@click.option("--value-type", default="auto", show_default=True,
              help="Choose kind of value to output.")
def main(data_dir, include_unchanged, omit_data, extra_info, select_key, value_type):
    """Stream datawatch revisions from --data-dir to stdout as YAML documents."""
    # Decoders corresponding to the --value-type choices.
    valuedecoders = {
        "auto": lambda rev: rev.get_data_as_bytes_or_unicode(),
        "raw": lambda rev: rev.data,
        "string": lambda rev: rev.get_data_as_unicode(),
    }
    try:
        valuedecoder = valuedecoders[value_type]
    except KeyError:
        raise ValueError("unknown or unhandled --value_type: {} (options: {})".format(repr(value_type), repr(list(valuedecoders))))
    stream = datadiff.read_streaming(
        store=storage.LocalFileStorage(data_dir),
        key_filter=select_key or None,
        include_unchanged=include_unchanged)
    for entry, revision in stream:
        record = {
            "key": entry.key,
            "data_version": revision.data_version,
        }
        if extra_info:
            record["info"] = {
                "keyhash": entry.keyhash,
                "data_length": len(revision.data),
                "data_hash": revision.content_hash_digest,
            }
        if not omit_data:
            record["value"] = valuedecoder(revision)
        # One YAML document per revision (explicit --- / ... markers).
        yaml.safe_dump(record, explicit_start=True, explicit_end=True, stream=sys.stdout)
# Script entry point (click parses sys.argv).
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
import os
import sys
import csv
import argparse
try:
import numpy as np
from scipy.stats import spearmanr
from scipy.stats.mstats import kruskalwallis
except ImportError:
sys.exit( "This script requires the Python scientific stack: numpy and scipy." )
try:
from humann2 import config
from humann2.tools import util
except ImportError:
sys.exit( "CRITICAL ERROR: Unable to find the HUMAnN2 python package." +
" Please check your install.")
# ---------------------------------------------------------------
# constants
# ---------------------------------------------------------------
# Maximum fraction of a continuous metadata row that may fail float
# conversion and still be imputed (with the mean of the numeric values).
c_allowed_impute = 0.2

# ---------------------------------------------------------------
# argument parsing
# ---------------------------------------------------------------

description = """
HUMAnN2 utility for performing metadata association
===================================================
"""
def get_args():
    """Build and parse the command-line arguments for this utility."""
    arg_parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawTextHelpFormatter,
    )
    arg_parser.add_argument("-i", "--input",
                            metavar="<path>",
                            required=True,
                            help="HUMAnN2 table with metadata rows at the top")
    arg_parser.add_argument("-m", "--focal-metadatum",
                            metavar="<str>",
                            required=True,
                            help="Indicate metadatum to test vs. community feature totals")
    arg_parser.add_argument("-l", "--last-metadatum",
                            metavar="<str>",
                            required=True,
                            help="Indicate end of metadata rows")
    arg_parser.add_argument("-t", "--focal-type",
                            required=True,
                            choices=["continuous", "categorical"],
                            help="Metadatum type")
    arg_parser.add_argument("-o", "--output",
                            metavar="<path>",
                            default="associations.tsv",
                            help="Where to save the output")
    arg_parser.add_argument("-f", "--fdr",
                            metavar="<float>",
                            type=float,
                            default=0.2,
                            help="FDR threshold (default=0.2)")
    return arg_parser.parse_args()
# ---------------------------------------------------------------
# utilities
# ---------------------------------------------------------------
def pvalues2qvalues(pvalues):
    """Benjamini-Hochberg FDR: convert p-values to q-values.

    Returns the q-values in the same order as the input p-values.
    """
    n = len(pvalues)
    # Rank the p-values while remembering each one's original position.
    order = sorted(range(n), key=lambda k: pvalues[k])
    ranked = sorted(pvalues)
    # BH: q(rank) = p(rank) * n / rank  (1-indexed rank).
    qvalues = [ranked[rank] * n / (rank + 1) for rank in range(n)]
    # Enforce monotonicity: q(i) = min(q(i..n)), computed from the end.
    for rank in range(n - 2, -1, -1):
        qvalues[rank] = min(qvalues[rank], qvalues[rank + 1])
    # Scatter the q-values back to the original input order.
    restored = [None] * n
    for rank, q in enumerate(qvalues):
        restored[order[rank]] = q
    return restored
def adjust_stats(stats):
    """Append a BH q-value to each stat row (p-value in last slot), then sort by q."""
    qvalues = pvalues2qvalues([row[-1] for row in stats])
    for row, q in zip(stats, qvalues):
        row.append(q)
    return sorted(stats, key=lambda row: row[-1])
def spearman_analysis(mvalues, fnames, fvalues):
    """Spearman-correlate each feature row against the focal metadata values.

    Returns rows [feature, rho, p] augmented with q-values and sorted by
    adjust_stats.  Features that fail are skipped with a note on stderr.
    """
    stats = []
    for fname, frow in zip(fnames, fvalues):
        try:
            rho, p = spearmanr(mvalues, frow)
            stats.append([fname, "%.4g" % rho, p])
        except Exception:
            # Was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; keep the best-effort skip.
            sys.stderr.write("NOTE: Unable to compute Spearman r with feature: " + fname + "\n")
    return adjust_stats(stats)
def shatter(cats, values):
    """Group *values* into lists keyed by the parallel category labels *cats*."""
    groups = {}
    for cat, val in zip(cats, values):
        if cat not in groups:
            groups[cat] = []
        groups[cat].append(val)
    return groups
def kruskalwallis_analysis(mvalues, fnames, fvalues):
    """Kruskal-Wallis test of each feature row across metadata categories.

    Returns rows [feature, per-level mean summary, p] augmented with
    q-values and sorted by adjust_stats.
    """
    stats = []
    for fname, frow in zip(fnames, fvalues):
        try:
            lists = shatter(mvalues, frow)
            # Summarize each category level as "level:mean", joined by "|".
            summary = {k: "%.4g" % (np.mean(v)) for k, v in lists.items()}
            summary = [":".join([k, v]) for k, v in summary.items()]
            summary = "|".join(summary)
            hstat, p = kruskalwallis(*lists.values())
            stats.append([fname, summary, p])
        except Exception:
            # Was a bare `except:`; keep the best-effort skip but let
            # KeyboardInterrupt/SystemExit propagate.
            sys.stderr.write("NOTE: Unable to compute Kruskal-Wallis with feature: " + fname + "\n")
    return adjust_stats(stats)
def test_float_list(values):
    """Coerce a metadata row to floats, imputing scarce bad values with the mean.

    Returns the float list if at least (1 - c_allowed_impute) of the
    entries parse as numbers (parse failures replaced by the mean of the
    good values); otherwise returns None.
    """
    parsed = []
    for v in values:
        try:
            parsed.append(float(v))
        except (ValueError, TypeError):
            # Was a bare `except:`; float() only raises these two here.
            parsed.append(None)
    good = [x for x in parsed if x is not None]
    if len(good) / float(len(parsed)) >= 1 - c_allowed_impute:
        impute = np.mean(good)
        return [x if x is not None else impute for x in parsed]
    else:
        return None
# ---------------------------------------------------------------
# main
# ---------------------------------------------------------------
def main( ):
    """Read the table, find the focal metadata row, and run the association test."""
    args = get_args( )
    mname, mvalues = None, []
    fnames, fvalues = [], []
    adding = False
    with open( args.input , "rt" ) as fh:
        for row in csv.reader( fh, csv.excel_tab ):
            header, values = row[0], row[1:]
            if header == args.focal_metadatum:
                mname, mvalues = header, values
            if header == args.last_metadatum:
                # Everything after this row is community feature data.
                adding = True
                continue
            # Collect feature rows, skipping stratified ones ("|" in name).
            if adding and "|" not in header:
                fnames.append( header )
                fvalues.append( list(map( float, values ) ) )
    # tests
    if not adding:
        sys.exit( "STOPPED: last metadata row <{}> not found.\n".format( args.last_metadatum ) )
    if mname is None:
        sys.exit( "STOPPED: focal metadata row <{}> not found.\n".format( args.focal_metadatum ) )
    if args.focal_type == "categorical" and len( set( mvalues ) ) > np.sqrt( len( mvalues ) ):
        sys.stderr.write( "WARNING: categorical metadata <{}> has many distinct values.\n".format( args.focal_metadatum ) )
    # begin analysis
    fh = open( args.output, "w" ) if args.output is not None else sys.stdout
    if args.focal_type == "continuous":
        mvalues = test_float_list( mvalues )
        if mvalues is None:
            sys.exit( "STOPPED: failed to float many entries in focal row <{}>".format( args.focal_metadatum ) )
        stats = spearman_analysis( mvalues, fnames, fvalues )
        sys.stderr.write( "Performing Spearman analysis vs. metadatum: " + mname + "\n" )
        fh.write( "# Feature\tRho\tP-value\tQ-value\n" )
    elif args.focal_type == "categorical":
        stats = kruskalwallis_analysis( mvalues, fnames, fvalues )
        sys.stderr.write( "Performing Kruskal-Wallis analysis vs. metadatum: " + mname + "\n" )
        fh.write( "# Feature\tLevel means (|ed)\tP-value\tQ-value\n" )
    # Report only the FDR-significant associations.
    found_something = False
    for stat in stats:
        if stat[-1] <= args.fdr:
            found_something = True
            stat[-1] = "%.4g" % stat[-1]
            stat[-2] = "%.4g" % stat[-2]
            fh.write( "\t".join( list( map( str, stat ) ) ) + "\n" )
    if not found_something:
        sys.stderr.write( "NO FDR SIGNIFICANT ASSOCIATIONS\n" )
    fh.close( )
    sys.stderr.write( "Finished successfully.\n" )
# Script entry point.
if __name__ == "__main__":
    main( )
|
import os
import shutil
# Import-time notice that this helper module has been loaded.
print('path-utils loaded')
def makedir(direc):
    """Create directory *direc* (and parents); True if created, False if it existed.

    Uses EAFP (try/create) instead of an exists() pre-check, which closes
    the race where the directory appears between the check and the create.
    """
    try:
        os.makedirs(direc)
        return True
    except FileExistsError:
        return False
def get_file_name(filepath):
    """Return the base name of *filepath* without its (last) extension."""
    base = os.path.basename(filepath)
    stem, _ext = os.path.splitext(base)
    return stem
def get_files(direc, extns=None):
    ''' Returns a list of files in a directory.
    If extns is not None, only files with those extensions will be returned.
    '''
    entries = os.listdir(direc)
    if extns is None:
        return entries
    # Extension = text after the last dot (whole name when there is no dot).
    return [name for name in entries if name.split('.')[-1] in extns]
def delete_file(filepath):
    """Delete *filepath*; True if removed, False if it did not exist.

    Uses EAFP (try/remove) instead of an exists() pre-check, which closes
    the race where the file disappears between the check and the removal.
    """
    try:
        os.remove(filepath)
        return True
    except FileNotFoundError:
        return False
def deletedir(direc):
    """Recursively delete directory *direc*; True if removed, False if absent.

    EAFP variant of the original exists()+rmtree pair, closing the
    check-then-act race.
    """
    try:
        shutil.rmtree(direc)
        return True
    except FileNotFoundError:
        return False
if __name__=='__main__': # pragma: no cover
    # Driver code: smoke-test makedir by creating a directory and
    # confirming it exists.
    direc = './test_dir'
    makedir(direc)
    print(os.path.exists(direc))
|
#!/usr/bin/env python
# This check data available on ESGF and on raijin that matches constraints passed on by user and return a summary.
"""
Copyright 2016 ARC Centre of Excellence for Climate Systems Science
author: Paola Petrelli <paola.petrelli@utas.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from ARCCSSive import CMIP5
from ARCCSSive.CMIP5.Model import Instance
# connect to the database
db=CMIP5.connect()

#search database instances
outputs=db.outputs(variable='tas',model='MIROC5',experiment='historical',mip='Amon',ensemble='r1i1p1')
# loop through result instance objects returned by search
for o in outputs:
    model = o.model
    print(str(model))
    files = o.filenames()
    print(files)
    fpath = o.drstree_path()
    print(str(fpath))
    # loops through result version objects related to instance
    for v in o.versions:
        if v.is_latest: print("latest available version on ESGF as of ",str(v.checked_on))

# search without specifying variables and then use filter to select only two
outputs=db.outputs(model='MIROC5',experiment='historical',mip='Amon',ensemble='r1i1p1')\
        .filter(Instance.variable.in_(['tas','pr']))
# loop through result instance objects returned by search
for o in outputs:
    var = o.variable
    print(str(var))
    files = o.filenames()
    print(files)
    fpath = o.drstree_path()
    print(str(fpath))
    # loops through result version objects related to instance
    for v in o.versions:
        # print message if version is latest on ESGF
        if v.is_latest: print("latest available version on ESGF as of ",str(v.checked_on))
    # print checksum and tracking-id for first file listed
    # NOTE(review): indentation was lost in the source; these three prints
    # are assumed to sit inside the outer `for o` loop -- confirm.
    print(str(o.versions[0].files[0].sha256))
    print(str(o.versions[0].files[0].md5))
    print(str(o.versions[0].files[0].tracking_id))
|
from unittest import TestCase
import mock
from ..tf_log_base import TFLogBase
class TestTFLogBase(TestCase):
    """Unit tests for TFLogBase: constructor validation, abstract hooks,
    reduced-log iteration, and IP-query batching.

    NOTE(review): assertRaisesRegexp was renamed assertRaisesRegex in
    Python 3.2 and removed in 3.12; fine for this py2-era (standalone
    `mock`) suite, but worth migrating.
    """

    def test_defaults_to_public_tf_api(self):
        # No base_uri given -> production API endpoint.
        tf_log = TFLogBase([], 'foo')
        self.assertEqual(tf_log.base_uri, "https://api.threshingfloor.io")

    def test_base_uri_cannot_end_in_slash(self):
        with self.assertRaisesRegexp(Exception, "base_uri cannot end in slash"):
            TFLogBase([], 'foo', base_uri='http://asdf.com/')

    def test_ip_query_batch_size_cannot_be_greater_than_1000(self):
        with self.assertRaisesRegexp(Exception, "ip_query_batch_size cannot be more than 1000"):
            TFLogBase([], 'foo', ip_query_batch_size=1001)

    # The _extract_* and _analyze hooks are abstract in the base class and
    # must raise until a subclass implements them.
    def test_extract_line_features_must_be_defined(self):
        tf_log = TFLogBase([], 'foo')
        with self.assertRaisesRegexp(NotImplementedError, "Must be implemented"):
            tf_log._extract_line_features()

    def test_extract_features_must_be_defined(self):
        tf_log = TFLogBase([], 'foo')
        with self.assertRaisesRegexp(NotImplementedError, "Must be implemented"):
            tf_log._extract_features()

    def test_analyze_must_be_defined(self):
        tf_log = TFLogBase([], 'foo')
        with self.assertRaisesRegexp(NotImplementedError, "Must be implemented"):
            tf_log._analyze()

    def test_can_iterate_over_reduced_log_lines(self):
        # reduce() yields raw lines from quiet_logs by default, or from
        # noisy_logs when show_noisy=True.
        tf_log = TFLogBase([], 'foo')
        tf_log.quiet_logs = [{'raw': 'a'}, {'raw': 'b'}, {'raw': 'c'}]
        tf_log.noisy_logs = [{'raw': 'x'}, {'raw': 'y'}, {'raw': 'z'}]
        self.assertEqual(list(tf_log.reduce()), ['a', 'b', 'c'])
        self.assertEqual(list(tf_log.reduce(show_noisy=True)), ['x', 'y', 'z'])

    def test_can_batch_ip_queries_for_filter(self):
        # _get_filter must split the ip list into ip_query_batch_size chunks,
        # repeating the port list in every request.
        features = {'ips': ['1.1.1.1', '2.2.2.2', '3.3.3.3', '4.4.4.4', '5.5.5.5', '6.6.6.6'],
                    'ports': [22, 2222]}
        tf_log = TFLogBase([], 'foo', ip_query_batch_size=1)
        with mock.patch.object(tf_log, '_send_features') as mock_send_features:
            tf_log._get_filter(features)
            self.assertEqual(mock_send_features.call_args_list, [mock.call({'ips': ['1.1.1.1'], 'ports': [22, 2222]}),
                                                                 mock.call({'ips': ['2.2.2.2'], 'ports': [22, 2222]}),
                                                                 mock.call({'ips': ['3.3.3.3'], 'ports': [22, 2222]}),
                                                                 mock.call({'ips': ['4.4.4.4'], 'ports': [22, 2222]}),
                                                                 mock.call({'ips': ['5.5.5.5'], 'ports': [22, 2222]}),
                                                                 mock.call({'ips': ['6.6.6.6'], 'ports': [22, 2222]})])
        tf_log = TFLogBase([], 'foo', ip_query_batch_size=5)
        with mock.patch.object(tf_log, '_send_features') as mock_send_features:
            tf_log._get_filter(features)
            self.assertEqual(mock_send_features.call_args_list, [
                mock.call({'ips': ['1.1.1.1', '2.2.2.2', '3.3.3.3', '4.4.4.4', '5.5.5.5'], 'ports': [22, 2222]}),
                mock.call({'ips': ['6.6.6.6'], 'ports': [22, 2222]})])
|
"""
cTivoTelnetControl - a solution for controlling a TiVo over the internet
Converts a single key press into a command for a TiVo box
Charles Machalow - MIT License
"""
import telnetlib #for telnet connection
import sys #for args
import getch #local file for getch in Windows and Unix
import socket #for socket.error
#makes connection to the given TiVo ip, port on default TiVo port
#returns the telnetlib.Telnet object, used to hold the connection
def connect(ip, port=31339):
    """Open a telnet connection to the TiVo at *ip*:*port* (default TiVo port)
    and return the telnetlib.Telnet object holding the connection."""
    return telnetlib.Telnet(ip, str(port))
#returns a dictionary from keychar to telnet command
#expects a RemoteToKeyMappings.txt
def getKeyToTelnet(mapping_path='RemoteToKeyMappings.txt'):
    """Build a dict mapping key characters to telnet commands.

    Reads a tab-separated mapping file with one line per remote button:
    remote-button-name, key character, telnet command.  The remote button
    column is not used by code; it makes the file more human readable.

    *mapping_path* generalizes the previously hard-coded
    'RemoteToKeyMappings.txt' (default preserved for existing callers).
    Also renamed the loop file handle so it no longer shadows the `file`
    builtin.
    """
    d = {}
    with open(mapping_path, 'r') as fh:
        for line in fh:
            vals = line.split('\t')
            d[vals[1]] = vals[2].rstrip()
    return d
#main
if __name__ == "__main__":
port = 31339
ip = -1
#args handling
if len(sys.argv) == 3:
port = sys.argv[2]
ip = sys.argv[1]
tn = connect(ip, port)
elif len(sys.argv) == 2:
ip = sys.argv[1]
tn = connect(ip)
else:
print("Incorrect script args")
print("usage: python cTivoTelnetControl.py ip <port=31339>")
sys.exit(0)
keydict = getKeyToTelnet()
print("listening for keypresses...")
#go in key listening loop, send command as needed
while True:
try:
c = getch.getch().decode("ascii")
except UnicodeDecodeError:
print("Can't decode that key into binary, not usable")
#get one more key because arrows may be two seperate getch calls
getch.getch().decode("ascii")
continue
if c not in keydict:
print(c + " not in keydict, exiting...")
break
telnet_cmd = keydict[c] + "\r\n"
print ("recv'd \"" + c + "\" sending:" + telnet_cmd + " to " + str(ip) + ":" + str(port))
try:
tn.write(str.encode(telnet_cmd))
except socket.error:
print ("lost connection, reconnecting")
tn = connect(ip, port)
tn.write(str.encode(telnet_cmd))
tn.close()
|
import os
# Point Django at the project settings and bootstrap the ORM before the
# model import below (models require a configured Django environment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE','data_aggregate.settings')
import django
django.setup()
from data_agg_api.models import Temperature
import datetime
import random
import json
import requests
def create_temperatures():
    """Create 10 fake Temperature rows, one per day counting back from today.

    Each row gets a random temperature (-100..100, 2 decimals) at a random
    time of day.

    Fix: the previous version decremented `date` *before* saving/printing,
    so the console message reported a different day than the one actually
    stored in the record being announced.
    """
    print("Generating fake temperature data...")
    no_data = 10
    date = datetime.date.today()
    oneday = datetime.timedelta(days=1)
    for i in range(no_data):
        temp = round(random.uniform(-100, 100), 2)
        hour = random.randint(0, 23)
        minute = random.randint(0, 59)
        second = random.randint(0, 59)
        time = datetime.time(hour, minute, second)
        temperature = Temperature(temp=temp, date_time=str(date) + " " + str(time))
        temperature.save()
        # Report the date actually stored in this record.
        print("Create a temperature data at {0} Celcius, at {1} on {2}".format(temp, time, date))
        date = date - oneday
def create_json_data(no_data=10, path='data1.json'):
    """Write *no_data* fake temperature readings to a JSON file.

    Generates one reading per day counting back from today, each with a
    random temperature (-100..100, 2 decimals) at a random time of day.
    The output shape is {"data": [{"date_time": ..., "val": ...}, ...]}.

    *path* generalizes the previously hard-coded 'data1.json' output file
    (default preserved for existing callers).
    """
    print("Generating fake JSON data...")
    data_json = {"data": []}
    date = datetime.date.today()
    oneday = datetime.timedelta(days=1)
    for i in range(no_data):
        temp = round(random.uniform(-100, 100), 2)
        hour = random.randint(0, 23)
        minute = random.randint(0, 59)
        second = random.randint(0, 59)
        time = datetime.time(hour, minute, second)
        temperature = {
            "date_time": str(date) + " " + str(time),
            "val": temp,
        }
        data_json["data"].append(temperature)
        date = date - oneday
    with open(path, 'w') as json_file:
        json.dump(data_json, json_file)
def main():
    """Interactive driver: the 'simulate' command generates JSON batches and
    POSTs each one to the local aggregation API, aborting on any non-200."""
    print("Type you command here: ", end='')
    usage = input()
    if (usage == 'simulate'):
        print("Number of data: ", end = '')
        no_data = int(input())
        print("Number of loops: ", end='')
        loop = int(input())
        for i in range(loop):
            print("generating and calling api")
            create_json_data(no_data)
            res = requests.post("http://localhost:8000/api/temperatures/upload/", data = {'data_file': open('data1.json', 'rb').read()})
            if res.status_code != 200:
                return 0

# NOTE(review): indentation was lost in the source; this call is assumed to
# be the module-level entry point rather than a recursive re-prompt inside
# main() -- confirm against the original script.
main()
|
import tensorflow as tf
import numpy as np
import resnet_fcn
from datageneratorClusterLblMask import ImageDataGenerator2
import time
import scipy
import cv2
import matplotlib.pyplot as plt
import os
from skimage.transform import resize
from scipy.misc import imsave
import copy
#####################################################
"""
Configuration settings
"""
# Per-channel value used elsewhere for image normalization.
mean = np.array([128., 128., 128.], np.float32)

# Path to the textfiles for the trainings and validation set
train_file = './ColumbClustrTrain/ColmbClstrTrain1.txt'
AnnoBase = './BoundingBox/' # bounding box information of participants
MaskPath = './MaskGenetn/' # Mask storage path
MOMENTUM = 0.9
UPDATE_OPS_COLLECTION = 'resnet_update_ops' # must be grouped with training op
batch_size = 1 # number CAms to be taken at a time

#################################Excitation Map generator ########
train_generator = ImageDataGenerator2(train_file, shuffle=False, basePath='./MaskGenetn/SpkNonExTrain1/') # speak and not speak CAMs taking
num_classes = 2
# Number of batches needed to cover the whole training set.
Train_batches_per_epoch = np.ceil(train_generator.data_size / batch_size).astype(np.int16)
##########################Mask Generation #############
def Mask_Gen(Img):
    """Binarize an excitation map into a dilated uint8 mask.

    Otsu-thresholds *Img*, ORs the result onto a 512x832 canvas, and
    dilates twice with an 11x11 kernel to fatten the regions.

    Fix: removed dead code -- `thresh_fxd`, `j`, and the fixed-threshold
    `fnlthresh = thresh_fxd + cv2.mean(Img[j])[0]` were computed but never
    used (Otsu picks the threshold automatically), as was the returned
    Otsu threshold value.
    """
    kernel = np.ones((11, 11), np.uint8)
    mask = np.zeros(shape=[512, 832], dtype=float)
    Img2 = Img.astype('uint8')
    _, im_bw = cv2.threshold(Img2, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # NOTE(review): assumes Img is 512x832 so the OR with `mask` lines up;
    # confirm upstream shapes.
    mask = np.logical_or.reduce([mask, im_bw])
    mask_Fnl = cv2.dilate(mask.astype(np.uint8), kernel, iterations=2)
    return mask_Fnl
if __name__ == "__main__":
    temp = np.empty([0, 2048])
    label = np.empty([0, 1])
    batch_size = 1;
    for _ in range(Train_batches_per_epoch ):
        batch_tx, batch_ty, AnnoLoctn,Paths,_ = train_generator.next_batch(batch_size)
        #print(Paths)
        # NOTE(review): test_count resets every batch, so output filenames
        # repeat across batches and earlier images are overwritten -- confirm
        # whether this is intended.
        test_count = 0
        MaskImg = []
        for j in range(batch_size):
            ##################Mask Label Image Generation###########
            labls = batch_ty[j]
            peopl = len(labls)
            #####################People Location###############
            Imheight= batch_tx[0].height# inp
            Imwidth= batch_tx[0].width# inp
            Locatns = AnnoLoctn[j]
            # Paint each person's horizontal strip with (label + 1), using
            # the bounding-box columns in Locatns to size each strip.
            Person1 = np.multiply((labls[0]+1),np.ones(shape=[Imheight,Locatns[1]], dtype=int))
            Person2 = np.multiply((labls[1] + 1), np.ones(shape=[Imheight,(Locatns[3]-Locatns[2]-1)], dtype=int))
            All = np.hstack((Person1,Person2))
            if(peopl>2):
                # Optional third participant.
                Person3 = np.multiply((labls[2] + 1),
                                      np.ones(shape=[Imheight, (Locatns[5] - Locatns[4] - 1)], dtype=int))
                All = np.hstack((All, Person3))
            Fnl = resize(All, (Imheight,Imwidth),preserve_range=True)
            Fnl_Spk = copy.copy(Fnl)
            Fnl_NonSpk = copy.copy(Fnl)
            # Zero out the other class in each copy (values are label + 1;
            # presumably 2 marks speaking and 1 not speaking -- confirm).
            Fnl_Spk[Fnl_Spk==1] = 0
            Fnl_NonSpk[Fnl_NonSpk== 2] = 0
            Spk_mskExc = batch_tx[0,:,:]
            NonSpk_mskExc = batch_tx[1, :, :]
            # Mask each excitation map by its class region and binarize.
            Spk_msk = Mask_Gen(np.multiply(Spk_mskExc, Fnl_Spk ),)
            NonSpk_msk = Mask_Gen(np.multiply(NonSpk_mskExc, Fnl_NonSpk),)
            Fnl_Spk = np.multiply(Spk_msk,2)
            Fnl2 = Fnl_Spk + NonSpk_msk #Fnl_MakGeneratd
            ##################################done###########################
            names = MaskPath +"/Mask/Img" + str(test_count).zfill(5) + '.jpg'
            test_count = test_count+1
            # Scale class ids (0/1/2) into a visible grayscale range.
            Fnl2= np.multiply(Fnl2,127)
            imsave(names, Fnl2)
|
import psm.agemodels
import psm.coral
import psm.cellulose
import psm.icecore
import psm.speleo
import psm.aux_functions
|
from netconify.tty_serial import Serial
from netconify.tty_telnet import Telnet
from netconify import constants as C
# Re-export package metadata from the constants module so that
# netconify.__version__ / __date__ / __author__ are available at package level.
__version__ = C.version
__date__ = C.date
__author__ = C.author
|
# Copyright 2018 The Fuego Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
"""
import os
import sys
# Resolve the repository root (two directories above this file) and put its
# 'lib' directory and the root itself at the front of sys.path so sibling
# modules (e.g. ``settings``) import without installation.
fuegoRoot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(fuegoRoot, 'lib'))
sys.path.insert(0, fuegoRoot)
import settings
settings.fuegoRoot = fuegoRoot
import logging
# import gdal
def mapping_with_bounds(latLong, latLongBounds, diffLatLong, rasterSize):
    """Map a geographic coordinate to a raster pixel index.

    :param latLong: coordinate (latitude or longitude) to convert
    :param latLongBounds: coordinate of the raster's origin edge
    :param diffLatLong: per-pixel coordinate increment (may be negative)
    :param rasterSize: number of pixels along this raster axis
    :return: integer pixel index in [0, rasterSize), or None when the
             coordinate falls outside the raster
    """
    logging.warning('coords: %f, %f, %f', latLong, latLongBounds, diffLatLong)
    pix = int((latLong - latLongBounds) / diffLatLong)
    logging.warning('pix: (%d)', pix)
    # Valid indices are 0 .. rasterSize-1; the previous "<=" off-by-one
    # allowed pix == rasterSize, which would raise IndexError downstream
    # when used to index the raster array.
    if 0 <= pix < rasterSize:
        return pix
    else:
        logging.warning("sorry coordinate not in data (%d > %d) or (%d < %d)", latLong, diffLatLong*rasterSize, latLong, latLongBounds)
        return None
# def main():
# reqArgs = [
# ["g", "geoTiffName", "File name of geotiff"],
# ["a", "lat", "latitude of desired point", float],
# ["o", "long", "longtitude of desired point", float],
# ]
# args = collect_args.collectArgs(reqArgs, optionalArgs=[], parentParsers=[goog_helper.getParentParser()])
# tiffData = gdal.Open(args.geoTiffName)
# logging.warning('x: %d, y: %d', tiffData.RasterXSize, tiffData.RasterYSize)
# metadata = tiffData.GetGeoTransform()
# logging.warning('metadata: %s', metadata)
# specs = tiffData.ReadAsArray(xoff=0, yoff=0)
# logging.warning('specs: %s', specs)
#
# coordX = mapping_with_bounds(args.long, metadata[0], metadata[1], tiffData.RasterXSize)
# coordY = mapping_with_bounds(args.lat, metadata[3], metadata[5], tiffData.RasterYSize)
# if coordX != None and coordY != None:
# val = specs[coordX,coordY]
# logging.warning("The value is (%s)", val)
#
#
# if __name__=="__main__":
# main()
|
from ..kast import KAtt, KClaim, KRule, KToken
from ..ktool import KompileBackend
from ..prelude import Sorts
from .kprove_test import KProveTest
class SimpleProofTest(KProveTest):
    """Integration tests that run kprove over simple-proofs.k, with and without lemmas."""

    # kompile configuration for the definition under test
    KOMPILE_MAIN_FILE = 'k-files/simple-proofs.k'
    KOMPILE_BACKEND = KompileBackend.HASKELL
    KOMPILE_OUTPUT_DIR = 'definitions/simple-proofs'
    KOMPILE_EMIT_JSON = True
    KPROVE_USE_DIR = '.simple-proof-test'

    @staticmethod
    def _update_symbol_table(symbol_table):
        # No pretty-printing overrides needed for this definition.
        pass

    def test_prove_claim_with_lemmas(self):
        """The claim should fail without the simplification lemma and succeed with it."""
        # Given
        new_lemma = KRule(KToken('pred1(3) => true', Sorts.BOOL), requires=KToken('pred1(4)', Sorts.BOOL), att=KAtt(atts={'simplification': ''}))
        new_claim = KClaim(KToken('<k> foo => bar ... </k> <state> 3 |-> 3 </state>', 'TCellFragment'), requires=KToken('pred1(4)', Sorts.BOOL))
        # When
        result1 = self.kprove.prove_claim(new_claim, 'claim-without-lemma')
        result2 = self.kprove.prove_claim(new_claim, 'claim-with-lemma', lemmas=[new_lemma])
        # Then
        self.assertNotTop(result1)
        self.assertTop(result2)

    def test_prove_claim_rule_profile(self):
        """Proving with rule_profile set should write a profile file of 4 lines."""
        # Given
        new_lemma = KRule(KToken('pred1(3) => true', Sorts.BOOL), requires=KToken('pred1(4)', Sorts.BOOL), att=KAtt(atts={'simplification': ''}))
        new_claim = KClaim(KToken('<k> foo => bar ... </k> <state> 3 |-> 3 </state>', 'TCellFragment'), requires=KToken('pred1(4)', Sorts.BOOL))
        rule_profile = self.KPROVE_USE_DIR + '/rule-profile'
        # When
        _ = self.kprove.prove_claim(new_claim, 'claim-with-lemma', lemmas=[new_lemma], rule_profile=rule_profile)
        # Then
        with open(rule_profile, 'r') as rp:
            lines = rp.read().split('\n')
            self.assertEqual(len(lines), 4)
|
# Simple home-loan eligibility check: approve only when the monthly
# installment stays below 30% of the applicant's salary.
house_price = int(input('Diga o valor da casa: '))
salary = int(input('Qual o seu salario? '))
years = int(input('Em quantos anos pretende pagar? '))
colors = {'lin': '\033[m', 'red': '\033[31m', 'gre': '\033[32m', 'yel': '\033[33m', 'blu': '\033[34m'}
monthly_payment = house_price / (years * 12)
separator = (colors['blu'] + '-=' + colors['lin']) * 25
print(separator)
if monthly_payment < salary * 0.3:
    print(colors['gre'] + 'Seu enprestimo foi aprovaado' + colors['lin'])
else:
    print(colors['red'] + 'Lamento mas você não pode finaciar essa casa' + colors['lin'])
print(separator)
|
from django.core.exceptions import ValidationError
import re
def validate_password(password):
    """Validate password strength for use as a Django field validator.

    Requires at least 8 characters, at least one uppercase letter, and at
    least one character outside [a-zA-Z0-9].

    :raises ValidationError: when any rule is violated.
    """
    # Leftover debug print() calls were removed: they wrote every password
    # attempt to stdout (a security/logging hazard) and one of them
    # ('pass is not short') described the wrong check.
    if len(password) < 8:
        raise ValidationError('Password should be longer than 8 chars')
    elif re.search('[A-Z]', password) is None:
        raise ValidationError('Password should have at least one capital letter')
    elif re.search('^[a-z0-9A-Z]+$', password) is not None:
        # The whole password being alphanumeric means no special character.
        raise ValidationError('Password should contain at least one special char')
def validate_email(email):
    """Raise ValidationError when *email* does not look like a valid address."""
    pattern = r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$'
    if not re.search(pattern, email):
        raise ValidationError('Email is not valid')
import django_tables2 as tables
from netbox.tables import BaseTable, ToggleColumn
from sidekick.models import (
AccountingProfile,
AccountingSource,
BandwidthProfile,
)
# django_tables2 template snippets: render a record (or its accounting
# profile's member) as a hyperlink to the object's detail page.
NAME_LINK = """
<a href="{{ record.get_absolute_url }}">{{ record }}</a>
"""
MEMBER_LINK = """
<a href="{{ record.accounting_profile.member.get_absolute_url }}">
{{ record.accounting_profile.member.name }}
</a>
"""
class AccountingProfileTable(BaseTable):
    """Table of accounting profiles with their currently-effective bandwidth limits."""
    # Show everything on one page; profile lists are expected to be small.
    table_pagination = {
        'per_page': 1000,
    }
    pk = ToggleColumn()
    name = tables.TemplateColumn(
        template_code=NAME_LINK,
        verbose_name='Profile Name',
    )
    # Computed columns: empty_values=() forces the render_* methods to run
    # even though the model has no field of this name.
    traffic_cap = tables.Column(empty_values=())
    burst_limit = tables.Column(empty_values=())

    def render_traffic_cap(self, value, record):
        # Pull the cap from the profile's currently-effective bandwidth profile.
        v = record.get_current_bandwidth_profile()
        if v is not None:
            return v.traffic_cap
        return '-'

    def render_burst_limit(self, value, record):
        # Same lookup as traffic_cap; '-' when no profile is in effect.
        v = record.get_current_bandwidth_profile()
        if v is not None:
            return v.burst_limit
        return '-'

    class Meta(BaseTable.Meta):
        model = AccountingProfile
        fields = (
            'pk', 'name', 'enabled',
        )
class AccountingSourceTable(BaseTable):
    """Table of accounting sources (collection device, name, destination)."""
    pk = ToggleColumn()
    name = tables.LinkColumn()

    class Meta(BaseTable.Meta):
        model = AccountingSource
        fields = ('pk', 'device', 'name', 'destination')
class BandwidthProfileTable(BaseTable):
    """Table of bandwidth profiles, linking each row to its member's page."""
    pk = ToggleColumn()
    member = tables.TemplateColumn(
        template_code=MEMBER_LINK,
        verbose_name='Member',
    )

    class Meta(BaseTable.Meta):
        model = BandwidthProfile
        fields = ('pk', 'member', 'traffic_cap', 'burst_limit', 'effective_date')
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# instantiate the db
db = SQLAlchemy()
def create_app(script_info=None):
    """Flask application factory.

    :param script_info: unused; accepted for Flask CLI compatibility.
    :return: a configured :class:`flask.Flask` application.
    """
    # instantiate the app
    app = Flask(__name__)
    # set config from the object path named in the APP_SETTINGS env var
    app_settings = os.getenv('APP_SETTINGS')
    app.config.from_object(app_settings)
    # set up extensions
    db.init_app(app)
    # register blueprints (imported here to avoid a circular import)
    from project.api.stocks import stocks_blueprint
    app.register_blueprint(stocks_blueprint)
    # shell context for flask cli.
    # BUG FIX: shell_context_processor registers a *callable* that returns the
    # context dict; passing the dict itself makes `flask shell` fail when it
    # tries to call each registered processor.
    app.shell_context_processor(lambda: {'app': app, 'db': db})
    return app
|
"""
Settings for Ublox Reader package
:author: Angelo Cutaia
:copyright: Copyright 2021, LINKS Foundation
:version: 1.0.0
..
Copyright 2021 LINKS Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import configparser
import os
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 0)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
############
# SETTINGS #
############
# Parser shared by the whole package; sections/keys live in ublox_config.ini.
config = configparser.ConfigParser()
"""Config object"""
DEV_PATH = os.path.join(
    os.path.abspath(os.path.dirname(__file__)), "config/ublox_config.ini"
)
"""Path for the configuration file in developer mode"""
USER_PATH = "/etc/ublox-reader/config/ublox_config.ini"
"""Path for the configuration file in user mode"""
# configparser.read() silently skips files that do not exist, and later
# files override earlier ones, so USER_PATH settings win over DEV_PATH.
config.read((DEV_PATH, USER_PATH))
"""Read from configuration file"""
|
# Copyright (c) 2009 Matt Harrison
#from distutils.core import setup
from setuptools import setup
from cov2emacslib import meta
# Minimal setuptools metadata for the cov2emacs tool; version and author are
# sourced from cov2emacslib.meta so they are defined in one place.
setup(name='cov2emacs',
      version=meta.__version__,
      author=meta.__author__,
      description='FILL IN',  # TODO: real description before release
      scripts=['bin/cov2emacs'],
      package_dir={'cov2emacslib':'cov2emacslib'},
      packages=['cov2emacslib'],
      )
|
#!/usr/bin/env python
#
# Copyright 2011-2012 BloomReach, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Unit tests for Zinc.
'''
from __future__ import with_statement
import unittest, sys, os, commands, subprocess
import zinc
TMP_DIR = "/tmp/zinc-unittests"
def run(command):
    # Echo the command, then execute it through the shell and return the
    # exit code. (Python 2 print-statement syntax; test helper only.)
    print "shell> %s" % command
    code = subprocess.call(command, shell=True)
    return code
def create_file(path, value):
    """Write *value* to the file at *path*, truncating any existing file.

    Uses a with-statement so the handle is closed even if write() raises;
    the previous open()/write()/close() sequence leaked the handle on error.
    """
    with open(path, "w") as f:
        f.write(value)
def create_zeroed_file(path, size_in_mb):
    # Create a file of size_in_mb megabytes filled with zero bytes via dd.
    run("dd if=/dev/zero bs=1000000 count=%s of=%s" % (size_in_mb, path))
class ZincUnitTests(unittest.TestCase):
    """Unit tests for the zinc module.

    Python 2 era code: relies on the removed `commands` module and the
    deprecated assertEquals alias.
    """

    def setUp(self):
        # Recreate a clean scratch directory. The startswith guard protects
        # against rm -rf'ing anything if TMP_DIR is ever misconfigured.
        if TMP_DIR.startswith("/tmp/"):
            run("rm -rf %s" % TMP_DIR)
        run("mkdir -p %s" % TMP_DIR)

    def test_md5(self):
        # zinc.file_md5 must agree with the system md5sum on a small file.
        filename = "%s/somefile" % TMP_DIR
        create_file(filename, "some contents")
        val = zinc.file_md5(filename)
        val2 = commands.getoutput("md5sum %s" % filename).split()[0]
        self.assertEquals(val, val2)

    def test_md5_large(self):
        # Same agreement check on a 40 MB file (exercises chunked hashing).
        filename = "%s/bigfile" % TMP_DIR
        create_zeroed_file(filename, 40)
        val = zinc.file_md5(filename)
        val2 = commands.getoutput("md5sum %s" % filename).split()[0]
        self.assertEquals(val, val2)

    def test_shortvals(self):
        # Round-trip through the short key=value serialization; entries whose
        # value is None are skipped on serialization.
        s = zinc.shortvals_to_str([("key1", "val1"), ("key2", 23), ("key3", "val3-foobar"), ("skipme", None)])
        d = zinc.shortvals_from_str("key1=val1;key2=23;key3=val3-foobar")
        self.assertEquals(s, zinc.shortvals_to_str(d))

    def test_decompose_store_path(self):
        # decompose_store_path maps a raw store path to (kind, scope, rest).
        def call(input, expected):
            result = zinc.Repo.decompose_store_path(input)
            self.assertEquals(result, expected)
        call(("znversion"), ("meta", "", "znversion"))
        call(("scopes"), ("meta", "", "scopes"))
        call(("_c/0000000000000/manifest"), ("manifest", "", "0000000000000/manifest"))
        call(("_f/0000000000000/filea1"), ("file", "filea1", "0000000000000/filea1"))
        call(("_s/dir1/tags"), ("tags", "dir1", "tags"))
        call(("_s/dir1/_b/main/commits"), ("commits", "dir1", "main/commits"))
        call(("_s/dir1/_f/0000000000001/fileb3"), ("file", "dir1/fileb3", "0000000000001/fileb3"))
        call(("_s/dir1/_s/subdir1/_f/0000000000001/fileb4"), ("file", "dir1/subdir1/fileb4", "0000000000001/fileb4"))
        call(("_s/dir1/_s/subdir1/_s/subdir2/_f/0000000000001/fileb4"), ("file", "dir1/subdir1/subdir2/fileb4", "0000000000001/fileb4"))
        call(("_s/dir1/_s/subdir1/_s/_s/_f/0000000000001/fileb4"), ("file", "dir1/subdir1/_s/fileb4", "0000000000001/fileb4"))

    def test_temp_output_dir(self):
        # temp_output_dir stages writes in a temp dir, then atomically moves
        # the whole tree to out_dir on context exit.
        out_dir = "./out-dir"
        run("rm -rf %s" % out_dir)
        with zinc.temp_output_dir(out_dir) as temp_dir:
            assert os.path.isdir(temp_dir)
            run("mkdir -p %s/subdir/subsubdir" % temp_dir)
            run("mkdir -p %s/empty-subdir" % temp_dir)
            with open("%s/foo" % temp_dir, "w") as f: f.write("blah1")
            with open("%s/bar" % temp_dir, "w") as f: f.write("blah2")
            with open("%s/subdir/baz" % temp_dir, "w") as f: f.write("blah3")
            with open("%s/subdir/subsubdir/other" % temp_dir, "w") as f: f.write("blah4")
            temp_dir_save = temp_dir
            assert sorted(list(zinc.list_files_recursive(temp_dir))) == ["bar", "foo", "subdir/baz", "subdir/subsubdir/other"]
            assert sorted(list(zinc.list_dirs_recursive(temp_dir))) == ['empty-subdir', 'subdir', 'subdir/subsubdir']
        assert sorted(list(zinc.list_files_recursive(out_dir))) == ["bar", "foo", "subdir/baz", "subdir/subsubdir/other"]
        assert not os.path.isdir(temp_dir_save)

    def test_temp_output_file(self):
        # atomic_output_file exposes a temp path and renames it into place on exit.
        out_file = "out-file"
        run("rm -rf %s" % out_file)
        with zinc.atomic_output_file(out_file) as temp_file:
            with open(temp_file, "w") as f: f.write("contents")
        with open(out_file) as f:
            assert f.read() == "contents"

    def test_ignore_path(self):
        # WorkingDir.ignore_path filters editor backups and temp-copy dirs.
        ignore_path = zinc.WorkingDir.ignore_path
        assert not ignore_path("foo")
        assert not ignore_path("/foo/bar")
        assert ignore_path("foo.bak")
        assert ignore_path("blah/foo~")
        assert not ignore_path("foo.ba")
        assert ignore_path("foo.orig")
        assert not ignore_path("foo.")
        assert ignore_path(".foo")
        assert ignore_path("blah/blah/.tmp-copy.xxx/abc/def")
        assert not ignore_path("blah/blah/.tmp-keep/abc/def")

    def test_parent_dirs(self):
        # parent_dirs yields (ancestor, remainder) pairs from deepest to root.
        assert list(zinc.parent_dirs("a")) == [("a", ""), ("", "a")]
        assert list(zinc.parent_dirs("/a/b/c")) == [("/a/b/c", ""), ("/a/b", "c"), ("/a", "b/c"), ("/", "a/b/c")]
        assert list(zinc.parent_dirs("a/b/c")) == [("a/b/c", ""), ("a/b", "c"), ("a", "b/c"), ("", "a/b/c")]

    def test_compress_lzo(self):
        # LZO compress/decompress round-trip must reproduce the file bit-for-bit.
        filename = "%s/somefile" % TMP_DIR
        lzo_filename = filename + ".lzo"
        uncompressed_filename = filename + ".new"
        create_zeroed_file(filename, 5)
        zinc.compress_lzo(filename, lzo_filename)
        zinc.decompress_lzo(lzo_filename, uncompressed_filename)
        code = run("cmp %s %s" % (filename, uncompressed_filename))
        assert code == 0
if __name__ == '__main__':
    # Run the suite manually (rather than unittest.main) so we can print a
    # summary line and return a meaningful process exit code for CI.
    suite = unittest.TestLoader().loadTestsFromTestCase(ZincUnitTests)
    test_result = unittest.TextTestRunner(verbosity=2).run(suite)
    print "%s errors, %s failures" % (len(test_result.errors), len(test_result.failures))
    sys.exit(1 if len(test_result.errors) + len(test_result.failures) > 0 else 0)
|
# Search a phrase for every occurrence of a character and report each index.
phrase = input("Entrer une phrase :")
# Prompt fix: the original read "Entrer Entrez ..." (duplicated verb).
lettre = input("Entrez le caractère recherché :")
if lettre in phrase:
    # enumerate() replaces the range(len(...)) index loop.
    for i, caractere in enumerate(phrase):
        if caractere == lettre:
            print("Le caractère", lettre, "recherché est trouvé à l'index", i, "de la phrase")
else:
    print("Le caractère recherché n'existe pas dans la phrase")
# Problem: https://www.hackerrank.com/challenges/python-tuples/problem
# Score: 10
|
from onto_tool import onto_tool
import pydot
def test_local_instance():
    """Render the dependency graphic for local ontology files and check the dot edges."""
    output_base = 'tests-output/graphic/test_schema'
    onto_tool.main([
        'graphic',
        '-t', 'Local Ontology',
        '--no-image',
        '-o', output_base,
        'tests/graphic/domain_ontology.ttl',
        'tests/graphic/upper_ontology.ttl',
        'tests/graphic/instance_data.ttl'
    ])
    (graph,) = pydot.graph_from_dot_file(output_base + '.dot')
    actual_edges = sorted((edge.get_source(), edge.get_destination())
                          for edge in graph.get_edges())
    assert actual_edges == [
        ('Domain', 'Upper'),
        ('Instances', 'Domain'),
    ]
|
import pytest
from rcache import cache, lru_cache
def execute_times(n):
    """Return a wrapper that calls a function *n* times and returns the last result.

    Robustness fix: returns None when n <= 0; previously the result variable
    was never assigned and the wrapper raised UnboundLocalError.
    """
    def wrap(f, *args, **kwargs):
        last = None
        for _ in range(n):
            last = f(*args, **kwargs)
        return last
    return wrap
def inspect_max_cache_size(size):
    """Decorator factory: assert after every call that f.cache holds at most *size* entries."""
    def decorator(f):
        def checked(*args, **kwargs):
            value = f(*args, **kwargs)
            assert len(f.cache) <= size
            return value
        # Expose the wrapped function's cache so tests can inspect it.
        checked.cache = f.cache
        return checked
    return decorator
def execute_once(f):  # WITH GIVEN PARAMETERS
    """Wrap *f* so each distinct (args, kwargs) combination may run only once.

    A repeat invocation with the same arguments trips an AssertionError that
    names the offending call.
    """
    seen = set()
    def wrap(*args, **kwargs):
        signature = (args, frozenset(kwargs.items()))
        assert signature not in seen, \
            f"you can {f.__name__}({', '.join(str(_) for _ in args)}{', ' if args and kwargs else ''}" \
            f"{', '.join(f'{k}={v!r}' for k, v in kwargs.items())}) only once"
        result = f(*args, **kwargs)
        # Record the signature only after a successful call.
        seen.add(signature)
        return result
    return wrap
def test_base():
    """@cache on an instance method: five calls hit the underlying function once;
    an undecorated @execute_once method fails on its second call."""
    class A:
        @cache
        @execute_once
        def foo(self):
            return 42
        @execute_once
        def bar(self):
            return 13
    a = A()
    assert execute_times(5)(a.foo) == 42
    assert a.bar() == 13
    with pytest.raises(AssertionError):
        a.bar()
def test_classmethod():
    """@cache over @classmethod binds to the class: the cached value (42) is
    returned even after an instance shadows `secret`, and via both A.foo and a.foo."""
    class A:
        secret = 42
        @cache
        @classmethod
        @execute_once
        def foo(cls):
            return cls.secret
    a = A()
    a.secret = 13
    assert execute_times(5)(A.foo) == 42
    assert execute_times(5)(a.foo) == 42
def test_staticmethod():
    """@cache over @staticmethod keys on the arguments only: each distinct x
    is computed once, shared between class and instance access."""
    class A:
        @cache
        @staticmethod
        @execute_once
        def foo(x):
            return 42 * x
    a = A()
    assert execute_times(3)(a.foo, 2) == 42 * 2
    assert execute_times(3)(A.foo, 2) == 42 * 2
    assert execute_times(3)(a.foo, 3) == 42 * 3
    assert execute_times(3)(A.foo, 3) == 42 * 3
def test_generate_key():
    """A custom generate_key lets the cache key on self.id; keep_stat=True
    exposes hit/miss counters (1 miss, then 9 hits for 10 calls)."""
    class A:
        def __init__(self, id):
            self.id = id
        @lru_cache(generate_key = lambda self: self.id, keep_stat=True)
        @execute_once
        def foo(self):
            return 100000 * self.id
    a = A(id=3)
    assert execute_times(10)(a.foo) == 100000 * 3
    assert A.foo.cache == {3: 100000 * 3}
    assert A.foo.cache.misses == a.foo.cache.misses == 1
    assert A.foo.cache.hits == a.foo.cache.hits == 9
def test_stat_optimisation():
    """keep_stat=False should omit the hits/misses counters from the cache entirely."""
    class A:
        @lru_cache(keep_stat=False)
        @execute_once
        def foo(self): pass
    a = A()
    assert execute_times(5)(a.foo) is None
    assert not hasattr(a.foo.cache, 'hits')
    assert not hasattr(a.foo.cache, 'misses')
def test_lru_cache():
    """maxsize=2 evicts the least recently used entry: after (1,2), (2,2), (5,5),
    re-calling (1,2) must re-execute the function, which @execute_once turns
    into an AssertionError."""
    @inspect_max_cache_size(2)
    @lru_cache(maxsize=2, keep_stat=True)
    @execute_once
    def foo(a, b):
        return a + b
    cache = foo.cache
    assert foo(1, 2) == 3
    assert cache.misses == 1 and cache.hits == 0
    assert foo(1, 2) == 3
    assert cache.misses == 1 and cache.hits == 1
    assert foo(2, 2) == 4
    assert foo(2, 2) == 4
    assert foo(5, 5) == 10
    with pytest.raises(AssertionError):
        foo(1, 2) # because it ran once, removed from cache, and tries to recalculate
    # Only the two most recently used entries remain.
    assert cache == {
        ((2, 2), frozenset()): 4,
        ((5, 5), frozenset()): 10,
    }
def test_wrapped():
    """lru_cache must preserve the wrapped function's __name__ and __doc__
    (functools.wraps-style metadata forwarding)."""
    @lru_cache(keep_stat=True)
    def foo(a: int, b: int):
        'adds integers'
        return a + b
    assert foo.__name__ == 'foo'
    assert foo.__doc__ == 'adds integers'
import torch
from mavi.torch.base_class.numerical_basis import Nbasist_fn
from mavi.torch.base_class.numerical_basis import NBasist as _Basist
from mavi.torch.base_class.numerical_basis import Intermidiate as _Intermidiate
from mavi.torch.util.util import res, pres, matrixfact, blow
class Basist(_Basist):
    # Thin wrapper over the numerical-basis Basist; G holds the vanishing
    # component and F the nonvanishing one (see construct_basis_t).
    def __init__(self, G, F):
        super().__init__(G, F)
class Intermidiate(_Intermidiate):
    # [sic] name kept for interface compatibility. Carries FX, the evaluation
    # matrix of the current nonvanishing polynomials on the data points.
    def __init__(self, FX):
        super().__init__(FX)
    def extend(self, interm):
        # Delegate merging of another intermediate's evaluations to the base class.
        super().extend(interm)
def initialize(X, **kwargs):
    """Build the degree-0 basis for point set X (npoints x ndims).

    The single degree-0 polynomial is the constant 1/sqrt(npoints), so its
    evaluation vector over the points has unit norm; G0 starts empty (no
    vanishing polynomials yet). All tensors are created on X's device.
    """
    device = X.device
    npoints, ndims = X.shape
    constant = 1./npoints**0.5
    F0 = Nbasist_fn(torch.ones(1,1, device=device)*constant)
    G0 = Nbasist_fn(torch.zeros(0,0, device=device))
    FX = torch.ones(npoints, 1, device=device) * constant
    interm = Intermidiate(FX)
    basis0 = Basist(G0, F0)
    return [basis0], interm
def init_candidates(X, **kwargs):
    # Degree-1 candidates are the coordinate functions themselves, i.e. the
    # data matrix X used directly as an evaluation matrix.
    return Intermidiate(X)
def candidates(int_1, int_t):
    # Degree-(t+1) candidates: pairwise products ("blow") of degree-1 and
    # degree-t nonvanishing evaluations.
    return Intermidiate(blow(int_1.FX, int_t.FX))
# @profile
def construct_basis_t(cands, intermidiate, eps, **kwargs):
    """One degree step of the numerical basis construction.

    Projects the candidate evaluations orthogonally to the span of the
    lower-degree nonvanishing polynomials, factorizes the residual, and
    splits directions into nonvanishing (singular value > eps) and
    vanishing (<= eps) parts.
    """
    CtX = cands.FX  # evaluation matrix of candidate polynomials
    FX = intermidiate.FX  # evaluation matrix of nonvanishing polynomials up to degree t-1
    CtX_, L = pres(CtX, FX)  # orthogonal projection
    d, V = matrixfact(CtX_)
    # Nonvanishing directions: residual above tolerance; columns normalized
    # so each new polynomial has unit evaluation norm.
    FtX = CtX_ @ V[:, d>eps]
    scales = FtX.norm(dim=0)
    FtX /= scales
    Ft = Nbasist_fn(V[:, d>eps] / scales, L)
    Gt = Nbasist_fn(V[:, d<=eps], L)
    return Basist(Gt, Ft), Intermidiate(FtX)
import traceback
import uuid
from oslo.config import cfg
import requests
from st2actions.query.base import Querier
from st2common.util import jsonify
from st2common import log as logging
from st2common.util.url import get_url_without_trailing_slash
from st2common.constants.action import (LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED,
LIVEACTION_STATUS_RUNNING)
LOG = logging.getLogger(__name__)
DONE_STATES = {'ERROR': LIVEACTION_STATUS_FAILED, 'SUCCESS': LIVEACTION_STATUS_SUCCEEDED}
def get_query_instance():
    # Factory with a fresh UUID as the querier id.
    # NOTE(review): duplicates get_instance() at the bottom of this module —
    # likely kept for backwards compatibility; confirm before removing either.
    return MistralResultsQuerier(str(uuid.uuid4()))
class MistralResultsQuerier(Querier):
    """Polls the Mistral v2 API for workflow execution status, output and tasks."""

    def __init__(self, id, *args, **kwargs):
        # `id` shadows the builtin, but the parameter name is kept for
        # interface compatibility with existing callers.
        super(MistralResultsQuerier, self).__init__(*args, **kwargs)
        self._base_url = get_url_without_trailing_slash(cfg.CONF.mistral.v2_base_url)

    def query(self, execution_id, query_context):
        """
        Queries mistral for workflow results using v2 APIs.
        :param execution_id: st2 execution_id (context to be used for logging/audit)
        :type execution_id: ``str``
        :param query_context: context for the query to be made to mistral. This contains mistral
                              execution id.
        :type query_context: ``objext``
        :rtype: (``str``, ``object``)
        """
        exec_id = query_context.get('mistral', {}).get('execution_id', None)
        if not exec_id:
            raise Exception('Mistral execution id invalid in query_context %s.' %
                            str(query_context))
        try:
            status, output = self._get_workflow_result(exec_id)
            if output and 'tasks' in output:
                # LOG.warn is a deprecated alias; use warning().
                LOG.warning('Key conflict with tasks in the workflow output.')
        except requests.exceptions.ConnectionError:
            # Transient network failure: report still-running so the querier retries.
            msg = 'Unable to connect to mistral.'
            trace = traceback.format_exc(10)
            LOG.exception(msg)
            return (LIVEACTION_STATUS_RUNNING, {'error': msg, 'traceback': trace})
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed while preserving the re-raise.
            LOG.exception('Exception trying to get workflow status and output for '
                          'query context: %s. Will skip query.', query_context)
            raise
        result = output or {}
        try:
            result['tasks'] = self._get_workflow_tasks(exec_id)
        except requests.exceptions.ConnectionError:
            msg = 'Unable to connect to mistral.'
            trace = traceback.format_exc(10)
            LOG.exception(msg)
            return (LIVEACTION_STATUS_RUNNING, {'error': msg, 'traceback': trace})
        except Exception:
            # Task details are best-effort: log and fall through with whatever
            # result we already have. (Was a bare `except:`.)
            LOG.exception('Unable to get workflow results for '
                          'query_context: %s. Will skip query.', query_context)
        LOG.debug('Mistral query results: %s' % result)
        return (status, result)

    def _get_execution_tasks_url(self, exec_id):
        # URL listing the tasks of one execution.
        return self._base_url + '/executions/' + exec_id + '/tasks'

    def _get_execution_url(self, exec_id):
        # URL of one execution resource.
        return self._base_url + '/executions/' + exec_id

    def _get_workflow_result(self, exec_id):
        """
        Returns the workflow status and output. Mistral workflow status will be converted
        to st2 action status.
        :param exec_id: Mistral execution ID
        :type exec_id: ``str``
        :rtype: (``str``, ``dict``)
        """
        url = self._get_execution_url(exec_id)
        resp = requests.get(url)
        execution = resp.json()
        workflow_state = execution.get('state', None)
        if not workflow_state:
            raise Exception('Workflow status unknown for mistral execution id %s. %s'
                            % (exec_id, execution))
        if workflow_state in DONE_STATES:
            workflow_output = jsonify.try_loads(execution.get('output', {}))
            return (DONE_STATES[workflow_state], workflow_output)
        return (LIVEACTION_STATUS_RUNNING, None)

    def _get_workflow_tasks(self, exec_id):
        """
        Returns the list of tasks for a workflow execution.
        :param exec_id: Mistral execution ID
        :type exec_id: ``str``
        :rtype: ``list``
        """
        url = self._get_execution_tasks_url(exec_id)
        resp = requests.get(url)
        result = resp.json()
        tasks = result.get('tasks', [])
        result = []
        for task in tasks:
            # Format the task output
            formatted_task = self._format_task_result(task=task)
            result.append(formatted_task)
        return result

    def _format_task_result(self, task):
        """
        Format task result to follow the unified workflow result format.
        """
        result = {}
        result['id'] = task['id']
        result['name'] = task['name']
        result['workflow_execution_id'] = task.get('workflow_execution_id', None)
        result['workflow_name'] = task['workflow_name']
        result['created_at'] = task.get('created_at', None)
        result['updated_at'] = task.get('updated_at', None)
        result['state'] = task.get('state', None)
        result['input'] = task.get('input', None)
        result['result'] = task.get('result', None)
        # 'result' and 'input' may arrive JSON-encoded; decode when possible.
        for attr in ['result', 'input']:
            result[attr] = jsonify.try_loads(task.get(attr, None))
        return result
def get_instance():
    # Standard entry point used by the query-runner registry; a fresh UUID
    # serves as the querier id.
    return MistralResultsQuerier(str(uuid.uuid4()))
|
import os
# Environment contract: MFMODULE and MFCOM_HOSTNAME are required (a missing
# one is a deployment error, so the KeyError at import time is intentional);
# MFMODULE_VERSION is optional and defaults to 'unknown'.
MFMODULE = os.environ['MFMODULE']
HOSTNAME = os.environ['MFCOM_HOSTNAME']
MFMODULE_VERSION = os.environ.get('MFMODULE_VERSION', 'unknown')
def transform_func(dict_object):
    """Enrich a JSON log record with module metadata, or drop it.

    Returns None (meaning: discard the record) for records emitted by the
    elasticsearch / jsonlog2elasticsearch loggers; otherwise fills in any
    missing 'module', 'hostname' and 'module_version' keys and returns the
    (mutated) record.
    """
    # FIXME: don't hardcode elasticsearch here
    # But it's difficult to block elasticsearch logger where it's used only
    # in jsonlog2elasticsearch
    if dict_object.get("name") in ("elasticsearch", "jsonlog2elasticsearch"):
        return None
    # Lambdas keep the lookups lazy: a default is only resolved when its
    # key is actually missing from the record.
    for key, default in (
        ("module", lambda: MFMODULE),
        ("hostname", lambda: HOSTNAME),
        ("module_version", lambda: MFMODULE_VERSION),
    ):
        if key not in dict_object:
            dict_object[key] = default()
    return dict_object
|
import os
# Required environment variables (KeyError if unset is intentional);
# the literal "!MtG" suffix is appended to HODOR verbatim.
mailacct = os.environ['MAILACCT']
hodor = os.environ['HODOR'] + "!MtG"
|
# Python program to convert decimal to binary
# Author: Yeffian the Teapot
num = 0 # Declaring a num variable to check and store input
exitCode = 1 # Declaring a exit code, which the user can input to stop the program
# Function to convert decimal to binary
def decimalToBinary(n):
    """Return the binary representation of integer *n* without a '0b' prefix.

    Uses format() so that 0 yields '0'; the previous
    bin(n).replace("0b", "") returned an empty string for 0.
    Negative numbers keep their leading '-', matching the old behavior.
    """
    return format(n, "b")
# Infinite loop to check for input
# NOTE(review): the exit value is still converted and printed once before the
# loop notices it at the top of the next iteration — confirm this is intended.
while True:
    if num == exitCode: # Checking if the input is the exit code
        print("Goodbye!")
        break # Using the 'break' keyword to break out of the loop
    # If the input is not the exit code
    print("Welcome to my decimal to binary converter! Use the exit code '1' to exit the program!")
    num = int(input("Enter a decimal number: ")) # Asking for input
    print(decimalToBinary(num)) # Printing the binary output
|
#!/usr/bin/env python3
import subprocess
import os
import re
import time
import rpyc
import configparser
import fileinput
from threading import Thread
from clic import initnode
from clic import nodesup
from clic import synchosts
from clic import pssh
from clic import nodes
config = configparser.ConfigParser()
config.read('/etc/clic/clic.conf')
# Constants
settings = config['Daemon']
minRuntime = settings.getint('minRuntime')  # seconds a node must run before it may be deleted
namescheme = settings['namescheme']  # hostname scheme shared by cluster nodes
import logging as loggingmod
loggingmod.basicConfig(filename=settings['logfile'], format='%(levelname)s: %(message)s', level=loggingmod.CRITICAL)
# NOTE: `logging` deliberately shadows the stdlib module name from here on;
# it is the daemon's own logger.
logging = loggingmod.getLogger('clic')
logging.setLevel(loggingmod.DEBUG)
isCloud = settings.getboolean('cloudHeadnode')
# Cloud settings
from clic import cloud as api
cloud = api.getCloud()
# Queue settings
# Head node when our short hostname matches the namescheme, or always when
# not running in the cloud.
isHeadnode = os.popen('hostname -s').read().strip() == namescheme or not isCloud
from clic import queue as q
queue = q.getQueue(isHeadnode, nodes.partitions)
class Job:
    """A queued job number plus the wall-clock moment it was first seen."""
    def __init__(self, num):
        self.num = num           # queue job number (string or int, as given)
        self.time = time.time()  # registration timestamp
    def timeWaiting(self):
        # Seconds elapsed since this job was registered.
        return time.time() - self.time
# Per-partition lists of waiting Job objects, keyed by partition object.
jobs = {partition : [] for partition in nodes.partitions}
def getNodesInState(state):
    """Return the set of registered nodes whose state flag equals *state*."""
    return set(filter(lambda node: node.state == state, nodes.nodes))
def getDeletableNodes(partition):
    """Idle nodes in *partition* that are running ('R') and past the minimum runtime."""
    idle_nodes = queue.idle()
    return [candidate for candidate in idle_nodes
            if candidate.partition == partition
            and candidate.state == 'R'
            and candidate.timeInState() >= minRuntime]
def create(numToCreate, partition):
    """Provision up to *numToCreate* new cloud nodes in *partition*.

    Skips (and cleans up) node names whose disk already exists in the cloud;
    returns early when the partition has no free node slots left.
    """
    existingDisks = cloud.getDisks()
    while numToCreate > 0:
        # Get a valid node
        while True:
            node = nodes.getFreeNode(partition)
            if node == None:
                # Partition exhausted: nothing more can be created.
                return
            elif node.name in existingDisks:
                # An orphaned disk should not exist for a free node; delete it
                # and try the next candidate.
                node.setState('D')
                logging.warning('Disk for {0} exists, but shouldn\'t! Deleting...'.format(node.name))
                cloud.deleteDisk(node.name)
            else:
                break
        # Mark as creating ('C') and kick off the cloud build.
        node.setState('C')
        node.errors = 0
        queue.nodeChangedState(node)
        logging.info('Creating {}'.format(node.name))
        cloud.create(node)
        numToCreate -= 1
def deleteNode(node):
    """Mark *node* as deleting ('D'), notify the queue, and ask the cloud to remove it."""
    node.setState('D')
    logging.info('Deleting {}'.format(node.name))
    queue.nodeChangedState(node)
    cloud.delete(node)
    #subprocess.Popen('while true; do if [ -n "`sinfo -h -N -o "%N %t" | grep "{0} " | awk \'{{print $2}}\' | grep drain`" ]; then echo Y | gcloud compute instances delete {0}; break; fi; sleep 10; done'.format(node.name), shell=True)
def mainLoop():
    """Daemon reconciliation loop: sync node state between the cloud, the queue
    (SLURM) and our own registry, then scale nodes to match queued jobs.

    Each `continue` restarts the loop after a state transition so later stages
    always see fresh book-keeping.
    """
    while True:
        if not isCloud:
            synchosts.addAll()
        # Start with some book keeping
        queueRunning = queue.running()
        cloudRunning = nodesup.responds()
        cloudAll = nodesup.all(False)
        # Nodes that were creating and now are running:
        cameUp = []
        for node in cloudRunning:
            if node.state == 'C':
                node.setState('R')
                initnode.init(node.name, node.partition.cpus, node.partition.disk, node.partition.mem)
                cameUp.append(node)
                logging.info('Node {} came up'.format(node.name))
        if len(cameUp) > 0:
            queue.configChanged()
            for node in cameUp:
                queue.nodeChangedState(node)
            continue
        # Nodes that were deleting and now are gone:
        nodesWentDown = False
        for node in getNodesInState('D') - cloudAll:
            nodesWentDown = True
            node.setState('')
            queue.nodeChangedState(node)
            logging.info('Node {} went down'.format(node.name))
        if nodesWentDown:
            # There's a chance they'll come up later with different IPs.
            queue.configChanged()
            continue
        # Error conditions:
        # We think they're up, but the cloud doesn't:
        for node in getNodesInState('R') - cloudAll:
            logging.warning('Node {} deleted outside of clic!'.format(node.name))
            deleteNode(node)
        # We think they're running, but slurm doesn't:
        for node in getNodesInState('R') - queueRunning:
            if node.timeInState() > 30:
                logging.error('Node {} is unresponsive!'.format(node.name))
                queue.restart(False, node=node)
                node.errors += 1
                if node.errors < 5:
                    # Spam a bunch of stuff to try to bring it back online
                    initnode.init(node.name, node.partition.cpus, node.partition.disk, node.partition.mem)
                    queue.restart(True, node=node)
                    time.sleep(5)
                    # NOTE(review): this inner loop reuses the name `node`,
                    # clobbering the outer loop variable — confirm intended.
                    for node in getNodesInState('R'):
                        queue.restart(False, node=node)
                else:
                    # Something is very wrong. Kill it.
                    node.setState('D')
                    logging.error('Node {} is unresponsive. Deleting...'.format(node.name))
                    queue.nodeChangedState(node)
                    cloud.delete(node)
        # Nodes are running but aren't registered:
        for node in cloudRunning - getNodesInState('R') - getNodesInState('D'):
            logging.warning('Encountered unregistered node {}!'.format(node.name))
            node.setState('R')
            if not node in queueRunning:
                queue.nodeChangedState(node)
        # Nodes that are taking way too long to boot:
        for node in getNodesInState('C'):
            if node.timeInState() > 200:
                logging.error('Node {} hung on boot!'.format(node.name))
        # Book keeping for jobs. Modify existing structure rather than replacing because jobs keep track of wait time.
        # jobs = {partition : [job, ...], ...}
        # qJobs = [[jobNum, partition], ...]
        qJobs = queue.queuedJobs()
        # Delete dequeued jobs
        # NOTE(review): removing from jobs[partition] while iterating it can
        # skip the element after each removal — verify this is acceptable here.
        for partition in jobs:
            for job in jobs[partition]:
                if job.num not in [qJob[0] for qJob in qJobs if qJob[1] == partition]:
                    jobs[partition].remove(job)
        # Add new jobs
        # Sometimes, immediately after slurmctld restarts, running jobs are listed as queued. Only queue jobs with a number greater than any other job.
        sampleNum = 0
        for partition in jobs:
            for job in jobs[partition]:
                if int(job.num) > sampleNum:
                    sampleNum = int(job.num)
        for qJob in qJobs:
            if qJob[1] in jobs and qJob[0] not in [job.num for job in jobs[qJob[1]]] and int(qJob[0]) > sampleNum:
                jobs[qJob[1]].append(Job(qJob[0]))
        # Create and delete nodes
        for partition in jobs:
            deletable = getDeletableNodes(partition)
            creating = {node for node in getNodesInState('C') if node.partition == partition}
            running = {node for node in getNodesInState('R') if node.partition == partition}
            if len(creating) + len(running) == 0 and len(jobs[partition]) > 0:
                # Cold partition with waiting jobs: create roughly one node per two jobs.
                create(int((len(jobs[partition]) + 1) / 2), partition)
            else:
                # SLURM may not have had the chance to utilize some "running" nodes
                unutilized = 0
                for node in running:
                    if node.timeInState() < 60:
                        unutilized += 1
                jobsWaitingTooLong = [job for job in jobs[partition] if job.timeWaiting() > 30]
                create(int((len(jobsWaitingTooLong) + 1) / 2 - len(creating) - len(deletable) - unutilized), partition)
            # Delete nodes
            if len(deletable) > 0 and len(jobs[partition]) == 0:
                # No waiting jobs: retire about half of the deletable nodes.
                for node in deletable[0:int((len(deletable) + 1) / 2)]:
                    deleteNode(node)
class exportNodes(rpyc.Service):
    """rpyc service exposing the current node list to local clients (see startServer)."""
    def on_connect(self):
        pass
    def on_disconnect(self):
        pass
    def exposed_getNodes(self):
        # allow_public_attrs in the server config lets clients inspect the
        # returned node objects.
        return nodes.nodes
def startServer():
    """Run the rpyc node-export server on localhost:18861 (blocking)."""
    # NOTE(review): this __name__ guard means the server only starts when the
    # module is executed as a script, even though startServer() is launched
    # from main() in a Thread — confirm this is intentional.
    if __name__ == "__main__":
        from rpyc.utils.server import ThreadedServer
        t = ThreadedServer(exportNodes, hostname='localhost', port=18861, protocol_config={'allow_public_attrs':True})
        t.start()
def main():
    """Entry point for the clic daemon.

    Parses command-line arguments, starts the RPyC node server in a
    background thread, then runs either the head-node main loop or the
    compute-node path depending on `isHeadnode`.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Start the clic daemon')
    from clic import version
    parser.add_argument('-v', '--version', action='version', version=version.__version__)
    parser.parse_args()
    # Serve node information in the background for the daemon's lifetime.
    Thread(target = startServer).start()
    if isHeadnode:
        # This is the head node
        logging.info('Starting clic as a head node')
        # Sort out ssh keys
        from clic import copyid
        copyid.refresh(True)
        copyid.copyAll(True)
        copyid.send()
        queue.restart(True)
        mainLoop()
    else:
        # This is a compute node
        logging.info('Starting clic as a compute node')
|
import os
import io
from time import sleep
import time
import random
import re
import pathlib
import discord
from redbot.core import commands, bot, checks, data_manager, Config
from functools import reduce
from typing import List, Optional
from .eris_event_lib import ErisEventMixin
# Fall back to a plain object base when redbot's Cog class is unavailable.
BaseCog = getattr(commands, "Cog", object)
# Type of a compiled regular expression, used for annotations below.
RETYPE = type(re.compile("a"))
class OutOfContext(BaseCog, ErisEventMixin):
    """Cog that harvests quoted snippets ("out of context" quotes) from
    channel history and occasionally replays one in chat.

    Per-guild persistent state (via redbot Config):
      - ooc_blocklist: lowercased phrases that disqualify a quote
      - quotes: the harvested quote strings
      - quote_hash: word -> [quotes containing that word]
        NOTE(review): registered with a *list* default below but written and
        read as a dict elsewhere in this cog -- confirm the default should
        be ``{}``.
    """

    def __init__(self, bot_instance: bot):
        super().__init__()
        self.bot = bot_instance
        self.oocconfig = Config.get_conf(
            self,
            identifier=875239875438276234987523048752087,
            force_registration=True,
            cog_name="ooc",
        )
        default_guild = {
            "ooc_blocklist": [],
            "quotes": [],
            "quote_hash": [],
        }
        self.oocconfig.register_guild(**default_guild)
        # Matches straight or curly double-quoted spans; group 2 captures the
        # text between the quotation marks.
        self.message_match: RETYPE = re.compile(
            '(?:(["“])([^"”]*?)("|”))', flags=re.IGNORECASE
        ) # flag not required
        # channel_id -> rolling window of the last 5 lowercased messages.
        self.message_log = {}
        self.bot.add_listener(self.out_of_context_handler, "on_message")

    @commands.group()
    async def ooc(self, ctx: commands.Context):
        """Command group for the out-of-context quote features."""
        pass

    @ooc.command()
    @checks.mod()
    async def block(self, ctx: commands.Context, *phrase):
        """
        Add phrase to blocklist
        """
        # Join the word tuple back into a single lowercased phrase.
        phrase = " ".join(phrase).lower()
        async with self.oocconfig.guild(ctx.guild).ooc_blocklist() as blocklist:
            blocklist.append(phrase)
        await ctx.send("Success")

    @ooc.command()
    @checks.mod()
    async def show(self, ctx: commands.Context):
        """
        Show current blocklist. This will eventually break if you have too many lines.
        """
        lines = []
        async with self.oocconfig.guild(ctx.guild).ooc_blocklist() as blocklist:
            for i, phrase in enumerate(blocklist):
                lines.append(f"{i} {phrase}")
        lines = "\n".join(lines)
        await ctx.send(f"```\n{lines}\n```")

    @ooc.command()
    @checks.mod()
    async def remove(self, ctx: commands.Context, index: int):
        """
        Remove item from current blocklist.
        """
        async with self.oocconfig.guild(ctx.guild).ooc_blocklist() as blocklist:
            # Bounds check guards against stale indexes from `show`.
            if 0 <= index < len(blocklist):
                blocklist.pop(index)
        await ctx.send("Success")

    @ooc.command()
    @checks.mod()
    async def download(self, ctx: commands.Context):
        """
        Download all stored quotes as a text file attachment.
        """
        async with self.oocconfig.guild(ctx.guild).quotes() as quotes:
            await ctx.send(
                file=discord.File(io.StringIO("\n".join(quotes)), filename="ooc.txt")
            )

    async def out_of_context_handler(self, message):
        # Listener attached to on_message in __init__.
        if random.random() <= 0.99: # 1% chance of activation
            return
        ctx = await self.bot.get_context(message)
        # channel-specific logs for last 5 messages
        chan_id = message.channel.id
        if chan_id not in self.message_log:
            self.message_log[chan_id] = [message.clean_content.lower()]
        else:
            self.message_log[chan_id].append(message.clean_content.lower())
        if len(self.message_log[chan_id]) > 5:
            self.message_log[chan_id].pop(0)
        async with self.lock_config.channel(message.channel).get_lock():
            allowed: bool = await self.allowed(ctx, message)
            if not allowed:
                return
            reply = await self.get_quote(ctx)
            async with ctx.typing():
                # NOTE(review): time.sleep blocks the whole event loop for a
                # second; asyncio.sleep would be the non-blocking equivalent.
                # Confirm the blocking pause is intentional before changing.
                sleep(1)
                await message.channel.send(reply)
            await self.log_last_message(ctx, message)

    @commands.command()
    async def penny(self, ctx: commands.Context):
        """
        Penny for your thoughts? Posts a random out-of-context quote
        Usage: [p]penny
        """
        reply = await self.get_quote(ctx, most_recent=False)
        async with ctx.typing():
            # NOTE(review): blocking sleep inside a coroutine (see handler).
            sleep(1)
            await ctx.send(reply)

    async def get_quote(
        self, ctx: commands.Context, most_recent: Optional[bool] = True
    ):
        # Pick a quote, preferring one that shares a word (>3 chars) with the
        # channel's recent messages; fall back to a uniformly random quote.
        channel_id: int = ctx.channel.id
        async with self.oocconfig.guild(ctx.guild).quotes() as quotes:
            reply = random.choice(quotes)
        if channel_id not in self.message_log:
            return reply # just random if no logs
        split_msgs = [s.split(" ") for s in self.message_log[channel_id]]
        if most_recent:
            split_message = split_msgs[-1] # just grab the last
        else:
            # Flatten all logged messages and randomize word order.
            split_message = reduce(lambda a, b: a + b, split_msgs)
            random.shuffle(split_message)
        split_message = [s for s in split_message if len(s) > 3]
        async with self.oocconfig.guild(ctx.guild).quote_hash() as quote_hash:
            for word in split_message:
                if word in quote_hash:
                    reply = random.choice(quote_hash[word])
                    break
        return reply

    @ooc.command()
    @checks.mod()
    async def update(self, ctx: commands.Context):
        """
        Updates the out of context quotes from the current channel. WILL OVERWRITE ALL OTHERS!
        Usage: [p]update_ooc
        """
        channel: discord.TextChannel = ctx.channel
        async with self.oocconfig.guild(ctx.guild).ooc_blocklist() as blocklist:
            phrases_to_block = blocklist
        ooc_list = []
        # let's start with just the latest 500
        message: discord.Message
        last_message_examined: discord.Message = None
        message_count = 0
        stime = time.time()
        while True:
            # Page backwards through the channel history in chunks of 1000.
            chunk = await channel.history(
                limit=1000, before=last_message_examined
            ).flatten()
            if len(chunk) == 0:
                break
            message_count += len(chunk)
            for message in chunk:
                matches: List[tuple] = self.message_match.findall(message.content)
                for match in matches:
                    quote = match[1]  # group 2: text between the quote marks
                    if quote == "":
                        continue
                    # for/else: keep the quote only if no blocked phrase hit.
                    for phrase in phrases_to_block:
                        if phrase in quote.lower():
                            break
                    else:
                        ooc_list.append(quote)
                last_message_examined = message
        ooc_list = list(set(ooc_list))  # deduplicate
        await self.oocconfig.guild(ctx.guild).quotes.set(ooc_list)
        # Build the word -> quotes index consumed by get_quote.
        quote_hash = dict()
        for quote in ooc_list:
            quote_words = [_ for _ in quote.lower().split() if len(_) > 3]
            for word in quote_words:
                if word not in quote_hash:
                    quote_hash[word] = []
                quote_hash[word].append(quote)
        await self.oocconfig.guild(ctx.guild).quote_hash.set(quote_hash)
        delta = time.time() - stime
        minutes = delta // 60
        seconds = delta - (minutes * 60)
        await ctx.send(
            f"Done. Processed {message_count} messages, found {len(ooc_list)} quotes. Duration of {minutes:0.0f} minutes, {seconds:0.03f} seconds"
        )
|
import math
def binary_search(sorted_table, element):
    """Return the index of *element* in *sorted_table* via binary search.

    Fixes the original implementation, which looped forever when the element
    was absent, could index past the end of the table, and printed debug
    output on every iteration.

    Args:
        sorted_table: a sequence sorted in ascending order.
        element: the value to locate.

    Returns:
        The index of an occurrence of *element*.

    Raises:
        ValueError: if *element* is not present in *sorted_table*.
    """
    low, high = 0, len(sorted_table) - 1
    while low <= high:
        mid = (low + high) // 2
        if sorted_table[mid] == element:
            return mid
        if sorted_table[mid] < element:
            low = mid + 1
        else:
            high = mid - 1
    raise ValueError("element not found in sorted_table")
# Quick demo: locate the value 2 in a small sorted table.
table = list(range(1, 6))
print(str(binary_search(table, 2)))
|
# coding=utf-8
import re
from flask import Flask,render_template,session,redirect,url_for,flash,json,jsonify
from wtforms import Form, BooleanField,TextAreaField, IntegerField,TextField,HiddenField, PasswordField, validators,ValidationError,SelectField
from flask.ext.wtf import Form
from flask.ext.login import logout_user,login_required
from flask.ext.sqlalchemy import *
from wtforms import StringField,SubmitField
from wtforms.validators import Required
from flask import request
from flask.ext.script import Manager
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from werkzeug.security import generate_password_hash, check_password_hash
#from . import login_manager
from datetime import datetime
#import MySQLdb
import sys
from flask.ext.mail import Mail
#from flask.ext.mysql import MySQL
# Python 2-only hack: reload sys to re-expose setdefaultencoding, then force
# UTF-8 as the process-wide default string encoding (needed for the Chinese
# text stored by the models below).
reload(sys)
sys.setdefaultencoding('utf8')
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from flask.ext.login import UserMixin
from . import db, login_manager
class User(UserMixin,db.Model): # Maps the "user" table, which stores users' personal information.
    id=db.Column(db.Integer,primary_key=True)
    classes=db.Column(db.String(80))  # class/group the student belongs to
    student_number=db.Column(db.Integer)
    name=db.Column(db.String(80))
    # NOTE(review): String(32) suggests a fixed-size hash is stored here, but
    # nothing in this module hashes it -- confirm hashing happens before insert.
    password=db.Column(db.String(32))
    admin=db.Column(db.Boolean)  # True for administrator accounts
    onlineTime=db.Column(db.Integer)
    numofSubmit=db.Column(db.Integer)  # number of submissions made
    recentsubmitTime=db.Column(db.String(32))
    averageSpeed=db.Column(db.Integer)  # average typing speed (units: TODO confirm)
    def __init__(self,classes,student_number,name,password,admin,onlineTime,numofSubmit,recentsubmitTime,averageSpeed):
        # Plain field-by-field initialisation; no validation is performed here.
        self.classes=classes
        self.student_number=student_number
        self.name=name
        self.password=password
        self.admin=admin
        self.onlineTime=onlineTime
        self.numofSubmit=numofSubmit
        self.recentsubmitTime=recentsubmitTime
        self.averageSpeed=averageSpeed
class Writing_article(db.Model):
    """Maps the "writing_article" table, which stores the practice articles."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))
    rank = db.Column(db.Integer)
    content = db.Column(db.String(2040))

    def __init__(self, name, rank, content):
        # Assign each constructor argument to the attribute of the same name.
        for attr, val in (("name", name), ("rank", rank), ("content", content)):
            setattr(self, attr, val)
class practice_result(db.Model): # Maps the "practice_result" table, which stores users' everyday practice scores.
    id=db.Column(db.Integer,primary_key=True)
    classes=db.Column(db.String(80))
    student_number=db.Column(db.Integer)
    username=db.Column(db.String(50))
    articlename=db.Column(db.String(100))  # article the user practised on
    write_speed=db.Column(db.String(50))
    right_rate=db.Column(db.String(50))    # accuracy rate, stored as text
    rank=db.Column(db.Integer)
    submitTime=db.Column(db.String(50))
    writenum=db.Column(db.Integer)
    level=db.Column(db.String(50))
    def __init__(self,classes,student_number,username,articlename,write_speed,right_rate,rank,submitTime,writenum,level):
        # Plain field-by-field initialisation; no validation is performed here.
        self.classes=classes
        self.student_number=student_number
        self.username=username
        self.articlename=articlename
        self.write_speed=write_speed
        self.right_rate=right_rate
        self.rank=rank
        self.submitTime=submitTime
        self.writenum=writenum
        self.level=level
class context_list(db.Model):
    """Maps the "context_list" table: one row per contest, holding its name
    together with the start and end times."""
    id = db.Column(db.Integer, primary_key=True)
    contextname = db.Column(db.String(500))
    start_time = db.Column(db.String(50))
    end_time = db.Column(db.String(50))

    def __init__(self, contextname, start_time, end_time):
        # Assign each constructor argument to the attribute of the same name.
        for attr, val in (("contextname", contextname),
                          ("start_time", start_time),
                          ("end_time", end_time)):
            setattr(self, attr, val)
class context_result(db.Model): # Maps the "context_result" table, which stores per-user contest results.
    id=db.Column(db.Integer,primary_key=True)
    classes=db.Column(db.String(80))
    student_number=db.Column(db.Integer)
    username=db.Column(db.String(50))
    contextId=db.Column(db.Integer)       # references the contest in context_list
    write_speed=db.Column(db.String(50))
    right_rate=db.Column(db.String(50))   # accuracy rate, stored as text
    rank=db.Column(db.Integer)
    submitTime=db.Column(db.String(50))
    writenum=db.Column(db.Integer)
    articlename=db.Column(db.String(100))
    score=db.Column(db.Integer())
    level=db.Column(db.String(50))
    def __init__(self,classes,student_number,username,contextId,write_speed,right_rate,rank,submitTime,writenum,articlename,score,level):
        # Plain field-by-field initialisation; no validation is performed here.
        self.classes=classes
        self.student_number=student_number
        self.username=username
        self.contextId=contextId
        self.write_speed=write_speed
        self.right_rate=right_rate
        self.rank=rank
        self.submitTime=submitTime
        self.writenum=writenum
        self.articlename=articlename
        self.score=score
        self.level=level
class context_show(db.Model):
    """Maps the "context_show" table: per-contest details (the time limit and
    the articles that belong to a given contest)."""
    id = db.Column(db.Integer, primary_key=True)
    context_id = db.Column(db.Integer)
    articlename = db.Column(db.String(50))
    timelimit = db.Column(db.Integer)

    def __init__(self, context_id, articlename, timelimit):
        # Assign each constructor argument to the attribute of the same name.
        for attr, val in (("context_id", context_id),
                          ("articlename", articlename),
                          ("timelimit", timelimit)):
            setattr(self, attr, val)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: look up a User by primary key for session restore."""
    return User.query.get(int(user_id))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from scripts.classes import Connection, Footpath, Stop, Trip
from scripts.connectionscan_router import ConnectionScanData
from scripts.helpers.funs import seconds_to_hhmmss, hhmmss_to_sec
# Module-level Stop fixtures shared by the tests below.
# Stop(...) arguments appear to be (id, code, name, x, y); the coordinates are
# all 0.0 because positions are irrelevant to these tests -- TODO confirm.
fribourg = Stop("1", "FR", "Fribourg/Freiburg", 0.0, 0.0)
bern = Stop("2", "BN", "Bern", 0.0, 0.0)
zuerich_hb = Stop("3", "ZUE", "Zürich HB", 0.0, 0.0)
winterthur = Stop("4", "W", "Winterthur", 0.0, 0.0)
st_gallen = Stop("5", "SG", "St. Gallen", 0.0, 0.0)
interlaken_ost = Stop("6", "IO", "Interlaken Ost", 0.0, 0.0)
basel_sbb = Stop("7", "BS", "Basel SBB", 0.0, 0.0)
chur = Stop("8", "CH", "Chur", 0.0, 0.0)
thusis = Stop("9", "TH", "Thusis", 0.0, 0.0)
samedan = Stop("10", "SAM", "Samedan", 0.0, 0.0)
st_moritz = Stop("11", "SM", "St. Moritz", 0.0, 0.0)
# Bus stops below have no station code.
bern_duebystrasse = Stop("12", "", "Bern, Dübystrasse", 0.0, 0.0)
koeniz_zentrum = Stop("13", "", "Köniz, Zentrum", 0.0, 0.0)
bern_bahnhof = Stop("14", "", "Bern, Bahnhof", 0.0, 0.0)
ostermundigen_bahnhof = Stop("15", "", "Ostermundigen, Bahnhof", 0.0, 0.0)
samedan_bahnhof = Stop("16", "", "Samedan, Bahnhof", 0.0, 0.0)
samedan_spital = Stop("17", "", "Samedan, Spital", 0.0, 0.0)
def create_test_connectionscan_data():
    """Assemble a small but realistic ConnectionScanData fixture.

    Uses the 17 stops defined above, adds a same-stop transfer footpath for
    every stop (longer at the big stations) plus a few station<->bus-stop
    walking links, and creates forth-and-back trips on several lines.
    """
    stops_per_id = {s.id: s for s in [
        fribourg,
        bern,
        zuerich_hb,
        winterthur,
        st_gallen,
        interlaken_ost,
        basel_sbb,
        chur,
        thusis,
        samedan,
        st_moritz,
        bern_duebystrasse,
        koeniz_zentrum,
        bern_bahnhof,
        ostermundigen_bahnhof,
        samedan_bahnhof,
        samedan_spital,
    ]}
    # Default same-stop transfer time of 2 minutes for every stop ...
    footpaths_per_from_stop_to_stop_id = {(s.id, s.id): Footpath(s.id, s.id, 2 * 60) for s in stops_per_id.values()}
    # ... overridden with longer transfers at the larger stations.
    footpaths_per_from_stop_to_stop_id[(zuerich_hb.id, zuerich_hb.id)] = Footpath(zuerich_hb.id, zuerich_hb.id, 7 * 60)
    footpaths_per_from_stop_to_stop_id[(bern.id, bern.id)] = Footpath(bern.id, bern.id, 5 * 60)
    # Walking links between the railway station and the nearby bus stop.
    footpaths_per_from_stop_to_stop_id[(bern_bahnhof.id, bern.id)] = Footpath(bern_bahnhof.id, bern.id, 5 * 60)
    footpaths_per_from_stop_to_stop_id[(bern.id, bern_bahnhof.id)] = Footpath(bern.id, bern_bahnhof.id, 5 * 60)
    footpaths_per_from_stop_to_stop_id[(chur.id, chur.id)] = Footpath(chur.id, chur.id, 4 * 60)
    footpaths_per_from_stop_to_stop_id[(samedan.id, samedan_bahnhof.id)] = Footpath(samedan.id, samedan_bahnhof.id,
                                                                                    3 * 60)
    footpaths_per_from_stop_to_stop_id[(samedan_bahnhof.id, samedan.id)] = Footpath(samedan_bahnhof.id, samedan.id,
                                                                                    3 * 60)
    trips = []
    # Fribourg - St. Gallen line, every 30 minutes.
    trips += get_forth_and_back_trips(
        [fribourg, bern, zuerich_hb, winterthur, st_gallen],
        [22 * 60, 56 * 60, 26 * 60, 35 * 60],
        [6 * 60, 9 * 60, 3 * 60],
        hhmmss_to_sec("05:34:00"),
        32,
        30 * 60
    )
    # Interlaken Ost - Basel line, hourly.
    trips += get_forth_and_back_trips(
        [interlaken_ost, bern, basel_sbb],
        [52 * 60, 55 * 60],
        [12 * 60],
        hhmmss_to_sec("05:00:00"),
        16,
        60 * 60
    )
    # Basel - Chur line via Zürich, hourly.
    trips += get_forth_and_back_trips(
        [basel_sbb, zuerich_hb, chur],
        [53 * 60, 75 * 60],
        [11 * 60],
        hhmmss_to_sec("05:33:00"),
        16,
        60 * 60
    )
    # Chur - St. Moritz line, hourly.
    trips += get_forth_and_back_trips(
        [chur, thusis, samedan, st_moritz],
        [30 * 60, 75 * 60, 12 * 60],
        [2 * 60, 6 * 60],
        hhmmss_to_sec("05:58:00"),
        16,
        60 * 60
    )
    # Urban bus line in Bern, every 6 minutes, no dwell time.
    trips += get_forth_and_back_trips(
        [koeniz_zentrum, bern_duebystrasse, bern_bahnhof, ostermundigen_bahnhof],
        [6 * 60, 7 * 60, 15 * 60],
        [0, 0],
        hhmmss_to_sec("05:00:00"),
        10 * 16,
        6 * 60
    )
    # A single daily shuttle between Samedan station and the hospital.
    trips += get_forth_and_back_trips(
        [samedan_bahnhof, samedan_spital],
        [7 * 60],
        [],
        hhmmss_to_sec("15:00:00"),
        1,
        24 * 60 * 60
    )
    return ConnectionScanData(stops_per_id, footpaths_per_from_stop_to_stop_id, {t.id: t for t in trips})
def create_trips(stops, running_times, stop_times, first_departure, nb_trips, headway):
    """Build nb_trips trips along *stops*, one departure every *headway* seconds.

    running_times[i] is the travel time from stops[i] to stops[i+1];
    stop_times[i] is the dwell time at the intermediate stop stops[i+1].
    """
    all_trips = []
    for trip_nb in range(nb_trips):
        departure = first_departure + trip_nb * headway
        trip_id = "{}_{}_{}_{}".format(stops[0].name, stops[-1].name, seconds_to_hhmmss(departure), trip_nb)
        connections = []
        arrival = None
        for leg in range(len(stops) - 1):
            if leg == 0:
                leg_dep = departure
            else:
                leg_dep = arrival + stop_times[leg - 1]
            arrival = leg_dep + running_times[leg]
            connections.append(Connection(trip_id, stops[leg].id, stops[leg + 1].id, leg_dep, arrival))
        all_trips.append(Trip(trip_id, connections))
    return all_trips
def test_create_trips():
    """Spot-check trip count, stop ids and times produced by create_trips."""
    first_dep = 5 * 60 * 60 + 42 * 60
    trips_fri_sg = create_trips(
        [fribourg, bern, zuerich_hb, winterthur, st_gallen],
        [14 * 60, 58 * 60, 20 * 60, 38 * 60],
        [6 * 60, 5 * 60, 3 * 60],
        first_dep,
        32,
        30 * 60)
    assert len(trips_fri_sg) == 32
    # Endpoints of selected connections of the 4th trip.
    fourth = trips_fri_sg[3]
    for con_idx, (exp_from, exp_to) in [(0, ("1", "2")), (1, ("2", "3")), (-1, ("4", "5"))]:
        assert exp_from == fourth.connections[con_idx].from_stop_id
        assert exp_to == fourth.connections[con_idx].to_stop_id
    # Departure/arrival times of every connection of the 6th trip.
    sixth = trips_fri_sg[5]
    expected_times = [("08:12:00", "08:26:00"), ("08:32:00", "09:30:00"),
                      ("09:35:00", "09:55:00"), ("09:58:00", "10:36:00")]
    for con, (exp_dep, exp_arr) in zip(sixth.connections, expected_times):
        assert exp_dep == seconds_to_hhmmss(con.dep_time)
        assert exp_arr == seconds_to_hhmmss(con.arr_time)
def get_forth_and_back_trips(stops, running_times, stop_times, dep_first_trip, nb_trips, headway):
    """Create nb_trips trips in each direction along *stops* (forth, then back)."""
    forth = create_trips(stops, running_times, stop_times, dep_first_trip, nb_trips, headway)
    back = create_trips(
        list(reversed(stops)),
        list(reversed(running_times)),
        list(reversed(stop_times)),
        dep_first_trip,
        nb_trips,
        headway)
    return forth + back
def test_get_forth_and_back_trips():
    """Check that both directions are produced with the expected endpoints."""
    first_dep = 5 * 60 * 60 + 42 * 60
    trips = get_forth_and_back_trips(
        [fribourg, bern, zuerich_hb, winterthur, st_gallen],
        [14 * 60, 58 * 60, 20 * 60, 38 * 60],
        [6 * 60, 5 * 60, 3 * 60],
        first_dep,
        32,
        30 * 60)
    assert len(trips) == 64
    # First half runs Fribourg -> St. Gallen, second half the reverse.
    forth, back = trips[:32], trips[32:]
    assert "1" == forth[0].connections[0].from_stop_id
    assert "5" == forth[-1].connections[-1].to_stop_id
    assert "5" == back[0].connections[0].from_stop_id
    assert "1" == back[-1].connections[-1].to_stop_id
|
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import dpctl
import numpy as np
from numba import njit
from numba.tests.support import captured_stdout
import numba_dppy
import numba_dppy as dppy
from numba_dppy import config
from . import _helper
class TestWithDPPYContext(unittest.TestCase):
    """Verify that njit functions offload parfors inside a dppy offload
    context, using the DEBUG message the compiler prints to stdout, and that
    offloaded results match a plain CPU run."""

    @unittest.skipIf(not _helper.has_gpu_queues(), "No GPU platforms available")
    def test_with_dppy_context_gpu(self):
        # func fills its argument b with sin(a) via the nested njit function.
        @njit
        def nested_func(a, b):
            np.sin(a, b)
        @njit
        def func(b):
            a = np.ones((64), dtype=np.float64)
            nested_func(a, b)
        # Enable compiler debug output so the offload message is printed.
        config.DEBUG = 1
        expected = np.ones((64), dtype=np.float64)
        got_gpu = np.ones((64), dtype=np.float64)
        with captured_stdout() as got_gpu_message:
            device = dpctl.SyclDevice("opencl:gpu")
            with dppy.offload_to_sycl_device(device):
                func(got_gpu)
        config.DEBUG = 0
        # Reference run outside any offload context.
        func(expected)
        np.testing.assert_array_equal(expected, got_gpu)
        self.assertTrue("Parfor offloaded to opencl:gpu" in got_gpu_message.getvalue())

    @unittest.skipIf(not _helper.has_cpu_queues(), "No CPU platforms available")
    def test_with_dppy_context_cpu(self):
        # Same structure as the GPU test, but targeting the OpenCL CPU queue.
        @njit
        def nested_func(a, b):
            np.sin(a, b)
        @njit
        def func(b):
            a = np.ones((64), dtype=np.float64)
            nested_func(a, b)
        config.DEBUG = 1
        expected = np.ones((64), dtype=np.float64)
        got_cpu = np.ones((64), dtype=np.float64)
        with captured_stdout() as got_cpu_message:
            device = dpctl.SyclDevice("opencl:cpu")
            with dppy.offload_to_sycl_device(device):
                func(got_cpu)
        config.DEBUG = 0
        func(expected)
        np.testing.assert_array_equal(expected, got_cpu)
        self.assertTrue("Parfor offloaded to opencl:cpu" in got_cpu_message.getvalue())
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
import os
from setuptools import setup
# Read the long description next to this file; the context manager closes the
# file handle promptly (the original left it open).
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme_file:
    README = readme_file.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='djangorestframework-drilldown',
    version='0.1.1',
    url='http://github.com/peterh32/django-rest-framework-drilldown',
    license='MIT',
    packages=['rest_framework_drilldown'],
    include_package_data=True,
    description='Django REST API extension enables chained relations, filters, field selectors, limit, offset, etc., via a single view.',
    long_description=README,
    author='Peter Hollingsworth',
    author_email='peter@hollingsworth.net',
    install_requires=['djangorestframework'],
)
|
from setuptools import setup
def readme():
    """Return the contents of README.md as a single string."""
    with open('README.md') as readme_file:
        return readme_file.read()
# Package metadata; the long description is pulled from README.md above.
setup(
    name = 'Data_Split',
    packages = ['Data_Split_by_Bhawika'],
    version = '1.0.0',
    license='MIT',
    description = 'A python package to split Directory into Training, Testing and Validation Directory',
    long_description=readme(),
    long_description_content_type="text/markdown",
    author = 'BHAWIKA ARORA',
    author_email = 'bhawikavk2@gmail.com',
    url = 'https://github.com/Bhawika16/Data_Split', # Provide either the link to your github or to your website
    include_package_data=True,
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    # Installs a console command that invokes the split entry point.
    entry_points={
        "console_scripts": [
            "train_test_split=Data_Split_by_Bhawika.split_train_validation:main",
        ]
    },
)
# Generated by Django 3.0.7 on 2020-07-07 19:32
import applications.users.managers
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the custom email-based User model table.

    Auto-generated by Django; left byte-identical apart from comments, since
    applied migrations must not change.
    """

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                # Email is the unique login identifier for this custom user model.
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
                ('gender', models.CharField(choices=[('F', 'Female'), ('M', 'Male'), ('O', 'Other')], default='O', max_length=10)),
                ('name', models.CharField(blank=True, max_length=250)),
                ('is_staff', models.BooleanField(default=False)),
                ('is_superuser', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now)),
            ],
            options={
                'abstract': False,
            },
            managers=[
                # Custom manager handles user creation without a username field.
                ('objects', applications.users.managers.UserManager()),
            ],
        ),
    ]
|
from configs import cfg
from src.utils.record_log import _logger
from src.utils.nlp import dynamic_length, dynamic_keep
from src.utils.file import load_glove, save_file
import numpy as np
import nltk
import re
import os
import math
import random
class Dataset(object):
    """Sentence-classification dataset pipeline.

    Loads a labelled corpus (one "<label> <sentence>" line per sample),
    tokenizes it, builds token/char dictionaries, digitizes the samples and
    prepares GloVe-backed embedding matrices. Supports n-fold splitting and
    batch iteration for training and validation.
    """
    def __init__(self, data_file_path, dataset_type=None):
        self.dataset_type = dataset_type
        # Raw samples and the number of distinct gold labels.
        data_list, self.class_num = self.load_sc_and_process_data(data_file_path)
        # Token/char dictionaries plus dynamic max sentence/token lengths.
        self.dicts, self.max_lens = self.count_data_and_build_dict(data_list, gene_dicts=True)
        self.digital_data = self.digitize_data(data_list, self.dicts, self.dataset_type)
        self.sample_num = len(self.digital_data)
        self.emb_mat_token, self.emb_mat_glove = self.generate_index2vec_matrix()
        # Filled by split_dataset_to_blocks().
        self.nn_data_blocks = None
    # -------------- external use --------------
    def split_dataset_to_blocks(self, n_fold=10):
        # split data for 10-fold validation
        self.nn_data_blocks = self.split_data_list(self.digital_data, n_fold)
    def save_dict(self, path):
        """Persist the token/char/glove dictionaries as a pickle file."""
        save_file(self.dicts, path, 'token and char dict data', 'pickle')
    def generate_batch_sample_iter(self, validation_idx, max_step=None):
        """Yield (sample_batch, batch_num, data_round, batch_idx) tuples.

        With max_step set, iterates (shuffled, with wrap-around) over all
        folds except validation_idx for training; with max_step None it makes
        one ordered pass over the validation fold.
        """
        if max_step is not None:
            # Training: concatenate every fold except the held-out one.
            train_data_list = []
            for idx_db, data_block in enumerate(self.nn_data_blocks):
                if idx_db != validation_idx:
                    train_data_list.extend(data_block)
            batch_size = cfg.train_batch_size
            def data_queue(data, batch_size):
                # Endless shuffled batch generator; stops after max_step yields.
                assert len(data) >= batch_size
                random.shuffle(data)
                data_ptr = 0
                dataRound = 0
                idx_b = 0
                step = 0
                while True:
                    if data_ptr + batch_size <= len(data):
                        yield data[data_ptr:data_ptr + batch_size], dataRound, idx_b
                        data_ptr += batch_size
                        idx_b += 1
                        step += 1
                    elif data_ptr + batch_size > len(data):
                        # Wrap around: take the tail, reshuffle, top up from the head.
                        offset = data_ptr + batch_size - len(data)
                        out = data[data_ptr:]
                        random.shuffle(data)
                        out += data[:offset]
                        data_ptr = offset
                        dataRound += 1
                        yield out, dataRound, 0
                        idx_b = 1
                        step += 1
                    if step >= max_step:
                        break
            batch_num = math.ceil(len(train_data_list) / batch_size)
            for sample_batch, data_round, idx_b in data_queue(train_data_list, batch_size):
                yield sample_batch, batch_num, data_round, idx_b
        else:
            # Evaluation: one ordered pass over the validation fold.
            dev_data_list = self.nn_data_blocks[validation_idx]
            batch_size = cfg.test_batch_size
            batch_num = math.ceil(len(dev_data_list) / batch_size)
            idx_b = 0
            sample_batch = []
            for sample in dev_data_list:
                sample_batch.append(sample)
                if len(sample_batch) == batch_size:
                    yield sample_batch, batch_num, 0, idx_b
                    idx_b += 1
                    sample_batch = []
            if len(sample_batch) > 0:
                # Final partial batch.
                yield sample_batch, batch_num, 0, idx_b
    def get_statistic(self):
        """Return mean/std/max sentence length (in tokens) across all folds."""
        len_list = []
        output = {}
        for nn_data_block in self.nn_data_blocks:
            for sample in nn_data_block:
                len_list.append(len(sample['token']))
        len_array = np.array(len_list).astype('float32')
        output['mean'] = float(np.mean(len_array))
        output['std'] = float(np.std(len_array))
        output['max'] = float(np.max(len_array))
        return output
    # -------------- internal use --------------
    def load_sc_and_process_data(self, data_file_path):
        """Read "<label> <sentence>" lines; return (samples, num_classes)."""
        data_list = []
        gold_label_set = set()
        with open(data_file_path, 'r', encoding='latin-1') as file:
            for line in file:
                split_list = line.strip().split(' ')
                gold_label = int(split_list[0])
                sentence = ' '.join(split_list[1:])
                # NLTK tokenization plus extra splitting on dashes/quotes etc.
                token_list = Dataset.further_tokenize(nltk.word_tokenize(sentence))
                sample = {'sentence':sentence, 'token': token_list, 'gold_label': gold_label}
                data_list.append(sample)
                gold_label_set.add(gold_label)
        return data_list, len(gold_label_set)
    def count_data_and_build_dict(self, data_list, gene_dicts=True):
        """Collect corpus statistics and (optionally) build dictionaries.

        Returns (dicts, {'sent': max_sent_len, 'token': max_token_len}).
        """
        def add_ept_and_unk(a_list):
            # Reserve index 0 for padding/empty and index 1 for unknown.
            a_list.insert(0, '@@@empty')
            a_list.insert(1, '@@@unk')
            return a_list
        _logger.add()
        _logger.add('counting and build dictionaries')
        token_collection = []
        char_collection = []
        sent_len_collection = []
        token_len_collection = []
        for sample in data_list:
            token_collection += sample['token']
            sent_len_collection.append(len(sample['token']))
            for token in sample['token']:
                char_collection += list(token)
                token_len_collection.append(len(token))
        # Length caps chosen dynamically from the length distributions.
        max_sent_len = dynamic_length(sent_len_collection, 1, security=False)[0]
        max_token_len = dynamic_length(token_len_collection, 0.99, security=False)[0]
        if gene_dicts:
            # token & char
            tokenSet = dynamic_keep(token_collection, 1)
            charSet = dynamic_keep(char_collection, 1)
            if cfg.use_glove_unk_token:
                gloveData = load_glove(cfg.word_embedding_length)
                gloveTokenSet = list(gloveData.keys())
                if cfg.lower_word:
                    tokenSet = list(set([token.lower() for token in tokenSet])) ##!!!
                    gloveTokenSet = list(set([token.lower() for token in gloveTokenSet])) ##!!!
                # delete token from gloveTokenSet which appears in tokenSet
                for token in tokenSet:
                    try:
                        gloveTokenSet.remove(token)
                    except ValueError:
                        pass
            else:
                if cfg.lower_word:
                    tokenSet = list(set([token.lower() for token in tokenSet]))
                gloveTokenSet = []
            tokenSet = add_ept_and_unk(tokenSet)
            charSet = add_ept_and_unk(charSet)
            dicts = {'token': tokenSet, 'char': charSet, 'glove': gloveTokenSet}
        else:
            dicts = {}
        _logger.done()
        return dicts, {'sent': max_sent_len, 'token': max_token_len}
    def digitize_data(self, data_list, dicts, dataset_type):
        """Map tokens/chars to integer ids in place; unknowns map to index 1."""
        token2index = dict([(token, idx) for idx, token in enumerate(dicts['token'] + dicts['glove'])])
        char2index = dict([(token, idx) for idx, token in enumerate(dicts['char'])])
        def digitize_token(token):
            token = token if not cfg.lower_word else token.lower()
            try:
                return token2index[token]
            except KeyError:
                return 1
        def digitize_char(char):
            try:
                return char2index[char]
            except KeyError:
                return 1
        _logger.add()
        _logger.add('digitizing data: %s...' % dataset_type)
        for sample in data_list:
            sample['token_digital'] = [digitize_token(token) for token in sample['token']]
            sample['char_digital'] = [[digitize_char(char) for char in list(token)]
                                      for token in sample['token']]
        _logger.done()
        return data_list
    def generate_index2vec_matrix(self):
        """Build (mat_token, mat_glove) embedding matrices from GloVe vectors.

        Tokens without a GloVe vector keep their random initialisation; index
        0 (the padding token) is zeroed out.
        """
        _logger.add()
        _logger.add('generate index to vector numpy matrix')
        token2vec = load_glove(cfg.word_embedding_length)
        if cfg.lower_word:
            newToken2vec = {}
            for token, vec in token2vec.items():
                newToken2vec[token.lower()] = vec
            token2vec = newToken2vec
        # prepare data from trainDataset and devDataset
        mat_token = np.random.uniform(-0.05, 0.05, size=(len(self.dicts['token']), cfg.word_embedding_length)).astype(
            cfg.floatX)
        mat_glove = np.zeros((len(self.dicts['glove']), cfg.word_embedding_length), dtype=cfg.floatX)
        for idx, token in enumerate(self.dicts['token']):
            try:
                mat_token[idx] = token2vec[token]
            except KeyError:
                pass
        mat_token[0] = np.zeros(shape=(cfg.word_embedding_length,), dtype=cfg.floatX)
        for idx, token in enumerate(self.dicts['glove']):
            mat_glove[idx] = token2vec[token]
        _logger.add('Done')
        return mat_token, mat_glove
    def split_data_list(self, data_list, n=10):
        """Shuffle data_list in place and split it into n nearly equal folds."""
        assert len(data_list) >= n
        random.shuffle(data_list)
        unit_len = len(data_list) * 1. / n
        idxs = [math.floor(idx * unit_len) for idx in range(n + 1)] # len = n+1
        idxs[-1] = len(data_list)
        nn_data = []
        for i in range(n):
            nn_data.append(data_list[idxs[i]:idxs[i+1]])
        return nn_data
    @staticmethod
    def further_tokenize(temp_tokens):
        """Split NLTK tokens further on dashes, slashes, quote marks, etc."""
        tokens = [] # [[(s,e),...],...]
        for token in temp_tokens:
            l = (
                "-", "\u2212", "\u2014", "\u2013", "/", "~", '"', "'", "\u201C", "\u2019", "\u201D", "\u2018", "\u00B0")
            tokens.extend(re.split("([{}])".format("".join(l)), token))
        return tokens
if __name__ == '__main__':
    # Ad-hoc smoke test: build a Dataset from the first local corpus path.
    paths = [
        '/Users/xxx/Workspaces/dataset/sentence_classification/custrev.all',
        '/Users/xxx/Workspaces/dataset/sentence_classification/mpqa.all',
        '/Users/xxx/Workspaces/dataset/sentence_classification/rt-polarity.all',
        '/Users/xxx/Workspaces/dataset/sentence_classification/subj.all',
    ]
    data_obj = Dataset(paths[0], cfg.dataset_type)
|
# Two small integer constants.
num1, num2 = 1, 3
|
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import pandasql
def main():
    """Load the IEEE and tag datasets, then run every analysis step."""
    years = [str(y) for y in range(2002, 2013)]
    ieee_data = load_ieee_data()
    tags_data = load_tags_data(years)
    # IEEE-level analyses.
    analyse_countries(ieee_data)
    analyse_publications(ieee_data)
    # Per-year tag analyses.
    analyse_tags(tags_data, years)
def load_ieee_data():
    """Read the IEEE publication records and normalise country/tag strings.

    'NA' countries and empty tag strings become NaN; everything else is
    lowercased.
    """
    data = pd.read_json('final-data/ieee_data.json')
    def normalise(value, missing_marker):
        return np.NaN if value == missing_marker else value.lower()
    data['country'] = data['country'].map(lambda x: normalise(x, 'NA'))
    data['tags'] = data['tags'].map(lambda x: normalise(x, ''))
    return data
def load_tags_data(years):
    """Read the per-year tag summaries into a dict keyed by year string."""
    return {year: pd.read_json('final-data/tags_' + year + '.json')
            for year in years}
def analyse_countries(ieee_data):
    """Aggregate publications and citations per country; print the top 20 of
    each metric and dump the full tables to CSV under analysed-data/."""
    grouped = ieee_data.groupby('country')
    grouped_citations = grouped['citations']
    pubs = grouped_citations.count()
    citations = grouped_citations.sum()
    # Only keep countries with a meaningful publication volume (>= 300).
    pubs_filtered = pubs[pubs >= 300]
    citations_filtered = citations[pubs >= 300]
    citations_per_pub = citations_filtered / pubs_filtered
    # NOTE(review): Series.sort() sorts in place and was removed in modern
    # pandas (use sort_values); this file targets a legacy pandas/Python 2
    # stack (note the print statements below).
    pubs.sort(ascending=False)
    citations_filtered.sort(ascending=False)
    citations_per_pub.sort(ascending=False)
    print 'Number of Publications by Country'
    print '---------------------------------'
    print pubs[0:20]
    print '\n'
    print 'Number of Citations by Country'
    print '------------------------------'
    print citations_filtered[0:20]
    print '\n'
    print 'Number of Citations per Publication by Country'
    print '----------------------------------------------'
    print citations_per_pub[0:20]
    print '\n'
    pubs.to_csv('analysed-data/pubs_by_country.csv')
    citations_filtered.to_csv('analysed-data/citations_by_country.csv')
    citations_per_pub.to_csv('analysed-data/citations_per_pub_by_country.csv')
def analyse_publications(ieee_data):
    """Print and save publication/citation counts by publication code and year.

    Writes three pivoted (code x year) CSVs under analysed-data/.

    Args:
        ieee_data: DataFrame with 'code', 'year' and 'citations' columns.
    """
    # Pivot to a code-by-year table of counts / sums.
    pubs = ieee_data.groupby(['code', 'year'])['citations'].count().unstack()
    citations = ieee_data.groupby(['code', 'year'])['citations'].sum().unstack()
    citations_per_pub = citations / pubs
    # FIX: Python 2 print statements converted to print() calls.
    print('Number of Publications by Code and Year')
    print('---------------------------------------')
    print(pubs)
    print('\n')
    print('Number of Citations by Code and Year')
    print('------------------------------------')
    print(citations)
    print('\n')
    print('Number of Citations per Publication by Code and Year')
    print('----------------------------------------------------')
    print(citations_per_pub)
    print('\n')
    pubs.to_csv('analysed-data/pubs_by_year_code.csv')
    citations.to_csv('analysed-data/citations_by_year_code.csv')
    citations_per_pub.to_csv('analysed-data/citations_per_pub_by_year_code.csv')
def analyse_tags(tags_data, years):
    """Print and save the top tags per year by pubs/citations/ratio.

    Tags with 10 or fewer publications are dropped. Writes three CSVs per
    year under analysed-data/.

    Args:
        tags_data: dict mapping year string -> DataFrame with 'pubs',
            'citations' and 'citations_per_pub' columns.
        years: iterable of year strings to process.
    """
    for year in years:
        tags_df = tags_data[year]
        tags_df = tags_df[tags_df['pubs'] > 10]
        # FIX: DataFrame.sort(columns=...) was removed from pandas;
        # sort_values(by=...) is the replacement (returns a new frame).
        pubs = tags_df.sort_values(by='pubs', ascending=False)
        citations = tags_df.sort_values(by='citations', ascending=False)
        citations_per_pub = tags_df.sort_values(by='citations_per_pub', ascending=False)
        # FIX: Python 2 print statements converted to print() calls.
        print('Top 20 Tags by Publications for ' + year)
        print('------------------------------------')
        print(pubs[0:20])
        print('\n')
        print('Top 20 Tags by Citations for ' + year)
        print('---------------------------------')
        print(citations[0:20])
        print('\n')
        print('Top 20 Tags by Citations per Publication for ' + year)
        print('-------------------------------------------------')
        print(citations_per_pub[0:20])
        print('\n')
        pubs.to_csv('analysed-data/pubs_by_tags_' + year + '.csv')
        citations.to_csv('analysed-data/citations_by_tags_' + year + '.csv')
        citations_per_pub.to_csv('analysed-data/citations_per_pub_by_tags_' + year + '.csv')
# Run the analysis pipeline when executed as a script.
if __name__ == "__main__":
    main()
# MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import torch
from torch import nn
import numpy as np
from smarts.core.agent import Agent
from ultra.utils.common import merge_discrete_action_spaces, to_3d_action, to_2d_action
import pathlib, os, copy
import ultra.adapters as adapters
from ultra.baselines.dqn.dqn.explore import EpsilonExplore
from ultra.baselines.dqn.dqn.network import DQNCNN, DQNWithSocialEncoder
from ultra.baselines.common.replay_buffer import ReplayBuffer
from ultra.baselines.common.social_vehicle_config import get_social_vehicle_configs
from ultra.baselines.common.yaml_loader import load_yaml
class DQNPolicy(Agent):
    """DQN agent for the ULTRA baselines.

    Supports either the default continuous action type (discretized into
    per-dimension throttle/steering bins and mapped back with
    `to_3d_action`) or the default discrete lane-action type, and either a
    vector (ego + social vehicles) or image observation type.
    """

    # Lane-level commands used by the default discrete action type.
    lane_actions = ["keep_lane", "slow_down", "change_lane_left", "change_lane_right"]

    def __init__(
        self,
        policy_params=None,
        checkpoint_dir=None,
    ):
        """Build networks, replay buffer and exploration schedule.

        Args:
            policy_params: dict of hyper-parameters (lr, seed, gamma,
                train_step, target_update, warmup, batch_size, use_ddqn,
                sticky_actions, adapter types, social vehicle config,
                replay buffer config).
            checkpoint_dir: if given, weights are loaded from it at the end.
        """
        self.policy_params = policy_params
        self.lr = float(policy_params["lr"])
        self.seed = int(policy_params["seed"])
        self.train_step = int(policy_params["train_step"])
        # > 1 -> hard target update every `target_update` steps;
        # < 1 -> soft (Polyak) update with this rate on every step.
        self.target_update = float(policy_params["target_update"])
        self.warmup = int(policy_params["warmup"])
        self.gamma = float(policy_params["gamma"])
        self.batch_size = int(policy_params["batch_size"])
        self.use_ddqn = policy_params["use_ddqn"]
        self.sticky_actions = int(policy_params["sticky_actions"])
        # Epsilon-greedy schedule: 1.0 -> 0.05 over 100k steps.
        self.epsilon_obj = EpsilonExplore(1.0, 0.05, 100000)
        self.step_count = 0
        self.update_count = 0
        self.num_updates = 0
        self.current_sticky = 0
        self.current_iteration = 0
        self.action_type = adapters.type_from_string(policy_params["action_type"])
        self.observation_type = adapters.type_from_string(
            policy_params["observation_type"]
        )
        self.reward_type = adapters.type_from_string(policy_params["reward_type"])
        if self.action_type == adapters.AdapterType.DefaultActionContinuous:
            # Discretize throttle and steering into independent bins.
            discrete_action_spaces = [
                np.asarray([-0.25, 0.0, 0.5, 0.75, 1.0]),
                np.asarray(
                    [-1.0, -0.75, -0.5, -0.25, -0.1, 0.0, 0.1, 0.25, 0.5, 0.75, 1.0]
                ),
            ]
            self.index2actions = [
                merge_discrete_action_spaces([discrete_action_space])[0]
                for discrete_action_space in discrete_action_spaces
            ]
            self.action2indexs = [
                merge_discrete_action_spaces([discrete_action_space])[1]
                for discrete_action_space in discrete_action_spaces
            ]
            self.merge_action_spaces = 0
            self.num_actions = [
                len(discrete_action_space)
                for discrete_action_space in discrete_action_spaces
            ]
            self.action_size = 2
            self.to_real_action = to_3d_action
        elif self.action_type == adapters.AdapterType.DefaultActionDiscrete:
            discrete_action_spaces = [[0], [1], [2], [3]]
            index_to_actions = [
                discrete_action_space.tolist()
                if not isinstance(discrete_action_space, list)
                else discrete_action_space
                for discrete_action_space in discrete_action_spaces
            ]
            action_to_indexs = {
                str(discrete_action): index
                for discrete_action, index in zip(
                    # FIX: np.int was removed from NumPy (deprecated 1.20,
                    # removed 1.24); the builtin int is the documented
                    # replacement and is what np.int aliased.
                    index_to_actions, np.arange(len(index_to_actions)).astype(int)
                )
            }
            self.index2actions = [index_to_actions]
            self.action2indexs = [action_to_indexs]
            self.merge_action_spaces = -1
            self.num_actions = [len(index_to_actions)]
            self.action_size = 1
            self.to_real_action = lambda action: self.lane_actions[action[0]]
        else:
            raise Exception(
                f"DQN baseline does not support the '{self.action_type}' action type."
            )
        if self.observation_type == adapters.AdapterType.DefaultObservationVector:
            observation_space = adapters.space_from_type(self.observation_type)
            low_dim_states_size = observation_space["low_dim_states"].shape[0]
            social_capacity = observation_space["social_vehicles"].shape[0]
            num_social_features = observation_space["social_vehicles"].shape[1]
            # Get information to build the encoder.
            encoder_key = policy_params["social_vehicles"]["encoder_key"]
            social_policy_hidden_units = int(
                policy_params["social_vehicles"].get("social_policy_hidden_units", 0)
            )
            social_policy_init_std = int(
                policy_params["social_vehicles"].get("social_policy_init_std", 0)
            )
            social_vehicle_config = get_social_vehicle_configs(
                encoder_key=encoder_key,
                num_social_features=num_social_features,
                social_capacity=social_capacity,
                seed=self.seed,
                social_policy_hidden_units=social_policy_hidden_units,
                social_policy_init_std=social_policy_init_std,
            )
            social_vehicle_encoder = social_vehicle_config["encoder"]
            social_feature_encoder_class = social_vehicle_encoder[
                "social_feature_encoder_class"
            ]
            social_feature_encoder_params = social_vehicle_encoder[
                "social_feature_encoder_params"
            ]
            # Calculate the state size based on the number of features
            # (ego + social).
            state_size = low_dim_states_size
            if social_feature_encoder_class:
                state_size += social_feature_encoder_class(
                    **social_feature_encoder_params
                ).output_dim
            else:
                state_size += social_capacity * num_social_features
            # Add the action size to account for the previous action.
            state_size += self.action_size
            network_class = DQNWithSocialEncoder
            network_params = {
                "num_actions": self.num_actions,
                "state_size": state_size,
                "social_feature_encoder_class": social_feature_encoder_class,
                "social_feature_encoder_params": social_feature_encoder_params,
            }
        elif self.observation_type == adapters.AdapterType.DefaultObservationImage:
            observation_space = adapters.space_from_type(self.observation_type)
            stack_size = observation_space.shape[0]
            image_shape = (observation_space.shape[1], observation_space.shape[2])
            network_class = DQNCNN
            network_params = {
                "n_in_channels": stack_size,
                "image_dim": image_shape,
                "num_actions": self.num_actions,
            }
        else:
            raise Exception(
                f"DQN baseline does not support the '{self.observation_type}' "
                f"observation type."
            )
        self.prev_action = np.zeros(self.action_size)
        self.checkpoint_dir = checkpoint_dir
        torch.manual_seed(self.seed)
        self.device_name = "cuda:0" if torch.cuda.is_available() else "cpu"
        self.device = torch.device(self.device_name)
        self.online_q_network = network_class(**network_params).to(self.device)
        self.target_q_network = network_class(**network_params).to(self.device)
        self.update_target_network()
        self.optimizers = torch.optim.Adam(
            params=self.online_q_network.parameters(), lr=self.lr
        )
        self.loss_func = nn.MSELoss(reduction="none")
        self.replay = ReplayBuffer(
            buffer_size=int(policy_params["replay_buffer"]["buffer_size"]),
            batch_size=int(policy_params["replay_buffer"]["batch_size"]),
            observation_type=self.observation_type,
            device_name=self.device_name,
        )
        self.reset()
        if self.checkpoint_dir:
            self.load(self.checkpoint_dir)

    def lane_action_to_index(self, state):
        """Return a copy of `state` with its action replaced by a lane index.

        The initial 3-d zero action (before any lane action was taken) maps
        to index 0; otherwise the action's index in `lane_actions` is used.
        """
        state = state.copy()
        if (
            len(state["action"]) == 3
            and (state["action"] == np.asarray([0, 0, 0])).all()
        ):  # initial action
            state["action"] = np.asarray([0])
        else:
            state["action"] = self.lane_actions.index(state["action"])
        return state

    def reset(self):
        """Clear per-episode bookkeeping."""
        self.eps_throttles = []
        self.eps_steers = []
        self.eps_step = 0
        self.current_sticky = 0

    def soft_update(self, target, src, tau):
        """Polyak-average `src` parameters into `target` with rate `tau`."""
        for target_param, param in zip(target.parameters(), src.parameters()):
            target_param.detach_()
            target_param.copy_(target_param * (1.0 - tau) + param * tau)

    def update_target_network(self):
        """Hard-copy the online network weights into the target network."""
        self.target_q_network.load_state_dict(self.online_q_network.state_dict().copy())

    def act(self, *args, **kwargs):
        """Return the real (environment) action for this step.

        A freshly chosen action is repeated for `sticky_actions`
        consecutive calls before a new one is picked.
        """
        if self.current_sticky == 0:
            self.action = self._act(*args, **kwargs)
        self.current_sticky = (self.current_sticky + 1) % self.sticky_actions
        self.current_iteration += 1
        return self.to_real_action(self.action)

    def _act(self, state, explore=True):
        """Epsilon-greedy action selection.

        Returns the concatenated per-action-space values as a numpy array
        (indices are resolved through `index2actions`).
        """
        epsilon = self.epsilon_obj.get_epsilon()
        if not explore or np.random.rand() > epsilon:
            # Greedy branch: evaluate the online network on a batch of one.
            state = copy.deepcopy(state)
            if self.observation_type == adapters.AdapterType.DefaultObservationVector:
                # Default vector observation type: append the previous
                # action to the ego features, then batch both tensors.
                state["low_dim_states"] = np.float32(
                    np.append(state["low_dim_states"], self.prev_action)
                )
                state["social_vehicles"] = (
                    torch.from_numpy(state["social_vehicles"])
                    .unsqueeze(0)
                    .to(self.device)
                )
                state["low_dim_states"] = (
                    torch.from_numpy(state["low_dim_states"])
                    .unsqueeze(0)
                    .to(self.device)
                )
            else:
                # Default image observation type.
                state = torch.from_numpy(state).unsqueeze(0).to(self.device)
            self.online_q_network.eval()
            with torch.no_grad():
                qs = self.online_q_network(state)
            qs = [q.data.cpu().numpy().flatten() for q in qs]
            inds = [np.argmax(q) for q in qs]
        else:
            # Exploration branch: uniform random index per action space.
            inds = [np.random.randint(num_action) for num_action in self.num_actions]
        action = []
        for j, ind in enumerate(inds):
            action.extend(self.index2actions[j][ind])
        self.epsilon_obj.step()
        self.eps_step += 1
        action = np.asarray(action)
        return action

    def save(self, model_dir):
        """Write online and target network weights into `model_dir`."""
        model_dir = pathlib.Path(model_dir)
        torch.save(self.online_q_network.state_dict(), model_dir / "online.pth")
        torch.save(self.target_q_network.state_dict(), model_dir / "target.pth")

    def load(self, model_dir, cpu=False):
        """Load both networks' weights from `model_dir`.

        Args:
            model_dir: directory containing online.pth and target.pth.
            cpu: if True, map the weights onto the CPU.
        """
        model_dir = pathlib.Path(model_dir)
        print("loading from :", model_dir)
        map_location = None
        if cpu:
            map_location = torch.device("cpu")
        self.online_q_network.load_state_dict(
            torch.load(model_dir / "online.pth", map_location=map_location)
        )
        self.target_q_network.load_state_dict(
            torch.load(model_dir / "target.pth", map_location=map_location)
        )
        print("Model loaded")

    def step(self, state, action, reward, next_state, done, info, others=None):
        """Store a transition, then train / sync the target net on schedule.

        Returns the logging dict from `learn()` (empty when no update
        happened this step).
        """
        # Don't treat a timeout (max episode steps reached) as terminal.
        max_steps_reached = info["logs"]["events"].reached_max_episode_steps
        if max_steps_reached:
            done = False
        if self.action_type == adapters.AdapterType.DefaultActionContinuous:
            action = to_2d_action(action)
            _action = (
                [[e] for e in action]
                if not self.merge_action_spaces
                else [action.tolist()]
            )
            action_index = np.asarray(
                [
                    action2index[str(e)]
                    for action2index, e in zip(self.action2indexs, _action)
                ]
            )
        else:
            action_index = self.lane_actions.index(action)
            action = action_index
        self.replay.add(
            state=state,
            action=action_index,
            reward=reward,
            next_state=next_state,
            done=done,
            others=others,
            prev_action=self.prev_action,
        )
        # Train every `train_step` steps once the buffer has a full batch
        # and the warmup threshold has been met.
        if (
            self.step_count % self.train_step == 0
            and len(self.replay) >= self.batch_size
            and (self.warmup is None or len(self.replay) >= self.warmup)
        ):
            out = self.learn()
            self.update_count += 1
        else:
            out = {}
        # Hard target update on a step schedule, or soft update when
        # `target_update` is a rate below 1.
        if self.target_update > 1 and self.step_count % self.target_update == 0:
            self.update_target_network()
        elif self.target_update < 1.0:
            self.soft_update(
                self.target_q_network, self.online_q_network, self.target_update
            )
        self.step_count += 1
        self.prev_action = action
        return out

    def learn(self):
        """Sample a batch and take one (double-)DQN gradient step.

        Returns a dict of scalar logging entries (per-space TD losses,
        auxiliary losses, total loss).
        """
        states, actions, rewards, next_states, dones, others = self.replay.sample(
            device=self.device
        )
        if not self.merge_action_spaces:
            actions = torch.chunk(actions, len(self.num_actions), -1)
        else:
            actions = [actions]
        self.target_q_network.eval()
        with torch.no_grad():
            qs_next_target = self.target_q_network(next_states)
        if self.use_ddqn:
            # Double DQN: pick argmax actions with the online network,
            # evaluate them with the target network.
            self.online_q_network.eval()
            with torch.no_grad():
                qs_next_online = self.online_q_network(next_states)
            next_actions = [
                torch.argmax(q_next_online, dim=1, keepdim=True)
                for q_next_online in qs_next_online
            ]
        else:
            next_actions = [
                torch.argmax(q_next_target, dim=1, keepdim=True)
                for q_next_target in qs_next_target
            ]
        qs_next_target = [
            torch.gather(q_next_target, 1, next_action)
            for q_next_target, next_action in zip(qs_next_target, next_actions)
        ]
        self.online_q_network.train()
        qs, aux_losses = self.online_q_network(states, training=True)
        qs = [torch.gather(q, 1, action.long()) for q, action in zip(qs, actions)]
        # Bellman targets; (1 - dones) zeroes the bootstrap on terminals.
        qs_target_value = [
            rewards + self.gamma * (1 - dones) * q_next_target
            for q_next_target in qs_next_target
        ]
        td_loss = [
            self.loss_func(q, q_target_value).mean()
            for q, q_target_value in zip(qs, qs_target_value)
        ]
        mean_td_loss = sum(td_loss) / len(td_loss)
        loss = mean_td_loss + sum(
            [e["value"] * e["weight"] for e in aux_losses.values()]
        )
        self.optimizers.zero_grad()
        loss.backward()
        self.optimizers.step()
        out = {}
        out.update(
            {
                "loss/td{}".format(j): {
                    "type": "scalar",
                    "data": td_loss[j].data.cpu().numpy(),
                    "freq": 10,
                }
                for j in range(len(td_loss))
            }
        )
        out.update(
            {
                "loss/{}".format(k): {
                    "type": "scalar",
                    "data": v["value"],  # .detach().cpu().numpy(),
                    "freq": 10,
                }
                for k, v in aux_losses.items()
            }
        )
        out.update({"loss/all": {"type": "scalar", "data": loss, "freq": 10}})
        self.num_updates += 1
        return out
|
import zipfile
import tempfile
class ZipIter:
    """File-like wrapper that reads from or writes into a zip archive.

    In read mode the first archive member is extracted into the current
    directory and opened. In write mode a plain file is written, then on
    close() it is zipped into ``<filename>.zip`` and the plain file is
    removed. All other attribute access (read, write, iteration, ...) is
    delegated to the underlying file object.
    """

    def __init__(self, filename, mode="r"):
        if mode not in ("r", "rb", "w", "wb"):
            raise Exception("Unknown mode")
        self.mode = mode
        if mode[0] == "r":  # READ MODE
            # FIX: zipfile.ZipFile only accepts 'r'/'w'/'x'/'a' -- passing
            # "rb"/"wb" raised ValueError; use the bare mode letter.
            self.zipfolder = zipfile.ZipFile(filename, "r")
            self.name = self.zipfolder.namelist()[0]
            self.zipfolder.extract(self.name)
            self.zipfolder.close()
            self.f = open(self.name, mode)
        if mode[0] == "w":  # WRITE MODE
            self.zipfolder = zipfile.ZipFile(filename + ".zip", "w")
            self.name = filename
            self.f = open(filename, mode)

    def __iter__(self):
        return self

    def __next__(self):
        # FIX: dunder lookup bypasses __getattr__, so iteration must be
        # delegated explicitly (it silently broke under Python 3).
        return next(self.f)

    def __getattr__(self, attr):
        # FIX: dict.has_key() was removed in Python 3; use `in`.
        if attr in self.__dict__:
            return self.__dict__[attr]
        return getattr(self.f, attr)

    def close(self):
        """Close the file; in write mode, archive it and delete the original."""
        self.f.close()
        if self.mode[0] == "w":
            self.zipfolder.write(self.name, self.name.rsplit('/', 1)[-1])
            self.zipfolder.close()
            import os
            os.remove(self.name)
"""
UCR-FordA dataset
"""
import os
import numpy as np
import cv2
from tensorflow.keras.utils import to_categorical
def __get_pic(y, module_path):
    """Return the grayscale digit picture for class label `y`, scaled to [0, 1].

    Labels 1-9 map to num_1.png ... num_9.png and label 10 maps to
    num_0.png (class 10 represents the digit zero). Any other label
    returns None, matching the original if/elif chain.
    """
    if not 1 <= y <= 10:
        return None
    digit = 0 if y == 10 else y
    path = module_path + '/datasets/pics/arabic/num_' + str(digit) + '.png'
    return cv2.imread(path, cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
def __class_to_pic(Y, module_path):
    """Map each class label in `Y` to its digit image.

    Returns an array of shape (len(Y), H, W, 1).
    """
    images = [__get_pic(label, module_path) for label in Y]
    return np.expand_dims(np.array(images), 3)
def load_data():
    """
    Load and return the UCR-FordA dataset.
    ============== ==============
    Training Samples total 6600
    Testing Samples total 2200
    Number of time steps 93
    Dimensionality 13
    Number of targets 10
    ============== ==============
    # Returns
    Tuple of Numpy arrays: (x_train, y_train, pic_train), (x_test, y_test, pic_test)
    """
    module_path = os.getcwd()
    # Load the raw arrays from disk.
    x_tr = np.load(module_path + '/datasets/data/arabic/x_train.npy')
    labels_tr = np.load(module_path + '/datasets/data/arabic/y_train.npy')
    x_te = np.load(module_path + '/datasets/data/arabic/x_test.npy')
    labels_te = np.load(module_path + '/datasets/data/arabic/y_test.npy')
    # Render each class label as its digit picture (before shifting labels,
    # since the picture lookup expects labels 1..10).
    pic_tr = __class_to_pic(labels_tr, module_path)
    pic_te = __class_to_pic(labels_te, module_path)
    # Shift labels from 1..10 down to 0..9.
    return (x_tr, labels_tr - 1, pic_tr), (x_te, labels_te - 1, pic_te)
|
import csv
import re
# Module-level state used to pass the AHP priority vectors out of
# consistencyCheck() inside runAHP(); the vectors are stored in
# call order, tracked by globalFlag.
globalPriority = []
globalTechPriority = []
# 0 = nothing stored yet, 1 = criteria priorities stored, 2 = tech stored.
globalFlag = 0
# Expected keys in `data`: inputPriceMin, inputPriceMax, inputHeight,
# inputTypeWeights, inputPriorities, inputTechPriorities
def runAHP(data):
    """Rank bicycles for a user with the Analytic Hierarchy Process (AHP).

    `data` must provide: inputPriceMin, inputPriceMax, inputHeight,
    inputTypeWeights (mountain/hybrid/road), inputPriorities (10 pairwise
    criterion weights) and inputTechPriorities (3 pairwise tech weights).
    Reads newdata.csv, hybrid.csv, mountain.csv, road.csv and
    bicyclesurlsave.csv from the working directory, and returns the 7
    best-scoring bicycles (dicts with score breakdown and shop url).

    NOTE(review): the priority vectors are smuggled out of
    consistencyCheck() through module-level globals keyed on call order
    (globalFlag), so after the first call globalFlag stays at 2 and the
    globals are never refreshed -- presumably only safe to call once per
    process; confirm with callers.
    """
    global globalFlag
    global globalPriority
    global globalTechPriority
    userPriceMin = data['inputPriceMin']
    userPriceMax = data['inputPriceMax']
    # Represent the user's budget by the midpoint of the requested range.
    userPrice = int((userPriceMin+userPriceMax)/2)
    # userPrice = 1999
    userHeight = data['inputHeight']
    def userFrameFinder(height):
        # Map rider height (cm) to a frame-size category.
        if(height < 148):
            return 'XXS'
        elif(height <= 158):
            return 'XS'
        elif(height <= 168):
            return 'S'
        elif(height <= 178):
            return 'M'
        elif(height <= 185):
            return 'L'
        elif(height <= 193):
            return 'XL'
        elif(height > 193):
            return 'XXL'
    userFrame = userFrameFinder(int(userHeight))
    userTypeWeights = data['inputTypeWeights'] # mountain, hybrid, road
    inputPriorities = data['inputPriorities']
    inputTechPriorities = data['inputTechPriorities']
    # Pairwise importance of the five main criteria (AHP scale values).
    priorities = {
        'priceWeight': inputPriorities[0],
        'priceType': inputPriorities[1],
        'priceFrame': inputPriorities[2],
        'priceTech': inputPriorities[3],
        'weightType': inputPriorities[4],
        'weightFrame': inputPriorities[5],
        'weightTech': inputPriorities[6],
        'typeFrame': inputPriorities[7],
        'typeTech': inputPriorities[8],
        'frameTech': inputPriorities[9]
    }
    # Pairwise importance of the three technical sub-criteria.
    techPriorities = {
        'brakeTransmission': inputTechPriorities[0],
        'brakeSuspension': inputTechPriorities[1],
        'transmissionSuspension': inputTechPriorities[2]
    }
    # Reciprocal pairwise comparison matrices (a_ji = 1 / a_ij).
    priorityMatrix = [ # price weight type frame tech
        [ 1, priorities['priceWeight'], priorities['priceType'], priorities['priceFrame'], priorities['priceTech']], # price
        [ 1/priorities['priceWeight'], 1, priorities['weightType'], priorities['weightFrame'], priorities['weightTech']], # weight
        [ 1/priorities['priceType'], 1/priorities['weightType'], 1, priorities['typeFrame'], priorities['typeTech']], # type
        [ 1/priorities['priceFrame'], 1/priorities['weightFrame'], 1/priorities['typeFrame'], 1, priorities['frameTech']], # frame
        [ 1/priorities['priceTech'], 1/priorities['weightTech'], 1/priorities['typeTech'], 1/priorities['frameTech'], 1] # tech
    ]
    techMatrix = [ #brake transmission suspension
        [1, techPriorities['brakeTransmission'], techPriorities['brakeSuspension'] ], # brake
        [1/techPriorities['brakeTransmission'], 1, techPriorities['transmissionSuspension']], # transmission
        [1/techPriorities['brakeSuspension'], 1/techPriorities['transmissionSuspension'], 1] # suspension
    ]
    typeMatrix = [
        [1, userTypeWeights[0]/userTypeWeights[1], userTypeWeights[0]/userTypeWeights[2]],
        [userTypeWeights[1]/userTypeWeights[0], 1, userTypeWeights[1]/userTypeWeights[2]],
        [userTypeWeights[2]/userTypeWeights[0], userTypeWeights[2]/userTypeWeights[1], 1]
    ]
    def consistencyCheck(matrix):
        # AHP consistency test: derive the priority vector, then verify the
        # consistency index CI = (lambda_max - n) / (n - 1) is below 0.1.
        # Side effect: stores the priority vector in a module-level global
        # selected by globalFlag (first call -> criteria, second -> tech).
        # Column sums of the pairwise matrix.
        matrixSum = []
        for i in range(len(matrix)):
            sum = 0
            for j in range(len(matrix)):
                sum += matrix[j][i]
            matrixSum.append(sum)
        # NOTE(review): .copy() is shallow, so normalising
        # standardizedMatrix also mutates `matrix`'s rows in place --
        # presumably harmless because the matrix is not reused after this
        # call, but confirm before refactoring.
        standardizedMatrix = matrix.copy()
        for i in range(len(matrix)):
            for j in range(len(matrix)):
                standardizedMatrix[i][j] = matrix[i][j]/matrixSum[j]
        # Priority vector = row means of the standardized matrix.
        priorityVector = []
        for i in standardizedMatrix:
            sum = 0
            for j in i:
                sum += j
            priorityVector.append(sum/len(matrix))
        global globalFlag
        global globalPriority
        global globalTechPriority
        # First call stores the criteria priorities, second call the tech
        # priorities -- call order matters.
        if(globalFlag == 0):
            global globalPriority
            globalPriority = priorityVector.copy()
            globalFlag = 1
        elif(globalFlag == 1):
            global globalTechPriority
            globalTechPriority = priorityVector.copy()
            globalFlag = 2
        # The priority vector must sum to 1 (within rounding).
        vectorCheck = 0
        for i in priorityVector:
            vectorCheck += i
        if(round(vectorCheck, 2) != 1):
            print('ERROR PAIR-WISE PRIORITY VECTOR CALCULATION (NOT EQUAL 1) == ', vectorCheck)
            exit()
        # lambda_max approximated as sum(columnSum_i * priority_i).
        lambdaMax = 0
        for i in range(len(matrixSum)):
            lambdaMax += matrixSum[i]*priorityVector[i]
        CI = (lambdaMax - len(matrixSum)) / (len(matrixSum) - 1)
        # CR = CI / 0.9
        if(CI<0.1):
            return bool(True)
        else:
            print("ERROR CONSISTENCY")
            exit()
        # NOTE(review): unreachable -- both branches above return or exit.
        return bool(False)
    # Main AHP check; must run before the tech check -- IMPORTANT, because
    # the priority vectors are captured in function-call order (globalFlag).
    if(not consistencyCheck(priorityMatrix)):
        print('It is NOT CONSISTENT -- Priorities')
        exit()
    if(not consistencyCheck(techMatrix)):
        print('It is NOT CONSISTENT -- Technical')
        exit()
    def criteriaAHP(matrix):
        # Same priority-vector computation as consistencyCheck(), but
        # without the consistency test or global side effects; returns the
        # vector of row means of the column-normalised matrix.
        matrixSum = []
        for i in range(len(matrix)):
            sum = 0
            for j in range(len(matrix)):
                sum += matrix[j][i]
            matrixSum.append(sum)
        # NOTE(review): shallow copy -- mutates `matrix` rows, see above.
        standardizedMatrix = matrix.copy()
        for i in range(len(matrix)):
            for j in range(len(matrix)):
                standardizedMatrix[i][j] = matrix[i][j]/matrixSum[j]
        priorityVector = []
        for i in standardizedMatrix:
            sum = 0
            for j in i:
                sum += j
            priorityVector.append(sum/len(standardizedMatrix))
        # Sanity check: the vector should sum to 1.
        check = 0
        for i in priorityVector:
            check += i
        if(round(check, 2) == 1):
            return priorityVector
        else:
            print('CRITERIA AHP ERROR')
            return priorityVector
    def inchConverter(inch):
        # Map a frame size in inches to a size category.
        if(float(inch) <= 12):
            return 'XXS'
        if(float(inch) <= 14):
            return 'XS'
        elif(float(inch) <= 16):
            return 'S'
        elif(float(inch) <= 18):
            return 'M'
        elif(float(inch) <= 20):
            return 'L'
        elif(float(inch) <= 22):
            return 'XL'
        elif(float(inch) > 22):
            return 'XXL'
        else:
            raise Exception('inch converter error ==> ',inch)
    def frameFixer(frame):
        # Normalise a raw "Frame Size" cell into a list of size categories;
        # '------' (missing) is passed through unchanged.
        if(frame == '------'):
            return(frame)
        inches = re.findall("\d\d.\d\d\"|\d\d.\d\"|\d\d\"", frame)
        returnAr = []
        if(inches):
            for inch in inches:
                returnAr.append(inchConverter(float(inch.replace("\"",""))))
        else:
            # No inch markers: assume a comma-separated category list.
            categories = frame.split(",")
            for i in categories:
                returnAr.append(i.strip())
        return returnAr
    # Ordinal encoding of the size categories for distance computation.
    frametoNumber = {
        'XXS': 0,
        'XS': 1,
        'S': 2,
        'M': 3,
        'L': 4,
        'XL': 5,
        'XXL': 6
    }
    def frameSearcher(frameAr, search):
        # Distance (in size steps) from the closest available frame size.
        min = 100
        search = frametoNumber[search]
        for i in frameAr:
            if(abs(frametoNumber[i]-search)<min):
                min = abs(frametoNumber[i]-search)
            if(min==0):
                return min
        return min
    def frameCalculate(left, right, userFrame):
        # Pairwise frame score: the bike whose available sizes are closer
        # to the user's size gets the larger ratio.
        leftMin = frameSearcher(left, userFrame)
        rightMin = frameSearcher(right, userFrame)
        leftMin = 7 - leftMin
        rightMin = 7 - rightMin
        return(leftMin / rightMin)
    # Data is read from CSVs in the working directory: the bicycle
    # catalogue plus the hybrid/mountain/road score tables and URL list.
    reader = csv.DictReader(open('newdata.csv', "r", errors='ignore'), delimiter=';')
    bicycles = []
    mainData = next(reader, bool(False))
    while(mainData):
        bicycles.append(mainData)
        mainData = next(reader, bool(False))
    reader = csv.DictReader(open('hybrid.csv', "r", errors='ignore'), delimiter=';')
    hybridScores = []
    score = next(reader, bool(False))
    while(score):
        hybridScores.append(score)
        score = next(reader, bool(False))
    reader = csv.DictReader(open('mountain.csv', "r", errors='ignore'), delimiter=';')
    mountainScores = []
    score = next(reader, bool(False))
    while(score):
        mountainScores.append(score)
        score = next(reader, bool(False))
    reader = csv.DictReader(open('road.csv', "r", errors='ignore'), delimiter=';')
    roadScores = []
    score = next(reader, bool(False))
    while(score):
        roadScores.append(score)
        score = next(reader, bool(False))
    reader = csv.DictReader(open('bicyclesurlsave.csv', "r", errors='ignore'), delimiter=',')
    urlList = []
    score = next(reader, bool(False))
    while(score):
        urlList.append(score)
        score = next(reader, bool(False))
    def techScoreFinder(type, pieceType, pieceName):
        # Look up the score for a brake/transmission/suspension component
        # in the score table matching the bicycle type.
        scoreTable = []
        if(type == 'Mountain'):
            scoreTable = mountainScores
        elif(type == 'Hybrid - City'):
            scoreTable = hybridScores
        elif(type == 'Road'):
            scoreTable = roadScores
        else:
            print('ERROR ON techScoreFinder. Type is ==> ', type)
            exit()
        foundFlag = 0
        # Score to assign when the component data is missing.
        if(pieceName == '------' or pieceName == 'N/A' or pieceName == 'n/a'):
            return 2
        if(pieceType == 'transmission'):
            for i in scoreTable:
                if i['transmission'].lower() == pieceName.lower():
                    foundFlag = 1
                    return i['tScore']
        elif(pieceType == 'suspension'):
            # Road-type data is faulty: 1 was entered where 4 should be.
            if(type == 'Road'):
                return 1
            for i in scoreTable:
                if i['suspension'].lower() == pieceName.lower():
                    foundFlag = 1
                    return i['sScore']
        elif(pieceType == 'brake'):
            for i in scoreTable:
                if i['brake'].lower() == pieceName.lower():
                    foundFlag = 1
                    return i['bScore']
        else:
            print('ERROR ON techScoreFinder. pieceType is ==> ', pieceType, ' //// foundFlag is ==>',foundFlag)
            exit()
        # Reached only when the component was not found in the table.
        if(foundFlag == 0):
            print('ERROR ON techScoreFinder. type ==> ',type , '//// pieceType ==> ', pieceType, ' //// pieceName ==> ', pieceName)
            exit()
    # Pairwise comparison of every bicycle against every other, one
    # comparison matrix per criterion.
    typeCompare = []
    priceCompare = []
    weightCompare = []
    weightSum = 0
    frameCompare = []
    techCompare = []
    brakeCompare = []
    transmissionCompare = []
    suspensionCompare = []
    for i in range(len(bicycles)):
        typeCompare.append([])
        priceCompare.append([])
        frameCompare.append([])
        brakeCompare.append([])
        transmissionCompare.append([])
        suspensionCompare.append([])
        for j in range(len(bicycles)):
            # TYPE: look up the user's type preference ratio.
            leftType = bicycles[i]['Type']
            rightType = bicycles[j]['Type']
            leftTypeIndex = 0
            rightTypeIndex = 0
            if(leftType == 'Mountain'):
                leftTypeIndex = 0
            elif(leftType == 'Hybrid - City'):
                leftTypeIndex = 1
            elif(leftType == 'Road'):
                leftTypeIndex = 2
            if(rightType == 'Mountain'):
                rightTypeIndex = 0
            elif(rightType == 'Hybrid - City'):
                rightTypeIndex = 1
            elif(rightType == 'Road'):
                rightTypeIndex = 2
            typeCompare[i].append(typeMatrix[leftTypeIndex][rightTypeIndex])
            # PRICE: closer to the user's budget -> better ratio.
            leftPrice = float(bicycles[i]['Price'].replace(',','.'))
            rightPrice = float(bicycles[j]['Price'].replace(',','.'))
            leftPriceDiff = (abs(leftPrice - userPrice) / 300) + 1
            # Could maybe truncate to int here.
            rightPriceDiff = (abs(rightPrice - userPrice) / 300) + 1
            priceCompare[i].append(rightPriceDiff/leftPriceDiff)
            # FRAME: missing sizes ('------') get a fixed 1/5 or 5 ratio.
            leftFrame = frameFixer(bicycles[i]['Frame Size'])
            rightFrame = frameFixer(bicycles[j]['Frame Size'])
            if(leftFrame == '------'):
                if(rightFrame == '------'):
                    frameCompare[i].append(1)
                else:
                    frameCompare[i].append(1/5)
            elif(rightFrame == '------'):
                # NOTE(review): leftFrame cannot be '------' in this elif,
                # so the first branch below is unreachable.
                if(leftFrame == '------'):
                    frameCompare[i].append(1)
                else:
                    frameCompare[i].append(5)
            else:
                frameCompare[i].append(frameCalculate(leftFrame, rightFrame, userFrame))
            # Technical components: the component scores are compared
            # directly against each other; works okay but could be better.
            leftBrake = bicycles[i]['Brakes']
            rightBrake = bicycles[j]['Brakes']
            leftScore = int(techScoreFinder(leftType, 'brake', leftBrake))
            rightScore = int(techScoreFinder(rightType, 'brake', rightBrake))
            if(leftScore > rightScore):
                brakeCompare[i].append(leftScore-rightScore)
            elif(leftScore == rightScore):
                brakeCompare[i].append(1)
            elif(leftScore < rightScore):
                brakeCompare[i].append(1/(rightScore-leftScore))
            leftSuspension = bicycles[i]['Fork']
            rightSuspension = bicycles[j]['Fork']
            leftScore = int(techScoreFinder(leftType, 'suspension', leftSuspension))
            rightScore = int(techScoreFinder(rightType, 'suspension', rightSuspension))
            if(leftScore > rightScore):
                suspensionCompare[i].append(leftScore-rightScore)
            elif(leftScore == rightScore):
                suspensionCompare[i].append(1)
            elif(leftScore < rightScore):
                suspensionCompare[i].append(1/(rightScore-leftScore))
            leftTransmission = bicycles[i]['Rear Derailleur']
            rightTransmission = bicycles[j]['Rear Derailleur']
            leftSpeed = bicycles[i]['Speed']
            rightSpeed = bicycles[j]['Speed']
            leftScore = int(techScoreFinder(leftType, 'transmission', leftTransmission))
            rightScore = int(techScoreFinder(rightType, 'transmission', rightTransmission))
            if(leftScore > rightScore):
                transmissionCompare[i].append(leftScore-rightScore)
            elif(leftScore == rightScore):
                transmissionCompare[i].append(1)
            elif(leftScore < rightScore):
                transmissionCompare[i].append(1/(rightScore-leftScore))
        # WEIGHT: parse the 'Weight ' cell or fall back to per-type defaults.
        bicycle = bicycles[i]
        bicycleWeight = bicycle['Weight ']
        if(re.search("^\d+.\d+",bicycles[i]['Weight '])):
            bicycleWeight = float(re.search("^\d+.\d+",bicycles[i]['Weight ']).group().replace(',','.'))
        else:
            # Defaults for missing weights. NOTE(review): the original
            # comment said mountain 15 / road 10 / hybrid 18, but the code
            # uses 17 / 12 / 20 -- confirm which is intended.
            if(bicycle['Type'] == 'Mountain'):
                bicycleWeight = 17
            elif(bicycle['Type'] == 'Road'):
                bicycleWeight = 12
            elif(bicycle['Type'] == 'Hybrid - City'):
                bicycleWeight = 20
            else:
                bicycleWeight = 16
        weightCompare.append(bicycleWeight)
        weightSum += bicycleWeight
    # Done comparing; normalise the weights (lighter -> higher score) and
    # reduce each comparison matrix to a priority vector.
    for i in range(len(weightCompare)):
        weightCompare[i] = 1.0-(weightCompare[i]/weightSum)
    brakeCompareAvg = criteriaAHP(brakeCompare)
    transmissionCompareAvg = criteriaAHP(transmissionCompare)
    suspensionCompareAvg = criteriaAHP(suspensionCompare)
    priceCompareAvg = criteriaAHP(priceCompare)
    typeCompareAvg = criteriaAHP(typeCompare)
    weightCompareAvg = weightCompare
    frameCompareAvg = criteriaAHP(frameCompare)
    # Combine the three tech sub-criteria with the tech priority vector.
    techCompareAvg = []
    for i in range(len(brakeCompareAvg)):
        techCompareAvg.append((brakeCompareAvg[i]*globalTechPriority[0]) + (transmissionCompareAvg[i]*globalTechPriority[1]) + (suspensionCompareAvg[i]*globalTechPriority[2]))
    # Scale every criterion score by the main criteria priorities.
    for i in range(len(bicycles)):
        priceCompareAvg[i] = priceCompareAvg[i]*globalPriority[0]
        weightCompareAvg[i] = weightCompareAvg[i]*globalPriority[1]
        typeCompareAvg[i] = typeCompareAvg[i]*globalPriority[2]
        frameCompareAvg[i] = frameCompareAvg[i]*globalPriority[3]
        techCompareAvg[i] = techCompareAvg[i]*globalPriority[4]
    endScores = []
    for i in range(len(bicycles)):
        endScores.append(priceCompareAvg[i]+weightCompareAvg[i]+typeCompareAvg[i]+frameCompareAvg[i]+techCompareAvg[i])
    # checksum = 0
    # for i in endScores:
    #     checksum += i
    # print(round(checksum, 2), ' -- ', checksum)
    # Attach the score breakdown to each bicycle record.
    for i in range(len(bicycles)):
        bicycles[i]['Score'] = endScores[i]
        bicycles[i]['Price Score'] = priceCompareAvg[i]
        bicycles[i]['Weight Score'] = weightCompareAvg[i]
        bicycles[i]['Type Score'] = typeCompareAvg[i]
        bicycles[i]['Frame Size Score'] = frameCompareAvg[i]
        bicycles[i]['Technical Score'] = techCompareAvg[i]
    # Sort by total score and return the top 7 with shop URLs attached.
    endList = sorted(bicycles, key=lambda i: i['Score'],reverse=True)
    returnList = []
    for i in range(7):
        returnList.append(endList[i])
    for i in returnList:
        bikeUrl = ''
        for j in urlList:
            if(j['title']==(i['Brand']+' '+i['Model'])):
                bikeUrl = j['url']
                break
        i['url'] = bikeUrl
    # print('returnList created')
    return returnList
# print(f'User Input:\nType: {userTypeWeights}, Price: {userPrice}, Frame: {userFrame}\n\n')
# j = 0
# for i in bicycles:
# print(i['Brand'],', ', i['Model'])
# print(i['Type'],' // ', typeCompareAvg[j])
# print(i['Price'],' // ', priceCompareAvg[j])
# print(i['Weight '], ' // ', weightCompareAvg[j])
# try:
# print(frameSearcher(frameFixer(i['Frame Size']), userFrame), ' // ', frameCompareAvg[j])
# except:
# print(i['Frame Size'],' // ', frameCompareAvg[j])
# print('\n\n')
# j+=1
# for i in range(10):
# print('Brand == ', endList[i]['Brand'],'Model == ', endList[i]['Model'],' /// ','Score == ', endList[i]['Score'])
|
# --- Dictionaries ----------------------------------------------------------
# A single patient record stored as a plain dictionary.
patient = {'name': 'Sameeksha', 'age': 25, 'disease': 'Alzeimers',
           'therapy': 'drug b', 'response': 'True'}

# Iterating a dictionary yields its keys; the value is looked up per key.
for field in patient:
    print(field, patient[field])

# A dictionary whose values are parallel lists acts like a small table.
names = ['Max', 'Peter', 'Abby']
age = [77, 54, 28]
disease = ['Alzheimers', 'Parkinson', 'Alzheimers']
therapy = ['Drug B', 'Drug A', 'Drug B']
response = [True, False, False]
patients = dict(name=names, age=age, disease=disease,
                therapy=therapy, response=response)

# --- DATAFRAMES ------------------------------------------------------------
import pandas as pd
from os.path import join

# Column-oriented dict -> DataFrame (one column per key).
df = pd.DataFrame(patients)

# Persist the frame as an Excel workbook.
path = r'C:\Users\samee\Masters Internship\course_material\course_material\slides'
output_path = join(path, 'test_excel.xlsx')
df.to_excel(output_path)
# The IRIS DataSet
path = r'C:\Users\samee\Masters Internship\course_material\course_material\excercises\d2'
# Read a data frame stored in an Excel file.
df_iris = pd.read_excel(join(path, 'iris_data.xlsx'))
# df_iris.iloc[0:5, :] gets rows (and/or columns) at integer locations.
# df_iris.loc[0:5, 'target'] gets rows (and/or columns) with particular labels.

# Masking and indexing
# Set up a specifying condition that creates a "Boolean Mask";
# indexing the frame with the mask keeps only the matching rows.
mask = df_iris['target'] == 'setosa'
setosa = df_iris[mask]

# Exploratory statistical analysis
# dataframe.describe() ---- all common metrics (mean, median, std, ...) of the data set

# Data Visualisation
# 1. Bar Plot
# Get unique counts for each category.
# BUGFIX: this line was commented out, leaving `counts` undefined (NameError below).
counts = df_iris['target'].value_counts()
import matplotlib.pyplot as plt
plt.figure()
plt.bar(counts.keys(), counts.values)
plt.ylabel('Sample counts')
plt.show()

# 2. Histogram of one feature for the setosa subset.
plt.figure()
plt.hist(setosa['sepal length (cm)'])
plt.xlabel('sepal length (cm)')
plt.ylabel('Sample counts')
plt.show()
# Multiple Histograms
def plot_axes(data):
    """Plot histograms of the first four columns of *data* on a 2x2 grid."""
    fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(10, 8))
    # Pair each of the 4 axes with one of the first 4 column names.
    for ax, column in zip(axs.flatten(), list(data.keys())[:4]):
        ax.hist(data[column])
        ax.set_xlabel(column)
        ax.set_ylabel("Sample counts")
    return plt.show()
plot_axes(setosa)
# Plot histograms for another IRIS subtype.
viginica= df_iris[df_iris['target']=='virginica']
plot_axes(viginica)
# Compare distributions of IRIS subtypes for a specific feature.
# SEABORN
# KDE density plot: a variation of a histogram, useful when comparing multiple
# distributions in one plot.
import seaborn as sns
plt.figure()
sns.kdeplot(setosa['sepal length (cm)'])
sns.kdeplot(viginica['sepal length (cm)'])
plt.legend(['setosa', 'virginica'])
plt.xlabel('sepal length (cm)')
plt.ylabel('Normalized sample counts')
plt.show()
# 3. Scatter Plot
# Useful to find relationships between data.
# pyplot.scatter() plots a scatter plot between two continuous variables.
plt.figure()
plt.scatter(setosa['sepal length (cm)'],setosa['sepal width (cm)'], c='b')
plt.scatter(viginica['sepal length (cm)'], viginica['sepal width (cm)'], c='r')
plt.legend(['setosa', 'virginica'])
plt.xlabel('sepal length (cm)')
plt.ylabel('sepal width(cm)')
plt.show()
# Pair Plot: scatter plots for every feature pair, KDE on the diagonal.
sns.pairplot(df_iris, hue='target', diag_kind='kde')
plt.show()
# Linear correlation between two variables:
# the linear correlation coefficient measures the strength of the linear
# relationship between two variables. If r is close to +/-1, the two variables
# are highly correlated and the points cluster about a line; if r is far from
# +/-1, the points are more widely scattered.
import numpy as np
def correlation(data):
    """Pearson correlation (rounded to 3 dp) between sepal length and width."""
    matrix = np.corrcoef(data['sepal length (cm)'], data['sepal width (cm)'])
    # corrcoef returns a 2x2 matrix; the off-diagonal entry is the coefficient.
    return round(matrix[0][1], 3)
# Correlation coefficient for each subtype.
corr_coef_setosa = correlation(setosa)
corr_coef_viginica = correlation(viginica)
def parameters(data):
    """Least-squares line fit of sepal width on sepal length -> (slope, intercept)."""
    # polyfit with deg=1 returns the coefficients highest power first.
    return tuple(np.polyfit(data['sepal length (cm)'],
                            data['sepal width (cm)'], deg=1))
slope_setosa, intercept_setosa = parameters(setosa)
slope_viginica, intercept_viginica = parameters(viginica)
x_values_setosa = setosa['sepal length (cm)']
y_values_setosa = setosa['sepal width (cm)']
x_values_vignica = viginica['sepal length (cm)']
y_values_viginica = viginica['sepal width (cm)']

plt.figure()
# 1. Setosa: raw points plus the fitted regression line.
plt.scatter(x_values_setosa, y_values_setosa)
plt.plot(x_values_setosa, x_values_setosa*slope_setosa+intercept_setosa)
# 2. Virginica
plt.scatter(x_values_vignica, y_values_viginica)
plt.plot(x_values_vignica, x_values_vignica*slope_viginica+intercept_viginica)
# 3. Annotate with the correlation coefficients.
plt.text(7.5, 4.85, f'setosa r:{corr_coef_setosa}',
         horizontalalignment='center',
         verticalalignment='center')
plt.text(7.5, 4.7, f'virginica r:{corr_coef_viginica}',
         horizontalalignment='center',
         verticalalignment='center')
plt.xlabel('sepal length (cm)')  # BUGFIX: label said 'sepal legth (cm)'
plt.ylabel('sepal width (cm)')
plt.show()

# Correlation heatmap: relationship between all numeric variables, in color.
# BUGFIX: correlation_matrix was never defined (NameError); restrict to numeric
# columns so the string 'target' column cannot break DataFrame.corr().
correlation_matrix = df_iris.select_dtypes(include='number').corr()
# Truthy (non-zero) cells of the lower triangle are hidden by the heatmap mask.
mask = np.tril(correlation_matrix)
# BUGFIX: keyword was misspelled 'mack', which is a TypeError at call time.
heatmap_matrix = sns.heatmap(correlation_matrix, annot=True,
                             vmin=-1, vmax=1, cmap='coolwarm', mask=mask)
|
from typing import Dict, Tuple
from stable_baselines3.common.evaluation import \
evaluate_policy as evaluate_policy_sb3
from stable_baselines.common.evaluation import evaluate_policy
from envs.env_eval_callback import EnvEvalCallback
from log import Log
class AcrobotEvalCallback(EnvEvalCallback):
    """Evaluation callback for the Acrobot environment.

    Performance is considered inadequate when the mean reward degrades by more
    than ``unacceptable_pct_degradation`` percent w.r.t. ``reward_threshold``
    (i.e. with the defaults, a 20% degradation w.r.t. -100).
    """

    def __init__(self, reward_threshold=-100.0, unacceptable_pct_degradation=20.0):
        self.reward_threshold = reward_threshold
        self.unacceptable_pct_degradation = unacceptable_pct_degradation
        self.logger = Log("AcrobotEvalCallback")

    def evaluate_env(self, model, env, n_eval_episodes, sb_version="sb2") -> Tuple[bool, Dict]:
        """Evaluate *model* on *env* and return (adequate_performance, info).

        ``sb_version`` selects the stable-baselines (sb2) or stable-baselines3
        (sb3) evaluation helper. ``info`` carries mean/std reward and the
        percentage drop below the reward threshold.
        """
        if sb_version == "sb2":
            mean_reward, std_reward = evaluate_policy(
                model, env, n_eval_episodes=n_eval_episodes, render=False, deterministic=True
            )
        elif sb_version == "sb3":
            mean_reward, std_reward = evaluate_policy_sb3(
                model, env, n_eval_episodes=n_eval_episodes, render=False, deterministic=True
            )
        else:
            # BUGFIX: `NotImplemented` is a constant, not an exception class;
            # raising it is a TypeError. NotImplementedError is the right type.
            raise NotImplementedError("sb_version can be either sb2 or sb3. Found: {}".format(sb_version))
        # Percentage degradation below the reward threshold (0 when at/above it).
        percentage_drop = (
            abs(100.0 - (100.0 * mean_reward) / self.reward_threshold) if mean_reward < self.reward_threshold else 0.0
        )
        self.logger.debug(
            "Mean reward: {}, Std reward: {}, Percentage drop: {}".format(mean_reward, std_reward, percentage_drop)
        )
        adequate_performance = mean_reward > (
            self.reward_threshold - abs((self.reward_threshold * self.unacceptable_pct_degradation / 100.0))
        )
        info = dict()
        info["mean_reward"] = mean_reward
        info["std_reward"] = std_reward
        info["percentage_drop"] = percentage_drop
        # release resources
        env.close()
        return adequate_performance, info

    def get_reward_threshold(self) -> float:
        return self.reward_threshold
|
import time
from data import get_addresses, get_reference
from substrate.substrate import get_subscan_rewards
from cardano.cardano import get_cardano_rewards
# Aggregate rewards and print them out
def calculate():
# Calculate rewards for all addresses
rewards = []
for _, address in enumerate(get_addresses()):
if address['chain'] == "cardano":
reward = get_cardano_rewards(address=address['address'], reference=get_reference())
if reward:
rewards.append(reward)
else:
reward = get_subscan_rewards(chain=address['chain'], address=address['address'], reference=get_reference())
if reward:
rewards.append(reward)
# Avoid API rate exceeded
time.sleep(1)
# Aggregate rewards, there must be a simpler way...
rewards_aggr = {}
for reward in rewards:
if reward['chain'] in rewards_aggr:
rewards_aggr[reward['chain']]['today_coins'] = float(rewards_aggr[reward['chain']]['today_coins']) + float(reward['today_coins'])
rewards_aggr[reward['chain']]['today_value'] = float(rewards_aggr[reward['chain']]['today_value']) + float(reward['today_value'])
rewards_aggr[reward['chain']]['yesterday_coins'] = float(rewards_aggr[reward['chain']]['yesterday_coins']) + float(reward['yesterday_coins'])
rewards_aggr[reward['chain']]['yesterday_value'] = float(rewards_aggr[reward['chain']]['yesterday_value']) + float(reward['yesterday_value'])
else:
rewards_aggr[reward['chain']] = reward
# Display results
print('')
print(' REWARDS TODAY!')
print('')
print(' TODAY')
total = 0
for reward in list(rewards_aggr.values()):
total = total + float(reward['today_value'])
print(' - {} {:.2f} {} ({:.4f} {})'.format(reward['chain'].ljust(10),
float(reward['today_value']),
get_reference(),
float(reward['today_coins']),
reward['ticker']))
print(' Total: {:.2f} {}'.format(total, get_reference()))
print('')
print('')
print(' YESTERDAY')
total = 0
for reward in list(rewards_aggr.values()):
total = total + float(reward['yesterday_value'])
print(' - {} {:.2f} {} ({:.4f} {})'.format(reward['chain'].ljust(10),
float(reward['yesterday_value']),
get_reference(),
float(reward['yesterday_coins']),
reward['ticker']))
print(' Total: {:.2f} {}'.format(total, get_reference()))
print('')
print('')
if __name__ == '__main__':
    calculate()
    # Keep the console window open (e.g. when double-clicked on Windows).
    input(" Press enter to exit")
|
from django.conf.urls import url
from remo.voting import views
# URL routes for the voting app. Order matters: more specific suffixes
# (edit/, delete/) are listed around the bare-slug detail view.
urlpatterns = [
    # Edit an existing voting.
    url(r'^(?P<slug>[a-z0-9-]+)/edit/$', views.edit_voting, name='voting_edit_voting'),
    # Display a voting.
    url(r'^(?P<slug>[a-z0-9-]+)/$', views.view_voting, name='voting_view_voting'),
    # Delete a voting.
    url(r'^(?P<slug>[a-z0-9-]+)/delete/$', views.delete_voting, name='voting_delete_voting'),
    # Delete a single poll comment, addressed by voting slug, commenter
    # display name and comment id.
    url(r'^(?P<slug>[a-z0-9-]+)/comment/(?P<display_name>[A-Za-z0-9_]+)'
        r'/(?P<comment_id>\d+)/delete/$',
        views.delete_poll_comment, name='voting_delete_poll_comment'),
]
|
from jinja2 import Environment, FileSystemLoader
import pandas as pd
import yaml
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler
from bokeh.application import Application
from bokeh.application.handlers import FunctionHandler
from bokeh.embed import autoload_server
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, Slider
from bokeh.plotting import figure
from bokeh.server.server import Server
from bokeh.themes import Theme
# Jinja2 environment used to render the page that embeds the Bokeh app.
env = Environment(loader=FileSystemLoader('templates'))
class IndexHandler(RequestHandler):
    """Serves the index page with the Bokeh app embedded via an autoload script."""

    def get(self):
        script = autoload_server(url='http://localhost:5006/bkapp')
        page = env.get_template('embed.html').render(script=script, template="Tornado")
        self.write(page)
def modify_doc(doc):
    """Build the Bokeh document: a sea-surface-temperature time series with a
    slider that applies an N-day rolling mean."""
    data_url = "http://www.neracoos.org/erddap/tabledap/B01_sbe37_all.csvp?time,temperature&depth=1&temperature_qc=0&time>=2016-02-15&time<=2017-03-22"
    df = pd.read_csv(data_url, parse_dates=True, index_col=0)
    df = df.rename(columns={'temperature (celsius)': 'temperature'})
    df.index.name = 'time'
    source = ColumnDataSource(data=df)

    plot = figure(x_axis_type='datetime', y_range=(0, 25), y_axis_label='Temperature (Celsius)',
                  title="Sea Surface Temperature at 43.18, -70.43")
    plot.line('time', 'temperature', source=source)

    def callback(attr, old, new):
        # new == 0 -> raw data; otherwise smooth with a rolling mean of N days.
        if new == 0:
            data = df
        else:
            data = df.rolling('{0}D'.format(new)).mean()
        source.data = ColumnDataSource(data=data).data

    slider = Slider(start=0, end=30, value=0, step=1, title="Smoothing by N Days")
    slider.on_change('value', callback)
    doc.add_root(column(slider, plot))

    # BUGFIX: yaml.load() without an explicit Loader is unsafe and raises a
    # TypeError on PyYAML >= 6; safe_load is sufficient for this plain config.
    doc.theme = Theme(json=yaml.safe_load("""
        attrs:
            Figure:
                background_fill_color: "#DDDDDD"
                outline_line_color: white
                toolbar_location: above
                height: 500
                width: 800
            Grid:
                grid_line_dash: [6, 4]
                grid_line_color: white
    """))
# Wire the Bokeh application into a Tornado IOLoop, serving the app itself at
# /bkapp and the embedding index page at /.
bokeh_app = Application(FunctionHandler(modify_doc))
io_loop = IOLoop.current()
server = Server({'/bkapp': bokeh_app}, io_loop=io_loop, extra_patterns=[('/', IndexHandler)])
server.start()
if __name__ == '__main__':
    from bokeh.util.browser import view
    print('Opening Tornado app with embedded Bokeh application on http://localhost:5006/')
    # Open the browser once the loop is running, then block serving requests.
    io_loop.add_callback(view, "http://localhost:5006/")
    io_loop.start()
|
import torch
from torch_geometric.nn import SignedConv
def test_signed_conv():
    """Exercise SignedConv in both aggregation modes and check TorchScript parity."""
    in_channels, out_channels = 16, 32
    # Small star graph: node 0 connected to nodes 1, 2, 3 in both directions.
    pos_ei = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    neg_ei = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = pos_ei.max().item() + 1
    x = torch.randn(num_nodes, in_channels)

    # First aggregation layer: output doubles the channel dimension.
    conv = SignedConv(in_channels, out_channels, first_aggr=True)
    assert conv.__repr__() == 'SignedConv(16, 32, first_aggr=True)'
    out1 = conv(x, pos_ei, neg_ei)
    assert out1.size() == (num_nodes, 2 * out_channels)

    # The scripted module must reproduce the eager output exactly.
    jit_conv = torch.jit.script(
        conv.jittable(x=x, pos_edge_index=pos_ei, neg_edge_index=neg_ei))
    assert jit_conv(x, pos_ei, neg_ei).tolist() == out1.tolist()

    # Non-first aggregation layer consumes the doubled features.
    conv = SignedConv(out_channels, out_channels, first_aggr=False)
    assert conv.__repr__() == 'SignedConv(32, 32, first_aggr=False)'
    out2 = conv(out1, pos_ei, neg_ei)
    assert out2.size() == (num_nodes, 2 * out_channels)
    jit_conv = torch.jit.script(
        conv.jittable(x=out1, pos_edge_index=pos_ei, neg_edge_index=neg_ei))
    assert jit_conv(out1, pos_ei, neg_ei).tolist() == out2.tolist()
|
import requests
from pathlib import Path
import random
import streamlit as st
import numpy as np
from PIL import Image, ImageOps
import utils
# Page chrome: title and favicon.
st.set_page_config(
    "Streamlit Theme Generator",
    "https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/240/apple/271/woman-artist_1f469-200d-1f3a8.png",
)
utils.local_css("local_styles.css")

# Init state. This is only run whenever a new session starts (i.e. each time a new
# browser tab is opened).
if not st.session_state:
    st.session_state.primaryColor = "#f63366"
    st.session_state.backgroundColor = "#FFFFFF"
    st.session_state.secondaryBackgroundColor = "#f0f2f6"
    st.session_state.textColor = "#262730"
    st.session_state.is_dark_theme = False
    st.session_state.first_time = True

# Show header. st.empty() placeholders so the "Cute" mode below can swap them.
header_img = st.empty()
header_img.image(
    "https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/240/apple/271/woman-artist_1f469-200d-1f3a8.png",
    width=100,
)
header_text = st.empty()
header_text.write(
    """
# Streamlit Theme Generator
Generate beautiful color themes for Streamlit, powered by [colormind.io](http://colormind.io/bootstrap/).
Scroll down to see the theme in action 🎈
"""
)
# Bare string expression: rendered by Streamlit "magic" as a spacer.
""
col1, col2 = st.beta_columns([0.35, 0.65])
new_theme_clicked = col1.button("🔄 Generate new theme")
theme_type = col2.radio("", ["Light theme", "Dark theme"])
# spinner = st.empty()
# if not state.first_time:
#     ""
#     "Done! Scroll down to see your new theme 🎈 "
# TODO: Use a checkbox here instead. Doesn't work with current wheel file.
# dark_checked = st.checkbox("Use dark themes")  # "Black is beautiful" or "Make it dark"
"---"
quote = st.beta_container()

# Show current theme colors, one swatch + lock toggle per color.
locked = []
columns = st.beta_columns(4)
labels = ["backgroundColor", "secondaryBackgroundColor", "primaryColor", "textColor"]
for column, label in zip(columns, labels):
    # c = column.color_picker(
    #     label.rstrip("Color").replace("B", " b").capitalize(),
    #     state[label],
    #     key="color_picker" + label,
    # )
    # st.write(c)
    # st.text_input("c", state[label], key="test" + label)
    # Render the color as a bordered image swatch.
    img = Image.new("RGB", (100, 50), st.session_state[label])
    img = ImageOps.expand(img, border=1, fill="black")
    column.image(img, width=150)
    column.markdown(
        f"<small>{label.rstrip('Color').replace('B', ' b').capitalize()}</small>",
        unsafe_allow_html=True,
    )
    # TODO: Do this with st.checkbox, but doesn't return the proper value with current wheel.
    lock_value = column.radio("", ["Locked", "Unlocked"], index=1, key="lock-" + label)
    locked.append(lock_value == "Locked")
# TODO: Show colorpicker above instead of images.
def apply_theme_from_session_state():
    """Retrieve theme from session state and apply it to streamlit config."""
    # Only apply if the theme in state differs from the current config —
    # otherwise the rerun below would retrigger itself forever.
    if st.config.get_option("theme.primaryColor") == st.session_state.primaryColor:
        return
    theme_options = [
        ("theme.primaryColor", st.session_state.primaryColor),
        ("theme.backgroundColor", st.session_state.backgroundColor),
        ("theme.secondaryBackgroundColor", st.session_state.secondaryBackgroundColor),
        ("theme.textColor", st.session_state.textColor),
    ]
    for option, value in theme_options:
        st.config.set_option(option, value)
    # Trigger manual rerun (required to actually apply the theme to the app).
    st.experimental_rerun()
def generate_new_theme():
    """Retrieve a new theme from colormind, store it in session state.

    Locked colors are passed back to colormind as fixed RGB values so only the
    unlocked slots are regenerated. Which colormind slot maps to which
    Streamlit color depends on light vs dark theme.
    """
    if any(locked):
        # Generate only new colors for the colors that are not locked. These need
        # to be represented as "N" in the list below; locked colors are passed as
        # their RGB values, e.g. [123, 123, 123].
        input_list = ["N", "N", "N", "N", "N"]
        dark = st.session_state.is_dark_theme
        if locked[0]:
            input_list[4 if dark else 0] = utils.hex2rgb(st.session_state.backgroundColor)
        if locked[1]:
            input_list[3 if dark else 1] = utils.hex2rgb(st.session_state.secondaryBackgroundColor)
        if locked[2]:
            input_list[2] = utils.hex2rgb(st.session_state.primaryColor)
        if locked[3]:
            input_list[0 if dark else 4] = utils.hex2rgb(st.session_state.textColor)
        res = requests.get(
            "http://colormind.io/api/", json={"input": input_list, "model": "ui"}
        )
    else:
        # Generate new colors for all colors.
        res = requests.get("http://colormind.io/api/", json={"model": "ui"})

    # Parse the response once (previously parsed twice, with an unused local)
    # and convert the RGB triplets to hex strings.
    hex_colors = [utils.rgb2hex(*rgb) for rgb in res.json()["result"]]

    # Store colors in session state. This is required so that separate tabs/users
    # can have different themes. If we applied the theme directly to `st.config`,
    # every user would see the same theme!
    if theme_type == "Light theme":
        st.session_state.primaryColor = hex_colors[2]
        st.session_state.backgroundColor = hex_colors[0]
        st.session_state.secondaryBackgroundColor = hex_colors[1]
        st.session_state.textColor = hex_colors[4]
        st.session_state.is_dark_theme = False
    else:
        st.session_state.primaryColor = hex_colors[2]
        st.session_state.backgroundColor = hex_colors[4]
        st.session_state.secondaryBackgroundColor = hex_colors[3]
        st.session_state.textColor = hex_colors[0]
        st.session_state.is_dark_theme = True
""
if new_theme_clicked:
if st.session_state.first_time:
# Show some 🎈 🎈 the first time the user creates a new theme ;)
st.balloons()
st.session_state.first_time = False
wait_texts = [
"🎨 Mixing colors...",
"🌈 Collecting rainbows...",
"🖌️ Painting...",
"🐿️ Making happy little accidents...",
"🌲 Decision time...",
"☀️ Lighting up...",
]
# spinner.info(random.choice(wait_texts))
generate_new_theme()
# TODO: Try to do everything after this call, because this triggers a re-run.
apply_theme_from_session_state()
# st.write("---")
""
"""
To use this theme in your app, just create a file *.streamlit/config.toml* in your app's
root directory and add the following code:
"""
config = utils.CONFIG_TEMPLATE.format(
st.session_state.primaryColor,
st.session_state.backgroundColor,
st.session_state.secondaryBackgroundColor,
st.session_state.textColor,
)
st.code(config)
mode = st.radio("App mode", ["Normal", "Cute 🐿️"], key="mode")
if mode == "Cute 🐿️":
header_img.image(
"https://images0.gerstaecker.de/out/pictures/generated/1500_1500/pboxx-pixelboxx-47382/BOB+ROSS%C2%AE+Soft-%C3%96lfarben+%E2%80%93+Tiere.jpg",
width=100,
)
header_text.write(
"""
# The Joy of Streamlitting
Welcome back,
today we want to bring some color to this little Streamlit app. Everything you need
is your mouse to hit the button below. Let's start, and whatever you do, always remember:
There are no mistakes – only happy little accidents! 🐿️
And with that, I wish you happy streamlitting, and god bless my friend.
"""
)
st.write("And now scroll up ☝️")
bob_quotes = [
'🌈 *"If we all painted the same way, what a boring world it would be."*',
'☀️ *"...that may be the true joy of painting, when you share it with other people. I really believe that\'s the true joy."*',
'🌲 *"Friends are the most important commodity in the world. Even a tree needs a friend."*',
'🌚 *"We put some dark in, only so our light will show. You have to have dark in order to show the light."*',
'👩🎨️ *"There\'s an artist hidden at the bottom of every single one of us."*',
'☁️ *"Let\'s make some nice little clouds that just float around and have fun all day."*',
'🖌️ *"Every day is a good day when you paint."*',
'🦄 *"However you think it should be, that\'s exactly how it should be."*',
'🕊️ *"I aspire to create tranquility, a peaceful atmosphere to take people away form their everyday problems and frustrations."*',
'👣 *"You\'ll never believe what you can do until you get in there and try it."*',
]
block_methods = [st.error, st.warning, st.info, st.success]
with quote:
random.choice(block_methods)(random.choice(bob_quotes))
st.write("")
st.write("---")
# Draw some dummy content in main page and sidebar.
def draw_all(key, plot=False):
    """Render the demo widget gallery; *key* namespaces the widget keys so the
    gallery can be drawn more than once, *plot* adds the data/plot section."""
    st.write(
        """
## Example Widgets
These widgets don't do anything. But look at all the new colors they got 👀
```python
# First some code.
streamlit = "cool"
theming = "fantastic"
both = "💥"
```
"""
    )
    st.checkbox("Is this cool or what?", key=key + "check")
    st.radio(
        "How many balloons?",
        ["1 balloon 🎈", "2 balloons 🎈🎈", "3 balloons 🎈🎈🎈"],
        key=key + "radio",
    )
    st.button("🤡 Click me", key=key + "button")
    st.slider(
        "From 10 to 11, how cool are themes?",
        min_value=10,
        max_value=11,
        key=key + "slider",
    )
    st.number_input("So many numbers", key=key + "number")
    st.selectbox(
        "My favorite thing in the world is...",
        ["Streamlit", "Theming", "Baloooons 🎈 "],
        key=key + "select",
    )
    with st.beta_expander("Expand me!"):
        st.write("Hey there! Nothing to see here 👀 ")
    st.write("")
    if plot:
        st.write("And here's some data and plots")
        st.json({"data": [1, 2, 3, 4]})
        st.dataframe({"data": [1, 2, 3, 4]})
        st.table({"data": [1, 2, 3, 4]})
        st.line_chart({"data": [1, 2, 3, 4]})
    st.write("This is the end. Have fun building themes!")
draw_all("main", plot=True)
with st.sidebar:
draw_all("sidebar") |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-04-12 20:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds Tip.details and updates suspect_hair_style choices."""

    dependencies = [
        ('crowdcop_web', '0017_auto_20160412_1613'),
    ]

    operations = [
        migrations.AddField(
            model_name='tip',
            name='details',
            field=models.TextField(default='empty'),
            # 'empty' only backfills existing rows; the default is not kept.
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='tip',
            name='suspect_hair_style',
            field=models.CharField(choices=[('UNKNOWN', 'Unknown'), ('SHORT', 'Short'), ('MEDIUM', 'Medium'), ('LONG', 'Long'), ('DREADLOCKS', 'Dreadlocks'), ('BALDING', 'Balding'), ('BALD/SHAVED', 'Bald/Shaved'), ('OTHER', 'Other')], max_length=30),
        ),
    ]
|
# coding: utf-8
"""
Quay Frontend
This API allows you to perform many of the operations required to work with Quay repositories, users, and organizations. You can find out more at <a href=\"https://quay.io\">Quay</a>. # noqa: E501
OpenAPI spec version: v1
Contact: support@quay.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from quay.api_client import ApiClient
class SecscanApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_repo_image_security(self, repository, imageid, **kwargs):  # noqa: E501
        """get_repo_image_security  # noqa: E501

        Fetches the features and vulnerabilities (if any) for a repository image.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_repo_image_security(repository, imageid, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str repository: The full path of the repository. e.g. namespace/name (required)
        :param str imageid: The image ID (required)
        :param bool vulnerabilities: Include vulnerabilities information
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_repo_image_security_with_http_info(repository, imageid, **kwargs)  # noqa: E501
        else:
            (data) = self.get_repo_image_security_with_http_info(repository, imageid, **kwargs)  # noqa: E501
            return data

    def get_repo_image_security_with_http_info(self, repository, imageid, **kwargs):  # noqa: E501
        """get_repo_image_security  # noqa: E501

        Fetches the features and vulnerabilities (if any) for a repository image.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_repo_image_security_with_http_info(repository, imageid, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str repository: The full path of the repository. e.g. namespace/name (required)
        :param str imageid: The image ID (required)
        :param bool vulnerabilities: Include vulnerabilities information
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['repository', 'imageid', 'vulnerabilities']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() snapshots the named arguments; kwargs are then validated
        # against all_params and merged into the same dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_repo_image_security" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'repository' is set
        if ('repository' not in params or
                params['repository'] is None):
            raise ValueError("Missing the required parameter `repository` when calling `get_repo_image_security`")  # noqa: E501
        # verify the required parameter 'imageid' is set
        if ('imageid' not in params or
                params['imageid'] is None):
            raise ValueError("Missing the required parameter `imageid` when calling `get_repo_image_security`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'repository' in params:
            path_params['repository'] = params['repository']  # noqa: E501
        if 'imageid' in params:
            path_params['imageid'] = params['imageid']  # noqa: E501

        query_params = []
        if 'vulnerabilities' in params:
            query_params.append(('vulnerabilities', params['vulnerabilities']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['oauth2_implicit']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/repository/{repository}/image/{imageid}/security', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_repo_manifest_security(self, manifestref, repository, **kwargs):  # noqa: E501
        """get_repo_manifest_security  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_repo_manifest_security(manifestref, repository, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str manifestref: The digest of the manifest (required)
        :param str repository: The full path of the repository. e.g. namespace/name (required)
        :param bool vulnerabilities: Include vulnerabilities information
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_repo_manifest_security_with_http_info(manifestref, repository, **kwargs)  # noqa: E501
        else:
            (data) = self.get_repo_manifest_security_with_http_info(manifestref, repository, **kwargs)  # noqa: E501
            return data

    def get_repo_manifest_security_with_http_info(self, manifestref, repository, **kwargs):  # noqa: E501
        """get_repo_manifest_security  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_repo_manifest_security_with_http_info(manifestref, repository, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str manifestref: The digest of the manifest (required)
        :param str repository: The full path of the repository. e.g. namespace/name (required)
        :param bool vulnerabilities: Include vulnerabilities information
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['manifestref', 'repository', 'vulnerabilities']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() snapshots the named arguments; kwargs are then validated
        # against all_params and merged into the same dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_repo_manifest_security" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'manifestref' is set
        if ('manifestref' not in params or
                params['manifestref'] is None):
            raise ValueError("Missing the required parameter `manifestref` when calling `get_repo_manifest_security`")  # noqa: E501
        # verify the required parameter 'repository' is set
        if ('repository' not in params or
                params['repository'] is None):
            raise ValueError("Missing the required parameter `repository` when calling `get_repo_manifest_security`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'manifestref' in params:
            path_params['manifestref'] = params['manifestref']  # noqa: E501
        if 'repository' in params:
            path_params['repository'] = params['repository']  # noqa: E501

        query_params = []
        if 'vulnerabilities' in params:
            query_params.append(('vulnerabilities', params['vulnerabilities']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['oauth2_implicit']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/repository/{repository}/manifest/{manifestref}/security', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
|
"""
Mock implementation of a ``discord.state.ConnectionState``. Overwrites a Client's default state, allowing hooking of
its methods and support for test-related features.
"""
import asyncio
import typing
import discord
import discord.http as dhttp
import discord.state as dstate
from . import factories as facts
from . import backend as back
class FakeState(dstate.ConnectionState):
    """
    A mock implementation of a ``ConnectionState``. Overrides methods that would otherwise cause issues, and
    implements functionality such as disabling dispatch temporarily.
    """

    http: 'back.FakeHttp'  # String because of circular import

    def __init__(self, client: discord.Client, http: dhttp.HTTPClient, user: discord.ClientUser = None, loop: asyncio.AbstractEventLoop = None) -> None:
        if loop is None:
            loop = asyncio.get_event_loop()
        # Forward the real client's dispatch/intents/member-cache flags so events
        # reach user code exactly as they would with a live connection.
        super().__init__(dispatch=client.dispatch,
                         handlers=None, hooks=None,
                         syncer=None, http=http,
                         loop=loop, intents=client.intents,
                         member_cache_flags=client._connection.member_cache_flags)
        if user is None:
            # Fabricate a default bot user when the caller does not supply one.
            user = discord.ClientUser(state=self, data=facts.make_user_dict("FakeApp", "0001", None))
            user.bot = True
        self.user = user
        self.shard_count = client.shard_count
        self._get_websocket = lambda x: client.ws
        # Wrap the inherited dispatch so it can be muted/unmuted via
        # stop_dispatch()/start_dispatch() without touching the client.
        self._do_dispatch = True
        real_disp = self.dispatch

        def dispatch(*args, **kwargs):
            if not self._do_dispatch:
                return
            return real_disp(*args, **kwargs)

        self.dispatch = dispatch

    def stop_dispatch(self) -> None:
        """
        Stop dispatching events to the client, if we are
        """
        self._do_dispatch = False

    def start_dispatch(self) -> None:
        """
        Start dispatching events to the client, if we aren't already
        """
        self._do_dispatch = True

    # TODO: Respect limit parameters
    async def query_members(self, guild: discord.Guild, query: str, limit: int, user_ids: int, cache: bool, presences: bool) -> None:
        # NOTE(review): query/limit/user_ids/cache/presences are currently ignored —
        # all members of the guild are returned regardless of the filters. The
        # ``user_ids: int`` annotation also looks wrong (likely a list) — confirm.
        guild: discord.Guild = discord.utils.get(self.guilds, id=guild.id)
        return guild.members

    async def chunk_guild(self, guild: discord.Guild, *, wait: bool = True, cache: typing.Optional[bool] = None):
        # Intentionally a no-op: real chunking performs gateway requests the fake
        # backend cannot answer.
        pass

    def _guild_needs_chunking(self, guild: discord.Guild):
        """
        Prevents chunking which can throw asyncio wait_for errors with tests under 60 seconds
        """
        return False
|
# Public API of this testing-utilities module.
__all__ = ['is_match_states_batch_size', 'verify_nmt_model', 'verify_nmt_inference']
import numpy.testing as npt
import numpy as np
import mxnet as mx
from mxnet.util import use_np
from .parameter import move_to_ctx
def is_match_states_batch_size(states, states_batch_axis, batch_size) -> bool:
    """Test whether the generated states have the specified batch size.

    Parameters
    ----------
    states
        The states structure: a tensor exposing ``.shape``, or a nested
        list/tuple of such tensors.
    states_batch_axis
        The states batch axis structure: ``None`` (no check), an int axis,
        or a nested structure mirroring ``states``.
    batch_size
        The expected batch size.

    Returns
    -------
    ret
        True if every tensor in ``states`` has ``batch_size`` along its
        batch axis.
    """
    if states_batch_axis is None:
        return True
    if isinstance(states_batch_axis, int):
        # Bug fix: a mismatch previously fell through to the zip() below and
        # raised ``TypeError: zip argument #1 must support iteration`` instead
        # of returning False.
        return states.shape[states_batch_axis] == batch_size
    for ele_states_batch_axis, ele_states in zip(states_batch_axis, states):
        if not is_match_states_batch_size(ele_states, ele_states_batch_axis,
                                          batch_size):
            return False
    return True
@use_np
def verify_nmt_model(model, batch_size: int = 4,
                     src_seq_length: int = 5,
                     tgt_seq_length: int = 10,
                     atol: float = 1E-4,
                     rtol: float = 1E-3):
    """Verify the correctness of an NMT model. Raise error message if it detects problems.

    Runs a full forward pass and then checks that truncated batches / shortened
    target sequences reproduce the matching prefix of the full output.

    Parameters
    ----------
    model
        The machine translation model
    batch_size
        The batch size to test the nmt model
    src_seq_length
        Length of the source sequence
    tgt_seq_length
        Length of the target sequence
    atol
        Absolute tolerance.
    rtol
        Relative tolerance.
    """
    src_word_sequence = mx.np.random.randint(0, model.src_vocab_size, (batch_size, src_seq_length))
    tgt_word_sequence = mx.np.random.randint(0, model.tgt_vocab_size, (batch_size, tgt_seq_length))
    src_valid_length = mx.np.random.randint(1, src_seq_length, (batch_size,))
    min_tgt_seq_length = max(1, tgt_seq_length - 5)
    tgt_valid_length = mx.np.random.randint(min_tgt_seq_length, tgt_seq_length, (batch_size,))
    if model.layout == 'NT':
        full_out = model(src_word_sequence, src_valid_length, tgt_word_sequence, tgt_valid_length)
    else:
        # Time-major ('TN') layout: transpose inputs, then swap the output back
        # to batch-major so the checks below are layout-independent.
        full_out = model(src_word_sequence.T, src_valid_length,
                         tgt_word_sequence.T, tgt_valid_length)
        full_out = mx.np.swapaxes(full_out, 0, 1)
    if full_out.shape != (batch_size, tgt_seq_length, model.tgt_vocab_size):
        raise AssertionError('The output of NMT model does not match the expected output.'
                             ' Model output shape = {}, Expected (B, T, V) = {}'
                             .format(full_out.shape,
                                     (batch_size, tgt_seq_length, model.tgt_vocab_size)))
    for partial_batch_size in range(1, batch_size + 1):
        for i in range(1, min_tgt_seq_length):
            if model.layout == 'NT':
                partial_out = model(src_word_sequence[:partial_batch_size, :],
                                    src_valid_length[:partial_batch_size],
                                    tgt_word_sequence[:partial_batch_size, :(-i)],
                                    tgt_valid_length[:partial_batch_size]
                                    - mx.np.array(i, dtype=tgt_valid_length.dtype))
            else:
                partial_out = model(src_word_sequence[:partial_batch_size, :].T,
                                    src_valid_length[:partial_batch_size],
                                    tgt_word_sequence[:partial_batch_size, :(-i)].T,
                                    tgt_valid_length[:partial_batch_size]
                                    - mx.np.array(i, dtype=tgt_valid_length.dtype))
                partial_out = mx.np.swapaxes(partial_out, 0, 1)
            # Verify that the partial output matches the full output
            for b in range(partial_batch_size):
                partial_vl = tgt_valid_length.asnumpy()[b] - i
                # Bug fix: tolerances were passed positionally as (atol, rtol),
                # but assert_allclose's signature is (actual, desired, rtol,
                # atol) — the two values were silently swapped.
                npt.assert_allclose(full_out[b, :partial_vl].asnumpy(),
                                    partial_out[b, :partial_vl].asnumpy(),
                                    rtol=rtol, atol=atol)
@use_np
def verify_nmt_inference(train_model, inference_model,
                         batch_size=4, src_seq_length=5,
                         tgt_seq_length=10, atol=1E-4, rtol=1E-3):
    """Verify the correctness of an NMT inference model. Raise error message if it detects
    any problems.

    Compares the training model's full forward pass against the inference
    model's step-by-step decoding, for every partial batch size.

    Parameters
    ----------
    train_model
        The training model
    inference_model
        The inference model
    batch_size
        Batch size
    src_seq_length
        Length of the source sequence
    tgt_seq_length
        Length of the target sequence
    atol
        Absolute tolerance
    rtol
        Relative tolerance
    """
    if train_model.layout == 'NT':
        src_word_sequences = mx.np.random.randint(0, train_model.src_vocab_size,
                                                  (batch_size, src_seq_length))
        tgt_word_sequences = mx.np.random.randint(0, train_model.tgt_vocab_size,
                                                  (batch_size, tgt_seq_length))
    else:
        src_word_sequences = mx.np.random.randint(0, train_model.src_vocab_size,
                                                  (src_seq_length, batch_size))
        tgt_word_sequences = mx.np.random.randint(0, train_model.tgt_vocab_size,
                                                  (tgt_seq_length, batch_size))
    src_valid_length = mx.np.random.randint(1, src_seq_length, (batch_size,))
    min_tgt_seq_length = max(1, tgt_seq_length - 5)
    tgt_valid_length = mx.np.random.randint(min_tgt_seq_length, tgt_seq_length, (batch_size,))
    full_out = train_model(src_word_sequences, src_valid_length,
                           tgt_word_sequences, tgt_valid_length)
    if train_model.layout == 'NT':
        for partial_batch_size in range(1, batch_size + 1):
            step_out_l = []
            states = inference_model.init_states(src_word_sequences[:partial_batch_size, :],
                                                 src_valid_length[:partial_batch_size])
            assert is_match_states_batch_size(states, inference_model.state_batch_axis,
                                              partial_batch_size)
            for i in range(min_tgt_seq_length):
                step_out, states = inference_model(tgt_word_sequences[:partial_batch_size, i],
                                                   states)
                step_out_l.append(step_out)
            partial_out = mx.np.stack(step_out_l, axis=1)
            # Bug fix (both layouts): tolerances were passed positionally as
            # (atol, rtol) into assert_allclose's (rtol, atol) slots — swapped.
            npt.assert_allclose(full_out[:partial_batch_size, :min_tgt_seq_length].asnumpy(),
                                partial_out[:partial_batch_size, :].asnumpy(),
                                rtol=rtol, atol=atol)
    elif train_model.layout == 'TN':
        for partial_batch_size in range(1, batch_size + 1):
            step_out_l = []
            states = inference_model.init_states(src_word_sequences[:, :partial_batch_size],
                                                 src_valid_length[:partial_batch_size])
            assert is_match_states_batch_size(states, inference_model.state_batch_axis,
                                              partial_batch_size)
            for i in range(min_tgt_seq_length):
                step_out, states = inference_model(tgt_word_sequences[i, :partial_batch_size],
                                                   states)
                step_out_l.append(step_out)
            partial_out = mx.np.stack(step_out_l, axis=0)
            npt.assert_allclose(full_out[:min_tgt_seq_length, :partial_batch_size].asnumpy(),
                                partial_out[:, :partial_batch_size].asnumpy(),
                                rtol=rtol, atol=atol)
    else:
        raise NotImplementedError
def _match_struct_output(lhs, rhs, atol=1E-2, rtol=1E-2):
    """Recursively assert that two (possibly nested) tensor structures are
    numerically close.

    Nested lists/tuples are compared element-wise; leaves are compared with
    ``numpy.testing.assert_allclose`` after casting to float32.
    """
    if not isinstance(lhs, (list, tuple)):
        npt.assert_allclose(lhs.asnumpy().astype('float32'),
                            rhs.asnumpy().astype('float32'),
                            atol=atol, rtol=rtol)
        return
    for left_ele, right_ele in zip(lhs, rhs):
        _match_struct_output(left_ele, right_ele, atol=atol, rtol=rtol)
def _cast_nested_to_fp16(nested_dat):
    """Cast the nested input to fp16

    Parameters
    ----------
    nested_dat
        The input nested data structure (arrays, lists, tuples)

    Returns
    -------
    output
        The casted output data, with the same nesting as the input
    """
    if isinstance(nested_dat, (mx.np.ndarray, np.ndarray)):
        # Only float32 leaves are converted; other dtypes pass through as-is.
        if nested_dat.dtype != np.float32:
            return nested_dat
        return nested_dat.astype(np.float16)
    if isinstance(nested_dat, list):
        return [_cast_nested_to_fp16(ele) for ele in nested_dat]
    if isinstance(nested_dat, tuple):
        return tuple(_cast_nested_to_fp16(ele) for ele in nested_dat)
    raise NotImplementedError('Type is not supported!')
def verify_backbone_fp16(model_cls, cfg, ctx, inputs,
                         atol=1E-2, rtol=1E-2, check_amp=True):
    """Test that a backbone model produces comparable outputs in float16.

    Builds the model in fp32, shares its parameters into an fp16 copy and
    compares the forward outputs; optionally also verifies that an
    AMP-managed training step runs end-to-end.

    Parameters
    ----------
    model_cls
        The modeling class
    cfg
        The configuration
    ctx
        The context
    inputs
        The input tensors of the model
    atol
        The absolute tolerance
    rtol
        The relative tolerance
    check_amp
        Whether to check the AMP process. You will need to ensure that there is no
        randomness in the model when it is turned on.
    """
    model_fp32 = model_cls.from_cfg(cfg, dtype='float32')
    model_fp32.initialize(ctx=ctx)
    model_fp32.hybridize()
    # Check forward
    fp32_inputs = move_to_ctx(inputs, ctx=ctx)
    outputs_fp32 = model_fp32(*fp32_inputs)
    mx.npx.waitall()
    # Check forward of fp16
    model_fp16 = model_cls.from_cfg(cfg, dtype='float16')
    # Share (not copy) the fp32 weights, then cast the fp16 model's view.
    model_fp16.share_parameters(model_fp32.collect_params())
    model_fp16.cast('float16')
    model_fp16.hybridize()
    for param in model_fp16.collect_params().values():
        assert param.dtype == 'float16'
    fp16_inputs = move_to_ctx(_cast_nested_to_fp16(inputs), ctx=ctx)
    outputs_fp16 = model_fp16(*fp16_inputs)
    mx.npx.waitall()
    _match_struct_output(outputs_fp16, outputs_fp32, atol=atol, rtol=rtol)
    if check_amp:
        from mxnet import amp
        amp.init()
        # Reconstruct the fp32 model
        model_fp32 = model_cls.from_cfg(cfg, dtype='float32')
        model_fp32.initialize(ctx=ctx)
        model_fp32.hybridize()
        trainer = mx.gluon.Trainer(model_fp32.collect_params(), 'adam',
                                   {'learning_rate': 1E-3, 'wd': 1E-4,
                                    'multi_precision': True},
                                   update_on_kvstore=False)
        amp.init_trainer(trainer)
        with mx.autograd.record():
            outputs_amp = model_fp32(*fp32_inputs)
            # Reduce all outputs to a single scalar loss so backward() works
            # regardless of the backbone's output structure.
            if not isinstance(outputs_amp, (tuple, list)):
                loss = outputs_amp.mean()
            else:
                loss = sum([ele.mean() for ele in outputs_amp])
            with amp.scale_loss(loss, trainer) as scaled_loss:
                mx.autograd.backward(scaled_loss)
        trainer.step(1)
        mx.npx.waitall()
|
from django.urls import path
from .views import (
teacher_home_view, profile_view, change_password_view, update_profile_view,
add_assessment, ViewAssessments, MarkStudents, Mark, EditAssessment, DeleteAssessment
)
# URL namespace, e.g. reverse('teachers:add_assessment').
app_name = 'teachers'

urlpatterns = [
    path('', teacher_home_view, name='teacher_home_view'),
    # add assessment
    path('add-assessment/', add_assessment, name='add_assessment'),
    path('view-assessments/', ViewAssessments.as_view(), name='view_assessments'),
    path('view-assessments/<int:pk>/mark-students/', MarkStudents.as_view(), name='mark_students'),
    # NOTE(review): the three routes below lack a trailing slash, unlike the
    # rest of this file — confirm whether this is intentional (APPEND_SLASH).
    path('view-assessments/<int:id>/mark-students/<int:pk>', Mark.as_view(), name='mark'),
    path('view-assessments/<int:pk>/edit', EditAssessment.as_view(), name='edit_assessment'),
    path('view-assessments/<int:pk>/delete', DeleteAssessment.as_view(), name='delete_assessment'),
    # student profile
    path('profile/', profile_view, name='profile_view'),
    path('profile/update-profile/', update_profile_view, name='update_profile_view'),
    path('profile/change-password/', change_password_view, name='change_password_view'),
]
|
from bs4 import BeautifulSoup
from django.conf import settings
from django.utils.deprecation import MiddlewareMixin
from google_analytics.tasks import send_ga_tracking
from google_analytics.utils import build_ga_params, set_cookie
class GoogleAnalyticsMiddleware(MiddlewareMixin):
    """Middleware that reports every response to Google Analytics via a Celery task."""

    def process_response(self, request, response):
        """Queue a GA tracking event for this request/response pair.

        Requests whose path starts with an entry of
        ``GOOGLE_ANALYTICS_IGNORE_PATH`` are skipped. Raises if no GA
        account id is configured.
        """
        ignore_prefixes = getattr(settings, 'GOOGLE_ANALYTICS_IGNORE_PATH', None)
        if ignore_prefixes is not None:
            if any(prefix for prefix in ignore_prefixes
                   if request.path.startswith(prefix)):
                return response
        # get the account id
        try:
            account = settings.GOOGLE_ANALYTICS['google_analytics_id']
        except (KeyError, TypeError):
            raise Exception("No Google Analytics ID configured")
        # Best-effort page title extraction; non-HTML responses yield None.
        try:
            page_title = BeautifulSoup(
                response.content, "html.parser").html.head.title.text
        except AttributeError:
            page_title = None
        params = build_ga_params(
            request, account, path=request.path,
            referer=request.META.get('HTTP_REFERER', ''), title=page_title)
        response = set_cookie(params, response)
        # Fire-and-forget: tracking happens asynchronously in Celery.
        send_ga_tracking.delay(params)
        return response
|
"""
Copyright 2018 EPAM Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import concurrent
import json
from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor
from datetime import date, datetime
from functools import cmp_to_key
from syndicate.commons.log_helper import get_logger
from syndicate.core.build.bundle_processor import (create_deploy_output,
load_deploy_output,
load_failed_deploy_output,
load_meta_resources,
remove_deploy_output,
remove_failed_deploy_output)
from syndicate.core.build.meta_processor import resolve_meta
from syndicate.core.constants import (BUILD_META_FILE_NAME,
CLEAN_RESOURCE_TYPE_PRIORITY,
DEPLOY_RESOURCE_TYPE_PRIORITY,
LAMBDA_TYPE)
from syndicate.core.helper import exit_on_exception, prettify_json
from syndicate.core.resources import (APPLY_MAPPING, CREATE_RESOURCE,
DESCRIBE_RESOURCE, REMOVE_RESOURCE,
RESOURCE_CONFIGURATION_PROCESSORS,
RESOURCE_IDENTIFIER, UPDATE_RESOURCE)
_LOG = get_logger('syndicate.core.build.deployment_processor')
def get_dependencies(name, meta, resources_dict, resources):
    """ Get dependencies from resources that needed to create them too.

    Collects *name*, its meta, and all transitive dependencies into
    *resources_dict*.

    :type name: str
    :type meta: dict
    :type resources_dict: dict
    :param resources: all known resources {name: meta}
    :param resources_dict: resources that will be created {name: meta}
    """
    resources_dict[name] = meta
    for dependency in meta.get('dependencies') or []:
        dep_name = dependency['resource_name']
        if dep_name in resources_dict:
            # Already collected: skipping avoids redundant re-traversal and
            # guards against infinite recursion on cyclic dependency graphs
            # (the previous implementation recursed unconditionally).
            continue
        get_dependencies(dep_name, resources[dep_name], resources_dict,
                         resources)
# todo implement resources sorter according to priority
def _process_resources(resources, handlers_mapping):
    """Run per-type handlers over *resources*, batching consecutive items of the
    same resource type.

    :param resources: iterable of ``(name, meta)`` pairs, assumed sorted so
        that equal resource types are adjacent
    :param handlers_mapping: dict mapping resource type -> handler that takes a
        list of ``{'name': ..., 'meta': ...}`` dicts
    :return: ``(True, output)`` on success, ``(False, failed_output)`` if any
        handler raised
    """
    res_type = None
    output = {}
    args = []
    resource_type = None
    try:
        for res_name, res_meta in resources:
            res_type = res_meta['resource_type']
            if resource_type is None:
                # First item establishes the current batch's type.
                resource_type = res_type
            if res_type == resource_type:
                args.append({'name': res_name, 'meta': res_meta})
                continue
            elif res_type != resource_type:
                # Type changed: flush the accumulated batch through its handler.
                _LOG.info('Processing {0} resources ...'.format(resource_type))
                func = handlers_mapping[resource_type]
                response = func(args)  # todo exception may be raised here
                if response:
                    output.update(response)
                del args[:]
                args.append({'name': res_name, 'meta': res_meta})
                resource_type = res_type
        if args:
            # Flush the final batch.
            _LOG.info('Processing {0} resources ...'.format(resource_type))
            func = handlers_mapping[resource_type]
            response = func(args)
            if response:
                output.update(response)
        return True, output
    except Exception as e:
        _LOG.exception('Error occurred while {0} '
                       'resource creating: {1}'.format(res_type, str(e)))
        # args list always contains one item here
        # NOTE(review): if the handler itself raised, args may actually hold a
        # whole batch, and args[0] is not necessarily the failing resource —
        # confirm the comment above.
        return False, update_failed_output(args[0]['name'], args[0]['meta'],
                                           resource_type, output)
def update_failed_output(res_name, res_meta, resource_type, output):
    """Merge the described (current) state of a failed resource into *output*.

    :return: the same *output* dict, updated in place
    """
    describe_func = DESCRIBE_RESOURCE[resource_type]
    described = describe_func(res_name, res_meta)
    if not described:
        return output
    if isinstance(described, list):
        for chunk in described:
            output.update(chunk)
    else:
        output.update(described)
    return output
def deploy_resources(resources):
    """Create AWS resources using the per-type CREATE handlers.

    :param resources: sorted list of ``(name, meta)`` pairs
    :return: ``(success, output)`` from :func:`_process_resources`
    """
    return _process_resources(resources=resources,
                              handlers_mapping=CREATE_RESOURCE)
def update_resources(resources):
    """Update existing AWS resources using the per-type UPDATE handlers.

    :param resources: sorted list of ``(name, meta)`` pairs
    :return: ``(success, output)`` from :func:`_process_resources`
    """
    return _process_resources(resources=resources,
                              handlers_mapping=UPDATE_RESOURCE)
def clean_resources(output):
    """Remove deployed resources, batching consecutive entries of one type.

    :param output: iterable of ``(arn, config)`` pairs, assumed sorted so that
        equal resource types are adjacent
    """
    batch = []
    current_type = None
    # clean all resources
    for arn, config in output:
        entry_type = config['resource_meta']['resource_type']
        if current_type is None:
            current_type = entry_type
        if entry_type != current_type:
            # Type changed: remove the batch accumulated so far first.
            _LOG.info('Removing {0} resources ...'.format(current_type))
            REMOVE_RESOURCE[current_type](batch)
            batch = []
            current_type = entry_type
        batch.append({'arn': arn, 'config': config})
    if batch:
        _LOG.info('Removing {0} resources ...'.format(current_type))
        REMOVE_RESOURCE[current_type](batch)
# todo implement saving failed output
def continue_deploy_resources(resources, failed_output):
    """Resume a failed deployment, reusing configurations recorded in
    *failed_output*.

    :param resources: iterable of ``(name, meta)`` pairs, sorted so equal
        resource types are adjacent
    :param failed_output: output saved by the failed deploy, keyed by ARN
    :return: tuple ``(deploy_result, updated_output)``
    """
    updated_output = {}
    deploy_result = True
    res_type = None
    try:
        args = []
        resource_type = None
        for res_name, res_meta in resources:
            res_type = res_meta['resource_type']
            if resource_type is None:
                resource_type = res_type
            if res_type == resource_type:
                # Same type as the current batch: collect the item together
                # with any configuration the failed deploy already created.
                resource_output = __find_output_by_resource_name(
                    failed_output, res_name)
                args.append(
                    {
                        'name': res_name,
                        'meta': res_meta,
                        'current_configurations': resource_output
                    })
                continue
            elif res_type != resource_type:
                # Type changed: process the accumulated batch.
                func = RESOURCE_CONFIGURATION_PROCESSORS.get(resource_type)
                if func:
                    response = func(args)
                    if response:
                        # JSON round-trip normalizes datetimes via _json_serial.
                        updated_output.update(
                            json.loads(
                                json.dumps(response, default=_json_serial)))
                else:
                    # function to update resource is not present
                    # move existing output for resources to new output
                    __move_output_content(args, failed_output, updated_output)
                del args[:]
                resource_output = __find_output_by_resource_name(
                    failed_output, res_name)
                args.append({
                    'name': res_name,
                    'meta': res_meta,
                    'current_configurations': resource_output
                })
                resource_type = res_type
        if args:
            # Process the final batch.
            func = RESOURCE_CONFIGURATION_PROCESSORS.get(resource_type)
            if func:
                response = func(args)
                if response:
                    updated_output.update(
                        json.loads(
                            json.dumps(response, default=_json_serial)))
            else:
                # function to update resource is not present
                # move existing output for resources to new output
                __move_output_content(args, failed_output, updated_output)
    except Exception as e:
        _LOG.exception('Error occurred while {0} resource creating: {1}'.format(
            res_type, str(e)))
        deploy_result = False
    return deploy_result, updated_output
def __move_output_content(args, failed_output, updated_output):
    """Copy previously recorded output entries for each resource in *args*
    from *failed_output* into *updated_output*."""
    for arg in args:
        prior = __find_output_by_resource_name(failed_output, arg['name'])
        if prior:
            updated_output.update(prior)
def __find_output_by_resource_name(output, resource_name):
    """Return the subset of *output* whose entries belong to *resource_name*."""
    return {key: value for key, value in output.items()
            if value['resource_name'] == resource_name}
@exit_on_exception
def create_deployment_resources(deploy_name, bundle_name,
                                deploy_only_resources=None,
                                deploy_only_types=None,
                                excluded_resources=None, excluded_types=None):
    """Deploy all resources of a bundle and persist the deploy output.

    :param deploy_name: name under which the deploy output is saved
    :param bundle_name: bundle whose build meta is deployed
    :param deploy_only_resources: optional whitelist of resource names
    :param deploy_only_types: optional whitelist of resource types
    :param excluded_resources: optional blacklist of resource names
    :param excluded_types: optional blacklist of resource types
    :return: True if every resource deployed successfully
    """
    resources = resolve_meta(load_meta_resources(bundle_name))
    _LOG.debug('Names were resolved')
    _LOG.debug(prettify_json(resources))
    # validate_deployment_packages(resources)
    _LOG.info('{0} file was loaded successfully'.format(BUILD_META_FILE_NAME))
    # TODO make filter chain
    if deploy_only_resources:
        resources = dict((k, v) for (k, v) in resources.items() if
                         k in deploy_only_resources)
    if excluded_resources:
        resources = dict((k, v) for (k, v) in resources.items() if
                         k not in excluded_resources)
    if deploy_only_types:
        resources = dict((k, v) for (k, v) in resources.items() if
                         v['resource_type'] in deploy_only_types)
    if excluded_types:
        resources = dict((k, v) for (k, v) in resources.items() if
                         v['resource_type'] not in excluded_types)
    _LOG.debug('Going to create: {0}'.format(prettify_json(resources)))
    # sort resources with priority
    resources_list = list(resources.items())
    resources_list.sort(key=cmp_to_key(_compare_deploy_resources))
    _LOG.info('Going to deploy AWS resources')
    success, output = deploy_resources(resources_list)
    if success:
        _LOG.info('AWS resources were deployed successfully')
        # apply dynamic changes that uses ARNs
        _LOG.info('Going to apply dynamic changes')
        _apply_dynamic_changes(resources, output)
        _LOG.info('Dynamic changes were applied successfully')
    _LOG.info('Going to create deploy output')
    output_str = json.dumps(output, default=_json_serial)
    # Output is persisted even on failure so that cleanup/continue can use it.
    create_deploy_output(bundle_name, deploy_name, output_str, success)
    _LOG.info('Deploy output for {0} was created.'.format(deploy_name))
    return success
@exit_on_exception
def remove_deployment_resources(deploy_name, bundle_name,
                                clean_only_resources=None,
                                clean_only_types=None,
                                excluded_resources=None, excluded_types=None):
    """Remove previously deployed resources listed in the deploy output.

    :param deploy_name: name of the deploy whose output should be cleaned
    :param bundle_name: bundle the deploy belongs to
    :param clean_only_resources: optional whitelist of resource names
    :param clean_only_types: optional whitelist of resource types
    :param excluded_resources: optional blacklist of resource names
    :param excluded_types: optional blacklist of resource types
    """
    output = load_deploy_output(bundle_name, deploy_name)
    _LOG.info('Output file was loaded successfully')
    # TODO make filter chain
    if clean_only_resources:
        output = dict((k, v) for (k, v) in output.items() if
                      v['resource_name'] in clean_only_resources)
    if excluded_resources:
        output = dict((k, v) for (k, v) in output.items() if
                      v['resource_name'] not in excluded_resources)
    if clean_only_types:
        output = dict((k, v) for (k, v) in output.items() if
                      v['resource_meta']['resource_type'] in clean_only_types)
    if excluded_types:
        output = dict((k, v) for (k, v) in output.items() if
                      v['resource_meta'][
                          'resource_type'] not in excluded_types)
    # sort resources with priority
    resources_list = list(output.items())
    resources_list.sort(key=cmp_to_key(_compare_clean_resources))
    _LOG.debug('Resources to delete: {0}'.format(resources_list))
    _LOG.info('Going to clean AWS resources')
    clean_resources(resources_list)
    # remove output from bucket
    remove_deploy_output(bundle_name, deploy_name)
@exit_on_exception
def continue_deployment_resources(deploy_name, bundle_name,
                                  deploy_only_resources=None,
                                  deploy_only_types=None,
                                  excluded_resources=None,
                                  excluded_types=None):
    """Resume a previously failed deploy using its saved failed output.

    :param deploy_name: name of the failed deploy
    :param bundle_name: bundle the deploy belongs to
    :param deploy_only_resources: optional whitelist of resource names
    :param deploy_only_types: optional whitelist of resource types
    :param excluded_resources: optional blacklist of resource names
    :param excluded_types: optional blacklist of resource types
    :return: True if the continuation deployed all resources successfully
    """
    output = load_failed_deploy_output(bundle_name, deploy_name)
    _LOG.info('Failed output file was loaded successfully')
    resources = resolve_meta(load_meta_resources(bundle_name))
    _LOG.debug('Names were resolved')
    _LOG.debug(prettify_json(resources))
    # TODO make filter chain
    if deploy_only_resources:
        resources = dict((k, v) for (k, v) in resources.items() if
                         k in deploy_only_resources)
    if excluded_resources:
        resources = dict((k, v) for (k, v) in resources.items() if
                         k not in excluded_resources)
    if deploy_only_types:
        resources = dict((k, v) for (k, v) in resources.items() if
                         v['resource_type'] in deploy_only_types)
    if excluded_types:
        resources = dict((k, v) for (k, v) in resources.items() if
                         v['resource_type'] not in excluded_types)
    # sort resources with priority
    resources_list = list(resources.items())
    resources_list.sort(key=cmp_to_key(_compare_deploy_resources))
    success, updated_output = continue_deploy_resources(resources_list, output)
    # NOTE(review): this success message is logged before `success` is
    # inspected, so it also appears when the continuation failed — confirm.
    _LOG.info('AWS resources were deployed successfully')
    if success:
        # apply dynamic changes that uses ARNs
        _LOG.info('Going to apply dynamic changes')
        _apply_dynamic_changes(resources, updated_output)
        _LOG.info('Dynamic changes were applied successfully')
        # remove failed output from bucket
        remove_failed_deploy_output(bundle_name, deploy_name)
    _LOG.info('Going to create deploy output')
    create_deploy_output(bundle_name, deploy_name,
                         prettify_json(updated_output), success=success)
    return success
@exit_on_exception
def remove_failed_deploy_resources(deploy_name, bundle_name,
                                   clean_only_resources=None,
                                   clean_only_types=None,
                                   excluded_resources=None,
                                   excluded_types=None):
    """Clean up resources recorded in the *failed* deploy output.

    Mirrors :func:`remove_deployment_resources`, but reads (and finally
    removes) the failed-deploy output file instead of the regular one.
    """
    output = load_failed_deploy_output(bundle_name, deploy_name)
    _LOG.info('Failed output file was loaded successfully')
    # TODO make filter chain
    if clean_only_resources:
        output = dict((k, v) for (k, v) in output.items() if
                      v['resource_name'] in clean_only_resources)
    if excluded_resources:
        output = dict((k, v) for (k, v) in output.items() if
                      v['resource_name'] not in excluded_resources)
    if clean_only_types:
        output = dict((k, v) for (k, v) in output.items() if
                      v['resource_meta']['resource_type'] in clean_only_types)
    if excluded_types:
        output = dict((k, v) for (k, v) in output.items() if
                      v['resource_meta'][
                          'resource_type'] not in excluded_types)
    # sort resources with priority
    resources_list = list(output.items())
    resources_list.sort(key=cmp_to_key(_compare_clean_resources))
    _LOG.info('Going to clean AWS resources')
    clean_resources(resources_list)
    # remove output from bucket
    remove_failed_deploy_output(bundle_name, deploy_name)
@exit_on_exception
def update_lambdas(bundle_name,
                   publish_only_lambdas,
                   excluded_lambdas_resources):
    """Update (re-publish) the lambda resources of a bundle.

    :param bundle_name: bundle whose build meta is used
    :param publish_only_lambdas: optional whitelist of lambda names
    :param excluded_lambdas_resources: optional blacklist of lambda names
    """
    resources = resolve_meta(load_meta_resources(bundle_name))
    _LOG.debug('Names were resolved')
    _LOG.debug(prettify_json(resources))
    # TODO make filter chain
    # Only lambda resources are eligible for this operation.
    resources = dict((k, v) for (k, v) in resources.items() if
                     v['resource_type'] == LAMBDA_TYPE)
    if publish_only_lambdas:
        resources = dict((k, v) for (k, v) in resources.items() if
                         k in publish_only_lambdas)
    if excluded_lambdas_resources:
        resources = dict((k, v) for (k, v) in resources.items() if
                         k not in excluded_lambdas_resources)
    _LOG.debug('Going to update the following lambdas: {0}'.format(
        prettify_json(resources)))
    resources = list(resources.items())
    update_resources(resources=resources)
def _json_serial(obj):
    """JSON serializer for objects not serializable by default json code"""
    if not isinstance(obj, (datetime, date)):
        raise TypeError("Type %s not serializable" % type(obj))
    # Dates/datetimes are serialized as ISO-8601 strings.
    return obj.isoformat()
def _apply_dynamic_changes(resources, output):
    """Apply post-deploy changes that require resolved identifiers (e.g. ARNs).

    For every resource with an ``apply_changes`` section, resolves the
    resource identifier from *output* and submits the configured apply
    function to a thread pool; waits for all submitted work to finish.

    :param resources: dict of ``{resource_name: meta}``
    :param output: deploy output used to resolve resource identifiers
    """
    # Fix: context-manage the executor so its worker threads are always shut
    # down — the previous implementation created the pool and never closed it.
    with ThreadPoolExecutor(max_workers=5) as pool:
        futures = []
        for name, meta in resources.items():
            resource_type = meta['resource_type']
            apply_changes = meta.get('apply_changes')
            if apply_changes:
                for apply_item in apply_changes:
                    change_type = apply_item['apply_type']
                    dependency_name = apply_item['dependency_name']
                    res_config = resources.get(dependency_name)
                    if not res_config:
                        _LOG.debug('Dependency resource {0} is not found, '
                                   'skipping the apply'.format(dependency_name))
                    else:
                        dependency_type = res_config['resource_type']
                        func = RESOURCE_IDENTIFIER.get(resource_type)
                        if func:
                            resource_output = __find_output_by_resource_name(
                                output, name)
                            identifier = func(name, resource_output)
                            apply_func = APPLY_MAPPING.get(change_type)
                            if apply_func:
                                alias = '#{' + name + '}'
                                f = pool.submit(apply_func, alias, identifier,
                                                apply_item)
                                futures.append(f)
                            else:
                                _LOG.warn('Dynamic apply is not defined '
                                          'for {0} type'.format(change_type))
                        else:
                            _LOG.warn('Resource identifier is not defined '
                                      'for {0} type'.format(dependency_type))
                # NOTE(review): logged before the submitted futures complete —
                # "applied" here effectively means "scheduled".
                _LOG.info('Dynamic changes were applied to {0}'.format(name))
        concurrent.futures.wait(futures, timeout=None, return_when=ALL_COMPLETED)
def _compare_deploy_resources(first, second):
    """Comparator ordering ``(name, meta)`` pairs by deploy priority of their
    resource type."""
    first_priority = DEPLOY_RESOURCE_TYPE_PRIORITY[
        first[-1]['resource_type']]
    second_priority = DEPLOY_RESOURCE_TYPE_PRIORITY[
        second[-1]['resource_type']]
    return _compare_res(first_priority, second_priority)
def _compare_clean_resources(first, second):
    """Comparator ordering ``(arn, config)`` pairs by clean priority of their
    resource type."""
    first_priority = CLEAN_RESOURCE_TYPE_PRIORITY[
        first[-1]['resource_meta']['resource_type']]
    second_priority = CLEAN_RESOURCE_TYPE_PRIORITY[
        second[-1]['resource_meta']['resource_type']]
    return _compare_res(first_priority, second_priority)
def _compare_res(first_res_priority, second_res_priority):
    """Three-way compare of two priorities: -1, 0 or 1 (cmp-style)."""
    if first_res_priority == second_res_priority:
        return 0
    return -1 if first_res_priority < second_res_priority else 1
|
import numpy as np
import codecs
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.cluster import KMeans
def load_SMS_dataset(path):
    """Load the SMS spam dataset from a tab-separated file.

    Each line has the form ``label<TAB>message``. Blank or malformed lines
    (no tab) are skipped.

    :param path: path to the UTF-8 dataset file
    :return: tuple ``(X, y)`` of numpy arrays — messages and their labels
    """
    X, y = [], []
    with codecs.open(path, "r", "utf-8") as fd:
        for line in fd:
            # Split only on the FIRST tab so messages containing tabs stay
            # intact (the old code truncated them at the second tab), and
            # skip lines without a tab instead of raising IndexError.
            parts = line.split("\t", 1)
            if len(parts) != 2:
                continue
            y.append(parts[0].strip())
            X.append(parts[1].strip())
    return np.array(X), np.array(y)
def plot_silhouette(n_clusters, X):
    """Plot a silhouette analysis of a KMeans clustering of *X*.

    Left subplot: per-sample silhouette values grouped by cluster, with the
    average silhouette score marked. Right subplot: the clustered points and
    their centers (assumes 2-D data for the scatter plot).

    :param n_clusters: number of KMeans clusters
    :param X: 2-D array of samples
    """
    # Kôd preuzet s http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_silhouette_analysis.html
    # Create a subplot with 1 row and 2 columns
    fig, (ax1, ax2) = plt.subplots(1, 2)
    fig.set_size_inches(18, 7)
    # The 1st subplot is the silhouette plot
    # The silhouette coefficient can range from -1, 1 but in this example all
    # lie within [-0.1, 1]
    ax1.set_xlim([-0.1, 1])
    # The (n_clusters+1)*10 is for inserting blank space between silhouette
    # plots of individual clusters, to demarcate them clearly.
    ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
    # Initialize the clusterer with n_clusters value and a random generator
    # seed of 10 for reproducibility.
    clusterer = KMeans(n_clusters=n_clusters, random_state=10)
    cluster_labels = clusterer.fit_predict(X)
    # The silhouette_score gives the average value for all the samples.
    # This gives a perspective into the density and separation of the formed
    # clusters
    silhouette_avg = silhouette_score(X, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", silhouette_avg)
    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(X, cluster_labels)
    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]
        ith_cluster_silhouette_values.sort()
        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i
        # Bug fix: the 'spectral' colormap was removed from matplotlib
        # (deprecated in 2.0, removed in 2.2); 'nipy_spectral' is the
        # replacement used by the upstream scikit-learn example.
        color = plt.cm.nipy_spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)
        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
        # Compute the new y_lower for next plot
        y_lower = y_upper + 10  # 10 for the 0 samples
    ax1.set_xlabel("Vrijednosti koeficijenta siluete")
    ax1.set_ylabel("Oznaka grupe")
    # The vertical line for average silhouette score of all the values
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
    ax1.set_yticks([])  # Clear the yaxis labels / ticks
    ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
    # 2nd Plot showing the actual clusters formed
    colors = plt.cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
    ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                c=colors, edgecolor='k')
    # Labeling the clusters
    centers = clusterer.cluster_centers_
    # Draw white circles at cluster centers
    ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
                c="white", alpha=1, s=200, edgecolor='k')
    for i, c in enumerate(centers):
        ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
                    s=50, edgecolor='k')
    ax2.set_xlabel(r"$x_1$")
    ax2.set_ylabel(r"$x_2$")
    plt.show()
def plot_2d_clf_problem(X, y, h=None):
    '''
    Plots a two-dimensional labeled dataset (X,y) and, if function h(x) is given,
    the decision surfaces.
    '''
    assert X.shape[1] == 2, "Dataset is not two-dimensional"
    # Fix: compare to None with identity, not `!=` (PEP 8; `!=` invokes rich
    # comparison, which can misbehave for array-like h).
    if h is not None:
        # Create a mesh to plot in
        r = 0.02  # mesh resolution
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, r),
                             np.arange(y_min, y_max, r))
        XX = np.c_[xx.ravel(), yy.ravel()]
        try:
            Z_test = h(XX)
            if Z_test.shape == ():
                # h returns a scalar when applied to a matrix; map explicitly
                Z = np.array(list(map(h, XX)))
            else:
                Z = Z_test
        except ValueError:
            # can't apply to a matrix; map explicitly
            Z = np.array(list(map(h, XX)))
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        plt.contourf(xx, yy, Z, cmap=plt.cm.Pastel1)
    # Plot the dataset
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.tab20b, marker='o', s=50);
def plot_2d_svc_problem(X, y, svc=None):
    """Plot a two-dimensional labeled dataset (X, y) and, if an SVC object is
    given, its decision surfaces (with the margin shown as well).

    Args:
        X: array of shape (n_samples, 2); the points to plot.
        y: per-sample labels, used only to colour the scatter plot.
        svc: optional fitted classifier exposing ``decision_function``;
            evaluated per mesh point through ``svc_predict``.
    """
    assert X.shape[1] == 2, "Dataset is not two-dimensional"
    if svc is not None:  # fixed: was `svc != None` (PEP 8 E711)
        # Create a mesh to plot in.
        r = 0.03  # mesh resolution
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, r),
                             np.arange(y_min, y_max, r))
        XX = np.c_[xx.ravel(), yy.ravel()]
        Z = np.array([svc_predict(svc, x) for x in XX])
        # Put the result into a color plot.
        Z = Z.reshape(xx.shape)
        plt.contourf(xx, yy, Z, cmap=plt.cm.Pastel1)
    # Plot the dataset itself.
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, marker='o', s=50)
    #plt.show()
def svc_predict(svc, x):
    """Map the SVC decision value for point ``x`` onto [-1, 1], collapsing
    everything inside the margin band (|decision| <= 1) to the sentinel 0.5
    (used to render the margin in the surface plots)."""
    margin = svc.decision_function([x])
    if -1 <= margin <= 1:
        # Inside the margin band.
        return 0.5
    # Outside the margin: clip the decision value to [-1, 1].
    return max(-1, min(1, margin))
def plot_error_surface(err, c_range=(0,5), g_range=(0,5)):
    # Plot a 2D error surface `err` (rows correspond to C values, columns to
    # gamma values) as a grayscale image with labeled contour lines overlaid.
    #
    # c_range / g_range give (start, end) of the C and gamma axes; tick labels
    # are drawn every 5 grid steps.
    c1, c2 = c_range[0], c_range[1]
    g1, g2 = g_range[0], g_range[1]
    plt.xticks(range(0,g2-g1+1,5),range(g1,g2,5)); plt.xlabel("gamma")
    plt.yticks(range(0,c2-c1+1,5),range(c1,c2,5)); plt.ylabel("C")
    p = plt.contour(err);
    # 1-err so that low error shows up bright in the grayscale image.
    plt.imshow(1-err, interpolation='bilinear', origin='lower',cmap=plt.cm.gray)
    plt.clabel(p, inline=1, fontsize=10)
    #plt.show()
def knn_eval(n_instances=100, n_features=2, n_classes=2, n_informative=2,
             test_size=0.3, k_range=(1, 20), n_trials=100):
    """Estimate k-NN train/test error curves on random synthetic datasets.

    For each of `n_trials` randomly generated classification problems, fits a
    KNeighborsClassifier for every k in `k_range` (inclusive on both ends) and
    records misclassification rates, averaged over all trials.

    Returns:
        Tuple (ks, best_k, train_errors, test_errors) where `best_k` is the k
        minimizing the mean test error.
    """
    ks = list(range(k_range[0], k_range[1] + 1))
    per_trial_train, per_trial_test = [], []
    for _ in range(n_trials):
        X, y = make_classification(
            n_instances, n_features, n_classes=n_classes,
            n_informative=n_informative, n_redundant=0,
            n_clusters_per_class=1)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size)
        trial_train, trial_test = [], []
        for k in ks:
            model = KNeighborsClassifier(n_neighbors=k)
            model.fit(X_train, y_train)
            # score() is accuracy, so 1 - score is the misclassification rate.
            trial_train.append(1 - model.score(X_train, y_train))
            trial_test.append(1 - model.score(X_test, y_test))
        per_trial_train.append(trial_train)
        per_trial_test.append(trial_test)
    train_errors = np.mean(np.array(per_trial_train), axis=0)
    test_errors = np.mean(np.array(per_trial_test), axis=0)
    best_k = ks[np.argmin(test_errors)]
    return ks, best_k, train_errors, test_errors
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import logging
import warnings
import concurrent.futures
from operator import attrgetter
from functools import partial
from itertools import chain
import six
from hybrid.core import Runnable, State, States, stoppable
from hybrid.concurrency import Present, immediate_executor
from hybrid.exceptions import EndOfStream
from hybrid import traits
# Public API of this module: the runnable/flow-control classes exported
# by the hybrid package.
__all__ = [
    'Branch', 'Branches', 'RacingBranches', 'Race', 'ParallelBranches', 'Parallel',
    'Map', 'Reduce', 'Lambda', 'ArgMin', 'Unwind', 'TrackMin',
    'Loop', 'LoopUntilNoImprovement', 'LoopWhileNoImprovement',
    'Identity', 'InterruptableIdentity', 'Dup', 'Const', 'Wait'
]

# Module-level logger, named after the module for hierarchical configuration.
logger = logging.getLogger(__name__)
class Branch(traits.NotValidated, Runnable):
    """Sequentially executed :class:`~hybrid.core.Runnable` components.

    Args:
        components (iterable of :class:`~hybrid.core.Runnable`):
            Complete processing sequence to update a current set of samples,
            such as: :code:`decomposer | sampler | composer`.

    Input:
        Defined by the first branch component.

    Output:
        Defined by the last branch component.

    Examples:
        This example runs one iteration of a branch comprising a decomposer,
        local Tabu solver, and a composer. A 10-variable binary quadratic model
        is decomposed by the energy impact of its variables into a 6-variable
        subproblem to be sampled twice with a manually set initial state of
        all -1 values.

        >>> import dimod           # Create a binary quadratic model
        >>> bqm = dimod.BQM({t: 0 for t in range(10)},
        ...                 {(t, (t+1) % 10): 1 for t in range(10)},
        ...                 0, 'SPIN')
        >>> # Run one iteration on a branch
        >>> branch = (EnergyImpactDecomposer(size=6, min_gain=-10) |
        ...           TabuSubproblemSampler(num_reads=2) |
        ...           SplatComposer())
        >>> new_state = branch.next(State.from_sample(min_sample(bqm), bqm))
        >>> print(new_state.subsamples)      # doctest: +SKIP
            4   5   6   7   8   9   energy   num_occ.
        0  +1  -1  -1  +1  -1  +1     -5.0          1
        1  +1  -1  -1  +1  -1  +1     -5.0          1
        [ 2 rows, 6 variables ]
    """

    def __init__(self, components=(), **runopts):
        super(Branch, self).__init__(**runopts)
        self.components = tuple(components)

        # An empty branch has no defined input/output behavior; fail early.
        if not self.components:
            raise ValueError("branch has to contain at least one component")
        for component in self.components:
            if not isinstance(component, Runnable):
                raise TypeError("expected Runnable component, got {!r}".format(component))

    def __or__(self, other):
        """Sequential composition of runnable components (L-to-R)
        returns a new runnable Branch.
        """
        # Flatten when composing with another Branch: splice its components
        # rather than nesting one branch inside the other.
        if isinstance(other, Branch):
            return Branch(components=chain(self, other))
        elif isinstance(other, Runnable):
            return Branch(components=chain(self, (other,)))
        else:
            raise TypeError("only Runnables can be composed into a Branch")

    def __str__(self):
        return " | ".join(map(str, self)) or "(empty branch)"

    def __repr__(self):
        return "{}(components={!r})".format(self.name, tuple(self))

    def __iter__(self):
        # Iterating a Branch yields its components (used by __or__/__str__).
        return iter(self.components)

    def next(self, state, **runopts):
        """Start an iteration of an instantiated :class:`Branch`.

        Accepts a state and returns a new state.

        Args:
            state (:class:`State`):
                Computation state passed to the first component of the branch.

        Examples:
            This code snippet runs one iteration of a branch to produce a new state::

                new_state = branch.next(core.State.from_sample(min_sample(bqm), bqm)
        """
        # Force components to run inline (in this thread); the branch itself
        # is typically already running off the caller's thread.
        runopts['executor'] = immediate_executor
        # Thread each component's (immediate) future output into the next
        # component's input; only the final result needs resolving.
        for component in self.components:
            state = component.run(state, **runopts)
        return state.result()

    def error(self, exc):
        """Pass on the exception from input to the error handler of the first
        runnable in branch.
        """
        # Wrap the exception in a resolved future so the pipeline can react.
        return self.next(Present(exception=exc))

    def halt(self):
        """Try terminating all components in an instantiated :class:`Branch`."""
        for component in self.components:
            component.stop()
class Branches(traits.NotValidated, Runnable):
    """Runs multiple workflows of type :class:`~hybrid.core.Runnable` in
    parallel, blocking until all finish.

    Branches operates similarly to :class:`~hybrid.flow.ParallelBranches`,
    but each branch runs on a separate input :class:`~hybrid.core.State`
    (while parallel branches all use the same input state).

    Args:
        *branches ([:class:`~hybrid.core.Runnable`]):
            Runnable branches listed as positional arguments.

    Input:
        :class:`~hybrid.core.States`

    Output:
        :class:`~hybrid.core.States`

    Note:
        :class:`~hybrid.flow.Branches` is also available via implicit
        parallelization binary operator `&`.

    Examples:
        This example runs two branches, a classical tabu search and a random
        sampler, until both terminate::

            Branches(TabuSubproblemSampler(), RandomSubproblemSampler())

        Alternatively::

            TabuSubproblemSampler() & RandomSubproblemSampler()
    """

    def __init__(self, *branches, **runopts):
        super(Branches, self).__init__(**runopts)
        self.branches = tuple(branches)

        if not self.branches:
            raise ValueError("Branches require at least one branch")
        for branch in self.branches:
            if not isinstance(branch, Runnable):
                raise TypeError("expected Runnable branch, got {!r}".format(branch))

    def __and__(self, other):
        """Parallel composition of runnable components returns new Branches."""
        # Flatten: `&` with another Branches splices its branches instead of
        # nesting one Branches inside another.
        if isinstance(other, Branches):
            return Branches(*chain(self, other))
        elif isinstance(other, Runnable):
            return Branches(*chain(self, (other,)))
        else:
            raise TypeError("only Runnables can be composed into Branches")

    def __str__(self):
        return " & ".join("({})".format(b) for b in self) or "(zero branches)"

    def __repr__(self):
        return "{}{!r}".format(self.name, tuple(self))

    def __iter__(self):
        return iter(self.branches)

    def next(self, states, **runopts):
        # Pair each branch with its own input state; note zip() truncates to
        # the shorter of (branches, states).
        futures = [
            branch.run(state.updated(), **runopts)
            for branch, state in zip(self.branches, states)]

        logger.debug("{} running {} branches in parallel".format(
            self.name, len(futures)))

        # wait for all branches to finish
        concurrent.futures.wait(
            futures,
            return_when=concurrent.futures.ALL_COMPLETED)

        # collect resolved states (in original order, not completion order)
        states = States()
        for f in futures:
            states.append(f.result())

        return states

    def halt(self):
        # Propagate the stop request to every branch.
        for branch in self.branches:
            branch.stop()
class RacingBranches(traits.NotValidated, Runnable):
    """Runs (races) multiple workflows of type :class:`~hybrid.core.Runnable`
    in parallel, stopping all once the first finishes. Returns the results of
    all, in the specified order.

    Args:
        *branches ([:class:`~hybrid.core.Runnable`]):
            Comma-separated branches.

    Note:
        Each branch runnable is called with run option ``racing_context=True``,
        so it can adapt its behaviour to the context.

    Note:
        `RacingBranches` is also available as `Race`.

    Examples:
        This example runs two branches: a classical tabu search interrupted by
        samples of subproblems returned from a D-Wave system.

        ::

            RacingBranches(
                InterruptableTabuSampler(),
                EnergyImpactDecomposer(size=2)
                | QPUSubproblemAutoEmbeddingSampler()
                | SplatComposer()
            ) | ArgMin()
    """

    def __init__(self, *branches, **runopts):
        self.branches = branches
        super(RacingBranches, self).__init__(**runopts)

        if not self.branches:
            raise ValueError("racing branches requires at least one branch")

    def __str__(self):
        return " !! ".join("({})".format(b) for b in self) or "(zero racing branches)"

    def __repr__(self):
        return "{}{!r}".format(self.name, tuple(self))

    def __iter__(self):
        return iter(self.branches)

    def next(self, state, **runopts):
        # Advertise the racing context so interruptable branches can adapt.
        runopts.update(racing_context=True)
        futures = [branch.run(state.updated(), **runopts) for branch in self.branches]

        # as soon as one is done, stop all others
        done, _ = concurrent.futures.wait(
            futures,
            return_when=concurrent.futures.FIRST_COMPLETED)
        # NOTE: `trace` is a custom logging level used by this project,
        # not a stdlib logging method.
        logger.trace("RacingBranches done set: {}. Stopping remaining.".format(done))
        self.stop()

        # debug info
        idx = futures.index(done.pop())
        branch = self.branches[idx]
        logger.debug("{name} won idx={idx} branch={branch!r}".format(
            name=self.name, idx=idx, branch=branch))

        # collect resolved states (in original order, not completion order!)
        states = States()
        for f in futures:
            states.append(f.result())

        return states

    def halt(self):
        """Terminate an iteration of an instantiated :class:`RacingBranches`."""
        for branch in self.branches:
            branch.stop()


# Public alias (exported via __all__).
Race = RacingBranches
class Dup(traits.NotValidated, Runnable):
    """Fan out one input :class:`~hybrid.core.State` into an output
    :class:`~hybrid.core.States` containing ``n`` independent copies of it.
    """

    def __init__(self, n, *args, **kwargs):
        super(Dup, self).__init__(*args, **kwargs)
        # Number of copies produced per input state.
        self.n = n

    def __repr__(self):
        return "{}(n={!r})".format(self.name, self.n)

    def next(self, state, **runopts):
        logger.debug("{} cloning input state {} time(s)".format(self.name, self.n))
        clones = (state.updated() for _ in range(self.n))
        return States(*clones)
class ParallelBranches(traits.NotValidated, Runnable):
    """Runs multiple workflows of type :class:`~hybrid.core.Runnable` in
    parallel, blocking until all finish.

    Parallel/ParallelBranches operates similarly to :class:`~hybrid.flow.Branches`,
    but every branch re-uses the same input :class:`~hybrid.core.State`.

    Args:
        *branches ([:class:`~hybrid.core.Runnable`]):
            Comma-separated branches.

    Input:
        :class:`~hybrid.core.State`

    Output:
        :class:`~hybrid.core.States`

    Note:
        `Parallel` is implemented as::

            Parallel(*branches) := Dup(len(branches)) | Branches(*branches)

    Note:
        `ParallelBranches` is also available as `Parallel`.

    Examples:
        This example runs two branches, a classical tabu search and a random
        sampler, until both terminate::

            Parallel(
                TabuSubproblemSampler(),
                RandomSubproblemSampler()
            ) | ArgMin()
    """

    def __init__(self, *branches, **runopts):
        super(ParallelBranches, self).__init__(**runopts)
        self.branches = Branches(*branches)
        # Implemented by composition: duplicate the single input state once
        # per branch, then run all branches on the copies.
        self.runnable = Dup(len(tuple(self.branches))) | self.branches

    def __repr__(self):
        return "{}{!r}".format(self.name, tuple(self.branches))

    def __iter__(self):
        return iter(self.branches)

    def next(self, state, **runopts):
        # Run the composed Dup | Branches pipeline inline.
        runopts['executor'] = immediate_executor
        return self.runnable.run(state, **runopts).result()

    def halt(self):
        # Delegates to the composed pipeline, which stops all branches.
        return self.runnable.stop()


# Public alias (exported via __all__).
Parallel = ParallelBranches
class Map(traits.NotValidated, Runnable):
    """Runs a specified :class:`~hybrid.core.Runnable` in parallel on all input
    states.

    Args:
        runnable (:class:`~hybrid.core.Runnable`):
            A runnable executed for every input state.

    Examples:
        This example runs `TabuProblemSampler` on two input states in parallel,
        returning when both are done.

        >>> states = States(State(problem=bqm1), State(problem=bqm2))   # doctest: +SKIP
        >>> Map(TabuProblemSampler()).run(states).result()              # doctest: +SKIP
        [<state_1_with_solution>, <state_2_with_solution>]
    """

    def __init__(self, runnable, **runopts):
        if not isinstance(runnable, Runnable):
            raise TypeError("'runnable' is not instance of Runnable")

        super(Map, self).__init__(**runopts)
        self.runnable = runnable

        # track running computations, so we can stop them on request
        self._futures = []

    def __str__(self):
        return "[]()"

    def __repr__(self):
        return "{}(runnable={!r})".format(self.name, self.runnable)

    def __iter__(self):
        return iter((self.runnable,))

    def next(self, states, **runopts):
        # Launch one run of `self.runnable` per input state.
        self._futures = [self.runnable.run(state, **runopts) for state in states]

        logger.debug("{} running {!r} on {} input states".format(
            self.name, self.runnable, len(states)))

        # Block until every per-state run has finished.
        concurrent.futures.wait(self._futures,
                                return_when=concurrent.futures.ALL_COMPLETED)

        # Results are collected in input order, not completion order.
        return States(*(f.result() for f in self._futures))

    def halt(self):
        # Best-effort cancellation of any still-pending runs.
        for future in self._futures:
            future.cancel()
class Reduce(traits.NotValidated, Runnable):
    """Fold-left using the specified :class:`~hybrid.core.Runnable` on a
    sequence of input states, producing a single output state.

    Args:
        runnable (:class:`~hybrid.core.Runnable`):
            A runnable used as the fold-left operator. It should accept a
            2-State input and produce a single State on output.

        initial_state (:class:`~hybrid.core.State`, optional, default=None):
            Optional starting state into which input states will be folded in.
            If undefined, the first input state is used as the `initial_state`.
    """

    def __init__(self, runnable, initial_state=None, **runopts):
        if not isinstance(runnable, Runnable):
            raise TypeError("'runnable' is not instance of Runnable")
        if initial_state is not None and not isinstance(initial_state, State):
            raise TypeError("'initial_state' is not instance of State")

        super(Reduce, self).__init__(**runopts)
        self.runnable = runnable
        self.initial_state = initial_state

    def __str__(self):
        return "Reduce {}".format(self.runnable)

    def __repr__(self):
        # Fixed: the format string was missing the closing parenthesis,
        # which produced an unbalanced repr like "Reduce(runnable=..., initial_state=...".
        return ("{self.name}(runnable={self.runnable!r}, "
                "initial_state={self.initial_state!r})").format(self=self)

    def __iter__(self):
        return iter((self.runnable,))

    def next(self, states, **runopts):
        """Collapse all `states` to a single output state using the `self.runnable`."""
        logger.debug("{} collapsing {} input states with {!r}".format(
            self.name, len(states), self.runnable))

        states = iter(states)

        # Seed the fold: the explicit initial state, or the first input state.
        if self.initial_state is None:
            result = next(states)
        else:
            result = self.initial_state

        # Run the folding runnable inline; each step consumes (accumulator, state).
        runopts['executor'] = immediate_executor
        for state in states:
            result = self.runnable.run(States(result, state), **runopts).result()

        return result
class Lambda(traits.NotValidated, Runnable):
    """Creates a runnable on fly, given just its `next` function (optionally
    `init` and `error` functions can be specified too).

    Args:
        next (callable):
            Implementation of runnable's `next` method, provided as a callable
            (usually a lambda expression for simple operations). Signature of
            the callable has to match the signature of
            :meth:`~hybrid.core.Runnable.next()`; i.e., it accepts two
            arguments: runnable instance and state instance.
        error (callable):
            Implementation of runnable's `error` method.
            See :meth:`~hybrid.core.Runnable.error`.
        init (callable):
            Implementation of runnable's `init` method.
            See :meth:`~hybrid.core.Runnable.init`.

    Note:
        Traits are not enforced, apart from the SISO requirement. Also, note
        `Lambda` runnables can only implement SISO systems.

    Examples:
        This example creates and runs a simple runnable that multiplies state
        variables `a` and `b`, storing them in `c`.

        >>> Lambda(lambda _, s: s.updated(c=s.a * s.b)).run(State(a=2, b=3)).result()     # doctest: +SKIP
        {'a': 2, 'b': 3, 'c': 6}

        This example applies `x += 1` to a sequence of input states.

        >>> Map(Lambda(lambda _, s: s.updated(x=s.x + 1))).run(States(State(x=0), State(x=1))).result()
        [{'x': 1}, {'x': 2}]
    """

    def __init__(self, next, error=None, init=None, **runopts):
        # Validate up front; these become instance attributes shadowing the
        # class-level Runnable methods.
        if not callable(next):
            raise TypeError("'next' is not callable")
        if error is not None and not callable(error):
            raise TypeError("'error' is not callable")
        if init is not None and not callable(init):
            raise TypeError("'init' is not callable")

        super(Lambda, self).__init__(**runopts)

        # bind to self
        self.next = partial(next, self, **runopts)
        if error is not None:
            self.error = partial(error, self)
        if init is not None:
            self.init = partial(init, self, **runopts)

        # keep a copy for inspection (without cycles to `self`)
        self._next = next
        self._error = error
        self._init = init

    def __repr__(self):
        return "{}(next={!r}, error={!r}, init={!r})".format(
            self.name, self._next, self._error, self._init)
class ArgMin(traits.NotValidated, Runnable):
    """Selects the best state from a sequence of :class:`~hybrid.core.States`.

    Args:
        key (callable/str):
            Best state is judged according to a metric defined with a `key`.
            The `key` can be a `callable` with a signature::

                key :: (State s, Ord k) => s -> k

            or a string holding a key name/path to be extracted from the input
            state with `operator.attrgetter` method.

            By default, `key == operator.attrgetter('samples.first.energy')`,
            thus favoring states containing a sample with the minimal energy.

    Examples:
        This example runs two branches---a classical tabu search interrupted by
        samples of subproblems returned from a D-Wave system--- and selects the
        state with the minimum-energy sample::

            RacingBranches(
                InterruptableTabuSampler(),
                EnergyImpactDecomposer(size=2)
                | QPUSubproblemAutoEmbeddingSampler()
                | SplatComposer()
            ) | ArgMin()
    """

    def __init__(self, key=None, **runopts):
        """Return the state which minimizes the objective function `key`."""
        super(ArgMin, self).__init__(**runopts)
        if key is None:
            key = 'samples.first.energy'
        if isinstance(key, six.string_types):
            # Dotted-path strings become attribute extractors.
            key = attrgetter(key)
        self.key = key

    def __str__(self):
        return "[]>"

    def __repr__(self):
        return "{}(key={!r})".format(self.name, self.key)

    def next(self, states, **runopts):
        """Execute one blocking iteration of an instantiated :class:`ArgMin`."""
        # expand `return min(states, key=self.key)` for logging/tracking
        values = [self.key(state) for state in states]
        # index() returns the first minimum, so ties favor the earlier state.
        min_idx = values.index(min(values))

        # debug info
        for idx, val in enumerate(values):
            logger.debug("{name} State(idx={idx}, val={val})".format(
                name=self.name, idx=idx, val=val))

        logger.debug("{name} min_idx={min_idx}".format(
            name=self.name, min_idx=min_idx))

        # Record which input (branch) produced the winner, for run statistics.
        self.count('branch-%d' % min_idx)

        return states[min_idx]
class TrackMin(traits.NotValidated, Runnable):
    """Tracks and records the best :class:`~hybrid.core.State` according to a
    metric defined with a `key` function; typically this is the minimal state.

    Args:
        key (callable/str, optional, default=None):
            Best state is judged according to a metric defined with a `key`.
            `key` can be a `callable` with a signature::

                key :: (State s, Ord k) => s -> k

            or a string holding a key name/path to be extracted from the input
            state with `operator.attrgetter` method.

            By default, `key == operator.attrgetter('samples.first.energy')`,
            thus favoring states containing a sample with the minimal energy.

        output (bool, optional, default=False):
            Update the output state's `output_key` with the `input_key` of the
            best state seen so far.

        input_key (str, optional, default='samples')
            If `output=True`, then this defines the variable/key name in the
            input state that shall be included in the output state.

        output_key (str, optional, default='best_samples')
            If `output=True`, then the key under which the `input_key` from the
            best state seen so far is stored in the output state.
    """

    def __init__(self, key=None, output=False, input_key='samples',
                 output_key='best_samples', **runopts):
        super(TrackMin, self).__init__(**runopts)
        if key is None:
            key = 'samples.first.energy'
        if isinstance(key, six.string_types):
            key = attrgetter(key)
        self.key = key
        self.output = output
        self.output_key = output_key
        self.input_key = input_key

    def __repr__(self):
        return (
            "{self.name}(key={self.key!r}, output={self.output!r}, "
            "input_key={self.input_key!r}, output_key={self.output_key!r})"
        ).format(self=self)

    def _set_new_best(self, state):
        # Remember the winning state and log the metric value it won with.
        self.best = state
        logger.debug("{} selected state with key={!r} for the new best state".format(
            self.name, self.key(self.best)))
        # NOTE: `trace` is a custom logging level used by this project.
        logger.trace("{} selected {!r} for the new best state".format(
            self.name, self.best))

    def init(self, state, **runopts):
        # The first state seen becomes the initial best.
        self._set_new_best(state)

    def next(self, state, **runopts):
        # Only strictly better states replace the best; ties keep the old one.
        if self.key(state) < self.key(self.best):
            self._set_new_best(state)
            self.count('new-best')

        if self.output:
            # Pass the input state through, annotated with the best-so-far value.
            return state.updated(**{self.output_key: self.best[self.input_key]})
        return state
@stoppable
class LoopUntilNoImprovement(traits.NotValidated, Runnable):
    """Iterates :class:`~hybrid.core.Runnable` for up to `max_iter` times, or
    until a state quality metric, defined by the `key` function, shows no
    improvement for at least `convergence` number of iterations. Alternatively,
    maximum allowed runtime can be defined with `max_time`, or a custom
    termination Boolean function can be given with `terminate` (a predicate
    on `key`).

    Args:
        runnable (:class:`~hybrid.core.Runnable`):
            A runnable that's looped over.

        max_iter (int/None, optional, default=None):
            Maximum number of times the `runnable` is run, regardless of other
            termination criteria. This is the upper bound. By default, an upper
            bound on the number of iterations is not set.

        convergence (int/None, optional, default=None):
            Terminates upon reaching this number of iterations with unchanged
            output. By default, convergence is not checked, so the only
            termination criteria is defined with `max_iter`. Setting neither
            creates an infinite loop.

        max_time (float/None, optional, default=None):
            Wall clock runtime termination criterion. Unlimited by default.

        key (callable/str):
            Best state is judged according to a metric defined with a `key`.
            `key` can be a `callable` with a signature::

                key :: (State s, Ord k) => s -> k

            or a string holding a key name/path to be extracted from the input
            state with `operator.attrgetter` method.

            By default, `key == operator.attrgetter('samples.first.energy')`,
            thus favoring states containing a sample with the minimal energy.

        terminate (callable, optional, default=None):
            Loop termination Boolean function (a predicate on `key` value)::

                terminate :: (Ord k) => k -> Bool
    """

    def __init__(self, runnable, max_iter=None, convergence=None,
                 max_time=None, key=None, terminate=None, **runopts):
        super(LoopUntilNoImprovement, self).__init__(**runopts)
        self.runnable = runnable
        self.max_iter = max_iter
        self.max_time = max_time
        self.convergence = convergence
        if key is None:
            key = 'samples.first.energy'
        if isinstance(key, six.string_types):
            key = attrgetter(key)
        self.key = key
        if terminate is not None and not callable(terminate):
            raise TypeError("expecting a predicate on 'key' for 'terminate'")
        self.terminate = terminate

    def __str__(self):
        return "Loop over {}".format(self.runnable)

    def __repr__(self):
        return ("{self.name}(runnable={self.runnable!r}, max_iter={self.max_iter!r}, "
                "convergence={self.convergence!r}, max_time={self.max_time!r}, "
                "key={self.key!r}, terminate={self.terminate!r})").format(self=self)

    def __iter__(self):
        return iter((self.runnable,))

    def iteration_update(self, iterno, cnt, inp, out):
        """Implement "converge on unchanging output" behavior:

          - loop `max_iter` times, but bail-out earlier if output doesn't change
            (over input) for `convergence` number of iterations
          - each iteration starts with the previous result state

        Input: relevant counters and I/O states.
        Output: next input state and next counter values
        """
        input_state, input_key = inp
        output_state, output_key = out

        if self.convergence is None:
            # Convergence not tracked: just advance the iteration counter.
            return iterno + 1, cnt, output_state

        if output_key == input_key:
            # Metric unchanged: count down towards convergence.
            cnt -= 1
        else:
            # Metric changed: restart the convergence countdown.
            cnt = self.convergence

        return iterno + 1, cnt, output_state

    def next(self, state, **runopts):
        iterno = 0
        cnt = self.convergence or 0
        input_state = state
        output_state = input_state
        # Keys stay None unless a criterion (convergence/terminate) needs them.
        input_key = None
        output_key = None

        start = time.time()

        # Run the looped-over runnable inline (in this thread).
        runopts['executor'] = immediate_executor

        # The loop also ends when `.stop()` sets the @stoppable stop_signal.
        while not self.stop_signal.is_set():
            output_state = self.runnable.run(input_state, **runopts).result()

            # Evaluate the metric only when some criterion actually uses it.
            if self.convergence or self.terminate:
                input_key = self.key(input_state)
                output_key = self.key(output_state)

                logger.info("{name} Iteration(iterno={iterno}, "
                            "input_state_key={inp}, output_state_key={out})".format(
                                name=self.name, iterno=iterno,
                                inp=input_key, out=output_key))

            iterno, cnt, input_state = self.iteration_update(
                iterno, cnt, (input_state, input_key), (output_state, output_key))

            runtime = time.time() - start

            # Check all termination criteria after each completed iteration.
            if self.max_iter is not None and iterno >= self.max_iter:
                break
            if self.max_time is not None and runtime >= self.max_time:
                break
            if self.convergence is not None and cnt <= 0:
                break
            if self.terminate is not None and self.terminate(output_key):
                break

        return output_state

    def halt(self):
        # Stop the child runnable; @stoppable's stop_signal ends our own loop.
        self.runnable.stop()
# Short public name for the default loop runnable (exported via __all__).
class Loop(LoopUntilNoImprovement):
    """Alias for :class:`LoopUntilNoImprovement`."""
class SimpleIterator(LoopUntilNoImprovement):
    """Deprecated loop runnable. Use `Loop`/`LoopUntilNoImprovement` instead."""

    def __init__(self, *args, **kwargs):
        super(SimpleIterator, self).__init__(*args, **kwargs)
        # Behavior is identical to Loop; only a deprecation warning is added.
        warnings.warn("SimpleIterator is deprecated, please use Loop instead.",
                      DeprecationWarning)
class LoopWhileNoImprovement(LoopUntilNoImprovement):
    """Iterates :class:`~hybrid.core.Runnable` until a state quality metric,
    defined by the `key` function, shows no improvement for at least `max_tries`
    number of iterations or until `max_iter` number of iterations is exceeded.
    Alternatively, maximum allowed runtime can be defined with `max_time`, or a
    custom termination Boolean function can be given with `terminate` (a
    predicate on `key`).

    Note:
        Unlike `LoopUntilNoImprovement`/`Loop`, `LoopWhileNoImprovement` will
        run the loop body runnable with the **same input** if output shows no
        improvement (up to `max_tries` times), and it will use the new output
        if it's better than the input.

    Args:
        runnable (:class:`~hybrid.core.Runnable`):
            A runnable that's looped over.

        max_iter (int/None, optional, default=None):
            Maximum number of times the `runnable` is run, regardless of other
            termination criteria. This is the upper bound. By default, an upper
            bound on the number of iterations is not set.

        max_tries (int, optional, default=None):
            Maximum number of times the `runnable` is run for the **same** input
            state. On each improvement, the better state is used for the next
            input state, and the try/trial counter is reset. Defaults to an
            infinite loop (unbounded number of tries).

        max_time (float/None, optional, default=None):
            Wall clock runtime termination criterion. Unlimited by default.

        key (callable/str):
            Best state is judged according to a metric defined with a `key`.
            `key` can be a `callable` with a signature::

                key :: (State s, Ord k) => s -> k

            or a string holding a key name/path to be extracted from the input
            state with `operator.attrgetter` method.

            By default, `key == operator.attrgetter('samples.first.energy')`,
            thus favoring states containing a sample with the minimal energy.

        terminate (callable, optional, default=None):
            Loop termination Boolean function (a predicate on `key` value)::

                terminate :: (Ord k) => k -> Bool
    """

    def __init__(self, runnable, max_iter=None, max_tries=None,
                 max_time=None, key=None, terminate=None, **runopts):
        # `max_tries` is mapped onto the parent's `convergence` countdown.
        super(LoopWhileNoImprovement, self).__init__(
            runnable=runnable, max_iter=max_iter, convergence=max_tries,
            max_time=max_time, key=key, terminate=terminate, **runopts)

    def iteration_update(self, iterno, cnt, inp, out):
        """Implement "no-improvement count-down" behavior:

          - loop indefinitely, but bail-out if there's no improvement of output
            over input for `max_tries` number of iterations
          - each iteration uses the same input state, unless there was an improvement
            in this iteration, in which case, use the current output as next input

        Input: relevant counters and I/O states.
        Output: next input state and next counter values
        """
        input_state, input_key = inp
        output_state, output_key = out

        if self.convergence is None:
            # Tries not limited: fall back to the parent-style progression.
            return iterno + 1, cnt, output_state

        if output_key >= input_key:
            # no improvement, re-use the same input
            cnt -= 1
            next_input_state = input_state
        else:
            # improvement, use the better output for next input, restart local counter
            cnt = self.convergence
            next_input_state = output_state

        return iterno + 1, cnt, next_input_state
class Unwind(traits.NotValidated, Runnable):
    """Iterates :class:`~hybrid.core.Runnable` until :exc:`.EndOfStream` is
    raised, collecting all output states along the way.

    Note:
        the child runnable is called with run option ``silent_rewind=False``,
        and it is expected to raise :exc:`.EndOfStream` on unwind completion.
    """

    def __init__(self, runnable, **runopts):
        if not isinstance(runnable, Runnable):
            raise TypeError("'runnable' is not instance of Runnable")

        super(Unwind, self).__init__(**runopts)
        self.runnable = runnable

    def __str__(self):
        return "Unwind {}".format(self.runnable)

    def __repr__(self):
        # Fixed: the format string was missing the closing parenthesis,
        # producing an unbalanced repr like "Unwind(runnable=...".
        return "{self.name}(runnable={self.runnable!r})".format(self=self)

    def __iter__(self):
        return iter((self.runnable,))

    def next(self, state, **runopts):
        output = States()
        # Run inline, and ask the child to signal exhaustion by raising
        # EndOfStream instead of silently rewinding.
        runopts.update(executor=immediate_executor, silent_rewind=False)

        logger.debug("{} unwinding {!r}".format(self.name, self.runnable))

        while True:
            try:
                state = self.runnable.run(state, **runopts).result()
                output.append(state)
            except EndOfStream:
                break

        logger.debug("{} collected {} states".format(self.name, len(output)))

        return output
@stoppable
class Wait(traits.NotValidated, Runnable):
    """Run indefinitely (effectively blocking branch execution). Has to be
    explicitly stopped.

    Example:
        To effectively exclude one branch from the race, i.e. prevent premature
        stopping of the race between the remaining branches, use :class:`.Wait`
        as the last element in a (fast-executing) racing branch::

            Race(
                Identity() | Wait(),
                InterruptableTabuSampler(),
                SimulatedAnnealingProblemSampler()
            )

        This is functionally identical to::

            Parallel(
                Identity(),
                Race(
                    InterruptableTabuSampler(),
                    SimulatedAnnealingProblemSampler()
                )
            )
    """

    def next(self, state, **runopts):
        # `stop_signal` is provided by the @stoppable decorator; block here
        # until `.stop()` is called, then pass along a copy of the input.
        self.stop_signal.wait()
        return state.updated()
class Identity(traits.NotValidated, Runnable):
    """Trivial identity runnable. The output is a direct copy of the input."""

    def next(self, state, **runopts):
        # `updated()` with no arguments yields a copy of the input state.
        return state.updated()
def InterruptableIdentity(**runopts):
    """Trivial interruptable identity runnable. The output is a direct copy of
    the input, with a distinction from :class:`.Identity` that it will halt
    until explicitly stopped (useful for example in :class:`.RacingBranches`
    to prevent short-circuiting of racing branches with the identity branch).
    """
    # A factory rather than a class: the interruptable identity is simply an
    # Identity chained with a Wait that blocks until explicitly stopped.
    passthrough = Identity(**runopts)
    blocker = Wait(**runopts)
    return passthrough | blocker
class Const(traits.NotValidated, Runnable):
    """Set state variables to constant values.

    Args:
        **consts (dict, optional):
            Mapping of state variables to constant values, as keyword arguments.

    Example:
        This example defines a workflow that resets the set of samples before a
        Tabu sampler call in order to avoid using existing samples as initial
        states. Instead, Tabu will use randomly generated initial states::

            random_tabu = Const(samples=None) | TabuProblemSampler(initial_states_generator='random')
    """

    def __init__(self, **consts):
        super(Const, self).__init__()
        # The keyword-arguments dict of constants applied to every state.
        self.consts = consts

    def next(self, state, **runopts):
        # Produce a copy of the input state with the constants overwriting
        # any existing values under the same keys.
        return state.updated(**self.consts)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from .whoosh_schema import GPS

# Build the fixture rows and serialize them to plain dicts.
# Fixed: the original instantiated "GSP", a typo for the imported "GPS" name,
# which raised NameError at import time.
data = [
    GPS(x=0, y=0, z=0, map=0),
]
data = [bm.to_dict() for bm in data]
|
"""
Contains the definition of Compound.
"""
from xdtools.artwork import Artwork
from xdtools.utils import Point
class Compound(Artwork):
    """A compound shape.

    === Attributes ===
    uid - the unique id of this Compound shape.
    name - the name of this Compound shape as it appears in the Layers panel.
    position - the position of this Compound shape.
    path - the path of this Compound shape.
    children - the children contained in this Compound shape.
    operation - the operation performed on the paths of this Compound shape.

    === Operations ===
    """

    def __init__(self, uid: int, path: str, operation: str, children=None,
                 name='Compound', x=0, y=0) -> None:
        """Instantiate a new Compound."""
        super().__init__(uid, 'compound', name)
        self.path = path
        self.operation = operation
        self.children = children if children is not None else []
        self.position = Point(x, y)

    def __repr__(self) -> str:
        """Return a constructor-style representation of this Compound."""
        template = ("Compound(uid={}, type={}, path={}, operation={}, "
                    "children={}, name={}, position={}, styles={})")
        return template.format(
            repr(self.uid), repr(self.type), repr(self.path),
            repr(self.operation), repr(self.children), repr(self.name),
            repr(self.position), repr(self.styles))
|
# eventpy library
# Copyright (C) 2020 Wang Qi (wqking)
# Github: https://github.com/wqking/eventpy
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LockGuard:
    """Scope-based lock holder: acquires on entry, releases on exit.

    Wraps any object exposing ``acquire``/``release`` (e.g. ``threading.Lock``)
    so it can be used in a ``with`` statement; ``lock()``/``unlock()`` are also
    available for manual control.
    """

    def __init__(self, lock):
        # The underlying primitive; only acquire()/release() are assumed.
        self._lock = lock

    def __enter__(self):
        self.lock()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Always release, even when the guarded block raised.
        self.unlock()

    def lock(self):
        self._lock.acquire()

    def unlock(self):
        self._lock.release()
|
# coding: utf-8
from django.template import RequestContext, TemplateSyntaxError
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.urls import NoReverseMatch, resolve
from ..utils import setup
@override_settings(ROOT_URLCONF='template_tests.urls')
class UrlTagTests(SimpleTestCase):
    """Tests for the ``{% url %}`` template tag: reversing by literal and
    variable view names, positional/keyword arguments, escaping of reversed
    URLs, failure modes, ``{% url ... as var %}`` capture, and namespace
    resolution via ``request.resolver_match`` / ``request.current_app``.

    Fixed: ``test_url12`` contained an unescaped quote inside a single-quoted
    string literal (a syntax error); the expected value is now the
    HTML-autoescaped reversed URL (compare ``test_url21``, which renders the
    same tag with autoescaping off).
    """

    # Successes

    @setup({'url01': '{% url "client" client.id %}'})
    def test_url01(self):
        output = self.engine.render_to_string('url01', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/')

    @setup({'url02': '{% url "client_action" id=client.id action="update" %}'})
    def test_url02(self):
        output = self.engine.render_to_string('url02', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')

    @setup({'url02a': '{% url "client_action" client.id "update" %}'})
    def test_url02a(self):
        output = self.engine.render_to_string('url02a', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')

    @setup({'url02b': "{% url 'client_action' id=client.id action='update' %}"})
    def test_url02b(self):
        output = self.engine.render_to_string('url02b', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')

    @setup({'url02c': "{% url 'client_action' client.id 'update' %}"})
    def test_url02c(self):
        output = self.engine.render_to_string('url02c', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')

    @setup({'url03': '{% url "index" %}'})
    def test_url03(self):
        output = self.engine.render_to_string('url03')
        self.assertEqual(output, '/')

    @setup({'url04': '{% url "named.client" client.id %}'})
    def test_url04(self):
        output = self.engine.render_to_string('url04', {'client': {'id': 1}})
        self.assertEqual(output, '/named-client/1/')

    @setup({'url05': '{% url "метка_оператора" v %}'})
    def test_url05(self):
        output = self.engine.render_to_string('url05', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')

    @setup({'url06': '{% url "метка_оператора_2" tag=v %}'})
    def test_url06(self):
        output = self.engine.render_to_string('url06', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')

    @setup({'url08': '{% url "метка_оператора" v %}'})
    def test_url08(self):
        output = self.engine.render_to_string('url08', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')

    @setup({'url09': '{% url "метка_оператора_2" tag=v %}'})
    def test_url09(self):
        output = self.engine.render_to_string('url09', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')

    @setup({'url10': '{% url "client_action" id=client.id action="two words" %}'})
    def test_url10(self):
        output = self.engine.render_to_string('url10', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/two%20words/')

    @setup({'url11': '{% url "client_action" id=client.id action="==" %}'})
    def test_url11(self):
        output = self.engine.render_to_string('url11', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/==/')

    @setup({'url12': '{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'})
    def test_url12(self):
        output = self.engine.render_to_string('url12', {'client': {'id': 1}})
        # Autoescaping is on by default, so "&" and "'" in the reversed URL
        # come back HTML-escaped.
        self.assertEqual(output, '/client/1/!$&amp;&#x27;()*+,;=~:@,/')

    @setup({'url13': '{% url "client_action" id=client.id action=arg|join:"-" %}'})
    def test_url13(self):
        output = self.engine.render_to_string('url13', {'client': {'id': 1}, 'arg': ['a', 'b']})
        self.assertEqual(output, '/client/1/a-b/')

    @setup({'url14': '{% url "client_action" client.id arg|join:"-" %}'})
    def test_url14(self):
        output = self.engine.render_to_string('url14', {'client': {'id': 1}, 'arg': ['a', 'b']})
        self.assertEqual(output, '/client/1/a-b/')

    @setup({'url15': '{% url "client_action" 12 "test" %}'})
    def test_url15(self):
        output = self.engine.render_to_string('url15')
        self.assertEqual(output, '/client/12/test/')

    @setup({'url18': '{% url "client" "1,2" %}'})
    def test_url18(self):
        output = self.engine.render_to_string('url18')
        self.assertEqual(output, '/client/1,2/')

    @setup({'url19': '{% url named_url client.id %}'})
    def test_url19(self):
        output = self.engine.render_to_string(
            'url19', {'client': {'id': 1}, 'named_url': 'client'}
        )
        self.assertEqual(output, '/client/1/')

    @setup({'url20': '{% url url_name_in_var client.id %}'})
    def test_url20(self):
        output = self.engine.render_to_string('url20', {'client': {'id': 1}, 'url_name_in_var': 'named.client'})
        self.assertEqual(output, '/named-client/1/')

    @setup({'url21': '{% autoescape off %}'
                     '{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'
                     '{% endautoescape %}'})
    def test_url21(self):
        output = self.engine.render_to_string('url21', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/!$&\'()*+,;=~:@,/')

    # Failures

    @setup({'url-fail01': '{% url %}'})
    def test_url_fail01(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail01')

    @setup({'url-fail02': '{% url "no_such_view" %}'})
    def test_url_fail02(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail02')

    @setup({'url-fail03': '{% url "client" %}'})
    def test_url_fail03(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail03')

    @setup({'url-fail04': '{% url "view" id, %}'})
    def test_url_fail04(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail04')

    @setup({'url-fail05': '{% url "view" id= %}'})
    def test_url_fail05(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail05')

    @setup({'url-fail06': '{% url "view" a.id=id %}'})
    def test_url_fail06(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail06')

    @setup({'url-fail07': '{% url "view" a.id!id %}'})
    def test_url_fail07(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail07')

    @setup({'url-fail08': '{% url "view" id="unterminatedstring %}'})
    def test_url_fail08(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail08')

    @setup({'url-fail09': '{% url "view" id=", %}'})
    def test_url_fail09(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail09')

    @setup({'url-fail11': '{% url named_url %}'})
    def test_url_fail11(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail11')

    @setup({'url-fail12': '{% url named_url %}'})
    def test_url_fail12(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail12', {'named_url': 'no_such_view'})

    @setup({'url-fail13': '{% url named_url %}'})
    def test_url_fail13(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail13', {'named_url': 'template_tests.views.client'})

    @setup({'url-fail14': '{% url named_url id, %}'})
    def test_url_fail14(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail14', {'named_url': 'view'})

    @setup({'url-fail15': '{% url named_url id= %}'})
    def test_url_fail15(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail15', {'named_url': 'view'})

    @setup({'url-fail16': '{% url named_url a.id=id %}'})
    def test_url_fail16(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail16', {'named_url': 'view'})

    @setup({'url-fail17': '{% url named_url a.id!id %}'})
    def test_url_fail17(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail17', {'named_url': 'view'})

    @setup({'url-fail18': '{% url named_url id="unterminatedstring %}'})
    def test_url_fail18(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail18', {'named_url': 'view'})

    @setup({'url-fail19': '{% url named_url id=", %}'})
    def test_url_fail19(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail19', {'named_url': 'view'})

    # {% url ... as var %}

    @setup({'url-asvar01': '{% url "index" as url %}'})
    def test_url_asvar01(self):
        output = self.engine.render_to_string('url-asvar01')
        self.assertEqual(output, '')

    @setup({'url-asvar02': '{% url "index" as url %}{{ url }}'})
    def test_url_asvar02(self):
        output = self.engine.render_to_string('url-asvar02')
        self.assertEqual(output, '/')

    @setup({'url-asvar03': '{% url "no_such_view" as url %}{{ url }}'})
    def test_url_asvar03(self):
        # "as var" form swallows reversal failure and yields an empty string.
        output = self.engine.render_to_string('url-asvar03')
        self.assertEqual(output, '')

    @setup({'url-namespace01': '{% url "app:named.client" 42 %}'})
    def test_url_namespace01(self):
        request = RequestFactory().get('/')
        request.resolver_match = resolve('/ns1/')
        template = self.engine.get_template('url-namespace01')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns1/named-client/42/')

    @setup({'url-namespace02': '{% url "app:named.client" 42 %}'})
    def test_url_namespace02(self):
        request = RequestFactory().get('/')
        request.resolver_match = resolve('/ns2/')
        template = self.engine.get_template('url-namespace02')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')

    @setup({'url-namespace03': '{% url "app:named.client" 42 %}'})
    def test_url_namespace03(self):
        request = RequestFactory().get('/')
        template = self.engine.get_template('url-namespace03')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')

    @setup({'url-namespace-no-current-app': '{% url "app:named.client" 42 %}'})
    def test_url_namespace_no_current_app(self):
        request = RequestFactory().get('/')
        request.resolver_match = resolve('/ns1/')
        request.current_app = None
        template = self.engine.get_template('url-namespace-no-current-app')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')

    @setup({'url-namespace-explicit-current-app': '{% url "app:named.client" 42 %}'})
    def test_url_namespace_explicit_current_app(self):
        request = RequestFactory().get('/')
        request.resolver_match = resolve('/ns1/')
        request.current_app = 'app'
        template = self.engine.get_template('url-namespace-explicit-current-app')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')
|
# A linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.
# Return a deep copy of the list.
# Definition for singly-linked list with a random pointer.
class RandomListNode:
    """Singly-linked list node carrying an extra ``random`` pointer.

    ``next`` and ``random`` start out unset; callers wire them up afterwards.
    """

    def __init__(self, x):
        self.label, self.next, self.random = x, None, None
class Solution:
    # @param head, a RandomListNode
    # @return a RandomListNode
    def copyRandomList(self, head):
        """Deep-copy a linked list whose nodes carry an extra random pointer.

        Works in three passes without a hash map by temporarily hijacking the
        originals' ``random`` pointers:
          1. Build the copied ``next`` chain; stash each original's random in
             its copy and point the original's random at its copy.
          2. Walk the copies: redirect each copy's random to the *copy* of the
             original target, while recording the original targets in a scratch
             list (``dupHead``).
          3. Restore the originals' random pointers from the scratch list.

        Fixed: a stray table-delimiter character after the final ``return``
        statement (an extraction artifact) made the block unparsable.
        """
        if not head:
            return
        # Pass 1: copy the next-chain; cur.random -> its copy, copy.random ->
        # cur's original random target.
        cur = head
        newCur = newHead = RandomListNode(cur.label)
        while cur:
            newCur.random = cur.random
            cur.random = newCur
            cur = cur.next
            if cur:
                newCur.next = RandomListNode(cur.label)
                newCur = newCur.next
        # Pass 2: fix up the copies' random pointers; the scratch list keeps
        # the original targets so they can be restored afterwards.
        newCur = newHead
        dupCur = dupHead = RandomListNode(head.label)
        while newCur:
            if newCur.random:
                dupCur.random = newCur.random
                # original target's random currently points at its copy
                newCur.random = newCur.random.random
            dupCur.next = RandomListNode(0)
            newCur, dupCur = newCur.next, dupCur.next
        # Pass 3: restore the originals' random pointers.
        cur, dupCur = head, dupHead
        while cur:
            cur.random = dupCur.random
            cur, dupCur = cur.next, dupCur.next
        return newHead
from setuptools import setup, find_packages, findall

# The PyPI long description comes straight from the README.
with open('README.md', 'r', encoding='utf-8') as readme:
    long_description = readme.read()

# Ship the nonebot package itself plus its type-stub (.pyi) files.
packages = find_packages(include=('nonebot', 'nonebot.*'))
stub_files = [path for path in findall('nonebot') if path.endswith('.pyi')]

setup(
    name='nonebot',
    version='1.2.3',
    url='https://github.com/richardchien/nonebot',
    license='MIT License',
    author='Richard Chien',
    author_email='richardchienthebest@gmail.com',
    description='An asynchronous QQ bot framework based on CoolQ.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=packages,
    data_files=stub_files,
    install_requires=['aiocqhttp>=0.6.7', 'aiocache>=0.10', 'apscheduler', 'sqlalchemy', 'baidu-aip', 'jieba', 'requests',
                      'mysql-connector'],
    extras_require={
        'scheduler': ['apscheduler>=1.2'],
    },
    python_requires='>=3.6.1',
    platforms='any',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: Robot Framework',
        'Framework :: Robot Framework :: Library',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
|
#!/usr/bin/python
from UcsSdk import *

# This script shows how to create and use the filter in UCS Manager method "ConfigResolveClass".
# NOTE(review): Python 2 syntax throughout (print statements, ``except Exception, err``);
# this script will not run under Python 3.

if __name__ == "__main__":
    try:
        # Log in to the UCS Manager endpoint (placeholder host and credentials).
        handle = UcsHandle()
        handle.Login("0.0.0.0", "username", "password")

        # Build a filter tree: FilterFilter -> AndFilter -> AndFilter holding
        # the individual property predicates, all against class "lsServer".
        inFilter = FilterFilter()
        andFilter0 = AndFilter()
        andFilter1 = AndFilter()

        # Predicate: name == "sp_name"
        eqFilter = EqFilter()
        eqFilter.Class = "lsServer"
        eqFilter.Property = "name"
        eqFilter.Value = "sp_name"
        andFilter1.AddChild(eqFilter)

        # Predicate: type == "instance"
        eqFilter = EqFilter()
        eqFilter.Class = "lsServer"
        eqFilter.Property = "type"
        eqFilter.Value = "instance"
        andFilter1.AddChild(eqFilter)

        # Predicate: owner matches the regex (case-insensitive "man...").
        wcardFilter = WcardFilter()
        wcardFilter.Class = "lsServer"
        wcardFilter.Property = "owner"
        wcardFilter.Value = "^[mM][aA][nN].*$"
        andFilter1.AddChild(wcardFilter)

        # Predicate on dn with value "org-B".
        # NOTE(review): AnybitFilter semantics come from the UCS SDK; confirm
        # against the SDK docs how it matches the "dn" property.
        anybitFilter = AnybitFilter()
        anybitFilter.Class = "lsServer"
        anybitFilter.Property = "dn"
        anybitFilter.Value = "org-B"
        andFilter1.AddChild(anybitFilter)

        andFilter0.AddChild(andFilter1)
        inFilter.AddChild(andFilter0)

        # Resolve matching lsServer objects and dump them to stdout.
        crc = handle.ConfigResolveClass("lsServer", inFilter, YesOrNo.FALSE, YesOrNo.TRUE)
        WriteObject(crc)

        handle.Logout()
    except Exception, err:
        print "Exception:", str(err)
        import traceback, sys
        print '-'*60
        traceback.print_exc(file=sys.stdout)
        print '-'*60
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-24 02:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the explicit ``id`` field from the ``language`` model."""

    # Must run after the previous auto-generated resume migration.
    dependencies = [
        ('resume', '0008_auto_20170423_2111'),
    ]

    # NOTE(review): dropping ``id`` presumably accompanies a new primary key
    # defined elsewhere in this migration series — confirm against the model.
    operations = [
        migrations.RemoveField(
            model_name='language',
            name='id',
        ),
    ]
|
# Generated by Django 3.0.4 on 2020-03-31 08:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add two composite indexes to the ``userevent`` model."""

    dependencies = [
        ('user_event_log', '0002_update_type'),
    ]

    # Composite indexes over (type, adviser) and (timestamp, type, adviser);
    # index names are fixed so the migration is reversible/deterministic.
    operations = [
        migrations.AddIndex(
            model_name='userevent',
            index=models.Index(fields=['type', 'adviser'], name='user_event__type_6deef3_idx'),
        ),
        migrations.AddIndex(
            model_name='userevent',
            index=models.Index(fields=['timestamp', 'type', 'adviser'], name='user_event__timesta_957ef8_idx'),
        ),
    ]
|
# Python script to generate metadata for Moffitt datasets
import os
import sys
from glob import glob
from shutil import copyfile
import pydicom
# Collection selector: uncomment exactly one COLLECTION_NAME.
# 1)
# COLLECTION_NAME = "Sample_Mammography_Reference_Set"
# 2)
COLLECTION_NAME = "Automated_System_For_Breast_Cancer_Biomarker_Analysis"
# Archive (image data) and metadata roots for this collection, resolved from
# the LabCAS environment (raises KeyError at import time if unset).
DATA_DIR=os.environ['LABCAS_ARCHIVE'] + "/" + COLLECTION_NAME
METADATA_DIR=os.environ['LABCAS_METADATA'] + "/" + COLLECTION_NAME
INSTITUTION = "Moffitt"
def main():
    """Create a LabCAS metadata .cfg file for every dataset sub-directory.

    Each sub-directory of DATA_DIR is one dataset; a metadata file is produced
    from the METADATA_DIR template unless one already exists.
    """
    for entry in os.listdir(DATA_DIR):
        dataset_id = f"{COLLECTION_NAME}/{entry}"
        print(f"Processing sub-directory={entry}, dataset_id={dataset_id}")

        dataset_dir = f"{DATA_DIR}/{entry}"
        template_file = f"{METADATA_DIR}/TEMPLATE_Moffitt.cfg"
        metadata_file = f"{dataset_dir}/{entry}.cfg"

        # Never overwrite an existing metadata file.
        if os.path.exists(metadata_file):
            continue
        print(f'Creating dataset metadata file: {metadata_file}')

        with open(template_file) as fh:
            contents = fh.read()

        # Fill in the template placeholders.
        contents = contents.replace("subdir", entry)
        # The first character of the directory name encodes the patient kind.
        prefix = entry[0]
        if prefix == 'D':
            dataset_name = f'Dummy patient #{entry[1:]} ({INSTITUTION})'
        elif prefix == 'C':
            dataset_name = f'Case #{entry[1:]} (unilateral breast cancer)'
        elif prefix == 'N':
            dataset_name = f'Case #{entry[1:]} (control)'
        else:
            dataset_name = f'Patient #{entry[1:]} ({INSTITUTION})'
        contents = contents.replace("DATASET_ID", dataset_id)
        contents = contents.replace("DATASET_NAME", dataset_name)
        contents = contents.replace("DATASET_DESCRIPTION", dataset_name + " mammography images")

        with open(metadata_file, 'w') as fh:
            fh.write(contents)


if __name__ == "__main__":
    main()
|
# Ask for the user's birth year and report their status relative to the
# Brazilian mandatory military-enlistment age of 18 (reference year 2021).
birth_year = int(input('Em que ano você nasceu? '))
age = 2021 - birth_year
if age == 18:
    print('Você já deve se alistar.')
elif age < 18:
    years_left = 18 - age
    print(f'Ainda não chegou seu tempo, faltam {years_left} anos')
else:
    years_over = age - 18
    print(f'Passou do tempo, exatamente {years_over} ano(s)')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 24 09:38:23 2017
@author: yuhao
"""
import numpy as np
import scipy as sp
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.gaussian_process.kernels import (Matern, RationalQuadratic,
ExpSineSquared, DotProduct)
from skopt import Optimizer
from matplotlib import pyplot as pl
import subprocess
import sys
from pexpect import pxssh
import getpass
import time
def generateinput(inputfilename, parameterlist):
    """Write a simulation input file by substituting parameters into template.inp.

    Args:
        inputfilename: path of the input file to create.
        parameterlist: sequence of at least 9 values — [0:6] are the STRUCTPARA
            structure parameters, [6] the magnetic field, [7]/[8] the cubic and
            tetragonal anisotropy constants.

    Lines of the template that match no known keyword are copied through
    unchanged. Fixed: file handles were closed manually and leaked on any
    exception; both are now managed by ``with``.
    """
    with open("template.inp", "r") as templatefile, \
         open(inputfilename, "w") as inputfile:
        for line in templatefile:
            if "STRUCTPARA" in line:
                line = "STRUCTPARA " + ' '.join(map(str, parameterlist[0:6])) + "\n"
            elif "MagField" in line:
                line = "MagField " + str(parameterlist[6]) + "\n"
            elif "Kaniscub " in line:
                line = "Kaniscub " + str(parameterlist[7]) + "\n"
            elif "Kanistet " in line:
                line = "Kanistet " + str(parameterlist[8]) + "\n"
            inputfile.write(line)
def readinputfile(filename):
    """Parse the parameters back out of a simulation input file.

    Returns the parameters as a list of strings: the six STRUCTPARA values
    followed by MagField, Kaniscub and Kanistet (in file order for the three
    appended keywords).

    Fixed: the file handle was never closed (now a ``with`` block), and
    ``parameterlist`` raised NameError when no STRUCTPARA line preceded the
    other keywords — it now starts as an empty list.
    """
    parameterlist = []
    with open(filename, "r") as inputfile:
        for line in inputfile:
            # Fields are space-separated; [:-1] drops the trailing newline.
            fields = line[:-1].split(" ")
            if "STRUCTPARA" in line:
                parameterlist = fields[1:]
            elif "MagField" in line:
                parameterlist.append(fields[1])
            elif "Kaniscub " in line:
                parameterlist.append(fields[1])
            elif "Kanistet " in line:
                parameterlist.append(fields[1])
    return parameterlist
def readoutputfile(filename):
    """Parse a simulation output file into heating and cooling strain curves.

    Scans for an "Entropy" marker line, then runs a small state machine over
    the tab-separated ``temperature<TAB>strain`` data lines:
      flag 1 -- collect the heating branch for temperatures >= 200, until 400;
      flag 2 -- skip until the 410 line has been seen twice;
      flag 3 -- collect the cooling branch until temperature 200;
      flag 4 -- done.

    Returns ``[heat, cool]`` with ``cool`` reversed into the same
    (ascending-temperature) order as ``heat``.

    Fixed: the file handle was never closed; it is now managed by ``with``.
    """
    heat = []
    cool = []
    flag = 0
    count = 0
    with open(filename, "r") as outputfile:
        for line in outputfile:
            if flag == 1:
                strains = line.split("\t")
                strain = [float(i) for i in strains]
                if strain[0] >= 200:
                    heat.append(strain[1])
                if strain[0] == 400:
                    flag = 2
            elif flag == 2:
                strains = line.split("\t")
                strain = [float(i) for i in strains]
                if strain[0] == 410:
                    count += 1
                    if count == 2:
                        flag = 3
            elif flag == 3:
                strains = line.split("\t")
                strain = [float(i) for i in strains]
                cool.append(strain[1])
                if strain[0] == 200:
                    flag = 4
            elif "Entropy" in line:
                flag = 1
    cool.reverse()
    return [heat, cool]
def checkstrain(X,y,ii):
    """Score simulated strain curves against idealized step-function targets.

    For each of the first ``ii`` completed runs, reads the run's input
    parameters (appending a 4-value subset to ``X``) and its simulated
    heating/cooling curves, then appends to ``y`` a trapezoid-style L1
    distance between the curves and step targets that switch at the
    transition temperatures in ``tt``. Mutates ``X`` and ``y`` in place;
    returns None.
    """
    T=[]
    # Material labels used to derive input/output file names; tt holds the
    # (heating, cooling) transition temperatures for the step targets.
    name=["NiCoMnIn","NiCoMnInH1","NiCoMnInH7"]
    epsheat=[0,0,0]
    epscool=[0,0,0]
    tt=[[260,240],[250,220],[0,0]]
    # Replace the placeholder zeros with per-material target lists.
    for i in range(len(name)):
        epsheat[i]=[]
        epscool[i]=[]
    # Build the step targets on the 200..280 K grid (17 points, 5 K apart):
    # 1 below the transition temperature, 0 at/above it.
    for jj in range(len(name)):
        for i in range(17):
            T.append(i*5+200)
            t=i*5+200
            if (t<tt[jj][0]):
                epsheat[jj].append(1)
            else:
                epsheat[jj].append(0)
            if(t<tt[jj][1]):
                epscool[jj].append(1)
            else:
                epscool[jj].append(0)
    # Score each completed run against the targets.
    for i in range(ii):
        yy=0
        for ij in range(len(name)):
            inputfilename=name[ij]+".inp."+str(i+1)
            outputfilename=name[ij]+"."+str(i+1)+".out"
            parameters=readinputfile(inputfilename)
            # NOTE(review): this comprehension shadows the outer loop variable i.
            parameterlist=[float(i) for i in parameters]
            # Record the tuned parameter subset once per run (first material).
            if(ij==0): X.append([parameterlist[0],parameterlist[1],parameterlist[4],parameterlist[5]])
            cal=readoutputfile(outputfilename)
            heat=cal[0]
            cool=cal[1]
            yyy=0
            # Trapezoid-rule accumulation of |simulated - target| over both branches.
            for j in range(len(heat)-1):
                yyy+=abs(heat[j]+heat[j+1]-epsheat[ij][j]-epsheat[ij][j+1])*5/2+abs(cool[j]+cool[j+1]-epscool[ij][j]-epscool[ij][j+1])*5/2
            yyy=yyy#*abs(deleps-delhc)
            yy+=yyy
        y.append(yy)
    return
def checkmag(X,y,ii):
    """Score simulated strain curves against measured magnetostrain data.

    Loads the experimental curves (0.05 T, 1 T, 7 T text files), bins them
    onto a 200..400 K grid (21 points, 10 K apart) by averaging readings near
    each grid temperature, then for each of the first ``ii`` completed runs
    appends the run's 8 tuned parameters to ``X`` and a trapezoid-style L1
    misfit (simulation scaled by ``factor``) to ``y``. Mutates ``X`` and
    ``y`` in place.
    """
    # Scale factor applied to the simulated strain before comparison.
    factor=35
    nameexp=["0.05T.txt","1T.txt","7T.txt"]
    # Raw experimental readings and their per-grid-point means.
    # NOTE(review): only two heating lists vs three cooling lists — the 7 T
    # file supplies 2-column (cooling-only) rows; confirm against the data.
    epsheat=[[],[]]
    epsmeanheat=[[],[]]
    epscool=[[],[],[]]
    epsmeancool=[[],[],[]]
    pos=0
    # Parse each experimental file: 4-column rows carry (cool T, cool eps,
    # heat T, heat eps); 2-column rows carry cooling data only.
    for i in nameexp:
        expfile=open(i,"r")
        for line in expfile:
            spl=line[:-1].split(" ")
            if(i=="7T.txt"):
                data=[float(i) for i in spl[0:2]]
            else:
                data=[float(i) for i in spl]
            if (len(data))==4:
                epscool[pos].append([data[0],data[1]])
                epsheat[pos].append([data[2],data[3]])
            else:
                epscool[pos].append([data[0],data[1]])
        expfile.close()
        # Cooling data is recorded with decreasing temperature; flip it.
        epscool[pos].reverse()
        pos+=1
    # Comparison grid: 200, 210, ..., 400 K.
    temp=[10*i+200 for i in range(21)]
    epsheat=np.array(epsheat)
    epscool=np.array(epscool)
    # Bin the cooling readings: average all samples whose temperature falls
    # in a small window around each grid point.
    for nn,i in enumerate(epscool):
        start=0;
        end=0;
        iters=0;
        for num,d in enumerate(i):
            tempdata=temp[iters]
            if (d[0]>=tempdata-0.6 and start==0):
                start=num
            if (d[0]>tempdata-0.1 and end==0):
                end=num
            if(start!=0 and end !=0):
                epsmeancool[nn].append(np.mean(i[start:end,1]))
                iters+=1
                start=0
                end=0
            if(abs(d[0]-temp[-1])<0.1): break
    # Same binning for the heating readings.
    for nn,i in enumerate(epsheat):
        start=0;
        end=0;
        iters=0;
        for num,d in enumerate(i):
            tempdata=temp[iters]
            if (d[0]>=tempdata-0.6 and start==0):
                start=num
            if (d[0]>tempdata-0.1 and end==0):
                end=num
            if(start!=0 and end !=0):
                epsmeanheat[nn].append(np.mean(i[start:end,1]))
                iters+=1
                start=0
                end=0
            if(abs(d[0]-temp[-1])<0.1): break
    name=["NiCoMnIn","NiCoMnInH1","NiCoMnInH7"]
    # Score each completed simulation run against the binned experiment.
    for i in range(ii):
        yy=0
        for ij in range(len(name)):
            inputfilename=name[ij]+".inp."+str(i+1)
            outputfilename=name[ij]+"."+str(i+1)+".out"
            # parameterlist: J, K, Uc, Ut, K1(U1),U2, MagField,Kaniscub,Kanistet
            parameters=readinputfile(inputfilename)
            parameterlist=[float(i) for i in parameters]
            # Record the 8 tuned parameters once per run (MagField excluded).
            if(ij==0): X.append(parameterlist[0:6]+parameterlist[7:9])
            cal=readoutputfile(outputfilename)
            heat=[i*factor for i in cal[0]]
            cool=[i*factor for i in cal[1]]
            yyy=0
            # 7 T has no heating data, so only the cooling branch is scored there.
            if (name[ij]!="NiCoMnInH7"):
                for j in range(len(heat)-1):
                    yyy+=abs(heat[j]+heat[j+1]-epsmeanheat[ij][j]-epsmeanheat[ij][j+1])*10/2+abs(cool[j]+cool[j+1]-epsmeancool[ij][j]-epsmeancool[ij][j+1])*10/2
                yyy=yyy
                yy+=yyy
            else:
                for j in range(len(heat)-1):
                    yyy+=abs(cool[j]+cool[j+1]-epsmeancool[ij][j]-epsmeancool[ij][j+1])*10/2
                yyy=yyy
                yy+=yyy
        # Normalize the combined misfit by the curve length.
        y.append(yy/(2*len(heat)))
# ---------------------------------------------------------------------------
# Bayesian-optimization driver: fit a GP surrogate to the misfit produced by
# checkmag(), then loop skopt ask/tell, launching the external simulator
# ("./yuhao_new_Magstr") for each proposed parameter set and waiting for its
# three output files (0.05 T, 1 T, 7 T) to finish before re-scoring.
# ---------------------------------------------------------------------------
ii=1 #number of outputfiles at first
X=[]
y=[]
checkmag(X,y,ii)
np.random.seed(1)
# Surrogate kernel: constant-scaled anisotropic RBF over the 8 parameters
# plus a constant-scaled periodic (ExpSineSquared) component.
kernel=C(1.0, (1e-5, 1e5))*RBF([1,1,1,1,1,1,1,1], (0.01, 9))+C(1.0, (1e-5, 1e5)) * ExpSineSquared(length_scale=1000, periodicity=50,
                                                                                                 length_scale_bounds=(0.01, 10),
                                                                                                 periodicity_bounds=(0.01, 1e5))
gp = GaussianProcessRegressor(kernel=kernel,n_restarts_optimizer=100,normalize_y=True)
# Search bounds; degenerate (lo == hi) pairs pin that parameter to a constant.
bounds=[(4.5, 4.5),(2.35,2.35),(0,2),(0,2),(-2.5,-2.5),(-0.3,-0.3),(-0.012,-0.005),(-0.2,-0.1)]
# J,K,Uc,Ut,U1,U2,anisc,anist
opt = Optimizer(bounds, gp,n_random_starts=100,acq_func="EI",acq_optimizer="sampling",acq_optimizer_kwargs={"n_points":9999999999,"n_restarts_optimizer":100})
#opt.n_points=999999999
# Seed the optimizer with the already-evaluated run(s).
for i in range(ii):
    opt.tell(X[i],y[i])
maxEI=[]
for count in range(500):
    X=np.array(X)
    print('X combo: ')
    print(X)
    y=np.array(y)
    print ('ymin combo')
    print (X[np.argmin(y)])
    print (np.argmin(y))
    # Feed the most recent evaluation back (iteration 0 was told above).
    if(count>0):opt.tell(X[-1],y[-1])
    print(X[-1])
    next_x=opt.ask()
    print("next_x")
    print (next_x)
    X=np.append(X,next_x)
    X=np.reshape(X,(int(len(X)/8),8))
    pp=next_x
    # One parameter file per applied magnetic field (0.05 T, 1 T, 7 T).
    parameterlist=[[pp[0], pp[1], pp[2], pp[3], pp[4],pp[5], 0.05, pp[6],pp[7]],
                   [pp[0], pp[1], pp[2], pp[3], pp[4],pp[5], 1, pp[6], pp[7]],
                   [pp[0], pp[1], pp[2], pp[3], pp[4],pp[5], 7, pp[6], pp[7]]]
    inputfilename=["NiCoMnIn.inp."+str(ii+1),"NiCoMnInH1.inp."+str(ii+1),"NiCoMnInH7.inp."+str(ii+1)]
    for iii in range(len(inputfilename)):
        generateinput(inputfilename[iii],parameterlist[iii])
    # Launch the three simulations concurrently.
    commands = [
        "./yuhao_new_Magstr "+inputfilename[0],
        "./yuhao_new_Magstr "+inputfilename[1],
        "./yuhao_new_Magstr "+inputfilename[2],
        ]
    processes = [subprocess.Popen(cmd, shell=True,stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) for cmd in commands]
    # do other things here..
    # wait for completion
    for p in processes: p.wait()
    outputfilename=["NiCoMnIn."+str(ii+1)+".out","NiCoMnInH1."+str(ii+1)+".out","NiCoMnInH7."+str(ii+1)+".out"]
    # Poll until all three output files exist and contain the END marker.
    while True:
        flag=0
        for n in outputfilename:
            while True:
                try:
                    checkoutput=open(n,'r')
                    break
                except FileNotFoundError:
                    print("Oops! No file keep trying")
                    time.sleep(5)
            lines=checkoutput.readlines()
            #print("read "+n)
            if ('END\n' in lines):flag+=1
        if (flag==3):break
        else: time.sleep(1000)
    ii+=1
    # Reset and re-score all completed runs on the next pass.
    X=[]
    y=[]
checkmag(X,y,ii) |
"""
Utils for language models.
from https://github.com/litian96/FedProx/blob/master/flearn/utils/language_utils.py
"""
import re
import numpy as np
from collections import Counter
# ------------------------
# utils for shakespeare dataset
ALL_LETTERS = "\n !\"&'(),-.0123456789:;>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz}"
NUM_LETTERS = len(ALL_LETTERS)
def _one_hot(index, size):
'''returns one-hot vector with given size and value 1 at given index
'''
vec = [0 for _ in range(size)]
vec[int(index)] = 1
return vec
def letter_to_vec(letter):
index = ALL_LETTERS.find(letter)
return index
def word_to_indices(word):
'''returns a list of character indices
Arguments:
word: string
:returns:
indices: int list with length len(word)
'''
indices = []
for c in word:
indices.append(ALL_LETTERS.find(c))
return indices
# ------------------------
# utils for sent140 dataset


def split_line(line):
    '''split given line/phrase into list of words

    Arguments:
        line: string representing phrase to be split
    :returns:
        list of strings: word-like tokens plus the punctuation . , ! ? ;
    '''
    token_pattern = r"[\w']+|[.,!?;]"
    return re.findall(token_pattern, line)


def bag_of_words(line, vocab):
    '''returns bag of words representation of given phrase using given vocab

    Arguments:
        line: string representing phrase to be parsed
        vocab: dictionary with words as keys and indices as values
    :returns:
        integer list of length len(vocab); out-of-vocabulary words are ignored
    '''
    bag = [0] * len(vocab)
    for token in split_line(line):
        slot = vocab.get(token)
        if slot is not None:
            bag[slot] += 1
    return bag
def target_to_binary(label):
    """Collapse a label to binary: 1 when the label equals 1, else 0."""
    return 1 if label == 1 else 0
def token_to_ids(texts, vocab):
    """Map tokenized lines to an ndarray of vocabulary ids.

    Raises KeyError for any token missing from ``vocab``.
    """
    ids = [[vocab[token] for token in line] for line in texts]
    return np.array(ids)
def label_to_index(labels):
    """Re-code labels as dense indices ordered by descending frequency.

    The most frequent label maps to 0, the next to 1, and so on (ties keep
    first-appearance order, since Python's sort is stable).
    """
    ordering = [lbl for lbl, _ in
                sorted(Counter(labels).items(), key=lambda kv: kv[1], reverse=True)]
    index_of = {lbl: pos for pos, lbl in enumerate(ordering)}
    return [index_of[lbl] for lbl in labels]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.