| code (string, 2-1.05M chars) | repo_name (string, 5-104) | path (string, 4-251) | language (1 class) | license (15 values) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
import click
import newsfeeds
import random
import sys
from config import GlobalConfig
def mixer(full_story_list, sample_number):
"""Selects a random sample of stories from the full list to display to the user.
Number of stories is set in config.py
Todo: Add argument support for number of stories to display
"""
    mixed_story_list = random.sample(list(set(full_story_list)), sample_number)  # set() deduplicates; sample() needs a sequence
return mixed_story_list
def default_display(list_of_stories):
"""Displays a set of stories in the following format:
n - Story Title -- OutletName -- Section
Story abstract: Lorem ipsum dolor sit amet
"""
index_num = 0
for story in list_of_stories:
index_num += 1
click.secho('%r - ' % index_num, bold=True, nl=False)
click.secho('%s ' % story.title, fg=option.headline_color, bold=True, nl=False)
click.secho('-- %s -- ' % story.source, fg=option.source_color, bold=True, nl=False)
click.secho('%s' % story.section, fg=option.section_color)
click.secho('Story abstract: %s' % story.abstract, fg=option.abstract_color)
click.echo()
if index_num > 0:
        exit_now = False
        while not exit_now:
click.secho("Select an index number to go to story, or [Enter] to exit: ", fg=option.prompt_color, bold=True, nl=False)
raw_selection = input()
if raw_selection.isdigit():
selection = int(raw_selection) - 1
                if 0 <= selection <= index_num - 1:
story = mixed_story_list[selection]
click.launch(story.url)
                    if not option.prompt_until_exit:
                        return
else:
click.secho("Invalid entry", fg='red')
                    if not option.prompt_until_exit:
                        return
elif raw_selection == '':
                return
else:
click.secho("Invalid entry", fg='red')
                if not option.prompt_until_exit:
                    return
else:
click.secho("No recent headlines to display", fg=option.prompt_color, bold=True, nl=False)
click.echo()
def main():
global option
option = GlobalConfig()
click.echo("Loading the news...")
story_list = newsfeeds.feeder()
global exit_now
exit_now = False
click.clear()
global mixed_story_list
mixed_story_list = mixer(story_list, option.article_limit)
default_display(mixed_story_list)
if __name__ == '__main__':
main()
| haaspt/whatsnew | main.py | Python | mit | 2,815 |
#!/usr/bin/python3
from mymodule import *
sayhi()
# __version__ is not imported by 'from mymodule import *'
# print('Version: ', __version__)
| louistin/thinkstation | a_byte_of_python/unit_9_module/mymodule_demo3.py | Python | mit | 114 |
# The MIT License (MIT)
#
# Copyright (c) 2016 WUSTL ZPLAB
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors: Erik Hvatum <ice.rikh@gmail.com>
from PyQt5 import Qt
from ..shared_resources import UNIQUE_QGRAPHICSITEM_TYPE
class PointItem(Qt.QGraphicsRectItem):
    # Omitting .type() or failing to return a unique value causes PyQt to return a wrapper of the wrong type when retrieving an instance of this item as a base
# class pointer from C++. For example, if this item has a child and that child calls self.parentItem(), it would receive a Python object of type
# Qt.QGraphicsRectItem rather than PointItem unless PointItem has a correct .type() implementation.
QGRAPHICSITEM_TYPE = UNIQUE_QGRAPHICSITEM_TYPE()
def __init__(self, picker, x, y, w, h, parent_item):
super().__init__(x, y, w, h, parent_item)
self.picker = picker
flags = self.flags()
self.setFlags(
flags |
Qt.QGraphicsItem.ItemIsFocusable | # Necessary in order for item to receive keyboard events
Qt.QGraphicsItem.ItemIsSelectable |
Qt.QGraphicsItem.ItemIsMovable |
Qt.QGraphicsItem.ItemSendsGeometryChanges # Necessary in order for .itemChange to be called when item is moved
)
def itemChange(self, change, value):
if change == Qt.QGraphicsItem.ItemPositionHasChanged:
self.picker.point_item_position_has_changed.emit(self)
return super().itemChange(change, value)
def keyPressEvent(self, event):
if event.key() == Qt.Qt.Key_Delete and event.modifiers() == Qt.Qt.NoModifier:
self.picker.delete_selected()
def type(self):
return self.QGRAPHICSITEM_TYPE
# NB: deriving from Qt.QGraphicsObject is necessary in order to be a scene event filter target
class SimplePointPicker(Qt.QGraphicsObject):
"""ex:
from ris_widget.ris_widget import RisWidget
from ris_widget.examples.simple_point_picker import SimplePointPicker
rw = RisWidget()
simple_point_picker = SimplePointPicker(rw.main_view, rw.main_scene.layer_stack_item)"""
QGRAPHICSITEM_TYPE = UNIQUE_QGRAPHICSITEM_TYPE()
point_item_position_has_changed = Qt.pyqtSignal(PointItem)
point_item_list_content_reset = Qt.pyqtSignal()
def __init__(self, general_view, parent_item, points=None):
super().__init__(parent_item)
self.view = general_view
self.view.viewport_rect_item.size_changed.connect(self.on_viewport_size_changed)
self.point_items = []
self.pen = Qt.QPen(Qt.Qt.red)
self.pen.setWidth(2)
color = Qt.QColor(Qt.Qt.yellow)
color.setAlphaF(0.5)
self.brush = Qt.QBrush(color)
self.brush_selected = Qt.QBrush(Qt.QColor(255, 0, 255, 127))
parent_item.installSceneEventFilter(self)
if points:
for point in points:
self.make_and_store_point_item(Qt.QPointF(point[0], point[1]))
def boundingRect(self):
return Qt.QRectF()
def paint(self, QPainter, QStyleOptionGraphicsItem, QWidget_widget=None):
pass
def type(self):
return self.QGRAPHICSITEM_TYPE
def make_and_store_point_item(self, pos):
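        # Create a 15x15 rect item roughly centered on pos; setScale(1 / m22)
        # counteracts the view's zoom so the marker keeps a constant on-screen size.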
point_item = PointItem(self, -7, -7, 15, 15, self.parentItem())
point_item.setScale(1 / self.view.transform().m22())
point_item.setPen(self.pen)
point_item.setBrush(self.brush)
flags = point_item.flags()
point_item.setFlags(
flags |
Qt.QGraphicsItem.ItemIsFocusable | # Necessary in order for item to receive keyboard events
Qt.QGraphicsItem.ItemIsSelectable |
Qt.QGraphicsItem.ItemIsMovable |
Qt.QGraphicsItem.ItemSendsGeometryChanges
)
point_item.installSceneEventFilter(self)
self.point_items.append(point_item)
point_item.setPos(pos)
def delete_selected(self):
        for idx, item in reversed(list(enumerate(self.point_items))):
if item.isSelected():
self.scene().removeItem(item)
del self.point_items[idx]
self.point_item_list_content_reset.emit()
def sceneEventFilter(self, watched, event):
if watched is self.parentItem():
if event.type() == Qt.QEvent.GraphicsSceneMousePress and event.button() == Qt.Qt.RightButton:
self.make_and_store_point_item(event.pos())
return True
if event.type() == Qt.QEvent.KeyPress and event.key() == Qt.Qt.Key_Delete and event.modifiers() == Qt.Qt.NoModifier:
self.delete_selected()
return False
def on_viewport_size_changed(self):
scale = 1 / self.view.transform().m22()
for point_item in self.point_items:
point_item.setScale(scale)
def clear(self):
for point_item in self.point_items:
self.view.scene().removeItem(point_item)
self.point_items = []
self.point_item_list_content_reset.emit()
@property
def points(self):
return [(point_item.pos().x(), point_item.pos().y()) for point_item in self.point_items]
@points.setter
def points(self, points):
self.clear()
for point in points:
self.make_and_store_point_item(Qt.QPointF(point[0], point[1]))
| erikhvatum/RisWidget | ris_widget/examples/simple_point_picker.py | Python | mit | 6,323 |
import serial
import sys
from time import sleep
def move(dir):
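    # Send a single one-character direction command to the CNC controller,
    # then pulse a serial break and wait a second for the move to complete.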
print dir
ser.write(dir) # write a string
ser.sendBreak(0.25)
ser.flush()
sleep(1)
def ResetCoords():
    move('r')
def DrawRect(dim):
print ""
print "Drawing 10-size rectangle"
out = ""
k = 2;
while(k > 1):
print "First side:"
dir = 'd'
for i in range(0, dim[0]):
move(dir)
print "Second side:"
dir = 'x'
for i in range(0, dim[1]):
move(dir)
print "Third side:"
dir = 'a'
for i in range(0, dim[0]):
move(dir)
print "Fourth side:"
dir = 'w'
for i in range(0, dim[1]):
move(dir)
print "Finished, starting over."
print "________________________"
k = k - 1
def ManualControl():
run = 1
while run == 1:
print ""
print ""
print "___________________________"
print "Use Keypad or following keys to control motors"
print "Direction:"
print "q w e"
print "a s d"
print "z x c"
print "Drill control:"
print " Up: f"
print "Down: v"
print ""
print "Press m to exit to menu"
print "___________________________"
select = raw_input(": ")
if select == "m":
run = 0
else:
move(select)
def DrawText():
print "This option is not ready yet"
return 0
def ClosePort():
ser.close() # close port
def OpenPort(port):
print ""
print "Initializing Com-port to device."
ser = serial.Serial(port, 9600, 8, serial.PARITY_NONE, 1, None, False, False, None, False, None)
print "" + ser.portstr + " is open"
return ser
def Menu():
print "___________________________"
print "Menu"
print "1. Manual Control"
print "2. Demonstration"
print "3. Text carving"
print "4. Quit"
print ""
select = raw_input("Select: ")
if select == "1":
ManualControl()
if select == "2":
DrawRect([5,5])
if select == "3":
DrawText()
if select == "4":
Quit()
def Quit():
ClosePort()
sys.exit()
print "Welcome to PyCNC 0.5"
print "Author: Heikki Juva @ 2011"
print ""
print "___________________________"
port = raw_input("Give CNC port name ")
ser = OpenPort(port)
print ""
while(1):
Menu()
| Zokol/ArduinoCNC | cnc.py | Python | mit | 2,290 |
"""TreeNode operation to find next node."""
import threading
from src.core import tree
from src.ops import abstract
class NextNodeOp(abstract.AbstractOp):
"""Finds next TreeNode instance in the tree in in-order traversal."""
def do(self, root, args):
"""Implements NextNodeOp.
See http://www.geeksforgeeks.org/inorder-successor-in-binary-search-tree.
Args:
root: TreeNode instance
args: Instance of a dictionary object of function args.
Returns:
TreeNode instance
Raises:
StopIteration if given node is the last one in the tree.
"""
# cache may have updated since lock has been acquired
next = root.metadata.get('next', None)
if next is not None:
return next
if root.parent is not None: # get sibling node from parent branch
try:
root.metadata['next'] = root.parent.children[root.parent_branch][
root.parent.children[root.parent_branch].next(root.id)]
return root.metadata.get('next')
except StopIteration: # no siblings exist
pass
# get minimum value from right subtree
if len(root.children[tree.TreeNode.RIGHT]):
node = root.children[tree.TreeNode.RIGHT][
root.children[tree.TreeNode.RIGHT].iloc[0]]
while len(node.children[tree.TreeNode.LEFT]):
node = node.children[tree.TreeNode.LEFT][
node.children[tree.TreeNode.LEFT].iloc[0]]
root.metadata['next'] = node
return root.metadata.get('next')
# current node is left leaf of the tree, traverse back up
else:
node = root
while node.parent is not None:
if node.parent_branch == tree.TreeNode.LEFT:
root.metadata['next'] = node.parent
return root.metadata.get('next')
node = node.parent
raise StopIteration('Last node reached.')
| cripplet/treedoc-py | src/ops/next_node.py | Python | mit | 1,836 |
"""
homeassistant.components.media_player.chromecast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functionality to interact with Cast devices on the network.
WARNING: This platform is currently not working due to a changed Cast API
"""
import logging
from homeassistant.const import (
STATE_PLAYING, STATE_PAUSED, STATE_IDLE, STATE_OFF,
STATE_UNKNOWN, CONF_HOST)
from homeassistant.components.media_player import (
MediaPlayerDevice,
SUPPORT_PAUSE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_MUTE,
SUPPORT_TURN_ON, SUPPORT_TURN_OFF, SUPPORT_YOUTUBE,
SUPPORT_PREVIOUS_TRACK, SUPPORT_NEXT_TRACK,
MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO)
REQUIREMENTS = ['pychromecast==0.6.10']
CONF_IGNORE_CEC = 'ignore_cec'
CAST_SPLASH = 'https://home-assistant.io/images/cast/splash.png'
SUPPORT_CAST = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_YOUTUBE
KNOWN_HOSTS = []
# pylint: disable=invalid-name
cast = None
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the cast platform. """
global cast
import pychromecast
cast = pychromecast
logger = logging.getLogger(__name__)
# import CEC IGNORE attributes
ignore_cec = config.get(CONF_IGNORE_CEC, [])
if isinstance(ignore_cec, list):
cast.IGNORE_CEC += ignore_cec
else:
        logger.error('Chromecast config %s must be a list.', CONF_IGNORE_CEC)
hosts = []
if discovery_info and discovery_info[0] not in KNOWN_HOSTS:
hosts = [discovery_info[0]]
elif CONF_HOST in config:
hosts = [config[CONF_HOST]]
else:
hosts = (host_port[0] for host_port
in cast.discover_chromecasts()
if host_port[0] not in KNOWN_HOSTS)
casts = []
for host in hosts:
try:
casts.append(CastDevice(host))
except cast.ChromecastConnectionError:
pass
else:
KNOWN_HOSTS.append(host)
add_devices(casts)
class CastDevice(MediaPlayerDevice):
""" Represents a Cast device on the network. """
# pylint: disable=too-many-public-methods
def __init__(self, host):
import pychromecast.controllers.youtube as youtube
self.cast = cast.Chromecast(host)
self.youtube = youtube.YouTubeController()
self.cast.register_handler(self.youtube)
self.cast.socket_client.receiver_controller.register_status_listener(
self)
self.cast.socket_client.media_controller.register_status_listener(self)
self.cast_status = self.cast.status
self.media_status = self.cast.media_controller.status
# Entity properties and methods
@property
def should_poll(self):
return False
@property
def name(self):
""" Returns the name of the device. """
return self.cast.device.friendly_name
# MediaPlayerDevice properties and methods
@property
def state(self):
""" State of the player. """
if self.media_status is None:
return STATE_UNKNOWN
elif self.media_status.player_is_playing:
return STATE_PLAYING
elif self.media_status.player_is_paused:
return STATE_PAUSED
elif self.media_status.player_is_idle:
return STATE_IDLE
elif self.cast.is_idle:
return STATE_OFF
else:
return STATE_UNKNOWN
@property
def volume_level(self):
""" Volume level of the media player (0..1). """
return self.cast_status.volume_level if self.cast_status else None
@property
def is_volume_muted(self):
""" Boolean if volume is currently muted. """
return self.cast_status.volume_muted if self.cast_status else None
@property
def media_content_id(self):
""" Content ID of current playing media. """
return self.media_status.content_id if self.media_status else None
@property
def media_content_type(self):
""" Content type of current playing media. """
if self.media_status is None:
return None
elif self.media_status.media_is_tvshow:
return MEDIA_TYPE_TVSHOW
elif self.media_status.media_is_movie:
return MEDIA_TYPE_VIDEO
elif self.media_status.media_is_musictrack:
return MEDIA_TYPE_MUSIC
return None
@property
def media_duration(self):
""" Duration of current playing media in seconds. """
return self.media_status.duration if self.media_status else None
@property
def media_image_url(self):
""" Image url of current playing media. """
if self.media_status is None:
return None
images = self.media_status.images
return images[0].url if images else None
@property
def media_title(self):
""" Title of current playing media. """
return self.media_status.title if self.media_status else None
@property
def media_artist(self):
""" Artist of current playing media. (Music track only) """
return self.media_status.artist if self.media_status else None
@property
def media_album(self):
""" Album of current playing media. (Music track only) """
return self.media_status.album_name if self.media_status else None
@property
def media_album_artist(self):
""" Album arist of current playing media. (Music track only) """
return self.media_status.album_artist if self.media_status else None
@property
def media_track(self):
""" Track number of current playing media. (Music track only) """
return self.media_status.track if self.media_status else None
@property
def media_series_title(self):
""" Series title of current playing media. (TV Show only)"""
return self.media_status.series_title if self.media_status else None
@property
def media_season(self):
""" Season of current playing media. (TV Show only) """
return self.media_status.season if self.media_status else None
@property
def media_episode(self):
""" Episode of current playing media. (TV Show only) """
return self.media_status.episode if self.media_status else None
@property
def app_id(self):
""" ID of the current running app. """
return self.cast.app_id
@property
def app_name(self):
""" Name of the current running app. """
return self.cast.app_display_name
@property
def supported_media_commands(self):
""" Flags of media commands that are supported. """
return SUPPORT_CAST
def turn_on(self):
""" Turns on the ChromeCast. """
        # The only way we can turn the Chromecast on is by launching an app
if not self.cast.status or not self.cast.status.is_active_input:
if self.cast.app_id:
self.cast.quit_app()
self.cast.play_media(
CAST_SPLASH, cast.STREAM_TYPE_BUFFERED)
def turn_off(self):
""" Turns Chromecast off. """
self.cast.quit_app()
def mute_volume(self, mute):
""" mute the volume. """
self.cast.set_volume_muted(mute)
def set_volume_level(self, volume):
""" set volume level, range 0..1. """
self.cast.set_volume(volume)
def media_play(self):
""" Send play commmand. """
self.cast.media_controller.play()
def media_pause(self):
""" Send pause command. """
self.cast.media_controller.pause()
def media_previous_track(self):
""" Send previous track command. """
self.cast.media_controller.rewind()
def media_next_track(self):
""" Send next track command. """
self.cast.media_controller.skip()
def media_seek(self, position):
""" Seek the media to a specific location. """
self.cast.media_controller.seek(position)
def play_youtube(self, media_id):
""" Plays a YouTube media. """
self.youtube.play_video(media_id)
# implementation of chromecast status_listener methods
def new_cast_status(self, status):
""" Called when a new cast status is received. """
self.cast_status = status
self.update_ha_state()
def new_media_status(self, status):
""" Called when a new media status is received. """
self.media_status = status
self.update_ha_state()
| michaelarnauts/home-assistant | homeassistant/components/media_player/cast.py | Python | mit | 8,631 |
"""Order a block storage replica volume."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
CONTEXT_SETTINGS = {'token_normalize_func': lambda x: x.upper()}
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('volume_id')
@click.option('--snapshot-schedule', '-s',
help='Snapshot schedule to use for replication, '
'(HOURLY | DAILY | WEEKLY)',
required=True,
type=click.Choice(['HOURLY', 'DAILY', 'WEEKLY']))
@click.option('--location', '-l',
help='Short name of the data center for the replicant '
'(e.g.: dal09)',
required=True)
@click.option('--tier',
help='Endurance Storage Tier (IOPS per GB) of the primary'
' volume for which a replicant is ordered [optional]',
type=click.Choice(['0.25', '2', '4', '10']))
@click.option('--os-type',
help='Operating System Type (e.g.: LINUX) of the primary'
' volume for which a replica is ordered [optional]',
type=click.Choice([
'HYPER_V',
'LINUX',
'VMWARE',
'WINDOWS_2008',
'WINDOWS_GPT',
'WINDOWS',
'XEN']))
@environment.pass_env
def cli(env, volume_id, snapshot_schedule, location, tier, os_type):
"""Order a block storage replica volume."""
block_manager = SoftLayer.BlockStorageManager(env.client)
if tier is not None:
tier = float(tier)
try:
order = block_manager.order_replicant_volume(
volume_id,
snapshot_schedule=snapshot_schedule,
location=location,
tier=tier,
os_type=os_type,
)
except ValueError as ex:
raise exceptions.ArgumentError(str(ex))
if 'placedOrder' in order.keys():
click.echo("Order #{0} placed successfully!".format(
order['placedOrder']['id']))
for item in order['placedOrder']['items']:
click.echo(" > %s" % item['description'])
else:
click.echo("Order could not be placed! Please verify your options " +
"and try again.")
| skraghu/softlayer-python | SoftLayer/CLI/block/replication/order.py | Python | mit | 2,315 |
import time
import rlp
import trie
import db
import utils
import processblock
import transactions
import logging
import copy
import sys
from repoze.lru import lru_cache
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
INITIAL_DIFFICULTY = 2 ** 17
GENESIS_PREVHASH = '\00' * 32
GENESIS_COINBASE = "0" * 40
GENESIS_NONCE = utils.sha3(chr(42))
GENESIS_GAS_LIMIT = 10 ** 6
MIN_GAS_LIMIT = 125000
GASLIMIT_EMA_FACTOR = 1024
BLOCK_REWARD = 1500 * utils.denoms.finney
UNCLE_REWARD = 15 * BLOCK_REWARD / 16
NEPHEW_REWARD = BLOCK_REWARD / 32
BLOCK_DIFF_FACTOR = 1024
GENESIS_MIN_GAS_PRICE = 0
BLKLIM_FACTOR_NOM = 6
BLKLIM_FACTOR_DEN = 5
DIFF_ADJUSTMENT_CUTOFF = 5
RECORDING = 1
NONE = 0
VERIFYING = -1
GENESIS_INITIAL_ALLOC = \
{"51ba59315b3a95761d0863b05ccc7a7f54703d99": 2 ** 200, # (G)
"e6716f9544a56c530d868e4bfbacb172315bdead": 2 ** 200, # (J)
"b9c015918bdaba24b4ff057a92a3873d6eb201be": 2 ** 200, # (V)
"1a26338f0d905e295fccb71fa9ea849ffa12aaf4": 2 ** 200, # (A)
"2ef47100e0787b915105fd5e3f4ff6752079d5cb": 2 ** 200, # (M)
"cd2a3d9f938e13cd947ec05abc7fe734df8dd826": 2 ** 200, # (R)
"6c386a4b26f73c802f34673f7248bb118f97424a": 2 ** 200, # (HH)
"e4157b34ea9615cfbde6b4fda419828124b70c78": 2 ** 200, # (CH)
}
block_structure = [
["prevhash", "bin", "\00" * 32],
["uncles_hash", "bin", utils.sha3(rlp.encode([]))],
["coinbase", "addr", GENESIS_COINBASE],
["state_root", "trie_root", trie.BLANK_ROOT],
["tx_list_root", "trie_root", trie.BLANK_ROOT],
["difficulty", "int", INITIAL_DIFFICULTY],
["number", "int", 0],
["min_gas_price", "int", GENESIS_MIN_GAS_PRICE],
["gas_limit", "int", GENESIS_GAS_LIMIT],
["gas_used", "int", 0],
["timestamp", "int", 0],
["extra_data", "bin", ""],
["nonce", "bin", ""],
]
block_structure_rev = {}
for i, (name, typ, default) in enumerate(block_structure):
block_structure_rev[name] = [i, typ, default]
acct_structure = [
["nonce", "int", 0],
["balance", "int", 0],
["storage", "trie_root", trie.BLANK_ROOT],
["code", "hash", ""],
]
acct_structure_rev = {}
for i, (name, typ, default) in enumerate(acct_structure):
acct_structure_rev[name] = [i, typ, default]
def calc_difficulty(parent, timestamp):
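    # Difficulty moves by parent.difficulty / BLOCK_DIFF_FACTOR per block: up when
    # the block arrives within DIFF_ADJUSTMENT_CUTOFF seconds of its parent, down otherwise.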
offset = parent.difficulty / BLOCK_DIFF_FACTOR
sign = 1 if timestamp - parent.timestamp < DIFF_ADJUSTMENT_CUTOFF else -1
return parent.difficulty + offset * sign
def calc_gaslimit(parent):
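    # Exponential moving average: blend the parent's limit with 6/5 of the gas it
    # actually used, weighted 1/GASLIMIT_EMA_FACTOR, floored at MIN_GAS_LIMIT.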
prior_contribution = parent.gas_limit * (GASLIMIT_EMA_FACTOR - 1)
new_contribution = parent.gas_used * BLKLIM_FACTOR_NOM / BLKLIM_FACTOR_DEN
gl = (prior_contribution + new_contribution) / GASLIMIT_EMA_FACTOR
return max(gl, MIN_GAS_LIMIT)
class UnknownParentException(Exception):
pass
class TransientBlock(object):
"""
Read only, non persisted, not validated representation of a block
"""
def __init__(self, rlpdata):
self.rlpdata = rlpdata
self.header_args, transaction_list, uncles = rlp.decode(rlpdata)
self.hash = utils.sha3(rlp.encode(self.header_args))
self.transaction_list = transaction_list # rlp encoded transactions
self.uncles = uncles
for i, (name, typ, default) in enumerate(block_structure):
setattr(self, name, utils.decoders[typ](self.header_args[i]))
def __repr__(self):
return '<TransientBlock(#%d %s %s)>' %\
(self.number, self.hash.encode('hex')[
:4], self.prevhash.encode('hex')[:4])
def check_header_pow(header):
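    # Proof of work: sha3(sha3(header without nonce) + nonce), read as a 256-bit
    # big-endian integer, must fall below 2**256 / difficulty.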
assert len(header[-1]) == 32
rlp_Hn = rlp.encode(header[:-1])
nonce = header[-1]
diff = utils.decoders['int'](header[block_structure_rev['difficulty'][0]])
h = utils.sha3(utils.sha3(rlp_Hn) + nonce)
return utils.big_endian_to_int(h) < 2 ** 256 / diff
class Block(object):
def __init__(self,
prevhash='\00' * 32,
uncles_hash=block_structure_rev['uncles_hash'][2],
coinbase=block_structure_rev['coinbase'][2],
state_root=trie.BLANK_ROOT,
tx_list_root=trie.BLANK_ROOT,
difficulty=block_structure_rev['difficulty'][2],
number=0,
min_gas_price=block_structure_rev['min_gas_price'][2],
gas_limit=block_structure_rev['gas_limit'][2],
gas_used=0, timestamp=0, extra_data='', nonce='',
transaction_list=[],
uncles=[],
header=None):
self.prevhash = prevhash
self.uncles_hash = uncles_hash
self.coinbase = coinbase
self.difficulty = difficulty
self.number = number
self.min_gas_price = min_gas_price
self.gas_limit = gas_limit
self.gas_used = gas_used
self.timestamp = timestamp
self.extra_data = extra_data
self.nonce = nonce
self.uncles = uncles
self.suicides = []
self.postqueue = []
self.caches = {
'balance': {},
'nonce': {},
'code': {},
'all': {}
}
self.journal = []
self.transactions = trie.Trie(utils.get_db_path(), tx_list_root)
self.transaction_count = 0
self.state = trie.Trie(utils.get_db_path(), state_root)
self.proof_mode = None
self.proof_nodes = []
# If transaction_list is None, then it's a block header imported for
# SPV purposes
if transaction_list is not None:
# support init with transactions only if state is known
assert self.state.root_hash_valid()
for tx_lst_serialized, state_root, gas_used_encoded \
in transaction_list:
self._add_transaction_to_list(
tx_lst_serialized, state_root, gas_used_encoded)
if tx_list_root != self.transactions.root_hash:
raise Exception("Transaction list root hash does not match!")
if not self.is_genesis() and self.nonce and\
not check_header_pow(header or self.list_header()):
raise Exception("PoW check failed")
# make sure we are all on the same db
assert self.state.db.db == self.transactions.db.db
# use de/encoders to check type and validity
for name, typ, d in block_structure:
v = getattr(self, name)
assert utils.decoders[typ](utils.encoders[typ](v)) == v
# Basic consistency verifications
if not self.state.root_hash_valid():
raise Exception(
"State Merkle root not found in database! %r" % self)
if not self.transactions.root_hash_valid():
raise Exception(
"Transactions root not found in database! %r" % self)
if len(self.extra_data) > 1024:
raise Exception("Extra data cannot exceed 1024 bytes")
if self.coinbase == '':
raise Exception("Coinbase cannot be empty address")
def validate_uncles(self):
if utils.sha3(rlp.encode(self.uncles)) != self.uncles_hash:
return False
# Check uncle validity
ancestor_chain = [self]
# Uncle can have a block from 2-7 blocks ago as its parent
for i in [1, 2, 3, 4, 5, 6, 7]:
if ancestor_chain[-1].number > 0:
ancestor_chain.append(ancestor_chain[-1].get_parent())
ineligible = []
# Uncles of this block cannot be direct ancestors and cannot also
# be uncles included 1-6 blocks ago
for ancestor in ancestor_chain[1:]:
ineligible.extend(ancestor.uncles)
ineligible.extend([b.list_header() for b in ancestor_chain])
eligible_ancestor_hashes = [x.hash for x in ancestor_chain[2:]]
for uncle in self.uncles:
if not check_header_pow(uncle):
sys.stderr.write('1\n\n')
return False
# uncle's parent cannot be the block's own parent
prevhash = uncle[block_structure_rev['prevhash'][0]]
if prevhash not in eligible_ancestor_hashes:
logger.debug("%r: Uncle does not have a valid ancestor", self)
sys.stderr.write('2 ' + prevhash.encode('hex') + ' ' + str(map(lambda x: x.encode('hex'), eligible_ancestor_hashes)) + '\n\n')
return False
if uncle in ineligible:
sys.stderr.write('3\n\n')
logger.debug("%r: Duplicate uncle %r", self, utils.sha3(rlp.encode(uncle)).encode('hex'))
return False
ineligible.append(uncle)
return True
def is_genesis(self):
return self.prevhash == GENESIS_PREVHASH and \
self.nonce == GENESIS_NONCE
def check_proof_of_work(self, nonce):
H = self.list_header()
H[-1] = nonce
return check_header_pow(H)
@classmethod
def deserialize_header(cls, header_data):
if isinstance(header_data, (str, unicode)):
header_data = rlp.decode(header_data)
assert len(header_data) == len(block_structure)
kargs = {}
# Deserialize all properties
for i, (name, typ, default) in enumerate(block_structure):
kargs[name] = utils.decoders[typ](header_data[i])
return kargs
@classmethod
def deserialize(cls, rlpdata):
header_args, transaction_list, uncles = rlp.decode(rlpdata)
kargs = cls.deserialize_header(header_args)
kargs['header'] = header_args
kargs['transaction_list'] = transaction_list
kargs['uncles'] = uncles
# if we don't have the state we need to replay transactions
_db = db.DB(utils.get_db_path())
if len(kargs['state_root']) == 32 and kargs['state_root'] in _db:
return Block(**kargs)
elif kargs['prevhash'] == GENESIS_PREVHASH:
return Block(**kargs)
else: # no state, need to replay
try:
parent = get_block(kargs['prevhash'])
except KeyError:
raise UnknownParentException(kargs['prevhash'].encode('hex'))
return parent.deserialize_child(rlpdata)
@classmethod
def init_from_header(cls, rlpdata):
kargs = cls.deserialize_header(rlpdata)
kargs['transaction_list'] = None
kargs['uncles'] = None
return Block(**kargs)
def deserialize_child(self, rlpdata):
"""
deserialization w/ replaying transactions
"""
header_args, transaction_list, uncles = rlp.decode(rlpdata)
assert len(header_args) == len(block_structure)
kargs = dict(transaction_list=transaction_list, uncles=uncles)
# Deserialize all properties
for i, (name, typ, default) in enumerate(block_structure):
kargs[name] = utils.decoders[typ](header_args[i])
block = Block.init_from_parent(self, kargs['coinbase'],
extra_data=kargs['extra_data'],
timestamp=kargs['timestamp'],
uncles=uncles)
# replay transactions
for tx_lst_serialized, _state_root, _gas_used_encoded in \
transaction_list:
tx = transactions.Transaction.create(tx_lst_serialized)
# logger.debug('state:\n%s', utils.dump_state(block.state))
# logger.debug('applying %r', tx)
success, output = processblock.apply_transaction(block, tx)
#block.add_transaction_to_list(tx) # < this is done by processblock
# logger.debug('state:\n%s', utils.dump_state(block.state))
logger.debug('d %s %s', _gas_used_encoded, block.gas_used)
            assert utils.decode_int(_gas_used_encoded) == block.gas_used, \
                "Gas mismatch (ours %d, theirs %d) on block: %r" % \
                (block.gas_used, utils.decode_int(_gas_used_encoded), block.to_dict(False, True, True))
assert _state_root == block.state.root_hash, \
"State root mismatch (ours %r theirs %r) on block: %r" % \
(block.state.root_hash.encode('hex'),
_state_root.encode('hex'),
block.to_dict(False, True, True))
block.finalize()
block.uncles_hash = kargs['uncles_hash']
block.nonce = kargs['nonce']
block.min_gas_price = kargs['min_gas_price']
# checks
assert block.prevhash == self.hash
assert block.gas_used == kargs['gas_used']
assert block.gas_limit == kargs['gas_limit']
assert block.timestamp == kargs['timestamp']
assert block.difficulty == kargs['difficulty']
assert block.number == kargs['number']
assert block.extra_data == kargs['extra_data']
assert utils.sha3(rlp.encode(block.uncles)) == kargs['uncles_hash']
assert block.tx_list_root == kargs['tx_list_root']
assert block.state.root_hash == kargs['state_root'], (block.state.root_hash, kargs['state_root'])
return block
@classmethod
def hex_deserialize(cls, hexrlpdata):
return cls.deserialize(hexrlpdata.decode('hex'))
def mk_blank_acct(self):
if not hasattr(self, '_blank_acct'):
codehash = ''
self.state.db.put(codehash, '')
self._blank_acct = [utils.encode_int(0),
utils.encode_int(0),
trie.BLANK_ROOT,
codehash]
return self._blank_acct[:]
def get_acct(self, address):
if len(address) == 40:
address = address.decode('hex')
acct = rlp.decode(self.state.get(address)) or self.mk_blank_acct()
return tuple(utils.decoders[t](acct[i])
for i, (n, t, d) in enumerate(acct_structure))
# _get_acct_item(bin or hex, int) -> bin
def _get_acct_item(self, address, param):
''' get account item
:param address: account address, can be binary or hex string
:param param: parameter to get
'''
if param != 'storage' and address in self.caches[param]:
return self.caches[param][address]
return self.get_acct(address)[acct_structure_rev[param][0]]
# _set_acct_item(bin or hex, int, bin)
def _set_acct_item(self, address, param, value):
''' set account item
:param address: account address, can be binary or hex string
:param param: parameter to set
:param value: new value
'''
# logger.debug('set acct %r %r %d', address, param, value)
self.set_and_journal(param, address, value)
self.set_and_journal('all', address, True)
def set_and_journal(self, cache, index, value):
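        # Record the previous value so revert() can unwind cache writes back to
        # the journal_size captured in a snapshot.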
prev = self.caches[cache].get(index, None)
if prev != value:
self.journal.append([cache, index, prev, value])
self.caches[cache][index] = value
# _delta_item(bin or hex, int, int) -> success/fail
def _delta_item(self, address, param, value):
''' add value to account item
:param address: account address, can be binary or hex string
:param param: parameter to increase/decrease
:param value: can be positive or negative
'''
value = self._get_acct_item(address, param) + value
if value < 0:
return False
self._set_acct_item(address, param, value)
return True
def _add_transaction_to_list(self, tx_lst_serialized,
state_root, gas_used_encoded):
# adds encoded data # FIXME: the constructor should get objects
assert isinstance(tx_lst_serialized, list)
data = [tx_lst_serialized, state_root, gas_used_encoded]
self.transactions.update(
rlp.encode(utils.encode_int(self.transaction_count)),
rlp.encode(data))
self.transaction_count += 1
def add_transaction_to_list(self, tx):
tx_lst_serialized = rlp.decode(tx.serialize())
self._add_transaction_to_list(tx_lst_serialized,
self.state_root,
utils.encode_int(self.gas_used))
def _list_transactions(self):
# returns [[tx_lst_serialized, state_root, gas_used_encoded],...]
txlist = []
for i in range(self.transaction_count):
txlist.append(self.get_transaction(i))
return txlist
def get_transaction(self, num):
# returns [tx_lst_serialized, state_root, gas_used_encoded]
return rlp.decode(self.transactions.get(rlp.encode(utils.encode_int(num))))
def get_transactions(self):
return [transactions.Transaction.create(tx) for
tx, s, g in self._list_transactions()]
def get_nonce(self, address):
return self._get_acct_item(address, 'nonce')
def set_nonce(self, address, value):
return self._set_acct_item(address, 'nonce', value)
def increment_nonce(self, address):
return self._delta_item(address, 'nonce', 1)
def decrement_nonce(self, address):
return self._delta_item(address, 'nonce', -1)
def get_balance(self, address):
return self._get_acct_item(address, 'balance')
def set_balance(self, address, value):
self._set_acct_item(address, 'balance', value)
def delta_balance(self, address, value):
return self._delta_item(address, 'balance', value)
def transfer_value(self, from_addr, to_addr, value):
assert value >= 0
if self.delta_balance(from_addr, -value):
return self.delta_balance(to_addr, value)
return False
def get_code(self, address):
return self._get_acct_item(address, 'code')
def set_code(self, address, value):
self._set_acct_item(address, 'code', value)
def get_storage(self, address):
storage_root = self._get_acct_item(address, 'storage')
return trie.Trie(utils.get_db_path(), storage_root)
def get_storage_data(self, address, index):
if 'storage:'+address in self.caches:
if index in self.caches['storage:'+address]:
return self.caches['storage:'+address][index]
t = self.get_storage(address)
t.proof_mode = self.proof_mode
t.proof_nodes = self.proof_nodes
key = utils.zpad(utils.coerce_to_bytes(index), 32)
val = rlp.decode(t.get(key))
if self.proof_mode == RECORDING:
self.proof_nodes.extend(t.proof_nodes)
return utils.big_endian_to_int(val) if val else 0
def set_storage_data(self, address, index, val):
if 'storage:'+address not in self.caches:
self.caches['storage:'+address] = {}
self.set_and_journal('all', address, True)
self.set_and_journal('storage:'+address, index, val)
def commit_state(self):
changes = []
if not len(self.journal):
processblock.pblogger.log('delta', changes=[])
return
for address in self.caches['all']:
acct = rlp.decode(self.state.get(address.decode('hex'))) \
or self.mk_blank_acct()
for i, (key, typ, default) in enumerate(acct_structure):
if key == 'storage':
t = trie.Trie(utils.get_db_path(), acct[i])
t.proof_mode = self.proof_mode
t.proof_nodes = self.proof_nodes
for k, v in self.caches.get('storage:'+address, {}).iteritems():
enckey = utils.zpad(utils.coerce_to_bytes(k), 32)
val = rlp.encode(utils.int_to_big_endian(v))
changes.append(['storage', address, k, v])
if v:
t.update(enckey, val)
else:
t.delete(enckey)
acct[i] = t.root_hash
if self.proof_mode == RECORDING:
self.proof_nodes.extend(t.proof_nodes)
else:
if address in self.caches[key]:
v = self.caches[key].get(address, default)
changes.append([key, address, v])
acct[i] = utils.encoders[acct_structure[i][1]](v)
self.state.update(address.decode('hex'), rlp.encode(acct))
if self.proof_mode == RECORDING:
self.proof_nodes.extend(self.state.proof_nodes)
self.state.proof_nodes = []
if processblock.pblogger.log_state_delta:
processblock.pblogger.log('delta', changes=changes)
self.reset_cache()
def del_account(self, address):
self.commit_state()
if len(address) == 40:
address = address.decode('hex')
self.state.delete(address)
def account_to_dict(self, address, with_storage_root=False,
with_storage=True, for_vmtest=False):
if with_storage_root:
assert len(self.journal) == 0
med_dict = {}
for i, val in enumerate(self.get_acct(address)):
name, typ, default = acct_structure[i]
key = acct_structure[i][0]
if name == 'storage':
strie = trie.Trie(utils.get_db_path(), val)
if with_storage_root:
med_dict['storage_root'] = strie.get_root_hash().encode('hex')
else:
med_dict[key] = self.caches[key].get(address, utils.printers[typ](val))
if with_storage:
med_dict['storage'] = {}
d = strie.to_dict()
subcache = self.caches.get('storage:'+address, {})
subkeys = [utils.zpad(utils.coerce_to_bytes(kk), 32) for kk in subcache.keys()]
for k in d.keys() + subkeys:
v = d.get(k, None)
v2 = subcache.get(utils.big_endian_to_int(k), None)
hexkey = '0x'+utils.zunpad(k).encode('hex')
if v2 is not None:
if v2 != 0:
med_dict['storage'][hexkey] = \
'0x'+utils.int_to_big_endian(v2).encode('hex')
elif v is not None:
med_dict['storage'][hexkey] = '0x'+rlp.decode(v).encode('hex')
return med_dict
def reset_cache(self):
self.caches = {
'all': {},
'balance': {},
'nonce': {},
'code': {},
}
self.journal = []
# Revert computation
def snapshot(self):
return {
'state': self.state.root_hash,
'gas': self.gas_used,
'txs': self.transactions,
'txcount': self.transaction_count,
'postqueue': copy.copy(self.postqueue),
'suicides': self.suicides,
'suicides_size': len(self.suicides),
'journal': self.journal, # pointer to reference, so is not static
'journal_size': len(self.journal)
}
def revert(self, mysnapshot):
self.journal = mysnapshot['journal']
logger.debug('reverting')
while len(self.journal) > mysnapshot['journal_size']:
cache, index, prev, post = self.journal.pop()
logger.debug('%r %r %r %r', cache, index, prev, post)
if prev is not None:
self.caches[cache][index] = prev
else:
del self.caches[cache][index]
self.suicides = mysnapshot['suicides']
while len(self.suicides) > mysnapshot['suicides_size']:
self.suicides.pop()
self.state.root_hash = mysnapshot['state']
self.gas_used = mysnapshot['gas']
self.transactions = mysnapshot['txs']
self.transaction_count = mysnapshot['txcount']
self.postqueue = mysnapshot['postqueue']
def finalize(self):
"""
Apply rewards
We raise the block's coinbase account by Rb, the block reward,
        and the coinbase of each uncle by 15/16 of that (UNCLE_REWARD).
Rb = 1500 finney
"""
self.delta_balance(self.coinbase,
BLOCK_REWARD + NEPHEW_REWARD * len(self.uncles))
for uncle_rlp in self.uncles:
uncle_data = Block.deserialize_header(uncle_rlp)
self.delta_balance(uncle_data['coinbase'], UNCLE_REWARD)
self.commit_state()
def serialize_header_without_nonce(self):
return rlp.encode(self.list_header(exclude=['nonce']))
def get_state_root(self):
self.commit_state()
return self.state.root_hash
def set_state_root(self, state_root_hash):
self.state = trie.Trie(utils.get_db_path(), state_root_hash)
self.reset_cache()
state_root = property(get_state_root, set_state_root)
def get_tx_list_root(self):
return self.transactions.root_hash
tx_list_root = property(get_tx_list_root)
def list_header(self, exclude=[]):
header = []
for name, typ, default in block_structure:
# print name, typ, default , getattr(self, name)
if name not in exclude:
header.append(utils.encoders[typ](getattr(self, name)))
return header
def serialize(self):
# Serialization method; should act as perfect inverse function of the
# constructor assuming no verification failures
return rlp.encode([self.list_header(),
self._list_transactions(),
self.uncles])
def hex_serialize(self):
return self.serialize().encode('hex')
def serialize_header(self):
return rlp.encode(self.list_header())
def hex_serialize_header(self):
return rlp.encode(self.list_header()).encode('hex')
def to_dict(self, with_state=False, full_transactions=False,
with_storage_roots=False, with_uncles=False):
"""
serializes the block
with_state: include state for all accounts
full_transactions: include serialized tx (hashes otherwise)
with_uncles: include uncle hashes
"""
b = {}
for name, typ, default in block_structure:
b[name] = utils.printers[typ](getattr(self, name))
txlist = []
for i in range(self.transaction_count):
tx_rlp = self.transactions.get(rlp.encode(utils.encode_int(i)))
tx, msr, gas = rlp.decode(tx_rlp)
if full_transactions:
txjson = transactions.Transaction.create(tx).to_dict()
else:
txjson = utils.sha3(rlp.descend(tx_rlp, 0)).encode('hex') # tx hash
txlist.append({
"tx": txjson,
"medstate": msr.encode('hex'),
"gas": str(utils.decode_int(gas))
})
b["transactions"] = txlist
if with_state:
state_dump = {}
for address, v in self.state.to_dict().iteritems():
state_dump[address.encode('hex')] = \
self.account_to_dict(address, with_storage_roots)
b['state'] = state_dump
if with_uncles:
b['uncles'] = [utils.sha3(rlp.encode(u)).encode('hex') for u in self.uncles]
return b
def _hash(self):
return utils.sha3(self.serialize_header())
@property
def hash(self):
return self._hash()
def hex_hash(self):
return self.hash.encode('hex')
def get_parent(self):
if self.number == 0:
raise UnknownParentException('Genesis block has no parent')
try:
parent = get_block(self.prevhash)
except KeyError:
raise UnknownParentException(self.prevhash.encode('hex'))
#assert parent.state.db.db == self.state.db.db
return parent
def has_parent(self):
try:
self.get_parent()
return True
except UnknownParentException:
return False
def chain_difficulty(self):
# calculate the summarized_difficulty
if self.is_genesis():
return self.difficulty
elif 'difficulty:'+self.hex_hash() in self.state.db:
return utils.decode_int(
self.state.db.get('difficulty:'+self.hex_hash()))
else:
_idx, _typ, _ = block_structure_rev['difficulty']
o = self.difficulty + self.get_parent().chain_difficulty()
o += sum([utils.decoders[_typ](u[_idx]) for u in self.uncles])
self.state.db.put('difficulty:'+self.hex_hash(), utils.encode_int(o))
return o
def __eq__(self, other):
return isinstance(other, (Block, CachedBlock)) and self.hash == other.hash
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return self.number > other.number
def __lt__(self, other):
return self.number < other.number
def __repr__(self):
return '<Block(#%d %s %s)>' % (self.number,
self.hex_hash()[:4],
self.prevhash.encode('hex')[:4])
@classmethod
def init_from_parent(cls, parent, coinbase, extra_data='',
timestamp=int(time.time()), uncles=[]):
return Block(
prevhash=parent.hash,
uncles_hash=utils.sha3(rlp.encode(uncles)),
coinbase=coinbase,
state_root=parent.state.root_hash,
tx_list_root=trie.BLANK_ROOT,
difficulty=calc_difficulty(parent, timestamp),
number=parent.number + 1,
min_gas_price=0,
gas_limit=calc_gaslimit(parent),
gas_used=0,
timestamp=timestamp,
extra_data=extra_data,
nonce='',
transaction_list=[],
uncles=uncles)
def set_proof_mode(self, pm, pmnodes=None):
self.proof_mode = pm
self.state.proof_mode = pm
self.proof_nodes = pmnodes or []
self.state.proof_nodes = pmnodes or []
class CachedBlock(Block):
# note: immutable refers to: do not manipulate!
_hash_cached = None
    def _set_acct_item(self, *args, **kwargs): raise NotImplementedError
    def _add_transaction_to_list(self, *args, **kwargs): raise NotImplementedError
    def set_state_root(self, *args, **kwargs): raise NotImplementedError
    def revert(self, *args, **kwargs): raise NotImplementedError
def commit_state(self): pass
def _hash(self):
if not self._hash_cached:
self._hash_cached = Block._hash(self)
return self._hash_cached
@classmethod
def create_cached(cls, blk):
blk.__class__ = CachedBlock
return blk
@lru_cache(500)
def get_block(blockhash):
"""
    Assumption: blocks loaded from the db are not manipulated
-> can be cached including hash
"""
return CachedBlock.create_cached(Block.deserialize(db.DB(utils.get_db_path()).get(blockhash)))
def has_block(blockhash):
return blockhash in db.DB(utils.get_db_path())
def genesis(start_alloc=GENESIS_INITIAL_ALLOC, difficulty=INITIAL_DIFFICULTY):
# https://ethereum.etherpad.mozilla.org/11
block = Block(prevhash=GENESIS_PREVHASH, coinbase=GENESIS_COINBASE,
tx_list_root=trie.BLANK_ROOT,
difficulty=difficulty, nonce=GENESIS_NONCE,
gas_limit=GENESIS_GAS_LIMIT)
for addr, balance in start_alloc.iteritems():
block.set_balance(addr, balance)
block.state.db.commit()
return block
def dump_genesis_block_tests_data():
import json
g = genesis()
data = dict(
genesis_state_root=g.state_root.encode('hex'),
genesis_hash=g.hex_hash(),
genesis_rlp_hex=g.serialize().encode('hex'),
initial_alloc=dict()
)
for addr, balance in GENESIS_INITIAL_ALLOC.iteritems():
data['initial_alloc'][addr] = str(balance)
print json.dumps(data, indent=1)
| jnnk/pyethereum | pyethereum/blocks.py | Python | mit | 32,057 |
from .stackapi import StackAPI
from .stackapi import StackAPIError
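# Typical usage (a minimal sketch, per the StackAPI documentation):
#   from stackapi import StackAPI
#   site = StackAPI('stackoverflow')
#   questions = site.fetch('questions')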
| AWegnerGitHub/stackapi | stackapi/__init__.py | Python | mit | 66 |
from django.conf import settings
from django.core import mail
from django.test import RequestFactory
from django.test import TestCase
from django.contrib.sites.models import Site
from contact_form.forms import ContactForm
class ContactFormTests(TestCase):
def test_request_required(self):
"""
Can't instantiate without an HttpRequest.
"""
self.assertRaises(TypeError, ContactForm)
def test_valid_data_required(self):
"""
Can't try to build the message dict unless data is valid.
"""
request = RequestFactory().request()
data = {'name': 'Test',
'body': 'Test message'}
form = ContactForm(request=request, data=data)
self.assertRaises(ValueError, form.get_message_dict)
def test_send(self):
"""
Valid form can and does in fact send email.
"""
request = RequestFactory().request()
data = {'name': 'Test',
'email': 'test@example.com',
'message': 'Test message'}
form = ContactForm(request=request, data=data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
message = mail.outbox[0]
self.assertEqual([data['email']],
message.recipients())
self.assertTrue(data['message'] in message.body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL,
message.from_email)
# def test_no_sites(self):
# """
# Sites integration works with or without installed
# contrib.sites.
# """
# old_installed = Site._meta.installed
# Site._meta.installed = False
# request = RequestFactory().request()
# data = {'name': 'Test',
# 'email': 'test@example.com',
# 'message': 'Test message'}
# form = ContactForm(request=request, data=data)
# self.assertTrue(form.is_valid())
# form.save()
# self.assertEqual(1, len(mail.outbox))
# Site._meta.installed = old_installed
| nunataksoftware/anayluistango | contact_form/tests/forms.py | Python | mit | 2,154 |
#!/usr/bin/env python
#
# Jetduino Example for using the Grove Sound Sensor and the Grove LED
#
# The Jetduino connects the Jetson and Grove sensors. You can learn more about the Jetduino here: http://www.NeuroRoboticTech.com/Projects/Jetduino
#
# Modules:
# http://www.seeedstudio.com/wiki/Grove_-_Sound_Sensor
# http://www.seeedstudio.com/wiki/Grove_-_LED_Socket_Kit
#
# Have a question about this example? Ask on the forums here: http://www.NeuroRoboticTech.com/Forum
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import jetduino
from jetduino_pins import *
# Connect the Grove Sound Sensor to analog port A0
# SIG,NC,VCC,GND
sound_sensor = ARD_A0
# Connect the Grove LED to digital port D4
# SIG,NC,VCC,GND
led = LED_D4
jetduino.pinMode(led, OUTPUT_PIN)
# The threshold to turn the led on: 600.00 * 5 / 1024 = 2.93v
threshold_value = 600
while True:
try:
# Read the sound level
sensor_value = jetduino.analogRead(sound_sensor)
# If loud, illuminate LED, otherwise dim
if sensor_value > threshold_value:
jetduino.digitalWrite(led, HIGH)
else:
jetduino.digitalWrite(led, LOW)
print ("sensor_value =", sensor_value)
time.sleep(.5)
except IOError:
print ("Error")
| NeuroRoboticTech/Jetduino | Software/Python/grove_sound_sensor.py | Python | mit | 2,624 |
"""Common profiles are defined here to be easily used within a project using --profile {name}"""
from typing import Any, Dict
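# Usage sketch: pick a profile on the command line, e.g. `isort --profile black .`,
# or set `profile = black` in the project's isort configuration.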
black = {
"multi_line_output": 3,
"include_trailing_comma": True,
"force_grid_wrap": 0,
"use_parentheses": True,
"ensure_newline_before_comments": True,
"line_length": 88,
}
django = {
"combine_as_imports": True,
"include_trailing_comma": True,
"multi_line_output": 5,
"line_length": 79,
}
pycharm = {
"multi_line_output": 3,
"force_grid_wrap": 2,
"lines_after_imports": 2,
}
google = {
"force_single_line": True,
"force_sort_within_sections": True,
"lexicographical": True,
"single_line_exclusions": ("typing",),
"order_by_type": False,
"group_by_package": True,
}
open_stack = {
"force_single_line": True,
"force_sort_within_sections": True,
"lexicographical": True,
}
plone = {
"force_alphabetical_sort": True,
"force_single_line": True,
"lines_after_imports": 2,
"line_length": 200,
}
attrs = {
"atomic": True,
"force_grid_wrap": 0,
"include_trailing_comma": True,
"lines_after_imports": 2,
"lines_between_types": 1,
"multi_line_output": 3,
"use_parentheses": True,
}
hug = {
"multi_line_output": 3,
"include_trailing_comma": True,
"force_grid_wrap": 0,
"use_parentheses": True,
"line_length": 100,
}
profiles: Dict[str, Dict[str, Any]] = {
"black": black,
"django": django,
"pycharm": pycharm,
"google": google,
"open_stack": open_stack,
"plone": plone,
"attrs": attrs,
"hug": hug,
}
| TeamSPoon/logicmoo_workspace | packs_web/butterfly/lib/python3.7/site-packages/isort/profiles.py | Python | mit | 1,601 |
from django.core.exceptions import ValidationError
from amweekly.slack.tests.factories import SlashCommandFactory
import pytest
@pytest.mark.unit
def test_slash_command_raises_with_invalid_token(settings):
settings.SLACK_TOKENS = ''
with pytest.raises(ValidationError):
SlashCommandFactory()
| akrawchyk/amweekly | amweekly/slack/tests/test_models.py | Python | mit | 313 |
#!/usr/bin/env python
from collections import namedtuple
Payload = namedtuple('Payload', ['iden', 'body', 'send_date', 'sender'])
class Handler(object):
@staticmethod
def config():
return
def __init__(self, logger):
self.logger = logger
    def create_translator(self):
        return
    def create_listener(self, task):
        return
    def configure_modules(self, modules, push_config):
        return
class Translator(object):
    def get_recent(self):
        return
    def is_valid(self, message):
        return
    def get_module(self, message, modules):
        return
    def cleanup(self, message):
        return
    def to_payload(self, message):
        return
    def respond(self, message, response):
        return
| JimboMonkey1234/pushserver | handlers/Handler.py | Python | mit | 725 |
__author__ = 'leif'
import os
import socket
import logging
import logging.config
import logging.handlers
from autocomplete_trie import AutocompleteTrie
from ifind.search.engines.whooshtrec import Whooshtrec
from experiment_setup import ExperimentSetup
work_dir = os.getcwd()
# when deployed this needs to match up with the hostname, and directory to where the project is
my_whoosh_doc_index_dir = os.path.join(work_dir, 'data/fullindex/')
if 'local' not in socket.gethostname():
my_whoosh_doc_index_dir = '/home/leifos/test500index'
#my_whoosh_doc_index_dir = '/Users/david/Workspace/indexes/aquaint_test500_whoosh'
my_whoosh_query_index_dir = os.path.join(work_dir, "trec_query_index/index")  # a leading '/' would make join() discard work_dir
my_experiment_log_dir = work_dir
qrels_file = os.path.join(work_dir, "data/TREC2005.qrels.txt")
qrels_diversity_file = os.path.join(work_dir, "data/sigir-combined.diversity.qrels")
stopword_file = os.path.join(work_dir, "data/stopwords.txt")
data_dir = os.path.join(work_dir, "data")
print "Work DIR: " + work_dir
print "QRELS File: " + qrels_file
print "my_whoosh_doc_index_dir: " + my_whoosh_doc_index_dir
print "Stopword file: " + stopword_file
event_logger = logging.getLogger('event_log')
event_logger.setLevel(logging.INFO)
event_logger_handler = logging.FileHandler(os.path.join(my_experiment_log_dir, 'experiment.log'))
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
event_logger_handler.setFormatter(formatter)
event_logger.addHandler(event_logger_handler)
# workflow must always start with startexperiment/
exp_work_flows = [
['startexperiment/', 'consent', 'preexperiment/AN/',
'prepracticetask/0/', 'search/0/', 'postpracticetask/0/',
# 'anitatimeinstructions/TC/',
'anitapretasksurvey/1/', 'search/1/', 'anitaposttask0survey/1/',
'anitaposttask1survey/1/', 'anitaposttask2survey/1/',
'anitaposttask3survey/1/', 'taskspacer/',
'anitapretasksurvey/2/', 'search/2/', 'anitaposttask0survey/2/',
'anitaposttask1survey/2/', 'anitaposttask2survey/2/',
'anitaposttask3survey/2/', 'taskspacer/',
'anitapretasksurvey/3/', 'search/3/', 'anitaposttask0survey/3/',
'anitaposttask1survey/3/', 'anitaposttask2survey/3/',
'anitaposttask3survey/3/', 'taskspacer/',
'anitapretasksurvey/4/', 'search/4/', 'anitaposttask0survey/4/',
'anitaposttask1survey/4/', 'anitaposttask2survey/4/',
'anitaposttask3survey/4/',
'anitaexit1survey/', 'anitaexit2survey/', 'anitaexit3survey/',
'anitademographicssurvey/', 'logout/'],
['startexperiment/', 'consent', 'preexperiment/AN/',
'prepracticetask/0/', 'search/0/', 'postpracticetask/0/',
# 'anitatimeinstructions/NTC/',
'anitapretasksurvey/1/', 'search/1/', 'anitaposttask0survey/1/',
'anitaposttask1survey/1/', 'anitaposttask2survey/1/',
'anitaposttask3survey/1/', 'taskspacer/',
'anitapretasksurvey/2/', 'search/2/', 'anitaposttask0survey/2/',
'anitaposttask1survey/2/', 'anitaposttask2survey/2/',
'anitaposttask3survey/2/', 'taskspacer/',
'anitapretasksurvey/3/', 'search/3/', 'anitaposttask0survey/3/',
'anitaposttask1survey/3/', 'anitaposttask2survey/3/',
'anitaposttask3survey/3/', 'taskspacer/',
'anitapretasksurvey/4/', 'search/4/', 'anitaposttask0survey/4/',
'anitaposttask1survey/4/', 'anitaposttask2survey/4/',
'anitaposttask3survey/4/',
'anitaexit1survey/', 'anitaexit2survey/', 'anitaexit3survey/',
'anitademographicssurvey/', 'logout/'],
['startexperiment/', 'consent', 'preexperiment/AN/',
'anitaexit1survey/', 'anitaexit2survey/', 'anitaexit3survey/',
'anitademographicssurvey/', 'logout/'],
]
snippet_flow = [
'startexperiment/', 'preexperiment/UK/',
'demographicssurvey/',
'prepracticetask/0/','taskspacer2/0/', 'search/0/', 'postpracticetask/0/', 'taskspacer',
'snippetpretask/1/','taskspacer2/1/', 'search/1/', 'snippetposttask/1/','systemsnippetposttask/1/',
'taskspacer',
'snippetpretask/2/', 'taskspacer2/2/','search/2/', 'snippetposttask/2/','systemsnippetposttask/2/',
'taskspacer',
'snippetpretask/3/','taskspacer2/3/', 'search/3/', 'snippetposttask/3/','systemsnippetposttask/3/',
'taskspacer',
'snippetpretask/4/','taskspacer2/4/', 'search/4/', 'snippetposttask/4/','systemsnippetposttask/4/',
'taskspacer', 'snippetexitsurvey/', 'performance/', 'endexperiment/',
'logout/'
]
diversity_flow = [
'startexperiment/', 'preexperiment/UK/',
'demographicssurvey/',
'prepracticetask/0/', 'search/0/', 'diversityperformancepractice/', 'postpracticetask/0/', 'taskspacer/',
'snippetpretask/1/', 'taskspacerwithdetails/1/', 'search/1/', 'diversityposttask/1/','systemdiversityposttask/1/',
'taskspacer',
'snippetpretask/2/','taskspacerwithdetails/2/','search/2/', 'diversityposttask/2/','systemdiversityposttask/2/',
'taskspacer',
'snippetpretask/3/','taskspacerwithdetails/3/', 'search/3/', 'diversityposttask/3/','systemdiversityposttask/3/',
'taskspacer',
'snippetpretask/4/','taskspacerwithdetails/4/', 'search/4/', 'diversityposttask/4/','systemdiversityposttask/4/',
'taskspacer', 'diversityexitsurvey/', 'diversityperformance/', 'endexperiment/',
'logout/'
]
jaana_flow = [
'startexperiment/', 'preexperiment/UK/',
'demographicssurvey/',
'prepracticetask/0/','taskspacer2/0/', 'search/0/', 'postpracticetask/0/', 'taskspacer',
'snippetpretask/1/','taskspacer2/1/', 'search/1/', 'posttaskquestions/1/',
'taskspacer',
'snippetpretask/2/', 'taskspacer2/2/','search/2/', 'posttaskquestions/2/',
'taskspacer',
'performance/', 'endexperiment/',
'logout/'
]
test_flow = [
'startexperiment/', 'snippetexitsurvey/','snippetpretask/1/', 'snippetposttask/1/','systemsnippetposttask/1/',
'pretask/1/', 'search/1/','taskspacer',
'pretask/2/', 'search/2/','taskspacer',
'pretask/3/', 'search/3/',
'pretask/4/', 'search/4/','endexperiment/',
'logout/'
]
suggestion_trie = AutocompleteTrie(
min_occurrences=3,
suggestion_count=8,
include_stopwords=False,
stopwords_path=os.path.join(work_dir, "data/stopwords.txt"),
vocab_path=os.path.join(work_dir, "data/vocab.txt"),
vocab_trie_path=os.path.join(work_dir, "data/vocab_trie.dat"))
search_engine = Whooshtrec(
whoosh_index_dir=my_whoosh_doc_index_dir,
stopwords_file=stopword_file,
model=1,
newschema=True)
search_engine.key_name = 'bm25'
search_engine.set_fragmenter(frag_type=2, surround=30)
exp_chiir2016 = ExperimentSetup(
    workflow=exp_work_flows[0],
    engine=search_engine,
    practice_topic='408',
    topics=['347', '367', '435', '354'],
    rpp=10,
    practice_interface=1,
    practice_diversity=4,
    interface=[1, 1, 1, 1],
    diversity=[4, 4, 4, 4],
    rotation_type=1,
    description='standard condition bm25 test',
    trie=suggestion_trie,
    autocomplete=True,
    timeout=[150, 600, 600, 600, 600],
    delay_results=[0, 5, 0, 5, 0]
)
exp_sigir2017 = ExperimentSetup(
workflow=snippet_flow,
engine=search_engine,
practice_topic='367',
topics=['347', '341', '435','408'],
rpp=10,
practice_interface=1,
interface=[1, 2, 3, 4],
rotation_type=1,
description='standard condition bm25 test',
trie=suggestion_trie,
autocomplete=True,
timeout=[150,600,600,600, 600]) # 300s = 5min; 600s = 10min; 1200s = 20min
exp_jaana = ExperimentSetup(
workflow=jaana_flow,
engine=search_engine,
practice_topic='367',
topics=['347', '435'],
rpp=10,
practice_interface=1,
interface=[1, 1],
rotation_type=1,
description='standard condition bm25 test',
trie=suggestion_trie,
autocomplete=True,
timeout=[150,1200,1200])
exp_sigir2018 = ExperimentSetup(
workflow=diversity_flow,
engine=search_engine,
practice_topic='367',
topics=['347', '341', '435','408'],
rpp=10,
practice_interface=1,
interface=[1, 1, 1, 1],
rotation_type=2,
practice_diversity=2,
diversity=[1,2,3,4],
description='standard condition bm25 test',
trie=suggestion_trie,
autocomplete=True,
target=4,
timeout=[10000, 10000, 10000, 10000, 10000]) # 300s = 5min; 600s = 10min; 1200s = 20min, 10000 to stop timeout events firing
# these correspond to conditions
experiment_setups = [exp_sigir2018, exp_jaana, exp_chiir2016]
|
leifos/treconomics
|
treconomics_project/treconomics/experiment_configuration.py
|
Python
|
mit
| 8,405
|
import unittest
from .context import json_stable_stringify_python as stringify
class TestStringify(unittest.TestCase):
def test_simple_object(self):
node = {'c':6, 'b': [4,5], 'a': 3, 'z': None}
actual = stringify.stringify(node)
expected = '{"a":3,"b":[4,5],"c":6,"z":null}'
self.assertEqual(actual, expected)
def test_object_with_empty_string(self):
node = {'a': 3, 'z': ''}
actual = stringify.stringify(node)
expected = '{"a":3,"z":""}'
self.assertEqual(actual, expected)
def test_nested_object(self):
node = {
'a': {
'b': {
'c': [1,2,3,None]
}
}
}
actual = stringify.stringify(node)
expected = '{"a":{"b":{"c":[1,2,3,null]}}}'
self.assertEqual(actual, expected)
def test_array_with_objects(self):
node = [{'z': 1, 'a': 2}]
actual = stringify.stringify(node)
expected = '[{"a":2,"z":1}]'
self.assertEqual(actual, expected)
def test_nested_array_objects(self):
node = [{'z': [[{'y': 1, 'b': 2}]], 'a': 2}]
actual = stringify.stringify(node)
expected = '[{"a":2,"z":[[{"b":2,"y":1}]]}]'
self.assertEqual(actual, expected)
def test_array_with_none(self):
node = [1, None]
actual = stringify.stringify(node)
expected = '[1,null]'
self.assertEqual(actual, expected)
def test_array_with_empty_string(self):
node = [1, '']
actual = stringify.stringify(node)
expected = '[1,""]'
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
|
haochi/json-stable-stringify-python
|
tests/test_stringify.py
|
Python
|
mit
| 1,700
|
import datetime
import os
import uuid
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from pinax.invitations.models import JoinInvitation
from reversion import revisions as reversion
from slugify import slugify
from . import signals
from .hooks import hookset
def avatar_upload(instance, filename):
ext = filename.split(".")[-1]
filename = f"{uuid.uuid4()}.{ext}"
return os.path.join("avatars", filename)
def create_slug(name):
return slugify(name)[:50]
class BaseTeam(models.Model):
MEMBER_ACCESS_OPEN = "open"
MEMBER_ACCESS_APPLICATION = "application"
MEMBER_ACCESS_INVITATION = "invitation"
MANAGER_ACCESS_ADD = "add someone"
MANAGER_ACCESS_INVITE = "invite someone"
MEMBER_ACCESS_CHOICES = [
(MEMBER_ACCESS_OPEN, _("open")),
(MEMBER_ACCESS_APPLICATION, _("by application")),
(MEMBER_ACCESS_INVITATION, _("by invitation"))
]
MANAGER_ACCESS_CHOICES = [
(MANAGER_ACCESS_ADD, _("add someone")),
(MANAGER_ACCESS_INVITE, _("invite someone"))
]
member_access = models.CharField(max_length=20, choices=MEMBER_ACCESS_CHOICES, verbose_name=_("member access"))
manager_access = models.CharField(max_length=20, choices=MANAGER_ACCESS_CHOICES, verbose_name=_("manager access"))
class Meta:
abstract = True
verbose_name = _("Base")
verbose_name_plural = _("Bases")
def can_join(self, user):
state = self.state_for(user)
if self.member_access == BaseTeam.MEMBER_ACCESS_OPEN and state is None:
return True
elif state == BaseMembership.STATE_INVITED:
return True
else:
return False
def can_leave(self, user):
# managers can't leave at the moment
role = self.role_for(user)
return role == BaseMembership.ROLE_MEMBER
def can_apply(self, user):
state = self.state_for(user)
return self.member_access == BaseTeam.MEMBER_ACCESS_APPLICATION and state is None
@property
def applicants(self):
return self.memberships.filter(state=BaseMembership.STATE_APPLIED)
@property
def invitees(self):
return self.memberships.filter(state=BaseMembership.STATE_INVITED)
@property
def declines(self):
return self.memberships.filter(state=BaseMembership.STATE_DECLINED)
@property
def rejections(self):
return self.memberships.filter(state=BaseMembership.STATE_REJECTED)
@property
def waitlisted(self):
return self.memberships.filter(state=BaseMembership.STATE_WAITLISTED)
@property
def acceptances(self):
return self.memberships.filter(state__in=[
BaseMembership.STATE_ACCEPTED,
BaseMembership.STATE_AUTO_JOINED]
)
@property
def members(self):
return self.acceptances.filter(role=BaseMembership.ROLE_MEMBER)
@property
def managers(self):
return self.acceptances.filter(role=BaseMembership.ROLE_MANAGER)
@property
def owners(self):
return self.acceptances.filter(role=BaseMembership.ROLE_OWNER)
def is_owner_or_manager(self, user):
return self.acceptances.filter(
role__in=[
BaseMembership.ROLE_OWNER,
BaseMembership.ROLE_MANAGER
],
user=user
).exists()
def is_member(self, user):
return self.members.filter(user=user).exists()
def is_manager(self, user):
return self.managers.filter(user=user).exists()
def is_owner(self, user):
return self.owners.filter(user=user).exists()
def is_on_team(self, user):
return self.acceptances.filter(user=user).exists()
def add_member(self, user, role=None, state=None, by=None):
# we do this, rather than put the BaseMembership constants in declaration
# because BaseMembership is not yet defined
if role is None:
role = BaseMembership.ROLE_MEMBER
if state is None:
state = BaseMembership.STATE_AUTO_JOINED
membership, created = self.memberships.get_or_create(
team=self,
user=user,
defaults={"role": role, "state": state},
)
signals.added_member.send(sender=self, membership=membership, by=by)
return membership
def add_user(self, user, role, by=None):
state = BaseMembership.STATE_AUTO_JOINED
if self.manager_access == BaseTeam.MANAGER_ACCESS_INVITE:
state = BaseMembership.STATE_INVITED
membership, _ = self.memberships.get_or_create(
user=user,
defaults={"role": role, "state": state}
)
signals.added_member.send(sender=self, membership=membership, by=by)
return membership
def invite_user(self, from_user, to_email, role, message=None):
if not JoinInvitation.objects.filter(signup_code__email=to_email).exists():
invite = JoinInvitation.invite(from_user, to_email, message, send=False)
membership, _ = self.memberships.get_or_create(
invite=invite,
defaults={"role": role, "state": BaseMembership.STATE_INVITED}
)
invite.send_invite()
signals.invited_user.send(sender=self, membership=membership, by=from_user)
return membership
def for_user(self, user):
try:
return self.memberships.get(user=user)
except ObjectDoesNotExist:
pass
def state_for(self, user):
membership = self.for_user(user=user)
if membership:
return membership.state
def role_for(self, user):
if hookset.user_is_staff(user):
return Membership.ROLE_MANAGER
membership = self.for_user(user)
if membership:
return membership.role
class SimpleTeam(BaseTeam):
class Meta:
verbose_name = _("Simple Team")
verbose_name_plural = _("Simple Teams")
class Team(BaseTeam):
slug = models.SlugField(unique=True)
name = models.CharField(max_length=100, verbose_name=_("name"))
avatar = models.ImageField(upload_to=avatar_upload, blank=True, verbose_name=_("avatar"))
description = models.TextField(blank=True, verbose_name=_("description"))
creator = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="teams_created", verbose_name=_("creator"), on_delete=models.CASCADE)
created = models.DateTimeField(default=timezone.now, editable=False, verbose_name=_("created"))
class Meta:
verbose_name = _("Team")
verbose_name_plural = _("Teams")
def get_absolute_url(self):
return reverse("pinax_teams:team_detail", args=[self.slug])
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.id:
self.slug = create_slug(self.name)
self.full_clean()
super().save(*args, **kwargs)
class BaseMembership(models.Model):
STATE_APPLIED = "applied"
STATE_INVITED = "invited"
STATE_DECLINED = "declined"
STATE_REJECTED = "rejected"
STATE_ACCEPTED = "accepted"
STATE_WAITLISTED = "waitlisted"
STATE_AUTO_JOINED = "auto-joined"
ROLE_MEMBER = "member"
ROLE_MANAGER = "manager"
ROLE_OWNER = "owner"
STATE_CHOICES = [
(STATE_APPLIED, _("applied")),
(STATE_INVITED, _("invited")),
(STATE_DECLINED, _("declined")),
(STATE_REJECTED, _("rejected")),
(STATE_ACCEPTED, _("accepted")),
(STATE_WAITLISTED, _("waitlisted")),
(STATE_AUTO_JOINED, _("auto joined"))
]
ROLE_CHOICES = [
(ROLE_MEMBER, _("member")),
(ROLE_MANAGER, _("manager")),
(ROLE_OWNER, _("owner"))
]
state = models.CharField(max_length=20, choices=STATE_CHOICES, verbose_name=_("state"))
role = models.CharField(max_length=20, choices=ROLE_CHOICES, default=ROLE_MEMBER, verbose_name=_("role"))
created = models.DateTimeField(default=timezone.now, verbose_name=_("created"))
class Meta:
abstract = True
def is_owner(self):
return self.role == BaseMembership.ROLE_OWNER
def is_manager(self):
return self.role == BaseMembership.ROLE_MANAGER
def is_member(self):
return self.role == BaseMembership.ROLE_MEMBER
def promote(self, by):
role = self.team.role_for(by)
if role in [BaseMembership.ROLE_MANAGER, BaseMembership.ROLE_OWNER]:
if self.role == Membership.ROLE_MEMBER:
self.role = Membership.ROLE_MANAGER
self.save()
signals.promoted_member.send(sender=self, membership=self, by=by)
return True
return False
def demote(self, by):
role = self.team.role_for(by)
if role in [Membership.ROLE_MANAGER, Membership.ROLE_OWNER]:
if self.role == Membership.ROLE_MANAGER:
self.role = Membership.ROLE_MEMBER
self.save()
signals.demoted_member.send(sender=self, membership=self, by=by)
return True
return False
def accept(self, by):
role = self.team.role_for(by)
if role in [Membership.ROLE_MANAGER, Membership.ROLE_OWNER]:
if self.state == Membership.STATE_APPLIED:
self.state = Membership.STATE_ACCEPTED
self.save()
signals.accepted_membership.send(sender=self, membership=self)
return True
return False
def reject(self, by):
role = self.team.role_for(by)
if role in [Membership.ROLE_MANAGER, Membership.ROLE_OWNER]:
if self.state == Membership.STATE_APPLIED:
self.state = Membership.STATE_REJECTED
self.save()
signals.rejected_membership.send(sender=self, membership=self)
return True
return False
def joined(self):
self.user = self.invite.to_user
if self.team.manager_access == Team.MANAGER_ACCESS_ADD:
self.state = Membership.STATE_AUTO_JOINED
else:
self.state = Membership.STATE_INVITED
self.save()
def status(self):
if self.user:
return self.get_state_display()
if self.invite:
return self.invite.get_status_display()
return "Unknown"
def resend_invite(self, by=None):
if self.invite is not None:
code = self.invite.signup_code
code.expiry = timezone.now() + datetime.timedelta(days=5)
code.save()
code.send()
signals.resent_invite.send(sender=self, membership=self, by=by)
def remove(self, by=None):
if self.invite is not None:
self.invite.signup_code.delete()
self.invite.delete()
self.delete()
signals.removed_membership.send(sender=Membership, team=self.team, user=self.user, invitee=self.invitee, by=by)
@property
def invitee(self):
return self.user or self.invite.to_user_email()
class SimpleMembership(BaseMembership):
team = models.ForeignKey(SimpleTeam, related_name="memberships", verbose_name=_("team"), on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="simple_memberships", null=True, blank=True, verbose_name=_("user"), on_delete=models.SET_NULL)
invite = models.ForeignKey(JoinInvitation, related_name="simple_memberships", null=True, blank=True, verbose_name=_("invite"), on_delete=models.SET_NULL)
def __str__(self):
return f"{self.user} in {self.team}"
class Meta:
unique_together = [("team", "user", "invite")]
verbose_name = _("Simple Membership")
verbose_name_plural = _("Simple Memberships")
class Membership(BaseMembership):
team = models.ForeignKey(Team, related_name="memberships", verbose_name=_("team"), on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="memberships", null=True, blank=True, verbose_name=_("user"), on_delete=models.SET_NULL)
invite = models.ForeignKey(JoinInvitation, related_name="memberships", null=True, blank=True, verbose_name=_("invite"), on_delete=models.SET_NULL)
def __str__(self):
return f"{self.user} in {self.team}"
class Meta:
unique_together = [("team", "user", "invite")]
verbose_name = _("Membership")
verbose_name_plural = _("Memberships")
reversion.register(SimpleMembership)
reversion.register(Membership)
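# --- Illustrative usage sketch (not part of the original module) ---
# Creating a team and seeding its owner; `user` stands for any instance of
# settings.AUTH_USER_MODEL and is hypothetical here.
#
#     team = Team.objects.create(
#         name="Research",
#         creator=user,
#         member_access=BaseTeam.MEMBER_ACCESS_OPEN,
#         manager_access=BaseTeam.MANAGER_ACCESS_ADD,
#     )
#     team.add_member(user, role=BaseMembership.ROLE_OWNER,
#                     state=BaseMembership.STATE_ACCEPTED)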
|
pinax/pinax-teams
|
pinax/teams/models.py
|
Python
|
mit
| 12,754
|
#!/usr/bin/env python
import socket
import random
SERVERHOST = 'localhost'
SERVERPORT = 4080
LOCALIP = '127.0.0.2'
LOCALPORT = 4082
LOCALNAME = "30_PERCENT_SEE"
def higher(dice_a, dice_b):
    # Mia ranking: "2,1" (Mia) beats everything, doubles beat non-doubles
    # (higher double wins), otherwise compare the dice high-to-low.
    ad1, ad2 = dice_a[0], dice_a[1]
    bd1, bd2 = dice_b[0], dice_b[1]
if ad1 == bd1 and ad2 == bd2: return False
if ad1 == "2" and ad2 == "1": return True
if bd1 == "2" and bd2 == "1": return False
if ad1 == ad2 and bd1 == bd2: return int(ad1) > int(bd1)
if ad1 == ad2: return True
if bd1 == bd2: return False
if ad1 == bd1: return int(ad2) > int(bd2)
return int(ad1) > int(bd1)
def one_higher(dice):
    # Return the next announcement above `dice` in Mia ranking,
    # e.g. "5,4" -> "6,1", "6,5" -> "1,1" and "6,6" -> "2,1".
    d1, d2 = dice[0], dice[1]
if d1 == "6" and d2 == "6":
return "2,1"
if d1 == d2:
return str(int(d1)+1)+","+str(int(d1)+1)
if d1 == "6" and d2 == "5":
return "1,1"
if int(d1) == int(d2)+1:
return str(int(d1)+1)+",1"
return d1+","+str(int(d2)+1)
def connect_to_miaserver(sock):
sock.settimeout(2)
while True:
sock.sendto("REGISTER;" + LOCALNAME, (SERVERHOST, SERVERPORT))
try:
data = sock.recv(1024)
if "REGISTERED" in data:
break
else:
print "Received '" + data + "'"
except socket.timeout:
print "MIA Server does not respond, retrying"
print "Registered at MIA Server"
sock.setblocking(1)
def play_mia(sock):
announced = None
while True:
data = sock.recv(1024)
if data.startswith("ROUND STARTING;"):
_, _, token = data.strip().partition(";")
sock.sendto("JOIN;" + token, (SERVERHOST, SERVERPORT))
announced = None
elif data.startswith("ANNOUNCED;"):
d1, _, d2 = data.strip().split(";")[2].partition(",")
announced = (d1, d2)
elif data.startswith("YOUR TURN;"):
_, _, token = data.strip().partition(";")
            if announced is None or random.uniform(0, 100) > 30.0:
sock.sendto("ROLL;" + token, (SERVERHOST, SERVERPORT))
else:
sock.sendto("SEE;" + token, (SERVERHOST, SERVERPORT))
elif data.startswith("ROLLED;"):
token = data.split(";")[2]
d1, _, d2 = data.strip().split(";")[1].partition(",")
            if announced is None or higher((d1, d2), announced):
sock.sendto("ANNOUNCE;"+d1+","+d2+";"+token, (SERVERHOST, SERVERPORT))
else:
sock.sendto("ANNOUNCE;"+one_higher(announced)+";"+token, (SERVERHOST, SERVERPORT))
def mia_client_start():
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((LOCALIP, LOCALPORT))
connect_to_miaserver(sock)
play_mia(sock)
if __name__ == "__main__":
mia_client_start()
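# --- Protocol summary (derived from the handlers above, illustrative) ---
# client -> server: REGISTER;name, JOIN;token, ROLL;token, SEE;token,
#                   ANNOUNCE;d1,d2;token
# server -> client: REGISTERED, ROUND STARTING;token,
#                   ANNOUNCED;<player>;d1,d2, YOUR TURN;token,
#                   ROLLED;d1,d2;token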
|
SteffenBauer/mia_elixir
|
python/mia_client1.py
|
Python
|
mit
| 2,536
|
#!/usr/bin/python
import os
os.system('git add -A && git commit -m "Working on changes in my website (Amaia)" && git push origin master')
|
amaiagiralt/amaiagiralt
|
upload.py
|
Python
|
mit
| 140
|
"""Tests for hermite_e module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.hermite_e as herme
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
He0 = np.array([1])
He1 = np.array([0, 1])
He2 = np.array([-1, 0, 1])
He3 = np.array([0, -3, 0, 1])
He4 = np.array([3, 0, -6, 0, 1])
He5 = np.array([0, 15, 0, -10, 0, 1])
He6 = np.array([-15, 0, 45, 0, -15, 0, 1])
He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1])
He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1])
He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1])
Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9]
def trim(x):
return herme.hermetrim(x, tol=1e-6)
class TestConstants(TestCase):
def test_hermedomain(self):
assert_equal(herme.hermedomain, [-1, 1])
def test_hermezero(self):
assert_equal(herme.hermezero, [0])
def test_hermeone(self):
assert_equal(herme.hermeone, [1])
def test_hermex(self):
assert_equal(herme.hermex, [0, 1])
class TestArithmetic(TestCase):
x = np.linspace(-3, 3, 100)
def test_hermeadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = herme.hermeadd([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermesub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = herme.hermesub([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermemulx(self):
assert_equal(herme.hermemulx([0]), [0])
assert_equal(herme.hermemulx([1]), [0, 1])
for i in range(1, 5):
ser = [0] * i + [1]
tgt = [0] * (i - 1) + [i, 0, 1]
assert_equal(herme.hermemulx(ser), tgt)
def test_hermemul(self):
# check values of result
for i in range(5):
pol1 = [0] * i + [1]
val1 = herme.hermeval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0] * j + [1]
val2 = herme.hermeval(self.x, pol2)
pol3 = herme.hermemul(pol1, pol2)
val3 = herme.hermeval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1 * val2, err_msg=msg)
def test_hermediv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0] * i + [1]
cj = [0] * j + [1]
tgt = herme.hermeadd(ci, cj)
quo, rem = herme.hermediv(tgt, ci)
res = herme.hermeadd(herme.hermemul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([4., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5)) * 2 - 1
y = polyval(x, [1., 2., 3.])
def test_hermeval(self):
# check empty input
assert_equal(herme.hermeval([], [1]).size, 0)
        # check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Helist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = herme.hermeval(x, [0] * i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
# check that shape is preserved
for i in range(3):
dims = [2] * i
x = np.zeros(dims)
assert_equal(herme.hermeval(x, [1]).shape, dims)
assert_equal(herme.hermeval(x, [1, 0]).shape, dims)
assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims)
def test_hermeval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test exceptions
assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d)
# test values
tgt = y1 * y2
res = herme.hermeval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = herme.hermeval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_hermeval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test exceptions
assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d)
# test values
tgt = y1 * y2 * y3
res = herme.hermeval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = herme.hermeval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_hermegrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test values
tgt = np.einsum('i,j->ij', y1, y2)
res = herme.hermegrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = herme.hermegrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3) * 2)
def test_hermegrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = herme.hermegrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = herme.hermegrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3) * 3)
class TestIntegral(TestCase):
def test_hermeint(self):
# check exceptions
assert_raises(ValueError, herme.hermeint, [0], .5)
assert_raises(ValueError, herme.hermeint, [0], -1)
assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0] * (i - 2) + [1]
res = herme.hermeint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
tgt = [i] + [0] * i + [1 / scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i])
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1)
assert_almost_equal(herme.hermeval(-1, hermeint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
tgt = [i] + [0] * i + [2 / scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2)
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1)
res = herme.hermeint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k])
res = herme.hermeint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1)
res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k], scl=2)
res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T
res = herme.hermeint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeint(c) for c in c2d])
res = herme.hermeint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d])
res = herme.hermeint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
def test_hermeder(self):
# check exceptions
assert_raises(ValueError, herme.hermeder, [0], .5)
assert_raises(ValueError, herme.hermeder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0] * i + [1]
res = herme.hermeder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0] * i + [1]
res = herme.hermeder(herme.hermeint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0] * i + [1]
res = herme.hermeder(
herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T
res = herme.hermeder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeder(c) for c in c2d])
res = herme.hermeder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5)) * 2 - 1
def test_hermevander(self):
# check for 1d x
x = np.arange(3)
v = herme.hermevander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0] * i + [1]
assert_almost_equal(v[..., i], herme.hermeval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = herme.hermevander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0] * i + [1]
assert_almost_equal(v[..., i], herme.hermeval(x, coef))
def test_hermevander2d(self):
# also tests hermeval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = herme.hermevander2d(x1, x2, [1, 2])
tgt = herme.hermeval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herme.hermevander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_hermevander3d(self):
# also tests hermeval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = herme.hermevander3d(x1, x2, x3, [1, 2, 3])
tgt = herme.hermeval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_hermefit(self):
def f(x):
return x * (x - 1) * (x - 2)
def f2(x):
return x ** 4 + x ** 2 + 1
# Test exceptions
assert_raises(ValueError, herme.hermefit, [1], [1], -1)
assert_raises(TypeError, herme.hermefit, [[1]], [1], 0)
assert_raises(TypeError, herme.hermefit, [], [1], 0)
assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0)
assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0)
assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0)
assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, herme.hermefit, [1], [1], [-1, ])
assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6])
assert_raises(TypeError, herme.hermefit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = herme.hermefit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(herme.hermeval(x, coef3), y)
coef3 = herme.hermefit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(herme.hermeval(x, coef3), y)
#
coef4 = herme.hermefit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(herme.hermeval(x, coef4), y)
coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(herme.hermeval(x, coef4), y)
# check things still work if deg is not in strict increasing
coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0])
assert_equal(len(coef4), 5)
assert_almost_equal(herme.hermeval(x, coef4), y)
#
coef2d = herme.hermefit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = herme.hermefit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex x values whose squares sum to zero
x = [1, 1j, -1, -1j]
assert_almost_equal(herme.hermefit(x, x, 1), [0, 1])
assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1])
        # test fitting only even Hermite_e polynomial terms
x = np.linspace(-1, 1)
y = f2(x)
coef1 = herme.hermefit(x, y, 4)
assert_almost_equal(herme.hermeval(x, coef1), y)
coef2 = herme.hermefit(x, y, [0, 2, 4])
assert_almost_equal(herme.hermeval(x, coef2), y)
assert_almost_equal(coef1, coef2)
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, herme.hermecompanion, [])
assert_raises(ValueError, herme.hermecompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0] * i + [1]
assert_(herme.hermecompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)
class TestGauss(TestCase):
def test_100(self):
x, w = herme.hermegauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = herme.hermevander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1 / np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.sqrt(2 * np.pi)
assert_almost_equal(w.sum(), tgt)
class TestMisc(TestCase):
def test_hermefromroots(self):
res = herme.hermefromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2])
pol = herme.hermefromroots(roots)
res = herme.hermeval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(herme.herme2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_hermeroots(self):
assert_almost_equal(herme.hermeroots([1]), [])
assert_almost_equal(herme.hermeroots([1, 1]), [-1])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = herme.hermeroots(herme.hermefromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_hermetrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, herme.hermetrim, coef, -1)
# Test results
assert_equal(herme.hermetrim(coef), coef[:-1])
assert_equal(herme.hermetrim(coef, 1), coef[:-3])
assert_equal(herme.hermetrim(coef, 2), [0])
def test_hermeline(self):
assert_equal(herme.hermeline(3, 4), [3, 4])
def test_herme2poly(self):
for i in range(10):
assert_almost_equal(herme.herme2poly([0] * i + [1]), Helist[i])
def test_poly2herme(self):
for i in range(10):
assert_almost_equal(herme.poly2herme(Helist[i]), [0] * i + [1])
def test_weight(self):
x = np.linspace(-5, 5, 11)
tgt = np.exp(-.5 * x ** 2)
res = herme.hermeweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
run_module_suite()
|
DailyActie/Surrogate-Model
|
01-codes/numpy-master/numpy/polynomial/tests/test_hermite_e.py
|
Python
|
mit
| 18,726
|
import floq.core.fixed_system as fs
import floq.evolution as ev
import floq.errors as er
import floq.helpers.index as h
class ParametricSystemBase(object):
"""
Base class to specify a physical system that still has open parameters,
such as the control amplitudes, the control duration, or other arbitrary
parameters in the Hamiltonian.
    This needs to be sub-classed, and a subclass should provide:
    - get_system(controls, t)
"""
def get_system(self, controls, t):
raise NotImplementedError("get_system not implemented.")
def is_nz_ok(self, controls, t):
system = self.get_system(controls, t)
try:
u = ev.evolve_system(system)
except er.EigenvalueNumberError:
return False
return h.is_unitary(u)
def set_nz(self, controls, t):
if self.is_nz_ok(controls, t):
self.decrease_nz_until_not_ok(controls, t, step=max(10, self.nz/5))
self.decrease_nz_until_not_ok(controls, t, step=max(10, self.nz/10))
self.decrease_nz_until_not_ok(controls, t, step=2)
self.increase_nz_until_ok(controls, t, step=2)
else:
self.increase_nz_until_ok(controls, t, step=max(10, self.nz/5))
self.decrease_nz_until_not_ok(controls, t, step=2)
self.increase_nz_until_ok(controls, t, step=2)
def increase_nz_until_ok(self, controls, t, step=2):
while self.is_nz_ok(controls, t) is False:
self.nz += h.make_even(step)
def decrease_nz_until_not_ok(self, controls, t, step=2):
while self.is_nz_ok(controls, t) and self.nz-step > 3:
self.nz -= h.make_even(step)
class ParametricSystemWithFunctions(ParametricSystemBase):
"""
A system with parametric hf and dhf, which are passed as callables to the constructor.
hf has to have the form hf(a,parameters)
"""
def __init__(self, hf, dhf, nz, omega, parameters):
"""
hf: callable hf(controls,parameters,omega)
dhf: callable dhf(controls,parameters,omega)
omega: 2 pi/T, the period of the Hamiltonian
nz: number of Fourier modes to be considered during evolution
parameters: a data structure that holds parameters for hf and dhf
(dictionary is probably the best idea)
"""
self.hf = hf
self.dhf = dhf
self.omega = omega
self.nz = nz
self.parameters = parameters
def calculate_hf(self, controls):
return self.hf(controls, self.parameters, self.omega)
def calculate_dhf(self, controls):
return self.dhf(controls, self.parameters, self.omega)
def get_system(self, controls, t):
hf = self.calculate_hf(controls)
dhf = self.calculate_dhf(controls)
return fs.FixedSystem(hf, dhf, self.nz, self.omega, t)
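# --- Illustrative sketch (not part of the original module) ---
# A minimal concrete subclass only has to supply get_system(controls, t).
# ConstantDriveSystem and its fields are hypothetical; hf/dhf are assumed
# to be precomputed Fourier-component arrays in the format FixedSystem
# expects.
class ConstantDriveSystem(ParametricSystemBase):
    def __init__(self, hf, dhf, nz, omega):
        self.hf = hf        # Fourier components of the Hamiltonian
        self.dhf = dhf      # their derivatives with respect to the controls
        self.nz = nz        # number of Fourier modes used during evolution
        self.omega = omega  # 2 pi/T, the drive frequency
    def get_system(self, controls, t):
        # The controls are ignored: this system's Hamiltonian is fixed.
        return fs.FixedSystem(self.hf, self.dhf, self.nz, self.omega, t)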
|
sirmarcel/floq
|
benchmark/museum_of_evolution/uncompiled_floq/parametric_system.py
|
Python
|
mit
| 2,852
|
#!python
# coding: utf-8
# edit by gistnu
# reference from lejedi76
# https://gis.stackexchange.com/questions/173127/generating-equal-sized-polygons-along-line-with-pyqgis
from qgis.core import QGis, QgsMapLayerRegistry, QgsVectorLayer, QgsGeometry, QgsField, QgsFeature, QgsPoint
from PyQt4.QtCore import QVariant
def getAllbbox(layer, width, height, srid, overlap):
for feature in layer.selectedFeatures():
geom = feature.geometry()
if geom.type() <> QGis.Line:
print "Geometry type should be a LineString"
return 2
bbox = QgsVectorLayer("Polygon?crs=epsg:"+str(srid),
layer.name()+'_id_'+str(feature.id()),
"memory")
gid = QgsField("gid", QVariant.Int, "int")
angle = QgsField("angle", QVariant.Double, "double")
attributes = [gid, angle]
bbox.startEditing()
bboxProvider = bbox.dataProvider()
bboxProvider.addAttributes(attributes)
curs = 0
numbbox = geom.length()/(width)
step = 1.0/numbbox
stepnudge = (1.0-overlap) * step
pageFeatures = []
r = 1
currangle = 0
while curs <= 1:
# print 'r =' + str(r)
# print 'curs = ' + str(curs)
startpoint = geom.interpolate(curs*geom.length())
endpoint = geom.interpolate((curs+step)*geom.length())
x_start = startpoint.asPoint().x()
y_start = startpoint.asPoint().y()
x_end = endpoint.asPoint().x()
y_end = endpoint.asPoint().y()
print 'x_start :' + str(x_start)
print 'y_start :' + str(y_start)
currline = QgsGeometry().fromWkt('LINESTRING({} {}, {} {})'.format(x_start, y_start, x_end, y_end))
currpoly = QgsGeometry().fromWkt(
'POLYGON((0 0, 0 {height},{width} {height}, {width} 0, 0 0))'.format(height=height, width=width))
currpoly.translate(0,-height/2)
            currangle = (startpoint.asPoint().azimuth(endpoint.asPoint()) + 270) % 360
currpoly.rotate(currangle, QgsPoint(0,0))
currpoly.translate(x_start, y_start)
currpoly.asPolygon()
page = currpoly
curs = curs + stepnudge
feat = QgsFeature()
feat.setAttributes([r, currangle])
feat.setGeometry(page)
pageFeatures.append(feat)
r = r + 1
bboxProvider.addFeatures(pageFeatures)
bbox.commitChanges()
QgsMapLayerRegistry.instance().addMapLayer(bbox)
return 0
layer = iface.activeLayer()  # run from the QGIS Python console, where iface is predefined
getAllbbox(layer, 100, 200, 32647, 0.2) #layer, width, height, crs, overlap
|
chingchai/workshop
|
qgis-scripts/generate_stripmap_index.py
|
Python
|
mit
| 2,746
|
from django.conf import settings
from django.conf.urls import url
from plans.views import CreateOrderView, OrderListView, InvoiceDetailView, AccountActivationView, \
OrderPaymentReturnView, CurrentPlanView, UpgradePlanView, OrderView, BillingInfoRedirectView, \
BillingInfoCreateView, BillingInfoUpdateView, BillingInfoDeleteView, CreateOrderPlanChangeView, ChangePlanView, \
PricingView, FakePaymentsView
urlpatterns = [
url(r'^pricing/$', PricingView.as_view(), name='pricing'),
url(r'^account/$', CurrentPlanView.as_view(), name='current_plan'),
url(r'^account/activation/$', AccountActivationView.as_view(), name='account_activation'),
url(r'^upgrade/$', UpgradePlanView.as_view(), name='upgrade_plan'),
url(r'^order/extend/new/(?P<pk>\d+)/$', CreateOrderView.as_view(), name='create_order_plan'),
url(r'^order/upgrade/new/(?P<pk>\d+)/$', CreateOrderPlanChangeView.as_view(), name='create_order_plan_change'),
url(r'^change/(?P<pk>\d+)/$', ChangePlanView.as_view(), name='change_plan'),
url(r'^order/$', OrderListView.as_view(), name='order_list'),
url(r'^order/(?P<pk>\d+)/$', OrderView.as_view(), name='order'),
url(r'^order/(?P<pk>\d+)/payment/success/$', OrderPaymentReturnView.as_view(status='success'),
name='order_payment_success'),
url(r'^order/(?P<pk>\d+)/payment/failure/$', OrderPaymentReturnView.as_view(status='failure'),
name='order_payment_failure'),
url(r'^billing/$', BillingInfoRedirectView.as_view(), name='billing_info'),
url(r'^billing/create/$', BillingInfoCreateView.as_view(), name='billing_info_create'),
url(r'^billing/update/$', BillingInfoUpdateView.as_view(), name='billing_info_update'),
url(r'^billing/delete/$', BillingInfoDeleteView.as_view(), name='billing_info_delete'),
url(r'^invoice/(?P<pk>\d+)/preview/html/$', InvoiceDetailView.as_view(), name='invoice_preview_html'),
]
if getattr(settings, 'DEBUG', False):
urlpatterns += [
url(r'^fakepayments/(?P<pk>\d+)/$', FakePaymentsView.as_view(), name='fake_payments'),
]
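# --- Illustrative wiring sketch (not part of the original module) ---
# These patterns are typically mounted from a project's root URLconf, e.g.:
#
#     from django.conf.urls import include, url
#     urlpatterns = [
#         url(r'^plans/', include('plans.urls')),
#     ]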
|
cypreess/django-plans
|
plans/urls.py
|
Python
|
mit
| 2,070
|
from handlers import Handler
from models import User
# Login handler
class Login(Handler):
def get(self):
self.render('login-form.html')
def post(self):
username = self.request.get('username')
password = self.request.get('password')
u = User.login(username, password)
if u:
self.login(u)
self.redirect('/blog')
else:
msg = 'Invalid login'
self.render('login-form.html', error=msg)
|
YuhanLin1105/Multi-User-Blog
|
handlers/login.py
|
Python
|
mit
| 489
|
import unittest
import json
import sys
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
import requests
from tabpy_client.rest import *
class TestRequestsNetworkWrapper(unittest.TestCase):
def test_init(self):
rnw = RequestsNetworkWrapper()
def test_init_with_session(self):
session = {}
rnw = RequestsNetworkWrapper(session=session)
self.assertIs(session, rnw.session)
def setUp(self):
def mock_response(status_code):
response = Mock(requests.Response())
response.json.return_value = 'json'
response.status_code = status_code
return response
session = Mock(requests.session())
session.get.return_value = mock_response(200)
session.post.return_value = mock_response(200)
session.put.return_value = mock_response(200)
session.delete.return_value = mock_response(204)
self.rnw = RequestsNetworkWrapper(session=session)
def test_GET(self):
url = 'abc'
data = {'foo':'bar'}
self.assertEqual(self.rnw.GET(url, data), 'json')
self.rnw.session.get.assert_called_once_with(url,
params=data,
timeout=None)
    def test_GET_InvalidData(self):
        url = 'abc'
        data = {'cat'}
        try:
            self.assertEqual(self.rnw.GET(url, data), 'json')
        except Exception:
            e = sys.exc_info()[0]
            self.assertEqual(e, TypeError)
    def test_GET_InvalidURL(self):
        url = ''
        data = {'foo': 'bar'}
        try:
            self.assertEqual(self.rnw.GET(url, data), 'json')
        except Exception:
            e = sys.exc_info()[0]
            self.assertEqual(e, TypeError)
def test_POST(self):
url = 'abc'
data = {'foo':'bar'}
self.assertEqual(self.rnw.POST(url, data), 'json')
self.rnw.session.post.assert_called_once_with(url,
data=json.dumps(data),
headers={'content-type':'application/json'},
timeout=None)
    def test_POST_InvalidURL(self):
        url = ''
        data = {'foo': 'bar'}
        try:
            self.assertEqual(self.rnw.POST(url, data), 'json')
        except Exception:
            e = sys.exc_info()[0]
            self.assertEqual(e, TypeError)
    def test_POST_InvalidData(self):
        url = 'url'
        data = {'cat'}
        try:
            self.assertEqual(self.rnw.POST(url, data), 'json')
        except Exception:
            e = sys.exc_info()[0]
            self.assertEqual(e, TypeError)
def test_PUT(self):
url = 'abc'
data = {'foo':'bar'}
self.assertEqual(self.rnw.PUT(url, data), 'json')
self.rnw.session.put.assert_called_once_with(url,
data=json.dumps(data),
headers={'content-type':'application/json'},
timeout=None)
    def test_PUT_InvalidData(self):
        url = 'url'
        data = {'cat'}
        try:
            self.assertEqual(self.rnw.PUT(url, data), 'json')
        except Exception:
            e = sys.exc_info()[0]
            self.assertEqual(e, TypeError)
    def test_PUT_InvalidURL(self):
        url = ''
        data = {'foo:bar'}
        try:
            self.assertEqual(self.rnw.PUT(url, data), 'json')
        except Exception:
            e = sys.exc_info()[0]
            self.assertEqual(e, TypeError)
def test_DELETE(self):
url = 'abc'
data = {'foo':'bar'}
self.assertIs(self.rnw.DELETE(url, data), None)
self.rnw.session.delete.assert_called_once_with(url,
data=json.dumps(data),
timeout=None)
    def test_DELETE_InvalidData(self):
        url = 'abc'
        data = {'cat'}
        try:
            self.assertEqual(self.rnw.DELETE(url, data), 'json')
        except Exception:
            e = sys.exc_info()[0]
            self.assertEqual(e, TypeError)
    def test_DELETE_InvalidURL(self):
        url = ''
        data = {'foo:bar'}
        try:
            self.assertEqual(self.rnw.DELETE(url, data), 'json')
        except Exception:
            e = sys.exc_info()[0]
            self.assertEqual(e, TypeError)
class TestServiceClient(unittest.TestCase):
def setUp(self):
nw = Mock(RequestsNetworkWrapper())
nw.GET.return_value = 'GET'
nw.POST.return_value = 'POST'
nw.PUT.return_value = 'PUT'
nw.DELETE.return_value = 'DELETE'
self.sc = ServiceClient('endpoint', network_wrapper=nw)
    def test_GET(self):
        self.assertEqual(self.sc.GET('test'), 'GET')
        self.sc.network_wrapper.GET.assert_called_once_with('endpoint/test',
                                                            None, None)
    def test_POST(self):
        self.assertEqual(self.sc.POST('test'), 'POST')
        self.sc.network_wrapper.POST.assert_called_once_with('endpoint/test',
                                                             None, None)
    def test_PUT(self):
        self.assertEqual(self.sc.PUT('test'), 'PUT')
        self.sc.network_wrapper.PUT.assert_called_once_with('endpoint/test',
                                                            None, None)
    def test_DELETE(self):
        self.assertEqual(self.sc.DELETE('test'), None)
        self.sc.network_wrapper.DELETE.assert_called_once_with('endpoint/test',
                                                               None, None)
|
slewt/TabPy
|
tabpy-server/tests/test_rest.py
|
Python
|
mit
| 5,180
|
# This module imports names for backwards compatibility and to ensure
# that pickled objects in existing sessions can be unpickled.
__all__ = ['DAObject', 'DAList', 'DADict', 'DAOrderedDict', 'DASet', 'DAFile', 'DAFileCollection', 'DAFileList', 'DAStaticFile', 'DAEmail', 'DAEmailRecipient', 'DAEmailRecipientList', 'DATemplate', 'DAEmpty', 'DALink', 'RelationshipTree', 'DAContext']
from docassemble.base.util import DAObject, DAList, DADict, DAOrderedDict, DASet, DAFile, DAFileCollection, DAFileList, DAStaticFile, DAEmail, DAEmailRecipient, DAEmailRecipientList, DATemplate, DAEmpty, DALink, RelationshipTree, DAContext, DAObjectPlusParameters, DACatchAll, RelationshipDir, RelationshipPeer, DALazyTemplate, DALazyTableTemplate, selections, DASessionLocal, DADeviceLocal, DAUserLocal
|
jhpyle/docassemble
|
docassemble_base/docassemble/base/core.py
|
Python
|
mit
| 790
|
"""
Basic utility functions
"""
import redislite
from .server import RDB_FILE
def header(message, width=80):
header_message = '## ' + message + ' '
end_chars = width - (len(message) + 4)
header_message += '#'*end_chars
print(header_message)
def connect_to_redis():
    # Connect to the embedded redislite server. The remote-server lookup
    # that used to follow this return was unreachable dead code (it
    # referenced read_rc_config and redis, neither of which is imported),
    # so it has been dropped.
    return redislite.Redis(dbfilename=RDB_FILE)
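# --- Illustrative usage sketch (not part of the original module) ---
# The returned client speaks the standard redis-py API; the key name below
# is hypothetical.
#
#     r = connect_to_redis()
#     r.set('cloudmanager:status', 'idle')
#     print(r.get('cloudmanager:status'))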
|
dwighthubbard/micropython-cloudmanager
|
cloudmanager/utility.py
|
Python
|
mit
| 538
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_imu_tools"
PROJECT_SPACE_DIR = "/home/trevor/ROS/catkin_ws/devel"
PROJECT_VERSION = "0.3.3"
|
siketh/ASR
|
catkin_ws/build/hector_slam/hector_imu_tools/catkin_generated/pkg.develspace.context.pc.py
|
Python
|
mit
| 382
|
#!/usr/bin/python
import unittest
import os
import random
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.surface import Slab, SlabGenerator, generate_all_slabs, \
get_symmetrically_distinct_miller_indices
from pymatgen.symmetry.groups import SpaceGroup
from pymatgen.util.testing import PymatgenTest
def get_path(path_str):
cwd = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(cwd, "..", "..", "..", "test_files", "surface_tests",
path_str)
return path
class SlabTest(PymatgenTest):
def setUp(self):
zno1 = Structure.from_file(get_path("ZnO-wz.cif"), primitive=False)
zno55 = SlabGenerator(zno1, [1, 0, 0], 5, 5, lll_reduce=False,
center_slab=False).get_slab()
self.zno1 = zno1
self.zno55 = zno55
self.h = Structure(Lattice.cubic(3), ["H"],
[[0, 0, 0]])
self.libcc = Structure(Lattice.cubic(3.51004), ["Li", "Li"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
def test_init(self):
zno_slab = Slab(self.zno55.lattice, self.zno55.species,
self.zno55.frac_coords,
self.zno55.miller_index,
self.zno55.oriented_unit_cell,
0, self.zno55.scale_factor)
        m = self.zno55.lattice.matrix
area = np.linalg.norm(np.cross(m[0], m[1]))
self.assertAlmostEqual(zno_slab.surface_area, area)
self.assertEqual(zno_slab.lattice.lengths_and_angles,
self.zno55.lattice.lengths_and_angles)
self.assertEqual(zno_slab.oriented_unit_cell.composition,
self.zno1.composition)
self.assertEqual(len(zno_slab), 8)
def test_add_adsorbate_atom(self):
zno_slab = Slab(self.zno55.lattice, self.zno55.species,
self.zno55.frac_coords,
self.zno55.miller_index,
self.zno55.oriented_unit_cell,
0, self.zno55.scale_factor)
zno_slab.add_adsorbate_atom([1], 'H', 1)
self.assertEqual(len(zno_slab), 9)
self.assertEqual(str(zno_slab[8].specie), 'H')
self.assertAlmostEqual(zno_slab.get_distance(1, 8), 1.0)
self.assertTrue(zno_slab[8].c > zno_slab[0].c)
m = self.zno55.lattice.matrix
area = np.linalg.norm(np.cross(m[0], m[1]))
self.assertAlmostEqual(zno_slab.surface_area, area)
self.assertEqual(zno_slab.lattice.lengths_and_angles,
self.zno55.lattice.lengths_and_angles)
def test_get_sorted_structure(self):
species = [str(site.specie) for site in
self.zno55.get_sorted_structure()]
self.assertEqual(species, ["Zn2+"] * 4 + ["O2-"] * 4)
def test_methods(self):
#Test various structure methods
self.zno55.get_primitive_structure()
def test_as_from_dict(self):
d = self.zno55.as_dict()
obj = Slab.from_dict(d)
self.assertEqual(obj.miller_index, (1, 0, 0))
def test_dipole_and_is_polar(self):
self.assertArrayAlmostEqual(self.zno55.dipole, [0, 0, 0])
self.assertFalse(self.zno55.is_polar())
cscl = self.get_structure("CsCl")
cscl.add_oxidation_state_by_element({"Cs": 1, "Cl": -1})
slab = SlabGenerator(cscl, [1, 0, 0], 5, 5,
lll_reduce=False, center_slab=False).get_slab()
self.assertArrayAlmostEqual(slab.dipole, [-4.209, 0, 0])
self.assertTrue(slab.is_polar())
class SlabGeneratorTest(PymatgenTest):
def test_get_slab(self):
s = self.get_structure("LiFePO4")
gen = SlabGenerator(s, [0, 0, 1], 10, 10)
s = gen.get_slab(0.25)
self.assertAlmostEqual(s.lattice.abc[2], 20.820740000000001)
fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"],
[[0, 0, 0]])
gen = SlabGenerator(fcc, [1, 1, 1], 10, 10)
slab = gen.get_slab()
gen = SlabGenerator(fcc, [1, 1, 1], 10, 10, primitive=False)
slab_non_prim = gen.get_slab()
self.assertEqual(len(slab), 6)
self.assertEqual(len(slab_non_prim), len(slab) * 4)
#Some randomized testing of cell vectors
for i in range(1, 231):
i = random.randint(1, 230)
sg = SpaceGroup.from_int_number(i)
if sg.crystal_system == "hexagonal" or (sg.crystal_system == \
"trigonal" and sg.symbol.endswith("H")):
latt = Lattice.hexagonal(5, 10)
else:
#Cubic lattice is compatible with all other space groups.
latt = Lattice.cubic(5)
s = Structure.from_spacegroup(i, latt, ["H"], [[0, 0, 0]])
miller = (0, 0, 0)
while miller == (0, 0, 0):
miller = (random.randint(0, 6), random.randint(0, 6),
random.randint(0, 6))
gen = SlabGenerator(s, miller, 10, 10)
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
def test_normal_search(self):
fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"],
[[0, 0, 0]])
for miller in [(1, 0, 0), (1, 1, 0), (1, 1, 1), (2, 1, 1)]:
gen = SlabGenerator(fcc, miller, 10, 10)
gen_normal = SlabGenerator(fcc, miller, 10, 10,
max_normal_search=max(miller))
slab = gen_normal.get_slab()
self.assertAlmostEqual(slab.lattice.alpha, 90)
self.assertAlmostEqual(slab.lattice.beta, 90)
self.assertGreaterEqual(len(gen_normal.oriented_unit_cell),
len(gen.oriented_unit_cell))
graphite = self.get_structure("Graphite")
for miller in [(1, 0, 0), (1, 1, 0), (0, 0, 1), (2, 1, 1)]:
gen = SlabGenerator(graphite, miller, 10, 10)
gen_normal = SlabGenerator(graphite, miller, 10, 10,
max_normal_search=max(miller))
self.assertGreaterEqual(len(gen_normal.oriented_unit_cell),
len(gen.oriented_unit_cell))
sc = Structure(Lattice.hexagonal(3.32, 5.15), ["Sc", "Sc"],
[[1/3, 2/3, 0.25], [2/3, 1/3, 0.75]])
gen = SlabGenerator(sc, (1, 1, 1), 10, 10, max_normal_search=1)
self.assertAlmostEqual(gen.oriented_unit_cell.lattice.angles[1], 90)
def test_get_slabs(self):
gen = SlabGenerator(self.get_structure("CsCl"), [0, 0, 1], 10, 10)
#Test orthogonality of some internal variables.
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
self.assertEqual(len(gen.get_slabs()), 1)
s = self.get_structure("LiFePO4")
gen = SlabGenerator(s, [0, 0, 1], 10, 10)
self.assertEqual(len(gen.get_slabs()), 5)
self.assertEqual(len(gen.get_slabs(bonds={("P", "O"): 3})), 2)
# There are no slabs in LFP that does not break either P-O or Fe-O
# bonds for a miller index of [0, 0, 1].
self.assertEqual(len(gen.get_slabs(
bonds={("P", "O"): 3, ("Fe", "O"): 3})), 0)
#If we allow some broken bonds, there are a few slabs.
self.assertEqual(len(gen.get_slabs(
bonds={("P", "O"): 3, ("Fe", "O"): 3},
max_broken_bonds=2)), 2)
        # At this threshold, only the origin and center Li result in
        # clustering. All other sites are non-clustered, so the number of
        # slabs is the number of sites in the LiFePO4 unit cell - 2 + 1.
self.assertEqual(len(gen.get_slabs(tol=1e-4)), 15)
LiCoO2 = Structure.from_file(get_path("icsd_LiCoO2.cif"),
primitive=False)
gen = SlabGenerator(LiCoO2, [0, 0, 1], 10, 10)
lco = gen.get_slabs(bonds={("Co", "O"): 3})
self.assertEqual(len(lco), 1)
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
scc = Structure.from_spacegroup("Pm-3m", Lattice.cubic(3), ["Fe"],
[[0, 0, 0]])
gen = SlabGenerator(scc, [0, 0, 1], 10, 10)
slabs = gen.get_slabs()
self.assertEqual(len(slabs), 1)
gen = SlabGenerator(scc, [1, 1, 1], 10, 10, max_normal_search=1)
slabs = gen.get_slabs()
self.assertEqual(len(slabs), 1)
def test_triclinic_TeI(self):
# Test case for a triclinic structure of TeI. Only these three
# Miller indices are used because it is easier to identify which
# atoms should be in a surface together. The closeness of the sites
# in other Miller indices can cause some ambiguity when choosing a
# higher tolerance.
numb_slabs = {(0, 0, 1): 5, (0, 1, 0): 3, (1, 0, 0): 7}
TeI = Structure.from_file(get_path("icsd_TeI.cif"),
primitive=False)
for k, v in numb_slabs.items():
trclnc_TeI = SlabGenerator(TeI, k, 10, 10)
TeI_slabs = trclnc_TeI.get_slabs()
self.assertEqual(v, len(TeI_slabs))
def test_get_orthogonal_c_slab(self):
TeI = Structure.from_file(get_path("icsd_TeI.cif"),
primitive=False)
trclnc_TeI = SlabGenerator(TeI, (0, 0, 1), 10, 10)
TeI_slabs = trclnc_TeI.get_slabs()
slab = TeI_slabs[0]
norm_slab = slab.get_orthogonal_c_slab()
self.assertAlmostEqual(norm_slab.lattice.angles[0], 90)
self.assertAlmostEqual(norm_slab.lattice.angles[1], 90)
class FuncTest(PymatgenTest):
def setUp(self):
self.cscl = self.get_structure("CsCl")
self.lifepo4 = self.get_structure("LiFePO4")
self.tei = Structure.from_file(get_path("icsd_TeI.cif"),
primitive=False)
self.LiCoO2 = Structure.from_file(get_path("icsd_LiCoO2.cif"),
primitive=False)
self.p1 = Structure(Lattice.from_parameters(3, 4, 5, 31, 43, 50),
["H", "He"], [[0, 0, 0], [0.1, 0.2, 0.3]])
self.graphite = self.get_structure("Graphite")
def test_get_symmetrically_distinct_miller_indices(self):
indices = get_symmetrically_distinct_miller_indices(self.cscl, 1)
self.assertEqual(len(indices), 3)
indices = get_symmetrically_distinct_miller_indices(self.cscl, 2)
self.assertEqual(len(indices), 6)
self.assertEqual(len(get_symmetrically_distinct_miller_indices(
self.lifepo4, 1)), 7)
# The TeI P-1 structure should have 13 unique millers (only inversion
# symmetry eliminates pairs)
indices = get_symmetrically_distinct_miller_indices(self.tei, 1)
self.assertEqual(len(indices), 13)
# P1 and P-1 should have the same # of miller indices since surfaces
# always have inversion symmetry.
indices = get_symmetrically_distinct_miller_indices(self.p1, 1)
self.assertEqual(len(indices), 13)
indices = get_symmetrically_distinct_miller_indices(self.graphite, 2)
self.assertEqual(len(indices), 12)
def test_generate_all_slabs(self):
slabs = generate_all_slabs(self.cscl, 1, 10, 10)
# Only three possible slabs, one each in (100), (110) and (111).
self.assertEqual(len(slabs), 3)
slabs = generate_all_slabs(self.cscl, 1, 10, 10,
bonds={("Cs", "Cl"): 4})
# No slabs if we don't allow broken Cs-Cl
self.assertEqual(len(slabs), 0)
slabs = generate_all_slabs(self.cscl, 1, 10, 10,
bonds={("Cs", "Cl"): 4},
max_broken_bonds=100)
self.assertEqual(len(slabs), 3)
slabs1 = generate_all_slabs(self.lifepo4, 1, 10, 10, tol=0.1,
bonds={("P", "O"): 3})
self.assertEqual(len(slabs1), 4)
slabs2 = generate_all_slabs(self.lifepo4, 1, 10, 10,
bonds={("P", "O"): 3, ("Fe", "O"): 3})
self.assertEqual(len(slabs2), 0)
        # There should be only one possible stable surface, and it lies in
        # the (001) oriented unit cell
slabs3 = generate_all_slabs(self.LiCoO2, 1, 10, 10,
bonds={("Co", "O"): 3})
self.assertEqual(len(slabs3), 1)
mill = (0, 0, 1)
for s in slabs3:
self.assertEqual(s.miller_index, mill)
if __name__ == "__main__":
unittest.main()
|
sonium0/pymatgen
|
pymatgen/core/tests/test_surface.py
|
Python
|
mit
| 13,101
|
# -*- coding: utf-8 -*-
import unittest
import datetime
from cwr.parser.encoder.dictionary import ComponentDictionaryEncoder
from cwr.work import ComponentRecord
"""
ComponentRecord to dictionary encoding tests.
The following case is tested: encoding a fully populated ComponentRecord.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestComponentRecordDictionaryEncoding(unittest.TestCase):
def setUp(self):
self._encoder = ComponentDictionaryEncoder()
def test_encoded(self):
data = ComponentRecord(record_type='COM',
transaction_sequence_n=3,
record_sequence_n=15,
title='TITLE',
writer_1_last_name='LAST NAME 1',
submitter_work_n='ABCD123',
writer_1_first_name='FIRST NAME 1',
writer_2_first_name='FIRST NAME 2',
writer_2_last_name='LAST NAME 2',
writer_1_ipi_base_n='I-000000229-7',
writer_1_ipi_name_n=14107338,
writer_2_ipi_base_n='I-000000339-7',
writer_2_ipi_name_n=14107400,
iswc='T0123456789',
duration=datetime.datetime.strptime('011200',
'%H%M%S').time())
encoded = self._encoder.encode(data)
self.assertEqual('COM', encoded['record_type'])
self.assertEqual(3, encoded['transaction_sequence_n'])
self.assertEqual(15, encoded['record_sequence_n'])
self.assertEqual('TITLE', encoded['title'])
self.assertEqual('LAST NAME 1', encoded['writer_1_last_name'])
self.assertEqual('ABCD123', encoded['submitter_work_n'])
self.assertEqual('FIRST NAME 1', encoded['writer_1_first_name'])
self.assertEqual('FIRST NAME 2', encoded['writer_2_first_name'])
self.assertEqual('LAST NAME 2', encoded['writer_2_last_name'])
self.assertEqual(14107338, encoded['writer_1_ipi_name_n'])
self.assertEqual(14107400, encoded['writer_2_ipi_name_n'])
self.assertEqual(datetime.datetime.strptime('011200', '%H%M%S').time(),
encoded['duration'])
self.assertEqual('I-000000229-7', encoded['writer_1_ipi_base_n'])
self.assertEqual('I-000000339-7', encoded['writer_2_ipi_base_n'])
self.assertEqual('T0123456789', encoded['iswc'])
|
weso/CWR-DataApi
|
tests/parser/dictionary/encoder/record/test_component.py
|
Python
|
mit
| 2,680
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models.pluginmodel import CMSPlugin
from djangocms_text_ckeditor.fields import HTMLField
from easy_thumbnails.alias import aliases
from easy_thumbnails.signals import saved_file
from easy_thumbnails.signal_handlers import generate_aliases_global
# Define aliases for easy_thumbnail
# See http://easy-thumbnails.readthedocs.org/en/latest/usage/#thumbnail-aliases
if not aliases.get('badge'):
aliases.set('badge', {'size': (150, 80), 'crop': True})
class PersonBiography(models.Model):
"""Stores biographical information about a Person."""
first_name = models.CharField(max_length=200)
last_name = models.CharField(max_length=250)
suffix = models.CharField(max_length=40, blank=True)
title = models.CharField(max_length=250, blank=True)
employer = models.CharField(max_length=250, blank=True)
description = HTMLField()
image = models.ImageField(upload_to='biography_person', blank=True)
active = models.BooleanField(default=True,
help_text=_('If checked, this biography will be available in the plugin list.'))
class Meta:
ordering = ('last_name', 'first_name', )
verbose_name = 'Person biography'
verbose_name_plural = 'Person biographies'
def __unicode__(self):
return '%s, %s' % (self.last_name, self.first_name)
class PersonBiographyPluginModel(CMSPlugin):
"""
Stores a reference to a PersonBiography. This is used so a given
PersonBiography can be referenced by 0 or more PersonBiographyPlugins.
"""
person = models.ForeignKey(PersonBiography)
short_description = HTMLField(blank=True, help_text="If specified, this text will replace the person's normal description.")
event_description = HTMLField(blank=True, help_text="If specified, this text will appear after the person's normal description.")
class Meta:
ordering = ('person', )
def __unicode__(self):
return unicode(self.person)
def copy_relations(self, oldinstance):
self.person = oldinstance.person
# Generate thumbnails when an image is uploaded.
saved_file.connect(generate_aliases_global)
|
kfr2/cmsplugin-biography
|
cmsplugin_biography/models.py
|
Python
|
mit
| 2,213
|
import struct
from common import *
from objects import ObjectAppType
from bcddevice import BCDDevice
# element types:
# X X ???? XX
# class format subtype
# class:
# 1 = Library
# 2 = Application
# 3 = Device
# format:
# 0 = Unknown
# 1 = Device
# 2 = String
# 3 = Object
# 4 = Object List
# 5 = Integer
# 6 = Boolean
# 7 = IntegerList
ElementClass = enum(Library=0x1,
Application=0x2,
Device=0x3,
Hidden=0x4)
ElementFormat = enum(Unknown=0,
Device=1,
String=2,
Object=3,
ObjectList=4,
Integer=5,
Boolean=6,
IntegerList=7)
# based on both my personal findings and on this website:
# http://www.geoffchappell.com/notes/windows/boot/bcd/elements.htm?tx=5
_library = {
0x01: (1, 'device'),
0x02: (2, 'path'),
0x04: (2, 'description'),
0x05: (2, 'locale'),
0x06: (4, 'inherit'),
0x07: (5, 'truncatememory'),
0x08: (4, 'recoverysequence'),
0x09: (6, 'recoveryenabled'),
0x0A: (7, 'badmemorylist'),
0x0B: (6, 'badmemoryaccess'),
0x0C: (5, 'firstmegabytepolicy', enum('UseNone','UseAll','UsePrivate')),
0x0D: (5, 'relocatephysical'),
0x0E: (5, 'avoidlowmemory'),
0x0F: (6, 'traditionalksegmappings'),
0x10: (6, 'bootdebug'),
0x11: (5, 'debugtype', enum('Serial','1394','USB')),
0x12: (5, 'debugaddress'),
0x13: (5, 'debugport'),
0x14: (5, 'baudrate'),
0x15: (5, 'channel'),
0x16: (2, 'targetname'),
0x17: (6, 'noumex'),
0x18: (5, 'debugstart', enum('Active', 'AutoEnable', 'Disable')),
0x19: (2, 'busparams'),
0x20: (6, 'bootems'),
0x22: (5, 'emsport'),
0x23: (5, 'emsbaudrate'),
0x30: (2, 'loadoptions'),
0x31: (6, 'attemptnonbcdstart'),
0x40: (6, 'advancedoptions'),
0x41: (6, 'optionsedit'),
0x42: (5, 'keyringaddress'),
# no alias
0x43: (1, 'bootstatusdatalogdevice'),
# no alias
0x44: (2, 'bootstatusdatalogfile'),
# no alias
0x45: (6, 'bootstatusdatalogappend'),
0x46: (6, 'graphicsmodedisabled'),
0x47: (5, 'configaccesspolicy', enum('Default', 'DisallowMmConfig')),
0x48: (6, 'nointegritychecks'),
0x49: (6, 'testsigning'),
0x4A: (2, 'fontpath'),
# seems to be wrong in the table?
0x4B: (5, 'integrityservices'),
0x50: (6, 'extendedinput'),
0x51: (5, 'initialconsoleinput'),
# not in table
0x60: (6, 'isolatedcontext'),
# not in table
0x65: (5, 'displaymessage', enum('Default','Resume','HyperV', 'Recovery','StartupRepair', 'SystemImageRecovery','CommandPrompt', 'SystemRestore', 'PushButtonReset')),
# not in table
0x77: (7, 'allowedinmemorysettings'),
}
_bootmgr = {
0x01: (4, 'displayorder'),
0x02: (4, 'bootsequence'),
0x03: (3, 'default'),
0x04: (5, 'timeout'),
0x05: (6, 'resume'),
0x06: (3, 'resumeobject'),
0x10: (4, 'toolsdisplayorder'),
0x20: (6, 'displaybootmenu'),
0x21: (6, 'noerrordisplay'),
0x22: (1, 'bcddevice'),
0x23: (2, 'bcdfilepath'),
0x30: (7, 'customactions'),
}
_osloader = {
0x001: (1, 'osdevice'),
0x002: (2, 'systemroot'),
0x003: (3, 'resumeobject'),
0x004: (6, 'stampdisks'),
0x010: (6, 'detecthal'),
0x011: (2, 'kernel'),
0x012: (2, 'hal'),
0x013: (2, 'dbgtransport'),
0x020: (5, 'nx', enum('OptIn', 'OptOut', 'AlwaysOff', 'AlwaysOn')),
0x021: (5, 'pae', enum('Default', 'ForceEnable', 'ForceDisable')),
0x022: (6, 'winpe'),
0x024: (6, 'nocrashautoreboot'),
0x025: (6, 'lastknowngood'),
0x026: (6, 'oslnointegritychecks'),
0x027: (6, 'osltestsigning'),
0x030: (6, 'nolowmem'),
0x031: (5, 'removememory'),
0x032: (5, 'increaseuserva'),
0x033: (5, 'perfmem'),
0x040: (6, 'vga'),
0x041: (6, 'quietboot'),
0x042: (6, 'novesa'),
0x050: (5, 'clustermodeaddressing'),
0x051: (6, 'usephysicaldestination'),
0x052: (5, 'restrictapiccluster'),
0x053: (2, 'evstore'),
0x054: (6, 'uselegacyapicmode'),
0x060: (6, 'onecpu'),
0x061: (5, 'numproc'),
0x062: (6, 'maxproc'),
0x063: (5, 'configflags'),
0x064: (6, 'maxgroup'),
0x065: (6, 'groupaware'),
0x066: (5, 'groupsize'),
0x070: (6, 'usefirmwarepcisettings'),
0x071: (5, 'msi', enum('Default', 'ForceDisable')),
0x072: (5, 'pciexpress', enum('Default', 'ForceDisable')),
0x080: (5, 'safeboot', enum('Minimal', 'Network', 'DsRepair')),
0x081: (6, 'safebootalternateshell'),
0x090: (6, 'bootlog'),
0x091: (6, 'sos'),
0x0A0: (6, 'debug'),
0x0A1: (6, 'halbreakpoint'),
0x0A2: (6, 'useplatformclock'),
0x0B0: (6, 'ems'),
# no alias
0x0C0: (5, 'forcefailure', enum('Load', 'Hive', 'Acpi', 'General')),
0x0C1: (5, 'driverloadfailurepolicy', enum('Fatal', 'UseErrorControl')),
# not in table
0x0C2: (5, 'bootmenupolicy', enum('TODO0', 'Standard', 'TODO2', 'TODO3')),
0x0E0: (5, 'bootstatuspolicy', enum('DisplayAllFailures', 'IgnoreAllFailures', 'IgnoreShutdownFailures', 'IgnoreBootFailures')),
0x0F0: (5, 'hypervisorlaunchtype', enum('Off', 'Auto')),
0x0F1: (2, 'hypervisorpath'),
0x0F2: (6, 'hypervisordebug'),
0x0F3: (5, 'hypervisordebugtype', enum('Serial', '1394')),
0x0F4: (5, 'hypervisordebugport'),
0x0F5: (5, 'hypervisorbaudrate'),
0x0F6: (5, 'hypervisorchannel'),
# not a lot known
0x0F7: (5, 'bootuxpolicy'),
0x0F8: (6, 'hypervisordisableslat'),
0x100: (5, 'tpmbootentropy', enum('Default', 'ForceDisable', 'ForceEnable')),
0x120: (5, 'xsavepolicy'),
0x121: (5, 'xsaveaddfeature0'),
0x122: (5, 'xsaveaddfeature1'),
0x123: (5, 'xsaveaddfeature2'),
0x124: (5, 'xsaveaddfeature3'),
0x125: (5, 'xsaveaddfeature4'),
0x126: (5, 'xsaveaddfeature5'),
0x127: (5, 'xsaveaddfeature6'),
0x128: (5, 'xsaveaddfeature7'),
0x129: (5, 'xsaveremovefeature'),
0x12A: (5, 'xsaveprocessorsmask'),
0x12B: (5, 'xsavedisable'),
}
_resume = {
0x01: (1, 'filedevice'),
0x02: (2, 'filepath'),
0x03: (6, 'customsettings'),
0x04: (6, 'pae'),
0x05: (1, 'associatedosdevice'),
0x06: (6, 'debugoptionenabled'),
0x07: (5, 'bootux', enum('Disabled', 'Basic', 'Standard')),
# not in table
0x08: (5, 'bootmenupolicy', enum('TODO0', 'Standard', 'TODO2', 'TODO3')),
}
_memdiag = {
0x01: (5, 'passcount'),
0x02: (5, 'testmix', enum('Basic', 'Extended')),
0x03: (5, 'failurecount'),
0x04: (5, 'testtofail', enum('Stride', 'Mats', 'InverseCoupling', 'RandomPattern', 'Checkerboard')),
0x05: (6, 'cacheenable'),
}
_ntldr = {
0x01: (2, 'bpbstring'),
}
_startup = {
0x01: (6, 'pxesoftreboot'),
0x02: (2, 'applicationname'),
}
_device = {
0x01: (5, 'ramdiskimageoffset'),
0x02: (5, 'ramdiskftpclientport'),
0x03: (1, 'ramdisksdidevice'),
0x04: (2, 'ramdisksdipath'),
0x05: (5, 'ramdiskimagelength'),
0x06: (6, 'exportascd'),
0x07: (5, 'ramdisktftpblocksize'),
0x08: (5, 'ramdisktftpwindowsize'),
0x09: (6, 'ramdiskmcenabled'),
0x0A: (6, 'ramdiskmctftpfallback'),
}
# All of these are hidden during a bcdedit /enum all command
# I design good software, so I'll show it even if bcdedit doesn't.
_setup = {
0x01: (1, 'devicetype'),
0x02: (2, 'applicationrelativepath'),
0x03: (2, 'ramdiskdevicerelativepath'),
0x04: (6, 'omitosloaderelements'),
0x10: (6, 'recoveryos'),
}
alias_dict = {
# applies to all object types
ElementClass.Library: _library,
# these depend on the application
ElementClass.Application: {
#objectapptype
0: {},
ObjectAppType.FirmwareMgr: _bootmgr,
ObjectAppType.WinBootMgr: _bootmgr,
ObjectAppType.WinBootLdr: _osloader,
ObjectAppType.WinResume: _resume,
ObjectAppType.WinMemTest: _memdiag,
ObjectAppType.Ntldr: _ntldr,
ObjectAppType.Setupldr: _ntldr,
ObjectAppType.BootSect: {},
ObjectAppType.Startup: _startup,
},
# only works for devices
ElementClass.Device : _device,
# setup template elements
ElementClass.Hidden: _setup,
}
def element_info(type):
if isinstance(type, str):
type = int(type, 16)
return ((0xF0000000 & type) >> 28,
(0x0F000000 & type) >> 24,
0x00FFFFFF & type)
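# Worked example (values taken from the tables above): element type
# 0x12000004 decodes to class 1 (Library), format 2 (String) and
# subtype 0x04, which _library maps to the 'description' alias.
#
#   >>> element_info('12000004')
#   (1, 2, 4)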
# transformation functions from the BCD raw format to Python.
# tuple of to/from functions
_bcdqword = (lambda v:struct.pack('Q', v),
lambda v:struct.unpack('Q', v)[0])
_bcdqwordlist = (lambda v:b''.join((struct.pack('Q', v) for v in l)),
lambda v:list((struct.unpack('Q', bytes(j))[0]
for j in zip(*[v[i::8]
for i in range(8)]))))
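# Round-trip sketch (Python 3 bytes semantics assumed): packing [1, 2]
# yields 16 little-endian bytes, and unpacking restores the list.
#
#   >>> _bcdqwordlist[0]([1, 2]) == struct.pack('QQ', 1, 2)
#   True
#   >>> _bcdqwordlist[1](struct.pack('QQ', 1, 2))
#   [1, 2]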
_bcdtodo = (lambda v:'TODO',
lambda v:'TODO')
_bcdraw = (identity, identity)
_bcdobj = _bcdraw
_bcdobjlist = _bcdraw
# different ways to express booleans
_boolnames = {'0' : False,
'1' : True,
'on' : True,
'off' : False,
'true' : True,
'false': False,
'yes' : True,
'no' : False}
_bcdbool = (lambda v: bytes([int(_boolnames.get(v.lower(), v)
if isinstance(v,str) else v)]),
lambda v: bool(v[0]),
lambda v: ('No', 'Yes')[int(v)])
_bcddevice = (None, lambda v:BCDDevice(v))
# Match transformation functions to ElementFormats.
element_transform = {
    ElementFormat.Device: _bcddevice,  # was _bcdtodo
ElementFormat.String: _bcdraw,
ElementFormat.Object: _bcdobj,
ElementFormat.ObjectList: _bcdobjlist,
ElementFormat.Integer: _bcdqword,
ElementFormat.Boolean: _bcdbool,
ElementFormat.IntegerList: _bcdqwordlist,
}
element_transform_str = {
ElementFormat.IntegerList: lambda v:[hex(i) for i in v],
}
# END OF LINE.
|
kupiakos/pybcd
|
elements.py
|
Python
|
mit
| 10,086
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------
# tests/test_filename.py
#
# Test thumbnail file name generation.
# ----------------------------------------------------------------
# copyright (c) 2015 - Domen Ipavec
# Distributed under The MIT License, see LICENSE
# ----------------------------------------------------------------
from unittest import TestCase
from stm.configuration import Configuration
from stm.image import Image
import os, os.path
class Test_filename(TestCase):
def checkImage(self, input, output):
img = Image(self.conf)
img.loadImage(input)
self.assertEqual(img.getThumbnailName(), output)
def setUp(self):
self.conf = Configuration()
def removeFolders(self, l):
for f in l:
if os.path.exists(f):
                try:
                    os.rmdir(f)
                except OSError:
                    pass
def tearDown(self):
self.conf = None
self.removeFolders(('thumbs', '/tmp/thumbs', 'test', '/tmp/test', 'test-folder', '/tmp/test-folder'))
def test_output_file(self):
self.conf.output = "output.jpg"
self.checkImage("input.png", "output.jpg")
def test_no_config(self):
self.checkImage("input.png", "thumbs/input.png")
self.checkImage("input.jpg", "thumbs/input.png")
self.checkImage("13 De_(com)čšž.test.jpg", "thumbs/13 De_(com)čšž.test.png")
self.checkImage("/tmp/input.jpg", "/tmp/thumbs/input.png")
def test_folder(self):
self.conf.folder = "test-folder"
self.checkImage("input.png", "test-folder/input.png")
self.checkImage("input.jpg", "test-folder/input.png")
self.checkImage("13 De_(com)čšž.test.jpg", "test-folder/13 De_(com)čšž.test.png")
self.checkImage("/tmp/input.jpg", "/tmp/test-folder/input.png")
def test_abs_folder(self):
self.conf.folder = "/tmp"
self.checkImage("input.png", "/tmp/input.png")
self.checkImage("input.jpg", "/tmp/input.png")
self.checkImage("13 De_(com)čšž.test.jpg", "/tmp/13 De_(com)čšž.test.png")
self.checkImage("/tmp/input.jpg", "/tmp/input.png")
def test_format_jpg(self):
self.conf.fileFormat = 'jpg'
self.checkImage("input.png", "thumbs/input.jpg")
self.checkImage("input.jpg", "thumbs/input.jpg")
self.checkImage("13 De_(com)čšž.test.jpg", "thumbs/13 De_(com)čšž.test.jpg")
self.checkImage("/tmp/input.jpg", "/tmp/thumbs/input.jpg")
def test_format_source(self):
self.conf.fileFormat = 'source'
self.checkImage("input.png", "thumbs/input.png")
self.checkImage("image.jpg", "thumbs/image.jpg")
self.checkImage("13 De_(com)čšž.test.jpg", "thumbs/13 De_(com)čšž.test.jpg")
self.checkImage("/tmp/input.png", "/tmp/thumbs/input.png")
def test_postfix(self):
self.conf.name_postfix = "_thumb"
self.checkImage("input.png", "input_thumb.png")
self.checkImage("input.jpg", "input_thumb.png")
self.checkImage("13 De_(com)čšž.test.jpg", "13 De_(com)čšž.test_thumb.png")
self.checkImage("/tmp/input.jpg", "/tmp/input_thumb.png")
def test_prefix(self):
self.conf.name_prefix = "thumb_"
self.checkImage("input.png", "thumb_input.png")
self.checkImage("input.jpg", "thumb_input.png")
self.checkImage("13 De_(com)čšž.test.jpg", "thumb_13 De_(com)čšž.test.png")
self.checkImage("/tmp/input.jpg", "/tmp/thumb_input.png")
def test_all(self):
self.conf.folder = "test"
self.conf.fileFormat = 'jpg'
self.conf.name_prefix = "thumb_"
self.conf.name_postfix = "_thumb"
self.checkImage("input.png", "test/thumb_input_thumb.jpg")
self.checkImage("input.jpg", "test/thumb_input_thumb.jpg")
self.checkImage("13 De_(com)čšž.test.jpg", "test/thumb_13 De_(com)čšž.test_thumb.jpg")
self.checkImage("/tmp/input.png", "/tmp/test/thumb_input_thumb.jpg")
|
matematik7/STM
|
tests/test_filename.py
|
Python
|
mit
| 4,121
|
import pymongo
from pprint import pprint
def mongo_conn(db, collection):
conn = pymongo.MongoClient('localhost', 27017)
db_conn = conn[db]
coll_conn = db_conn[collection]
print coll_conn
return coll_conn
def list_and_count(coll_field, coll_conn, display_limit=10):
a = list(coll_conn.aggregate(
[{
"$group":
{"_id": "$" + coll_field, "count":
{
"$sum": 1
}
}
}, {"$sort": {"count": -1}}, {"$limit": display_limit}
]))
    for doc in a:
        print doc
my_conn = mongo_conn('imdb', 'imdb_top250')
# list_and_count('author', my_conn, 20)
##########################################################
# def list_imdb(coll_conn):
# curr_conn = coll_conn
# print curr_conn
# all_titles,all_ids,all_nquotes = [],[],[]
# all_docs = coll_conn.find()
# for doc in all_docs:
# pprint(doc)
# doc["len_quotes"] = len(doc['quotes'])
# title,imdb_id,n_quotes = doc['name'], doc['imdb_id'], doc['len_quotes']
# all_titles.append(title)
# all_ids.append(imdb_id)
# all_nquotes.append(n_quotes)
# return (zip(all_ids,all_titles,all_nquotes))
# my_list = list_imdb(my_conn)
# print len(my_list)
# print my_list[0]
# sort a list of tuples based on the third value
# q_desc = sorted(my_list, key = lambda tup:tup[2], reverse = True)
# print q_desc
# print my_dict.items()[1]
def title_and_count(coll_conn):
curr_conn = coll_conn
all_titles = list(curr_conn.aggregate([
{"$unwind": "$quotes"},
{"$group":
{"_id":{
"name": "$name"
,"imdb_id": "$imdb_id"
,"rating": "$rating"
,"desc": "$description"
,"director": "$director"
,"img_src": "$img_src"
}
,"count": {"$sum": 1}
}
},
{"$sort":{"count": -1}
}
,{"$limit":3}
])
)
# print len(all_titles)
# pprint(all_titles)
return all_titles
all_titles = title_and_count(my_conn)
print len(all_titles)
print all_titles[0]
#########################################################
def find_by_title_id(title_id, coll_conn):
    curr_conn = coll_conn
    quotes_by_id = curr_conn.find_one({"imdb_id": title_id})
    return quotes_by_id
#########################################################
curr_movie = find_by_title_id('tt0266543', my_conn)
# print curr_movie['name']
# for i,enum in enumerate(curr_movie['quotes']):
# print str(i+1) + enum.encode('utf-8',errors='ignore') + '\n\t' + 10 * '*'
|
darth-dodo/Quotable
|
quotable_queries.py
|
Python
|
mit
| 2,932
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from base import Problem
class Solution(Problem):
def solve(self, input_):
numberLargest = 0
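        # Skip multiples of 10: (10*k)**b only appends zeros to k**b, so its
        # digit sum equals that of k**b, which this loop already covers.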
        for a in range(1, input_):
            if a % 10 != 0:
                for b in range(1, input_):
num_pow = a**b
number_sum = self.digits_sum(num_pow)
if number_sum > numberLargest:
numberLargest = number_sum
print('Solve problem {}'.format(self.number))
print(numberLargest)
if __name__ == '__main__':
solution = Solution(56)
solution.solve(100)
|
phapdv/project_euler
|
pe56.py
|
Python
|
mit
| 616
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import HTMLParser
import smtplib, quopri
from frappe import msgprint, throw, _
from frappe.email.smtp import SMTPServer, get_outgoing_email_account
from frappe.email.email_body import get_email, get_formatted_html
from frappe.utils.verified_command import get_signed_params, verify_request
from html2text import html2text
from frappe.utils import get_url, nowdate, encode, now_datetime, add_days, split_emails, cstr, cint
from rq.timeouts import JobTimeoutException
from frappe.utils.scheduler import log
class EmailLimitCrossedError(frappe.ValidationError): pass
def send(recipients=None, sender=None, subject=None, message=None, reference_doctype=None,
reference_name=None, unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
attachments=None, reply_to=None, cc=[], message_id=None, in_reply_to=None, send_after=None,
expose_recipients=None, send_priority=1, communication=None, now=False, read_receipt=None,
queue_separately=False, is_notification=False, add_unsubscribe_link=1):
"""Add email to sending queue (Email Queue)
:param recipients: List of recipients.
:param sender: Email sender.
:param subject: Email subject.
:param message: Email message.
:param reference_doctype: Reference DocType of caller document.
:param reference_name: Reference name of caller document.
:param send_priority: Priority for Email Queue, default 1.
:param unsubscribe_method: URL method for unsubscribe. Default is `/api/method/frappe.email.queue.unsubscribe`.
:param unsubscribe_params: additional params for unsubscribed links. default are name, doctype, email
:param attachments: Attachments to be sent.
:param reply_to: Reply to be captured here (default inbox)
:param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
:param send_after: Send this email after the given datetime. If value is in integer, then `send_after` will be the automatically set to no of days from current date.
:param communication: Communication link to be set in Email Queue record
:param now: Send immediately (don't send in the background)
:param queue_separately: Queue each email separately
:param is_notification: Marks email as notification so will not trigger notifications from system
:param add_unsubscribe_link: Send unsubscribe link in the footer of the Email, default 1.
"""
if not unsubscribe_method:
unsubscribe_method = "/api/method/frappe.email.queue.unsubscribe"
if not recipients and not cc:
return
if isinstance(recipients, basestring):
recipients = split_emails(recipients)
if isinstance(cc, basestring):
cc = split_emails(cc)
if isinstance(send_after, int):
send_after = add_days(nowdate(), send_after)
email_account = get_outgoing_email_account(True, append_to=reference_doctype)
if not sender or sender == "Administrator":
sender = email_account.default_sender
check_email_limit(recipients)
formatted = get_formatted_html(subject, message, email_account=email_account)
try:
text_content = html2text(formatted)
except HTMLParser.HTMLParseError:
text_content = "See html attachment"
if reference_doctype and reference_name:
unsubscribed = [d.email for d in frappe.db.get_all("Email Unsubscribe", "email",
{"reference_doctype": reference_doctype, "reference_name": reference_name})]
unsubscribed += [d.email for d in frappe.db.get_all("Email Unsubscribe", "email",
{"global_unsubscribe": 1})]
else:
unsubscribed = []
recipients = [r for r in list(set(recipients)) if r and r not in unsubscribed]
email_content = formatted
email_text_context = text_content
	if add_unsubscribe_link == 1 and reference_doctype and (unsubscribe_message or reference_doctype == "Newsletter"):
unsubscribe_link = get_unsubscribe_message(unsubscribe_message, expose_recipients)
email_content = email_content.replace("<!--unsubscribe link here-->", unsubscribe_link.html)
email_text_context += unsubscribe_link.text
# add to queue
add(recipients, sender, subject,
formatted=email_content,
text_content=email_text_context,
reference_doctype=reference_doctype,
reference_name=reference_name,
attachments=attachments,
reply_to=reply_to,
cc=cc,
message_id=message_id,
in_reply_to=in_reply_to,
send_after=send_after,
send_priority=send_priority,
email_account=email_account,
communication=communication,
add_unsubscribe_link=add_unsubscribe_link,
unsubscribe_method=unsubscribe_method,
unsubscribe_params=unsubscribe_params,
expose_recipients=expose_recipients,
read_receipt=read_receipt,
queue_separately=queue_separately,
is_notification = is_notification,
now=now)
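# Minimal usage sketch (hypothetical addresses): queue a message for two
# recipients; it is delivered by the scheduled `flush` unless now=True.
#
#   send(recipients=["a@example.com", "b@example.com"],
#        sender="noreply@example.com",
#        subject="Weekly report",
#        message="<p>Hello</p>")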
def add(recipients, sender, subject, **kwargs):
"""Add to Email Queue"""
if kwargs.get('queue_separately') or len(recipients) > 20:
email_queue = None
for r in recipients:
if not email_queue:
email_queue = get_email_queue([r], sender, subject, **kwargs)
if kwargs.get('now'):
					send_one(email_queue.name, now=True)
else:
duplicate = email_queue.get_duplicate([r])
duplicate.insert(ignore_permissions=True)
if kwargs.get('now'):
send_one(duplicate.name, now=True)
frappe.db.commit()
else:
email_queue = get_email_queue(recipients, sender, subject, **kwargs)
if kwargs.get('now'):
send_one(email_queue.name, now=True)
def get_email_queue(recipients, sender, subject, **kwargs):
'''Make Email Queue object'''
e = frappe.new_doc('Email Queue')
e.priority = kwargs.get('send_priority')
try:
mail = get_email(recipients,
sender=sender,
subject=subject,
formatted=kwargs.get('formatted'),
text_content=kwargs.get('text_content'),
attachments=kwargs.get('attachments'),
reply_to=kwargs.get('reply_to'),
cc=kwargs.get('cc'),
email_account=kwargs.get('email_account'),
expose_recipients=kwargs.get('expose_recipients'))
mail.set_message_id(kwargs.get('message_id'),kwargs.get('is_notification'))
if kwargs.get('read_receipt'):
mail.msg_root["Disposition-Notification-To"] = sender
if kwargs.get('in_reply_to'):
mail.set_in_reply_to(kwargs.get('in_reply_to'))
e.message_id = mail.msg_root["Message-Id"].strip(" <>")
e.message = cstr(mail.as_string())
e.sender = mail.sender
	except frappe.InvalidEmailAddressError:
		# bad Email Address - don't add to queue; `mail` may be unbound if
		# get_email raised, so log using the function arguments instead
		frappe.log_error('Invalid Email ID Sender: {0}, Recipients: {1}'.format(sender,
			', '.join(recipients)), 'Email Not Sent')
e.set_recipients(recipients + kwargs.get('cc', []))
e.reference_doctype = kwargs.get('reference_doctype')
e.reference_name = kwargs.get('reference_name')
e.add_unsubscribe_link = kwargs.get("add_unsubscribe_link")
e.unsubscribe_method = kwargs.get('unsubscribe_method')
e.unsubscribe_params = kwargs.get('unsubscribe_params')
e.expose_recipients = kwargs.get('expose_recipients')
e.communication = kwargs.get('communication')
e.send_after = kwargs.get('send_after')
e.show_as_cc = ",".join(kwargs.get('cc', []))
e.insert(ignore_permissions=True)
return e
def check_email_limit(recipients):
# if using settings from site_config.json, check email limit
# No limit for own email settings
smtp_server = SMTPServer()
if (smtp_server.email_account
and getattr(smtp_server.email_account, "from_site_config", False)
or frappe.flags.in_test):
monthly_email_limit = frappe.conf.get('limits', {}).get('emails')
if frappe.flags.in_test:
monthly_email_limit = 500
if not monthly_email_limit:
return
# get count of mails sent this month
this_month = get_emails_sent_this_month()
if (this_month + len(recipients)) > monthly_email_limit:
throw(_("Cannot send this email. You have crossed the sending limit of {0} emails for this month.").format(monthly_email_limit),
EmailLimitCrossedError)
def get_emails_sent_this_month():
return frappe.db.sql("""select count(name) from `tabEmail Queue` where
status='Sent' and MONTH(creation)=MONTH(CURDATE())""")[0][0]
def get_unsubscribe_message(unsubscribe_message, expose_recipients):
if not unsubscribe_message:
unsubscribe_message = _("Unsubscribe from this list")
html = """<div style="margin: 15px auto; padding: 0px 7px; text-align: center; color: #8d99a6;">
<!--cc message-->
<p style="margin: 15px auto;">
<a href="<!--unsubscribe url-->" style="color: #8d99a6; text-decoration: underline;
target="_blank">{unsubscribe_message}
</a>
</p>
</div>""".format(unsubscribe_message=unsubscribe_message)
if expose_recipients == "footer":
text = "\n<!--cc message-->"
else:
text = ""
text += "\n\n{unsubscribe_message}: <!--unsubscribe url-->\n".format(unsubscribe_message=unsubscribe_message)
return frappe._dict({
"html": html,
"text": text
})
def get_unsubcribed_url(reference_doctype, reference_name, email, unsubscribe_method, unsubscribe_params):
params = {"email": email.encode("utf-8"),
"doctype": reference_doctype.encode("utf-8"),
"name": reference_name.encode("utf-8")}
if unsubscribe_params:
params.update(unsubscribe_params)
query_string = get_signed_params(params)
# for test
frappe.local.flags.signed_query_string = query_string
	return get_url(unsubscribe_method + "?" + query_string)
@frappe.whitelist(allow_guest=True)
def unsubscribe(doctype, name, email):
	# unsubscribe from comments and communications
if not verify_request():
return
try:
frappe.get_doc({
"doctype": "Email Unsubscribe",
"email": email,
"reference_doctype": doctype,
"reference_name": name
}).insert(ignore_permissions=True)
except frappe.DuplicateEntryError:
frappe.db.rollback()
else:
frappe.db.commit()
return_unsubscribed_page(email, doctype, name)
def return_unsubscribed_page(email, doctype, name):
frappe.respond_as_web_page(_("Unsubscribed"),
_("{0} has left the conversation in {1} {2}").format(email, _(doctype), name),
indicator_color='green')
def flush(from_test=False):
"""flush email queue, every time: called from scheduler"""
# additional check
cache = frappe.cache()
check_email_limit([])
auto_commit = not from_test
if frappe.are_emails_muted():
msgprint(_("Emails are muted"))
from_test = True
smtpserver = SMTPServer()
make_cache_queue()
for i in xrange(cache.llen('cache_email_queue')):
		if cint(frappe.defaults.get_defaults().get("hold_queue")) == 1:
			# check the hold flag before popping, so a held email is not dropped
			break
		email = cache.lpop('cache_email_queue')
if email:
send_one(email, smtpserver, auto_commit, from_test=from_test)
# NOTE: removing commit here because we pass auto_commit
# finally:
# frappe.db.commit()
def make_cache_queue():
	'''cache values in queue before sending'''
cache = frappe.cache()
emails = frappe.db.sql('''select
name
from
`tabEmail Queue`
where
(status='Not Sent' or status='Partially Sent') and
(send_after is null or send_after < %(now)s)
order
by priority desc, creation asc
limit 500''', { 'now': now_datetime() })
# reset value
cache.delete_value('cache_email_queue')
for e in emails:
cache.rpush('cache_email_queue', e[0])
def send_one(email, smtpserver=None, auto_commit=True, now=False, from_test=False):
'''Send Email Queue with given smtpserver'''
email = frappe.db.sql('''select
name, status, communication, message, sender, reference_doctype,
		reference_name, unsubscribe_params, unsubscribe_method, expose_recipients,
show_as_cc, add_unsubscribe_link
from
`tabEmail Queue`
where
name=%s
for update''', email, as_dict=True)[0]
recipients_list = frappe.db.sql('''select name, recipient, status from
`tabEmail Queue Recipient` where parent=%s''',email.name,as_dict=1)
if frappe.are_emails_muted():
frappe.msgprint(_("Emails are muted"))
return
if cint(frappe.defaults.get_defaults().get("hold_queue"))==1 :
return
if email.status not in ('Not Sent','Partially Sent') :
# rollback to release lock and return
frappe.db.rollback()
return
frappe.db.sql("""update `tabEmail Queue` set status='Sending', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
try:
if not frappe.flags.in_test:
if not smtpserver: smtpserver = SMTPServer()
smtpserver.setup_email_account(email.reference_doctype)
for recipient in recipients_list:
if recipient.status != "Not Sent":
continue
message = prepare_message(email, recipient.recipient, recipients_list)
if not frappe.flags.in_test:
smtpserver.sess.sendmail(email.sender, recipient.recipient, encode(message))
recipient.status = "Sent"
frappe.db.sql("""update `tabEmail Queue Recipient` set status='Sent', modified=%s where name=%s""",
(now_datetime(), recipient.name), auto_commit=auto_commit)
		# if any recipient was sent to successfully, mark the queue entry Sent
if any("Sent" == s.status for s in recipients_list):
frappe.db.sql("""update `tabEmail Queue` set status='Sent', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
else:
frappe.db.sql("""update `tabEmail Queue` set status='Error', error=%s
where name=%s""", ("No recipients to send to", email.name), auto_commit=auto_commit)
if frappe.flags.in_test:
frappe.flags.sent_mail = message
return
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
except (smtplib.SMTPServerDisconnected,
smtplib.SMTPConnectError,
smtplib.SMTPHeloError,
smtplib.SMTPAuthenticationError,
JobTimeoutException):
# bad connection/timeout, retry later
if any("Sent" == s.status for s in recipients_list):
frappe.db.sql("""update `tabEmail Queue` set status='Partially Sent', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
else:
frappe.db.sql("""update `tabEmail Queue` set status='Not Sent', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
# no need to attempt further
return
except Exception, e:
frappe.db.rollback()
if any("Sent" == s.status for s in recipients_list):
frappe.db.sql("""update `tabEmail Queue` set status='Partially Errored', error=%s where name=%s""",
(unicode(e), email.name), auto_commit=auto_commit)
else:
frappe.db.sql("""update `tabEmail Queue` set status='Error', error=%s
where name=%s""", (unicode(e), email.name), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
if now:
raise e
else:
# log to Error Log
log('frappe.email.queue.flush', unicode(e))
def prepare_message(email, recipient, recipients_list):
message = email.message
	if email.add_unsubscribe_link and email.reference_doctype:  # note: the unsubscribe_message check is skipped here; without it the body carries no unsubscribe placeholder, so the replace below is a no-op
unsubscribe_url = get_unsubcribed_url(email.reference_doctype, email.reference_name, recipient,
email.unsubscribe_method, email.unsubscribe_params)
message = message.replace("<!--unsubscribe url-->", quopri.encodestring(unsubscribe_url))
if email.expose_recipients == "header":
pass
else:
if email.expose_recipients == "footer":
if isinstance(email.show_as_cc, basestring):
email.show_as_cc = email.show_as_cc.split(",")
email_sent_to = [r.recipient for r in recipients_list]
email_sent_cc = ", ".join([e for e in email_sent_to if e in email.show_as_cc])
email_sent_to = ", ".join([e for e in email_sent_to if e not in email.show_as_cc])
if email_sent_cc:
email_sent_message = _("This email was sent to {0} and copied to {1}").format(email_sent_to,email_sent_cc)
else:
email_sent_message = _("This email was sent to {0}").format(email_sent_to)
message = message.replace("<!--cc message-->", quopri.encodestring(email_sent_message))
message = message.replace("<!--recipient-->", recipient)
return message
def clear_outbox():
"""Remove low priority older than 31 days in Outbox and expire mails not sent for 7 days.
Called daily via scheduler."""
frappe.db.sql("""delete q, r from `tabEmail Queue` as q, `tabEmail Queue Recipient` as r where q.name = r.parent and q.priority=0 and
datediff(now(), q.modified) > 31""")
frappe.db.sql("""update `tabEmail Queue` as q, `tabEmail Queue Recipient` as r set q.status='Expired', r.status='Expired'
where q.name = r.parent and datediff(curdate(), q.modified) > 7 and q.status='Not Sent' and r.status='Not Sent'""")
|
rohitwaghchaure/frappe
|
frappe/email/queue.py
|
Python
|
mit
| 16,756
|
# coding: utf-8
import os
import click
from chado import ChadoInstance
from chakin.cli import pass_context
from chakin import config
from chakin.io import warn, info
CONFIG_TEMPLATE = """## Chado's chakin: Global Configuration File.
# Each stanza should contain a single chado server to control.
#
# You can set the key __default to the name of a default instance
__default: local
local:
dbhost: "%(dbhost)s"
dbname: "%(dbname)s"
dbuser: "%(dbuser)s"
dbpass: "%(dbpass)s"
dbport: "%(dbport)s"
dbschema: "%(schema)s"
"""
SUCCESS_MESSAGE = (
"Ready to go! Type `chakin` to get a list of commands you can execute."
)
@click.command("config_init")
@pass_context
def cli(ctx, url=None, api_key=None, admin=False, **kwds):
"""Help initialize global configuration (in home directory)
"""
click.echo("""Welcome to Chado's Chakin! (茶巾)""")
if os.path.exists(config.global_config_path()):
info("Your chakin configuration already exists. Please edit it instead: %s" % config.global_config_path())
return 0
while True:
# Check environment
dbhost = click.prompt("PGHOST")
dbname = click.prompt("PGDATABASE")
dbuser = click.prompt("PGUSER")
dbpass = click.prompt("PGPASS", hide_input=True)
dbport = click.prompt("PGPORT")
schema = click.prompt("PGSCHEMA")
info("Testing connection...")
try:
instance = ChadoInstance(dbhost=dbhost, dbname=dbname, dbuser=dbuser, dbpass=dbpass, dbport=dbport, dbschema=schema)
# We do a connection test during startup.
info("Ok! Everything looks good.")
break
except Exception as e:
warn("Error, we could not access the configuration data for your instance: %s", e)
should_break = click.prompt("Continue despite inability to contact this instance? [y/n]")
if should_break in ('Y', 'y'):
break
config_path = config.global_config_path()
if os.path.exists(config_path):
warn("File %s already exists, refusing to overwrite." % config_path)
return -1
with open(config_path, "w") as f:
f.write(CONFIG_TEMPLATE % {
'dbhost': dbhost,
'dbname': dbname,
'dbuser': dbuser,
'dbpass': dbpass,
'dbport': dbport,
'schema': schema,
})
info(SUCCESS_MESSAGE)
|
abretaud/python-chado
|
chakin/commands/cmd_init.py
|
Python
|
mit
| 2,442
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
** marun flavor: compoom **
usage: compoom=/path/to/append.script
add parameter -XX:OnOutOfMemoryError=
"""
import util
def apply(conf, currentflags, flavor_conf):
compressor = flavor_conf.get('compressor', 'gzip,lz4 --rm')
for cmd in compressor.split(','):
cmds = cmd.split()
if util.find_cmds(cmds[0]):
return { 'XX': "OnOutOfMemoryError=%s" % ' '.join(cmds) }
return {}
|
nishemon/marun
|
marun/flavor/compoom.py
|
Python
|
mit
| 483
|
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.pagedown import PageDown
from config import config
import os
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
pagedown.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
return app
#
#app = create_app(os.getenv('FLASK_CONFIG') or 'default')
|
Summerotter/furryyellowpages
|
app/__init__.py
|
Python
|
mit
| 1,090
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction* RPCs."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class SignRawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-deprecatedrpc=signrawtransaction"]]
def successful_signing_test(self):
"""Create and sign a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA']
inputs = [
# Valid pay-to-pubkey scripts
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
{'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0,
'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'},
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransactionwithkey(rawTx, privKeys, inputs)
# 1) The transaction has a complete set of signatures
assert rawTxSigned['complete']
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
# Perform the same test on signrawtransaction
rawTxSigned2 = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
assert_equal(rawTxSigned, rawTxSigned2)
def script_verification_error_test(self):
"""Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
# Make sure decoderawtransaction is at least marginally sane
decodedRawTx = self.nodes[0].decoderawtransaction(rawTx)
for i, inp in enumerate(inputs):
assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"])
assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])
# Make sure decoderawtransaction throws if there is extra data
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00")
rawTxSigned = self.nodes[0].signrawtransactionwithkey(rawTx, privKeys, scripts)
# 3) The transaction has no complete set of signatures
assert not rawTxSigned['complete']
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
assert not rawTxSigned['errors'][0]['witness']
# Perform same test with signrawtransaction
rawTxSigned2 = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
assert_equal(rawTxSigned, rawTxSigned2)
# Now test signing failure for transaction with input witnesses
p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"
rawTxSigned = self.nodes[0].signrawtransactionwithwallet(p2wpkh_raw_tx)
# 7) The transaction has no complete set of signatures
assert not rawTxSigned['complete']
# 8) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 9) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# Non-empty witness checked here
assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"])
assert not rawTxSigned['errors'][0]['witness']
# Perform same test with signrawtransaction
rawTxSigned2 = self.nodes[0].signrawtransaction(p2wpkh_raw_tx)
assert_equal(rawTxSigned, rawTxSigned2)
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
|
Bushstar/UFO-Project
|
test/functional/rpc_signrawtransaction.py
|
Python
|
mit
| 7,698
|
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2021, 2022 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA client validation utilities."""
import sys
from typing import Dict, NoReturn, Union
import click
from reana_commons.errors import REANAValidationError
from reana_commons.validation.operational_options import validate_operational_options
from reana_commons.validation.utils import validate_reana_yaml, validate_workflow_name
from reana_client.printer import display_message
from reana_client.validation.compute_backends import validate_compute_backends
from reana_client.validation.environments import validate_environment
from reana_client.validation.parameters import validate_parameters
from reana_client.validation.workspace import _validate_workspace
def validate_reana_spec(
reana_yaml,
filepath,
access_token=None,
skip_validation=False,
skip_validate_environments=True,
pull_environment_image=False,
server_capabilities=False,
):
"""Validate REANA specification file."""
if "options" in reana_yaml.get("inputs", {}):
workflow_type = reana_yaml["workflow"]["type"]
workflow_options = reana_yaml["inputs"]["options"]
try:
reana_yaml["inputs"]["options"] = validate_operational_options(
workflow_type, workflow_options
)
except REANAValidationError as e:
display_message(e.message, msg_type="error")
sys.exit(1)
if not skip_validation:
display_message(
f"Verifying REANA specification file... {filepath}", msg_type="info",
)
validate_reana_yaml(reana_yaml)
display_message(
"Valid REANA specification file.", msg_type="success", indented=True,
)
validate_parameters(reana_yaml)
if server_capabilities:
_validate_server_capabilities(reana_yaml, access_token)
if not skip_validate_environments:
display_message(
"Verifying environments in REANA specification file...", msg_type="info",
)
validate_environment(reana_yaml, pull=pull_environment_image)
def _validate_server_capabilities(reana_yaml: Dict, access_token: str) -> None:
"""Validate server capabilities in REANA specification file.
:param reana_yaml: dictionary which represents REANA specification file.
:param access_token: access token of the current user.
"""
from reana_client.api.client import info
info_response = info(access_token)
display_message(
"Verifying compute backends in REANA specification file...", msg_type="info",
)
supported_backends = info_response.get("compute_backends", {}).get("value")
validate_compute_backends(reana_yaml, supported_backends)
root_path = reana_yaml.get("workspace", {}).get("root_path")
available_workspaces = info_response.get("workspaces_available", {}).get("value")
_validate_workspace(root_path, available_workspaces)
def validate_input_parameters(live_parameters, original_parameters):
"""Return validated input parameters."""
parsed_input_parameters = dict(live_parameters)
for parameter in parsed_input_parameters.keys():
if parameter not in original_parameters:
display_message(
"Given parameter - {0}, is not in reana.yaml".format(parameter),
msg_type="error",
)
del live_parameters[parameter]
return live_parameters
def validate_workflow_name_parameter(
ctx: click.core.Context, _: click.core.Option, workflow_name: str
) -> Union[str, NoReturn]:
"""Validate workflow name parameter."""
try:
return validate_workflow_name(workflow_name)
except ValueError as e:
display_message(str(e), msg_type="error")
sys.exit(1)
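# Wiring sketch (hypothetical command, standard click callback API): the
# function above can validate an option before the command body runs.
#
#   @click.command()
#   @click.option("-w", "--workflow", callback=validate_workflow_name_parameter)
#   def status(workflow):
#       ...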
|
reanahub/reana-client
|
reana_client/validation/utils.py
|
Python
|
mit
| 3,953
|
#!/usr/bin/env python
from .util import Spec
class Port(Spec):
STATES = [
"listening", "closed", "open",
"bound_to",
"tcp", "tcp6", "udp"
]
def __init__(self, portnumber):
self.portnumber = portnumber
        self.state = {
            'state': 'closed',
            'bound': False,
            'uid': None,
            'inode': None,
            'proto': None,
        }
        # populate self.state from netstat once the defaults are in place
        self.get_state()
#
self.WIN = "Port %s is %%s" % self.portnumber
def get_state(self):
import os
for line in os.popen("netstat -tnle").readlines():
line = line.strip().split()
if len(line) != 8:
continue
(proto, _, _, local, foreign, state, uid, inode) = line
            if proto in ('tcp', 'tcp6'):
                # rsplit handles both "0.0.0.0:22" and ":::22"
                (bound, port) = local.rsplit(':', 1)
            else:
                continue
port = int(port)
if port == self.portnumber:
self.state = {
'state': 'listening',
'bound': bound,
'uid': uid,
'inode': inode,
'proto': proto,
}
    def _make_sure(self, x, y):
        return x == y
def sb_listening(self, *args):
if self._make_sure(self.state['state'], "listening"):
return True, "Port %s is listening" % self.portnumber
return False, "Port %s is current %s not listening" % (
self.portnumber,
self.state['state']
)
def sb_closed(self, *args):
if self._make_sure(self.state['state'], "closed"):
return True, "Port %s is closed" % self.portnumber
return False, "Port %s is current %s not closed" % (
self.portnumber, self.state['state']
)
def sb_tcp(self, *args):
if self._make_sure(self.state['proto'], "tcp"):
return True
return "Port %s is using protocol %s not TCP" % (
self.portnumber, self.state['proto']
)
def sb_udp(self, *args):
if self._make_sure(self.state['proto'], "udp"):
return True
return "Port %s is using protocol %s not udp" % (
self.portnumber, self.state['proto']
)
def sb_tcp6(self, *args):
if self._make_sure(self.state['proto'], "tcp6"):
return True
return "Port %s is using protocol %s not TCP6" % (
self.portnumber, self.state['proto']
)
def sb_bound_to(self, bound_ip):
if self._make_sure(self.state['bound'], bound_ip):
return True, "Port %s is bound to %s" % (self.portnumber, bound_ip)
return False, "The port currently bound to %s not %s" % (
self.state['bound'], bound_ip
)
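# Usage sketch (assuming the Spec base class dispatches these sb_* checks):
#
#   ssh = Port(22)
#   ok, msg = ssh.sb_listening()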
|
daniellawrence/pyspeccheck
|
speccheck/port.py
|
Python
|
mit
| 2,928
|
__version__ = '0.3.0'
|
alexhayes/django-psi
|
psi/__init__.py
|
Python
|
mit
| 22
|
"""
To create an Attribute Editor template using python, do the following:
1. create a subclass of `uitypes.AETemplate`
2. set its ``_nodeType`` class attribute to the name of the desired node type, or name the class using the
convention ``AE<nodeType>Template``
3. import the module
AETemplates which do not meet one of the two requirements listed in step 2 will be ignored. To ensure that your
Template's node type is being detected correctly, use the ``AETemplate.nodeType()`` class method::
import AETemplates
AETemplates.AEmib_amb_occlusionTemplate.nodeType()
As a convenience, when pymel is imported it will automatically import the module ``AETemplates``, if it exists,
thereby causing any AETemplates within it or its sub-modules to be registered. Be sure to import pymel
or modules containing your ``AETemplate`` classes before opening the Attribute Editor for the node types in question.
To check which python templates are loaded::
from pymel.core.uitypes import AELoader
print AELoader.loadedTemplates()
The example below demonstrates the simplest case, which is the first. It provides a layout for the mib_amb_occlusion
mental ray shader.
"""
import pymel.core as pm
import mymagicbox.AETemplateBase as AETemplateBase
import mymagicbox.log as log
class AEtestNodeATemplate(AETemplateBase.mmbTemplateBase):
def buildBody(self, nodeName):
log.debug("building AETemplate for node: %s", nodeName)
self.AEswatchDisplay(nodeName)
self.beginLayout("Common Material Attributes",collapse=0)
self.addControl("attribute0")
self.endLayout()
self.beginLayout("Version",collapse=0)
self.addControl("mmbversion")
self.endLayout()
pm.mel.AEdependNodeTemplate(self.nodeName)
self.addExtraControls()
|
yaoyansi/mymagicbox
|
version_control/scripts/AETemplates/AEtestNodeATemplate.py
|
Python
|
mit
| 1,749
|
from rpython.rlib import jit
import interpret
import parse
import kernel_type as kt
def entry_point(argv):
jit.set_param(None, "trace_limit", 20000)
interpret.run(argv)
return 0
def target(driver, args):
return entry_point, None
|
euccastro/icbink
|
entry_point.py
|
Python
|
mit
| 248
|
import pytest
from playlog.lib.json import Encoder
def test_encoder():
encoder = Encoder()
with pytest.raises(TypeError):
encoder.default(object())
|
rossnomann/playlog
|
tests/src/tests/common/test_json.py
|
Python
|
mit
| 167
|
class Solution(object):
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
if not needle:
return 0
if len(haystack) < len(needle):
return -1
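        # Naive scan over every alignment i; checking the first and last
        # characters of the window first skips most inner-loop work.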
for i in xrange(len(haystack)):
if i + len(needle) > len(haystack):
return -1
if haystack[i] != needle[0] or haystack[i+len(needle)-1] != needle[-1]:
continue
else:
j=0
while j < len(needle) and i+j < len(haystack):
if haystack[i+j] != needle[j]:
break
j += 1
if j == len(needle):
return i
return -1
if __name__ == '__main__':
    s1 = ""
    s2 = ""
    print Solution().strStr(s1, s2)  # empty needle -> 0
|
comicxmz001/LeetCode
|
Python/28 Implement strStr.py
|
Python
|
mit
| 792
|
import os
import json
class FakeResponse(object):
status_code = 200
text = None
headers = []
def __init__(self, text):
self.text = text
def json(self):
return json.loads(self.text)
def response(name):
content = open(os.path.join(os.path.dirname(__file__), 'responses', name)).read()
return FakeResponse(content)
|
VerosK/python-fakturoid
|
tests/mock.py
|
Python
|
mit
| 329
|
import numpy as np
def gauss(win, sigma):
    """2D Gaussian kernel of size win x win, centred on the window."""
    x = np.arange(0, win, 1, float)
    y = x[:, np.newaxis]
    x0 = y0 = win // 2
    # note the negative exponent and the parenthesised 2*sigma**2 denominator
    g = 1 / (2 * np.pi * sigma**2) * np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))
    return g
def gaussx(win, sigma):
    """Partial derivative of the Gaussian with respect to x."""
    x = np.arange(0, win, 1, float)
    y = x[:, np.newaxis]
    x0 = y0 = win // 2
    # d/dx of the Gaussian carries a leading minus sign
    gx = -(x - x0) / (2 * np.pi * sigma**4) * np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))
    return gx
def gaussy(win, sigma):
    """Partial derivative of the Gaussian with respect to y."""
    x = np.arange(0, win, 1, float)
    y = x[:, np.newaxis]
    x0 = y0 = win // 2
    gy = -(y - y0) / (2 * np.pi * sigma**4) * np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))
    return gy
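# Sanity-check sketch (hypothetical values): with the negative exponent in
# place, a kernel much wider than sigma should sum to roughly 1.
#
#   k = gauss(31, 3.0)
#   print(k.sum())  # ~= 1.0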
|
pranka02/image_processing_py
|
gaussian.py
|
Python
|
mit
| 652
|
#!/usr/bin/env python
'''
'roi_gcibs.py' compares two groups informed by an a priori bootstrap analysis.
'''
import os
import sys
import argparse
import tempfile, shutil
import json
import pprint
import copy
from collections import defaultdict
from _common import systemMisc as misc
from _common import crun
import error
import message
import stage
import fnndsc as base
class FNNDSC_roigcibs(base.FNNDSC):
'''
This class is a specialization of the FNNDSC base and geared to dyslexia
curvature analysis.
'''
#
# Class member variables -- if declared here are shared
# across all instances of this class
#
_dictErr = {
'subjectSpecFail' : {
'action' : 'examining command line arguments, ',
'error' : 'it seems that no subjects were specified.',
'exitCode' : 10},
'noFreeSurferEnv' : {
'action' : 'examining environment, ',
'error' : 'it seems that the FreeSurfer environment has not been sourced.',
'exitCode' : 11},
'noStagePostConditions' : {
'action' : 'querying a stage for its exitCode, ',
'error' : 'it seems that the stage has not been specified.',
'exitCode' : 12},
'subjectDirnotExist': {
'action' : 'examining the <subjectDirectories>, ',
'error' : 'the directory does not exist.',
'exitCode' : 13},
'Load' : {
'action' : 'attempting to pickle load object, ',
'error' : 'a PickleError occured.',
'exitCode' : 14},
'outDirNotCreate': {
'action' : 'attempting to create the <outDir>, ',
'error' : 'a system error was encountered. Do you have create permission?',
'exitCode' : 15},
'workingDirNotExist': {
'action' : 'attempting to access the <workingDir>, ',
'error' : 'a system error was encountered. Does the directory exist?',
'exitCode' : 16},
}
def l_pval(self):
return self._l_pval
def l_roi(self):
return self._l_ROI
def l_hemisphere(self):
return self._l_hemi
def l_surface(self):
return self._l_surface
def l_statFunc(self):
return self._l_statFunc
def l_group(self):
return self._l_group
def l_curvFunc(self):
return self._l_curvFunc
def pval(self):
return self._str_pval
def topDir(self, *args):
if len(args):
self._topDir = args[0]
else:
return self._topDir
def dirSpec(self):
"""
Return the dirSpec based on internal pipeline._str_* variables
"""
return '%s/%s/%s/%s/%s/%s/%s' % (
self.outDir(),
self._str_annotation,
self._str_group,
self._str_pval,
self._str_statFunc,
self._str_surface,
self._str_hemi
)
def dirSpecPartial(self):
"""
Return the dirSpec based on internal pipeline._str_* variables w/o
the leading directories.
"""
return '%s/%s/%s/%s' % ( self._str_pval,
self._str_statFunc,
self._str_surface,
self._str_hemi)
def namespec(self, *args):
'''
Return the namespec based on internal pipeline._str_* variables.
'''
str_sep = "-"
if len(args): str_sep = args[0]
return '%s%s%s%s%s%s%s%s%s%s%s' % (
self._str_annotation, str_sep,
self._str_group, str_sep,
self._str_pval, str_sep,
self._str_statFunc, str_sep,
self._str_surface, str_sep,
self._str_hemi
)
def schedulerStdOutDir(self, *args):
if len(args):
self._str_schedulerStdOutDir = args[0]
else:
return self._str_schedulerStdOutDir
def schedulerStdErrDir(self, *args):
if len(args):
self._str_schedulerStdErrDir = args[0]
else:
return self._str_schedulerStdErrDir
def roi(self):
return self._str_roi
def surface(self):
return self._str_surface
def hemi(self):
return self._str_hemi
def statFunc(self):
return self._str_statFunc
def curvFunc(self):
return self._str_curvFunc
def outDir(self, *args):
if len(args):
self._outDir = args[0]
else:
return self._outDir
def workingDir(self, *args):
if len(args):
self._workingDir = args[0]
else:
return self._workingDir
def clobber(self, *args):
if len(args):
self._b_clobber = args[0]
else:
return self._b_clobber
def group(self):
return self._str_group
def __init__(self, **kwargs):
"""
Basic constructor. Checks on named input args, checks that files
exist and creates directories.
"""
base.FNNDSC.__init__(self, **kwargs)
self._lw = 120
self._rw = 20
self._l_ROI = []
self._l_pval = []
self._l_group = []
self._l_surface = []
self._l_statFunc = []
self._l_curvFunc = []
self._l_hemi = []
self._l_annot = []
self._outDir = ''
self._workingDir = ''
self._stageslist = '12'
self._f_lowerBoundHard = 0.0
self._f_lowerBoundSoft = 0.0
self._f_upperBoundSoft = 0.0
# Internal tracking vars
self._str_pval = ''
self._str_group = ''
self._str_roi = ''
self._str_hemi = ''
self._str_surface = ''
self._str_statFunc = ''
self._str_curvFunc = ''
self._str_annotation = ''
self._topDir = ''
self._d_bootstrapOccurrence = Tree()
self._d_bootstrapThreshold = Tree()
self._d_bootstrapFiltered = Tree()
# Scheduler std out/err dirs
self._str_schedulerStdOutDir = '~/scratch'
self._str_schedulerStdErrDir = '~/scratch'
self._b_clobber = False
for key, value in kwargs.iteritems():
if key == 'outDir': self._outDir = value
if key == 'workingDir': self._workingDir = value
if key == 'stages': self._stageslist = value
if key == 'curvFunc': self._l_curvFunc = value.split(':')
if key == 'pval': self._l_pval = value.split(',')
if key == 'group': self._l_group = value.split(',')
if key == 'surface': self._l_surface = value.split(',')
if key == 'statFunc': self._l_statFunc = value.split(',')
if key == 'hemi': self._l_hemi = value.split(',')
if key == 'annot': self._l_annot = value.split(',')
if key == 'lowerBoundSoft': self._f_lowerBoundSoft = float(value)
if key == 'lowerBoundHard': self._f_lowerBoundHard = float(value)
if key == 'upperBoundSoft': self._f_upperBoundSoft = float(value)
if key == 'schedulerStdOutDir': self._str_schedulerStdOutDir = value
if key == 'schedulerStdErrDir': self._str_schedulerStdErrDir = value
        if not os.path.isdir(self._workingDir): error.fatal(self, 'workingDirNotExist')
def initialize(self):
"""
This method provides some "post-constructor" initialization. It is
typically called after the constructor and after other class flags
have been set (or reset).
"""
# Set the stages
self._pipeline.stages_canRun(False)
lst_stages = list(self._stageslist)
for index in lst_stages:
stage = self._pipeline.stage_get(int(index))
stage.canRun(True)
def run(self):
"""
The main 'engine' of the class.
"""
base.FNNDSC.run(self)
def innerLoop(self, func_callBack, *args, **callBackArgs):
'''
        A loop function that calls func_callBack(**callBackArgs)
        at the innermost level of the nested data dictionary structure.
        The loop order:
            annotation, group, pval, statFunc, surface, hemi, curvFunc
        Note that the internal tracking variables, _str_annotation ...
        _str_curvFunc, are automatically updated by this method.
        The **callBackArgs is a generic dictionary holder that is interpreted
        by this loop controller and also passed down to the callback
        function.
        In the context of the loop controller, loop conditions can
        be changed by passing appropriately named args in the
'''
ret = True
_str_log = ''
for key, val in callBackArgs.iteritems():
if key == 'hemi': self._l_hemi = val
if key == 'surface': self._l_surface = val
if key == 'curv': self._l_curvFunc = val
if key == 'group': self._l_group = val
if key == 'log': _str_log = val
if len(_str_log): self._log(_str_log)
for self._str_annotation in self._l_annot:
for self._str_group in self._l_group:
for self._str_pval in self._l_pval:
for self._str_statFunc in self._l_statFunc:
for self._str_surface in self._l_surface:
for self._str_hemi in self._l_hemi:
for self._str_curvFunc in self._l_curvFunc:
ret = func_callBack(**callBackArgs)
if len(_str_log): self._log('[ ok ]\n', rw=self._rw)
return ret
def outputDirTree_build(self, **kwargs):
'''Build the tree structure containing output images
'''
OSshell('mkdir -p %s/%s' % (
self.dirSpec(),
self._str_curvFunc
))
def tcl_append(self, str_prefix, str_suffix, str_hemi):
"""
Append some text to each tcl file to save snapshots of different brain aspects.
"""
if str_hemi == "lh":
frontal_or_distal = "frontal"
distal_or_frontal = "distal"
medial_or_lateral = "medial"
lateral_or_medial = "lateral"
if str_hemi == "rh":
frontal_or_distal = "distal"
distal_or_frontal = "frontal"
medial_or_lateral = "lateral"
lateral_or_medial = "medial"
str_content = '''
# Initial lateral view
read_binary_curv
redraw
save_tiff %s/lateral-%s.tiff
# inferior view
rotate_brain_x 90
redraw
save_tiff %s/inferior-%s.tiff
# superior view
rotate_brain_x -180
redraw
save_tiff %s/superior-%s.tiff
# reset
rotate_brain_x 90
# %s view
rotate_brain_y 90
redraw
save_tiff %s/%s-%s.tiff
# medial view
rotate_brain_y 90
redraw
save_tiff %s/medial-%s.tiff
# %s view
rotate_brain_y 90
redraw
save_tiff %s/%s-%s.tiff
exit 0
''' % ( str_prefix, str_suffix,
str_prefix, str_suffix,
str_prefix, str_suffix,
distal_or_frontal,
str_prefix, distal_or_frontal, str_suffix,
str_prefix, str_suffix,
frontal_or_distal,
str_prefix, frontal_or_distal, str_suffix)
return str_content
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
def labelScript_process(self, **kwargs):
"""
Write the tcl files to display filtered ROIs
:param kwargs:
:return:
"""
spec = self._d_bootstrapFiltered['%s-filtered' % self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc].keys()[0]
innerDict = self._d_bootstrapFiltered['%s-filtered' % self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec]
# print(innerDict)
str_dirSpec = '%s/%s' % (self.dirSpec(), self._str_curvFunc)
os.chdir(str_dirSpec)
str_fileStem = "%s-%s" % (self.namespec("-"), self._str_curvFunc)
str_TCLfileName = '%s.tcl' % (str_fileStem)
str_JSONfileName = '%s.json' % (str_fileStem)
self._log("\n")
str_currentDir = os.getcwd()
l_currentDir = str_currentDir.split('/')
l_workingDir = l_currentDir[-8:-1]
l_workingDir.append(l_currentDir[-1])
index = 0
self._log("Current dir: %s\n" % '/'.join(l_workingDir))
self._log('Creating tcl file: %s...\n' % str_TCLfileName)
with open(str_JSONfileName, 'w') as JSONfile:
json.dump(innerDict, JSONfile, indent=4, sort_keys=True)
self._log('Creating JSON file: %s...\n' % str_JSONfileName)
for key,val in innerDict.iteritems():
if val <= self._f_lowerBoundSoft:
misc.file_writeOnce(str_TCLfileName,
'labl_load %s ; labl_set_color %d 0 0 %d\n' %\
(key, index, 2*int(val)), mode='a')
if val > self._f_lowerBoundSoft and val < self._f_upperBoundSoft:
misc.file_writeOnce(str_TCLfileName,
'labl_load %s ; labl_set_color %d 0 %d 0\n' %\
                                (key, index, 2*int(val)), mode='a')
if val >= self._f_upperBoundSoft:
misc.file_writeOnce(str_TCLfileName,
'labl_load %s ; labl_set_color %d %d 0 0\n' %\
(key, index, 2*int(val)), mode='a')
index += 1
misc.file_writeOnce(str_TCLfileName, self.tcl_append(str_dirSpec, str_fileStem, self._str_hemi), mode='a')
str_scriptDir = '/neuro/users/rudolphpienaar/projects/dyslexia-curv-analysis-2/sh'
str_subjectDir = '/neuro/users/rudolphpienaar/projects/dyslexia-curv-analysis-2/results/6-exp-dyslexia-run'
str_execCmd = 'cd %s; %s/%s -S %s -D %s -h %s' % \
(
os.getcwd(),
str_scriptDir,
"./tksurfer-run.bash",
str_subjectDir,
os.getcwd(),
self._str_hemi,
)
self._log("Shell command = %s\n" % str_execCmd)
self._log('Executing tcl file...\n')
OSshell(str_execCmd)
return {"return": "ok"}
def bootstrap_filteredDictionaryBuild(self, **kwargs):
        '''Filter the group-comparison results against the bootstrap threshold.
:param kwargs:
:return:
'''
spec = self._d_bootstrapOccurrence[self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc].keys()[0]
self._d_bootstrapFiltered['%s-filtered' % self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc] = self._d_bootstrapOccurrence[self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc].copy()
for key in self._d_bootstrapFiltered['%s-filtered' % self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec].keys():
if self._d_bootstrapFiltered['%s-filtered' % self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec][key] < \
self._d_bootstrapThreshold['%s-threshold' % self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec][key] or \
self._d_bootstrapFiltered['%s-filtered' % self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec][key] <= self._f_lowerBoundHard:
self._d_bootstrapFiltered['%s-filtered' % self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec].pop(key, None)
return {"return": self._d_bootstrapFiltered}
def bootstrap_thresholdDictionaryBuild(self, **kwargs):
        '''Combine the intra-group occurrences (sum or max) into a lower confidence bound
:param kwargs:
:return:
'''
str_thresholdOperation = "sum"
for kwarg,val in kwargs.iteritems():
if kwarg == 'threshold': str_thresholdOperation = val
str_g1 = self._str_group[0]
str_g2 = self._str_group[1]
spec = self._d_bootstrapOccurrence[str_g1][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc].keys()[0]
self._d_bootstrapThreshold['%s-threshold' % self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc] = copy.deepcopy(self._d_bootstrapOccurrence[self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc])
for key in self._d_bootstrapOccurrence[str_g1][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec].keys():
if str_thresholdOperation == "sum":
self._d_bootstrapThreshold['%s-threshold' % self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec][key] = \
self._d_bootstrapOccurrence[str_g1][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec][key] + \
self._d_bootstrapOccurrence[str_g2][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec][key]
if str_thresholdOperation == "max":
if self._d_bootstrapOccurrence[str_g1][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec][key] >= self._d_bootstrapOccurrence[str_g2][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec][key]:
self._d_bootstrapThreshold['%s-threshold' % self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec][key] = \
self._d_bootstrapOccurrence[str_g1][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec][key]
else:
self._d_bootstrapThreshold['%s-threshold' % self._str_group][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec][key] = \
self._d_bootstrapOccurrence[str_g2][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec][key]
return {"return": self._d_bootstrapThreshold}
def bootstrap_occurrenceDictionariesBuild(self, **kwargs):
"""Build the occurrence dictionaries:
This method captures the bootstrap occurrence dictionaries for the
comparison group as well as the two intra-group variance occurrences.
"""
str_g1 = self._str_group[0]
str_g2 = self._str_group[1]
str_g3 = self._str_group
l_subgroup = [str_g1, str_g2, str_g3]
for str_subgroup in l_subgroup:
os.chdir(self._workingDir)
if str_subgroup in [str_g1, str_g2]:
str_g = "%s000" % str_subgroup
str_compGroup = '12'
else:
str_g = '6'
str_compGroup = self._str_group
str_bsDir = "bootstrap-%s-%s/%s/%s" % (str_g,
self._str_curvFunc,
self._str_annotation,
str_compGroup)
os.chdir(str_bsDir)
if self._str_curvFunc == "thickness":
str_cfilt = "thickness"
else:
str_cfilt = "curv"
self._log('Parsing occurrence data for %2d.%s.%s.%s.%s\n' % (int(str_subgroup), self._str_annotation, self._str_hemi, self._str_surface, self._str_curvFunc))
OSshell('find . -iname occurence.txt | grep %s | grep %s | grep %s | ../../../../sh/ocfilt.sh -t 0 | python -m json.tool ' % \
(str_cfilt, self._str_hemi, self._str_surface))
self._d_bootstrapOccurrence[str_subgroup][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc] = json.loads(OSshell.stdout())
spec = self._d_bootstrapOccurrence[str_subgroup][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc].keys()[0]
self._d_bootstrapOccurrence[str_subgroup][self._str_annotation][self._str_hemi][self._str_surface][self._str_curvFunc][spec].pop("", None)
return {"return": self._d_bootstrapOccurrence}
def synopsis(ab_shortOnly = False):
scriptName = os.path.basename(sys.argv[0])
shortSynopsis = '''
SYNOPSIS
%s \\
[--stages <stages>] \\
[-o|--outDir <outputRootDir>] \\
[-w|--workingDir <workingDir>] \\
[-v|--verbosity <verboseLevel>] \\
[-s|--stages <stages>] \\
[-p|--pval <pvalCutoffList>] \\
[-g|--group <groupList>] \\
[-S|--surface <surfaceList>] \\
[-f|--statFunc <statFuncList>] \\
[-c|--curvFunc <curvFuncList>] \\
[-a|--annot <annotList>] \\
[-m|--hemi <hemiList>] \\
[--schedulerStdOutDir <dir>] \\
[--schedulerStdErrDir <dir>] \\
''' % scriptName
description = '''
DESCRIPTION
`%s' performs a group comparison informed by an a priori bootstrap
analysis. The bootstrap analysis provides a bound on the feature
variance within a group, and the bottom bound of the threshold
of significance becomes the sum of the underlying group variances.
ARGS
--stages <stages>
The stages to execute. This is specified in a string, such as '1234'
which would imply stages 1, 2, 3, and 4.
The special keyword 'all' can be used to turn on all stages.
--pval <pvalCutoffList>
The pval cutoffs to consider. In practice, this is always 'le1,le5'
--group <groupList>
The group list to process.
--surface <surfaceList>
The surface list to process. In practice, 'smoothwm,pial'.
--statFunc <statFuncList>
The statistical functional data to analyze. Typically
'ptile-raw,ptile-convex'.
--curvFunc <curvFuncList>
The curvature bootstrap analysis to use. Typically
'K,BE,S', 'H,K1,K2', 'thickness'.
--hemi <hemiList>
The hemispheres to process. In practice, this is always 'lh,rh'.
--annot <annotList>
The annotation list to process.
--threshold <thresholdOperation>
The operation to apply in thresholding variances from the underlying
bootstrap variances. Either "sum" or "max".
    --lowerBoundHard / --lowerBoundSoft / --upperBoundSoft <bound>
           The hard lower, soft lower, and soft upper bounds for filtered
           (and thresholded) occurrence comparisons.
--workingDir <workingDir>
The working directory for the script.
--output <outputRootDir>
The top level directory name to contain results. The fully qualified
output dir is <workingDir>/<outputDir>
--clobber
A boolean flag. If true, will not delete existing output directories
but simply add more results down existing trees, clobbering existing
files if they already exist. If not specified then existing output
trees are deleted. This assures that a given run contains only data
from that run.
Note that running the same experiment multiple times with "--clobber"
will *grow* resultant ROI label files!
For a distributed experiment, delete *all* existing roigcibs trees
*before* running the experiment!
EXAMPLES
''' % (scriptName)
if ab_shortOnly:
return shortSynopsis
else:
return shortSynopsis + description
def f_stageShellExitCode(**kwargs):
'''
A simple function that returns a conditional based on the
exitCode of the passed stage object. It assumes global access
to the <pipeline> object.
**kwargs:
obj=<stage>
The stage to query for exitStatus.
'''
stage = None
for key, val in kwargs.iteritems():
if key == 'obj': stage = val
if not stage: error.fatal(pipeline, "noStagePostConditions")
if not stage.callCount(): return True
if not stage.exitCode(): return True
else: return False
#
# entry point
#
if __name__ == "__main__":
# always show the help if no arguments were specified
if len( sys.argv ) == 1:
print synopsis()
sys.exit( 1 )
verbosity = 0
parser = argparse.ArgumentParser(description = synopsis(True))
parser.add_argument('--verbosity', '-v',
dest='verbosity',
action='store',
default=0,
help='verbosity level')
parser.add_argument('--output', '-o',
dest='outDir',
action='store',
default='roigcibs',
help='output root directory')
parser.add_argument('--workingDir', '-w',
dest='workingDir',
action='store',
default='./',
help='output working directory')
parser.add_argument('--clobber', '-C',
dest='clobber',
action='store_true',
default=False,
help='if specified, do not erase existing output dir if found.')
parser.add_argument('--stages', '-s',
dest='stages',
action='store',
default='01',
help='analysis stages')
parser.add_argument('--pval', '-p',
dest='pval',
action='store',
default='le1',
help='comma separated p-val cutoff threshold')
parser.add_argument('--group', '-g',
dest='group',
action='store',
default='13',
help='comma separated group list to process')
parser.add_argument('--surface', '-S',
dest='surface',
action='store',
default='smoothwm',
help='comma separated surface list to process')
parser.add_argument('--statFunc', '-f',
dest='statFunc',
action='store',
default='ptile-raw',
help='comma separated statistical function list to process')
parser.add_argument('--curvFunc', '-c',
dest='curvFunc',
action='store',
default='H,K,K1,K2,C,BE,S,thickness',
help='comma separated curvature function list to process')
parser.add_argument('--hemi', '-m',
dest='hemi',
action='store',
default='lh,rh',
help='comma separated hemisphere list to process')
parser.add_argument('--annot', '-a',
dest='annot',
action='store',
default='aparc.annot',
help='comma separated annotation list to process')
parser.add_argument('--threshold', '-t',
dest='threshold',
action='store',
default='max',
help='the threshold operation -- "max" or "sum"')
parser.add_argument('--lowerBoundHard', '-L',
dest='lowerBoundHard',
action='store',
default=0.0,
help='the hard lower bound for filtered occurrences.')
parser.add_argument('--lowerBoundSoft', '-l',
dest='lowerBoundSoft',
action='store',
default=80.0,
help='the soft lower bound for filtered occurrences.')
parser.add_argument('--upperBoundSoft', '-u',
dest='upperBoundSoft',
action='store',
default=94.0,
help='the soft upper bound for filtered occurrences.')
parser.add_argument('--schedulerStdOutDir',
dest='schedulerStdOutDir',
action='store',
default='~/scratch',
help='top level directory containing stdout from scheduled jobs')
parser.add_argument('--schedulerStdErrDir',
dest='schedulerStdErrDir',
action='store',
default='~/scratch',
help='top level directory containing stderr from scheduled jobs')
args = parser.parse_args()
# A generic "shell"
OSshell = crun.crun()
OSshell.echo(False)
OSshell.echoStdOut(False)
OSshell.detach(False)
OSshell.waitForChild(True)
Tree = lambda: defaultdict(Tree)
roigcibs = FNNDSC_roigcibs(
outDir = '%s' % (args.outDir),
workingDir = args.workingDir,
stages = args.stages,
pval = args.pval,
group = args.group,
surface = args.surface,
curvFunc = args.curvFunc,
statFunc = args.statFunc,
hemi = args.hemi,
annot = args.annot,
lowerBoundHard = args.lowerBoundHard,
lowerBoundSoft = args.lowerBoundSoft,
upperBoundSoft = args.upperBoundSoft,
schedulerStdOutDir = args.schedulerStdOutDir,
schedulerStdErrDir = args.schedulerStdErrDir,
logTo = '%s/roigcibs.log' % args.workingDir,
syslog = True,
logTee = True)
roigcibs.clobber(args.clobber)
roigcibs.verbosity(args.verbosity)
pipeline = roigcibs.pipeline()
pipeline.poststdout(True)
pipeline.poststderr(True)
os.chdir(roigcibs._workingDir)
roigcibs._workingDir = os.getcwd()
roigcibs.topDir(os.getcwd())
stage0 = stage.Stage(
name = 'roigcibs-0-init',
fatalConditions = True,
syslog = True,
logTo = '%s/roigcibs-0-init.log' % args.workingDir,
logTee = True,
)
def f_stage0callback(**kwargs):
for key, val in kwargs.iteritems():
if key == 'obj': stage = val
if key == 'pipe': pipeline = val
log = stage._log
os.chdir(pipeline._workingDir)
if os.path.isdir(pipeline.outDir()) and not pipeline.clobber():
log('Existing outDir tree found... deleting...\n')
shutil.rmtree(pipeline.outDir())
OSshell('mkdir -p %s' % pipeline.outDir())
os.chdir(pipeline.outDir())
pipeline.outDir(os.getcwd())
if OSshell.exitCode() != 0: error.fatal(pipeline, 'outDirNotCreate')
d_ret = pipeline.innerLoop(
pipeline.outputDirTree_build,
log = "Building output directory tree...\n"
)
stage.exitCode(0)
return True
stage0.def_stage(f_stage0callback, obj=stage0, pipe=roigcibs)
stage1 = stage.Stage(
name = 'roigcibs-1-filter',
fatalConditions = True,
syslog = True,
logTo = '%s/roigcibs-1-filter.log' % args.workingDir,
logTee = True,
)
def f_stage1callback(**kwargs):
for key, val in kwargs.iteritems():
if key == 'obj': stage = val
if key == 'pipe': pipeline = val
os.chdir(pipeline._workingDir)
d_ret = pipeline.innerLoop(
pipeline.bootstrap_occurrenceDictionariesBuild,
log = "Parsing bootstrap occurrences...\n"
)
d_bootstrapOccurrence = d_ret["return"]
# print(json.dumps(d_bootstrapOccurrence, indent=4, sort_keys=True))
d_ret = pipeline.innerLoop(
pipeline.bootstrap_thresholdDictionaryBuild,
threshold = args.threshold,
log = "Building threshold dictionaries...\n"
)
# print(json.dumps(pipeline._d_bootstrapThreshold, indent=4, sort_keys=True))
d_ret = pipeline.innerLoop(
pipeline.bootstrap_filteredDictionaryBuild,
log = "Building filtered dictionaries...\n"
)
# print(json.dumps(pipeline._d_bootstrapFiltered, indent=4, sort_keys=True))
stage.exitCode(0)
return True
stage1.def_stage(f_stage1callback, obj=stage1, pipe=roigcibs)
stage2 = stage.Stage(
name = 'roigcibs-2-labelReader',
fatalConditions = True,
syslog = True,
logTo = '%s/roigcibs-2-labelReader.log' % args.workingDir,
logTee = True
)
# stage4.def_preconditions(stage1.def_postconditions()[0], **stage1.def_postconditions()[1])
stage2.def_preconditions(lambda **x: True)
def f_stage2callback(**kwargs):
for key, val in kwargs.iteritems():
if key == 'obj': stage = val
if key == 'pipe': pipeline = val
os.chdir(pipeline._workingDir)
d_ret = pipeline.innerLoop(
pipeline.labelScript_process,
log = "Writing and processing FreeSurfer tcl label script files...\n"
)
stage.exitCode(0)
return True
stage2.def_stage(f_stage2callback, obj=stage2, pipe=roigcibs)
roigcibslog = roigcibs.log()
roigcibslog('INIT: %s\n' % ' '.join(sys.argv))
roigcibs.stage_add(stage0)
roigcibs.stage_add(stage1)
roigcibs.stage_add(stage2)
# roigcibs.stage_add(stage3)
# roigcibs.stage_add(stage4)
roigcibs.initialize()
roigcibs.run()
|
FNNDSC/roi_tag
|
roi_gcibs.py
|
Python
|
mit
| 37,159
|
from collections import UserList
from copy import copy
from npc.util import print_err
class Tag(UserList):
"""
    Defines a multi-value tag object
"""
def __init__(self, name: str, *args, required: bool=False, hidden: bool=False, limit: int=-1):
"""
Create a new Tag object
Args:
name (str): This tag's name
args (str): Initial values to populate for this tag
required (bool): Whether this tag must be filled for the character
to be valid.
            hidden (bool): Whether this tag should be hidden from character
                listings. When set, a corresponding @hide tag will be
                generated by to_header.
limit (int): Maximum number of values allowed in the tag. Passing a
negative number allows an infinite number of values to be
stored.
"""
self.name = name
self.required = required
self.hidden = hidden
self.hidden_values = []
self.limit = limit
self.problems = []
self.subtag_name = None
super().__init__(args)
def __repr__(self):
return "{cls}('{name}', {data}, required={req}, hidden={hidden}, hidden_values={hidden_vals}, limit={limit})".format(
cls=type(self).__name__,
name=self.name,
data=self.data,
req=self.required,
hidden=self.hidden,
hidden_vals=self.hidden_values,
limit=self.limit
)
def update(self, values):
"""
        Replace our stored values with those in values, copying hidden state when present
Args:
values (list[str]): List of string values to add to this tag
"""
if hasattr(values, 'hidden'):
self.hidden = values.hidden
if hasattr(values, 'hidden_values'):
self.hidden_values.extend(values.hidden_values)
if hasattr(values, 'data'):
self.data = values.data
else:
self.data = values
def append(self, value: str):
"""
Append a value to the tag
If the value is blank, skip it
Args:
value (str): The value to append
"""
if not value.strip():
return
super().append(value)
@property
def filled(self):
"""
bool: Whether this tag has meaningful data
True whenever the tag has data
"""
return len(self.filled_data) > 0
@property
def present(self):
"""
bool: Whether this tag is considered "present" in the character
True whenever the tag should be included in the character. Defaults to
whether the tag has data.
"""
return self.filled
def __bool__(self):
"""
Delegate truthyness to the `present` property
"""
        return bool(self.present)
@property
def filled_data(self):
"""
list: All non-whitespace values
"""
return [v for v in self.data if v.strip()]
def touch(self, present: bool = True):
"""
No-op for compatibility with Flag class
Args:
present (bool): Whether to mark the flag present or not present.
Defaults to True.
Returns:
None
"""
print_err("Calling touch() on non-flag class {} object '{}'".format(type(self).__name__, self.name))
def subtag(self, val: str):
"""
No-op for compatibility with GroupTag class
Args:
val (str): Group name
Returns:
None
"""
print_err("Calling touch() on non-flag class {} object '{}'".format(type(self).__name__, self.name))
@property
def valid(self):
"""
bool: Whether this tag is internally valid
This property is only meaningful after calling validate()
"""
return len(self.problems) == 0
def validate(self, strict: bool=False):
"""
Validate this tag's attributes
Validations:
* If required is set, at least one value must be filled
* Required is incompatible with a limit of zero
* Hidden values must exist
* (strict) If limit is non-negative, the total values must be <= limit
Args:
strict (bool): Whether to report non-critical errors and omissions
Returns:
True if this tag has no validation problems, false if not
"""
self.problems = []
if self.required and not self.filled:
self.problems.append("No values for tag '{}'".format(self.name))
if self.required and self.limit == 0:
self.problems.append("Tag '{}' is required but limited to zero values".format(self.name))
for value in [v for v in self.hidden_values if not v in self.data]:
self.problems.append("Value '{}' for tag '{}' cannot be hidden, because it does not exist".format(value, self.name))
if strict:
if self.limit > -1 and len(self.data) > self.limit:
self.problems.append("Too many values for tag '{}'. Limit of {}".format(self.name, self.limit))
return self.valid
def to_header(self):
"""
Generate a text header representation of this tag
This creates the appropriate `@tag value` lines that, when parsed, will
recreate the data of this tag. If the tag is marked as hidden, an
additional `@hide tag` will be appended. If `.filled` is false, an
empty string is returned.
Returns:
A string of the header lines needed to create this tag, or an empty
            string if filled is false.
"""
if not self.filled:
return ''
header_lines = []
for val in self.data:
header_lines.append("@{} {}".format(self.name, val))
if val in self.hidden_values:
header_lines.append("@hide {} >> {}".format(self.name, val))
if self.hidden:
header_lines.append("@hide {}".format(self.name))
return "\n".join(header_lines)
def tagslice(self, start, stop):
"""
Create a new tag with a slice of our data
This applies a basic slice operation to the stored values and creates a
copy of this tag object whose data is the new slice. This is primarily
useful for generating limited headers.
Args:
start (int|None): First index included in the slice
stop (int|None): Index to end the slice, not included in the slice itself
Returns:
Tag object containing the sliced values
"""
new_tag = copy(self)
new_tag.data = self.data[start:stop]
return new_tag
def first(self):
"""
Get a copy of this tag containing only the first value
Convenience method that's identical to calling `tagslice(0, 1)`
Returns:
Tag object containing the first value
"""
return self.tagslice(0, 1)
def remaining(self):
"""
Get a copy of this tag excluding the first value
Convenience method that's identical to calling `tagslice(1, None)`
Returns:
Tag object excluding the first value
"""
return self.tagslice(1, None)
def first_value(self):
"""
Get the first stored value as a bare string
Returns:
String or None
"""
if self.filled:
return self[0]
else:
return None
def contains(self, value: str):
"""
Determine whether this tag contains a particular value
When value is the special '*' char, contains will be true if the tag is
filled.
        Otherwise, contains is only true when the value wholly or partially
        matches at least one stored value. The comparison is done using casefold()
to avoid case conflicts.
Args:
value (str): Value to search for
Returns:
True if this tag contains value, false if not.
"""
if value == '*' and self.filled:
return True
value = value.casefold()
for real_value in self:
if value in real_value.casefold():
return True
return False
def hide_value(self, value):
"""
Hide a single value for this tag
Hiding will only work if the value to be hidden exactly matches a value
present in this tag's data.
Args:
value (str): The value to hide
"""
self.hidden_values.append(value)
def sanitize(self):
"""
Remove this tag's values if the tag is marked hidden
"""
if self.hidden:
self.clear()
for value in self.hidden_values:
try:
self.data.remove(value)
except ValueError:
continue
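if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module):
    # build a required tag, add a value, validate, and render its header.
    demo = Tag('appearance', 'tall', required=True)
    demo.append('grey eyes')
    assert demo.validate()
    print(demo.to_header())  # @appearance tall / @appearance grey eyes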
|
aurule/npc
|
npc/character/tags/tag.py
|
Python
|
mit
| 9,073
|
"""srt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include("cut_link.urls", namespace='cut')),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
true7/srt
|
src/srt/urls.py
|
Python
|
mit
| 1,086
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-23 10:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0006_auto_20160321_1527'),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.AddField(
model_name='post',
name='blog',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Blog'),
),
]
|
pinax/pinax-blog
|
pinax/blog/migrations/0007_auto_20161223_1013.py
|
Python
|
mit
| 752
|
from contextlib import contextmanager
@contextmanager
def failnow():
try:
yield
except Exception:
import sys
sys.excepthook(*sys.exc_info())
sys.exit(1)
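# Illustrative usage (not part of the original recipe): any exception raised
# inside the block is reported through sys.excepthook and then the process
# exits with status 1, so calling code cannot catch and swallow it.
#
#     with failnow():
#         risky_operation()  # hypothetical callable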
|
ActiveState/code
|
recipes/Python/577863_Context_manager_prevent_calling_code_catching/recipe-577863.py
|
Python
|
mit
| 195
|
"""
Qxf2 Services: A plug-n-play class for logging.
This class wraps around Python's loguru module.
"""
import os, inspect
import pytest,logging
from loguru import logger
from pytest_reportportal import RPLogger, RPLogHandler
class Base_Logging():
"A plug-n-play class for logging"
def __init__(self,log_file_name=None,level="DEBUG",format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {module} | {message}"):
"Constructor for the logging class"
self.log_file_name=log_file_name
self.log_file_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','log'))
self.level=level
self.format=format
self.log = self.set_log(self.log_file_name,self.level,self.format)
        self.rp_logger = None
        self.exceptions = []
def set_log(self,log_file_name,level,format,test_module_name=None):
"Add an handler sending log messages to a sink"
if test_module_name is None:
test_module_name = self.get_calling_module()
if not os.path.exists(self.log_file_dir):
os.makedirs(self.log_file_dir)
if log_file_name is None:
log_file_name = self.log_file_dir + os.sep + test_module_name + '.log'
else:
log_file_name = self.log_file_dir + os.sep + log_file_name
logger.add(log_file_name,level=level,format=format,
rotation="30 days", filter=None, colorize=None, serialize=False, backtrace=True, enqueue=False, catch=True)
def get_calling_module(self):
"Get the name of the calling module"
calling_file = inspect.stack()[-1][1]
if 'runpy' in calling_file:
calling_file = inspect.stack()[4][1]
calling_filename = calling_file.split(os.sep)
        #This logic brought to you by windows + cygwin + git bash
        if len(calling_filename) == 1: #Needed for '/'-separated paths on Windows shells
calling_filename = calling_file.split('/')
self.calling_module = calling_filename[-1].split('.')[0]
return self.calling_module
def setup_rp_logging(self, rp_pytest_service):
"Setup reportportal logging"
try:
# Setting up a logging.
logging.setLoggerClass(RPLogger)
self.rp_logger = logging.getLogger(__name__)
self.rp_logger.setLevel(logging.INFO)
# Create handler for Report Portal.
rp_handler = RPLogHandler(rp_pytest_service)
# Set INFO level for Report Portal handler.
rp_handler.setLevel(logging.INFO)
return self.rp_logger
except Exception as e:
self.write("Exception when trying to set rplogger")
self.write(str(e))
self.exceptions.append("Error when setting up the reportportal logger")
def write(self,msg,level='info'):
"Write out a message"
#fname = inspect.stack()[2][3] #May be use a entry-exit decorator instead
all_stack_frames = inspect.stack()
for stack_frame in all_stack_frames[1:]:
if 'Base_Page' not in stack_frame[1]:
break
fname = stack_frame[3]
d = {'caller_func': fname}
if self.rp_logger:
if level.lower()== 'debug':
self.rp_logger.debug(msg=msg)
elif level.lower()== 'info':
self.rp_logger.info(msg)
elif level.lower()== 'warn' or level.lower()=='warning':
self.rp_logger.warning(msg)
elif level.lower()== 'error':
self.rp_logger.error(msg)
elif level.lower()== 'critical':
self.rp_logger.critical(msg)
else:
self.rp_logger.critical(msg)
return
if level.lower()== 'debug':
logger.debug("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'info':
logger.info("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'warn' or level.lower()=='warning':
logger.warning("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'error':
logger.error("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'critical':
logger.critical("{module} | {msg}",module=d['caller_func'],msg=msg)
else:
logger.critical("Unknown level passed for the msg: {}", msg)
|
qxf2/qxf2-page-object-model
|
utils/Base_Logging.py
|
Python
|
mit
| 4,369
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "maiziblog2.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
llinmeng/PythonStudy
|
maiziedu/3-Pycharm-Study/maiziblog2/manage.py
|
Python
|
mit
| 253
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos "jscompile" plugin
#
# Copyright 2013 (C) Intel
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"jscompile" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
import sys
import subprocess
import os
import json
import inspect
import platform
import cocos
from MultiLanguage import MultiLanguage
class CCPluginJSCompile(cocos.CCPlugin):
"""
compiles (encodes) and minifies JS files
"""
@staticmethod
def plugin_name():
return "jscompile"
@staticmethod
def brief_description():
# returns a short description of this module
return MultiLanguage.get_string('JSCOMPILE_BRIEF')
    # This is not the constructor, just an initializer
def init(self, options, workingdir):
"""
Arguments:
- `options`:
"""
self._current_src_dir = None
self._src_dir_arr = self.normalize_path_in_list(options.src_dir_arr)
self._dst_dir = options.dst_dir
self._use_closure_compiler = options.use_closure_compiler
self._verbose = options.verbose
self._config = None
self._workingdir = workingdir
self._closure_params = ''
if options.compiler_config != None:
f = open(options.compiler_config)
self._config = json.load(f)
f.close()
self._pre_order = self._config["pre_order"]
self.normalize_path_in_list(self._pre_order)
self._post_order = self._config["post_order"]
self.normalize_path_in_list(self._post_order)
self._skip = self._config["skip"]
self.normalize_path_in_list(self._skip)
self._closure_params = self._config["closure_params"]
if options.closure_params is not None:
self._closure_params = options.closure_params
self._js_files = {}
self._compressed_js_path = os.path.join(self._dst_dir, options.compressed_filename)
self._compressed_jsc_path = os.path.join(self._dst_dir, options.compressed_filename+"c")
def normalize_path_in_list(self, list):
for i in list:
tmp = os.path.normpath(i)
list[list.index(i)] = tmp
return list
def get_relative_path(self, jsfile):
try:
# print "current src dir: "+self._current_src_dir)
pos = jsfile.index(self._current_src_dir)
if pos != 0:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_SRCDIR_NAME_NOT_FOUND'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
# print "origin js path: "+ jsfile
# print "relative path: "+jsfile[len(self._current_src_dir)+1:]
return jsfile[len(self._current_src_dir)+1:]
except ValueError:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_SRCDIR_NAME_NOT_FOUND'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
def get_output_file_path(self, jsfile):
"""
Gets output file path by source js file
"""
# create folder for generated file
jsc_filepath = ""
relative_path = self.get_relative_path(jsfile)+"c"
jsc_filepath = os.path.join(self._dst_dir, relative_path)
dst_rootpath = os.path.split(jsc_filepath)[0]
try:
# print "creating dir (%s)" % (dst_rootpath)
os.makedirs(dst_rootpath)
except OSError:
if os.path.exists(dst_rootpath) == False:
# There was an error on creation, so make sure we know about it
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_MKDIR_FAILED_FMT', dst_rootpath),
cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
# print "return jsc path: "+jsc_filepath
return jsc_filepath
def compile_js(self, jsfile, output_file):
"""
Compiles js file
"""
cocos.Logging.debug(MultiLanguage.get_string('JSCOMPILE_DEBUG_COMPILE_FILE_FMT', jsfile))
jsbcc_exe_path = ""
if(cocos.os_is_linux()):
if(platform.architecture()[0] == "32bit"):
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "linux", "jsbcc_x86")
else:
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "linux", "jsbcc_x64")
else:
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "jsbcc")
cmd_str = "\"%s\" \"%s\" \"%s\"" % (jsbcc_exe_path, jsfile, output_file)
self._run_cmd(cmd_str)
def compress_js(self):
"""
Compress all js files into one big file.
"""
jsfiles = ""
for src_dir in self._src_dir_arr:
# print "\n----------src:"+src_dir
jsfiles = jsfiles + " --js ".join(self._js_files[src_dir]) + " "
compiler_jar_path = os.path.join(self._workingdir, "bin", "compiler.jar")
command = "java -jar \"%s\" %s --js %s --js_output_file \"%s\"" % (compiler_jar_path, self._closure_params, jsfiles, self._compressed_js_path)
self._run_cmd(command)
def deep_iterate_dir(self, rootDir):
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
if os.path.isdir(path):
self.deep_iterate_dir(path)
elif os.path.isfile(path):
if os.path.splitext(path)[1] == ".js":
self._js_files[self._current_src_dir].append(path)
def index_in_list(self, jsfile, l):
"""
Arguments:
- `self`:
- `jsfile`:
- `l`:
"""
index = -1
for el in l:
if jsfile.rfind(el) != -1:
# print "index:"+str(index+1)+", el:"+el
return index+1
index = index + 1
return -1
def js_filename_pre_order_compare(self, a, b):
return self._js_filename_compare(a, b, self._pre_order, 1)
def js_filename_post_order_compare(self, a, b):
return self._js_filename_compare(a, b, self._post_order, -1)
def _js_filename_compare(self, a, b, files, delta):
index_a = self.index_in_list(a, files)
index_b = self.index_in_list(b, files)
is_a_in_list = index_a != -1
is_b_in_list = index_b != -1
if is_a_in_list and not is_b_in_list:
return -1 * delta
elif not is_a_in_list and is_b_in_list:
return 1 * delta
elif is_a_in_list and is_b_in_list:
if index_a > index_b:
return 1
elif index_a < index_b:
return -1
else:
return 0
else:
return 0
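    # Worked example (illustrative): with pre_order = ["core.js"], sorting
    # ["a.js", "core.js", "b.js"] with js_filename_pre_order_compare moves
    # "core.js" to the front, while the stable sort preserves the relative
    # order of the files that are in neither list.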
def reorder_js_files(self):
if self._config == None:
return
# print "before:"+str(self._js_files)
for src_dir in self._js_files:
# Remove file in exclude list
need_remove_arr = []
for jsfile in self._js_files[src_dir]:
for exclude_file in self._skip:
if jsfile.rfind(exclude_file) != -1:
# print "remove:" + jsfile
need_remove_arr.append(jsfile)
for need_remove in need_remove_arr:
self._js_files[src_dir].remove(need_remove)
self._js_files[src_dir].sort(cmp=self.js_filename_pre_order_compare)
self._js_files[src_dir].sort(cmp=self.js_filename_post_order_compare)
# print '-------------------'
# print "after:" + str(self._js_files)
def handle_all_js_files(self):
"""
Arguments:
- `self`:
"""
if self._use_closure_compiler == True:
cocos.Logging.info(MultiLanguage.get_string('JSCOMPILE_INFO_COMPRESS_TIP'))
self.compress_js()
self.compile_js(self._compressed_js_path, self._compressed_jsc_path)
# remove tmp compressed file
os.remove(self._compressed_js_path)
else:
cocos.Logging.info(MultiLanguage.get_string('JSCOMPILE_INFO_COMPILE_TO_BYTECODE'))
for src_dir in self._src_dir_arr:
for jsfile in self._js_files[src_dir]:
self._current_src_dir = src_dir
self.compile_js(jsfile, self.get_output_file_path(jsfile))
# will be called from the cocos.py script
def run(self, argv, dependencies):
"""
"""
self.parse_args(argv)
# create output directory
try:
os.makedirs(self._dst_dir)
except OSError:
if os.path.exists(self._dst_dir) == False:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_MKDIR_FAILED_FMT', self._dst_dir),
cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
# download the bin folder
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "jsbcc")
if not os.path.exists(jsbcc_exe_path):
download_cmd_path = os.path.join(self._workingdir, os.pardir, os.pardir)
subprocess.call("python %s -f -r no" % (os.path.join(download_cmd_path, "download-bin.py")), shell=True, cwd=download_cmd_path)
# deep iterate the src directory
for src_dir in self._src_dir_arr:
self._current_src_dir = src_dir
self._js_files[self._current_src_dir] = []
self.deep_iterate_dir(src_dir)
self.reorder_js_files()
self.handle_all_js_files()
cocos.Logging.info(MultiLanguage.get_string('LUACOMPILE_INFO_FINISHED'))
def parse_args(self, argv):
"""
"""
from argparse import ArgumentParser
parser = ArgumentParser(prog="cocos %s" % self.__class__.plugin_name(),
description=self.__class__.brief_description())
parser.add_argument("-v", "--verbose",
action="store_true",
dest="verbose",
help=MultiLanguage.get_string('LUACOMPILE_ARG_VERBOSE'))
parser.add_argument("-s", "--src",
action="append", dest="src_dir_arr",
help=MultiLanguage.get_string('JSCOMPILE_ARG_SRC'))
parser.add_argument("-d", "--dst",
action="store", dest="dst_dir",
help=MultiLanguage.get_string('JSCOMPILE_ARG_DST'))
parser.add_argument("-c", "--use_closure_compiler",
action="store_true", dest="use_closure_compiler", default=False,
help=MultiLanguage.get_string('JSCOMPILE_ARG_CLOSURE'))
parser.add_argument("-o", "--output_compressed_filename",
action="store", dest="compressed_filename", default="game.min.js",
help=MultiLanguage.get_string('JSCOMPILE_ARG_OUT_FILE_NAME'))
parser.add_argument("-j", "--compiler_config",
action="store", dest="compiler_config",
help=MultiLanguage.get_string('JSCOMPILE_ARG_JSON_FILE'))
parser.add_argument("-m", "--closure_params",
action="store", dest="closure_params",
help=MultiLanguage.get_string('JSCOMPILE_ARG_EXTRA_PARAM'))
options = parser.parse_args(argv)
if options.src_dir_arr == None:
raise cocos.CCPluginError(MultiLanguage.get_string('JSCOMPILE_ERROR_SRC_NOT_SPECIFIED'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
elif options.dst_dir == None:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_DST_NOT_SPECIFIED'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
else:
for src_dir in options.src_dir_arr:
if os.path.exists(src_dir) == False:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_DIR_NOT_EXISTED_FMT',
(src_dir)),
cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
# script directory
if getattr(sys, 'frozen', None):
workingdir = os.path.realpath(os.path.dirname(sys.executable))
else:
workingdir = os.path.realpath(os.path.dirname(__file__))
self.init(options, workingdir)
|
dios-game/dios-cocos
|
src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/plugin_jscompile/__init__.py
|
Python
|
mit
| 12,716
|
import socket
import struct
import sys
from time import sleep
import logging
class SocketChannelFactory():
'''
    Provides a method to create a channel connection.
'''
def openChannel(self, host, port):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
return SocketChannel(sock)
except socket.error:
print "Cannot connect to {0} at port {1}. Please make sure the server is running.".format(host, port)
raise
class SocketChannel():
'''
SocketChannel provides an abstraction layer above the
underlying socket, which sends and receives messages framed
by their length as 4 bytes in Big Endian.
'''
def __init__(self, sock):
self.sock = sock
self.connected = True
def write(self, byteStream):
'''
Write a byte stream message to the channel.
The message will be prepended by its length packed
in 4 bytes in Big Endian.
'''
streamLen = struct.pack('>L', len(byteStream))
framedStream = streamLen + byteStream
try:
self.sock.sendall(framedStream)
except socket.error:
self.close()
raise Exception("socket send fail, close")
def read(self):
'''
Read a byte stream message prepended by its length
in 4 bytes in Big Endian from channel.
The message content is returned.
'''
lenField = self.readnbytes(4)
length = struct.unpack('>L', lenField)[0]
byteStream = self.readnbytes(length)
return byteStream
def readnbytes(self, n):
buf = ''
while n > 0:
data = self.sock.recv(n)
if data == '':
print "The socket between this client and the server has been broken."
logging.info("Socket broken or connection closed - data was empty while attempting to read")
raise Exception("socket broken or connection closed")
buf += data
n -= len(data)
return buf
def close(self):
print("closing connection")
self.sock.close()
self.connected = False
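# Minimal usage sketch (illustrative; 'localhost:9999' is a made-up address
# and assumes a compatible length-framed server is already listening there):
if __name__ == '__main__':
    channel = SocketChannelFactory().openChannel('localhost', 9999)
    channel.write('{"command": "ping"}')
    print channel.read()
    channel.close()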
|
PaulFlorea/Orbis2014
|
lib/tronclient/SocketChannel.py
|
Python
|
mit
| 2,095
|
#!/usr/bin/python
#
# Copyright (c) 2011 The VirtaCoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class VirtaCoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
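# e.g. bytereverse(0x12345678) == 0x78563412 (reverses the byte order of a 32-bit word)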
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = VirtaCoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
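# Illustrative CONFIG-FILE contents (keys taken from the parsing and defaults
# below; all values shown are placeholders):
#   host=127.0.0.1
#   port=22815
#   threads=1
#   hashmeter=0
#   scantime=30
#   rpcuser=someuser
#   rpcpass=somepass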
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 22815
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
virtacoin/VirtaCoinProject
|
contrib/pyminer/pyminer.py
|
Python
|
mit
| 6,441
|
from django.conf import settings
from mock import Mock
from cabot.cabotapp import defs
from datetime import datetime
def build_absolute_url(relative_url):
"""Prepend https?://host to a url, useful for links going into emails"""
return '{}://{}{}'.format(settings.WWW_SCHEME, settings.WWW_HTTP_HOST, relative_url)
def create_failing_service_mock():
"""
Create a Mock object mimicking a critical service, with a single (also mocked) failing check.
Note that not all attributes are mocked (notably hipchat_instance, mattermost_instance).
Primary keys/IDs are mocked to be 0. Functions that return querysets in reality (like active_status_checks)
will return hard-coded lists.
This is typically called by an AlertPlugin.send_test_alert() implementation, and further configured by calling
service_mock.configure_mock(attr=value, ...) to add any plugin-specific attributes (like mattermost_instance).
:return: Mock emulating a service with 1 failing check
"""
check_mock = Mock()
check_mock.configure_mock(id=0, pk=0, name='Alert Testing Check', active=True,
get_status_image=lambda: None, check_category=lambda: "Mock Check",
get_importance_display=lambda: "Critical")
service_mock = Mock()
service_mock.configure_mock(id=0, pk=0, name='Alert Testing Service', alerts_enabled=True,
# plugins use service.CRITICAL_STATUS etc, so we mock these constants too
CRITICAL_STATUS=defs.CRITICAL_STATUS, PASSING_STATUS=defs.PASSING_STATUS,
WARNING_STATUS=defs.WARNING_STATUS, ERROR_STATUS=defs.ERROR_STATUS,
status_checks=[check_mock], recent_snapshots=[],
overall_status=defs.CRITICAL_STATUS,
active_status_checks=lambda: [check_mock],
all_passing_checks=lambda: [], all_failing_checks=lambda: [check_mock])
return service_mock
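# Illustrative sketch (not part of cabot): a plugin's send_test_alert() would
# typically build on the mock above and attach any plugin-specific attributes.
# The send_alert() signature and the mattermost_instance value below are
# assumptions, not cabot's confirmed API:
#
#     def send_test_alert(self, user):
#         service = create_failing_service_mock()
#         service.configure_mock(mattermost_instance=Mock(pk=0))
#         self.send_alert(service, users=[user], duty_officers=[])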
def format_datetime(dt):
'''
Convert datetime to string. None is converted to empty string. This is used
primarily for formatting datetimes in API responses, whereas format_timestamp
is used for a more human-readable format to be displayed on the web.
'''
return '' if dt is None else datetime.strftime(dt, '%Y-%m-%d %H:%M:%S')
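# e.g. format_datetime(datetime(2020, 1, 2, 3, 4, 5)) -> '2020-01-02 03:04:05'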
|
Affirm/cabot
|
cabot/cabotapp/utils.py
|
Python
|
mit
| 2,425
|
'''
alignment utility functions
'''
import os
import sys
import subprocess
import logging
import mmap
import gzip
import multiprocessing
from operator import itemgetter
import numpy as np
## public functions ##
def pair_alignment(paths, args):
""" creates the alignment """
# validate parameters.
assert os.path.isdir(args.base_dir), 'base_dir'
assert os.path.isfile(args.ctg_fasta), 'ctg_fasta'
    assert os.path.isfile(args.read1_sam), 'read1_sam'
    assert os.path.isfile(args.read2_sam), 'read2_sam'
assert os.path.isfile(args.size_file), 'size_file'
# key size.
key_size = args.key_size
    # relevant files.
base_dir = os.path.abspath(args.base_dir)
size_file = os.path.abspath(args.size_file)
ctg_fasta = os.path.abspath(args.ctg_fasta)
in1_sam = os.path.abspath(args.read1_sam)
in2_sam = os.path.abspath(args.read2_sam)
read1_sam = os.path.abspath('%s/read1.sam' % base_dir)
read2_sam = os.path.abspath('%s/read2.sam' % base_dir)
names1_npy = os.path.abspath('%s/name1.npy' % base_dir)
names2_npy = os.path.abspath('%s/name2.npy' % base_dir)
sort1_npy = os.path.abspath('%s/sort1.npy' % base_dir)
sort2_npy = os.path.abspath('%s/sort2.npy' % base_dir)
ant_dir = '%s/ant' % base_dir
idx_dir = '%s/index' % base_dir
idx_file = '%s/index' % idx_dir
# ensure annotation dir exists.
subprocess.call(['mkdir', '-p', ant_dir])
# compute name sizes.
names_size1 = _name_size(in1_sam)
names_size2 = _name_size(in2_sam)
# check if sorted is present.
if os.path.isfile(sort1_npy) == False:
# create / load name array.
if os.path.isfile(names1_npy) == False:
logging.info("creating name array 1")
names1 = _extract_names(in1_sam, names_size1, key_size)
_names(file_name=names1_npy, data=names1)
else:
logging.info("loading name array 1")
names1 = _names(file_name=names1_npy)
# sort it.
logging.info("sorting name array 1")
names1.sort(order=['name'])
_names(file_name=sort1_npy, data=names1)
del names1
subprocess.call(["rm", "-f", names1_npy])
# check if sorted is present.
if os.path.isfile(sort2_npy) == False:
# create / load name array.
if os.path.isfile(names2_npy) == False:
logging.info("creating name array 2")
names2 = _extract_names(in2_sam, names_size2, key_size)
_names(file_name=names2_npy, data=names2)
else:
logging.info("loading name array 2")
names2 = _names(file_name=names2_npy)
# sort it.
logging.info("sorting name array 2")
names2.sort(order=['name'])
_names(file_name=sort2_npy, data=names2)
del names2
subprocess.call(["rm", "-f", names2_npy])
# create sizes.
sizes = dict()
with open(size_file, "rb") as fin:
lines = fin.readlines()
for line in lines:
sz, name = line.strip().split()
sz = int(sz)
sizes[name] = sz
# create the annotation arrays.
annotes = dict()
for ref in sizes:
annotes[ref] = np.zeros(sizes[ref], dtype=np.int)
# do work.
_dual_loop(sort1_npy, sort2_npy, in1_sam, in2_sam, read1_sam, read2_sam, annotes)
# save repeat annotation., ant_dir
for ref in annotes:
# create name.
fname = '%s/%s.npy' % (ant_dir, ref)
# look for existing.
if os.path.isfile(fname):
tmp = np.load(fname)
annotes[ref] = annotes[ref] + tmp
# serialize it.
np.save(fname, annotes[ref])
def create_alignment(paths, args):
""" creates the alignment """
# validate parameters.
assert os.path.isdir(args.base_dir), 'base_dir'
assert os.path.isfile(args.ctg_fasta), 'ctg_fasta'
assert os.path.isfile(args.read1_fastq), 'read1_fastq'
assert os.path.isfile(args.read2_fastq), 'read2_fastq'
    # relevant files.
base_dir = os.path.abspath(args.base_dir)
ctg_fasta = os.path.abspath(args.ctg_fasta)
read1_fastq = os.path.abspath(args.read1_fastq)
read2_fastq = os.path.abspath(args.read2_fastq)
tmp1_sam = os.path.abspath('%s/tmp1.sam' % base_dir)
tmp2_sam = os.path.abspath('%s/tmp2.sam' % base_dir)
ant_dir = '%s/ant' % base_dir
idx_dir = '%s/index' % base_dir
idx_file = '%s/index' % idx_dir
# build index if not present.
if os.path.isdir(idx_dir) == False:
subprocess.call(["mkdir", "-p", idx_dir])
create_idx(ctg_fasta, idx_file)
# remove annotation dir if present.
if os.path.isdir(ant_dir) == True:
subprocess.call(["rm", "-rf", ant_dir])
subprocess.call(["mkdir", "-p", ant_dir])
# perform alignment.
cmd1 = ['bowtie2','--reorder', '-k', '10', '-q','-p',str(args.num_cpu), '-x', idx_file, '-U', read1_fastq, '-S', tmp1_sam]
cmd2 = ['bowtie2','--reorder', '-k', '10', '-q','-p',str(args.num_cpu), '-x', idx_file, '-U', read2_fastq, '-S', tmp2_sam]
#print ' '.join(cmd1)
subprocess.call(cmd1)
#print ' '.join(cmd2)
subprocess.call(cmd2)
def create_idx(asm_fasta, index_file):
""" make bowtie2 index
Parameters:
-----------
asm_fasta : str
index_file : str
"""
# run the command.
subprocess.call(['bowtie2-build', '-f', asm_fasta, index_file])
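# e.g. (illustrative paths): create_idx('asm/contigs.fasta', 'index/index')
# shells out to: bowtie2-build -f asm/contigs.fasta index/index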
## internal functions ##
def _dual_loop(sort1_npy, sort2_npy, in1_sam, in2_sam, out1_sam, out2_sam, annotes):
""" extract unique alignments, pairs them and annotate repeats"""
# open SAM files.
sam1 = open(in1_sam, "rb")
sam2 = open(in2_sam, "rb")
out1 = open(out1_sam, "wb")
out2 = open(out2_sam, "wb")
# create iterators.
itr1 = _uniq_gen(sort1_npy, sam1, annotes)
itr2 = _uniq_gen(sort2_npy, sam2, annotes)
    # prime each iterator with its first unique entry.
    u1 = itr1.next()
    u2 = itr2.next()
    while u1 is not None and u2 is not None:
# peek for a match.
if u1['name'] == u2['name']:
# seek to it.
sam1.seek(u1['row'])
sam2.seek(u2['row'])
out1.write(sam1.readline())
out2.write(sam2.readline())
# change both.
u1 = itr1.next()
u2 = itr2.next()
else:
# pop smaller.
if u1['name'] > u2['name']:
u2 = itr2.next()
else:
u1 = itr1.next()
# close them.
sam1.close()
sam2.close()
out1.close()
out2.close()
def _names(file_name=None, data=None, size=None, name_size=None):
    """ allocate, load, or save the name/offset array:
    - size + name_size: allocate a new zeroed array
    - file_name only:   load the array from disk
    - file_name + data: save the array to disk
    """
    if size is not None and name_size is not None:
        return np.zeros(size, dtype=np.dtype([('name','S%d' % name_size),('row',np.int)]))
    elif file_name is not None and data is None:
        return np.load(file_name)
    elif file_name is not None and data is not None:
        np.save(file_name, data)
    else:
        logging.error("bad usage")
        sys.exit(1)
def _name_size(file_path):
""" guess string size """
# determine name size.
with open(file_path, "rb") as fin:
for line1 in fin:
if line1[0] == '@': continue
name_size = len(line1.split("\t")[0]) + 10
break
return name_size
def _extract_names(file_name, name_size, key_size):
""" builds numpy array of name hits"""
# count lines.
logging.info("reading lines")
with open(file_name, "rb") as fin:
size = 0
for line in fin:
if line[0] == '@': continue
size += 1
#if size > 10000000: break
# allocate array.
names = _names(size=size, name_size=name_size)
# copy data into array.
logging.info("copying data")
with open(file_name, "rb") as fin:
offset = 0
idx = 0
for line1 in fin:
# skip header.
if line1[0] == '@':
offset += len(line1)
continue
# tokenize.
tokens = line1.split("\t")
# skip no map.
if tokens[2] == "*":
offset += len(line1)
continue
# operate.
if key_size == 0:
names[idx]['name'] = tokens[0]
else:
names[idx]['name'] = tokens[0][0:-key_size]
names[idx]['row'] = offset
# reset.
idx += 1
offset += len(line1)
# resize.
names.resize(idx)
# return the size.
return names
def _uniq_gen(names_npy, sam, annotes):
""" generator for unique reads in list """
# create mmap object.
mmap = np.load(names_npy, mmap_mode='r')
# setup buffered loop.
buffstep = 10000000
buffo = 0
buffs = buffstep
if buffo + buffstep > mmap.shape[0]:
buffs = mmap.shape[0] - buffo
# buffer loop.
while buffo < mmap.shape[0]:
# make buffer.
logging.info("unique: buffering: %d %d" % (buffo, buffs))
names = mmap[buffo:buffs]
# iterate over non-boundry cases.
for i in range(1, names.shape[0]-1):
# must not match its neighbors.
if names[i-1]['name'] != names[i]['name'] and names[i+1]['name'] != names[i]['name']:
yield names[i]
else:
# annotate repeat.
sam.seek(names[i]['row'])
tokens = sam.readline().split("\t")
ctg = tokens[2]
start = int(tokens[3])
stop = start + len(tokens[9])
annotes[ctg][start:stop] = 1
buffo += 1
        # check the first entry of the buffer.
        if names[0]['name'] != names[1]['name']:
            yield names[0]
        else:
            # annotate repeat.
            sam.seek(names[0]['row'])
            tokens = sam.readline().split("\t")
            ctg = tokens[2]
            start = int(tokens[3])
            stop = start + len(tokens[9])
            annotes[ctg][start:stop] = 1
        buffo += 1
        # check the last entry of the buffer.
        if names[-1]['name'] != names[-2]['name']:
            yield names[-1]
        else:
            # annotate repeat.
            sam.seek(names[-1]['row'])
            tokens = sam.readline().split("\t")
            ctg = tokens[2]
            start = int(tokens[3])
            stop = start + len(tokens[9])
            annotes[ctg][start:stop] = 1
        buffo += 1
# update for buffer.
if buffo + buffstep > mmap.shape[0]:
buffs = buffo + (mmap.shape[0] - buffo)
else:
buffs = buffo + buffstep
# yield poison pill
yield None
|
jim-bo/silp2
|
creation/align.py
|
Python
|
mit
| 10,726
|
from os import remove, mkdir, listdir, rmdir
from os.path import join, expanduser, isdir
from os.path import split as splitdir
import codecs
from shutil import copy2
indir = join(expanduser("~"),"Desktop")
orgdir = ""
bakdir = ""
with codecs.open(join(indir,"_diff.txt"), 'r', encoding='utf8') as diff:
# Read first line. Should contain original directory
line = diff.readline()
try:
line = line.replace("\n","").split("**")
if line[0] == "[ORG_DIR]":
orgdir = line[1]
except:
print("error: Bad logfile")
quit()
# Read second line. Should contain backup directory
line = diff.readline()
try:
line = line.replace("\n","").split("**")
if line[0] == "[BAK_DIR]":
bakdir = line[1]
except:
print("error: Bad logfile")
quit()
# If either of the directories weren't read in, then quit
print("orig: %s, bak: %s" % (orgdir, bakdir))
if orgdir == "" or bakdir == "":
print("error: Bad logfile")
quit()
with codecs.open(join(indir,"_log.txt"), 'w', encoding='utf8') as log:
log.write("Original directory: " + orgdir + "\n")
log.write("Backup directory : " + bakdir + "\n\n")
for line in diff:
if line.startswith("[ADD]"):
line = line.replace("\n","").split("**")
src = join(orgdir,line[1])
dst = join(bakdir,line[1])
if not isdir(splitdir(dst)[0]):
print("Directory \'" + splitdir(dst)[0] + "\' does not exist. Creating directory.")
log.write("Directory \'" + splitdir(dst)[0] + "\' does not exist. Creating directory.\n")
mkdir(splitdir(dst)[0])
try:
print("Copying " + src + " to " + dst + "")
log.write("Copying " + src + " to " + dst + "\n")
copy2(src, dst)
except:
print("error: %s not copied" % join(orgdir,line[1]))
log.write("error: " + join(orgdir,line[1]) + " not copied\n")
elif line.startswith("[DEL]"):
line = line.replace("\n","").split("**")
dst = join(bakdir,line[1])
try:
print("Deleting " + dst + "")
log.write("Deleting " + dst + "\n")
remove(dst)
if listdir(splitdir(dst)[0]) == []:
print("Directory " + splitdir(dst)[0] + "is empty, removing")
log.write("Directory " + splitdir(dst)[0] + "is empty, removing\n")
rmdir(splitdir(dst)[0])
except:
print("error: %s not removed" % join(orgdir,line[1]))
log.write("error: " + join(orgdir,line[1]) + " not removed\n")
elif line.startswith("====Removed files===="):
print("\n\n")
log.write("\n\n")
|
RagingRoosevelt/BackupMediaSyncer
|
_sync.py
|
Python
|
mit
| 3,135
|
from operator import itemgetter
import gym
from gym import spaces
from gym.utils import seeding
from .game import Game
from .card import Card
from .player import PlayerAction, PlayerTools
from .agents.random import AgentRandom
class LoveLetterEnv(gym.Env):
"""Love Letter Game Environment
The goal of hotter colder is to guess closer to a randomly selected number
After each step the agent receives an observation of:
0 - No guess yet submitted (only after reset)
1 - Guess is lower than the target
2 - Guess is equal to the target
3 - Guess is higher than the target
The rewards is calculated as:
(min(action, self.number) + self.range) / (max(action, self.number) + self.range)
Ideally an agent will be able to recognize the 'scent' of a higher reward and
increase the rate in which is guesses in that direction until the reward reaches
its maximum
"""
def __init__(self, agent_other, seed=451):
self.action_space = spaces.Discrete(15)
self.observation_space = spaces.Box(low=0, high=1, shape=(24,))
self._agent_other = AgentRandom(
seed) if agent_other is None else agent_other
self._seed(seed)
        self._reset()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
assert self.action_space.contains(action)
player_action = self.action_from_index(action)
if player_action is None:
return self._game.state(), -1, False, {"round": self._game.round()}
self._game, reward = LoveLetterEnv.advance_game(
self._game, player_action, self._agent_other)
done = self._game.over() or not PlayerTools.is_playing(
self._game.players()[0])
return self._game.state(), reward, done, {"round": self._game.round()}
def _reset(self):
self._game = Game.new(4, self.np_random.random_integers(5000000))
return self._game.state()
def force(self, game):
"""Force the environment to a certain game state"""
self._game = game
return game.state()
@staticmethod
def advance_game(game, action, agent):
"""Advance a game with an action
* Play an action
* Advance the game using the agent
* Return the game pending for the same player turn _unless_ the game ends
returns <game, reward>
"""
if not game.is_action_valid(action):
return game, -1
player_idx = game.player_turn()
game_current, _ = game.move(action)
while game_current.active():
if not game_current.is_current_player_playing():
game_current = game_current.skip_eliminated_player()
elif game_current.player_turn() != player_idx:
game_current, _ = game_current.move(agent.move(game_current))
else:
break
# print("Round", game.round(), '->', game_current.round(), ':', 'OVER' if game_current.over() else 'RUNN')
if game_current.over():
if game_current.winner() == player_idx:
return game_current, 15
else:
return game_current, -5
return game_current, 0
def action_by_score(self, scores, game=None):
"""
Returns best action based on assigned scores
return (action, score, idx)
"""
if len(scores) != 15:
raise Exception("Invalid scores length: {}".format(len(scores)))
game = self._game if game is None else game
assert game.active()
actions_possible = self.actions_set(game)
actions = [(action, score, idx) for action, score, idx in
zip(actions_possible,
scores,
range(len(actions_possible)))
if game.is_action_valid(action)]
        # choose the highest-scoring valid action (score is element 1)
        action = max(actions, key=itemgetter(1))
return action
def action_from_index(self, action_index, game=None):
"""Returns valid action based on index and game"""
game = self._game if game is None else game
action_candidates = self.actions_set(game)
actions = [(idx, action) for idx, action in
enumerate(action_candidates)
if game.is_action_valid(action) and idx == action_index]
return actions[0][1] if len(actions) == 1 else None
def actions_possible(self, game=None):
"""Returns valid (idx, actions) based on a current game"""
game = self._game if game is None else game
action_candidates = self.actions_set(game)
actions = [(idx, action) for idx, action in
enumerate(action_candidates)
if game.is_action_valid(action)]
return actions
def actions_set(self, game=None):
"""Returns all actions for a game"""
game = self._game if game is None else game
player_self = game.player_turn()
opponents = game.opponent_turn()
actions_possible = [
PlayerAction(Card.guard,
self.np_random.choice(opponents),
Card.priest,
Card.noCard),
PlayerAction(Card.guard,
self.np_random.choice(opponents),
Card.baron,
Card.noCard),
PlayerAction(Card.guard,
self.np_random.choice(opponents),
Card.handmaid,
Card.noCard),
PlayerAction(Card.guard,
self.np_random.choice(opponents),
Card.prince,
Card.noCard),
PlayerAction(Card.guard,
self.np_random.choice(opponents),
Card.king,
Card.noCard),
PlayerAction(Card.guard,
self.np_random.choice(opponents),
Card.countess,
Card.noCard),
PlayerAction(Card.guard,
self.np_random.choice(opponents),
Card.princess,
Card.noCard),
PlayerAction(Card.priest,
self.np_random.choice(opponents),
Card.noCard,
Card.noCard),
PlayerAction(Card.baron,
self.np_random.choice(opponents),
Card.noCard,
Card.noCard),
PlayerAction(Card.king,
self.np_random.choice(opponents),
Card.noCard,
Card.noCard),
PlayerAction(Card.prince,
self.np_random.choice(opponents),
Card.noCard,
Card.noCard),
PlayerAction(Card.prince, player_self, Card.noCard, Card.noCard),
PlayerAction(Card.handmaid, player_self, Card.noCard, Card.noCard),
PlayerAction(Card.countess, player_self, Card.noCard, Card.noCard),
PlayerAction(Card.princess, player_self, Card.noCard, Card.noCard)
]
return actions_possible
|
user01/love-letter
|
loveletter/env.py
|
Python
|
mit
| 7,457
|
# coding: utf-8
# This file is a part of VK4XMPP transport
# © simpleApps, 2013 — 2015.
from datetime import datetime
if not require("attachments"):
raise AssertionError("'forwardMessages' requires 'attachments'")
BASE_SPACER = chr(32) + unichr(183) + chr(32)  # " · " (space, middle dot, space)
def parseForwardedMessages(self, msg, depth=0):
body = ""
if msg.has_key("fwd_messages"):
spacer = BASE_SPACER * depth
body = "\n" + spacer
body += _("Forwarded messages:")
fwd_messages = sorted(msg["fwd_messages"], sortMsg)
for fwd in fwd_messages:
source = fwd["user_id"]
date = fwd["date"]
fwdBody = escape("", uhtml(compile_eol.sub("\n" + spacer + BASE_SPACER, fwd["body"])))
date = datetime.fromtimestamp(date).strftime("%d.%m.%Y %H:%M:%S")
name = self.vk.getUserData(source)["name"]
body += "\n%s[%s] <%s> %s" % (spacer + BASE_SPACER, date, name, fwdBody)
body += parseAttachments(self, fwd, spacer + (BASE_SPACER * 2))
if depth < MAXIMUM_FORWARD_DEPTH:
body += parseForwardedMessages(self, fwd, (depth + 1))
return body
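# Rough shape of the rendered block at depth 0 (illustrative):
#
#   Forwarded messages:
#    · [31.12.2014 23:59:59] <Some Name> message text
#    ·  · Forwarded messages:  (nested forwards go one spacer level deeper)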
if not isdef("MAXIMUM_FORWARD_DEPTH"):
MAXIMUM_FORWARD_DEPTH = 29
registerHandler("msg01", parseForwardedMessages)
|
unclev/vk.unclev.ru
|
extensions/forwarded_messages.py
|
Python
|
mit
| 1,156
|
from .core import Report, ReportResponse
__all__ = [
    'Report',
    'ReportResponse',
]
|
smices/mWorkerService
|
src/3rd/jpush/report/__init__.py
|
Python
|
mit
| 87
|
from flask import Flask
from os.path import expanduser
def create_app():
app = Flask(__name__)
app.config.from_pyfile(expanduser('~/.directory-tools.py'))
from directory_tools.frontend import frontend
app.register_blueprint(frontend)
return app
|
FunTimeCoding/directory-tools
|
directory_tools/application.py
|
Python
|
mit
| 269
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "aalto_fitness.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
jessenieminen/aalto-fitness-homepage
|
manage.py
|
Python
|
mit
| 256
|
__author__ = 'hujin'
import sys
from os import path
from twisted.internet import reactor
from twisted.web import server, resource
from twisted.python import log
from dockerman.storage import ServiceStore
from dockerman.api import Root
from dockerman.docker import Client
from dockerman.manager import Manager
from dockerman.event import Dispatcher
class Application(object):
def __init__(self, config):
self.config = config
log.startLogging(sys.stdout)
self._initialize()
def _initialize(self):
store_file = self.config['store_file']
if not path.exists(store_file):
open(store_file, 'w').close()
self.store = ServiceStore(store_file)
        self.store.application = self
host = self.config['docker_host']
port = self.config['docker_port']
self.client = Client(host, port)
self.dispatcher = Dispatcher(self)
self.manager = Manager(self.client, self.store, self.dispatcher)
def get_config(self, name, default=None):
try:
return self.config[name]
except KeyError:
return default
def _on_event(self, message):
self.manager.handle_event(message)
def start(self, port):
self.startHttpServer(port)
self.client.subscribe(self._on_event)
self.client.monitor()
reactor.run()
def startHttpServer(self, port):
site = server.Site(Root(self))
reactor.listenTCP(port, site)
|
bixuehujin/dockerman
|
dockerman/application.py
|
Python
|
mit
| 1,498
|
"""Unit tests for reviewboard.diffviewer.models.filediff."""
from itertools import chain
from reviewboard.diffviewer.models import DiffSet, FileDiff
from reviewboard.diffviewer.tests.test_diffutils import \
BaseFileDiffAncestorTests
from reviewboard.testing import TestCase
class FileDiffTests(TestCase):
"""Unit tests for FileDiff."""
fixtures = ['test_scmtools']
def setUp(self):
super(FileDiffTests, self).setUp()
diff = (
b'diff --git a/README b/README\n'
b'index 3d2b777..48272a3 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -2 +2,2 @@\n'
b'-blah blah\n'
b'+blah!\n'
b'+blah!!\n'
)
self.repository = self.create_repository(tool_name='Test')
self.diffset = DiffSet.objects.create(name='test',
revision=1,
repository=self.repository)
self.filediff = FileDiff(source_file='README',
dest_file='README',
diffset=self.diffset,
diff64=diff,
parent_diff64=b'')
def test_get_line_counts_with_defaults(self):
"""Testing FileDiff.get_line_counts with default values"""
counts = self.filediff.get_line_counts()
self.assertIn('raw_insert_count', counts)
self.assertIn('raw_delete_count', counts)
self.assertIn('insert_count', counts)
self.assertIn('delete_count', counts)
self.assertIn('replace_count', counts)
self.assertIn('equal_count', counts)
self.assertIn('total_line_count', counts)
self.assertEqual(counts['raw_insert_count'], 2)
self.assertEqual(counts['raw_delete_count'], 1)
self.assertEqual(counts['insert_count'], 2)
self.assertEqual(counts['delete_count'], 1)
self.assertIsNone(counts['replace_count'])
self.assertIsNone(counts['equal_count'])
self.assertIsNone(counts['total_line_count'])
diff_hash = self.filediff.diff_hash
self.assertEqual(diff_hash.insert_count, 2)
self.assertEqual(diff_hash.delete_count, 1)
def test_set_line_counts(self):
"""Testing FileDiff.set_line_counts"""
self.filediff.set_line_counts(
raw_insert_count=1,
raw_delete_count=2,
insert_count=3,
delete_count=4,
replace_count=5,
equal_count=6,
total_line_count=7)
counts = self.filediff.get_line_counts()
self.assertEqual(counts['raw_insert_count'], 1)
self.assertEqual(counts['raw_delete_count'], 2)
self.assertEqual(counts['insert_count'], 3)
self.assertEqual(counts['delete_count'], 4)
self.assertEqual(counts['replace_count'], 5)
self.assertEqual(counts['equal_count'], 6)
self.assertEqual(counts['total_line_count'], 7)
diff_hash = self.filediff.diff_hash
self.assertEqual(diff_hash.insert_count, 1)
self.assertEqual(diff_hash.delete_count, 2)
def test_long_filenames(self):
"""Testing FileDiff with long filenames (1024 characters)"""
long_filename = 'x' * 1024
filediff = FileDiff.objects.create(source_file=long_filename,
dest_file='foo',
diffset=self.diffset)
self.assertEqual(filediff.source_file, long_filename)
def test_diff_hashes(self):
"""Testing FileDiff with multiple entries and same diff data
deduplicates data
"""
data = (
b'diff -rcN orig_src/foo.c new_src/foo.c\n'
b'*** orig_src/foo.c\t2007-01-24 02:11:31.000000000 -0800\n'
b'--- new_src/foo.c\t2007-01-24 02:14:42.000000000 -0800\n'
b'***************\n'
b'*** 1,5 ****\n'
b' int\n'
b' main()\n'
b' {\n'
b'! \tprintf("foo\n");\n'
b' }\n'
b'--- 1,8 ----\n'
b'+ #include <stdio.h>\n'
b'+ \n'
b' int\n'
b' main()\n'
b' {\n'
b'! \tprintf("foo bar\n");\n'
b'! \treturn 0;\n'
b' }\n')
filediff1 = FileDiff.objects.create(diff=data, diffset=self.diffset)
filediff2 = FileDiff.objects.create(diff=data, diffset=self.diffset)
self.assertEqual(filediff1.diff_hash, filediff2.diff_hash)
def test_get_base_filediff(self):
"""Testing FileDiff.get_base_filediff"""
commit1 = self.create_diffcommit(
diffset=self.diffset,
commit_id='r1',
parent_id='r0',
diff_contents=(
b'diff --git a/ABC b/ABC\n'
b'index 94bdd3e..197009f 100644\n'
b'--- ABC\n'
b'+++ ABC\n'
b'@@ -1,1 +1,1 @@\n'
b'-line!\n'
b'+line..\n'
))
commit2 = self.create_diffcommit(
diffset=self.diffset,
commit_id='r2',
parent_id='r1',
diff_contents=(
b'diff --git a/README b/README\n'
b'index 94bdd3e..197009f 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -1,1 +1,1 @@\n'
b'-Hello, world!\n'
b'+Hi, world!\n'
))
commit3 = self.create_diffcommit(
diffset=self.diffset,
commit_id='r3',
parent_id='r2',
diff_contents=(
b'diff --git a/FOO b/FOO\n'
b'index 84bda3e..b975034 100644\n'
b'--- FOO\n'
b'+++ FOO\n'
b'@@ -1,1 +0,0 @@\n'
b'-Some line\n'
))
commit4 = self.create_diffcommit(
diffset=self.diffset,
commit_id='r4',
parent_id='r3',
diff_contents=(
b'diff --git a/README b/README\n'
b'index 197009f..87abad9 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -1,1 +1,1 @@\n'
b'-Hi, world!\n'
b'+Yo, world.\n'
))
self.diffset.finalize_commit_series(
cumulative_diff=(
b'diff --git a/ABC b/ABC\n'
b'index 94bdd3e..197009f 100644\n'
b'--- ABC\n'
b'+++ ABC\n'
b'@@ -1,1 +1,1 @@\n'
b'-line!\n'
b'+line..\n'
b'diff --git a/FOO b/FOO\n'
b'index 84bda3e..b975034 100644\n'
b'--- FOO\n'
b'+++ FOO\n'
b'@@ -1,1 +0,0 @@\n'
b'-Some line\n'
b'diff --git a/README b/README\n'
b'index 94bdd3e..87abad9 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -1,1 +1,1 @@\n'
b'-Hello, world!\n'
b'+Yo, world.\n'
),
validation_info=None,
validate=False,
save=True)
filediff1 = commit1.files.get()
filediff2 = commit2.files.get()
filediff3 = commit3.files.get()
filediff4 = commit4.files.get()
for commit in (commit1, commit2, commit3, commit4):
self.assertIsNone(filediff1.get_base_filediff(base_commit=commit))
self.assertIsNone(filediff2.get_base_filediff(base_commit=commit))
self.assertIsNone(filediff3.get_base_filediff(base_commit=commit))
self.assertIsNone(filediff4.get_base_filediff(base_commit=commit1))
self.assertEqual(filediff4.get_base_filediff(base_commit=commit2),
filediff2)
self.assertEqual(filediff4.get_base_filediff(base_commit=commit3),
filediff2)
self.assertEqual(filediff4.get_base_filediff(base_commit=commit4),
filediff2)
def test_get_base_filediff_without_commit(self):
"""Testing FileDiff.get_base_filediff without associated commit"""
filediff = self.create_filediff(self.diffset)
self.assertIsNone(filediff.get_base_filediff(base_commit=None))
def test_is_symlink_with_true(self):
"""Testing FileDiff.is_symlink with True"""
filediff = self.create_filediff(self.diffset)
filediff.is_symlink = True
# Explicitly test against the booleans, to avoid truthiness tests.
self.assertIs(filediff.is_symlink, True)
self.assertIs(filediff.extra_data.get('is_symlink'), True)
def test_is_symlink_with_false(self):
"""Testing FileDiff.is_symlink with False"""
filediff = self.create_filediff(self.diffset)
filediff.extra_data['is_symlink'] = True
filediff.is_symlink = False
# Explicitly test against the booleans, to avoid truthiness tests.
self.assertIs(filediff.is_symlink, False)
self.assertIs(filediff.extra_data.get('is_symlink'), False)
def test_old_symlink_target(self):
"""Testing FileDiff.old_symlink_target"""
filediff = self.create_filediff(self.diffset)
filediff.old_symlink_target = 'old/path'
self.assertEqual(filediff.old_symlink_target, 'old/path')
self.assertEqual(filediff.extra_data.get('old_symlink_target'),
'old/path')
def test_new_symlink_target(self):
"""Testing FileDiff.new_symlink_target"""
filediff = self.create_filediff(self.diffset)
filediff.new_symlink_target = 'new/path'
self.assertEqual(filediff.new_symlink_target, 'new/path')
self.assertEqual(filediff.extra_data.get('new_symlink_target'),
'new/path')
def test_old_unix_mode(self):
"""Testing FileDiff.old_unix_mode"""
filediff = self.create_filediff(self.diffset)
filediff.old_unix_mode = '0100644'
self.assertEqual(filediff.old_unix_mode, '0100644')
self.assertEqual(filediff.extra_data.get('old_unix_mode'), '0100644')
def test_new_unix_mode(self):
"""Testing FileDiff.new_unix_mode"""
filediff = self.create_filediff(self.diffset)
filediff.new_unix_mode = '0100750'
self.assertEqual(filediff.new_unix_mode, '0100750')
self.assertEqual(filediff.extra_data.get('new_unix_mode'), '0100750')
class FileDiffAncestorTests(BaseFileDiffAncestorTests):
"""Unit tests for FileDiff.get_ancestors"""
def setUp(self):
super(FileDiffAncestorTests, self).setUp()
self.set_up_filediffs()
def test_get_ancestors_minimal(self):
"""Testing FileDiff.get_ancestors with minimal=True"""
ancestors = {}
with self.assertNumQueries(9):
for filediff in self.filediffs:
ancestors[filediff] = filediff.get_ancestors(
minimal=True,
filediffs=self.filediffs)
self._check_ancestors(ancestors, minimal=True)
def test_get_ancestors_full(self):
"""Testing FileDiff.get_ancestors with minimal=False"""
ancestors = {}
with self.assertNumQueries(len(self.filediffs)):
for filediff in self.filediffs:
ancestors[filediff] = filediff.get_ancestors(
minimal=False,
filediffs=self.filediffs)
self._check_ancestors(ancestors, minimal=False)
def test_get_ancestors_cached(self):
"""Testing FileDiff.get_ancestors with cached results"""
ancestors = {}
for filediff in self.filediffs:
filediff.get_ancestors(minimal=True, filediffs=self.filediffs)
for filediff in self.filediffs:
with self.assertNumQueries(0):
ancestors[filediff] = filediff.get_ancestors(
minimal=True,
filediffs=self.filediffs)
self._check_ancestors(ancestors, minimal=True)
def test_get_ancestors_no_update(self):
"""Testing FileDiff.get_ancestors without caching"""
ancestors = {}
for filediff in self.filediffs:
with self.assertNumQueries(0):
ancestors[filediff] = filediff.get_ancestors(
minimal=True,
filediffs=self.filediffs,
update=False)
self._check_ancestors(ancestors, minimal=True)
def test_get_ancestors_no_filediffs(self):
"""Testing FileDiff.get_ancestors when no FileDiffs are provided"""
ancestors = {}
with self.assertNumQueries(2 * len(self.filediffs)):
for filediff in self.filediffs:
ancestors[filediff] = filediff.get_ancestors(minimal=True)
self._check_ancestors(ancestors, minimal=True)
def test_get_ancestors_cached_no_filediffs(self):
"""Testing FileDiff.get_ancestors with cached results when no
FileDiffs are provided
"""
ancestors = {}
for filediff in self.filediffs:
filediff.get_ancestors(minimal=True,
filediffs=self.filediffs)
with self.assertNumQueries(5):
for filediff in self.filediffs:
ancestors[filediff] = filediff.get_ancestors(minimal=True)
self._check_ancestors(ancestors, minimal=True)
def _check_ancestors(self, all_ancestors, minimal):
paths = {
(1, 'foo', 'PRE-CREATION', 'foo', 'e69de29'): ([], []),
(1, 'bar', '5716ca5', 'bar', '8e739cc'): ([], []),
(2, 'foo', 'e69de29', 'foo', '257cc56'): (
[],
[
(1, 'foo', 'PRE-CREATION', 'foo', 'e69de29'),
],
),
(2, 'bar', '8e739cc', 'bar', '0000000'): (
[],
[
(1, 'bar', '5716ca5', 'bar', '8e739cc'),
],
),
(2, 'baz', '7601807', 'baz', '280beb2'): ([], []),
(3, 'foo', '257cc56', 'qux', '03b37a0'): (
[],
[
(1, 'foo', 'PRE-CREATION', 'foo', 'e69de29'),
(2, 'foo', 'e69de29', 'foo', '257cc56'),
],
),
(3, 'bar', 'PRE-CREATION', 'bar', '5716ca5'): (
[
(1, 'bar', '5716ca5', 'bar', '8e739cc'),
(2, 'bar', '8e739cc', 'bar', '0000000'),
],
[],
),
(3, 'corge', 'e69de29', 'corge', 'f248ba3'): ([], []),
(4, 'bar', '5716ca5', 'quux', 'e69de29'): (
[
(1, 'bar', '5716ca5', 'bar', '8e739cc'),
(2, 'bar', '8e739cc', 'bar', '0000000'),
],
[
(3, 'bar', 'PRE-CREATION', 'bar', '5716ca5'),
],
),
}
by_details = self.get_filediffs_by_details()
for filediff, ancestors in all_ancestors.items():
rest_ids, minimal_ids = paths[(
filediff.commit_id,
filediff.source_file,
filediff.source_revision,
filediff.dest_file,
filediff.dest_detail,
)]
if minimal:
ids = minimal_ids
else:
ids = chain(rest_ids, minimal_ids)
expected_ancestors = [
by_details[details] for details in ids
]
self.assertEqual(ancestors, expected_ancestors)
|
reviewboard/reviewboard
|
reviewboard/diffviewer/tests/test_filediff.py
|
Python
|
mit
| 15,804
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv, sys, subprocess
from lib_time import *
# returns the given float number with only 2 decimals and a % appended
def float_to_percentage(float_number):
return("%0.2f" % float_number +"%")
# normalize the dictionary with the word count to generate the wordcloud
def normalize_dict(dic):
max_elem = max(dic.values())
for key, value in dic.items():
normalized_val = int((100 * value)/max_elem)
if normalized_val == 0:
normalized_val = 1
dic[key]= normalized_val
return dic
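# e.g. normalize_dict({'a': 5, 'b': 20}) -> {'a': 25, 'b': 100}
# (values are scaled relative to the max, with a floor of 1)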
# writes the normalized dict in a txt to be pasted manually in wordle.net
def dict_to_txt_for_wordle(dict_in, filename, sort_key=lambda t:t, value_key=lambda t:t):
if not dict_in:
dict_in = {'No hashtags found':1}
ordered_list = []
dict_in = normalize_dict(dict_in)
for key, value in dict_in.items():
ordered_list.append([key, value_key(value)])
ordered_list = sorted(ordered_list, key=sort_key, reverse=True)
    with open(filename, 'w', encoding='utf-8') as out:
        for item in ordered_list[:120]:
            # repeat each word proportionally to its normalized weight
            for _ in range(item[1]):
                out.write(item[0] + ' ')
# creates a CSV file of the dictionary data received
def top_something_to_csv(dict_in, filename, column_titles, reverse, sort_key, value_format=lambda t: t):
ordered_list = []
for key, value in dict_in.items():
ordered_list.append([key, value_format(value)])
ordered_list = sorted(ordered_list, key=sort_key, reverse=reverse)
with open(filename, 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(column_titles)
for item in ordered_list:
file_writer.writerow([item[0], item[1]])
csvfile.close()
# writes a CSV file in the following format:
# post_type | interactions_# | %_of_total
# where interactions can be shares, likes or comments
def int_dictionary_to_csv(int_dict_in, filename, column_titles):
total = sum(int_dict_in.values())
float_dict_post_percent = {}
for key, value in int_dict_in.items():
float_dict_post_percent[key] = (value * 100)/total
with open(filename, 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(column_titles)
for key, value in float_dict_post_percent.items():
file_writer.writerow([key, int_dict_in[key], float_to_percentage(value)])
# writes a CSV file in the following format:
# date(dd/mm/yyyy) | post_type | post_text| interactions_#
# where interactions can be shares, likes or comments and post_type can be status, photo, video or share
def int_dictionary_interactions_summary_to_csv(int_dict_comments_in, int_dict_shares_in, int_dict_likes_in, filename):
column_titles = ['post_type', 'comments_#', 'comments_%', '', 'likes_#', 'likes_%','', 'shares_#', 'shares_%',]
total_comments = sum(int_dict_comments_in.values())
total_shares = sum(int_dict_shares_in.values())
total_likes = sum(int_dict_likes_in.values())
with open(filename, 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(column_titles)
for key in int_dict_comments_in.keys():
pct_comments = (int_dict_comments_in[key]*100)/total_comments
pct_likes = (int_dict_likes_in[key]*100)/total_likes
pct_shares = (int_dict_shares_in[key]*100)/total_shares
file_writer.writerow([key, int_dict_comments_in[key], float_to_percentage(pct_comments),' ', int_dict_likes_in[key], float_to_percentage(pct_likes), ' ', int_dict_shares_in[key], float_to_percentage(pct_shares)])
# writes a CSV file in the following format:
# dd/mm/YYYY | post_type | post_text | interactions_#
# where interactions can be shares, likes or comments
def interactions_summary_to_csv(list_summary, filename, column_titles):
list_summary = sorted(list_summary, key = lambda x: x[0])
with open(filename, 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(column_titles)
for item in list_summary:
line = [timestamp_to_str_date(item[0])] + item[1:]
file_writer.writerow(line)
def comments_timeline():
list_datetime_commments = []
with open('comments.tab', 'rt', encoding="utf8") as csvfile:
csv_in = csv.reader(csvfile, delimiter='\t')
next(csv_in)
for line in csv_in:
str_raw_time = line[3]
temp_datetime = datetime.datetime.strptime(str_raw_time, "%Y-%m-%dT%H:%M:%S+0000")
list_datetime_commments.append(temp_datetime)
dict_int_str_date = comments_per_day(list_datetime_commments)
dict_int_str_date_hour = comments_per_hour(list_datetime_commments)
top_something_to_csv(dict_int_str_date, 'comments_per_day.csv', ['date', 'number_of_comments'], reverse=False, sort_key=lambda t: datetime.date(int(t[0][6:]), int(t[0][3:5]), int(t[0][:2])))
top_something_to_csv(dict_int_str_date_hour, 'comments_per_hour.csv', ['date', 'number_of_comments'], reverse=False, sort_key=lambda t: datetime.datetime.strptime(t[0], "%d/%m/%Y %H"))
def write_top_comment_replies(top_comments_list):
with open('top_comments_replies.csv', 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(['post_text', 'comment_text', 'likes_#'])
for item in top_comments_list:
if item[3] == '1':
file_writer.writerow([item[0], item[1], item[2]])
def write_top_comments(top_comments_list):
with open('top_comments.csv', 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(['post_text', 'comment_text', 'likes_#', 'is_reply'])
for item in top_comments_list:
file_writer.writerow(item)
def cleanup_posts():
subprocess.call(["sh", "cleanup_posts.sh"])
def cleanup_comments():
subprocess.call(["sh", "cleanup_comments.sh"])
|
ufeslabic/parse-facebook
|
lib_output.py
|
Python
|
mit
| 6,654
|
#!/usr/bin/env python
#
# Copyright (c) 2017 DevicePilot Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
from datetime import datetime
import zeromq_rx
def printIt(params):
print(datetime.now(),str(params))
if __name__ == "__main__":
zeromq_rx.init(printIt)
print("Watching...")
while True:
time.sleep(1)
|
DevicePilot/synth
|
synth/watch_zeromq.py
|
Python
|
mit
| 1,364
|
from __future__ import unicode_literals
from django.apps import AppConfig
class SequencerConfig(AppConfig):
name = 'sequencer'
|
CARPEM/GalaxyDocker
|
data-manager-hegp/analysisManager/analysismanager/sequencer/apps.py
|
Python
|
mit
| 134
|
import urllib.request
import time
def pega_preço():
pagina = urllib.request.urlopen('http://beans.itcarlow.ie/prices-loyalty.html')
texto = pagina.read().decode('utf8')
onde = texto.find('>$')
    inicio = onde + 2
fim = inicio + 4
return float(texto[inicio:fim])
opção = input("Do you want to buy now? (S/N) ")  # 'S' (sim) = yes
if opção == 'S':
    preço = pega_preço()
    print('You bought it for R$ %5.2f' % preço)
else:
    preço = 99.99
    while preço >= 4.74:
        preço = pega_preço()
        if preço >= 4.74:
            time.sleep(5)
    print('Buy! Price: %5.2f' % preço)
|
andersonsilvade/python_C
|
Python32/aulas/hakeandositeprecodescontowhiletemposimounao.py
|
Python
|
mit
| 607
|
import logging
import json
import os
import re
#from pprint import pprint
#from itertools import count
from urlparse import urljoin
from lxml import html
from thready import threaded
import requests
from scrapekit.util import collapse_whitespace
from connectedafrica.scrapers.util import MultiCSV
from connectedafrica.scrapers.util import make_path
log = logging.getLogger('npo')
URL_PATTERN = "http://www.npo.gov.za/PublicNpo/Npo/DetailsAllDocs/%s"
def make_cache(i):
    # shard cached responses across nested directories (keyed by modulo) so
    # millions of JSON files don't land in a single directory
    return make_path('.cache/npo/%s/%s/%s/%s/%s.json' % (
        i % 10, i % 100, i % 1000, i % 10000, i))
def make_urls():
for i in xrange(1, 16000000):
yield i
def scrape_npo(csv, i):
url = URL_PATTERN % i
cache_path = make_cache(i)
if not os.path.exists(cache_path):
res = requests.get(url)
page = {
'url': url,
'http_status': res.status_code,
'content': res.content.decode('utf-8')
}
with open(cache_path, 'wb') as fh:
json.dump(page, fh)
else:
with open(cache_path, 'rb') as fh:
page = json.load(fh)
if 'internal server error' in page['content']:
return
data = {}
doc = html.fromstring(page['content'])
data = {
'source_url': url,
'name': doc.find('.//h1').find('.//span').text.strip(),
'status': doc.find('.//h1').find('.//span[@class="npo-status"]').text,
'email': None
}
log.info("Scraping: %s", data['name'])
sub_titles = doc.findall('.//h5')
next_heading = None
for sub_title in sub_titles:
text = collapse_whitespace(sub_title.text)
if 'Registration No' in text:
data['reg_no'] = sub_title.find('./span').text.strip()
next_heading = 'category'
elif 'Your Name' in text:
next_heading = None
elif next_heading == 'category':
data['category'] = text
next_heading = 'legal_form'
elif next_heading == 'legal_form':
data['legal_form'] = text
next_heading = None
for span in doc.findall('.//span'):
text = collapse_whitespace(span.text)
if text is not None and 'Registered on' in text:
match = re.search(r'\d+.\d+.\d+', text)
if match:
data['reg_date'] = match.group(0)
for addr in doc.findall('.//div[@class="address"]'):
addr_type = collapse_whitespace(addr.find('./h4').text)
addrs = [collapse_whitespace(a) for a in
addr.xpath('string()').split('\n')]
addrs = '\n'.join([a for a in addrs if len(a)][1:])
if 'Physical' in addr_type:
data['physical_address'] = addrs
elif 'Postal' in addr_type:
data['postal_address'] = addrs
elif 'Contact' in addr_type:
data['contact_name'] = collapse_whitespace(addr.find('./p').text)
for li in addr.findall('.//li'):
contact = collapse_whitespace(li.xpath('string()'))
contact_type = {
'phone': 'phone',
'mailinfo': 'email',
'fax': 'fax'
}.get(li.get('class'))
data[contact_type] = contact
off_div = './/li[@data-sha-context-enttype="Npo.AppointedOfficeBearer"]'
csv.write('npo/npo_organisations.csv', data)
for li in doc.findall(off_div):
s = li.find('.//strong')
a = s.find('./a')
id_number = li.find('.//div/span')
if id_number is not None:
id_number = id_number.text
id_number = id_number.replace('(', '')
id_number = id_number.replace(')', '')
id_number = id_number.strip()
if 'Neither ID or Passport' in id_number:
id_number = None
officer = {
'role': collapse_whitespace(s.text).replace(' :', ''),
'npo_name': data['name'],
'source_url': url,
'officer_id': urljoin(url, a.get('href')),
'officer_name': collapse_whitespace(a.text),
'officer_id_number': id_number
}
csv.write('npo/npo_officers.csv', officer)
def scrape_npos():
csv = MultiCSV()
threaded(make_urls(), lambda i: scrape_npo(csv, i), num_threads=30)
csv.close()
if __name__ == '__main__':
scrape_npos()
|
ANCIR/siyazana.co.za
|
connectedafrica/scrapers/npo.py
|
Python
|
mit
| 4,358
|
'''This module contains some glue code encapsulating a "main" process.
The code here is aimed at wrapping the most common tasks involved in creating
and, especially, training a neural network model.
'''
import climate
import datetime
import downhill
import os
import warnings
from . import graph
from . import trainer
logging = climate.get_logger(__name__)
class Experiment:
'''This class encapsulates tasks for training and evaluating a network.
Parameters
----------
    network : :class:`Network <graph.Network>` or str
A specification for obtaining a model. If a string is given, it is
assumed to name a file containing a pickled model; this file will be
loaded and used. If a network instance is provided, it will be used
as the model. If a callable (such as a subclass) is provided, it
will be invoked using the provided keyword arguments to create a
network instance.
'''
def __init__(self, network, *args, **kwargs):
if isinstance(network, str) and os.path.isfile(network):
self.load(network)
elif isinstance(network, graph.Network):
self.network = network
else:
assert network is not graph.Network, \
'use a concrete theanets.Network subclass ' \
'like theanets.{Autoencoder,Regressor,...}'
self.network = network(*args, **kwargs)
def create_trainer(self, train, algo='rmsprop'):
'''Create a trainer.
Additional keyword arguments are passed directly to the trainer.
Parameters
----------
train : str
A string describing a trainer to use.
algo : str
A string describing an optimization algorithm.
Returns
-------
trainer : :class:`Trainer <trainer.Trainer>`
A trainer instance to alter the parameters of our network.
'''
train = train.lower()
if train == 'sample':
return trainer.SampleTrainer(self.network)
if train.startswith('layer') or train.startswith('sup'):
return trainer.SupervisedPretrainer(algo, self.network)
if train.startswith('pre') or train.startswith('unsup'):
return trainer.UnsupervisedPretrainer(algo, self.network)
return trainer.DownhillTrainer(train, self.network)
def create_dataset(self, data, **kwargs):
'''Create a dataset for this experiment.
Parameters
----------
data : sequence of ndarray or callable
The values that you provide for data will be encapsulated inside a
:class:`Dataset <downhill.Dataset>` instance; see that class for
documentation on the types of things it needs. In particular, you
can currently pass in either a list/array/etc. of data, or a
callable that generates data dynamically.
Returns
-------
data : :class:`Dataset <downhill.Dataset>`
A dataset capable of providing mini-batches of data to a training
algorithm.
'''
default_axis = 0
if not callable(data) and not callable(data[0]) and len(data[0].shape) == 3:
default_axis = 1
name = kwargs.get('name', 'dataset')
b, i, s = 'batch_size', 'iteration_size', '{}_batches'.format(name)
return downhill.Dataset(
data,
name=name,
batch_size=kwargs.get(b, 32),
iteration_size=kwargs.get(i, kwargs.get(s)),
axis=kwargs.get('axis', default_axis))
def train(self, *args, **kwargs):
'''Train the network until the trainer converges.
All arguments are passed to :func:`itertrain`.
Returns
-------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
'''
monitors = None
for monitors in self.itertrain(*args, **kwargs):
pass
return monitors
def itertrain(self, train, valid=None, algorithm='rmsprop', **kwargs):
'''Train our network, one batch at a time.
This method yields a series of ``(train, valid)`` monitor pairs. The
``train`` value is a dictionary mapping names to monitor values
evaluated on the training dataset. The ``valid`` value is also a
dictionary mapping names to values, but these values are evaluated on
the validation dataset.
Because validation might not occur every training iteration, the
validation monitors might be repeated for multiple training iterations.
It is probably most helpful to think of the validation monitors as being
the "most recent" values that have been computed.
After training completes, the network attribute of this class will
contain the trained network parameters.
Parameters
----------
train : sequence of ndarray or :class:`downhill.Dataset`
A dataset to use when training the network. If this is a
``downhill.Dataset`` instance, it will be used directly as the
training datset. If it is another type, like a numpy array, it will
be converted to a ``downhill.Dataset`` and then used as the training
set.
valid : sequence of ndarray or :class:`downhill.Dataset`, optional
If this is provided, it will be used as a validation dataset. If not
provided, the training set will be used for validation. (This is not
recommended!)
algorithm : str or list of str, optional
One or more optimization algorithms to use for training our network.
If not provided, RMSProp will be used.
Yields
------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
'''
# set up datasets
if valid is None:
valid = train
if not isinstance(valid, downhill.Dataset):
valid = self.create_dataset(valid, name='valid', **kwargs)
if not isinstance(train, downhill.Dataset):
train = self.create_dataset(train, name='train', **kwargs)
# set up training algorithm(s)
if 'optimize' in kwargs:
warnings.warn(
'please use the "algorithm" keyword arg instead of "optimize"',
DeprecationWarning)
algorithm = kwargs.pop('optimize')
if isinstance(algorithm, str):
algorithm = algorithm.split()
# set up auto-saving if enabled
progress = kwargs.get('save_progress')
timeout = kwargs.get('save_every', 0)
if timeout < 0: # timeout < 0 is in minutes instead of iterations.
timeout *= 60
# loop over trainers, saving every N minutes/iterations if enabled
for algo in algorithm:
if not callable(getattr(algo, 'itertrain', None)):
algo = self.create_trainer(algo)
start = datetime.datetime.now()
for i, monitors in enumerate(algo.itertrain(train, valid, **kwargs)):
yield monitors
now = datetime.datetime.now()
elapsed = (now - start).total_seconds()
if i and progress and (
(timeout < 0 and elapsed > -timeout) or
(timeout > 0 and i % int(timeout) == 0)):
self.save(progress)
start = now
def save(self, path):
'''Save the current network to a pickle file on disk.
Parameters
----------
path : str
Location of the file to save the network.
'''
self.network.save(path)
def load(self, path):
'''Load a saved network from a pickle file on disk.
This method sets the ``network`` attribute of the experiment to the
loaded network model.
Parameters
----------
filename : str
Load the keyword arguments and parameters of a network from a pickle
file at the named path. If this name ends in ".gz" then the input
will automatically be gunzipped; otherwise the input will be treated
as a "raw" pickle.
Returns
-------
network : :class:`Network <graph.Network>`
A newly-constructed network, with topology and parameters loaded
from the given pickle file.
'''
self.network = graph.Network.load(path)
return self.network
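# Illustrative usage sketch (not part of the module; the Regressor class,
# layer sizes, and dataset names below are placeholders/assumptions):
#
#     exp = Experiment(theanets.Regressor, layers=(10, 20, 3))
#     exp.train(train_data, valid_data, algorithm='rmsprop')
#     exp.save('model.pkl.gz')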
|
masterkeywikz/seq2graph
|
src/theanets-0.6.1/theanets/main.py
|
Python
|
mit
| 9,363
|
import unittest
import asyncio
import io
import multiprocessing
import urllib.request
import time
import grole
def simple_server():
app = grole.Grole()
@app.route('/')
def hello(env, req):
return 'Hello, World!'
app.run(host='127.0.0.1')
class TestServe(unittest.TestCase):
def test_simple(self):
p = multiprocessing.Process(target=simple_server)
p.start()
time.sleep(0.1)
with urllib.request.urlopen('http://127.0.0.1:1234') as response:
html = response.read()
self.assertEqual(html, b'Hello, World!')
p.terminate()
def test_fileserver(self):
p = multiprocessing.Process(target=grole.main, args=[['-a', '127.0.0.1']])
p.start()
time.sleep(0.1)
with urllib.request.urlopen('http://127.0.0.1:1234/test/test.dat') as response:
html = response.read()
self.assertEqual(html, b'foo\n')
p.terminate()
    def test_https(self):
        p = multiprocessing.Process(target=simple_server)
        p.start()
        time.sleep(0.1)
        # the test server speaks plain HTTP, so an HTTPS request should fail
        with self.assertRaises(urllib.error.URLError):
            urllib.request.urlopen('https://127.0.0.1:1234')
        p.terminate()
|
witchard/grole
|
test/test_serve.py
|
Python
|
mit
| 1,174
|
# -*- coding: utf-8 -*-
# a hack for pytest to allow imports
if __package__ is None:
import sys
import os.path
sys.path[0:0] = [
os.path.dirname( # project_root
os.path.dirname( # tests
os.path.abspath(__file__) # this file
)
)
]
import httmock
import pytest
from six import moves
from iblocklist2ipset.networks import extract_networks, fetch_networks, \
convert_to_ipnetworks, ParseError
from tests import CommonTest
# noinspection PyUnresolvedReferences
class TestConvertToIPNetwork(object):
@pytest.mark.parametrize("input_", (
"HELLO:123.123.123.123-123.123.123.255",
"EVIL HACKER:150.250.250.250-150.251.250.250",
":150.250.250.250-150.251.250.250"
))
def test_ok(self, input_):
network = convert_to_ipnetworks(input_)
assert network and len(network) > 0
@pytest.mark.parametrize("input_", (
"HELLO:223.123.123.123-123.123.123.255",
"EVIL HACKER:150.250.250.250-",
":150.250.250.250-15",
"::15.12"
))
def test_nok(self, input_):
with pytest.raises(ParseError):
convert_to_ipnetworks(input_)
@pytest.mark.parametrize("input_", (
"",
"#commentary"
"#commented:127.0.0.1-127.0.0.12"
))
def test_empty(self, input_):
assert convert_to_ipnetworks(input_) == []
# noinspection PyUnresolvedReferences,PyMethodMayBeStatic
class TestFetchNetworks(CommonTest):
def test_ok(self):
with httmock.HTTMock(self.fake_response(self.FAKE_CONTENT)):
networks = [str(ntw) for ntw in fetch_networks("http://fake.url")]
assert set(networks) == set(self.FAKE_NETWORKS)
@pytest.mark.parametrize("input_", (
" ",
"#commentary",
"""
# commentary
# another commentary
"""
))
def test_empty(self, input_):
with httmock.HTTMock(self.fake_response(input_)):
assert list(fetch_networks("http://fake.url")) == []
# noinspection PyMethodMayBeStatic
class TestExtractNetworks(CommonTest):
def test_no_repeats(self):
urls = ["http://fake{0}.url".format(idx) for idx in moves.range(3)]
with httmock.HTTMock(self.fake_response(self.FAKE_CONTENT)):
networks = extract_networks(urls)
assert set(networks) == set(self.FAKE_NETWORKS)
|
9seconds/iblocklist2ipset
|
tests/test_networks.py
|
Python
|
mit
| 2,438
|
import django
import time
from uuid import uuid1
from datetime import timedelta
from threading import Thread
from django.template import Template
from django.test import TestCase, TransactionTestCase
from django.contrib.auth.models import User, Group
from django.utils import timezone
from django.core import management, mail
from django.core.mail import send_mail
from django.conf import settings
from django.db.models.signals import post_save
from alert.utils import BaseAlert, ALERT_TYPES, BaseAlertBackend, ALERT_BACKENDS,\
super_accepter, unsubscribe_user
from alert.exceptions import AlertIDAlreadyInUse, AlertBackendIDAlreadyInUse, CouldNotSendError
from alert.models import Alert, AlertPreference, AdminAlert
from alert.forms import AlertPreferenceForm, UnsubscribeForm
from alert.admin import AdminAlertAdmin
class SubclassTestingAlert(BaseAlert):
"""
This will never send any alerts - it's just a check to make sure that
subclassing alerts doesn't explode
"""
title = 'Welcome new users'
description = 'When a new user signs up, send them a welcome email'
signal = post_save
sender = User
default = True
def before(self, **kwargs):
return False
def get_applicable_users(self, instance, **kwargs):
return [instance]
class WelcomeAlert(SubclassTestingAlert):
"""
everything is inherited from SubclassTestingAlert
only change is that alerts will actually be sent
"""
def before(self, created, **kwargs):
return created
class DummyBackend(BaseAlertBackend):
title = "Dummy"
def send(self, alert):
pass
class EpicFailBackend(BaseAlertBackend):
"""
Backend that fails to send on the first try for every alert
"""
id = "EpicFail"
title = "Epic Fail"
def send(self, alert):
if not alert.failed:
raise CouldNotSendError
class SlowBackend(BaseAlertBackend):
"""
Backend that takes a full second to send an alert
"""
title = "Slow backend"
def send(self, alert):
time.sleep(1)
send_mail("asdf", 'woot', 'fake@gmail.com', ['superfake@gmail.com'])
#################################################
### Tests ###
#################################################
class AlertTests(TestCase):
def setUp(self):
pass
def test_alert_creation(self):
username = str(uuid1().hex)[:16]
email = "%s@example.com" % username
user = User.objects.create(username=username, email=email)
alerts = Alert.objects.filter(user=user)
self.assertEqual(len(alerts), len(ALERT_BACKENDS))
for alert in alerts:
self.assertEqual(alert.alert_type, "WelcomeAlert")
if alert.backend == 'EmailBackend':
self.assertEqual(alert.title, "email subject")
self.assertEqual(alert.body, "email body")
else:
self.assertEqual(alert.title, "default title")
self.assertEqual(alert.body, "default body")
def test_alert_registration_only_happens_once(self):
self.assertTrue(isinstance(ALERT_TYPES["WelcomeAlert"], WelcomeAlert))
self.assertEquals(len(ALERT_TYPES), 3)
def define_again():
class WelcomeAlert(BaseAlert):
title = 'Welcome new users'
signal = post_save
self.assertRaises(AlertIDAlreadyInUse, define_again)
def test_alert_id_is_key_in_ALERT_TYPES(self):
for key, alert in ALERT_TYPES.items():
self.assertEqual(key, alert.id)
class AlertBackendTests(TestCase):
def setUp(self):
username = str(uuid1().hex)[:16]
email = "%s@example.com" % username
self.user = User.objects.create(username=username, email=email)
def test_backend_creation(self):
self.assertTrue(isinstance(ALERT_BACKENDS["DummyBackend"], DummyBackend))
def test_backends_use_supplied_id(self):
self.assertTrue(isinstance(ALERT_BACKENDS["EpicFail"], EpicFailBackend))
def test_pending_manager(self):
self.assertEqual(Alert.pending.all().count(), len(ALERT_BACKENDS))
management.call_command("send_alerts")
self.assertEqual(Alert.pending.all().count(), 1)
def test_backend_registration_only_happens_once(self):
self.assertEquals(len(ALERT_BACKENDS), 4)
def define_again():
class DummyBackend(BaseAlertBackend):
title = 'dummy'
self.assertRaises(AlertBackendIDAlreadyInUse, define_again)
def test_backend_fails_to_send(self):
alert_that_should_fail = Alert.objects.filter(backend='EpicFail')[0]
before_send = timezone.now()
alert_that_should_fail.send()
after_send = timezone.now()
self.assertTrue(alert_that_should_fail.failed)
self.assertFalse(alert_that_should_fail.is_sent)
self.assertTrue(alert_that_should_fail.last_attempt is not None)
self.assertTrue(alert_that_should_fail.last_attempt > before_send)
self.assertTrue(alert_that_should_fail.last_attempt < after_send)
# and now retry
before_send = timezone.now()
alert_that_should_fail.send()
after_send = timezone.now()
self.assertFalse(alert_that_should_fail.failed)
self.assertTrue(alert_that_should_fail.is_sent)
self.assertTrue(alert_that_should_fail.last_attempt is not None)
self.assertTrue(alert_that_should_fail.last_attempt > before_send)
self.assertTrue(alert_that_should_fail.last_attempt < after_send)
class ConcurrencyTests(TransactionTestCase):
def setUp(self):
username = str(uuid1().hex)[:16]
email = "%s@example.com" % username
self.user = User.objects.create(username=username, email=email)
def testMultipleSimultaneousSendScripts(self):
# Sqlite uses an in-memory database, which does not work with the concurrency tests.
if "sqlite" in settings.DATABASES['default']['ENGINE']:
# Note that the alert django app will work fine with Sqlite. It's only the
# concurrency *tests* that do not work with sqlite.
return
self.assertEqual(len(mail.outbox), 0)
threads = [Thread(target=management.call_command, args=('send_alerts',)) for i in range(100)]
for t in threads:
t.start()
# space them out a little tiny bit
time.sleep(0.001)
[t.join() for t in threads]
self.assertEqual(len(mail.outbox), 2)
class EmailBackendTests(TestCase):
def setUp(self):
pass
class FormTests(TestCase):
def setUp(self):
self.user = User.objects.create(username='wootz', email='wootz@woot.com')
def testNoArgs(self):
pref_form = self.assertRaises(TypeError, AlertPreferenceForm)
unsubscribe_form = self.assertRaises(TypeError, UnsubscribeForm)
def testSimpleCase(self):
pref_form = AlertPreferenceForm(user=self.user)
unsubscribe_form = UnsubscribeForm(user=self.user)
self.assertEqual(len(pref_form.fields), len(ALERT_TYPES) * len(ALERT_BACKENDS))
self.assertEqual(len(unsubscribe_form.fields), len(ALERT_TYPES) * len(ALERT_BACKENDS))
def testUnsubscribeFormHasNoVisibleFields(self):
from django.forms import HiddenInput
unsubscribe_form = UnsubscribeForm(user=self.user)
for field in unsubscribe_form.fields.values():
self.assertTrue(isinstance(field.widget, HiddenInput))
def testSuperAccepterNone(self):
types = super_accepter(None, ALERT_TYPES)
backends = super_accepter(None, ALERT_BACKENDS)
self.assertEqual(len(types), len(ALERT_TYPES))
self.assertEqual(len(backends), len(ALERT_BACKENDS))
def testSuperAccepterSingle(self):
backends_by_class = super_accepter(EpicFailBackend, ALERT_BACKENDS)
backends_by_id = super_accepter("EpicFail", ALERT_BACKENDS)
self.assertEqual(len(backends_by_class), 1)
self.assertEqual(len(backends_by_id), 1)
self.assertEqual(backends_by_class, backends_by_id)
def testSuperAccepterList(self):
backends_by_class = super_accepter([EpicFailBackend, DummyBackend], ALERT_BACKENDS)
backends_by_id = super_accepter(["EpicFail", "DummyBackend"], ALERT_BACKENDS)
backends_by_mixed = super_accepter(["EpicFail", DummyBackend], ALERT_BACKENDS)
self.assertEqual(len(backends_by_class), 2)
self.assertEqual(len(backends_by_id), 2)
self.assertEqual(len(backends_by_mixed), 2)
self.assertEqual(backends_by_class, backends_by_id)
self.assertEqual(backends_by_class, backends_by_mixed)
self.assertEqual(backends_by_mixed, backends_by_id)
def testSuperAccepterDuplicates(self):
backends = super_accepter([EpicFailBackend, DummyBackend, "EpicFail"], ALERT_BACKENDS)
self.assertEqual(len(backends), 2)
def testUnsubscribe(self):
details = {
"alert_type": WelcomeAlert.id,
"backend": EpicFailBackend.id,
"user": self.user,
}
AlertPreference.objects.create(preference=True, **details)
self.assertEqual(AlertPreference.objects.get(**details).preference, True)
unsubscribe_user(self.user, alerts=WelcomeAlert, backends=EpicFailBackend)
self.assertEqual(AlertPreference.objects.get(**details).preference, False)
class AdminAlertTests(TestCase):
def setUp(self):
group = Group.objects.create(name='test_group')
self.admin_alert = AdminAlert(
title="Hello users!",
body="woooord!",
recipients=group
)
def send_it(self):
AdminAlertAdmin.save_model(AdminAlertAdmin(AdminAlert, None), None, self.admin_alert, None, None)
def testDraftMode(self):
self.admin_alert.draft = True
self.send_it()
self.assertEqual(Alert.objects.count(), 0)
self.send_it()
self.assertEqual(Alert.objects.count(), User.objects.count())
def testScheduling(self):
send_at = timezone.now() + timedelta(days=1)
self.admin_alert.send_at = send_at
self.send_it()
for alert in Alert.objects.all():
self.assertEqual(alert.when, send_at)
def testOnlySendOnce(self):
self.assertFalse(self.admin_alert.sent)
self.send_it()
self.assertTrue(self.admin_alert.sent)
alert_count = Alert.objects.count()
self.send_it()
self.assertEqual(alert_count, Alert.objects.count())
# Email Templates aren't supported before django 1.8
if django.VERSION[:2] >= (1, 8):
from django.template import engines
from alert.utils import render_email_to_string
def get_template_contents(tmpl):
fs_loader = engines['django'].engine.template_loaders[0]
source, origin = fs_loader.load_template_source(tmpl)
return source
class EmailTemplateTests(TestCase):
def check_template(self, name, cx):
template_file = "{0}.email".format(name)
expected_txt = get_template_contents("{0}.expected.txt".format(name))
expected_html = get_template_contents("{0}.expected.html".format(name))
rendered_default = render_email_to_string(template_file, cx)
rendered_txt = render_email_to_string(template_file, cx, alert_type="txt")
rendered_html = render_email_to_string(template_file, cx, alert_type="html")
# Default shard ext is "txt"
self.assertEqual(rendered_default, rendered_txt)
self.assertEqual(rendered_txt, expected_txt)
self.assertEqual(rendered_html, expected_html)
def test_basic_use(self):
self.check_template("basic", {
"username": "Alex"
})
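# Editor's sketch: check_template above suggests a single "<name>.email"
# template renders once per shard, with "txt" as the default extension.
# A hedged usage example (context values are hypothetical):
#
#   text_body = render_email_to_string("basic.email", {"username": "Alex"})
#   html_body = render_email_to_string("basic.email", {"username": "Alex"},
#                                      alert_type="html")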
|
jiaaro/django-alert
|
test_project/alert_tests/tests.py
|
Python
|
mit
| 12,663
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
class HTTPError(Exception):
'''
Represents an HTTP Exception when response status code >= 300.
:ivar int status:
the status code of the response
:ivar str message:
the message
:ivar list respheader:
the returned headers, as a list of (name, value) pairs
:ivar bytes respbody:
the body of the response
'''
def __init__(self, status, message, respheader, respbody):
self.status = status
self.respheader = respheader
self.respbody = respbody
Exception.__init__(self, message)
class HTTPResponse(object):
'''
Represents a response from an HTTP request.
:ivar int status:
the status code of the response
:ivar str message:
the message
:ivar dict headers:
the returned headers
:ivar bytes body:
the body of the response
'''
def __init__(self, status, message, headers, body):
self.status = status
self.message = message
self.headers = headers
self.body = body
class HTTPRequest(object):
'''
Represents an HTTP Request.
:ivar str host:
the host name to connect to
:ivar str method:
the method to use to connect (string such as GET, POST, PUT, etc.)
:ivar str path:
the uri fragment
:ivar dict query:
query parameters
:ivar dict headers:
header values
:ivar bytes body:
the body of the request.
'''
def __init__(self):
self.host = ''
self.method = ''
self.path = ''
self.query = {} # dict of query parameters: {name: value}
self.headers = {} # dict of header values: {name: value}
self.body = ''
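# Editor's sketch: a hedged illustration of how these containers fit
# together -- build an HTTPRequest, receive an HTTPResponse, and raise
# HTTPError when the status code is >= 300 (values are hypothetical):
#
#   request = HTTPRequest()
#   request.host = 'example.blob.core.windows.net'
#   request.method = 'GET'
#   request.path = '/container/blob'
#
#   response = HTTPResponse(404, 'Not Found', {}, b'')
#   if response.status >= 300:
#       raise HTTPError(response.status, response.message,
#                       list(response.headers.items()), response.body)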
|
Azure/azure-storage-python
|
azure-storage-common/azure/storage/common/_http/__init__.py
|
Python
|
mit
| 2,018
|
#import matplotlib.pyplot as plt
import numpy as np
from collections import deque
import numbers
"""
Created on Jun 29, 2016
@author: hans-werner
"""
def convert_to_array(x, dim=None, return_is_singleton=False):
"""
Convert point or list of points to a numpy array.
Inputs:
x: (list of) point(s) to be converted to an array. Allowable inputs are
1. a list of Vertices,
2. a list of tuples,
3. a list of numbers or (2,) arrays
4. a numpy array of the appropriate size
dim: int, (1 or 2) optional number used to adjudicate ambiguous cases.
return_is_singleton: bool, if True, return whether the input x is a
singleton.
Outputs:
x: double, numpy array containing the points in x.
If x is one-dimensional (i.e. a list of 1d Vertices, 1-tuples, or
a 1d vector), convert to an (n,1) array.
If x is two-dimensional (i.e. a list of 2d Vertices, 2-tuples, or
a 2d array), return an (n,2) array.
"""
is_singleton = False
if type(x) is list:
#
# Points in list
#
if all(isinstance(xi, Vertex) for xi in x):
#
# All points are of type vertex
#
x = [xi.coordinates() for xi in x]
x = np.array(x)
elif all(type(xi) is tuple for xi in x):
#
# All points are tuples
#
x = np.array(x)
elif all(isinstance(xi, numbers.Real) for xi in x):
#
# List of real numbers -> turn into (n,1) array
#
x = np.array(x)
x = x[:,np.newaxis]
elif all(type(xi) is np.ndarray for xi in x):
#
# list of (2,) arrays
#
x = np.array(x)
else:
raise Exception('For x, use arrays or lists '
'of tuples or vertices.')
elif isinstance(x, Vertex):
#
# A single vertex
#
x = np.array([x.coordinates()])
is_singleton = True
elif isinstance(x, numbers.Real):
if dim is not None:
assert dim==1, 'Dimension should be 1.'
x = np.array([[x]])
is_singleton = True
elif type(x) is tuple:
#
# A tuple
#
if len(x)==1:
#
# A oneple
#
x, = x
x = np.array([[x]])
is_singleton = True
elif len(x)==2:
#
# A tuple
#
x,y = x
x = np.array([[x,y]])
is_singleton = True
elif type(x) is np.ndarray:
#
# Points in numpy array
#
if len(x.shape)==1:
#
# x is a one-dimensional vector
if len(x)==1:
#
# x is a vector with one entry
#
if dim is not None:
assert dim==1, 'Incompatible dimensions'
x = x[:,np.newaxis]
elif len(x) == 2:
#
# x is a vector 2 entries: ambiguous
#
if dim == 2:
#
# Turn 2-vector into a (1,2) array
#
x = x[np.newaxis,:]
else:
#
# Turn vector into (2,1) array
#
x = x[:,np.newaxis]
else:
#
# Turn vector into (n,1) array
#
x = x[:,np.newaxis]
elif len(x.shape)==2:
assert x.shape[1]<=2,\
'Dimension of array should be at most 2'
else:
raise Exception('Only 1- or 2 dimensional arrays allowed.')
if return_is_singleton:
# Specify whether x is a singleton
return x, is_singleton
else:
return x
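# Editor's note (sketch): quick shape checks for the conventions documented
# above -- 1d inputs become (n,1) arrays, 2d inputs become (n,2) arrays:
#
#   convert_to_array(3.0, dim=1).shape           # (1, 1)
#   convert_to_array((0.5, 0.5)).shape           # (1, 2)
#   convert_to_array([1.0, 2.0, 3.0]).shape      # (3, 1)
#   convert_to_array(np.zeros(2), dim=2).shape   # (1, 2)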
class Markable(object):
"""
Description: Any object that can be assigned a flag
"""
def __init__(self):
"""
Constructor
"""
self.__flag = None
def mark(self, flag):
"""
"""
pass
def unmark(self, flag):
"""
Remove flag
"""
pass
def is_marked(self, flag):
"""
Determine whether
"""
pass
class Tree(object):
"""
Description: Tree object for storing and manipulating adaptively
refined quadtree meshes.
Attributes:
node_type: str, specifying node's relation to parents and/or children
'ROOT' (no parent node),
'BRANCH' (parent & children), or
'LEAF' (parent but no children)
address: int, list allowing access to node's location within the tree
General form [k0, k1, ..., kd], d=depth, ki in [0,...,n_children_i]
address = [] if ROOT node.
depth: int, depth within the tree (ROOT nodes are at depth 0).
parent: Tree/Mesh whose child this is
children: list of child nodes.
flag: set, of str/int/bool allowing tree nodes to be marked.
"""
def __init__(self, n_children=None, regular=True, flag=None,
parent=None, position=None, forest=None):
"""
Constructor
"""
#
# Set some attributes
#
self._is_regular = regular
self._parent = parent
self._forest = None
self._in_forest = False
self._node_position = position
#
# Set flags
#
self._flags = set()
if flag is not None:
if type(flag) is set:
# Add all flags in set
for f in flag:
self.mark(f)
else:
# Add single flag
self.mark(flag)
if parent is None:
#
# ROOT Tree
#
self._node_type = 'ROOT'
self._node_depth = 0
self._node_address = []
if self.is_regular():
# Ensure that the number of ROOT children is specified
assert n_children is not None, \
'ROOT node: Specify number of children.'
else:
# Not a regular tree: number of children 0 initially
n_children = 0
if forest is not None:
#
# Tree contained in a Forest
#
assert isinstance(forest, Forest), \
'Input "forest" should be an instance of the Forest class.'
#
# Add tree to forest
#
forest.add_tree(self)
self._in_forest = True
self._forest = forest
self._node_address = [self.get_node_position()]
else:
#
# Free standing ROOT cell
#
assert self.get_node_position() is None, \
'Unattached ROOT cell has no position.'
#
# Assign space for children
#
self._children = [None]*n_children
self._n_children = n_children
else:
#
# LEAF Node
#
position_missing = 'Position within parent cell must be specified.'
assert self.get_node_position() is not None, position_missing
self._node_type = 'LEAF'
# Determine cell's depth and address
self._node_depth = parent.get_depth() + 1
self._node_address = parent.get_node_address() + [position]
if regular:
#
# Regular tree -> same number of children in every generation
#
if n_children is not None:
assert n_children == self.get_parent().n_children(),\
'Regular tree: parent should have the same ' + \
'number of children as this node.'
else:
n_children = self.get_parent().n_children()
else:
n_children = 0
#
# Assign space for children
#
self._children = [None]*n_children
self._n_children = n_children
# Change parent type (from LEAF)
if parent.get_node_type() == 'LEAF':
parent.set_node_type('BRANCH')
def info(self):
"""
Display essential information about Tree
"""
print('')
print('-'*50)
print('Tree Info')
print('-'*50)
print('{0:10}: {1}'.format('Address', self._node_address))
print('{0:10}: {1}'.format('Type', self._node_type))
if self._node_type != 'ROOT':
print('{0:10}: {1}'.format('Parent', \
self.get_parent().get_node_address()))
print('{0:10}: {1}'.format('Position', self._node_position))
print('{0:10}: {1}'.format('Flags', self._flags))
if self.has_children():
child_string = ''
for i in range(len(self._children)):
child = self.get_child(i)
if child is not None:
child_string += str(i) + ': 1, '
else:
child_string += str(i) + ': 0, '
print('{0:10}: {1}'.format('Children',child_string))
else:
child_string = 'None'
print('{0:10}: {1}'.format('Children',child_string))
print('')
def get_node_type(self):
"""
Returns whether node is a ROOT, a BRANCH, or a LEAF
"""
return self._node_type
def get_node_position(self):
"""
Returns position of current node within parent/forest
"""
return self._node_position
def set_node_type(self, node_type):
"""
Sets a node's type
"""
assert node_type in ['ROOT', 'BRANCH', 'LEAF'], \
'Input "node_type" should be "ROOT", "BRANCH", or "LEAF".'
if node_type == 'ROOT':
assert not self.has_parent(), \
'ROOT nodes should not have a parent.'
elif node_type == 'LEAF':
assert not self.has_children(), \
'LEAF nodes should not have children.'
elif node_type == 'BRANCH':
assert self.has_parent(),\
'BRANCH nodes should have a parent.'
self._node_type = node_type
def get_node_address(self):
"""
Return address of the node
"""
return self._node_address
def get_depth(self):
"""
Return depth of current node
"""
return self._node_depth
def tree_depth(self, flag=None):
"""
Return the maximum depth of the tree
"""
depth = self.get_depth()
if self.has_children():
for child in self.get_children(flag=flag):
d = child.tree_depth()
if d > depth:
depth = d
return depth
def in_forest(self):
"""
Determine whether a (ROOT)cell lies within a forest
"""
return self._in_forest
def get_forest(self):
"""
Returns the forest containing the node
"""
return self._forest
def plant_in_forest(self, forest, position):
"""
Modify own attributes to reflect node's containment within a forest
"""
assert self.get_node_type() == 'ROOT', \
'Only ROOT nodes are in the forest.'
self._node_position = position
self._node_address = [position]
self._in_forest = True
self._forest = forest
def remove_from_forest(self):
"""
Remove node from forest
"""
self._in_forest = False
self._node_position = None
self._node_address = []
self._forest = None
def is_regular(self):
"""
Determine whether the node is a regular tree, i.e. one in which all
subnodes have the same number of children.
"""
return self._is_regular
def mark(self, flag=None, recursive=False, reverse=False):
"""
Mark Tree and its progeny/ancestors
Inputs:
flag: int, optional label used to mark node
recursive: bool, also mark all sub-/super nodes
"""
if flag is None:
#
# No flag specified: add "True" flag
#
self._flags.add(True)
else:
#
# Add given flag
#
self._flags.add(flag)
#
# Add flag to progeny/parents
#
if recursive:
if reverse:
#
# Mark ancestors
#
if self.has_parent():
parent = self.get_parent()
parent.mark(flag=flag, recursive=recursive, \
reverse=reverse)
else:
#
# Mark progeny
#
if self.has_children():
for child in self.get_children():
child.mark(flag=flag, recursive=recursive)
def unmark(self, flag=None, recursive=False, reverse=False):
"""
Unmark Tree
Inputs:
flag: label to be removed
recursive: bool, also unmark all subcells
"""
#
# Remove label from own list
#
if flag is None:
# No flag specified -> delete all
self._flags.clear()
else:
# Remove specified flag (if present)
if flag in self._flags: self._flags.remove(flag)
#
# Remove label from children if applicable
#
if recursive:
if reverse:
#
# Unmark ancestors
#
if self.has_parent():
parent = self.get_parent()
parent.unmark(flag=flag, recursive=recursive, \
reverse=reverse)
else:
#
# Unmark progeny
#
if self.has_children():
for child in self.get_children():
child.unmark(flag=flag, recursive=recursive)
def is_marked(self,flag=None):
"""
Check whether cell is marked
Input: flag, label for the Tree: usually one of the following:
True (catchall), 'split' (split cell), 'count' (counting)
"""
if flag is None:
# No flag -> check whether set is empty
if self._flags:
return True
else:
return False
else:
# Check whether the given label is contained in the cell's set
return flag in self._flags
def has_parent(self, flag=None):
"""
Returns True if node has (flagged) parent node, False otherwise
"""
if flag is not None:
return self._parent is not None and self._parent.is_marked(flag)
else:
return self._parent is not None
def get_parent(self, flag=None):
"""
Return cell's parent, or first ancestor with given flag (None if there
are none).
"""
if flag is None:
if self.has_parent():
return self._parent
else:
if self.has_parent(flag):
parent = self._parent
if parent.is_marked(flag):
return parent
else:
return parent.get_parent(flag=flag)
def get_root(self):
"""
Find the ROOT cell for a given cell
"""
if self._node_type == 'ROOT':
return self
else:
return self._parent.get_root()
def has_children(self, position=None, flag=None):
"""
Determine whether node has children
Inputs:
position: int, position of the child node within self
flag: str/int/bool, required marker for positive answer
Output:
has_children: bool, true if self has (marked) children, false
otherwise.
"""
if position is None:
#
# Check for any children
#
if flag is None:
return any(child is not None for child in self._children)
else:
#
# Check for flagged children
#
for child in self._children:
if child is not None and child.is_marked(flag):
return True
return False
else:
#
# Check for child in specific position
#
# Ensure position is valid
assert position < self._n_children, \
'Position exceeds the number of children.'
if flag is None:
#
# No flag specified
#
return self._children[position] is not None
else:
#
# With flag
#
return (self._children[position] is not None) and \
self._children[position].is_marked(flag)
def get_child(self, position):
"""
Return the child in a given position
"""
assert position<self.n_children() and position>-self.n_children(), \
'Input "position" exceeds number of children.'
return self._children[position]
def get_children(self, flag=None, reverse=False):
"""
Iterator: Returns (flagged) children, in (reverse) order
Inputs:
flag: [None], optional marker
reverse: [False], option to list children in reverse order
(useful for the 'traverse' function).
Note: Only returns children that are not None
Use this to obtain a consistent iteration of children
"""
if self.has_children(flag=flag):
if not reverse:
#
# Go in usual order
#
for child in self._children:
if child is not None:
if flag is None:
yield child
elif child.is_marked(flag):
yield child
else:
#
# Go in reverse order
#
for child in reversed(self._children):
if child is not None:
if flag is None:
yield child
elif child.is_marked(flag):
yield child
def n_children(self):
"""
Returns the number of children
"""
return self._n_children
def remove(self):
"""
Remove node (self) from parent's list of children
"""
assert self.get_node_type() != 'ROOT', 'Cannot delete ROOT node.'
self.get_parent()._children[self._node_position] = None
def add_child(self):
"""
Add a child to current node (only works if node is not regular).
"""
assert not self.is_regular(),\
'Regular tree: add children by method "split".'
child = Tree(parent=self, regular=False, position=self.n_children())
self._children.append(child)
self._n_children += 1
def delete_children(self, position=None):
"""
Delete all sub-nodes of given node
"""
#
# Change children to None
#
if position is None:
for child in self.get_children():
child.remove()
else:
assert position < self.n_children(), \
'Position exceeds number of children '
child = self._children[position]
child.remove()
#
# Change node type from LEAF to BRANCH
#
if self._node_type == 'BRANCH' and not self.has_children():
self._node_type = 'LEAF'
def split(self, n_children=None):
"""
Split node into subnodes
"""
if self.is_regular():
#
# Regular tree: Number of grandchildren inherited
#
for i in range(self.n_children()):
#
# Instantiate Children
#
self._children[i] = Tree(parent=self, position=i)
else:
#
# Not a regular tree: Must specify number of children
#
assert self.n_children() == 0, \
'Cannot split irregular tree with children. ' + \
'Use "add_child" method.'
for i in range(n_children):
#
# Instantiate Children
#
self.add_child()
def traverse(self, queue=None, flag=None, mode='depth-first'):
"""
Iterator: Return current cell and all its (flagged) sub-cells
Inputs:
flag [None]: cell flag
mode: str, type of traversal
'depth-first' [default]: Each cell's progeny is visited before
proceeding to next cell.
'breadth-first': All cells at a given depth are returned before
proceeding to the next level.
Output:
all_nodes: list, of all nodes in tree (marked with flag).
"""
if queue is None:
queue = deque([self])
while len(queue) != 0:
if mode == 'depth-first':
node = queue.pop()
elif mode == 'breadth-first':
node = queue.popleft()
else:
raise Exception('Input "mode" must be "depth-first"'+\
' or "breadth-first".')
if node.has_children():
reverse = True if mode=='depth-first' else False
for child in node.get_children(reverse=reverse):
queue.append(child)
if flag is not None:
if node.is_marked(flag):
yield node
else:
yield node
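# Editor's note (sketch): for a regular binary tree with root R whose
# children are A and B, and where A has children A0 and A1, the
# generator above yields
#
#   depth-first:   R, A, A0, A1, B
#   breadth-first: R, A, B, A0, A1
#
# e.g. nodes = list(root.traverse(mode='breadth-first'))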
def get_leaves(self, flag=None, subtree_flag=None, mode='breadth-first'):
"""
Return all marked LEAF nodes (nodes with no children) of current subtree
Inputs:
*flag: If flag is specified, return all leaf nodes within rooted
subtree marked with flag (or an empty list if there are none).
*subtree_flag: Label specifying the rooted subtree (rs) within which
to search for (flagged) leaves.
*mode: Method by which to traverse the tree ('breadth-first' or
'depth-first').
Outputs:
leaves: list, of LEAF nodes.
Note:
The rooted subtree must contain all ancestors of a marked node
"""
#
# Get all leaves of the subtree
#
leaves = []
for node in self.traverse(flag=subtree_flag, mode=mode):
#
# Iterate over all sub-nodes within subtree
#
if not node.has_children(flag=subtree_flag):
#
# Nodes without marked children are the subtree leaves
#
leaves.append(node)
#
# Return marked leaves
#
if flag is None:
return leaves
else:
return [leaf for leaf in leaves if leaf.is_marked(flag)]
def make_rooted_subtree(self, flag):
"""
Mark all ancestors of flagged node with same flag, to turn flag into
a subtree marker.
"""
#
# Search through all nodes
#
for node in self.get_root().traverse(mode='breadth-first'):
if node.is_marked(flag):
#
# If node is flagged, mark all its ancestors
#
ancestor = node
while ancestor.has_parent():
ancestor = ancestor.get_parent()
ancestor.mark(flag)
def is_rooted_subtree(self, flag):
"""
Determine whether a given flag defines a rooted subtree
Note: This takes roughly the same amount of work as make_rooted_subtree
"""
#
# Search through all nodes
#
for node in self.get_root().traverse(mode='breadth-first'):
if node.is_marked(flag):
#
# Check that ancestors of flagged node are also marked
#
ancestor = node
while ancestor.has_parent():
ancestor = ancestor.get_parent()
if not ancestor.is_marked(flag):
#
# Ancestor not marked: not a rooted subtree
#
return False
#
# No problems: it's a rooted subtree
#
return True
def find_node(self, address):
"""
Locate node by its address
"""
node = self.get_root()
if address != []:
#
# Not the ROOT node
#
for a in address:
if node.has_children() and a in range(node.n_children()):
node = node.get_child(a)
else:
return None
return node
def nearest_ancestor(self, flag):
"""
Returns the nearest ancestor with given flag
"""
if flag is None:
return self
candidate = self
while not candidate.is_marked(flag):
if candidate.get_depth()==0:
return None
else:
candidate = candidate.get_parent()
return candidate
def contains(self, tree):
"""
Determine whether self contains a given node
"""
if tree.get_depth() < self.get_depth():
return False
elif tree == self:
return True
else:
while tree.get_depth() > self.get_depth():
tree = tree.get_parent()
if self == tree:
return True
#
# Reached the end
#
return False
def coarsen(self, subforest_flag=None, coarsening_flag=None,
new_label=None, clean_up=True, debug=False):
"""
Coarsen the tree by merging (deleting or unlabeling) the children of
nodes marked with 'coarsening_flag'; see Forest.coarsen for a full
description of the inputs.
"""
if subforest_flag is not None:
#
# Subforest specified
#
if not self.is_marked(subforest_flag):
#
# Tree not in subforest, nothing to coarsen
#
return
#
# Check whether to coarsen
#
coarsen = False
if coarsening_flag is not None:
#
# Check whether tree is flagged (if applicable)
#
if self.is_marked(coarsening_flag):
coarsen = True
else:
#
# Are children LEAF nodes?
#
if self.has_children(flag=subforest_flag):
#
# Check if children are in subforest
#
for child in self.get_children():
#
# All children have to be LEAF nodes
#
coarsen = True
if child.get_node_type()!='LEAF':
coarsen = False
break
if new_label is not None:
#
# Apply new label to node (regardless of whether to coarsen)
#
self.mark(new_label)
if coarsen:
#
# Coarsen tree
#
if new_label is not None:
#
# If new_label specified, don't mess with children
#
pass
elif subforest_flag is not None:
#
# Remove subforest flag from children
#
for child in self.get_children():
child.unmark(subforest_flag)
else:
#
# Delete children
#
self.delete_children()
if coarsening_flag is not None and clean_up:
#
# Remove coarsening flag if necessary
#
self.unmark(coarsening_flag)
else:
#
# Recursion step, check children
#
if self.has_children(flag=subforest_flag):
for child in self.get_children():
child.coarsen(subforest_flag=subforest_flag,
coarsening_flag=coarsening_flag,
new_label=new_label,
clean_up=clean_up,
debug=debug)
class Forest(object):
"""
Collection of Trees
"""
def __init__(self, trees=None, n_trees=None):
"""
Constructor
"""
if trees is not None:
#
# List of trees specified
#
assert type(trees) is list, 'Trees should be passed as a list.'
self._trees = []
for tree in trees:
self.add_tree(tree)
elif n_trees is not None:
#
# No trees specified, only the number of slots
#
assert isinstance(n_trees, (int, np.integer)) and n_trees > 0,\
'Input "n_trees" should be a positive integer.'
self._trees = [None]*n_trees
else:
#
# No trees specified: create an empty list.
#
self._trees = []
def n_children(self):
"""
Return the number of trees
"""
return len(self._trees)
def is_regular(self):
"""
Determine whether the forest contains only regular trees
"""
for tree in self._trees:
if not tree.is_regular():
return False
return True
def depth(self):
"""
Determine the depth of the largest tree in the forest
"""
current_depth = 0
for tree in self.get_children():
new_depth = tree.tree_depth()
if new_depth > current_depth:
current_depth = new_depth
return current_depth
def traverse(self, flag=None, mode='depth-first'):
"""
Iterator: Visit every (flagged) node in the forest
Inputs:
flag [None]: node flag
mode: str, type of traversal
'depth-first' [default]: Each node's progeny is visited before
proceeding to next cell.
'breadth-first': All nodes at a given depth are returned before
proceeding to the next level.
Output:
all_nodes: list, of all nodes in tree (marked with flag).
"""
if mode=='depth-first':
queue = deque(reversed(self._trees))
elif mode=='breadth-first':
queue = deque(self._trees)
else:
raise Exception('Input "mode" must be "depth-first"'+\
' or "breadth-first".')
while len(queue) != 0:
if mode == 'depth-first':
node = queue.pop()
elif mode == 'breadth-first':
node = queue.popleft()
if node.has_children():
reverse = True if mode=='depth-first' else False
for child in node.get_children(reverse=reverse):
queue.append(child)
if flag is not None:
if node.is_marked(flag):
yield node
else:
yield node
def get_leaves(self, flag=None, subforest_flag=None, mode='breadth-first'):
"""
Return all marked LEAF nodes (nodes with no children) of current subtree
Inputs:
*flag: If flag is specified, return all leaf nodes within rooted
subtree marked with flag (or an empty list if there are none).
*subforest_flag: Label specifying the rooted subtrees (rs) within which
to search for (flagged) leaves.
Outputs:
leaves: list, of LEAF nodes.
Note:
The rooted subtree must contain all ancestors of a marked node
"""
#
# Get all leaves of the subtree
#
leaves = []
for node in self.traverse(flag=subforest_flag, mode=mode):
if not node.has_children(flag=subforest_flag):
leaves.append(node)
#
# Return marked leaves
#
if flag is None:
return leaves
else:
return [leaf for leaf in leaves if leaf.is_marked(flag)]
def root_subtrees(self, flag):
"""
Mark all ancestors of flagged node with same flag, to turn flag into
a subtree marker.
Note: If no node is flagged, then only flag the root nodes.
"""
#
# Search through all nodes
#
for root_node in self.get_children():
#
# Mark all root nodes with flag
#
root_node.mark(flag)
for node in root_node.traverse():
#
# Look for marked subnodes
#
if node.is_marked(flag):
#
# If node is flagged, mark all its ancestors & siblings
#
ancestor = node
while ancestor.has_parent():
ancestor = ancestor.get_parent()
# Mark ancestor
ancestor.mark(flag)
for child in ancestor.get_children():
# Mark siblings
child.mark(flag)
def subtrees_rooted(self, flag):
"""
Determine whether a given flag defines a rooted subtree
Note: This takes roughly the same amount of work as root_subtrees
"""
if flag is None:
#
# Forest itself is always one of rooted subtrees
#
return True
#
# Search through all nodes
#
for root_node in self.get_children():
#
# Check if root nodes are marked
#
if not root_node.is_marked(flag):
return False
else:
for node in root_node.traverse():
if node.is_marked(flag):
#
# Check that ancestors and siblings of flagged node
# are also marked
#
ancestor = node
while ancestor.has_parent():
ancestor = ancestor.get_parent()
if not ancestor.is_marked(flag):
#
# Ancestor not marked: not a rooted subtree
#
return False
for child in ancestor.get_children():
if not child.is_marked(flag):
#
# Sibling not marked
#
return False
#
# No problems: it's a forest of rooted subtrees
#
return True
def find_node(self, address):
"""
Locate a tree node by its address
Inputs:
address: list of branches along which to find node in tree
"""
# Reverse address
address = address[::-1]
node = self
while len(address)>0:
a = address.pop()
if not node.has_children() or a not in range(node.n_children()):
# Address leads out of the forest
return None
node = node.get_child(a)
return node
def has_children(self, flag=None):
"""
Determine whether the forest contains any trees
"""
if len(self._trees) > 0:
if flag is None:
return True
else:
return any(tree for tree in self.get_children(flag=flag))
else:
return False
def get_child(self, position):
"""
Returns the tree at a given position
"""
assert position < len(self._trees),\
'Input "position" exceeds number of trees.'
assert isinstance(position, (int, np.integer)), \
'Input "position" should be a nonnegative integer. '
return self._trees[position]
def get_children(self, flag=None, reverse=False):
"""
Iterate over (all) (flagged) trees in the forest
"""
if not reverse:
if flag is None:
return self._trees
else:
children = []
for tree in self._trees:
if tree.is_marked(flag):
children.append(tree)
return children
else:
if flag is None:
return self._trees[::-1]
else:
children = []
for tree in reversed(self._trees):
if tree.is_marked(flag):
children.append(tree)
return children
def add_tree(self, tree):
"""
Add a new tree to the current forest
"""
assert isinstance(tree, Tree), \
'Can only add trees to the forest.'
self._trees.append(tree)
tree.plant_in_forest(self, self.n_children()-1)
def remove_tree(self, position):
"""
Remove a tree from the forest.
"""
assert isinstance(position, (int, np.integer)), \
'Input "position" should be an integer.'
assert position < len(self._trees), \
'Input "position" exceeds number of trees.'
tree = self.get_child(position)
tree.remove_from_forest()
del self._trees[position]
def record(self, flag):
"""
Mark all trees in current forest with flag
"""
for tree in self.get_children():
tree.mark(flag, recursive=True)
def coarsen(self, subforest_flag=None, coarsening_flag=None,
new_label=None, clean_up=True, debug=False):
"""
Coarsen (sub)forest (delimited by 'subforest_flag', by (possibly)
merging (=deleting or unlabeling the siblings of) children of nodes
marked with 'coarsening_flag' and labeling said nodes with new_label.
If subforest_flag is None, coarsen all nodes
If new_label is None, then:
- either remove subforest flag (if there is one), or
- delete child nodes
Inputs:
*subforest_flag: flag, specifying the subforest being coarsened.
*coarsening_flag: flag, specyfying nodes in subforest whose children
are to be deleted/unmarked.
*new_label: flag, specifying the new subforest.
*clean_up: bool, remove coarsening_flag after use.
"""
#
# Ensure the subforest is rooted
#
if subforest_flag is not None:
self.root_subtrees(subforest_flag)
for tree in self.get_children():
tree.coarsen(subforest_flag=subforest_flag,
coarsening_flag=coarsening_flag,
new_label=new_label,
clean_up=clean_up, debug=debug)
"""
if coarsening_flag is not None:
#
# Coarsen
#
for tree in self.get_children():
coarsened = False
if tree.is_marked(coarsening_flag):
#
# Coarsen tree
#
if new_label is not None:
#
# Mark tree with new label and move on
#
tree.mark(new_label)
continue
elif subforest_flag is not None:
#
# Remove subforest flag from progeny
#
for child in tree.get_children(subforest_flag):
child.unmark(subforest_flag, recursive=True)
else:
#
# Delete children
#
tree.delete_children()
# Record coarsened
coarsened = True
elif tree.get_node_type()=='LEAF':
#
# Already a leaf: no need to coarsen
#
coarsened = True
while not coarsened:
if subforest_flag is not None:
if tree.has_children(subforest_flag):
for child in tree.get_children():
else:
pass
for child in tree.get_children():
pass
else:
#
# Don't coarsen yet, go to children
#
if tree.has_children(subforest_flag):
for child in tree.get_children():
pass
if clean_up:
to_clean = []
#
# Look for marked leaves within the submesh
#
for leaf in self.get_leaves(subforest_flag=subforest_flag):
#
# During coarsening, some leaves may already be unmarked
#
if debug:
print('leaf info')
leaf.info()
if subforest_flag is not None:
if not leaf.is_marked(subforest_flag):
continue
#
# Find nodes that must be coarsened
#
if not leaf.has_parent():
if debug:
print('ROOT Node')
#
# Leaf without parent is a ROOT: must be part of the new mesh.
#
if new_label is not None:
#
# Mark leaf with new_label
#
leaf.mark(new_label)
if clean_up and coarsening_flag is not None:
#
# Remove coarsening flag
#
to_clean.append(leaf)
# On to the next leaf
continue
#
# Can get parent
#
parent = leaf.get_parent()
if debug:
print('LEAF has parent')
parent.info()
#
# Determine whether to coarsen
#
if coarsening_flag is None:
coarsen = True
elif parent.is_marked(coarsening_flag):
coarsen = True
if clean_up:
#
# Remove coarsening flag
#
parent.unmark(coarsening_flag, recursive=True)
else:
coarsen = False
if debug:
print('Coarsen', coarsen)
if not coarsen:
#
# Don't coarsen
#
if new_label is not None:
#
# Apply new label to leaf and siblings
#
for child in parent.get_children():
child.mark(new_label)
# Move to the next LEAF
continue
else:
#
# Coarsen
#
if subforest_flag is None and new_label is None:
#
# Delete marked node's children
#
parent.delete_children()
if debug:
print('Deleting children')
parent.info()
elif new_label is None:
#
# Remove 'subforest_label' from leaf and siblings
#
for child in parent.get_children():
child.unmark(subforest_flag)
if debug:
print('Removing subforest_flag')
for child in parent.get_children():
print(child.is_marked(subforest_flag))
else:
#
# Mark parents with new_label
#
parent.mark(new_label)
if debug:
print('Marking parent with new label')
parent.info()
if clean_up and coarsening_flag is not None:
#
# Remove coarsening flag
#
parent.unmark(coarsening_flag)
if debug:
print('removing flag', coarsening_flag)
parent.info()
#
# Apply new label to coarsened submesh if necessary
#
if new_label is not None:
self.root_subtrees(new_label)
"""
def refine(self, subforest_flag=None, refinement_flag=None, new_label=None,
clean_up=True):
"""
Refine (sub)forest (delimited by 'subforest_flag'), by (possibly)
splitting (subforest)nodes with refinement_flag and marking their
children (with new_label).
Inputs:
subforest_flag: flag, used to specify the subforest being refined
refinement_flag: flag, specifying the nodes within the submesh that
are being refined.
new_label: flag, new label to be applied to refined submesh
clean_up: bool, remove the "refinement_flag" once the cell is split.
"""
#
# Ensure that the subforest is rooted
#
if subforest_flag is not None:
self.root_subtrees(subforest_flag)
#
# Look for marked leaves within the submesh
#
for leaf in self.get_leaves(subforest_flag=subforest_flag):
#
# Mark tree with new label to ensure new forest contains old subforest
#
if new_label is not None:
leaf.mark(new_label)
#
# If the refinement flag is used, ensure that the node is marked
# before continuing.
#
if refinement_flag is not None:
if not leaf.is_marked(refinement_flag):
continue
#
# Add new children if necessary
#
if not leaf.has_children():
leaf.split()
#
# Label each (new) child
#
for child in leaf.get_children():
if new_label is None and subforest_flag is None:
#
# No labels specified: do nothing
#
continue
elif new_label is None:
#
# No new label given, use the subforest label
#
child.mark(subforest_flag)
else:
#
# New label given, mark child with new label
#
child.mark(new_label)
#
# Remove refinement flag
#
if refinement_flag is not None and clean_up:
leaf.unmark(refinement_flag)
#
# Label ancestors of newly labeled children
#
if new_label is not None:
self.root_subtrees(new_label)
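# Editor's sketch: a hedged example of the label workflow used by refine
# and coarsen above -- flags delimit nested subforests, so successive
# meshes can share one Forest (the flag names below are hypothetical):
#
#   forest = Forest(trees=[Tree(n_children=2)])
#   forest.record('mesh0')                    # label the current mesh
#   for leaf in forest.get_leaves(subforest_flag='mesh0'):
#       leaf.mark('refine')                   # select cells to split
#   forest.refine(subforest_flag='mesh0',
#                 refinement_flag='refine', new_label='mesh1')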
class Vertex(object):
"""
Description:
Attributes:
coordinates: double, tuple (x,y)
flag: boolean
Methods:
"""
def __init__(self, coordinates):
"""
Description: Constructor
Inputs:
coordinates: double tuple, x- and y- coordinates of vertex
on_boundary: boolean, true if on boundary
"""
if isinstance(coordinates, numbers.Real):
#
# Coordinate passed as a real number 1D
#
dim = 1
coordinates = (coordinates,) # recast coordinates as tuple
elif type(coordinates) is tuple:
#
# Coordinate passed as a tuple
#
dim = len(coordinates)
assert dim <= 2, 'Only 1D and 2D meshes supported.'
else:
raise Exception('Enter coordinates as a number or a tuple.')
self.__coordinate = coordinates
self._flags = set()
self.__dim = dim
self.__periodic_pair = set()
self.__is_periodic = False
def coordinates(self):
"""
Return coordinates tuple
"""
return self.__coordinate
def dim(self):
"""
Return the dimension of the vertex
"""
return self.__dim
def mark(self, flag=None):
"""
Mark Vertex
Inputs:
flag: int, optional label
"""
if flag is None:
self._flags.add(True)
else:
self._flags.add(flag)
def unmark(self, flag=None):
"""
Unmark Vertex
Inputs:
flag: label to be removed
"""
#
# Remove label from own list
#
if flag is None:
# No flag specified -> delete all
self._flags.clear()
else:
# Remove specified flag (if present)
if flag in self._flags: self._flags.remove(flag)
def is_marked(self,flag=None):
"""
Check whether Vertex is marked
Input: flag, label for the Vertex: usually one of the following:
True (catchall), 'split' (split cell), 'count' (counting)
"""
if flag is None:
# No flag -> check whether set is empty
if self._flags:
return True
else:
return False
else:
# Check whether the given label is contained in the cell's set
return flag in self._flags
def is_periodic(self):
"""
Determine whether a Vertex lies on a periodic boundary
"""
return self.__is_periodic
def set_periodic(self, periodic=True):
"""
Label vertex periodic
"""
self.__is_periodic = periodic
def set_periodic_pair(self, cell_vertex_pair):
"""
Pair a periodic vertex with its periodic counterpart. The periodic
pair can be accessed by specifying the neighboring interval (in 1D)
or cell (in 2D).
Inputs:
cell_vertex_pair: tuple (cell, vertex), where:
In 1D: cell is the Interval on which the paired vertex resides
In 2D: cell is the Cell adjoining the paired vertex
vertex: the periodic Vertex identified with this one
See also: get_periodic_pair
"""
assert self.is_periodic(), 'Vertex should be periodic.'
if self.dim()==1:
#
# 1D: There is only one pairing for the entire mesh
#
interval, vertex = cell_vertex_pair
assert isinstance(vertex, Vertex), \
'Input "vertex" should be of class "Vertex".'
assert isinstance(interval, Interval), \
'Input "interval" should be of class "Interval".'
assert vertex.is_periodic(), \
'Input "vertex" should be periodic.'
#
# 1D: Store periodic pair
#
self.__periodic_pair.add((interval, vertex))
elif self.dim()==2:
#
# 2D
#
c_nb, v_nb = cell_vertex_pair
assert isinstance(v_nb, Vertex), \
'Input "cell_vertex_pair[1]" should be of class "Vertex".'
assert isinstance(c_nb, Cell), \
'Input "cell_vertex_pair[0]" should be of class "HalfEdge".'
assert v_nb.is_periodic(), \
'Input "cell_vertex_pair[1]" should be periodic.'
#
# Collect all possible c/v pairs in a set
#
cell_vertex_pairs = v_nb.get_periodic_pair().union(set([cell_vertex_pair]))
assert len(cell_vertex_pairs)!=0, 'Set of pairs should be nonempty'
for c_nb, v_nb in cell_vertex_pairs:
#
# Check whether v_nb already in list
#
in_list = False
for c, v in self.get_periodic_pair():
if v==v_nb and c.contains(c_nb):
#
# Vertex already appears in list
#
in_list = True
break
if not in_list:
#
# Not in list, add it
#
self.__periodic_pair.add((c_nb, v_nb))
def get_periodic_pair(self, cell=None):
"""
Returns the other vertex that is mapped onto self through periodicity
Input:
cell: Cell/HalfEdge in which paired vertex resides
"""
if cell is None:
#
# Return all cell, vertex pairs
#
return self.__periodic_pair
else:
#
# Return all paired vertices within a given cell
#
vertices = [v for c, v in self.__periodic_pair if c==cell]
return vertices
class HalfEdge(Tree):
"""
Description: Half-Edge in Quadtree mesh
Attributes:
base: Vertex, at base
head: Vertex, at head
twin: HalfEdge, in adjoining cell pointing from head to base
cell: QuadCell, lying to half edge's left
Methods:
"""
def __init__(self, base, head, cell=None, previous=None, nxt=None,
twin=None, parent=None, position=None, n_children=2,
regular=True, forest=None, flag=None, periodic=False):
"""
Constructor
Inputs:
base: Vertex, at beginning
head: Vertex, at end
parent: HalfEdge, parental
cell: QuadCell, lying to the left of half edge
previous: HalfEdge, whose head is self's base
nxt: HalfEdge, whose base is self's head
twin: Half-Edge, in adjoining cell pointing from head to base
position: int, position within parental HalfEdge
n_children: int, number of sub-HalfEdges
regular: bool, do all tree subnodes have the same no. of children?
forest: Forest, clever list of trees containing self
flag: (set of) int/string/bool, used to mark half-edge
periodic: bool, True if HalfEdge lies on a periodic boundary
"""
#
# Initialize Tree structure
#
Tree.__init__(self, n_children=n_children, regular=regular,
parent=parent, position=position, forest=forest, flag=flag)
#
# Assign head and base
#
self.set_vertices(base, head)
#
# Check parent
#
if parent is not None:
assert isinstance(parent, HalfEdge), \
'Parent should be a HalfEdge.'
#
# Assign incident cell
#
if cell is not None:
assert isinstance(cell, Cell), \
'Input "cell" should be a Cell object.'
self.__cell = cell
#
# Assign previous half-edge
#
if previous is not None:
assert isinstance(previous, HalfEdge), \
'Input "previous" should be a HalfEdge object.'
assert self.base()==previous.head(),\
'Own base should equal previous head.'
self.__previous = previous
#
# Assign next half-edge
#
if nxt is not None:
assert isinstance(nxt, HalfEdge), \
'Input "nxt" should be a HalfEdge object.'
assert self.head()==nxt.base(), \
'Own head should equal base of next.'
self.__next = nxt
#
# Mark periodic
#
self.__is_periodic = periodic
#
# Assign twin half-edge
#
if twin is not None:
assert isinstance(twin, HalfEdge), \
'Input "twin" should be a HalfEdge object.'
self.assign_twin(twin)
else:
self.__twin = None
def is_periodic(self):
"""
Returns True is the HalfEdge lies on a periodic boundary
"""
return self.__is_periodic
def set_periodic(self, periodic=True):
"""
Flag HalfEdge as periodic
"""
self.__is_periodic = periodic
def pair_periodic_vertices(self):
"""
Pair up HalfEdge vertices that are periodic
"""
if self.is_periodic():
#
# Pair up periodic vertices along half_edge
#
cell = self.cell()
cell_nb = self.twin().cell()
assert cell_nb is not None,\
'Periodic HalfEdge: Neighboring cell should not be None.'
#
# Pair up adjacent vertices
#
for v, v_nb in [(self.base(), self.twin().head()),
(self.head(), self.twin().base())]:
# Label vertices 'periodic'
v.set_periodic()
v_nb.set_periodic()
# Add own vertex-cell pair to own set of periodic pairs
v.set_periodic_pair((cell, v))
v_nb.set_periodic_pair((cell_nb, v_nb))
# Add adjoining vertex-cell pair to set of periodic pairs
v.set_periodic_pair((cell_nb, v_nb))
v_nb.set_periodic_pair((cell, v))
def base(self):
"""
Returns half-edge's base vertex
"""
return self.__base
def head(self):
"""
Returns half-edge's head vertex
"""
return self.__head
def get_vertices(self):
"""
Returns all half-edge vertices
"""
return [self.__base, self.__head]
def set_vertices(self, base, head):
"""
Define base and head vertices
"""
assert isinstance(base, Vertex) and isinstance(head, Vertex),\
'Inputs "base" and "head" should be Vertex objects.'
self.__base = base
self.__head = head
def cell(self):
"""
Returns the cell containing half-edge
"""
return self.__cell
def assign_cell(self, cell):
"""
Assign cell to half-edge
"""
self.__cell = cell
def twin(self):
"""
Returns the half-edge's twin
"""
return self.__twin
def assign_twin(self, twin):
"""
Assigns twin to half-edge
"""
if not self.is_periodic():
assert self.base()==twin.head() and self.head()==twin.base(),\
'Own head vertex should be equal to twin base vertex & vice versa.'
self.__twin = twin
def delete_twin(self):
"""
Deletes half-edge's twin
"""
self.__twin = None
def make_twin(self):
"""
Construct a twin HalfEdge
"""
assert not self.is_periodic(), \
'Twin HalfEdge of a periodic HalfEdge may have different vertices.'
if self.has_parent() and self.get_parent().twin() is not None:
twin_parent = self.get_parent().twin()
twin_position = 1-self.get_node_position()
else:
twin_parent = None
twin_position = None
twin = HalfEdge(self.head(), self.base(), parent=twin_parent,
position=twin_position)
self.assign_twin(twin)
twin.assign_twin(self)
return twin
def next(self):
"""
Returns the next half-edge, whose base is current head
"""
return self.__next
def assign_next(self, nxt):
"""
Assigns half edge to next
"""
if nxt is None:
return
else:
if not self.is_periodic():
assert self.head() == nxt.base(), \
'Own head vertex is not equal to next base vertex.'
self.__next = nxt
if nxt.previous() != self:
nxt.assign_previous(self)
def previous(self):
"""
Returns previous half-edge, whose head is current base
"""
return self.__previous
def assign_previous(self, previous):
"""
Assigns half-edge to previous
"""
if previous is None:
return
else:
if not self.is_periodic():
assert self.base() == previous.head(), \
'Own base vertex is not equal to previous head vertex.'
self.__previous = previous
if previous.next()!=self:
previous.assign_next(self)
def split(self):
"""
Refine current half-edge (overwrite Tree.split)
Note:
This function could potentially be generalized to HalfEdges with
multiple children (already implemented for Intervals).
"""
#
# Check if twin has been split
#
twin_split = False
twin = self.twin()
if twin is not None and twin.has_children():
t0, t1 = twin.get_children()
twin_split = True
else:
t0, t1 = None, None
#
# Determine whether to inherit midpoint vertex
#
if twin_split and not self.is_periodic():
#
# Share twin's midpoint Vertex
#
vm = t0.head()
else:
#
# Compute new midpoint vertex
#
x = convert_to_array([self.base().coordinates(),\
self.head().coordinates()])
xm = 0.5*(x[0,:]+x[1,:])
vm = Vertex(tuple(xm))
#
# Define own children and combine with twin children
#
c0 = HalfEdge(self.base(), vm, parent=self, twin=t1, position=0, periodic=self.is_periodic())
c1 = HalfEdge(vm, self.head(), parent=self, twin=t0, position=1, periodic=self.is_periodic())
#
# Assign new HalfEdges to twins if necessary
#
if twin_split:
t0.assign_twin(c1)
t1.assign_twin(c0)
#
# Save the babies
#
self._children[0] = c0
self._children[1] = c1
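# Editor's note (sketch): splitting at the midpoint means a HalfEdge from
# (0,0) to (2,0) gets children (0,0)->(1,0) and (1,0)->(2,0); if the twin
# was split first, its midpoint Vertex is reused rather than recomputed,
# which keeps the two half-edge trees geometrically consistent.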
def to_vector(self):
"""
Returns the vector associated with the HalfEdge
"""
x = convert_to_array([self.base().coordinates(),\
self.head().coordinates()])
return x[1,:] - x[0,:]
def length(self):
"""
Returns the HalfEdge's length
"""
return np.linalg.norm(self.to_vector())
def unit_normal(self):
"""
Returns the unit normal vector of HalfEdge, pointing to the right
Note: This only works in 2D
"""
x0, y0 = self.base().coordinates()
x1, y1 = self.head().coordinates()
u = np.array([y1-y0, x0-x1])
return u/np.linalg.norm(u, 2)
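# Editor's worked example (sketch): for a HalfEdge from (0,0) to (1,0),
# u = [y1-y0, x0-x1] = [0, -1], so the unit normal points in the -y
# direction, i.e. to the right of the direction of travel, as documented.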
def contains_points(self, points):
"""
Determine whether points lie on a HalfEdge
Inputs:
points: double,
"""
tol = 1e-10
x0 = convert_to_array(self.base().coordinates())
v = self.to_vector()
dim = x0.shape[1]
p = convert_to_array(points, dim)
n_points = p.shape[0]
in_half_edge = np.ones(n_points, dtype=bool)
if np.abs(v[0])<tol:
#
# Vertical line
#
assert np.abs(v[1])>tol, 'Half-edge is too short'
# Locate y-coordinate along segment
t = (p[:,1]-x0[:,1])/v[1]
# Discard points whose location parameter t is not in [0,1]
in_half_edge[np.abs(t-0.5)>0.5] = False
# Discard points whose x-values don't lie on Edge
in_half_edge[np.abs(p[:,0]-x0[0,0])>tol] = False
elif dim==1 or np.abs(v[1])<tol:
#
# Horizontal line
#
assert np.abs(v[0])>tol, 'Half-edge is too short'
# Locate x-coordinate along line
t = (p[:,0]-x0[:,0])/v[0]
# Check that t in [0,1]
in_half_edge[np.abs(t-0.5)>0.5] = False
if dim > 1:
# Check distance between y-values
in_half_edge[np.abs(p[:,1]-x0[0,1])>tol] = False
else:
#
# Skew line
#
s = (p[:,0]-x0[:,0])/v[0]
t = (p[:,1]-x0[:,1])/v[1]
# Check coordinates have same location parameters
in_half_edge[np.abs(t-s)>tol] = False
# Check that location parameter lies in [0,1]
in_half_edge[np.abs(t-0.5)>0.5] = False
return in_half_edge
def intersects_line_segment(self, line):
"""
Determine whether the HalfEdge intersects with a given line segment
Input:
line: double, list of two tuples
Output:
boolean, true if intersection, false otherwise.
Note: This only works in 2D
"""
# Express edge as p + t*r, t in [0,1]
p = np.array(self.base().coordinates())
r = np.array(self.head().coordinates()) - p
# Express line as q + u*s, u in [0,1]
q = np.array(line[0])
s = np.array(line[1]) - q
if abs(np.cross(r,s)) < 1e-14:
#
# Lines are parallel
#
if abs(np.cross(q-p,r)) < 1e-14:
#
# Lines are collinear
#
t0 = np.dot(q-p,r)/np.dot(r,r)
t1 = t0 + np.dot(s,r)/np.dot(r,r)
if (max(t0,t1) >= 0) and (min(t0,t1) <= 1):
#
# Line segments overlap
#
return True
else:
return False
else:
#
# Lines not collinear
#
return False
else:
#
# Lines not parallel
#
t = np.cross(q-p,s)/np.cross(r,s)
u = np.cross(p-q,r)/np.cross(s,r)
if 0 <= t <= 1 and 0 <= u <= 1:
#
# Line segments meet
#
return True
else:
return False
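# Editor's worked example (sketch): with p=(0,0), r=(1,1) and q=(0,1),
# s=(1,-1), one gets cross(r,s)=-2, t=cross(q-p,s)/cross(r,s)=0.5 and
# u=cross(p-q,r)/cross(s,r)=0.5; both lie in [0,1], so the segments
# intersect (at (0.5, 0.5)).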
def reference_map(self, x_in, mapsto='physical',
jac_p2r=False, jac_r2p=False,
hess_p2r=False, hess_r2p=False):
"""
Map points x from the reference interval to the physical HalfEdge or
vice versa.
Inputs:
x_in: double, (n,) array or a list of points to be mapped.
jac_p2r: bool, return jacobian of mapping from physical to
reference domain
jac_r2p: bool, return jacobian of mapping from reference to
physical domain
hess_p2r: bool, return hessian of mapping from physical to
reference domain
hess_r2p: bool, return hessian of mapping from reference to
physical domain
mapsto: str, 'physical' (map from reference to physical), or
'reference' (map from physical to reference).
Outputs:
x_trg: double, (n,) array of mapped points
mg: dictionary, of jacobians and hessians associated with the
mapping.
jac_p2r: double, n-list of physical-to-reference jacobians
jac_r2p: double, n-list of reference-to-physical jacobians
hess_p2r: double, n-list of physical-to-reference hessians
hess_r2p: double, n-list of reference-to-physical hessians
"""
#
# Preprocessing
#
if mapsto=='physical':
#
# Check that input is an array
#
assert type(x_in) is np.ndarray, \
'If "mapsto" is "physical", then input should '+\
'be an array.'
#
# Check that points contained in [0,1]
#
assert x_in.min()>=0 and x_in.max()<=1, \
'Reference points should be between 0 and 1.'
elif mapsto=='reference':
x_in = convert_to_array(x_in, dim=self.head().dim())
#
# Check that points lie on the HalfEdge
#
assert all(self.contains_points(x_in)), \
'Some points are not contained in the HalfEdge.'
#
# Compute mapped points
#
n = x_in.shape[0]
x0, y0 = self.base().coordinates()
x1, y1 = self.head().coordinates()
if mapsto == 'physical':
x_trg = [(x0 + (x1-x0)*xi, y0 + (y1-y0)*xi) for xi in x_in]
elif mapsto == 'reference':
if not np.isclose(x0, x1):
#
# Not a vertical line
#
x_trg = list((x_in[:,0]-x0)/(x1-x0))
elif not np.isclose(y0, y1):
#
# Not a horizontal line
#
x_trg = list((x_in[:,1]-y0)/(y1-y0))
#
# Compute the Jacobians and Hessians (if asked for)
#
if any([jac_r2p, jac_p2r, hess_r2p, hess_p2r]):
#
# Gradients of the mapping sought
#
# Initialize map gradients (mg) dictionary
mg = {}
if jac_r2p:
#
# Jacobian of mapping from reference to physical region
#
mg['jac_r2p'] = [np.array([[x1-x0],[y1-y0]])]*n
            if jac_p2r:
                #
                # Jacobian of mapping from physical to reference region
                # (stored as an n-list, consistent with the other entries)
                mg['jac_p2r'] = [np.array([[1/(x1-x0), 1/(y1-y0)]])]*n
if hess_r2p:
#
# Hessian of mapping from reference to physical region
#
mg['hess_r2p'] = [np.zeros((2,2))]*n
if hess_p2r:
#
                # Hessian of mapping from physical to reference region
#
mg['hess_p2r'] = [np.zeros((2,2))]*n
return x_trg, mg
else:
#
# No gradients of the mapping sought
#
return x_trg
"""
# TODO: Remove this...
#
# Compute the Jacobian
#
if jacobian:
if mapsto == 'physical':
#
# Derivative of mapping from refence to physical cell
#
jac = [np.array([[x1-x0],[y1-y0]])]*n
elif mapsto == 'reference':
#
# Derivative of inverse map
#
jac = np.array([[1/(x1-x0), 1/(y1-y0)]])
#
# Compute the Hessian (linear mapping, so Hessian = 0)
#
hess = np.zeros((2,2))
#
# Return output
#
if jacobian and hessian:
return x_trg, jac, hess
elif jacobian and not hessian:
return x_trg, jac
elif hessian and not jacobian:
return x_trg, hess
else:
return x_trg
"""
class Interval(HalfEdge):
"""
Interval Class (1D equivalent of a Cell)
"""
def __init__(self, vertex_left, vertex_right, n_children=2, \
regular=True, parent=None, position=None, forest=None, \
periodic=False):
"""
Constructor
"""
        assert vertex_left.dim()==1 and vertex_right.dim()==1, \
            'Interval endpoints should be one-dimensional Vertices.'
HalfEdge.__init__(self, vertex_left, vertex_right, \
n_children=n_children, regular=regular,\
parent=parent, position=position, forest=forest,\
periodic=periodic)
def get_vertices(self):
"""
Return interval endpoints
"""
return [self.base(), self.head()]
def get_vertex(self, position):
"""
Return a given vertex
"""
assert position in [0,1], 'Position should be 0 or 1.'
return self.base() if position==0 else self.head()
def assign_previous(self, prev):
"""
Assign a previous interval
"""
if prev is not None:
assert isinstance(prev, Interval), \
'Input "prev" should be an Interval.'
HalfEdge.assign_previous(self, prev)
def assign_next(self, nxt):
"""
Assign the next interval
"""
if nxt is not None:
assert isinstance(nxt, Interval), \
'Input "nxt" should be an Interval.'
HalfEdge.assign_next(self,nxt)
def get_neighbor(self, pivot, subforest_flag=None, mode='physical'):
"""
Returns the neighboring interval
Input:
pivot: int, 0 (=left) or 1 (=right) or Vertex
subforest_flag (optional): marker to specify submesh
mode: str, specify the type of neighbor search. When intervals are
arranged within a forest, two adjoining intervals may be on
different refinement levels.
mode='physical': return the interval adjoining input interval
on the mesh
mode='level-wise': return the neighboring interval on the same
level in the forest.
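
        Example (sketch; two adjoining root intervals):

            left = Interval(Vertex(0), Vertex(0.5))
            right = Interval(Vertex(0.5), Vertex(1))
            left.assign_next(right)
            right.assign_previous(left)
            left.get_neighbor(1)    # returns right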
"""
#
# Pivot is a vertex
#
if isinstance(pivot, Vertex):
if pivot==self.base():
pivot = 0
elif pivot==self.head():
pivot = 1
else:
raise Exception('Vertex not an interval endpoint')
if mode=='level-wise':
# =================================================================
# Return Level-wise Neighbor
# =================================================================
if pivot == 0:
#
# Left neighbor
#
nbr = self.previous()
if nbr is None:
#
# No previous, may still be periodic
#
v = self.base()
if v.is_periodic():
#
# Get coarsest cell periodically associated with v
#
for pair in v.get_periodic_pair():
nbr, dummy = pair
while nbr.get_depth()<self.get_depth():
#
# Search children until depth matches
#
if nbr.has_children(flag=subforest_flag):
nbr = nbr.get_child(0)
else:
#
# There are no children at same depth as interval
#
return None
#
# Found nbr at correct depth
#
return nbr
else:
#
# Return previous interval
#
return nbr
elif pivot == 1:
#
# Right neighbor
#
nbr = self.next()
if nbr is None:
#
# No next, may still be periodic
#
v = self.head()
if v.is_periodic():
#
# Get coarsest cell periodically associated with v
#
for pair in v.get_periodic_pair():
nbr, dummy = pair
while nbr.get_depth()<self.get_depth():
#
# Iterate through children until depth matches
#
if nbr.has_children(flag=subforest_flag):
nbr = nbr.get_child(1)
else:
#
# There are no cells matching cell's depth
#
return None
#
# Found nbr at correct depth
#
return nbr
else:
#
# Return next interval
#
return nbr
elif mode=='physical':
# =================================================================
# Return Physical Neighbor
# =================================================================
#
# Move left or right
#
if pivot == 0:
#
# Left neighbor
#
itv = self
prev = itv.previous()
#
# Go up the tree until there is a "previous"
#
while prev is None:
if itv.has_parent():
#
# Go up one level and check
#
itv = itv.get_parent()
prev = itv.previous()
else:
#
# No parent: check whether vertex is periodic
#
if itv.base().is_periodic():
for pair in itv.base().get_periodic_pair():
prev, dummy = pair
else:
return None
#
# Go down tree (to the right) as far as you can
#
nxt = prev
while nxt.has_children(flag=subforest_flag):
nxt = nxt.get_child(nxt.n_children()-1)
return nxt
elif pivot==1:
#
# Right neighbor
#
itv = self
nxt = itv.next()
#
# Go up the tree until there is a "next"
#
while nxt is None:
if itv.has_parent():
#
# Go up one level and check
#
itv = itv.get_parent()
nxt = itv.next()
else:
#
# No parent: check whether vertex is periodic
#
if itv.head().is_periodic():
for nxt, dummy in itv.head().get_periodic_pair():
pass
else:
return None
#
# Go down tree (to the left) as far as you can
#
prev = nxt
while prev.has_children(flag=subforest_flag):
prev = prev.get_child(0)
return prev
def split(self, n_children=None):
"""
Split a given interval into subintervals
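
        Example (sketch; bisect the unit interval):

            itv = Interval(Vertex(0), Vertex(1))
            itv.split()
            [c.base().coordinates() for c in itv.get_children()]
            # [(0,), (0.5,)]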
"""
#
# Determine interval endpoints
#
x0, = self.base().coordinates()
x1, = self.head().coordinates()
n = self.n_children()
#
# Loop over children
#
for i in range(n):
#
# Determine children base and head Vertices
#
if i==0:
base = self.base()
if i==n-1:
head = self.head()
else:
head = Vertex(x0+(i+1)*(x1-x0)/n)
#
# Define new child interval
#
subinterval = Interval(base, head, parent=self, \
regular=self.is_regular(),\
position=i, n_children=n_children)
#
# Store in children
#
self._children[i] = subinterval
#
# The head of the current subinterval
# becomes the base of the next one
base = subinterval.head()
#
# Assign previous/next
#
for child in self.get_children():
i = child.get_node_position()
#
# Assign previous
#
if i != 0:
                # Not the leftmost child: previous is the preceding sibling
child.assign_previous(self.get_child(i-1))
def bin_points(self, points, i_points=None, subforest_flag=None):
"""
Determine the set of smallest subintervals (within submesh) that
contain the set of points, as well as the indices of these.
Inputs:
points: set of points
i_points: indices of these points
subforest_flag: submesh flag
Outputs:
bins: (cell, index) tuples of cells containing subsets of the
points, and the points' indices.
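
        Example (sketch; after itv.split(), points are binned per child,
        where child_0/child_1 denote itv's two children):

            bins = itv.bin_points(np.array([[0.25], [0.75]]))
            # [(child_0, array([0])), (child_1, array([1]))]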
"""
assert all(self.contains_points(points)), \
'Not all points contained in cell'
sf = subforest_flag
# Convert points to array
x = convert_to_array(points)
if i_points is None:
i_points = np.arange(x.shape[0])
bins = []
#
# Cell is not in submesh
#
if not (sf is None or self.is_marked(flag=sf)):
#
# Move up tree until in submesh
#
if self.has_parent():
cell = self.get_parent()
bins.extend(cell.bin_points(x, i_points, subforest_flag=sf))
return bins
#
# Cell in submesh
#
if self.has_children(flag=sf):
#
# Points must be contained in some child cells
#
for child in self.get_children(flag=sf):
in_cell = child.contains_points(x)
if any(in_cell):
# Extract the points in child and bin
y = x[in_cell]
i_y = i_points[in_cell]
c_bin = child.bin_points(y,i_y, subforest_flag=sf)
bins.extend(c_bin)
# Remove points contained in child from list
x = x[~in_cell]
i_points = i_points[~in_cell]
else:
#
# Base case
#
bins.append((self, i_points))
        return bins
def contains_points(self, points):
"""
Determine which of the points in x are contained in the interval.
Inputs:
points: double, collection of 1D points
Outputs:
in_cell: bool, (n_points,) array whose ith entry is True if point i
is contained in interval, False otherwise.
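
        Example (sketch):

            itv = Interval(Vertex(0), Vertex(1))
            itv.contains_points([-0.5, 0.5])    # array([False,  True])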
"""
        # Get interval endpoints
x0, = self.base().coordinates()
x1, = self.head().coordinates()
# Convert points to (n_points,1) array
x = convert_to_array(points,1)
in_cell = np.ones(x.shape, dtype=bool)
in_cell[x<x0] = False
in_cell[x>x1] = False
return in_cell.ravel()
def reference_map(self, x_in, mapsto='physical',
jac_r2p=False, jac_p2r=False,
hess_r2p=False, hess_p2r=False,
jacobian=False, hessian=False):
"""
Map points x from the reference to the physical Interval or vice versa
Inputs:
x_in: double, (n,) array or a list of points to be mapped
jac_p2r: bool, return jacobian of mapping from physical to
reference domain
jac_r2p: bool, return jacobian of mapping from reference to
physical domain
hess_p2r: bool, return hessian of mapping from physical to
reference domain
            hess_r2p: bool, return hessian of mapping from physical to
reference domain
mapsto: str, 'physical' (map from reference to physical), or
'reference' (map from physical to reference).
Outputs:
x_trg: double, (n,) array of mapped points
mg: dictionary, of jacobians and hessians associated with the
mapping.
jac_p2r: double, n-list of physical-to-reference jacobians
jac_r2p: double, n-list of reference-to-physical jacobians
hess_p2r: double, n-list of physical-to-reference hessians
                hess_r2p: double, n-list of reference-to-physical hessians
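
        Example (sketch; the interval [2,4]):

            itv = Interval(Vertex(2), Vertex(4))
            x, mg = itv.reference_map(np.array([0.0, 0.5]), jac_r2p=True)
            # x = [[2.], [3.]] and mg['jac_r2p'] = [2, 2]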
"""
#
# Convert input to array
#
x_in = convert_to_array(x_in,dim=1)
#
# Compute mapped points
#
n = len(x_in)
x0, = self.get_vertex(0).coordinates()
x1, = self.get_vertex(1).coordinates()
#
# Compute target point
#
if mapsto == 'physical':
x_trg = x0 + (x1-x0)*x_in
elif mapsto == 'reference':
x_trg = (x_in-x0)/(x1-x0)
#
# Compute the Jacobians and Hessians (if asked for)
#
if any([jac_r2p, jac_p2r, hess_r2p, hess_p2r]):
#
# Gradients of the mapping sought
#
# Initialize map gradients (mg) dictionary
mg = {}
if jac_r2p:
#
# Jacobian of mapping from reference to physical region
#
mg['jac_r2p'] = [(x1-x0)]*n
if jac_p2r:
#
# Jacobian of mapping from physical to reference region
#
mg['jac_p2r'] = [1/(x1-x0)]*n
if hess_r2p:
#
# Hessian of mapping from reference to physical region
#
mg['hess_r2p'] = list(np.zeros(n))
if hess_p2r:
#
                # Hessian of mapping from physical to reference region
#
mg['hess_p2r'] = list(np.zeros(n))
return x_trg, mg
else:
#
# No gradients of the mapping sought
#
return x_trg
class Cell(Tree):
"""
Cell object: A two dimensional polygon
"""
def __init__(self, half_edges, n_children=0, parent=None, position=None, grid=None):
"""
Constructor
Inputs:
half_edges: HalfEdge, list of half-edges that determine the cell
n_children: int, number of sub-cells within cell
"""
Tree.__init__(self, n_children=n_children, parent=parent, \
position=position, forest=grid)
# =====================================================================
# Half-Edges
# =====================================================================
assert type(half_edges) is list, 'Input "half_edges" should be a list.'
#
# 2D Cells are constructed from lists of HalfEdges
#
for he in half_edges:
assert isinstance(he, HalfEdge), 'Not a HalfEdge.'
self._half_edges = half_edges
for he in self._half_edges:
# Assign self as incident cell
he.assign_cell(self)
#
# String half-edges together
#
n_hes = self.n_half_edges()
for i in range(n_hes):
he_nxt = self._half_edges[(i+1)%n_hes]
he_cur = self._half_edges[i]
he_cur.assign_next(he_nxt)
he_nxt.assign_previous(he_cur)
#
# Check that base of first halfedge coincides with head of last
#
assert half_edges[0].base()==half_edges[-1].head(),\
'HalfEdges should form a closed loop.'
#
# Check winding order
#
self.check_winding_order()
def n_half_edges(self):
"""
Return the number of half_edges
"""
return len(self._half_edges)
def get_half_edge(self, position):
"""
Return specific half_edge
"""
assert position>=0 and position<self.n_half_edges(),\
'Input "position" incompatible with number of HalfEdges'
return self._half_edges[position]
def get_half_edges(self):
"""
Iterate over half-edges
"""
return self._half_edges
def incident_half_edge(self, vertex, reverse=False):
"""
Returns the edge whose head (base) is the given vertex
"""
assert isinstance(vertex, Vertex), \
'Input "vertex" should be of type Vertex.'
for half_edge in self.get_half_edges():
if reverse:
#
# HalfEdge's base coincides with vertex
#
if half_edge.base()==vertex:
return half_edge
else:
#
# HalfEdge's head coincides with vertex
#
if half_edge.head()==vertex:
return half_edge
#
# No such HalfEdge
#
return None
def area(self):
"""
Determine the area of the polygon
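
        Note: Uses the shoelace-type formula

            area = (1/2)*sum_i (x_i + x_{i+1})*(y_{i+1} - y_i),

        which is positive when the vertices are ordered counterclockwise.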
"""
area = 0
for half_edge in self.get_half_edges():
x0, y0 = half_edge.base().coordinates()
x1, y1 = half_edge.head().coordinates()
area += (x0+x1)*(y1-y0)
return 0.5*area
def bounding_box(self):
"""
Returns the cell's bounding box in the form of a tuple (x0,x1,y0,y1),
so that the cell is contained in the rectangle [x0,x1]x[y0,y1]
"""
xy = convert_to_array(self.get_vertices(), 2)
x0 = np.min(xy[:,0], axis=0)
x1 = np.max(xy[:,0], axis=0)
y0 = np.min(xy[:,1], axis=0)
y1 = np.max(xy[:,1], axis=0)
return x0, x1, y0, y1
def check_winding_order(self):
"""
Check whether the winding order is correct
"""
winding_error = 'Cell vertices not ordered correctly.'
area = self.area()
assert area > 0, winding_error
def n_vertices(self):
"""
Return the number of vertices
"""
return self.n_half_edges()
def get_vertex(self, position):
"""
Return a specific vertex
"""
assert position < self.n_vertices(), 'Input "position" incorrect.'
half_edge = self.get_half_edge(position)
return half_edge.base()
def get_vertices(self):
"""
Returns the vertices of the current cell.
Outputs:
vertices: list of vertices
"""
return [half_edge.base() for half_edge in self.get_half_edges()]
def get_neighbors(self, pivot, flag=None):
"""
Returns all neighboring cells about a given pivot
Input:
pivot: Vertex/HalfEdge,
- If the pivot is a HalfEdge, then neighbors are cells
containing the twin HalfEdge
- If it's a Vertex, then the neighbors are all cells (of
the "same" size) that contain the vertex
flag: marker - only return neighbors with given marker
Output:
neighbor(s):
- If the pivot is a HalfEdge, then return a Cell/None
- If the pivot is a Vertex, then return a list of Cells
        Note: Neighbors are chosen via shared edges; in the sketches below,
            + is a neighbor of o, but x is not:

            -----           -----          -------------
            | x |           | x |          | + |       |
        ---*----        ----*----         -----   x
        | x |           | x | x |         | o |       |
        -----           ---------         -------------
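
        Example (sketch; assumes cell_a and cell_b share an edge whose
        half-edges have been assigned as twins):

            he = cell_a.get_half_edge(1)
            cell_a.get_neighbors(he)            # cell_b
            cell_a.get_neighbors(he.base())     # list of cells about vertex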
"""
if isinstance(pivot, HalfEdge):
# =================================================================
# Direction is given by a HalfEdge
# =================================================================
twin = pivot.twin()
if twin is not None:
#
# Halfedge has a twin
#
neighbor = twin.cell()
if flag is not None:
if neighbor.is_marked(flag):
return neighbor
else:
return None
else:
return neighbor
elif isinstance(pivot, Vertex):
# =================================================================
# Direction is determined by a Vertex
# =================================================================
#
# Anti-clockwise
#
neighbors = []
cell = self
while True:
#
# Get neighbor
#
half_edge = cell.incident_half_edge(pivot)
neighbor = cell.get_neighbors(half_edge)
#
# Move on
#
if neighbor is None:
break
elif neighbor==self:
#
# Full rotation or no neighbors
#
return neighbors
else:
#
                # Got a neighbor!
#
neighbors.append(neighbor)
cell = neighbor
if pivot.is_periodic() and len(pivot.get_periodic_pair(cell))!=0:
pivot = pivot.get_periodic_pair(cell)[0]
#
# Clockwise
#
neighbors_clockwise = []
cell = self
while True:
#
# Get neighbor
#
half_edge = cell.incident_half_edge(pivot, reverse=True)
neighbor = cell.get_neighbors(half_edge)
#
# Move on
#
if neighbor is None:
break
elif neighbor==self:
#
# Full rotation or no neighbors
#
return neighbors
else:
#
# Got a neighbor
#
neighbors_clockwise.append(neighbor)
cell = neighbor
if pivot.is_periodic() and len(pivot.get_periodic_pair(cell))!=0:
pivot = pivot.get_periodic_pair(cell)[0]
#
# Combine clockwise and anticlockwise neighbors
#
neighbors.extend(reversed(neighbors_clockwise))
if flag is not None:
return [nb for nb in neighbors if nb.is_marked(flag)]
else:
return neighbors
def contains_points(self, points, tol=1e-10):
"""
Determine whether the given cell contains a point
Input:
point: tuple (x,y), list of tuples, or (n,2) array
Output:
in_cell: boolean array (n,1), True if cell contains points,
False otherwise
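
        Example (sketch; unit square):

            vs = [Vertex((0,0)), Vertex((1,0)), Vertex((1,1)), Vertex((0,1))]
            hes = [HalfEdge(vs[i], vs[(i+1)%4]) for i in range(4)]
            cell = Cell(hes)
            cell.contains_points([(0.5, 0.5), (2, 2)])  # array([ True, False])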
"""
xy = convert_to_array(points, 2)
x,y = xy[:,0], xy[:,1]
n_points = len(x)
        in_cell = np.ones(n_points, dtype=bool)
for half_edge in self.get_half_edges():
#
# Traverse vertices in counter-clockwise order
#
x0, y0 = half_edge.base().coordinates()
x1, y1 = half_edge.head().coordinates()
# Determine which points lie outside cell
            pos_means_left = (y-y0)*(x1-x0) - (x-x0)*(y1-y0)
in_cell[pos_means_left<-tol] = False
"""
if len(in_cell)==1:
return in_cell[0]
else:
return in_cell
"""
return in_cell
def intersects_line_segment(self, line):
"""
Determine whether cell intersects with a given line segment
Input:
line: double, list of two tuples (x0,y0) and (x1,y1)
Output:
intersects: bool, true if line segment and cell intersect
Modified: 06/04/2016
"""
#
# Check whether line is contained in rectangle
#
if all(self.contains_points([line[0], line[1]])):
return True
#
# Check whether line intersects with any cell half_edge
#
for half_edge in self.get_half_edges():
if half_edge.intersects_line_segment(line):
return True
#
# If function has not terminated yet, there is no intersection
#
return False
class QuadCell(Cell, Tree):
"""
Quadrilateral cell
"""
def __init__(self, half_edges, parent=None, position=None, grid=None):
"""
Constructor
"""
assert len(half_edges)==4, 'QuadCells contain only 4 HalfEdges.'
Cell.__init__(self, half_edges, n_children=4, parent=parent,
position=position, grid=grid)
#
# Check whether cell's parent is a rectangle
#
        if self.has_parent():
            is_rectangle = self.get_parent().is_rectangle()
        else:
            # Root cell: assume rectangular until the edge check below fails
            is_rectangle = True
        for i in range(4):
            he = half_edges[i].to_vector()
            he_nxt = half_edges[(i+1)%4].to_vector()
            on_axis = min(abs(he)) < 1e-12
perpendicular = abs(np.dot(he, he_nxt)) < 1e-12
if not (perpendicular and on_axis):
is_rectangle = False
break
self._is_rectangle = is_rectangle
def is_rectangle(self):
"""
Is the cell a rectangle?
"""
return self._is_rectangle
def split(self, flag=None):
"""
Split QuadCell into 4 subcells (and mark children with flag)
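
        Example (sketch; assumes cell is an unrefined QuadCell):

            cell.split(flag='refined')
            cell.has_children()    # True: 4 children, each marked 'refined'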
"""
assert not self.has_children(), 'Cell already split.'
#
# Middle Vertex
#
xx = convert_to_array(self.get_vertices())
v_m = Vertex((np.mean(xx[:,0]),np.mean(xx[:,1])))
interior_half_edges = []
for half_edge in self.get_half_edges():
#
# Split each half_edge
#
if not half_edge.has_children():
half_edge.split()
#
# Form new HalfEdges to and from the center
#
h_edge_up = HalfEdge(half_edge.get_child(0).head(),v_m)
h_edge_down = h_edge_up.make_twin()
# Add to list
interior_half_edges.append([h_edge_up, h_edge_down])
#
# Form new cells using new half_edges
#
i = 0
for half_edge in self.get_half_edges():
#
# Define Child's HalfEdges
#
h1 = half_edge.get_child(0)
h2 = interior_half_edges[i][0]
h3 = interior_half_edges[(i-1)%self.n_half_edges()][1]
h4 = half_edge.previous().get_child(1)
hes = deque([h1, h2, h3, h4])
hes.rotate(i)
hes = list(hes)
#
# Define new QuadCell
#
self._children[i] = QuadCell(hes, parent=self, position=i)
# Increment counter
i += 1
if flag is not None:
for child in self.get_children():
child.mark(flag)
#
# Pair up periodic vertices
#
for half_edge in self.get_half_edges():
for he_child in half_edge.get_children():
if he_child.is_periodic() and he_child.twin() is not None:
he_child.pair_periodic_vertices()
def bin_points(self, points, i_points=None, subforest_flag=None):
"""
Returns a list of the smallest flagged subcells in containing at least
one point, together with the indices of the included points
Inputs:
points: points in cell, to be categorized
i_points: point indices (if contained within a larger array).
subforest_flag: submesh indicator
Outputs:
bins: list of (cell, i_points) pairs enumerating all cells
that contain points, and the indices of these.
"""
#
# Check that cell contains points
#
assert all(self.contains_points(points)), \
'Not all points contained in cell'
sf = subforest_flag
# Convert points to array
x = convert_to_array(points)
if i_points is None:
i_points = np.arange(x.shape[0])
bins = []
#
# Cell is not in submesh
#
if not (sf is None or self.is_marked(flag=sf)):
#
# Move up tree until in submesh
#
if self.has_parent():
cell = self.get_parent()
bins.extend(cell.bin_points(x, i_points, subforest_flag=sf))
return bins
#
# Cell is in submesh
#
if self.has_children(flag=sf):
#
# Points must be contained in some child cells
#
for child in self.get_children(flag=sf):
in_cell = child.contains_points(x)
if any(in_cell):
# Extract the points in child and bin
y = x[in_cell]
i_y = i_points[in_cell]
c_bin = child.bin_points(y,i_y, subforest_flag=sf)
bins.extend(c_bin)
# Remove points contained in child from list
x = x[~in_cell]
i_points = i_points[~in_cell]
else:
#
# Base case
#
bins.append((self, i_points))
        return bins
def reference_map(self, x_in, mapsto='physical',
jac_p2r=False, jac_r2p=False,
hess_p2r=False, hess_r2p=False,
jacobian=False, hessian=False):
"""
Bilinear map between reference cell [0,1]^2 and physical QuadCell
Inputs:
x_in: double, (n,) array or a list of points.
jac_p2r: bool, return jacobian of mapping from physical to
reference domain
jac_r2p: bool, return jacobian of mapping from reference to
physical domain
hess_p2r: bool, return hessian of mapping from physical to
reference domain
            hess_r2p: bool, return hessian of mapping from physical to
reference domain
mapsto: str, 'physical' (map from reference to physical), or
'reference' (map from physical to reference).
Outputs:
x_trg: double, (n,) array of mapped points
mg: dictionary, of jacobians and hessians associated with the
mapping.
jac_p2r: double, n-list of (2,2) physical-to-reference
jacobians.
jac_r2p: double, n-list of (2,2) reference-to-physical
jacobians.
hess_p2r: double, n-list of (2,2,2) physical-to-reference
hessians.
                hess_r2p: double, n-list of (2,2,2) reference-to-physical
hessians.
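
        Example (sketch; assumes cell is a QuadCell on the unit square, so
        the bilinear map is the identity):

            x_ref, mg = cell.reference_map(np.array([[0.5, 0.5]]),
                                           mapsto='reference', jac_p2r=True)
            # x_ref = [[0.5, 0.5]], mg['jac_p2r'][0] = 2x2 identity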
"""
#
# Convert input to array
#
x_in = convert_to_array(x_in, dim=2)
n = x_in.shape[0]
assert x_in.shape[1]==2, 'Input "x" has incorrect dimension.'
#
# Get cell corner vertices
#
x_verts = convert_to_array(self.get_vertices())
p_sw_x, p_sw_y = x_verts[0,:]
p_se_x, p_se_y = x_verts[1,:]
p_ne_x, p_ne_y = x_verts[2,:]
p_nw_x, p_nw_y = x_verts[3,:]
if mapsto=='physical':
#
# Map points from [0,1]^2 to the physical cell, using bilinear
# nodal basis functions
#
# Points in reference domain
s, t = x_in[:,0], x_in[:,1]
# Mapped points
x = p_sw_x*(1-s)*(1-t) + p_se_x*s*(1-t) +\
p_ne_x*s*t + p_nw_x*(1-s)*t
y = p_sw_y*(1-s)*(1-t) + p_se_y*s*(1-t) +\
p_ne_y*s*t + p_nw_y*(1-s)*t
# Store points in an array
x_trg = np.array([x,y]).T
elif mapsto=='reference':
#
# Map from physical- to reference domain using Newton iteration
#
# Points in physical domain
x, y = x_in[:,0], x_in[:,1]
if self.is_rectangle():
#
# Cell is a rectangle - the inverse mapping is explicit
#
s = (x-p_sw_x)/(p_se_x-p_sw_x)
t = (y-p_sw_y)/(p_nw_y-p_sw_y)
x_trg = np.array([s,t]).T
else:
#
# Cell is quadrilateral - the inverse mapping must be estimated
#
# Initialize points in reference domain
s, t = 0.5*np.ones(n), 0.5*np.ones(n)
n_iterations = 5
for dummy in range(n_iterations):
#
# Compute residual
#
rx = p_sw_x*(1-s)*(1-t) + p_se_x*s*(1-t) \
+ p_ne_x*s*t + p_nw_x*(1-s)*t - x
ry = p_sw_y*(1-s)*(1-t) + p_se_y*s*(1-t) \
+ p_ne_y*s*t + p_nw_y*(1-s)*t - y
#
# Compute jacobian
#
drx_ds = -p_sw_x*(1-t) + p_se_x*(1-t) + p_ne_x*t - p_nw_x*t # J11
dry_ds = -p_sw_y*(1-t) + p_se_y*(1-t) + p_ne_y*t - p_nw_y*t # J21
drx_dt = -p_sw_x*(1-s) - p_se_x*s + p_ne_x*s + p_nw_x*(1-s) # J12
dry_dt = -p_sw_y*(1-s) - p_se_y*s + p_ne_y*s + p_nw_y*(1-s) # J22
#
# Newton Update:
#
Det = drx_ds*dry_dt - drx_dt*dry_ds
s -= ( dry_dt*rx - drx_dt*ry)/Det
t -= (-dry_ds*rx + drx_ds*ry)/Det
#
# Project onto [0,1]^2
#
s = np.minimum(np.maximum(s,0),1)
t = np.minimum(np.maximum(t,0),1)
x_trg = np.array([s,t]).T
#
# Compute the Jacobians and Hessians (if asked for)
#
if any([jac_r2p, jac_p2r, hess_r2p, hess_p2r]):
#
# Gradients of the mapping sought
#
# Initialize map gradients (mg) dictionary
mg = {}
if jac_r2p or jac_p2r:
#
# Compute Jacobian of the forward mapping
#
xs = -p_sw_x*(1-t) + p_se_x*(1-t) + p_ne_x*t - p_nw_x*t # J11
ys = -p_sw_y*(1-t) + p_se_y*(1-t) + p_ne_y*t - p_nw_y*t # J21
xt = -p_sw_x*(1-s) - p_se_x*s + p_ne_x*s + p_nw_x*(1-s) # J12
yt = -p_sw_y*(1-s) - p_se_y*s + p_ne_y*s + p_nw_y*(1-s) # J22
if jac_r2p:
#
# Jacobian of mapping from reference to physical region
#
mg['jac_r2p'] = [np.array([[xs[i], xt[i]], [ys[i], yt[i]]])\
for i in range(n)]
if jac_p2r:
#
# Jacobian of mapping from physical to reference region
#
# Compute matrix inverse of jacobian for backward mapping
Det = xs*yt-xt*ys
sx = yt/Det
sy = -xt/Det
tx = -ys/Det
ty = xs/Det
mg['jac_p2r'] = [np.array([[sx[i], sy[i]],[tx[i], ty[i]]])\
for i in range(n)]
if hess_r2p:
#
# Hessian of mapping from reference to physical region
#
if self.is_rectangle():
# Linear mapping (no curvature)
hr2p = [np.zeros((2,2,2)) for dummy in range(n)]
else:
hr2p = []
# Nonlinear mapping: compute curvature for each point
for i in range(n):
h = np.zeros((2,2,2))
xts = p_sw_x - p_se_x + p_ne_x - p_nw_x
yts = p_sw_y - p_se_y + p_ne_y - p_nw_y
h[:,:,0] = np.array([[0, xts], [xts, 0]])
h[:,:,1] = np.array([[0, yts], [yts, 0]])
hr2p.append(h)
# Store result
mg['hess_r2p'] = hr2p
if hess_p2r:
#
# Hessian of mapping from physical to reference region
#
if self.is_rectangle():
# Linear mapping (no curvature)
hp2r = [np.zeros((2,2,2)) for dummy in range(n)]
else:
# Nonlinear mapping: compute curvature for each point
hp2r = []
Dx = p_sw_x - p_se_x + p_ne_x - p_nw_x
Dy = p_sw_y - p_se_y + p_ne_y - p_nw_y
dxt_dx = Dx*sx
dxt_dy = Dx*sy
dyt_dx = Dy*sx
dyt_dy = Dy*sy
dxs_dx = Dx*tx
dxs_dy = Dx*ty
dys_dx = Dy*tx
dys_dy = Dy*ty
dDet_dx = dxs_dx*yt + dyt_dx*xs - dys_dx*xt - dxt_dx*ys
dDet_dy = dxs_dy*yt + dyt_dy*xs - dys_dy*xt - dxt_dy*ys
sxx = dyt_dx/Det - yt*dDet_dx/Det**2
sxy = dyt_dy/Det - yt*dDet_dy/Det**2
syy = -dxt_dy/Det + xt*dDet_dy/Det**2
txx = -dys_dx/Det + ys*dDet_dx/Det**2
txy = -dys_dy/Det + ys*dDet_dy/Det**2
tyy = dxs_dy/Det - xs*dDet_dy/Det**2
for i in range(n):
h = np.zeros((2,2,2))
h[:,:,0] = np.array([[sxx[i], sxy[i]],
[sxy[i], syy[i]]])
h[:,:,1] = np.array([[txx[i], txy[i]],
[txy[i], tyy[i]]])
hp2r.append(h)
# Store result
mg['hess_p2r'] = hp2r
#
# Return points and gradients
#
return x_trg, mg
else:
#
# No gradients of the mapping sought
#
return x_trg
class RVertex(Vertex):
"""
Vertex on the reference cell
"""
def __init__(self, coordinates):
"""
Constructor
"""
Vertex.__init__(self, coordinates)
self.__pos = {0: None, 1: {0: None, 1: None, 2: None, 3: None}}
self.__basis_index = None
def set_pos(self, pos, level=0, child=None):
"""
Set the position of the Dof Vertex
Inputs:
pos: int, a number not exceeding the element's number of dofs
level: int in {0,1}, number specifying the refinement level
( 0 = coarse, 1 = fine ).
child: int in {0,1,2,3}, number specifying the child cell
"""
assert level in [0,1], 'Level should be either 0 or 1.'
if level==0:
self.__pos[level] = pos
if level==1:
assert child in [0,1,2,3], 'Level=1. Child should be specified.'
self.__pos[level][child] = pos
def get_pos(self, level, child=None, debug=False):
"""
Return the dof vertex's position at a given level for a given child
"""
if debug:
print(self.__pos)
if level==1:
assert child is not None, 'On fine level, child must be specified.'
return self.__pos[level][child]
else:
return self.__pos[level]
def set_basis_index(self, idx):
self.__basis_index = idx
class RHalfEdge(HalfEdge):
"""
HalfEdge for reference element
"""
def __init__(self, base, head, dofs_per_edge,
parent=None, position=None, twin=None):
"""
Constructor
"""
HalfEdge.__init__(self, base, head, parent=parent, \
position=position, twin=twin)
#
# Assign edge dof vertices
#
self.__dofs_per_edge = dofs_per_edge
self.assign_edge_dof_vertices()
def get_edge_dof_vertices(self, pos=None):
"""
Returns all dof vertices associated with HalfEdge
"""
if pos is None:
return self.__edge_dof_vertices
else:
return self.__edge_dof_vertices[pos]
def assign_edge_dof_vertices(self):
if self.twin() is not None:
#
# Use RHalfEdge's twin's dof vertices
#
assert isinstance(self.twin(),RHalfEdge), \
'Twin should also be an RHalfEdge'
            # Reversed copy (don't mutate the twin's own dof vertex list)
            edge_dofs = list(reversed(self.twin().get_edge_dof_vertices()))
else:
#
# Make new dof Vertices
#
dofs_per_edge = self.n_dofs()
x0, y0 = self.base().coordinates()
x1, y1 = self.head().coordinates()
edge_dofs = []
if dofs_per_edge!=0:
h = 1/(dofs_per_edge+1)
for i in range(dofs_per_edge):
#
# Compute coordinates for dof vertex
#
t = (i+1)*h
x = x0 + t*(x1-x0)
y = y0 + t*(y1-y0)
v = RVertex((x,y))
if self.has_parent():
#
# Check if vertex already exists
#
for v_p in self.get_parent().get_edge_dof_vertices():
if np.allclose(v.coordinates(),v_p.coordinates()):
v = v_p
edge_dofs.append(v)
#
# Store edge dof vertices
#
self.__edge_dof_vertices = edge_dofs
def make_twin(self):
"""
Returns the twin RHalfEdge
"""
return RHalfEdge(self.head(), self.base(), self.n_dofs(), twin=self)
def n_dofs(self):
"""
Returns the number of dofs associated with the HalfEdge
"""
return self.__dofs_per_edge
def split(self):
"""
Refine current half-edge (overwrite Tree.split)
"""
#
# Compute new midpoint vertex
#
x = convert_to_array([self.base().coordinates(),\
self.head().coordinates()])
xm = 0.5*(x[0,:]+x[1,:])
vm = RVertex(tuple(xm))
for v in self.get_edge_dof_vertices():
if np.allclose(vm.coordinates(), v.coordinates()):
vm = v
#
# Define own children independently of neighbor
#
c0 = RHalfEdge(self.base(), vm, self.n_dofs(), parent=self, position=0)
c1 = RHalfEdge(vm, self.head(), self.n_dofs(), parent=self, position=1)
#
# Save the babies
#
self._children[0] = c0
self._children[1] = c1
class RQuadCell(QuadCell):
"""
Quadrilateral Reference Cell
"""
def __init__(self, element, half_edges=None, parent=None, position=None):
"""
Constructor
"""
#
# Check if the element is correct
#
self.element = element
# Extract numbers of degrees of freedom
dofs_per_vertex = element.n_dofs('vertex')
assert dofs_per_vertex<=1, \
'Only elements with at most one dof per vertex supported'
#
# Determine Cell's RHalfEdges
#
if parent is None:
#
# Corner Vertices
#
vertices = [RVertex((0,0)), RVertex((1,0)),
RVertex((1,1)), RVertex((0,1))]
#
# Reference HalfEdges
#
dofs_per_edge = element.n_dofs('edge')
half_edges = []
for i in range(4):
he = RHalfEdge(vertices[i], vertices[(i+1)%4], dofs_per_edge)
half_edges.append(he)
else:
assert half_edges is not None, 'Cell has parent. Specify RefHalfEdges.'
# Define Quadcell
QuadCell.__init__(self, half_edges, parent=parent, position=position)
#
# Assign cell dof vertices
#
self.assign_cell_dof_vertices()
if not self.has_parent():
#
# Assign positions on coarse level
#
self.assign_dof_positions(0)
#
# Split
#
self.split()
#
# Assign positions
#
self.assign_dof_positions(1)
def split(self):
"""
Split refQuadCell into 4 subcells
"""
assert not self.has_children(), 'Cell already split.'
#
# Middle Vertex
#
xx = convert_to_array(self.get_vertices())
v_m = RVertex((np.mean(xx[:,0]),np.mean(xx[:,1])))
# Check if this vertex is contained in cell
for v_p in self.get_dof_vertices():
if np.allclose(v_m.coordinates(), v_p.coordinates()):
# Vertex already exists
v_m = v_p
break
dofs_per_edge = self.element.n_dofs('edge')
interior_half_edges = []
for half_edge in self.get_half_edges():
#
# Split each half_edge
#
if not half_edge.has_children():
half_edge.split()
#
# Form new HalfEdges to and from the center
#
h_edge_up = RHalfEdge(half_edge.get_child(0).head(),v_m, dofs_per_edge)
h_edge_down = h_edge_up.make_twin()
# Add to list
interior_half_edges.append([h_edge_up, h_edge_down])
#
# Form new cells using new half_edges
#
i = 0
for half_edge in self.get_half_edges():
#
# Define Child's HalfEdges
            #
h1 = half_edge.get_child(0)
h2 = interior_half_edges[i][0]
h3 = interior_half_edges[(i-1)%self.n_half_edges()][1]
h4 = half_edge.previous().get_child(1)
hes = deque([h1, h2, h3, h4])
hes.rotate(i)
hes = list(hes)
#
# Define new QuadCell
#
self._children[i] = RQuadCell(self.element, hes, parent=self, position=i)
# Increment counter
i += 1
def assign_cell_dof_vertices(self):
"""
Assign interior dof vertices to cell
"""
dofs_per_cell = self.element.n_dofs('cell')
cell_dofs = []
if dofs_per_cell!=0:
n = int(np.sqrt(dofs_per_cell)) # number of dofs per direction
x0, x1, y0, y1 = self.bounding_box()
h = 1/(n+1) # subcell width
for i in range(n): # y-coordinates
for j in range(n): # x-coordinates
#
# Compute new Vertex
#
v_c = RVertex((x0+(j+1)*h*(x1-x0),y0+(i+1)*h*(y1-y0)))
#
# Check if vertex exists within parent cell
#
inherits_dof_vertex = False
if self.has_parent():
for v_p in self.get_parent().get_cell_dof_vertices():
if np.allclose(v_c.coordinates(), v_p.coordinates()):
cell_dofs.append(v_p)
inherits_dof_vertex = True
break
if not inherits_dof_vertex:
cell_dofs.append(v_c)
self.__cell_dof_vertices = cell_dofs
def get_cell_dof_vertices(self, pos=None):
"""
Return the interior dof vertices
"""
if pos is None:
return self.__cell_dof_vertices
else:
return self.__cell_dof_vertices[pos]
def assign_dof_positions(self, level):
"""
"""
if level==0:
#
# Level 0: Assign positions to vertices on coarse level
#
self.__dof_vertices = {0: [], 1: {0: [], 1: [], 2: [], 3: []}}
count = 0
# Corner dof vertices
for vertex in self.get_vertices():
if self.element.n_dofs('vertex')!=0:
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
# HalfEdge dof vertices
for half_edge in self.get_half_edges():
for vertex in half_edge.get_edge_dof_vertices():
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
# Cell dof vertices
for vertex in self.get_cell_dof_vertices():
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
elif level==1:
#
# Assign positions to child vertices
#
coarse_dofs = [i for i in range(self.element.n_dofs())]
for i_child in range(4):
#
# Add all dof vertices to one list
#
child = self.get_child(i_child)
child_dof_vertices = []
# Dofs at Corners
for vertex in child.get_vertices():
if self.element.n_dofs('vertex')!=0:
child_dof_vertices.append(vertex)
# Dofs on HalfEdges
for half_edge in child.get_half_edges():
for vertex in half_edge.get_edge_dof_vertices():
child_dof_vertices.append(vertex)
# Dofs in Cell
for vertex in child.get_cell_dof_vertices():
child_dof_vertices.append(vertex)
count = 0
for vertex in child_dof_vertices:
if not self.element.torn_element():
#
# Continuous Element (Dof Vertex can be inherited multiple times)
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
else:
#
# Discontinuous Element (Dof Vertex can be inherited once)
#
if vertex in self.__dof_vertices[0]:
i_vertex = self.__dof_vertices[0].index(vertex)
if i_vertex in coarse_dofs:
#
# Use vertex within child cell
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
# Delete the entry (preventing reuse).
coarse_dofs.pop(coarse_dofs.index(i_vertex))
else:
#
# Vertex has already been used, make a new one
#
vcopy = RVertex(vertex.coordinates())
vcopy.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vcopy)
count += 1
else:
#
# Not contained in coarse vertex set
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
def get_dof_vertices(self, level=0, child=None, pos=None):
"""
Returns all dof vertices in cell
"""
if level==0:
return self.__dof_vertices[0]
elif level==1:
assert child is not None, 'On level 1, child must be specified.'
if pos is None:
return self.__dof_vertices[1][child]
else:
return self.__dof_vertices[1][child][pos]
class RInterval(Interval):
def __init__(self, element, base=None, head=None,
parent=None, position=None):
"""
Constructor
"""
assert element.dim()==1, 'Element must be one dimensional'
self.element = element
if parent is None:
base = RVertex(0)
head = RVertex(1)
else:
assert isinstance(head, RVertex), 'Input "head" must be an RVertex.'
assert isinstance(base, RVertex), 'Input "base" must be an RVertex.'
Interval.__init__(self, base, head, parent=parent, position=position)
#
# Assign cell dof vertices
#
self.assign_cell_dof_vertices()
if not self.has_parent():
#
# Assign positions on coarse level
#
self.assign_dof_positions(0)
#
# Split
#
self.split()
#
# Assign positions
#
self.assign_dof_positions(1)
def split(self):
"""
Split a given interval into 2 subintervals
"""
#
# Determine interval endpoints
#
x0, = self.base().coordinates()
x1, = self.head().coordinates()
n = self.n_children()
#
# Loop over children
#
for i in range(n):
#
# Determine children base and head Vertices
#
if i==0:
base = self.base()
if i==n-1:
head = self.head()
else:
head = RVertex(x0+(i+1)*(x1-x0)/n)
#
# Check whether Vertex appears in parent
#
for v_p in self.get_dof_vertices():
if np.allclose(head.coordinates(), v_p.coordinates()):
head = v_p
#
# Define new child interval
#
subinterval = RInterval(self.element, base, head, \
parent=self, position=i)
#
# Store in children
#
self._children[i] = subinterval
#
# The head of the current subinterval
# becomes the base of the next one
base = subinterval.head()
#
# Assign previous/next
#
for child in self.get_children():
i = child.get_node_position()
#
# Assign previous
#
if i==0:
                # Leftmost child: inherit the interval's own previous
child.assign_previous(self.previous())
else:
# Child in the middle
child.assign_previous(self.get_child(i-1))
#
# Assign next
#
if i==n-1:
                # Rightmost child: inherit the interval's own next
child.assign_next(self.next())
def assign_cell_dof_vertices(self):
dofs_per_cell = self.element.n_dofs('edge')
cell_dofs = []
if dofs_per_cell !=0:
#
# Compute coordinates for cell dof vertices
#
x0, = self.base().coordinates()
x1, = self.head().coordinates()
h = 1/(dofs_per_cell+1)
for i in range(dofs_per_cell):
x = x0 + (i+1)*h*(x1-x0)
v_c = RVertex(x)
#
# Check if vertex exists within parent cell
#
inherits_dof_vertex = False
if self.has_parent():
for v_p in self.get_parent().get_cell_dof_vertices():
if np.allclose(v_c.coordinates(), v_p.coordinates()):
cell_dofs.append(v_p)
inherits_dof_vertex = True
break
if not inherits_dof_vertex:
cell_dofs.append(v_c)
self.__cell_dof_vertices = cell_dofs
def get_cell_dof_vertices(self, pos=None):
"""
Returns the Dofs associated with the interior of the cell
Note: This function is only used during construction
"""
if pos is None:
return self.__cell_dof_vertices
else:
return self.__cell_dof_vertices[pos]
def get_dof_vertices(self, level=0, child=None, pos=None):
"""
Returns all dof vertices in cell
Inputs:
level: int 0/1, 0=coarse, 1=fine
child: int, child node position within parent (0/1)
pos: int, 0,...n_dofs-1, dof number within cell
"""
if level==0:
return self.__dof_vertices[0]
elif level==1:
assert child is not None, 'On level 1, child must be specified.'
if pos is None:
return self.__dof_vertices[1][child]
else:
return self.__dof_vertices[1][child][pos]
def assign_dof_positions(self, level):
"""
Assigns a number to each dof vertex in the interval.
Note: We only deal with bisection
"""
if level==0:
#
# Level 0: Assign position to vertices on coarse level
#
self.__dof_vertices = {0: [], 1: {0: [], 1: []}}
count = 0
#
# Add endpoints
#
dpv = self.element.n_dofs('vertex')
if dpv != 0:
for vertex in self.get_vertices():
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
#
# Add cell dof vertices
#
for vertex in self.get_cell_dof_vertices():
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
elif level==1:
#
# Assign positions to child vertices
#
coarse_dofs = [i for i in range(self.element.n_dofs())]
for i_child in range(2):
#
# Add all dof vertices to a list
#
child = self.get_child(i_child)
child_dof_vertices = []
# Dofs at corners
for vertex in child.get_vertices():
if self.element.n_dofs('vertex')!=0:
child_dof_vertices.append(vertex)
# Dofs in Interval
for vertex in child.get_cell_dof_vertices():
child_dof_vertices.append(vertex)
#
# Inspect each vertex in the child, to see
# whether it is duplicated in the parent.
#
count = 0
for vertex in child_dof_vertices:
if not self.element.torn_element():
#
# Continuous Element (Dof Vertex can be inherited multiple times)
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
else:
#
# Discontinuous Element (Dof Vertex can be inherited once)
#
if vertex in self.__dof_vertices[0]:
i_vertex = self.__dof_vertices[0].index(vertex)
if i_vertex in coarse_dofs:
#
# Use vertex within child cell
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
# Delete the entry (preventing reuse).
coarse_dofs.pop(coarse_dofs.index(i_vertex))
else:
#
# Vertex has already been used, make a new one
#
vcopy = RVertex(vertex.coordinates())
vcopy.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vcopy)
count += 1
else:
#
# Not contained in coarse vertex set
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
'''
class Mesh(object):
"""
Mesh class, consisting of a grid (a doubly connected edge list), as well
as a list of root cells, -half-edges and vertices.
Attributes:
Methods:
"""
def __init__(self, grid):
"""
Constructor
Inputs:
grid: DCEL object, doubly connected edge list specifying
the mesh topology.
"""
self.__grid = grid
# =====================================================================
# Vertices
# =====================================================================
n_vertices = grid.points['n']
vertices = []
for i in range(n_vertices):
vertices.append(Vertex(grid.points['coordinates'][i]))
# =====================================================================
# Half-edges
# =====================================================================
n_he = grid.half_edges['n']
#
# Define Half-Edges via base and head vertices
#
half_edges = []
for i in range(n_he):
i_base, i_head = grid.half_edges['connectivity'][i]
v_base = grid.points['coordinates'][i_base]
v_head = grid.points['coordinates'][i_head]
half_edges.append(HalfEdge(Vertex(v_base), Vertex(v_head)))
#
# Specify relations among Half-Edges
#
for i in range(n_he):
he = half_edges[i]
i_prev = grid.half_edges['prev'][i]
i_next = grid.half_edges['next'][i]
i_twin = grid.half_edges['twin'][i]
he.assign_next(half_edges[i_next])
he.assign_prev(half_edges[i_prev])
if i_twin != -1:
he.assign_twin(half_edges[i_twin])
# =====================================================================
# Cells
# =====================================================================
n_faces = grid.faces['n']
cells = []
for i in range(n_faces):
cell_type = grid.faces['type'][i]
if cell_type == 'interval':
cell = BCell()
pass
elif cell_type == 'triangle':
#cell = TriCell()
pass
elif cell_type == 'quadrilateral':
cell = QuadCell()
else:
unknown_cell_type = 'Unknown cell type. Use "interval", '+\
'"triangle", or "quadrilateral".'
raise Exception(unknown_cell_type)
cells.append(cell)
if grid is not None:
#
# grid specified
#
#assert all(i is None for i in [node, cell, dim]),\
#'Grid specified: All other inputs should be None.'
#
# ROOT node
#
dim = grid.dim()
if dim == 1:
node = BiNode(grid=grid)
elif dim == 2:
node = QuadNode(grid=grid)
else:
raise Exception('Only dimensions 1 and 2 supported.')
#
# Cells
#
node.split()
for pos in node._child_positions:
#
# ROOT cells
#
if dim == 1:
cell = BiCell(grid=grid, position=pos)
elif dim == 2:
cell = QuadCell(grid=grid, position=pos)
child = node.children[pos]
child.link(cell)
#
# Mark nodes, edges, and vertices
#
elif cell is not None:
#
# Cell specified
#
assert all(i is None for i in [node, grid, dim]),\
'Cell specified: All other inputs should be None.'
#
# ROOT node linked to cell
#
dim = cell.dim()
if dim == 1:
node = BiNode(bicell=cell)
elif dim == 2:
node = QuadNode(quadcell=cell)
else:
raise Exception('Only dimensions 1 and 2 supported.')
elif node is not None:
#
# Tree specified
#
assert all(i is None for i in [cell, grid, dim]),\
'Tree specified: All other inputs should be None.'
#
# Default cell
#
dim = node.dim()
if dim == 1:
cnr_vtcs = [0,1]
cell = BiCell(corner_vertices=cnr_vtcs)
elif dim == 2:
cnr_vtcs = [0,1,0,1]
cell = QuadCell(corner_vertices=cnr_vtcs)
node.link(cell)
elif dim is not None:
#
# Dimension specified
#
assert all(i is None for i in [node, cell, grid]),\
'Dimension specified: All other inputs should be None.'
#
# Default cell
#
if dim == 1:
cnr_vtcs = [0,1]
cell = BiCell(corner_vertices=cnr_vtcs)
elif dim == 2:
cnr_vtcs = [0,1,0,1]
cell = QuadCell(corner_vertices=cnr_vtcs)
#
# Default node, linked to cell
#
if dim == 1:
node = BiNode(bicell=cell)
elif dim==2:
node = QuadNode(quadcell=cell)
else:
raise Exception('Only dimensions 1 or 2 supported.')
else:
#
# Default cell
#
cnr_vtcs = [0,1,0,1]
cell = QuadCell(corner_vertices=cnr_vtcs)
node = QuadNode(quadcell=cell)
dim = 2
self.__root_node = node
self.grid = grid
self.__mesh_count = 0
self.__dim = dim
def dim(self):
"""
Return the spatial dimension of the region
"""
return self.__dim
def depth(self):
"""
Return the maximum refinement level
"""
return self.root_node().tree_depth()
def n_nodes(self, flag=None):
"""
Return the number of cells
"""
if hasattr(self, '__n_cells'):
return self.__n_cells
else:
self.__n_cells = len(self.__root_node.get_leaves(flag=flag))
return self.__n_cells
def root_node(self):
"""
Return tree node used for mesh
"""
return self.__root_node
def boundary(self, entity, flag=None):
"""
Returns a set of all boundary entities (vertices/edges)
Input:
entity: str, 'vertices', 'edges', or 'quadcells'
flag:
TODO: Add support for tricells
"""
boundary = set()
for node in self.root_node().get_leaves(flag=flag):
cell = node.cell()
for direction in ['W','E','S','N']:
#
# Look in 4 directions
#
if node.get_neighbor(direction) is None:
if entity=='quadcells':
boundary.add(cell)
break
edge = cell.get_edges(direction)
if entity=='edges':
boundary.add(edge)
if entity=='vertices':
for v in edge.vertices():
boundary.add(np.array(v.coordinates()))
return boundary
def bounding_box(self):
"""
Returns the mesh's bounding box
Output:
box: double, [x_min, x_max, y_min, y_max] if mesh is 2d
and [x_min, x_max] if mesh is 1d.
"""
root = self.root_node()
if root.grid is not None:
#
# DCEL on coarsest level
#
grid = root.grid
if self.dim() == 1:
x_min, x_max = grid.points['coordinates'][[0,-1]]
return [x_min, x_max]
elif self.dim() == 2:
#
# Determine bounding box from boundary points
#
i_vbnd = grid.get_boundary_points()
v_bnd = []
for k in i_vbnd:
v_bnd.append( \
grid.points['coordinates'][i_vbnd[k]].coordinates())
v_bnd = np.array(v_bnd)
x_min, x_max = v_bnd[:,0].min(), v_bnd[:,0].max()
y_min, y_max = v_bnd[:,1].min(), v_bnd[:,1].max()
return [x_min, x_max, y_min, y_max]
else:
#
# No DCEL: Use Cell
#
cell = root.cell()
if cell.dim()==1:
x_min, x_max = cell.get_vertices(pos='corners', as_array=True)
return [x_min, x_max]
elif cell.dim()==2:
vbnd = cell.get_vertices(pos='corners', as_array=True)
x_min, x_max = vbnd[:,0].min(), vbnd[:,0].max()
y_min, y_max = vbnd[:,1].min(), vbnd[:,1].max()
return [x_min, x_max, y_min, y_max]
else:
raise Exception('Only 1D and 2D supported.')
def unmark_all(self, flag=None, nodes=False, cells=False, edges=False,
vertices=False, all_entities=False):
"""
Unmark all nodes, cells, edges, or vertices.
"""
if all_entities:
#
# Unmark everything
#
nodes = True
cells = True
edges = True
vertices = True
for node in self.root_node().traverse():
if nodes:
#
# Unmark node
#
node.unmark(flag=flag, recursive=True)
if cells:
#
# Unmark quad cell
#
node.cell().unmark(flag=flag, recursive=True)
if edges:
#
# Unmark quad edges
#
for edge in node.cell().edges.values():
edge.unmark(flag=flag)
if vertices:
#
# Unmark quad vertices
#
for vertex in node.cell().vertices.values():
vertex.unmark(flag=flag)
def iter_quadedges(self, flag=None, nested=False):
"""
Iterate over cell edges
Output:
quadedge_list, list of all active cell edges
"""
quadedge_list = []
#
# Unmark all edges
#
self.unmark_all(quadedges=True)
for cell in self.iter_quadcells(flag=flag, nested=nested):
for edge_key in [('NW','SW'),('SE','NE'),('SW','SE'),('NE','NW')]:
edge = cell.edges[edge_key]
if not(edge.is_marked()):
#
# New edge: add it to the list
#
quadedge_list.append(edge)
edge.mark()
#
# Unmark all edges again
#
self.unmark_all(quadedges=True)
return quadedge_list
def quadvertices(self, coordinate_array=True, flag=None, nested=False):
"""
Iterate over quad cell vertices
Inputs:
coordinate_array: bool, if true, return vertices as arrays
nested: bool, traverse tree depthwise
Output:
quadvertex_list, list of all active cell vertices
"""
quadvertex_list = []
#
# Unmark all vertices
#
self.unmark_all(quadvertices=True)
for cell in self.iter_quadcells(flag=flag, nested=nested):
for direction in ['SW','SE','NW','NE']:
vertex = cell.vertices[direction]
if not(vertex.is_marked()):
#
# New vertex: add it to the list
#
quadvertex_list.append(vertex)
vertex.mark()
self.unmark_all(quadvertices=True)
if coordinate_array:
return np.array([v.coordinates() for v in quadvertex_list])
else:
return quadvertex_list
def refine(self, flag=None):
"""
Refine mesh by splitting marked LEAF nodes
"""
for leaf in self.root_node().get_leaves(flag=flag):
leaf.split()
def coarsen(self, flag=None):
"""
Coarsen mesh by merging marked LEAF nodes.
Inputs:
flag: str/int, marker flag.
If flag is specified, merge a node if all
of its children are flagged.
If no flag is specified, merge nodes so that
mesh depth is reduced by 1.
"""
root = self.root_node()
if flag is None:
tree_depth = root.tree_depth()
for leaf in root.get_leaves():
if leaf.depth == tree_depth:
leaf.parent.merge()
else:
for leaf in root.get_leaves(flag=flag):
parent = leaf.parent
if all(child.is_marked(flag=flag) \
for child in parent.get_children()):
parent.merge()
def record(self,flag=None):
"""
Mark all mesh nodes with flag
"""
count = self.__mesh_count
for node in self.root_node().traverse(mode='breadth-first'):
if flag is None:
node.mark(count)
else:
node.mark(flag)
self.__mesh_count += 1
def n_meshes(self):
"""
Return the number of recorded meshes
"""
return self.__mesh_count
'''
class DCEL(object):
"""
Description: Doubly connected edge list
Attributes:
__dim: int, dimension of grid
format: str, version of mesh file
is_rectangular: bool, specifying whether 2D grid has rectangular faces
subregions: struct, encoding the mesh's subregions, with fields:
n: int, number of subregions
dim: int, dimension of subregion
tags: int, tags of subregions
names: str, names of subregions
points: struct, encoding the mesh's vertices, with fields:
n: int, number of points
n_dofs: int, number of dofs associated with point
tags: tags associated with vertices
phys: int list, indicating membership to one of the
physical subregions listed above.
geom: int list, indicating membership to certain
geometric entities.
partition: int, list indicating membership to certain
mesh partitions.
half_edge: int array, pointing to a half-edge based at
point.
coordinates: double, list of tuples
edges: struct, encoding the mesh's edges associated with
specific subregions, w. fields:
n: int, number of edges
n_dofs: int, number of dofs associated with edge
tags: struct, tags associated with edges (see points)
connectivity: int, list of sets containing edge vertices
half_edge: int, array pointing to associated half-edge
Edges: Edge list in same order as connectivity
half_edges: struct, encoding the mesh's half-edges
n: int, number of half-edges
n_dofs: int, number of dofs associated with half_edge
tags: struct, tags associated with half-edges (see points)
connectivity: int, list pointing to initial and final
vertices [v1,v2].
prev: int, array pointing to the preceding half-edge
next: int, array pointing to the next half-edge
twin: int, array pointing to the reversed half-edge
edge: int, array pointing to an associated edge
face: int, array pointing to an incident face
faces: struct, encoding the mesh's faces w. fields:
n: int, number of faces
n_dofs: int, list containing number of dofs per face
type: str, type of face (interval, triangle, or quadrilateral)
tags: tags associated with faces (same as for points)
connectivity: int, list of indices of vertices that make
up faces.
half_edge: int, array pointing to a half-edge on the boundary
Methods:
__init__
initialize_grid_structure
rectangular_grid
grid_from_gmsh
determine_half_edges
dim
get_neighbor
contains_node
Note: The grid can be used to describe the connectivity associated with a
ROOT Tree.
"""
def __init__(self, box=None, resolution=None, periodic=None, dim=None,
x=None, connectivity=None, file_path=None, file_format='gmsh'):
"""
Constructor
Inputs:
box: list of endpoints for rectangular mesh
1D [x_min, x_max]
2D [x_min, x_max, y_min, y_max]
resolution: tuple, with number of cells in each direction
dim: int, spatial dimension of the grid
x: double, (n,) array of points in for constructing a grid
connectivity: int, list of cell connectivities
file_path: str, path to mesh file
file_format: str, type of mesh file (currently only gmsh)
periodic: int, set containing integers 0 and/or 1.
0 in periodic: make periodic in x-direction
1 in periodic: make periodic in y-direction
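
        Example (sketch; 2x2 rectangular grid on the unit square, periodic
        in the x-direction):

            grid = DCEL(box=[0,1,0,1], resolution=(2,2), periodic={0})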
"""
#
# Initialize struct
#
self.is_rectangular = False
self.is_periodic = False
self.resolution = resolution
self.initialize_grid_structure()
if file_path is not None:
# =================================================================
# Import grid from gmsh
# =================================================================
assert file_format=='gmsh', \
'For input file_format, use "gmsh".'
#
# Import grid from gmsh
#
self.grid_from_gmsh(file_path)
elif x is not None:
# =================================================================
# Generate grid from connectivity
# =================================================================
self.grid_from_connectivity(x, connectivity)
else:
# =================================================================
# Rectangular Grid
# =================================================================
#
# Determine dimension
#
if dim is None:
if resolution is not None:
assert type(resolution) is tuple, \
'Input "resolution" should be a tuple.'
dim = len(resolution)
elif box is not None:
assert type(box) is list, 'Input "box" should be a list.'
if len(box) == 2:
dim = 1
elif len(box) == 4:
dim = 2
else:
box_length = 'Box should be a list of length 2 or 4.'
raise Exception(box_length)
else:
raise Exception('Unable to verify dimension of grid')
self.__dim = dim
#
# Specify box
#
if box is None:
#
# Default boundary box
#
if dim==1:
box = [0,1]
elif dim==2:
box = [0,1,0,1]
#
# Specify resolution
#
if resolution is None:
#
# Default resolution
#
if dim==1:
resolution = (1,)
elif dim==2:
resolution = (1,1)
self.is_rectangular = True
self.rectangular_grid(box=box, resolution=resolution)
# =====================================================================
# Generate doubly connected edge list
# =====================================================================
self.determine_half_edges()
#
# Add periodicity
#
self.periodic_coordinates = {}
if periodic is not None:
if self.dim()==2:
assert self.is_rectangular, \
'Only rectangular meshes can be made periodic'
self.make_periodic(periodic, box)
self.is_periodic = True
def initialize_grid_structure(self):
"""
Initialize empty grid.
"""
self.format = None
# Subregions
self.subregions = {'dim': [], 'n': None, 'names': [], 'tags': []}
# Points
self.points = {'half_edge': [], 'n': None, 'tags': {}, 'n_dofs': None,
'coordinates': []}
# Edges
# TODO: Remove
self.edges = {'n': None, 'tags': {}, 'n_dofs': None, 'connectivity': []}
# Half-Edges
self.half_edges = {'n': None, 'tags': {}, 'n_dofs': None,
'connectivity': [], 'prev': [], 'next': [],
'twin': [], 'edge': [], 'face': [], 'position': []}
# Faces
self.faces = {'n': None, 'type': [], 'tags': {}, 'n_dofs': [],
'connectivity': []}
def rectangular_grid(self, box, resolution):
"""
Construct a grid on a rectangular region
Inputs:
box: int, tuple giving bounding vertices of rectangular domain:
(x_min, x_max) in 1D, (x_min, x_max, y_min, y_max) in 2D.
resolution: int, tuple giving the number of cells in each direction
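        Example (sketch): resolution=(2,3) on box=[0,1,0,1] yields
        (2+1)*(3+1) = 12 points and 2*3 = 6 quadrilateral faces.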
"""
assert type(resolution) is tuple, \
'Input "resolution" should be a tuple.'
dim = len(resolution)
if dim == 1:
# =================================================================
# One dimensional grid
# =================================================================
# Generate DCEL
x_min, x_max = box
n_points = resolution[0] + 1
x = np.linspace(x_min, x_max, n_points)
# Store grid information
self.__dim = 1
self.points['coordinates'] = [(xi,) for xi in x]
self.points['n'] = n_points
elif dim == 2:
# =================================================================
# Two dimensional grid
# =================================================================
self.__dim = 2
x_min, x_max, y_min, y_max = box
nx, ny = resolution
n_points = (nx+1)*(ny+1)
self.points['n'] = n_points
#
# Record vertices
#
x = np.linspace(x_min, x_max, nx+1)
y = np.linspace(y_min, y_max, ny+1)
for i_y in range(ny+1):
for i_x in range(nx+1):
self.points['coordinates'].append((x[i_x],y[i_y]))
#
# Face connectivities
#
# Vertex indices
idx = np.arange((nx+1)*(ny+1)).reshape(ny+1,nx+1).T
for i_y in range(ny):
for i_x in range(nx):
fv = [idx[i_x,i_y], idx[i_x+1,i_y],
idx[i_x+1,i_y+1], idx[i_x,i_y+1]]
self.faces['connectivity'].append(fv)
self.faces['n'] = nx*ny
self.faces['type'] = ['quadrilateral']*self.faces['n']
else:
raise Exception('Only 1D/2D supported.')
def grid_from_connectivity(self, x, connectivity):
"""
Construct grid from connectivity information
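        Inputs:
            x: double, array of point coordinates
            connectivity: int, list of faces, each a list of vertex
                indices (2D only)
        Example (sketch): a single unit-square face,
            grid.grid_from_connectivity([(0,0),(1,0),(1,1),(0,1)],
                                        [[0,1,2,3]])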
"""
points = self.points
x = convert_to_array(x, dim=1)
dim = x.shape[1]
if dim==1:
#
# 1D
#
self.__dim = 1
#
# Store points
#
x = np.sort(x, axis=0) # ensure the vector is sorted
points['coordinates'] = [(xi[0],) for xi in x]
points['n'] = len(x)
elif dim==2:
#
# 2D
#
self.__dim = 2
#
# Store points
#
n_points = x.shape[0]
points['coordinates'] = [(x[i,0],x[i,1]) for i in range(n_points)]
points['n'] = n_points
#
# Store faces
#
faces = self.faces
assert connectivity is not None, 'Specify connectivity.'
assert type(connectivity) is list, \
'Connectivity should be passed as a list.'
n_faces = len(connectivity)
faces['n'] = n_faces
for i in range(n_faces):
assert type(connectivity[i]) is list, \
'Connectivity entries should be lists'
faces['connectivity'].append(connectivity[i])
faces['n_dofs'].append(len(connectivity[i]))
def grid_from_gmsh(self, file_path):
"""
Import computational mesh from a .gmsh file and store it in the grid.
Input:
file_path: str, path to gmsh file
"""
points = self.points
edges = self.edges
faces = self.faces
subregions = self.subregions
#
# Initialize tag categories
#
for entity in [points, edges, faces]:
entity['tags'] = {'phys': [], 'geom': [], 'partition': []}
with open(file_path, 'r') as infile:
while True:
line = infile.readline()
#
# Mesh format
#
if line == '$MeshFormat\n':
# Read next line
line = infile.readline()
self.format = line.rstrip()
# TODO: Put an assert statement here to check version
while line != '$EndMeshFormat\n':
line = infile.readline()
line = infile.readline()
#
# Subregions
#
if line == '$PhysicalNames\n':
#
# Record number of subregions
#
line = infile.readline()
subregions['n'] = int(line.rstrip())
line = infile.readline()
while True:
if line == '$EndPhysicalNames\n':
line = infile.readline()
break
#
# Record names, dimensions, and tags of subregions
#
words = line.split()
name = words[2].replace('"','')
subregions['names'].append(name)
subregions['dim'].append(int(words[0]))
subregions['tags'].append(int(words[1]))
line = infile.readline()
# TODO: Is this necessary?
# =============================================================
# Cell Vertices
# =============================================================
if line == '$Nodes\n':
#
# Record number of nodes
#
line = infile.readline()
points['n'] = int(line.rstrip())
line = infile.readline()
while True:
if line == '$EndNodes\n':
line = infile.readline()
break
#
# Record vertex coordinates
#
words = line.split()
vtx = (float(words[1]),float(words[2]))
points['coordinates'].append(vtx)
line = infile.readline()
# =============================================================
# Faces
# =============================================================
if line == '$Elements\n':
next(infile) # skip 'number of elements' line
line = infile.readline()
n_faces = 0 # count number of faces
while True:
"""
General format for elements
$Elements
n_elements
el_number | el_type* | num_tags** | ...
tag1 .. tag_num_tags |...
node_number_list
*el_type: element type
points: 15 (1 node point)
lines: 1 (2 node line), 0 --------- 1
8 (3 node 2nd order line), 0 --- 2 --- 1
26 (4 node 3rd order line) 0 - 2 - 3 - 1
triangles: 2 (3 node 1st order triangle)
9 (6 node 2nd order triangle)
21 (9 node 3rd order triangle)
quadrilateral: 3 (4 node first order quadrilateral)
10 (9 node second order quadrilateral)
**num_tags:
1st tag - physical entity to which element belongs
(often 0)
2nd tag - number of elementary geometrical entity to
which element belongs (as defined in the
.geo file).
3rd tag - number of the mesh partition to which the
element belongs.
"""
if line == '$EndElements\n':
faces['n'] = n_faces
line = infile.readline()
break
words = line.split()
#
# Identify entity
#
element_type = int(words[1])
if element_type==15:
#
# Point (1 node)
#
dofs_per_entity = 1
entity = points
                        elif element_type==1:
#
# Linear edge (2 nodes)
#
dofs_per_entity = 2
entity = edges
elif element_type==8:
#
# Quadratic edge (3 nodes)
#
dofs_per_entity = 3
entity = edges
elif element_type==26:
#
# Cubic edge (4 nodes)
#
dofs_per_entity = 4
entity = edges
elif element_type==2:
#
# Linear triangular element (3 nodes)
#
dofs_per_entity = 3
entity = faces
entity['type'].append('triangle')
n_faces += 1
elif element_type==9:
#
# Quadratic triangular element (6 nodes)
#
dofs_per_entity = 6
entity = faces
entity['type'].append('triangle')
n_faces += 1
elif element_type==21:
#
# Cubic triangle (10 nodes)
#
dofs_per_entity = 10
entity = faces
entity['type'].append('triangle')
n_faces += 1
elif element_type==3:
#
# Linear quadrilateral (4 nodes)
#
dofs_per_entity = 4
entity = faces
entity['type'].append('quadrilateral')
n_faces += 1
elif element_type==10:
#
# Quadratic quadrilateral (9 nodes)
#
dofs_per_entity = 9
entity = faces
entity['type'].append('quadrilateral')
n_faces += 1
entity['n_dofs'] = dofs_per_entity
#
# Record tags
#
num_tags = int(words[2])
if num_tags > 0:
#
# Record Physical Entity tag
#
entity['tags']['phys'].append(int(words[3]))
else:
#
# Tag not included ... delete
#
entity['tags'].pop('phys', None)
if num_tags > 1:
#
# Record Geometrical Entity tag
#
entity['tags']['geom'].append(int(words[4]))
else:
#
# Tag not included ... delete
#
entity['tags'].pop('geom', None)
if num_tags > 2:
#
# Record Mesh Partition tag
#
entity['tags']['partition'].append(int(words[5]))
else:
#
# Tag not included ... delete
#
entity['tags'].pop('partition', None)
if dofs_per_entity > 1:
#
# Connectivity
#
i_begin = 3 + num_tags
i_end = 3 + num_tags + dofs_per_entity
connectivity = [int(words[i])-1 for i in \
np.arange(i_begin,i_end) ]
entity['connectivity'].append(connectivity)
line = infile.readline()
if line == '':
break
#
# Check for mixed Faces
#
if len(set(faces['type']))>1:
raise Warning('Face types are mixed')
#
# Turn Edge connectivities into sets
#
for i in range(len(edges['connectivity'])):
edges['connectivity'][i] = frozenset(edges['connectivity'][i])
#
# There are faces, dimension = 2
#
if n_faces > 0:
self.__dim = 2
def determine_half_edges(self):
"""
Returns a doubly connected edge list.
The grid should already have the following specified:
1D: points
2D: points, faces
        Currently, only 1D and 2D grids are supported.
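        Example (illustrative): for a single quadrilateral face with
        vertex list [0,1,2,3], the four half-edges produced here are
            index  connectivity  prev  next  twin  face
            0      [0,1]         3     1     -1    0
            1      [1,2]         0     2     -1    0
            2      [2,3]         1     3     -1    0
            3      [3,0]         2     0     -1    0
        (boundary half-edges keep twin = -1).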
"""
#
# Update Point Fields
#
n_points = self.points['n']
        self.points['half_edge'] = np.full((n_points,), -1, dtype=int)
# =====================================================================
# Initialize Half-Edges
# =====================================================================
if self.dim()==1:
#
# 1D mesh
#
n_he = self.points['n']-1
elif self.dim()==2:
#
# 2D mesh
#
n_faces = self.faces['n']
n_he = 0
for i in range(n_faces):
n_he += len(self.faces['connectivity'][i])
self.half_edges['n'] = n_he
        self.half_edges['connectivity'] = np.full((n_he,2), -1, dtype=int)
        self.half_edges['prev'] = np.full((n_he,), -1, dtype=int)
        self.half_edges['next'] = np.full((n_he,), -1, dtype=int)
        self.half_edges['twin'] = np.full((n_he,), -1, dtype=int)
        self.half_edges['edge'] = np.full((n_he,), -1, dtype=int)
        self.half_edges['face'] = np.full((n_he,), -1, dtype=int)
# =====================================================================
# Define Half-Edges
# =====================================================================
if self.dim()==1:
#
# 1D: Define HE's and link with others and points
#
n_points = self.points['n']
for i in range(n_points-1):
# Connectivity
self.half_edges['connectivity'][i] = [i,i+1]
# Previous and next half_edge in the DCEL
# NOTE: Here (unlike 2D), prev and next are used to
# navigate in the grid.
self.half_edges['prev'][i] = i-1
self.half_edges['next'][i] = i+1 if i+1<n_points-1 else -1
# Incident half_edge to left endpoint
self.points['half_edge'][i] = i
'''
#
# Twin
#
# Define twin half-edge
self.half_edges['connectivity'][n_points-1+i] = [i+1,i]
self.half_edges['twin'][i] = n_points-1+i
self.half_edges['twin'][n_points-1+i] = i
# Incident half-edge to right endpoint
self.points['half_edge'][i+1] = n_points + i
# Next and previous
self.half_edges['next'][n_points-1+i] = i-1
self.half_edges['prev'][n_points-1+i] = \
i+1 if i+1<n_points else -1
'''
elif self.dim()==2:
#
# 2D: Define HE's and link with others, faces, and points
#
n_faces = self.faces['n']
            self.faces['half_edge'] = np.full((n_faces,), -1, dtype=int)
#
# Loop over faces
#
half_edge_count = 0
for i_fce in range(n_faces):
fc = self.faces['connectivity'][i_fce]
n_sides = len(fc)
#
# Face's half-edge numbers
#
fhe = [half_edge_count + j for j in range(n_sides)]
#
# Update face information
#
self.faces['half_edge'][i_fce] = fhe[0]
for i in range(n_sides):
#
# Update half-edge information
#
#
# Connectivity
#
hec = [fc[i%n_sides], fc[(i+1)%n_sides]]
self.half_edges['connectivity'][fhe[i],:] = hec
'''
DEBUG
if fhe[i] >= n_he:
print('Half-edge index exceeds matrix dimensions.')
print('Number of faces: {0}'.format(self.faces['n']))
print('Number of half-edges: 3x#faces =' + \
' {0}'.format(3*self.faces['n']))
print('#Half-Edges recorded: {0}'+\
''.format(self.half_edges['n']))
'''
#
# Previous Half-Edge
#
self.half_edges['prev'][fhe[i]] = fhe[(i-1)%n_sides]
#
# Next Half-Edge
#
self.half_edges['next'][fhe[i]] = fhe[(i+1)%n_sides]
#
# Face
#
self.half_edges['face'][fhe[i]] = i_fce
#
# Points
#
self.points['half_edge'][fc[i%n_sides]] = fhe[i]
#
# Update half-edge count
#
half_edge_count += n_sides
hec = self.half_edges['connectivity']
# =====================================================================
# Determine twin half_edges
# =====================================================================
for i in range(n_he):
#
# Find the row whose reversed entries match current entry
#
row = np.argwhere((hec[:,0]==hec[i,1]) & (hec[:,1]==hec[i,0]))
if len(row) == 1:
#
# Update twin field
#
                self.half_edges['twin'][i] = int(row[0,0])
"""
# =====================================================================
# Link with Edges
# =====================================================================
#
# Update Edge Fields
#
# TODO: Delete when safe to do so!!
edge_set = set(self.edges['connectivity'])
self.edges['half_edge'] = [None]*len(edge_set)
for i_he in range(n_he):
#
# Loop over half-edges
#
hec = self.half_edges['connectivity'][i_he]
'''
DEBUG
#print('Size of edge_set: {0}'.format(len(edge_set)))
#print('Size of edge connectivity: {0}'.format(len(self.edges['connectivity'])))
'''
if set(hec) in edge_set:
'''
DEBUG
print('Set {0} is in edge_set. Locating it'.format(hec))
'''
#
# Edge associated with Half-Edge exists
#
i_edge = self.edges['connectivity'].index(set(hec))
'''
DEBUG
print('Location: {0}'.format(i_edge))
print('Here it is: {0}'.format(self.edges['connectivity'][i_edge]))
#print('Linking half edge with edge:')
#print('Half-edge: {0}'.format(self.edges['connectivity'][i_edge]))
#print('Edge: {0}'.format(self.half_edges['connectivity'][fhe[i]]))
#print(len(self.edges['half_edge']))
#print('Length of edge_set {0}'.format(len(edge_set)))
#print(edge_set)
'''
#
# Link edge to half edge
#
self.edges['half_edge'][i_edge] = i_he
else:
#print('Set {0} is not in edge_set \n '.format(hec))
#
# Add edge
#
new_edge = frozenset(hec)
self.edges['connectivity'].append(new_edge)
edge_set.add(new_edge)
i_edge =len(self.edges['connectivity'])-1
#
# Assign empty tags
#
for tag in self.edges['tags'].values():
tag.append(None)
#
# Link edge to half-edge
#
self.edges['half_edge'].append(i)
#
# Link half-edge to edge
#
self.half_edges['edge'][i] = i_edge
#
# Update size of edge list
#
self.edges['n'] = len(self.edges['connectivity'])
"""
def dim(self):
"""
Returns the underlying dimension of the grid
"""
return self.__dim
def get_neighbor(self, i_entity, i_direction):
"""
Returns the neighbor of an entity in a given direction
Inputs:
i_entity: int, index of the entity whose neighbor we seek
In 1D: i_entity indexes a half_edge
In 2D: i_entity indexes a face
i_direction: int, index of an entity specifying a direction
In 1D: i_direction indexes an interval endpoint
In 2D: i_direction indexes a half_edge
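        Example (sketch): in 2D,
            i_nbr = grid.get_neighbor(i_face, i_he)
        returns the index of the face across half-edge i_he, or None if
        i_he lies on the boundary.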
"""
if self.dim() == 1:
#
# 1D grid
#
hec = self.half_edges['connectivity'][i_entity]
assert i_direction in hec, \
'Point index not in connectivity of this Half-Edge.'
if i_direction == hec[0]:
#
# Left endpoint: go to previous half-edge
#
i_nbr = self.half_edges['prev'][i_entity]
elif i_direction == hec[1]:
#
# Right endpoint: go to next Half-Edge
#
i_nbr = self.half_edges['next'][i_entity]
elif self.dim() == 2:
#
# 2D grid: use half_edges
#
assert self.half_edges['face'][i_direction] == i_entity,\
'Cell not incident to Half-Edge.'
            i_nbr_he = self.half_edges['twin'][i_direction]
            if i_nbr_he == -1:
                #
                # Boundary half-edge has no twin: no neighbor
                #
                return None
            i_nbr = self.half_edges['face'][i_nbr_he]
        if i_nbr != -1:
            return i_nbr
        else:
            return None
def get_boundary_half_edges(self):
"""
Returns a list of the boundary half_edge indices
"""
assert self.dim()==2, 'Half edges only present in 2D grids.'
bnd_hes_conn = []
bnd_hes = []
#
# Locate half-edges on the boundary
#
for i_he in range(self.half_edges['n']):
if self.half_edges['twin'][i_he] == -1:
bnd_hes.append(i_he)
bnd_hes_conn.append(self.half_edges['connectivity'][i_he])
#
# Group and sort half-edges
#
        bnd_hes_sorted = [deque([he]) for he in bnd_hes]
        merging = True
        while merging and len(bnd_hes_sorted) > 1:
            merging = False
            for g1 in bnd_hes_sorted:
                #
                # Check whether g1 can absorb another deque in bnd_hes_sorted
                #
                for g2 in bnd_hes_sorted:
                    if g1 is g2:
                        continue
                    #
                    # Does g1's head align with g2's tail?
                    #
                    if self.half_edges['connectivity'][g1[-1]][1]==\
                    self.half_edges['connectivity'][g2[0]][0]:
                        # Append g2 to g1 and remove it from the list
                        g1.extend(g2)
                        bnd_hes_sorted.remove(g2)
                        merging = True
                    #
                    # Does g1's tail align with g2's head?
                    #
                    elif self.half_edges['connectivity'][g1[0]][0]==\
                    self.half_edges['connectivity'][g2[-1]][1]:
                        # Prepend g2 to g1 (reverse first, since
                        # extendleft appends items in reverse order)
                        g2.reverse()
                        g1.extendleft(g2)
                        bnd_hes_sorted.remove(g2)
                        merging = True
                    if merging:
                        # List was modified: restart the scan
                        break
                if merging:
                    break
        #
        # Return (possibly multiple) boundary segments
        #
        return [list(segment) for segment in bnd_hes_sorted]
"""
bnd_hes_sorted = []
i_he_left = bnd_hes.pop()
i_he_right = i_he_left
he_conn_left = bnd_hes_conn.pop()
he_conn_right = he_conn_left
subbnd_hes_sorted = deque([i_he])
while len(bnd_hes)>0:
added_to_left = False
added_to_right = False
for i in range(len(bnd_hes)):
if bnd_hes_conn[i][0] == he_conn_right[1]:
#
# Base vertex of he in list matches
# head vertex of popped he.
#
i_he_right = bnd_hes.pop(i)
he_conn_right = bnd_hes_conn.pop(i)
subbnd_hes_sorted.append(i_he_right)
added_to_right = True
elif bnd_hes_conn[i][1] == he_conn_left[0]:
#
# Head vertex of he in list matches
# base vertex of popped he.
#
i_he_left = bnd_hes_conn.pop(i)
he_conn_left = bnd_hes_conn.pop(i)
subbnd_hes_sorted.appendleft(i_he_left)
added_to_left = True
if added_to_left and added_to_right:
break
if not added_to_left and not added_to_right:
# Could not find any half-edges to add
#
# Add boundary segment to sorted hes
#
bnd_hes_sorted.extend(ihe for ihe in subbnd_hes_sorted)
#
# Reinitialize subbnd_hes_sorted
#
i_he_left = bnd_hes.pop()
i_he_right = i_he_left
he_conn_left = bnd_hes_conn.pop()
he_conn_right = he_conn_left
subbnd_hes_sorted = deque([i_he])
return bnd_hes_sorted
"""
'''
def get_boundary_edges(self):
"""
Returns a list of the boundary edge indices
TODO: Get rid of this
"""
bnd_hes_sorted = self.get_boundary_half_edges()
#
# Extract boundary edges
#
bnd_edges = [self.half_edges['edge'][i] for i in bnd_hes_sorted]
return bnd_edges
'''
def get_boundary_points(self):
"""
Returns a list of boundary point indices
"""
if self.dim() == 1:
#
# One dimensional grid (assume sorted)
#
bnd_points = [0, self.points['n']-1]
elif self.dim() == 2:
#
# Two dimensional grid
#
bnd_points = []
for i_he in self.get_boundary_half_edges():
#
# Add initial point of each boundary half edge
#
bnd_points.append(self.half_edges['connectivity'][i_he][0])
else:
raise Exception('Only dimensions 1 and 2 supported.')
return bnd_points
def make_periodic(self, coordinates, box):
"""
Make a rectangular DCEL periodic by assigning the correct twins to
HalfEdges on the boundary.
Inputs:
Coordinates: set, containing 0 (x-direction) and/or 1 (y-direction).
TODO: Cannot make periodic (1,1) DCEL objects
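        Example (sketch):
            dcel.make_periodic({0}, [0,1,0,1])
        pairs the left and right boundary half-edges of the unit square
        as each other's twins.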
"""
if self.dim()==1:
#
# In 1D, first half-edge becomes "next" of last half-edge
#
self.half_edges['next'][-1] = 0
self.half_edges['prev'][0] = self.half_edges['n']-1
elif self.dim()==2:
#
# In 2D, must align vertices on both side of the box
#
x_min, x_max, y_min, y_max = box
if 0 in coordinates:
#
# Make periodic in the x-direction
#
left_hes = []
right_hes = []
for segment in self.get_boundary_half_edges():
for he in segment:
#
# Record coordinates of half-edge's base and head
#
                        i_base, i_head = self.half_edges['connectivity'][he][:]
                        x_base, y_base = self.points['coordinates'][i_base]
                        x_head, y_head = self.points['coordinates'][i_head]
if np.isclose(x_base,x_max) and np.isclose(x_head,x_max):
#
# If x-values are near x_max, it's on the right
#
right_hes.append((he, y_base, y_head))
elif np.isclose(x_base,x_min) and np.isclose(x_head,x_min):
#
# If x-values are near x_min, it's on the left
#
left_hes.append((he, y_base, y_head))
#
# Look for twin half-edges
#
                n_left = len(left_hes)
                n_right = len(right_hes)
assert n_right==n_left, \
'Number of half-edges on either side of domain differ.'+\
'Cannot make periodic.'
while len(left_hes)>0:
l_he, l_ybase, l_yhead = left_hes.pop()
for ir in range(len(right_hes)):
#
# For each halfedge on the left, check if there is a
# corresponding one on the right.
#
r_he, r_ybase, r_yhead = right_hes[ir]
if np.isclose(l_ybase, r_yhead) and np.isclose(l_yhead, r_ybase):
self.half_edges['twin'][l_he] = r_he
self.half_edges['twin'][r_he] = l_he
del right_hes[ir]
break
assert len(right_hes)==0, \
'All HalfEdges on the left should be matched with '+\
'one on the right.'
if 1 in coordinates:
#
# Make periodic in the y-direction
                #
top_hes = []
bottom_hes = []
for segment in self.get_boundary_half_edges():
for he in segment:
#
# Record coordinates of half-edge's base and head
#
                        i_base, i_head = self.half_edges['connectivity'][he]
                        x_base, y_base = self.points['coordinates'][i_base]
                        x_head, y_head = self.points['coordinates'][i_head]
if np.isclose(y_base,y_max) and np.isclose(y_head,y_max):
#
# If y-values are near y_max, it's on the top
#
top_hes.append((he, x_base, x_head))
elif np.isclose(y_base,y_min) and np.isclose(y_head,y_min):
#
# If y-values are near y_min, it's on the bottom
#
bottom_hes.append((he, x_base, x_head))
#
# Look for twin half-edges
#
while len(bottom_hes)>0:
b_he, b_xbase, b_xhead = bottom_hes.pop()
for it in range(len(top_hes)):
                        #
                        # For each halfedge on the bottom, check if there is a
                        # corresponding one on the top.
                        #
t_he, t_xbase, t_xhead = top_hes[it]
if np.isclose(t_xbase, b_xhead) and np.isclose(t_xhead, b_xbase):
self.half_edges['twin'][b_he] = t_he
self.half_edges['twin'][t_he] = b_he
del top_hes[it]
break
                assert len(top_hes)==0, \
                    'All HalfEdges on the bottom should be matched with '+\
                    'one on the top.'
self.periodic_coordinates = coordinates
class Mesh(object):
"""
Mesh class
"""
def __init__(self, dcel=None, box=None, resolution=None, periodic=None,
dim=None, x=None, connectivity=None, file_path=None,
file_format='gmsh'):
# =====================================================================
# Doubly connected Edge List
# =====================================================================
if dcel is None:
#
# Initialize doubly connected edge list if None
#
dcel = DCEL(box=box, resolution=resolution, periodic=periodic,
dim=dim, x=x, connectivity=connectivity,
file_path=file_path, file_format=file_format)
else:
assert isinstance(dcel,DCEL)
self.dcel = dcel
#
# Determine mesh dimension
#
dim = dcel.dim()
self._dim = dim
# =====================================================================
# Vertices
# =====================================================================
vertices = []
n_points = dcel.points['n']
for i in range(n_points):
vertices.append(Vertex(dcel.points['coordinates'][i]))
self.vertices = vertices
def dim(self):
"""
Returns the dimension of the mesh (1 or 2)
"""
return self._dim
class Mesh1D(Mesh):
"""
1D Mesh Class
"""
def __init__(self, dcel=None, box=None, resolution=None, periodic=False,
x=None, connectivity=None, file_path=None, file_format='gmsh'):
#
# Convert input "periodic" to something intelligible for DCEL
#
if periodic is True:
periodic = {0}
else:
periodic = None
Mesh.__init__(self, dcel=dcel, box=box, resolution=resolution,
periodic=periodic, dim=1, x=x, connectivity=connectivity,
file_path=file_path, file_format=file_format)
assert self.dim()==1, 'Mesh dimension not 1.'
# =====================================================================
# Intervals
# =====================================================================
intervals = []
n_intervals = self.dcel.half_edges['n']
for i in range(n_intervals):
#
# Make list of intervals
#
i_vertices = self.dcel.half_edges['connectivity'][i]
v_base = self.vertices[i_vertices[0]]
v_head = self.vertices[i_vertices[1]]
interval = Interval(v_base, v_head)
intervals.append(interval)
#
# Align intervals (assign next)
#
for i in range(n_intervals):
i_nxt = self.dcel.half_edges['next'][i]
if i_nxt!=-1:
if intervals[i].head() != intervals[i_nxt].base():
assert self.dcel.is_periodic, 'DCEL should be periodic'
#
# Intervals linked by periodicity
#
itv_1, vtx_1 = intervals[i], intervals[i].head()
itv_2, vtx_2 = intervals[i_nxt], intervals[i_nxt].base()
# Mark intervals periodic
itv_1.set_periodic()
itv_2.set_periodic()
# Mark vertices periodic
vtx_1.set_periodic()
vtx_2.set_periodic()
# Associate vertices with one another
vtx_1.set_periodic_pair((itv_2, vtx_2))
vtx_2.set_periodic_pair((itv_1, vtx_1))
else:
intervals[i].assign_next(intervals[i_nxt])
#
# Store intervals in Forest
#
self.cells = Forest(intervals)
self.__periodic_coordinates = self.dcel.periodic_coordinates
def is_periodic(self):
"""
Returns true if the mesh is periodic
"""
return 0 in self.__periodic_coordinates
def bin_points(self, points, i_points=None, subforest_flag=None):
"""
Determine a list of LEAF cells in the submesh, each of which contains
at least one point in points. Return the list of tuples of LEAF cells
and point indices.
Inputs:
points: Set of admissible points
subforest_flag: submesh flag
Outputs:
bins: tuple of (cell, index) pairs detailing the bins and indices
of points.
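        Example (sketch):
            mesh = Mesh1D(resolution=(4,))
            bins = mesh.bin_points(np.array([[0.1],[0.9]]))
        Each (cell, indices) pair lists the points that fall in that
        LEAF cell.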
"""
x = convert_to_array(points)
n_points = x.shape[0]
if i_points is None:
i_points = np.arange(n_points)
else:
assert n_points==len(i_points)
bins = []
for cell in self.cells.get_children(flag=subforest_flag):
in_cell = cell.contains_points(x)
if any(in_cell):
#
# Cell contains (some) points
#
# Isolate points in cell and their indices
y = x[in_cell] # subset of points
y_idx = i_points[in_cell] # index of subset
# Recursion step
c_bin = cell.bin_points(y, y_idx, subforest_flag)
bins.extend(c_bin)
# Eliminate points from list
x = x[~in_cell]
i_points = i_points[~in_cell]
assert len(x)==0, 'Some points are not in domain.'
return bins
def get_boundary_vertices(self):
"""
Returns the mesh endpoint vertices
"""
if self.is_periodic():
return None
else:
v0 = self.cells.get_child(0).base()
v1 = self.cells.get_child(-1).head()
return v0, v1
def get_boundary_cells(self, subforest_flag=None):
"""
Returns the mesh endpoint cells
"""
if self.is_periodic():
#
# Periodic Mesh: No cells on the boundary
#
return None
else:
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
#
# Iterate over cells
#
if cell.get_neighbor(0, subforest_flag=subforest_flag) is None:
#
# Cannot find a left neighbor: found left boundary cell
#
cell_left = cell
if cell.get_neighbor(1, subforest_flag=subforest_flag) is None:
#
# Cannot find a right neighbor: found right boundary cell
#
cell_right = cell
return cell_left, cell_right
def bounding_box(self):
"""
Returns the interval endpoints
"""
if self.is_periodic():
#
# Periodic meshes have no boundary vertices, get them explicitly
#
v0 = self.cells.get_child(0).base()
v1 = self.cells.get_child(-1).head()
else:
v0, v1 = self.get_boundary_vertices()
x0, = v0.coordinates()
x1, = v1.coordinates()
return x0, x1
def mark_region(self, flag, f, entity_type='vertex', strict_containment=True,
on_boundary=False, subforest_flag=None):
"""
Flags all entities of specified type within specified 1D region in mesh
Inputs:
flag: str/int/tuple, marker
f: boolean function whose input is a number x and whose
output is True if the point is contained in the region to be
marked, False otherwise.
entity_type: str, entity to be marked ('cell', 'vertex')
strict_containment: bool, if True, an entity is marked only
if all its vertices are contained in the region. If False,
one vertex suffices
on_boundary: bool, if True, consider only entities on the boundary
subforest_flag: str/int/tuple, mesh marker.
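        Example (sketch): mark all vertices in [0, 0.5],
            mesh.mark_region('left', lambda x: x<=0.5, entity_type='vertex')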
"""
if on_boundary:
#
# Entity adjacent to boundary
#
if entity_type=='vertex':
#
# Vertices
#
for v in self.get_boundary_vertices():
x, = v.coordinates()
if f(x):
#
# Vertex in region -> mark it
#
v.mark(flag)
elif entity_type=='cell':
#
# Intervals
#
for cell in self.get_boundary_cells(subforest_flag=subforest_flag):
#
# Iterate over boundary cells
#
if strict_containment:
#
# Only mark interval if all vertices are in region
#
mark = True
for v in cell.get_vertices():
x, = v.coordinates()
if not f(x):
#
                                # One vertex outside region -> don't mark interval
#
mark = False
break
else:
#
# Mark interval if any vertex is in region
#
mark = False
for v in cell.get_vertices():
x, = v.coordinates()
if f(x):
#
# One vertex in region -> mark interval
#
mark = True
break
if mark:
#
# Mark interval if necessary
#
cell.mark(flag)
else:
#
# Region not adjacent to boundary
#
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
if entity_type=='vertex':
#
# Mark vertices
#
for v in cell.get_vertices():
x, = v.coordinates()
if f(x):
#
# Vertex is in region -> mark it
#
v.mark(flag)
elif entity_type=='cell':
#
# Mark intervals
#
if strict_containment:
mark = True
for v in cell.get_vertices():
x, = v.coordinates()
if not f(x):
#
# One cell vertex outside region -> don't mark
#
mark = False
break
else:
mark = False
for v in cell.get_vertices():
x, = v.coordinates()
if f(x):
#
# One vertex in region -> mark interval
#
mark = True
break
if mark:
#
# Mark interval if necessary
#
cell.mark(flag)
def get_region(self, flag=None, entity_type='vertex', on_boundary=False,
subforest_flag=None, return_cells=False):
"""
Returns a list of entities marked with the specified flag in 1D mesh
Inputs:
flag: str/int/tuple, entity marker
entity_type: str, type of entity to be returned
('vertex', 'cell', or 'half_edge')
on_boundary: bool, if True, seek region only along boundary
subforest_flag: str/int/tuple, submesh flag
return_cells: bool, if True, return tuples of the form
(entity, cell), i.e. include the cell containing the entity.
Outputs:
region_entities: list, or Cells/Intervals/HalfEdges/Vertices
located within region.
"""
region_entities = set()
if on_boundary:
#
# Restrict region to boundary
#
cells = self.get_boundary_cells(subforest_flag=subforest_flag)
bnd_vertices = self.get_boundary_vertices()
else:
#
# Region within 1D domain
#
cells = self.cells.get_leaves(subforest_flag=subforest_flag)
for cell in cells:
#
# Iterate over cells
#
if entity_type=='vertex':
#
# Vertex
#
for v in cell.get_vertices():
add_entity = flag is None or v.is_marked(flag)
if on_boundary:
#
# Additional check when on boundary
#
add_entity = add_entity and v in bnd_vertices
if add_entity:
#
# Add vertex to set
#
if return_cells:
#
# Add (vertex, cell) tuple
#
region_entities.add((v,cell))
else:
#
# Add only vertex
#
region_entities.add(v)
elif entity_type=='cell':
#
# Intervals
#
add_entity = flag is None or cell.is_marked(flag)
if add_entity:
#
# Add cell to set
#
if return_cells:
#
# Add (cell, cell) tuple
#
region_entities.add((cell, cell))
else:
#
# Add only cell
#
region_entities.add(cell)
return list(region_entities)
def record(self, subforest_flag):
"""
Record current mesh (intervals)
Input:
subforest_flag: str/int/tuple, name of mesh
"""
self.cells.record(subforest_flag)
class Mesh2D(Mesh):
"""
2D Mesh class
"""
def __init__(self, dcel=None, box=None, resolution=None, x=None,
periodic=None, connectivity=None, file_path=None,
file_format='gmsh'):
Mesh.__init__(self, dcel=dcel, box=box, resolution=resolution,
periodic=periodic, dim=2, x=x, connectivity=connectivity,
file_path=file_path, file_format=file_format)
self._is_rectangular = self.dcel.is_rectangular
self._periodic_coordinates = self.dcel.periodic_coordinates
# ====================================================================
# HalfEdges
# ====================================================================
half_edges = []
n_hes = self.dcel.half_edges['n']
for i in range(n_hes):
i_vertices = self.dcel.half_edges['connectivity'][i]
v_base = self.vertices[i_vertices[0]]
v_head = self.vertices[i_vertices[1]]
half_edge = HalfEdge(v_base, v_head)
half_edges.append(half_edge)
#
# Assign twins (2D)
#
for i_he in range(n_hes):
i_twin = self.dcel.half_edges['twin'][i_he]
if i_twin!=-1:
#
# HalfEdge has twin
#
he_nodes = self.dcel.half_edges['connectivity'][i_he]
twin_nodes = self.dcel.half_edges['connectivity'][i_twin]
if not all(he_nodes == list(reversed(twin_nodes))):
#
# Heads and Bases don't align, periodic boundary
#
assert self.is_periodic(), 'Mesh is not periodic.'\
'All HalfEdges should align.'
half_edges[i_he].set_periodic()
half_edges[i_twin].set_periodic()
half_edges[i_he].assign_twin(half_edges[i_twin])
half_edges[i_twin].assign_twin(half_edges[i_he])
#
# Store HalfEdges in Forest.
#
self.half_edges = Forest(half_edges)
# =====================================================================
# Cells
# =====================================================================
cells = []
n_cells = self.dcel.faces['n']
is_quadmesh = True
for ic in range(n_cells):
i_he_pivot = self.dcel.faces['half_edge'][ic]
i_he = i_he_pivot
one_rotation = False
i_hes = []
while not one_rotation:
i_hes.append(i_he)
i_he = self.dcel.half_edges['next'][i_he]
if i_he==i_he_pivot:
one_rotation = True
if len(i_hes)==4:
cells.append(QuadCell([half_edges[i] for i in i_hes]))
else:
cells.append(Cell([half_edges[i] for i in i_hes]))
is_quadmesh = False
self._is_quadmesh = is_quadmesh
self.cells = Forest(cells)
# =====================================================================
# Pair Periodic Vertices
# =====================================================================
for half_edge in self.half_edges.get_children():
# Pair periodic vertices
#
if half_edge.is_periodic():
half_edge.pair_periodic_vertices()
def is_rectangular(self):
"""
Check whether the Mesh is rectangular
"""
return self._is_rectangular
def is_periodic(self, coordinates=None):
"""
Check whether the Mesh is periodic in the x- and/or the y direction
Input:
            coordinates: int, set containing 0 (x-direction) and/or 1 (y-direction);
                if coordinates is None, check for periodicity in any direction
"""
if coordinates is None:
return 0 in self._periodic_coordinates or 1 in self._periodic_coordinates
else:
is_periodic = True
for i in coordinates:
if i not in self._periodic_coordinates:
return False
return is_periodic
def is_quadmesh(self):
"""
Check if the mesh is a quadmesh
"""
return self._is_quadmesh
def locate_point(self, point, flag=None):
"""
Returns the smallest (flagged) cell containing a given point
or None if current cell doesn't contain the point
Input:
point: Vertex
Output:
cell: smallest cell that contains x
"""
for cell in self.cells.get_children():
if flag is None:
if cell.contains_points(point):
return cell
else:
if cell.is_marked(flag) and cell.contains_points(point):
return cell
def get_boundary_segments(self, subforest_flag=None, flag=None):
"""
Returns a list of segments of boundary half edges
Inputs:
subforest_flag: optional flag (int/str) specifying the submesh
within which boundary segments are sought.
Note: This flag is applied to the cells in the submesh, not the edges
flag: optional flag (int/str) specifying boundary segments
Notes:
- The subforest flag specified above refers to the mesh cells,
not to the half-edges
- This implementation assumes that the boundary edges on the coarsest
mesh are a good representation of the computational region.
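        Example (sketch):
            for segment in mesh.get_boundary_segments():
                for he in segment:
                    he.mark('bnd')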
"""
bnd_hes = []
#
# Locate half-edges on the boundary (coarsest level)
#
for he in self.half_edges.get_children():
if he.twin() is None:
bnd_hes.append(he)
#
# Group and sort half-edges
#
        bnd_hes_sorted = [deque([he]) for he in bnd_hes]
        merging = True
        while merging and len(bnd_hes_sorted) > 1:
            merging = False
            for g1 in bnd_hes_sorted:
                #
                # Check whether g1 can absorb another deque in bnd_hes_sorted
                #
                for g2 in bnd_hes_sorted:
                    if g1 is g2:
                        continue
                    #
                    # Does g1's head align with g2's tail?
                    #
                    if g1[-1].head()==g2[0].base():
                        # Append g2 to g1 and remove it from the list
                        g1.extend(g2)
                        bnd_hes_sorted.remove(g2)
                        merging = True
                    #
                    # Does g1's tail align with g2's head?
                    #
                    elif g1[0].base()==g2[-1].head():
                        # Prepend g2 to g1 (reverse first, since
                        # extendleft appends items in reverse order)
                        g2.reverse()
                        g1.extendleft(g2)
                        bnd_hes_sorted.remove(g2)
                        merging = True
                    if merging:
                        # List was modified: restart the scan
                        break
                if merging:
                    break
#
# Multiple boundary segments
#
bnd = [list(segment) for segment in bnd_hes_sorted]
#
# Get edges on finest level (allowed by submesh)
#
for segment in bnd:
hes_todo = [he for he in segment]
while len(hes_todo)>0:
#
# Pop out first half-edge in list
#
he = hes_todo.pop(0)
if he.cell().has_children(flag=subforest_flag):
#
# Half-Edge has valid sub-edges:
# Replace he in list with these.
#
i_he = segment.index(he)
del segment[i_he]
for che in he.get_children():
segment.insert(i_he, che)
i_he += 1
#
# Add che's to the list of he's to do
#
hes_todo.append(che)
#
# Throw out he's that are not flagged
#
            if flag is not None:
                # Rebuild the segment rather than deleting while iterating
                segment[:] = [he for he in segment if he.is_marked(flag)]
return bnd
def get_boundary_vertices(self, flag=None, subforest_flag=None):
"""
Returns the Vertices on the boundary
"""
vertices = []
for segment in self.get_boundary_segments(subforest_flag=subforest_flag,
flag=flag):
for he in segment:
vertices.append(he.base())
return vertices
def mark_region(self, flag, f, entity_type='vertex', strict_containment=True,
on_boundary=False, subforest_flag=None):
"""
This method marks all entities within a 2D region.
Inputs:
flag: str, int, tuple marker
f: boolean function whose inputs are an x and a y vector and whose
output is True if the point is contained in the region to be
marked, False otherwise.
entity_type: str, entity to be marked ('cell', 'half_edge', 'vertex')
strict_containment: bool, if True, an entity is marked only
if all its vertices are contained in the region. If False,
one vertex suffices
on_boundary: bool, if True, consider only entities on the boundary
subforest_flag: str/int/tuple, mesh marker.
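        Example (sketch): mark every cell contained in the left half of
        the unit square,
            mesh.mark_region('left', lambda x,y: x<=0.5, entity_type='cell')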
"""
if on_boundary:
#
# Iterate only over boundary segments
#
for segment in self.get_boundary_segments(subforest_flag=subforest_flag):
#
# Iterate over boundary segments
#
for he in segment:
#
# Iterate over half_edges within each segment
#
if entity_type=='vertex':
#
# Mark vertices
#
for v in he.get_vertices():
#
# Iterate over half-edge vertices
#
x,y = v.coordinates()
if f(x,y):
#
# Mark
#
v.mark(flag)
elif entity_type=='half_edge':
#
# Mark Half-Edges
#
if strict_containment:
#
# All vertices must be within region
#
mark = True
for v in he.get_vertices():
x,y = v.coordinates()
if not f(x,y):
#
# One vertex not in region, don't mark edge
#
mark = False
break
else:
#
# Only one vertex need be in the region
#
mark = False
for v in he.get_vertices():
x,y = v.coordinates()
if f(x,y):
#
# One vertex in region is enough
#
mark = True
break
if mark:
#
# Mark half_edge
#
he.mark(flag)
elif entity_type=='cell':
#
# Mark Cells
#
cell = he.cell()
if strict_containment:
mark = True
for v in cell.get_vertices():
x,y = v.coordinates()
if not f(x,y):
#
# One vertex not in region -> don't mark
#
mark = False
break
else:
mark = False
for v in cell.get_vertices():
x,y = v.coordinates()
if f(x,y):
#
# One vertex in region -> mark
#
mark = True
break
if mark:
#
# Mark cell
#
cell.mark(flag)
else:
raise Exception('Entity %s not supported'%(entity_type))
else:
#
# Region may lie within interior of the domain
#
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
#
# Iterate over mesh cells
#
if entity_type=='vertex':
#
# Mark vertices
#
for v in cell.get_vertices():
x,y = v.coordinates()
if f(x,y):
#
# Mark vertex
#
v.mark(flag)
elif entity_type=='half_edge':
#
# Mark half-edges
#
for he in cell.get_half_edges():
if strict_containment:
mark = True
for v in he.get_vertices():
x,y = v.coordinates()
if not f(x,y):
#
# Single vertex outside region disqualifies half_edge
#
mark = False
break
else:
mark = False
for v in he.get_vertices():
x,y = v.coordinates()
if f(x,y):
#
# Single vertex in region -> mark half_edge
#
mark = True
break
if mark:
#
# Mark half_edge
#
he.mark(flag)
elif entity_type=='cell':
#
# Mark cells
#
if strict_containment:
#
# All vertices must be in region
#
mark = True
for v in cell.get_vertices():
x,y = v.coordinates()
if not f(x,y):
mark = False
break
else:
#
# Only one vertex need be in region
#
mark = False
for v in cell.get_vertices():
x,y = v.coordinates()
if f(x,y):
mark = True
break
if mark:
#
# Mark cell
#
cell.mark(flag)
def tear_region(self, flag, subforest_flag=None):
"""
Tear the domain along an interior half-edge region.
As a consequence,
- Vertices on either side of the half-edge are separate
(although they still have the same coordinates).
- Adjoining half-edges along the region will no longer be
neighbors of each other.
Inputs:
flag: str/int/tuple, flag specifying the region of half-edges
subforest_flag: str/int/tuple, flag specifying the submesh
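        Example (sketch): tear along the vertical line x = 0.5,
            mesh.mark_region('slit', lambda x,y: np.isclose(x,0.5),
                             entity_type='half_edge')
            mesh.tear_region('slit')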
"""
#
# Iterate over half-edges along region
#
for he in self.get_region(flag=flag, entity_type='half_edge',
subforest_flag=subforest_flag):
#
# Assign New Vertices to half-edge
#
base = Vertex(he.base().coordinates())
head = Vertex(he.head().coordinates())
he.set_vertices(base, head)
#
# Disassociate from neighboring half-edge
#
twin = he.twin()
twin.delete_twin()
he.delete_twin()
def get_region(self, flag=None, entity_type='vertex', on_boundary=False,
subforest_flag=None, return_cells=False):
"""
Returns a list of entities marked with the specified flag
Inputs:
flag: str/int/tuple, entity marker
entity_type: str, type of entity to be returned
('vertex', 'cell', or 'half_edge')
on_boundary: bool, if True, seek region only along boundary
subforest_flag: str/int/tuple, submesh flag
return_cells: bool, if True, return a list of tuples of the form
(entity, cell)
Outputs:
region_entities: list, or Cells/Intervals/HalfEdges/Vertices
located within region.
"""
debug = False
region_entities = set()
if on_boundary:
if debug: print('On boundary')
#
# Region is a subset of the boundary
#
for segment in self.get_boundary_segments(subforest_flag=subforest_flag):
#
# Iterate over boundary segments
#
for he in segment:
#
# Iterate over boundary edges
#
if entity_type=='cell':
#
# Get cell associated with half-edge
#
cell = he.cell()
#
# Add cell to set
#
add_entity = flag is None or cell.is_marked(flag)
if add_entity:
if return_cells:
#
# Return containing cell as cell
#
region_entities.add((cell, cell))
else:
#
# Return only entity
#
region_entities.add(cell)
elif entity_type=='half_edge':
#
# Half-edge
#
add_entity = flag is None or he.is_marked(flag)
if add_entity:
if return_cells:
#
# Return half-edge and cell
#
cell = he.cell()
region_entities.add((he, cell))
else:
#
# Return only entity
#
region_entities.add(he)
elif entity_type=='vertex':
#
# Vertices
#
for v in he.get_vertices():
if debug:
print('considering vertex', v.coordinates())
add_entity = flag is None or v.is_marked(flag)
if debug:
print('to add?', add_entity)
print('marked?', v.is_marked(flag))
if add_entity:
if return_cells:
#
# Return containing cell and entity
#
cell = he.cell()
region_entities.add((v, cell))
else:
#
# Return only entity
#
region_entities.add(v)
else:
#
# Iterate over entire mesh.
#
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
#
# Iterate over mesh cells
#
if entity_type=='cell':
#
# Cells
#
add_entity = flag is None or cell.is_marked(flag)
if add_entity:
if return_cells:
#
# Return containing cell as cell
#
region_entities.add((cell, cell))
else:
#
# Return only entity
#
region_entities.add(cell)
elif entity_type=='half_edge':
#
# Half-Edges
#
for he in cell.get_half_edges():
add_entity = flag is None or he.is_marked(flag)
if add_entity:
if return_cells:
#
# Return half-edge and cell
#
region_entities.add((he, cell))
else:
#
# Return only entity
#
region_entities.add(he)
elif entity_type=='vertex':
#
# Vertices
#
for he in cell.get_half_edges():
for v in he.get_vertices():
add_entity = flag is None or v.is_marked(flag)
if add_entity:
if return_cells:
#
# Return containing cell and entity
#
region_entities.add((v, cell))
else:
#
# Return only entity
#
region_entities.add(v)
        return list(region_entities)
def bounding_box(self):
"""
Returns the bounding box of the mesh
"""
xy = convert_to_array(self.vertices, dim=2)
x0, x1 = xy[:,0].min(), xy[:,0].max()
y0, y1 = xy[:,1].min(), xy[:,1].max()
return x0, x1, y0, y1
def record(self, subforest_flag):
"""
Mark all cells and half-edges within current mesh with subforest_flag
"""
self.cells.record(subforest_flag)
self.half_edges.record(subforest_flag)
'''
def get_boundary_edges(self, flag=None):
"""
Returns the half-nodes on the boundary
"""
bnd_hes_unsorted = []
#
# Locate ROOT half-edges on the boundary
#
for he in self.half_edges.get_children():
if he.twin() is None:
bnd_hes_unsorted.append(he)
n_bnd = len(bnd_hes_unsorted)
#
# Sort half-edges
#
he = bnd_hes_unsorted.pop()
bnd_hes_sorted = [he]
while n_bnd>0:
for i in range(n_bnd):
nxt_he = bnd_hes_unsorted[i]
if he.head()==nxt_he.base():
bnd_hes_sorted.append(nxt_he)
he = bnd_hes_unsorted.pop(i)
n_bnd -= 1
break
#
# Get LEAF half-edges
#
bnd_hes = []
for he in bnd_hes_sorted:
bnd_hes.extend(he.get_leaves(flag=flag))
'''
class QuadMesh(Mesh2D):
"""
Two dimensional mesh with quadrilateral cells.
Note:
When coarsening and refining a QuadMesh, the HalfEdges are not deleted
Rather use submeshes.
"""
def __init__(self, dcel=None, box=None, resolution=None, x=None,
periodic=None, connectivity=None, file_path=None,
file_format='gmsh'):
#
# Initialize 2D Mesh.
#
Mesh2D.__init__(self, dcel=dcel, box=box, resolution=resolution,
periodic=periodic, x=x, connectivity=connectivity,
file_path=file_path, file_format=file_format)
self.cells = Forest(self.cells.get_children())
def bin_points(self, points, i_points=None, subforest_flag=None):
"""
Determine a list of LEAF cells in the submesh, each of which contains
at least one point in points. Return the list of tuples of LEAF cells
and point indices.
Inputs:
points: Set of admissible points
subforest_flag: submesh flag
Outputs:
bins: tuple of (cell, index) pairs detailing the bins and indices
of points.
"""
x = convert_to_array(points)
n_points = x.shape[0]
if i_points is None:
i_points = np.arange(n_points)
else:
assert n_points==len(i_points)
bins = []
for cell in self.cells.get_children(flag=subforest_flag):
in_cell = cell.contains_points(x)
if any(in_cell):
#
# Cell contains (some) points
#
# Isolate points in cell and their indices
y = x[in_cell] # subset of points
y_idx = i_points[in_cell] # index of subset
# Recursion step
c_bin = cell.bin_points(y, y_idx, subforest_flag)
bins.extend(c_bin)
# Eliminate points from list
x = x[~in_cell]
i_points = i_points[~in_cell]
assert len(x)==0, 'Some points are not in domain.'
return bins
def is_balanced(self, subforest_flag=None):
"""
Check whether the mesh is balanced
Inputs:
flag (optional): marker, allowing for the restriction to
a submesh.
"""
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
for half_edge in cell.get_half_edges():
nb = cell.get_neighbors(half_edge, flag=subforest_flag)
if nb is not None and nb.has_children(flag=subforest_flag):
twin = half_edge.twin()
for the_child in twin.get_children():
if the_child.cell().has_children(flag=subforest_flag):
return False
return True
def balance(self, subforest_flag=None):
"""
Ensure that subcells of current cell conform to the 2:1 rule
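        Cells sharing a half-edge may differ by at most one refinement
        level. A sketch of typical usage (method names as used elsewhere
        in this module):
            mesh = QuadMesh(resolution=(2,2))
            mesh.cells.get_child(0).split()
            mesh.balance()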
"""
assert self.cells.subtrees_rooted(subforest_flag)
#
# Get all LEAF cells
#
leaves = set(self.cells.get_leaves(subforest_flag=subforest_flag)) # set: no duplicates
while len(leaves)>0:
leaf = leaves.pop()
#
# For each Cell
#
is_split = False
for half_edge in leaf.get_half_edges():
#
# Look for neighbors in each direction
#
nb = leaf.get_neighbors(half_edge, flag=subforest_flag)
if nb is not None and nb.has_children(flag=subforest_flag):
#
# Check if neighbor has children (still fine)
#
twin = half_edge.twin()
for the_child in twin.get_children():
if the_child.cell().has_children(flag=subforest_flag):
#
# Neighbor has grandchildren
#
if not leaf.has_children(flag=subforest_flag):
#
# LEAF does not have any flagged children
#
if leaf.has_children():
#
# LEAF has children (just not flagged)
#
for child in leaf.get_children():
child.mark(subforest_flag)
else:
#
# LEAF needs new children.
#
leaf.split(flag=subforest_flag)
#
# Add children to the leaf nodes to be considered
#
for child in leaf.get_children():
leaves.add(child)
#
# If LEAF is split, add all its neighbors to leaves
# to be considered for splitting.
#
for half_edge in leaf.get_half_edges():
hep = half_edge.get_parent()
if hep is not None:
hep_twin = hep.twin()
if hep_twin is not None:
leaves.add(hep_twin.cell())
#
# Current LEAF cell has been split, move on to next one
#
is_split = True
break
if is_split:
#
# LEAF already split, no need to check other directions
#
break
def remove_supports(self, subforest_flag=None, coarsening_flag=None):
"""
Given a submesh (subforest_flag) and a coarsening_flag,
Input:
subforest_flag: flag specifying the submesh to be considered
coarsening_flag: flag specifying the cells to be removed
during coarsening
TODO: Unfinished. Loop over cells to be coarsened. Check if it's
safe to coarsen neighbors.
"""
#
# Get all flagged LEAF nodes
#
leaves = self.get_leaves(subforest_flag=subforest_flag,
coarsening_flag=coarsening_flag)
while len(leaves) > 0:
#
# For each LEAF
#
leaf = leaves.pop()
#
# Check if leaf is a support leaf
#
if subforest_flag is None:
is_support = leaf.is_marked('support')
else:
is_support = leaf.is_marked((subforest_flag, 'support'))
if is_support:
#
# Check whether its safe to delete the support cell
#
safe_to_coarsen = True
for half_edge in leaf.get_half_edges():
nb = leaf.get_neighbor(half_edge, flag=subforest_flag)
if nb is not None and nb.has_children(flag=subforest_flag):
#
# Neighbor has (flagged) children, coarsening will lead
# to an unbalanced tree
#
safe_to_coarsen = False
break
if safe_to_coarsen:
#
# Remove support by marking self with coarsening flag
#
                    leaf.mark(coarsening_flag)
leaves.append(leaf.get_parent())
'''
class TriCell(object):
"""
TriCell object
Attributes:
Methods:
"""
def __init__(self, vertices, parent=None):
"""
Inputs:
vertices: Vertex, list of three vertices (ordered counter-clockwise)
parent: QuadCell that contains triangle
"""
v = []
e = []
assert len(vertices) == 3, 'Must have exactly 3 vertices.'
for i in range(3):
#
# Define vertices and Half-Edges with minimun information
#
v.append(Vertex(vertices[i],2))
#
# Some edge on outerboundary
#
self.outer_component = e[0]
for i in range(3):
#
# Half edge originating from v[i]
#
v[i].incident_edge = e[i]
#
# Edges preceding/following e[i]
#
j = np.remainder(i+1,3)
e[i].next = e[j]
e[j].previous = e[i]
#
# Incident face
#
e[i].incident_face = self
self.parent_node = parent
self.__vertices = v
self.__edges = [
Edge(vertices[0], vertices[1], parent=self), \
Edge(vertices[1], vertices[2], parent=self), \
Edge(vertices[2], vertices[0], parent=self)
]
self.__element_no = None
self._flags = set()
def vertices(self,n):
return self.__vertices[n]
def edges(self):
return self.__edges
def area(self):
"""
Compute the area of the triangle
"""
v = self.__vertices
a = [v[1].coordinates()[i] - v[0].coordinates()[i] for i in range(2)]
b = [v[2].coordinates()[i] - v[0].coordinates()[i] for i in range(2)]
return 0.5*abs(a[0]*b[1]-a[1]*b[0])
def unit_normal(self, edge):
#p = ((y1-y0)/nnorm,(x0-x1)/nnorm)
pass
def number(self, num, overwrite=False):
"""
Assign a number to the triangle
"""
if self.__element_no == None or overwrite:
self.__element_no = num
else:
raise Warning('Element already numbered. Overwrite disabled.')
return
def get_neighbor(self, edge, tree):
"""
Find neighboring triangle across edge wrt a given tree
"""
pass
def mark(self, flag=None):
"""
Mark TriCell
Inputs:
flag: optional label used to mark cell
"""
if flag is None:
self._flags.add(True)
else:
self._flags.add(flag)
def unmark(self, flag=None, recursive=False):
"""
Remove label from TriCell
Inputs:
flag: label to be removed
recursive: bool, also unmark all subcells
"""
#
# Remove label from own list
#
if flag is None:
# No flag specified -> delete all
self._flags.clear()
else:
# Remove specified flag (if present)
if flag in self._flags: self._flags.remove(flag)
#
# Remove label from children if applicable
#
if recursive and self.has_children():
for child in self.children.values():
child.unmark(flag=flag, recursive=recursive)
def is_marked(self,flag=None):
"""
Check whether cell is marked
Input: flag, label for QuadCell: usually one of the following:
True (catchall), 'split' (split cell), 'count' (counting)
TODO: Possible to add/remove set? Useful?
"""
if flag is None:
# No flag -> check whether set is empty
if self._flags:
return True
else:
return False
else:
# Check wether given label is contained in cell's set
return flag in self._flags
'''
|
hvanwyk/quadmesh
|
src/mesh.py
|
Python
|
mit
| 263,904
|
# Generated by Django 2.0.10 on 2019-05-12 17:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0020_auto_20190507_0150'),
('news', '0019_auto_20190512_1608'),
]
operations = [
migrations.AddField(
model_name='article',
name='author',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='userprofile.Profile'),
),
migrations.AddField(
model_name='event',
name='author',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='userprofile.Profile'),
),
]
|
hackerspace-ntnu/website
|
news/migrations/0020_auto_20190512_1744.py
|
Python
|
mit
| 780
|
# coding: utf-8
# # Simple Character-level Language Model using vanilla RNN
# 2017-04-21 jkang
# Python3.5
# TensorFlow1.0.1
#
# - Different window sizes were applied, e.g. n_window = 3 (three-character window)
# - input: 'hello_world_good_morning_see_you_hello_grea'
# - output: 'ello_world_good_morning_see_you_hello_great'
#
# ### Reference:
# - https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
# - https://github.com/aymericdamien/TensorFlow-Examples
# - https://hunkim.github.io/ml/
#
# ### Comment:
# - Training is done at the character level rather than the word level
# - Only a single example is used for training
#   : the one example is windowed to generate multiple samples (each of size window_size)
# - BasicRNNCell is used as the cell type (see the first reference)
# - dynamic_rnn is used (said to be more time/computation efficient than the older tf.nn.rnn)
# - AdamOptimizer is used
# In[1]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random
# Input/Output data
char_raw = 'hello_world_good_morning_see_you_hello_great'
char_list = sorted(list(set(char_raw)))
char_to_idx = {c: i for i, c in enumerate(char_list)}
idx_to_char = {i: c for i, c in enumerate(char_list)}
char_data = [char_to_idx[c] for c in char_raw]
char_data_one_hot = tf.one_hot(char_data, depth=len(
char_list), on_value=1., off_value=0., axis=1, dtype=tf.float32)
char_input = char_data_one_hot[:-1, :] # 'hello_world_good_morning_see_you_hello_grea'
char_output = char_data_one_hot[1:, :] # 'ello_world_good_morning_see_you_hello_great'
with tf.Session() as sess:
char_input = char_input.eval()
char_output = char_output.eval()
# In[2]:
# Learning parameters
learning_rate = 0.001
max_iter = 1000
# Network Parameters
n_input_dim = char_input.shape[1]
n_input_len = char_input.shape[0]
n_output_dim = char_output.shape[1]
n_output_len = char_output.shape[0]
n_hidden = 100
n_window = 2 # number of characters in one window (like a mini-batch)
# TensorFlow graph
# (batch_size) x (time_step) x (input_dimension)
x_data = tf.placeholder(tf.float32, [None, None, n_input_dim])
# (batch_size) x (time_step) x (output_dimension)
y_data = tf.placeholder(tf.float32, [None, None, n_output_dim])
# Parameters
weights = {
'out': tf.Variable(tf.truncated_normal([n_hidden, n_output_dim]))
}
biases = {
'out': tf.Variable(tf.truncated_normal([n_output_dim]))
}
# In[3]:
def make_window_batch(x, y, window_size):
'''
This function will generate samples based on window_size from (x, y)
Although (x, y) is one example, it will create multiple examples with the length of window_size
x: (time_step) x (input_dim)
y: (time_step) x (output_dim)
    x_out: (batch_size) x (window_size) x (input_dim)
    y_out: (batch_size) x (window_size) x (output_dim)
    where batch_size = time_step - window_size + 1
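    Example (sketch): with x of shape (5, input_dim) and window_size=3,
    n_examples = 5 - 3 + 1 = 3, so x_out has shape (3, 3, input_dim)
    (the windows are shuffled before being returned).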
'''
# (batch_size) x (window_size) x (dim)
# n_examples is calculated by sliding one character with window_size
n_examples = x.shape[0] - window_size + 1 # n_examples = batch_size
x_batch = np.empty((n_examples, window_size, x.shape[1]))
y_batch = np.empty((n_examples, window_size, y.shape[1]))
for i in range(n_examples):
x_batch[i, :, :] = x[i:i + window_size, :]
y_batch[i, :, :] = y[i:i + window_size, :]
z = list(zip(x_batch, y_batch))
random.shuffle(z)
x_batch, y_batch = zip(*z)
x_batch = np.array(x_batch)
y_batch = np.array(y_batch)
    # (batch_size) x (window_size) x (dim)
    # all windows are returned as a single batch (no mini-batching)
    x_new = x_batch.reshape((n_examples, window_size, x_batch.shape[2]))
    y_new = y_batch.reshape((n_examples, window_size, y_batch.shape[2]))
return x_new, y_new, n_examples
# In[4]:
def RNN(x, weights, biases):
cell = tf.contrib.rnn.BasicRNNCell(n_hidden) # Make RNNCell
outputs, states = tf.nn.dynamic_rnn(cell, x, time_major=False, dtype=tf.float32)
'''
**Notes on tf.nn.dynamic_rnn**
- 'x' can have shape (batch)x(time)x(input_dim), if time_major=False or
(time)x(batch)x(input_dim), if time_major=True
    - 'outputs' has the same layout as 'x' but with hidden_dim as the last axis:
      (batch)x(time)x(hidden_dim), if time_major=False or
      (time)x(batch)x(hidden_dim), if time_major=True
- 'states' is the final state, determined by batch and hidden_dim
'''
# outputs[-1] is outputs for the last example in the mini-batch
return tf.matmul(outputs[-1], weights['out']) + biases['out']
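# Shape sketch for RNN() above (illustrative, follows from the definitions in
# this file): with x of shape (batch, time, n_input_dim) and time_major=False,
# 'outputs' has shape (batch, time, n_hidden); outputs[-1] is therefore the
# (time, n_hidden) slice for the last batch element, so the matmul returns
# (time, n_output_dim) logits, one row per time step.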
def softmax(x):
rowmax = np.max(x, axis=1)
    x -= rowmax.reshape((x.shape[0], 1))  # for numerical stability
    x = np.exp(x)
    sum_x = np.sum(x, axis=1).reshape((x.shape[0], 1))
return x / sum_x
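# Illustrative check of the stability trick above: without subtracting the row
# max, np.exp would overflow for large logits; with it, exp only ever sees
# non-positive values, e.g.
#   softmax(np.array([[1000., 1001.]]))  # exp operates on [-1., 0.]
#   # -> approximately [[0.269, 0.731]]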
pred = RNN(x_data, weights, biases)
cost = tf.reduce_mean(tf.squared_difference(pred, y_data))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# In[5]:
# Learning
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(max_iter):
loss = 0
x_batch, y_batch, n_examples = make_window_batch(char_input, char_output, n_window)
for ibatch in range(x_batch.shape[0]):
x_train = x_batch[ibatch, :, :].reshape((1,-1,n_input_dim))
y_train = y_batch[ibatch, :, :].reshape((1,-1,n_output_dim))
x_test = char_input.reshape((1, n_input_len, n_input_dim))
            y_test = char_output.reshape((1, n_output_len, n_output_dim))
c, _ = sess.run([cost, optimizer], feed_dict={
x_data: x_train, y_data: y_train})
p = sess.run(pred, feed_dict={x_data: x_test, y_data: y_test})
loss += c
mean_mse = loss / n_examples
if i == (max_iter-1):
pred_act = softmax(p)
if (i+1) % 100 == 0:
pred_out = np.argmax(p, axis=1)
accuracy = np.sum(char_data[1:] == pred_out)/n_output_len*100
print('Epoch:{:>4}/{},'.format(i+1,max_iter),
'Cost:{:.4f},'.format(mean_mse),
'Acc:{:>.1f},'.format(accuracy),
'Predict:', ''.join([idx_to_char[i] for i in pred_out]))
# In[6]:
# Probability plot
fig, ax = plt.subplots()
fig.set_size_inches(15,20)
plt.title('Input Sequence', y=1.08, fontsize=20)
plt.xlabel('Probability of Next Character(y) Given Current One(x)'+
'\n[window_size={}, accuracy={:.1f}]'.format(n_window, accuracy),
fontsize=20, y=1.5)
plt.ylabel('Character List', fontsize=20)
plot = plt.imshow(pred_act.T, cmap=plt.get_cmap('plasma'))
fig.colorbar(plot, fraction=0.015, pad=0.04)
plt.xticks(np.arange(len(char_data)-1), list(char_raw)[:-1], fontsize=15)
plt.yticks(np.arange(len(char_list)), [idx_to_char[i] for i in range(len(char_list))], fontsize=15)
ax.xaxis.tick_top()
# Annotate
for i, idx in zip(range(len(pred_out)), pred_out):
annotation = idx_to_char[idx]
ax.annotate(annotation, xy=(i-0.2, idx+0.2), fontsize=12)
plt.show()
# f.savefig('result_' + idx + '.png')
|
jaekookang/useful_bits
|
Machine_Learning/RNN_LSTM/predict_character/rnn_char_windowing.py
|
Python
|
mit
| 7,262
|
from mindfeed.mindfeed import main
if __name__ == "__main__":
main()
|
zeckalpha/mindfeed
|
mindfeed/__init__.py
|
Python
|
mit
| 75
|
import falcon
import json
class QuoteResource:
def on_get(self, req, resp):
"""Handles GET requests"""
quote = {
'quote': 'I\'ve always been more interested in the future than in the past.',
'author': 'Grace Hopper'
}
resp.body = json.dumps(quote)
api = falcon.API()
api.add_route('/quote', QuoteResource())
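# A minimal usage sketch (assumes a WSGI server such as gunicorn is installed;
# the module path below follows this file's location and may differ):
#   $ gunicorn server.server:api
#   $ curl http://localhost:8000/quote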
|
lotrekagency/heimdall
|
server/server.py
|
Python
|
mit
| 371
|
from __future__ import absolute_import
from celery import shared_task
import os.path
import logging
import csv
from django.core.exceptions import ObjectDoesNotExist
from .RandomAuthorSet import RandomAuthorSet
from ..CitationFinder import CitationFinder, EmptyPublicationSetException
from scholarly_citation_finder import config
from scholarly_citation_finder.lib.file import create_dir
logger = logging.getLogger(__name__)
AUTHOR_SET_FILENAME = 'authors.csv'
@shared_task
def evaluation_create_author_set(name, setsize, num_min_publications, database='mag'):
'''
Task to create a random author set.
:param name: Evaluation name
:param setsize: Size of the site, i.e. the number of authors
:param num_min_publications: Minimum number of an author's publications
:param database: Database name
'''
dir = create_dir(os.path.join(config.EVALUATION_DIR, name))
author_set = RandomAuthorSet(database=database)
logger.info('{} -- create random author set of size {}'.format(name, setsize))
author_set.create(setsize=setsize, num_min_publications=num_min_publications)
logger.info('{} -- create random author set done'.format(name))
filename_author_set = author_set.store(os.path.join(dir, AUTHOR_SET_FILENAME))
#for author_info in e.get():
# author_id = author_info['author_id']
# pass
return filename_author_set
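# A hedged usage sketch for the task above (assumes a Celery broker/worker is
# already configured elsewhere in the project; argument values are made up):
#   evaluation_create_author_set.delay('eval1', setsize=100,
#                                      num_min_publications=5)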
@shared_task
def evaluation_run(name, strategies):
'''
Evaluation run task.
:param name: Evaluation name
:param strategies: List of strategies
'''
evaluation_dir = os.path.join(config.EVALUATION_DIR, name)
with open(os.path.join(evaluation_dir, AUTHOR_SET_FILENAME)) as author_set_file:
reader = csv.DictReader(author_set_file)
for row in reader:
if len(row) == 3:
try:
strategies_result = evaluation_citations(author_id=row['author_id'], evaluation_name=name, strategies=strategies)
for strategy_result in strategies_result:
__store_evaluation_result(path=evaluation_dir,
filename=strategy_result['strategy_name'],
row=[row['author_id'],
row['num_citations'],
row['num_publications'],
strategy_result['num_inspected_publications'],
strategy_result['num_citations']])
except(EmptyPublicationSetException):
continue
except(ObjectDoesNotExist) as e:
raise e
return True
@shared_task
def evaluation_citations(author_id, strategies=None, evaluation_name='default'):
'''
Evaluation run view.
:param author_id: Author ID
:param strategies: List of strategies
:param evaluation_name: Evaluation name
:raise ObjectDoesNotExits:
:raise MultipleObjectsReturned:
:raise EmptyPublicationSetException:
'''
result = []
try:
citationfinder = CitationFinder(database='mag', evaluation=True)
author_id, length_publication_set = citationfinder.publication_set.set_by_author(id=int(author_id))
logger.info('{} author: set {} publications'.format(author_id, length_publication_set))
citationfinder.load_stored_citations()
for strategy in strategies:
strategy_name = citationfinder.run(strategy)
logger.info('{}: finished strategy "{}"'.format(author_id, strategy_name))
num_inspected_publications, num_citations = citationfinder.store_evaluation(path=create_dir(os.path.join(config.EVALUATION_DIR, evaluation_name, strategy_name)),
filename=author_id)
result.append({'strategy_name': strategy_name,
'num_inspected_publications': num_inspected_publications,
'num_citations': num_citations})
return result
except(ObjectDoesNotExist) as e:
raise e
except(EmptyPublicationSetException) as e:
raise e
def __store_evaluation_result(path, filename, row):
'''
Store evaluation result.
:param path: Path
:param filename: Name of the file
:param row: Row to append to the file
'''
filename = os.path.join(path, 'meta_{}.csv'.format(filename))
file_exists = os.path.isfile(filename)
try:
with open(filename, 'a+') as csvfile:
writer = csv.writer(csvfile)
if not file_exists:
writer.writerow(['author_id', 'author_num_citations', 'author_num_publications', 'num_inspected_publications', 'num_citations'])
writer.writerow(row)
return filename
except(IOError) as e:
raise e
|
citationfinder/scholarly_citation_finder
|
scholarly_citation_finder/apps/citation/evaluation/tasks.py
|
Python
|
mit
| 5,022
|
"""
helloworld.py
Author: Nils Kingston
Credit: none
Assignment:
Write and submit a Python program that prints the following:
Hello, world!
"""
print("Hello, world!")
|
nilskingston/Hello-world
|
helloworld.py
|
Python
|
mit
| 170
|
#!/usr/bin/env python3
# Fortwrangler is a tool that attempts to resolve issues with fortran lines over standard length.
# Global libraries
import sys
# Global variables
# Strings inserted for continuation
CONTINUATION_ENDLINE = "&\n"
CONTINUATION_STARTLINE = " &"
# Line length settings
MIN_LENGTH = len(CONTINUATION_STARTLINE) + len(CONTINUATION_ENDLINE) + 1
FIXED_LINE_LENGTH = 80 # We don't actually do fixed format files, but I prefer 80 col anyway.
FREE_LINE_LENGTH = 132
DEFAULT_LINE_LENGTH = FREE_LINE_LENGTH
# I/O settings
STDERR = sys.stderr
STDOUT = sys.stdout
# We can't use Python's string splitter as we want to handle string literals properly.
def string_split(s, sep=" "):
inquotes=False
retlist = []
token = ""
for character in s.strip():
if character == sep and not inquotes:
if not (token == ""):
token = token + sep
retlist.append(token)
token = ""
else:
token = token + character
elif character == '"' and not inquotes:
inquotes = True
token = token + character
elif character == '"' and inquotes:
inquotes = False
token = token + character
else:
token = token + character
if not (token == ""):
retlist.append(token)
return retlist
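# Illustrative behaviour of string_split (traced by hand, not taken from the
# repo's tests): quoted literals are kept as single tokens, and every token
# except the last keeps its trailing separator.
#   string_split('say "hello world" now')
#   # -> ['say ', '"hello world" ', 'now']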
# Fix a given file.
def force_fix_file(filename, maxlength=DEFAULT_LINE_LENGTH, output=STDOUT):
with open(filename) as infile:
for line in infile:
if len(line) > maxlength + 1:
            tempstr = line[:len(line) - len(line.lstrip())]  # keep leading indentation
tokens = string_split(line)
index = 0
for t in tokens:
if t == "!":
# Comments can be longer because the compiler just ignores them.
tempstr = tempstr + " ".join(tokens[index:len(tokens)])
break
else:
if (len(tempstr + t + " " + CONTINUATION_ENDLINE)) < maxlength + 1:
tempstr = tempstr + t + " "
else:
if (t.startswith('"') and t.endswith('"')):
tempstr = tempstr + t + " "
while (len(tempstr) > maxlength + 1):
outstr = tempstr[:(maxlength-1)] + CONTINUATION_ENDLINE
output.write(outstr)
tempstr = CONTINUATION_STARTLINE + tempstr[(maxlength-1):]
output.write(tempstr)
tempstr=""
else:
output.write(tempstr + " " + CONTINUATION_ENDLINE)
tempstr=CONTINUATION_STARTLINE + " " + t + " "
index += 1
output.write(tempstr + "\n")
else:
output.write(line)
# Only fix files if the violate the length rules!
def fix_file(filename, maxlength=DEFAULT_LINE_LENGTH, output=STDOUT):
if not check_file(filename):
force_fix_file(filename, maxlength, output)
else:
STDERR.write(filename + " not over line length, not modifying\n")
# Check to see if a file has lines longer than allowed, optionally report.
def check_file(filename, maxlength=DEFAULT_LINE_LENGTH, report=None):
overlengthlines = {}
counter = 0
with open(filename) as f:
for line in f:
counter += 1
if (len(line)) > maxlength + 1: # New lines count in Python line length.
overlengthlines[counter] = len(line)
if report != None:
report.write(filename + ": " + str(len(overlengthlines)) + "\n")
for a in sorted(overlengthlines.keys()):
report.write(str(a) + ": " + str(overlengthlines[a]) + "\n")
return len(overlengthlines) == 0
# Our main procedure.
# Arguments at the command-line:
# -o <file> - write out to file instead of stdout
# -i <extension> - do in place
# -c - check only
# -w <number> - set line length
def main():
import argparse
#check_file("example.f90", report=STDERR)
#fix_file("example.f")
maxlength = DEFAULT_LINE_LENGTH
output = STDOUT
parser = argparse.ArgumentParser(description="Fix free format Fortran files with invalid line lengths.")
parser.add_argument("-c", action="store_true", help="Check only.")
parser.add_argument("-i", metavar="ext", type=str, help="Do in place, back up copy with extension specified.")
parser.add_argument("-w", metavar="linelength", type=int, help="Custom line length.")
parser.add_argument("-o", metavar="outputfilename", type=str, help="Output to a file instead of STDOUT.")
parser.add_argument("files", metavar="file", type=str, nargs="+",help="Files to fix.")
args=parser.parse_args()
if args.w != None:
if args.w >= MIN_LENGTH:
maxlength = args.w
else:
STDERR.write("Error - you have specified a length [" + str(args.w) + "] smaller than the minimum possible ["+ str(MIN_LENGTH) + "]\n")
sys.exit(2)
if args.o and args.i:
STDERR.write("Error - you cannot both write output to a separate file and write it in place.\n")
sys.exit(1)
else:
if args.o != None:
outfile = open(args.o, 'w')
output = outfile
if args.c:
for a in args.files:
check_file(a, maxlength=maxlength, report=output)
elif args.i != None:
import os
for a in args.files:
if not check_file(a):
STDERR.write("Fixing file: " + a + "\n")
os.rename(a, a + args.i)
inplacefile = open(a, 'w')
force_fix_file(a + args.i, maxlength=maxlength, output=inplacefile)
inplacefile.close()
else:
for a in args.files:
fix_file(a, maxlength=maxlength, output=output)
if args.o != None:
outfile.close()
if __name__ == "__main__":
main()
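# Illustrative CLI usage (options as defined in the argparse setup above;
# filenames are placeholders):
#   $ ./fortwrangler.py -c big.f90              # report over-length lines only
#   $ ./fortwrangler.py -w 72 -o fixed.f90 in.f90
#   $ ./fortwrangler.py -i .bak *.f90           # fix in place, keep .bak backups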
|
owainkenwayucl/utils
|
src/fortwrangler.py
|
Python
|
mit
| 6,251
|
import socket
import pytest
import mock
from pygelf import GelfTcpHandler, GelfUdpHandler, GelfHttpHandler, GelfTlsHandler, GelfHttpsHandler
from tests.helper import logger, get_unique_message, log_warning, log_exception
SYSLOG_LEVEL_ERROR = 3
SYSLOG_LEVEL_WARNING = 4
@pytest.fixture(params=[
GelfTcpHandler(host='127.0.0.1', port=12201),
GelfUdpHandler(host='127.0.0.1', port=12202),
GelfUdpHandler(host='127.0.0.1', port=12202, compress=False),
GelfHttpHandler(host='127.0.0.1', port=12203),
GelfHttpHandler(host='127.0.0.1', port=12203, compress=False),
GelfTlsHandler(host='127.0.0.1', port=12204),
GelfHttpsHandler(host='127.0.0.1', port=12205, validate=False),
GelfHttpsHandler(host='localhost', port=12205, validate=True, ca_certs='tests/config/cert.pem'),
GelfTlsHandler(host='127.0.0.1', port=12204, validate=True, ca_certs='tests/config/cert.pem'),
])
def handler(request):
return request.param
def test_simple_message(logger):
message = get_unique_message()
graylog_response = log_warning(logger, message)
assert graylog_response['message'] == message
assert graylog_response['level'] == SYSLOG_LEVEL_WARNING
assert 'full_message' not in graylog_response
assert 'file' not in graylog_response
assert 'module' not in graylog_response
assert 'func' not in graylog_response
assert 'logger_name' not in graylog_response
assert 'line' not in graylog_response
def test_formatted_message(logger):
message = get_unique_message()
template = message + '_%s_%s'
graylog_response = log_warning(logger, template, args=('hello', 'gelf'))
assert graylog_response['message'] == message + '_hello_gelf'
assert graylog_response['level'] == SYSLOG_LEVEL_WARNING
assert 'full_message' not in graylog_response
def test_full_message(logger):
message = get_unique_message()
try:
raise ValueError(message)
except ValueError as e:
graylog_response = log_exception(logger, message, e)
assert graylog_response['message'] == message
assert graylog_response['level'] == SYSLOG_LEVEL_ERROR
assert message in graylog_response['full_message']
assert 'Traceback (most recent call last)' in graylog_response['full_message']
assert 'ValueError: ' in graylog_response['full_message']
assert 'file' not in graylog_response
assert 'module' not in graylog_response
assert 'func' not in graylog_response
assert 'logger_name' not in graylog_response
assert 'line' not in graylog_response
def test_source(logger):
original_source = socket.gethostname()
with mock.patch('socket.gethostname', return_value='different_domain'):
message = get_unique_message()
graylog_response = log_warning(logger, message)
assert graylog_response['source'] == original_source
|
keeprocking/pygelf
|
tests/test_common_fields.py
|
Python
|
mit
| 2,884
|
#!/usr/bin/env python
from setuptools import setup, find_packages
try:
from pyqt_distutils.build_ui import build_ui
cmdclass = {'build_ui': build_ui}
except ImportError:
cmdclass = {}
setup(
name='foo',
version='0.1',
packages=find_packages(),
license='MIT',
author='Colin Duquesnoy',
author_email='colin.duquesnoy@gmail.com',
description='Example of use of pyqt',
cmdclass=cmdclass,
)
|
ColinDuquesnoy/pyqt_distutils
|
example/PySide/setup.py
|
Python
|
mit
| 432
|
from __future__ import absolute_import, unicode_literals, division
import hashlib
import hmac
import time
from quadriga.exceptions import RequestError
class RestClient(object):
"""REST client using HMAC SHA256 authentication.
:param url: QuadrigaCX URL.
:type url: str | unicode
:param api_key: QuadrigaCX API key.
:type api_key: str | unicode
:param api_secret: QuadrigaCX API secret.
:type api_secret: str | unicode
:param client_id: QuadrigaCX client ID (number used for user login).
:type client_id: str | unicode | int
:param timeout: Number of seconds to wait for QuadrigaCX to respond to an
API request.
:type timeout: int | float
:param session: User-defined requests.Session object.
:type session: requests.Session
"""
http_success_status_codes = {200, 201, 202}
def __init__(self, url, api_key, api_secret, client_id, timeout, session):
self._url = url
self._api_key = str(api_key)
self._hmac_key = str(api_secret).encode('utf-8')
self._client_id = str(client_id)
self._timeout = timeout
self._session = session
def _handle_response(self, resp):
"""Handle the response from QuadrigaCX.
:param resp: Response from QuadrigaCX.
:type resp: requests.models.Response
:return: Response body.
:rtype: dict
:raise quadriga.exceptions.RequestError: If HTTP OK was not returned.
"""
http_code = resp.status_code
if http_code not in self.http_success_status_codes:
raise RequestError(
response=resp,
message='[HTTP {}] {}'.format(http_code, resp.reason)
)
try:
body = resp.json()
except ValueError:
raise RequestError(
response=resp,
message='[HTTP {}] response body: {}'.format(
http_code,
resp.text
)
)
else:
if 'error' in body:
error_code = body['error'].get('code', '?')
raise RequestError(
response=resp,
message='[HTTP {}][ERR {}] {}'.format(
resp.status_code,
error_code,
body['error'].get('message', 'no error message')
),
error_code=error_code
)
return body
def get(self, endpoint, params=None):
"""Send an HTTP GET request to QuadrigaCX.
:param endpoint: API endpoint.
:type endpoint: str | unicode
:param params: URL parameters.
:type params: dict
:return: Response body from QuadrigaCX.
:rtype: dict
:raise quadriga.exceptions.RequestError: If HTTP OK was not returned.
"""
response = self._session.get(
url=self._url + endpoint,
params=params,
timeout=self._timeout
)
return self._handle_response(response)
def post(self, endpoint, payload=None):
"""Send an HTTP POST request to QuadrigaCX.
:param endpoint: API endpoint.
:type endpoint: str | unicode
:param payload: Request payload.
:type payload: dict
:return: Response body from QuadrigaCX.
:rtype: dict
:raise quadriga.exceptions.RequestError: If HTTP OK was not returned.
"""
nonce = int(time.time() * 10000)
hmac_msg = str(nonce) + self._client_id + self._api_key
signature = hmac.new(
key=self._hmac_key,
msg=hmac_msg.encode('utf-8'),
digestmod=hashlib.sha256
).hexdigest()
if payload is None:
payload = {}
payload['key'] = self._api_key
payload['nonce'] = nonce
payload['signature'] = signature
response = self._session.post(
url=self._url + endpoint,
json=payload,
timeout=self._timeout
)
return self._handle_response(response)
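# A minimal sketch of the HMAC SHA256 signing performed in post() above
# (credentials are placeholders, not real values):
#   import hashlib, hmac, time
#   nonce = int(time.time() * 10000)
#   message = str(nonce) + '123456' + 'my_api_key'  # nonce + client_id + api_key
#   signature = hmac.new(key=b'my_api_secret',
#                        msg=message.encode('utf-8'),
#                        digestmod=hashlib.sha256).hexdigest()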
|
joowani/quadriga
|
quadriga/rest.py
|
Python
|
mit
| 4,123
|
# This license covers everything within this project, except for a few pieces
# of code that we either did not write ourselves or which we derived from code
# that we did not write ourselves. These few pieces have their license specified
# in a header, or by a file called LICENSE.txt, which will explain exactly what
# it covers. The few relevant pieces of code are all contained inside these
# directories:
#
# - pwnlib/constants/
# - pwnlib/data/
#
#
# Copyright (c) 2015 Gallopsled and Zach Riggle
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import stat
def which(name, all = False):
"""which(name, flags = os.X_OK, all = False) -> str or str set
Works as the system command ``which``; searches $PATH for ``name`` and
returns a full path if found.
If `all` is :const:`True` the set of all found locations is returned, else
the first occurrence or :const:`None` is returned.
Arguments:
`name` (str): The file to search for.
`all` (bool): Whether to return all locations where `name` was found.
Returns:
If `all` is :const:`True` the set of all locations where `name` was found,
else the first location or :const:`None` if not found.
Example:
>>> which('sh')
'/bin/sh'
"""
# If name is a path, do not attempt to resolve it.
if os.path.sep in name:
return name
isroot = os.getuid() == 0
out = set()
try:
path = os.environ['PATH']
except KeyError:
log.exception('Environment variable $PATH is not set')
for p in path.split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, os.X_OK):
st = os.stat(p)
if not stat.S_ISREG(st.st_mode):
continue
# work around this issue: https://bugs.python.org/issue9311
if isroot and not \
st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
continue
if all:
out.add(p)
else:
return p
if all:
return out
else:
return None
|
pwndbg/pwndbg
|
pwndbg/which.py
|
Python
|
mit
| 3,111
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
#import struct
from pycket import impersonators as imp
from pycket import values, values_string
from pycket.cont import continuation, loop_label, call_cont
from pycket.arity import Arity
from pycket import values_parameter
from pycket import values_struct
from pycket import values_regex
from pycket import vector as values_vector
from pycket.error import SchemeException, UserException
from pycket.foreign import W_CPointer, W_CType
from pycket.hash.equal import W_EqualHashTable
from pycket.hash.base import W_HashTable
from pycket.hash.simple import (W_EqImmutableHashTable, W_EqvImmutableHashTable, W_EqMutableHashTable, W_EqvMutableHashTable, make_simple_immutable_table)
from pycket.prims.expose import (unsafe, default, expose, expose_val, prim_env,
procedure, define_nyi, subclass_unsafe, make_procedure)
from pycket.prims.primitive_tables import *
from pycket.prims import string
from pycket.racket_paths import racket_sys_paths
from pycket.env import w_global_config
from rpython.rlib import jit, objectmodel, unroll, rgc
from rpython.rlib.rsre import rsre_re as re
# import for side effects
from pycket.prims import control
from pycket.prims import continuation_marks
from pycket.prims import char
from pycket.prims import box
from pycket.prims import equal as eq_prims
from pycket.prims import foreign
from pycket.prims import hash
from pycket.prims import impersonator
from pycket.prims import input_output
from pycket.prims import logging
from pycket.prims import numeric
from pycket.prims import parameter
from pycket.prims import random
from pycket.prims import regexp
from pycket.prims import string
from pycket.prims import struct_structinfo
from pycket.prims import undefined
from pycket.prims import vector
from rpython.rlib import jit
def make_pred(name, cls):
@expose(name, [values.W_Object], simple=True)
def predicate_(a):
return values.W_Bool.make(isinstance(a, cls))
predicate_.__name__ += cls.__name__
def make_dummy_char_pred(name):
@expose(name, [values.W_Character], simple=True)
def predicate_(a):
return values.w_false
predicate_.__name__ += name
def make_pred_eq(name, val):
typ = type(val)
@expose(name, [values.W_Object], simple=True)
def pred_eq(a):
return values.W_Bool.make(a is val)
for args in [
("output-port?", values.W_OutputPort),
("pair?", values.W_Cons),
("mpair?", values.W_MCons),
("number?", values.W_Number),
("complex?", values.W_Number),
("fixnum?", values.W_Fixnum),
("flonum?", values.W_Flonum),
("vector?", values.W_MVector),
("string?", values_string.W_String),
("symbol?", values.W_Symbol),
("boolean?", values.W_Bool),
("inspector?", values_struct.W_StructInspector),
("struct-type?", values_struct.W_StructType),
("struct-constructor-procedure?", values_struct.W_StructConstructor),
("struct-predicate-procedure?", values_struct.W_StructPredicate),
("struct-type-property?", values_struct.W_StructProperty),
("struct-type-property-accessor-procedure?",
values_struct.W_StructPropertyAccessor),
("box?", values.W_Box),
("variable-reference?", values.W_VariableReference),
("thread-cell?", values.W_ThreadCell),
("thread-cell-values?", values.W_ThreadCellValues),
("semaphore?", values.W_Semaphore),
("semaphore-peek-evt?", values.W_SemaphorePeekEvt),
("path?", values.W_Path),
("bytes?", values.W_Bytes),
("pseudo-random-generator?", values.W_PseudoRandomGenerator),
("char?", values.W_Character),
("continuation?", values.W_Continuation),
("continuation-mark-set?", values.W_ContinuationMarkSet),
("continuation-mark-key?", values.W_ContinuationMarkKey),
("primitive?", values.W_Prim),
("keyword?", values.W_Keyword),
("weak-box?", values.W_WeakBox),
("ephemeron?", values.W_Ephemeron),
("placeholder?", values.W_Placeholder),
("hash-placeholder?", values.W_HashTablePlaceholder),
("module-path-index?", values.W_ModulePathIndex),
("resolved-module-path?", values.W_ResolvedModulePath),
("impersonator-property-accessor-procedure?",
imp.W_ImpPropertyAccessor),
("impersonator-property?", imp.W_ImpPropertyDescriptor),
("parameter?", values_parameter.W_BaseParameter),
("parameterization?", values_parameter.W_Parameterization),
("hash?", W_HashTable),
("cpointer?", W_CPointer),
("ctype?", W_CType),
("continuation-prompt-tag?", values.W_ContinuationPromptTag),
("logger?", values.W_Logger),
("log-receiver?", values.W_LogReciever),
("evt?", values.W_Evt),
("unquoted-printing-string?", values.W_UnquotedPrintingString),
("port?", values.W_Port),
("security-guard?", values.W_SecurityGuard),
# FIXME
("will-executor?", values.W_WillExecutor),
("bytes-converter?", values.W_Impossible),
("fsemaphore?", values.W_Impossible),
("thread-group?", values.W_Impossible),
("udp?", values.W_Impossible),
("extflonum?", values.W_ExtFlonum),
("custodian-box?", values.W_Impossible),
("custodian?", values.W_Impossible),
("future?", values.W_Impossible),
]:
make_pred(*args)
for args in [
("void?", values.w_void),
("false?", values.w_false),
("null?", values.w_null),
]:
make_pred_eq(*args)
@expose("hash-weak?", [values.W_Object], simple=True)
def hash_weak_huh(obj):
# FIXME
return values.w_false
@expose("hash-strong?", [values.W_Object], simple=True)
def hash_strong_huh(obj):
# FIXME: /pypy/rpython/rlib/rweakref.py
return values.W_Bool.make(isinstance(obj, W_HashTable))
@expose("hash-ephemeron?", [values.W_Object], simple=True)
def hash_ephemeron_huh(obj):
# FIXME
return values.w_false
@expose("hash-equal?", [values.W_Object], simple=True)
def hash_equal_huh(obj):
inner = obj
if isinstance(obj, imp.W_ImpHashTable) or isinstance(obj, imp.W_ChpHashTable):
inner = obj.get_proxied()
return values.W_Bool.make(isinstance(inner, W_EqualHashTable))
@expose("hash-eq?", [values.W_Object], simple=True)
def hash_eq(obj):
inner = obj
if isinstance(obj, imp.W_ImpHashTable) or isinstance(obj, imp.W_ChpHashTable):
inner = obj.get_proxied()
eq_mutable = isinstance(inner, W_EqMutableHashTable)
eq_immutable = isinstance(inner, W_EqImmutableHashTable)
return values.W_Bool.make(eq_mutable or eq_immutable)
@expose("hash-eqv?", [values.W_Object], simple=True)
def hash_eqv(obj):
inner = obj
if isinstance(obj, imp.W_ImpHashTable) or isinstance(obj, imp.W_ChpHashTable):
inner = obj.get_proxied()
eqv_mutable = isinstance(inner, W_EqvMutableHashTable)
eqv_immutable = isinstance(inner, W_EqvImmutableHashTable)
return values.W_Bool.make(eqv_mutable or eqv_immutable)
def struct_port_huh(w_struct):
w_in, w_out = struct_port_prop_huh(w_struct)
return (w_in is not None) or (w_out is not None)
def struct_port_prop_huh(w_struct):
w_type = w_struct.struct_type()
in_property = out_property = None
for property in w_type.properties:
w_property, w_value = property
if w_property is values_struct.w_prop_input_port:
in_property = w_value
elif w_property is values_struct.w_prop_output_port:
out_property = w_value
return in_property, out_property
def struct_input_port_huh(w_struct):
w_in, w_out = struct_port_prop_huh(w_struct)
return w_in is not None
def struct_output_port_huh(w_struct):
w_in, w_out = struct_port_prop_huh(w_struct)
return w_out is not None
@expose("input-port?", [values.W_Object], simple=True)
def input_port_huh(a):
if isinstance(a, values.W_InputPort):
return values.w_true
elif isinstance(a, values_struct.W_Struct):
if struct_input_port_huh(a):
return values.w_true
return values.w_false
@expose("datum-intern-literal", [values.W_Object])
def datum_intern_literal(v):
return v
@expose("byte?", [values.W_Object])
def byte_huh(val):
if isinstance(val, values.W_Fixnum):
return values.W_Bool.make(0 <= val.value <= 255)
return values.w_false
@expose("regexp?", [values.W_Object])
def regexp_huh(r):
if isinstance(r, values_regex.W_Regexp) or isinstance(r, values_regex.W_PRegexp):
return values.w_true
return values.w_false
@expose("pregexp?", [values.W_Object])
def pregexp_huh(r):
if isinstance(r, values_regex.W_PRegexp):
return values.w_true
return values.w_false
@expose("byte-regexp?", [values.W_Object])
def byte_regexp_huh(r):
if isinstance(r, values_regex.W_ByteRegexp) or isinstance(r, values_regex.W_BytePRegexp):
return values.w_true
return values.w_false
@expose("byte-pregexp?", [values.W_Object])
def byte_pregexp_huh(r):
if isinstance(r, values_regex.W_BytePRegexp):
return values.w_true
return values.w_false
@expose("true-object?", [values.W_Object])
def true_object_huh(val):
if val is values.w_true:
return values.w_true
return values.w_false
@expose("procedure?", [values.W_Object])
def procedurep(n):
return values.W_Bool.make(n.iscallable())
@expose("syntax-original?", [values.W_Object], only_old=True)
def syntax_original(v):
return values.w_false
@expose("syntax-tainted?", [values.W_Object], only_old=True)
def syntax_tainted(v):
return values.w_false
@expose("syntax-source-module", [values.W_Object, default(values.W_Object, values.w_false)], only_old=True)
def syntax_source_module(stx, src):
# XXX Obviously not correct
return values.W_ResolvedModulePath(values.W_Symbol.make("fake symbol"))
@expose("srcloc->string", [values.W_Object])
def srcloc_to_string(obj):
return values.w_false
expose_val("null", values.w_null)
expose_val("true", values.w_true)
expose_val("false", values.w_false)
expose_val("break-enabled-key", values.break_enabled_key)
expose_val("exception-handler-key", values.exn_handler_key)
# FIXME: need stronger guards for all of these
for name in ["prop:evt",
"prop:impersonator-of",
"prop:method-arity-error"]:
expose_val(name, values_struct.W_StructProperty(
values.W_Symbol.make(name), values.w_false))
for name in ["exn:srclocs",
"custom-print-quotable"]:
prop = values_struct.W_StructProperty(values.W_Symbol.make(name), values.w_false)
expose_val("prop:"+name, prop)
expose_val(name+"?", values_struct.W_StructPropertyPredicate(prop))
expose_val(name+"-accessor", values_struct.W_StructPropertyAccessor(prop))
expose_val("prop:authentic", values_struct.w_prop_authentic)
expose_val("prop:sealed", values_struct.w_prop_sealed)
expose_val("prop:object-name", values_struct.w_prop_object_name)
expose_val("prop:procedure", values_struct.w_prop_procedure)
expose_val("prop:checked-procedure", values_struct.w_prop_checked_procedure)
expose_val("prop:arity-string", values_struct.w_prop_arity_string)
expose_val("prop:incomplete-arity", values_struct.w_prop_incomplete_arity)
expose_val("prop:custom-write", values_struct.w_prop_custom_write)
expose_val("prop:equal+hash", values_struct.w_prop_equal_hash)
expose_val("prop:chaperone-unsafe-undefined",
values_struct.w_prop_chaperone_unsafe_undefined)
expose_val("prop:set!-transformer", values_struct.w_prop_set_bang_transformer, only_old=True)
expose_val("prop:rename-transformer", values_struct.w_prop_rename_transformer, only_old=True)
expose_val("prop:expansion-contexts", values_struct.w_prop_expansion_contexts, only_old=True)
expose_val("prop:output-port", values_struct.w_prop_output_port)
expose_val("prop:input-port", values_struct.w_prop_input_port)
@continuation
def check_cont(proc, v, v1, v2, app, env, cont, _vals):
from pycket.interpreter import check_one_val, return_value
val = check_one_val(_vals)
if val is not values.w_false:
return v.ref_with_extra_info(1, app, env, cont)
return proc.call([v, v1, v2], env, cont)
@continuation
def receive_first_field(proc, v, v1, v2, app, env, cont, _vals):
from pycket.interpreter import check_one_val
first_field = check_one_val(_vals)
return first_field.call([v1, v2], env,
check_cont(proc, v, v1, v2, app, env, cont))
@expose("checked-procedure-check-and-extract",
[values_struct.W_StructType, values.W_Object, procedure,
values.W_Object, values.W_Object], simple=False, extra_info=True)
@jit.unroll_safe
def do_checked_procedure_check_and_extract(type, v, proc, v1, v2, env, cont, calling_app):
from pycket.interpreter import check_one_val, return_value
if isinstance(v, values_struct.W_RootStruct):
struct_type = jit.promote(v.struct_type())
if type.has_subtype(struct_type):
offset = struct_type.get_offset(type)
assert offset != -1
return v.ref_with_extra_info(offset, calling_app, env,
receive_first_field(proc, v, v1, v2, calling_app, env, cont))
return proc.call([v, v1, v2], env, cont)
################################################################
# printing
@expose("system-library-subpath", [default(values.W_Object, values.w_false)])
def sys_lib_subpath(mode):
# Pycket is 64bit only a.t.m.
if w_system_sym == w_windows_sym:
return values.W_Path(r"win32\\x86_64")
elif w_system_sym == w_macosx_sym:
return values.W_Path("x86_64-macosx")
else:
# FIXME: pretend all unicies are linux for now
return values.W_Path("x86_64-linux")
@expose("primitive-closure?", [values.W_Object])
def prim_clos(v):
return values.w_false
################################################################
# built-in struct types
def define_struct(name, w_super=values.w_null, fields=[]):
immutables = range(len(fields))
symname = values.W_Symbol.make(name)
w_struct_type = values_struct.W_StructType.make_simple(
w_name=symname,
w_super_type=w_super,
init_field_count=len(fields),
auto_field_count=0,
immutables=immutables)
expose_val("struct:" + name, w_struct_type)
expose_val(name, w_struct_type.constructor)
# this is almost always also provided
expose_val("make-" + name, w_struct_type.constructor)
expose_val(name + "?", w_struct_type.predicate)
struct_acc = w_struct_type.accessor
for field, field_name in enumerate(fields):
w_name = values.W_Symbol.make(field_name)
acc = values_struct.W_StructFieldAccessor(struct_acc, field, w_name)
expose_val(name + "-" + field_name, acc)
return w_struct_type
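# For example (hypothetical struct, not defined in this file),
# define_struct("posn", fields=["x", "y"]) would expose struct:posn, posn,
# make-posn, posn?, posn-x and posn-y to Racket code.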
exn = \
define_struct("exn", values.w_null, ["message", "continuation-marks"])
exn_fail = \
define_struct("exn:fail", exn)
exn_fail_contract = \
define_struct("exn:fail:contract", exn_fail)
exn_fail_contract_arity = \
define_struct("exn:fail:contract:arity", exn_fail)
exn_fail_contract_divide_by_zero = \
define_struct("exn:fail:contract:divide-by-zero", exn_fail)
exn_fail_contract_non_fixnum_result = \
define_struct("exn:fail:contract:non-fixnum-result", exn_fail)
exn_fail_contract_continuation = \
define_struct("exn:fail:contract:continuation", exn_fail)
exn_fail_contract_variable = \
define_struct("exn:fail:contract:variable", exn_fail, ["id"])
exn_fail_syntax = \
define_struct("exn:fail:syntax", exn_fail, ["exprs"])
exn_fail_syntax_unbound = \
define_struct("exn:fail:syntax:unbound", exn_fail_syntax)
exn_fail_syntax_missing_module = \
define_struct("exn:fail:syntax:missing-module", exn_fail_syntax, ["path"])
exn_fail_read = \
define_struct("exn:fail:read", exn_fail, ["srclocs"])
exn_fail_read_eof = \
define_struct("exn:fail:read:eof", exn_fail_read)
exn_fail_read_non_char = \
define_struct("exn:fail:read:non-char", exn_fail_read)
exn_fail_fs = \
define_struct("exn:fail:filesystem", exn_fail)
exn_fail_fs_exists = \
define_struct("exn:fail:filesystem:exists", exn_fail_fs)
exn_fail_fs_version = \
define_struct("exn:fail:filesystem:version", exn_fail_fs)
exn_fail_fs_errno = \
define_struct("exn:fail:filesystem:errno", exn_fail_fs, ["errno"])
exn_fail_fs_missing_module = \
define_struct("exn:fail:filesystem:missing-module", exn_fail_fs, ["path"])
exn_fail_network = \
define_struct("exn:fail:network", exn_fail)
exn_fail_network_errno = \
define_struct("exn:fail:network:errno", exn_fail_network, ["errno"])
exn_fail_out_of_memory = \
define_struct("exn:fail:out-of-memory", exn_fail)
exn_fail_unsupported = \
define_struct("exn:fail:unsupported", exn_fail)
exn_fail_user = \
define_struct("exn:fail:user", exn_fail)
exn_break = \
define_struct("exn:break", exn)
exn_break_hang_up = \
define_struct("exn:break:hang-up", exn_break)
exn_break_terminate = \
define_struct("exn:break:terminate", exn_break)
srcloc = define_struct("srcloc",
fields=["source", "line", "column", "position", "span"])
date_struct = define_struct("date", fields=["second",
"minute",
"hour",
"day",
"month",
"year",
"week-day",
"year-day",
"dst?"
"time-zone-offset"])
date_star_struct = define_struct("date*", date_struct,
fields=["nanosecond", "time-zone-name"])
arity_at_least = define_struct("arity-at-least", values.w_null, ["value"])
for args in [ ("char-symbolic?",),
("char-graphic?",),
("char-blank?",),
("char-iso-control?",),
("char-punctuation?",),
("char-upper-case?",),
("char-title-case?",),
("char-lower-case?",),
]:
make_dummy_char_pred(*args)
for args in [ ("subprocess?",),
("file-stream-port?",),
("terminal-port?",),
("byte-ready?",),
("char-ready?",),
("handle-evt?",),
("thread?",),
("thread-running?",),
("thread-dead?",),
("semaphore-try-wait?",),
("link-exists?",),
("chaperone-channel",),
("impersonate-channel",),
]:
define_nyi(*args)
@expose("unsafe-make-place-local", [values.W_Object])
def unsafe_make_place_local(v):
return values.W_MBox(v)
@expose("unsafe-place-local-ref", [values.W_MBox], simple=False)
def unsafe_place_local_ref(p, env, cont):
return p.unbox(env, cont)
@expose("unsafe-place-local-set!", [values.W_MBox, values.W_Object], simple=False)
def unsafe_place_local_set(p, v, env, cont):
return p.set_box(v, env, cont)
@expose("set!-transformer?", [values.W_Object], only_old=True)
def set_bang_transformer(v):
if isinstance(v, values.W_AssignmentTransformer):
return values.w_true
elif isinstance(v, values_struct.W_RootStruct):
w_property = v.struct_type().read_property(
values_struct.w_prop_set_bang_transformer)
return values.W_Bool.make(w_property is not None)
else:
return values.w_false
@expose("object-name", [values.W_Object])
def object_name(v):
if isinstance(v, values.W_Prim):
return v.name
elif isinstance(v, values_regex.W_AnyRegexp) or isinstance(v, values.W_Port):
return v.obj_name()
return values_string.W_String.fromstr_utf8(v.tostring()) # XXX really?
@expose("find-main-config", [])
def find_main_config():
return values.w_false
@expose("version", [])
def version():
from pycket.env import w_version
version = w_version.get_version()
if version == '':
version = "old-pycket"
return values_string.W_String.fromascii("unknown version" if version is None else version)
@continuation
def sem_post_cont(sem, env, cont, vals):
sem.post()
from pycket.interpreter import return_multi_vals
return return_multi_vals(vals, env, cont)
@expose("call-with-semaphore", simple=False, extra_info=True)
def call_with_sem(args, env, cont, extra_call_info):
if len(args) < 2:
raise SchemeException("error call-with-semaphore")
sem = args[0]
f = args[1]
if len(args) == 2:
new_args = []
fail = None
else:
new_args = args[3:]
if args[2] is values.w_false:
fail = None
else:
fail = args[2]
assert isinstance(sem, values.W_Semaphore)
assert f.iscallable()
sem.wait()
return f.call_with_extra_info(new_args, env, sem_post_cont(sem, env, cont), extra_call_info)
c_thread = values.W_Thread()
@expose("current-thread", [])
def current_thread():
return c_thread
# FIXME : implementation
@expose("current-memory-use", [default(values.W_Object, values.w_false)])
def current_memory_use(mode):
# mode is : (or/c #f 'cumulative custodian?)
return values.W_Fixnum(1)
@expose("semaphore-post", [values.W_Semaphore])
def sem_post(s):
s.post()
@expose("semaphore-wait", [values.W_Semaphore])
def sem_wait(s):
s.wait()
@expose("procedure-rename", [procedure, values.W_Object])
def procedure_rename(p, n):
return p
@expose("procedure->method", [procedure])
def procedure_to_method(proc):
# TODO provide a real implementation
return proc
@jit.unroll_safe
def make_arity_list(arity, extra=None):
jit.promote(arity)
acc = values.w_null
if extra is not None:
acc = values.W_Cons.make(extra, acc)
for item in reversed(arity.arity_list):
i = values.W_Fixnum(item)
acc = values.W_Cons.make(i, acc)
return acc
@continuation
def proc_arity_cont(arity, env, cont, _vals):
from pycket.interpreter import check_one_val, return_value
val = check_one_val(_vals)
if not arity.arity_list:
return return_value(val, env, cont)
result = make_arity_list(arity, val)
return return_value(result, env, cont)
def arity_to_value(arity, env, cont):
from pycket.interpreter import return_value
if arity.at_least != -1:
val = [values.W_Fixnum(arity.at_least)]
constructor = arity_at_least.constructor
return constructor.call(val, env, proc_arity_cont(arity, env, cont))
if len(arity.arity_list) == 1:
item = values.W_Fixnum(arity.arity_list[0])
return return_value(item, env, cont)
result = make_arity_list(arity)
return return_value(result, env, cont)
@expose("procedure-arity", [procedure], simple=False)
@jit.unroll_safe
def do_procedure_arity(proc, env, cont):
arity = proc.get_arity()
return arity_to_value(arity, env, cont)
@expose("procedure-arity-mask", [procedure], simple=True)
@jit.unroll_safe
def do_procedure_arity_mask(proc):
arity = proc.get_arity()
return arity.arity_bits()
@make_procedure("default-read-handler",[values.W_InputPort, default(values.W_Object, None)], simple=False)
def default_read_handler(ip, src, env, cont):
# default to the "read" and "read-syntax" defined in the expander linklet
if src is None:
return prim_env[values.W_Symbol.make("read")].call([ip], env, cont)
else:
return prim_env[values.W_Symbol.make("read-syntax")].call([ip, src], env, cont)
@continuation
def get_read_handler_cont(env, cont, _vals):
from pycket.interpreter import check_one_val, return_value
ip = check_one_val(_vals)
assert isinstance(ip, values.W_InputPort)
if ip.get_read_handler():
return return_value(ip.get_read_handler(), env, cont)
else:
return return_value(default_read_handler, env, cont)
@expose("port-read-handler", [values.W_Object, default(values.W_Procedure, None)], simple=False)
def do_port_read_handler(ip, proc, env, cont):
from pycket.interpreter import return_value
if not isinstance(ip, values.W_InputPort):
assert isinstance(ip, values_struct.W_Struct)
st = ip.struct_type()
return st.accessor.call([ip, values.W_Fixnum(0)], env, get_read_handler_cont(env, cont))
if proc is None:
#get
if ip.get_read_handler():
return return_value(ip.get_read_handler(), env, cont)
else:
return return_value(default_read_handler, env, cont)
else:
#set
if proc is default_read_handler:
ip.set_read_handler(default_read_handler)
else:
ip.set_read_handler(proc)
return return_value(values.w_void, env, cont)
@expose("procedure-arity?", [values.W_Object])
@jit.unroll_safe
def do_is_procedure_arity(n):
if isinstance(n, values.W_Fixnum):
return values.W_Bool.make(n.value >= 0)
elif (isinstance(n, values_struct.W_RootStruct) and
n.struct_type() is arity_at_least):
return values.w_true
elif isinstance(n, values.W_List) and n.is_proper_list():
for item in values.from_list_iter(n):
if not (isinstance(item, values.W_Fixnum) or
(isinstance(item, values_struct.W_RootStruct) and
item.struct_type() is arity_at_least)):
return values.w_false
return values.w_true
return values.w_false
@expose("procedure-arity-includes?",
[procedure, values.W_Integer, default(values.W_Object, values.w_false)])
def procedure_arity_includes(proc, k, kw_ok):
if kw_ok is values.w_false and isinstance(proc, values_struct.W_RootStruct):
w_prop_val = proc.struct_type().read_property(values_struct.w_prop_incomplete_arity)
if w_prop_val is not None:
return values.w_false
if isinstance(k, values.W_Integer):
try:
k_val = k.toint()
except OverflowError:
pass
else:
arity = proc.get_arity(promote=True)
return values.W_Bool.make(arity.arity_includes(k_val))
return values.w_false
@expose("procedure-result-arity", [procedure], simple=False)
def procedure_result_arity(proc, env, cont):
from pycket.interpreter import return_multi_vals
arity = proc.get_result_arity()
if arity is None:
return return_multi_vals(values.w_false, env, cont)
return arity_to_value(arity, env, cont)
@expose("procedure-reduce-arity", [procedure, values.W_Object, default(values.W_Object, None)])
def procedure_reduce_arity(proc, arity, e):
# FIXME : this code is all wrong
#assert isinstance(arity, Arity)
#proc.set_arity(arity)
return proc
@expose("procedure-reduce-arity-mask", [procedure, values.W_Fixnum, default(values.W_Object, values.w_false)])
def procedure_reduce_arity_mask(proc, mask, name):
import math
return proc # FIXME: do this without mutation
v = mask.value
# turn the given mask into an arity
if v < 0:
# it's an at least value
ar_value = int(math.log(abs(v))/math.log(2))
# for some reason the 2 argument log doesn't exist
ar = Arity([], ar_value)
else:
ar_value = int(math.log(v)/math.log(2))
ar = Arity([ar_value], -1)
# FIXME: what if the mask represents a list? see math_arity_cont
# FIXME: mutation is wrong!
proc.set_arity(ar)
return proc
@expose("procedure-struct-type?", [values_struct.W_StructType])
def do_is_procedure_struct_type(struct_type):
return values.W_Bool.make(struct_type.prop_procedure is not None)
@expose("procedure-extract-target", [procedure], simple=False)
def do_procedure_extract_target(proc, env, cont):
from pycket.interpreter import return_value
if not isinstance(proc, values_struct.W_RootStruct):
return return_value(values.w_false, env, cont)
struct_type = proc.struct_type()
prop_procedure = struct_type.prop_procedure
if isinstance(prop_procedure, values.W_Fixnum):
idx = prop_procedure.value
return struct_type.accessor.access(proc, idx, env, cont)
return return_value(values.w_false, env, cont)
@expose("variable-reference-constant?",
[values.W_VariableReference], simple=False)
def varref_const(varref, env, cont):
from pycket.interpreter import return_value
return return_value(values.W_Bool.make(not(varref.varref.is_mutable(env))),
env, cont)
@expose("variable-reference->resolved-module-path",
[values.W_VariableReference], only_old=True)
def varref_rmp(varref):
return values.W_ResolvedModulePath(values.W_Path(varref.varref.path))
@expose("variable-reference->module-source", [values.W_VariableReference], only_old=True)
def varref_ms(varref):
# FIXME: not implemented
return values.W_Symbol.make("dummy_module")
@expose("variable-reference->module-path-index", [values.W_VariableReference], only_old=True)
def varref_to_mpi(ref):
from pycket.interpreter import ModuleVar
if not isinstance(ref, ModuleVar):
return values.w_false
return values.W_ModulePathIndex()
@expose("variable-reference->module-base-phase", [values.W_VariableReference], only_old=True)
def varref_to_mbp(ref):
# XXX Obviously not correct
return values.W_Fixnum.ZERO
@expose("resolved-module-path-name", [values.W_ResolvedModulePath], only_old=True)
def rmp_name(rmp):
return rmp.name
def is_module_path(v):
if isinstance(v, values.W_Symbol):
# FIXME: not always right
return True
if isinstance(v, values.W_Path):
return True
if isinstance(v, values_string.W_String):
return True
if isinstance(v, values.W_List):
vs = values.from_list(v)
for p in vs:
if not is_module_path(p):
return False
return True
# FIXME
return False
@expose("module-path?", [values.W_Object], only_old=True)
def module_pathp(v):
return values.W_Bool.make(is_module_path(v))
@expose("values")
def do_values(args_w):
return values.Values.make(args_w)
@expose("call-with-values", [procedure] * 2, simple=False, extra_info=True)
def call_with_values (producer, consumer, env, cont, extra_call_info):
# FIXME: check arity
return producer.call_with_extra_info([], env, call_cont(consumer, env, cont), extra_call_info)
@continuation
def time_apply_cont(initial, initial_user, initial_gc, env, cont, vals):
from pycket.interpreter import return_multi_vals
final = time.time()
final_gc = current_gc_time()
final_user = time.clock()
ms = values.W_Fixnum(int((final - initial) * 1000))
ms_gc = values.W_Fixnum(int((final_gc - initial_gc)))
ms_user = values.W_Fixnum(int((final_user - initial_user) * 1000))
vals_w = vals.get_all_values()
results = values.Values.make([values.to_list(vals_w),
ms_user, ms, ms_gc])
return return_multi_vals(results, env, cont)
@jit.dont_look_inside
def current_gc_time():
if objectmodel.we_are_translated():
memory = rgc.get_stats(rgc.TOTAL_GC_TIME)
else:
memory = 0
return memory
@expose("time-apply", [procedure, values.W_List], simple=False, extra_info=True)
def time_apply(a, args, env, cont, extra_call_info):
initial = time.time()
initial_user = time.clock()
initial_gc = current_gc_time()
return a.call_with_extra_info(values.from_list(args),
env, time_apply_cont(initial, initial_user, initial_gc, env, cont),
extra_call_info)
@expose("apply", simple=False, extra_info=True)
def apply(args, env, cont, extra_call_info):
if len(args) < 2:
raise SchemeException("apply expected at least 2 arguments, given %s" % len(args))
fn = args[0]
if not fn.iscallable():
raise SchemeException("apply expected a procedure, got something else")
lst = args[-1]
try:
fn_arity = fn.get_arity(promote=True)
if fn_arity is Arity.unknown or fn_arity.at_least == -1:
unroll_to = 3
elif fn_arity.arity_list:
unroll_to = fn_arity.arity_list[-1]
else:
unroll_to = fn_arity.at_least + 7
rest = values.from_list(lst, unroll_to=unroll_to, force=True)
except SchemeException:
raise SchemeException(
"apply expected a list as the last argument, got something else")
args_len = len(args) - 1
assert args_len >= 0
others = args[1:args_len]
new_args = others + rest
return fn.call_with_extra_info(new_args, env, cont, extra_call_info)
@expose("make-semaphore", [default(values.W_Fixnum, values.W_Fixnum.ZERO)])
def make_semaphore(n):
return values.W_Semaphore(n.value)
@expose("semaphore-peek-evt", [values.W_Semaphore])
def sem_peek_evt(s):
return values.W_SemaphorePeekEvt(s)
@expose("not", [values.W_Object])
def notp(a):
return values.W_Bool.make(a is values.w_false)
@jit.elidable
def elidable_length(lst):
n = 0
while isinstance(lst, values.W_Cons):
n += 1
lst = lst.cdr()
return n
@objectmodel.always_inline
def unroll_pred(lst, idx, unroll_to=0):
if not jit.we_are_jitted():
return False
return not jit.isvirtual(lst) and idx > unroll_to
@jit.unroll_safe
def virtual_length(lst, unroll_to=0):
n = 0
while isinstance(lst, values.W_Cons):
if unroll_pred(lst, n, unroll_to):
return elidable_length(lst) + n
n += 1
lst = lst.cdr()
return n
@expose("length", [values.W_List])
def length(a):
if not a.is_proper_list():
raise SchemeException("length: not given a proper list (either cyclic or not null terminated)")
return values.W_Fixnum(virtual_length(a, unroll_to=2))
@expose("list")
def do_list(args):
return values.to_list(args)
@expose("list*")
def do_liststar(args):
if not args:
raise SchemeException("list* expects at least one argument")
return values.to_improper(args[:-1], args[-1])
@expose("assq", [values.W_Object, values.W_List])
def assq(a, b):
while isinstance(b, values.W_Cons):
head, b = b.car(), b.cdr()
if not isinstance(head, values.W_Cons):
raise SchemeException("assq: found a non-pair element")
if eq_prims.eqp_logic(a, head.car()):
return head
if b is not values.w_null:
raise SchemeException("assq: reached a non-pair")
return values.w_false
@expose("memq", [values.W_Object, values.W_List])
def memq(w_o, w_l):
while isinstance(w_l, values.W_Cons):
if eq_prims.eqp_logic(w_o, w_l.car()):
return w_l
w_l = w_l.cdr()
return values.w_false
@expose("memv", [values.W_Object, values.W_List])
def memv(w_o, w_l):
while isinstance(w_l, values.W_Cons):
if w_o.eqv(w_l.car()):
return w_l
w_l = w_l.cdr()
return values.w_false
@expose("cons", [values.W_Object, values.W_Object])
def do_cons(a, b):
return values.W_Cons.make(a, b)
def make_list_eater(name):
"""
For generating car, cdr, caar, cadr, etc...
"""
spec = name[1:-1]
unrolled = unroll.unrolling_iterable(reversed(spec))
contract = "pair?"
for letter in spec[1::-1]:
if letter == 'a':
contract = "(cons/c %s any/c)" % contract
elif letter == 'd':
contract = "(cons/c any/c %s)" % contract
else:
assert False, "Bad list eater specification"
@expose(name, [values.W_Object])
def process_list(_lst):
lst = _lst
for letter in unrolled:
if not isinstance(lst, values.W_Cons):
raise SchemeException("%s: expected %s given %s" % (name, contract, _lst))
if letter == 'a':
lst = lst.car()
elif letter == 'd':
lst = lst.cdr()
else:
assert False, "Bad list eater specification"
return lst
process_list.__name__ = "do_" + name
return process_list
def list_eater_names(n):
names = []
for i in range(n):
names = [n + 'a' for n in names] + [n + 'd' for n in names] + ['a', 'd']
return ["c%sr" % name for name in names]
for name in list_eater_names(4):
make_list_eater(name)
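# Illustrative: list_eater_names(2) yields
# ['caar', 'cdar', 'cadr', 'cddr', 'car', 'cdr'], and the generated "cadr"
# primitive walks the spec right-to-left, i.e. it computes (car (cdr lst)).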
@expose("mlist")
def do_mlist(args):
return values.to_mlist(args)
@expose("mcons", [values.W_Object, values.W_Object])
def do_mcons(a, b):
return values.W_MCons(a,b)
@expose("mcar", [values.W_MCons])
def do_mcar(a):
return a.car()
@expose("mcdr", [values.W_MCons])
def do_mcdr(a):
return a.cdr()
@expose("set-mcar!", [values.W_MCons, values.W_Object])
def do_set_mcar(a, b):
a.set_car(b)
@expose("set-mcdr!", [values.W_MCons, values.W_Object])
def do_set_mcdr(a, b):
a.set_cdr(b)
@expose("map", simple=False, arity=Arity.geq(2))
def do_map(args, env, cont):
# XXX this is currently not properly jitted
if len(args) < 2:
raise SchemeException("map expected at least two argument, got %s"%len(args))
fn, lists = args[0], args[1:]
if not fn.iscallable():
raise SchemeException("map expected a procedure, got something else")
# FIXME: more errorchecking
assert len(args) >= 0
return map_loop(fn, lists, env, cont)
@loop_label
def map_loop(f, lists, env, cont):
from pycket.interpreter import return_value
lists_new = []
args = []
for l in lists:
if not isinstance(l, values.W_Cons):
if l is not values.w_null:
raise SchemeException("map: not given a proper list")
return return_value(values.w_null, env, cont)
args.append(l.car())
lists_new.append(l.cdr())
return f.call(args, env, map_first_cont(f, lists_new, env, cont))
@continuation
def map_first_cont(f, lists, env, cont, _vals):
from pycket.interpreter import check_one_val
val = check_one_val(_vals)
return map_loop(f, lists, env, map_cons_cont(f, lists, val, env, cont))
@continuation
def map_cons_cont(f, lists, val, env, cont, _vals):
from pycket.interpreter import check_one_val, return_value
rest = check_one_val(_vals)
return return_value(values.W_Cons.make(val, rest), env, cont)
@expose("for-each", simple=False, arity=Arity.geq(2))
@jit.unroll_safe
def for_each(args, env, cont):
from pycket.interpreter import return_value
if len(args) < 2:
raise SchemeException("for-each: expected at least a procedure and a list")
f = args[0]
if not f.iscallable():
raise SchemeException("for-each: expected a procedure, but got %s" % f)
ls = args[1:]
for l in ls:
if not l.is_proper_list():
raise SchemeException("for-each: expected a list, but got %s" % l)
return for_each_loop(f, ls, env, cont)
@loop_label
@jit.unroll_safe
def for_each_loop(func, args, env, cont):
from pycket.interpreter import return_value
nargs = jit.promote(len(args))
heads = [None] * nargs
tails = [None] * nargs
for i in range(nargs):
arg = args[i]
if arg is values.w_null:
for v in args:
if v is not values.w_null:
raise SchemeException("for-each: all lists must have same size")
return return_value(values.w_void, env, cont)
assert isinstance(arg, values.W_Cons)
heads[i] = arg.car()
tails[i] = arg.cdr()
return func.call(heads, env,
for_each_cont(func, tails, env, cont))
@continuation
def for_each_cont(func, tails, env, cont, _vals):
return for_each_loop(func, tails, env, cont)
@expose("andmap", simple=False, arity=Arity.geq(2))
def andmap(args, env, cont):
from pycket.interpreter import return_value
if len(args) < 2:
raise SchemeException("andmap: expected at least a procedure and a list")
f = args[0]
if not f.iscallable():
raise SchemeException("andmap: expected a procedure, but got %s"%f)
ls = args[1:]
for l in ls:
if not isinstance(l, values.W_List):
raise SchemeException("andmap: expected a list, but got %s"%l)
return return_value(values.w_void, env, andmap_cont(f, ls, env, cont))
@continuation
def andmap_cont(f, ls, env, cont, vals):
# XXX this is currently not properly jitted
from pycket.interpreter import return_value, check_one_val
val = check_one_val(vals)
if val is values.w_false:
return return_value(val, env, cont)
for l in ls:
if l is values.w_null:
return return_value(values.w_true, env, cont)
cars = [l.car() for l in ls]
cdrs = [l.cdr() for l in ls]
return f.call(cars, env, andmap_cont(f, cdrs, env, cont))
@expose("ormap", simple=False, arity=Arity.geq(2))
def ormap(args, env, cont):
from pycket.interpreter import return_value
if len(args) < 2:
raise SchemeException("ormap: expected at least a procedure and a list")
f = args[0]
if not f.iscallable():
raise SchemeException("ormap: expected a procedure, but got %s"%f)
ls = args[1:]
for l in ls:
if not isinstance(l, values.W_List):
raise SchemeException("ormap: expected a list, but got %s"%l)
return return_value(values.w_false, env, ormap_cont(f, ls, env, cont))
@continuation
def ormap_cont(f, ls, env, cont, vals):
# XXX this is currently not properly jitted
from pycket.interpreter import return_value, check_one_val
val = check_one_val(vals)
if val is not values.w_false:
return return_value(val, env, cont)
for l in ls:
if l is values.w_null:
return return_value(values.w_false, env, cont)
cars = [l.car() for l in ls]
cdrs = [l.cdr() for l in ls]
return f.call(cars, env, ormap_cont(f, cdrs, env, cont))
@expose("append", arity=Arity.geq(0))
@jit.look_inside_iff(
lambda l: jit.loop_unrolling_heuristic(l, len(l), values.UNROLLING_CUTOFF))
def append(lists):
if not lists:
return values.w_null
acc = lists[-1]
for i in range(len(lists) - 2, -1, -1):
curr = lists[i]
if not curr.is_proper_list():
raise SchemeException("append: expected proper list")
acc = append_two(curr, acc)
return acc
def append_two(l1, l2):
first = None
last = None
while isinstance(l1, values.W_Cons):
v = l1.clone()
if first is None:
first = v
else:
last._unsafe_set_cdr(v)
last = v
l1 = l1.cdr()
if last is None:
return l2
last._unsafe_set_cdr(l2)
return first
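# Illustrative sketch (not pycket code) of append_two's copy-and-splice
# strategy, using two-slot Python lists [car, cdr] with None as the empty
# list: every cell of l1 is cloned, so the result shares structure only
# with l2, exactly like the W_Cons version above.
def _append_two_sketch(l1, l2):
    first = last = None
    while l1 is not None:
        v = [l1[0], None]              # clone the current cell
        if first is None:
            first = v
        else:
            last[1] = v                # splice behind the previous clone
        last = v
        l1 = l1[1]
    if last is None:
        return l2
    last[1] = l2                       # final splice reuses l2 unchanged
    return first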
@expose("reverse", [values.W_List])
def reverse(w_l):
acc = values.w_null
while isinstance(w_l, values.W_Cons):
val, w_l = w_l.car(), w_l.cdr()
acc = values.W_Cons.make(val, acc)
if w_l is not values.w_null:
raise SchemeException("reverse: not given proper list")
return acc
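# The same accumulator idea in plain Python (illustrative only): consing each
# head onto the accumulator emits the elements in reverse order.
def _reverse_sketch(cells):
    acc = None
    while cells is not None:
        acc = [cells[0], acc]
        cells = cells[1]
    return acc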
@expose("void", arity=Arity.geq(0))
def do_void(args):
return values.w_void
@expose("make-ephemeron", [values.W_Object] * 2)
def make_ephemeron(key, val):
return values.W_Ephemeron(key, val)
@expose("ephemeron-value",
[values.W_Ephemeron, default(values.W_Object, values.w_false)])
def ephemeron_value(ephemeron, default):
v = ephemeron.get()
return v if v is not None else default
@expose("make-placeholder", [values.W_Object])
def make_placeholder(val):
return values.W_Placeholder(val)
@expose("placeholder-set!", [values.W_Placeholder, values.W_Object])
def placeholder_set(ph, datum):
ph.value = datum
return values.w_void
@expose("placeholder-get", [values.W_Placeholder])
def placeholder_get(ph):
return ph.value
@expose("make-hash-placeholder", [values.W_List])
def make_hash_placeholder(vals):
return values.W_HashTablePlaceholder([], [])
@expose("make-hasheq-placeholder", [values.W_List])
def make_hasheq_placeholder(vals):
return values.W_HashTablePlaceholder([], [])
@expose("make-hasheqv-placeholder", [values.W_List])
def make_hasheqv_placeholder(vals):
return values.W_HashTablePlaceholder([], [])
@expose("list?", [values.W_Object])
def listp(v):
return values.W_Bool.make(v.is_proper_list())
@expose("list-pair?", [values.W_Object])
def list_pair(v):
return values.W_Bool.make(isinstance(v, values.W_Cons) and v.is_proper_list())
def enter_list_ref_iff(lst, pos):
if jit.isconstant(lst) and jit.isconstant(pos):
return True
return jit.isconstant(pos) and pos <= 16
@jit.look_inside_iff(enter_list_ref_iff)
def list_ref_impl(lst, pos):
if pos < 0:
raise SchemeException("list-ref: negative index")
for i in range(pos):
lst = lst.cdr()
if not isinstance(lst, values.W_Cons):
raise SchemeException("list-ref: index out of range")
return lst.car()
@expose("list-ref", [values.W_Cons, values.W_Fixnum])
def list_ref(lst, pos):
return list_ref_impl(lst, pos.value)
@expose("unsafe-list-ref", [subclass_unsafe(values.W_Cons), values.W_Fixnum])
def unsafe_list_ref(lst, pos):
return list_ref_impl(lst, pos.value)
@expose("unsafe-list-tail", [subclass_unsafe(values.W_Object), values.W_Fixnum])
def unsafe_list_tail(lst, pos):
return list_tail_impl(lst, pos)
@expose("list-tail", [values.W_Object, values.W_Fixnum])
def list_tail(lst, pos):
return list_tail_impl(lst, pos)
def list_tail_impl(lst, pos):
start_pos = pos.value
while start_pos > 0:
if not isinstance(lst, values.W_Cons):
msg = "index too large for list" if lst is values.w_null else "index reaches a non-pair"
raise SchemeException("list-tail : %s\n -- lst : %s\n -- index : %s\n" % (msg, lst.tostring(), start_pos))
lst = lst.cdr()
start_pos -= 1
return lst
@expose("assoc", [values.W_Object, values.W_List, default(values.W_Object, values.w_false)])
def assoc(v, lst, is_equal):
if is_equal is not values.w_false:
raise SchemeException("assoc: using a custom equal? is not yet implemented")
while isinstance(lst, values.W_Cons):
c = lst.car()
        if not isinstance(c, values.W_Cons):
raise SchemeException("assoc: non-pair found in list: %s in %s" % (c.tostring(), lst.tostring()))
cc = c.car()
if v.equal(cc):
return c
lst = lst.cdr()
return values.w_false
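# Plain-Python sketch of the search above (names are illustrative): walk an
# association list of (key, value) pairs and return the first pair whose key
# is equal to v, or False -- mirroring assoc's #f -- when nothing matches.
def _assoc_sketch(v, pairs):
    for pair in pairs:
        if pair[0] == v:
            return pair
    return False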
@expose("current-seconds", [])
def current_seconds():
tick = int(time.time())
return values.W_Fixnum(tick)
@expose("current-inexact-milliseconds", [])
def curr_millis():
return values.W_Flonum(time.time() * 1000.0)
@expose("seconds->date", [values.W_Fixnum])
def seconds_to_date(s):
# TODO: Proper implementation
return values.w_false
def _error(args, is_user=False):
reason = ""
if len(args) == 1:
sym = args[0]
reason = "error: %s" % sym.tostring()
else:
first_arg = args[0]
if isinstance(first_arg, values_string.W_String):
from rpython.rlib.rstring import StringBuilder
msg = StringBuilder()
msg.append(first_arg.tostring())
v = args[1:]
for item in v:
msg.append(" %s" % item.tostring())
reason = msg.build()
else:
src = first_arg
form = args[1]
v = args[2:]
assert isinstance(src, values.W_Symbol)
assert isinstance(form, values_string.W_String)
reason = "%s: %s" % (
src.tostring(), input_output.format(form, v, "error"))
if is_user:
raise UserException(reason)
else:
raise SchemeException(reason)
@expose("error", arity=Arity.geq(1))
def error(args):
return _error(args, False)
@expose("raise-user-error", arity=Arity.geq(1))
def raise_user_error(args):
return _error(args, True)
@expose("raise-arity-error", arity=Arity.geq(2))
def raise_arity_error(args):
return _error(args, False)
@expose("raise-result-arity-error", arity=Arity.geq(3))
def raise_result_arity_error(args):
return _error(args, False)
@expose("list->vector", [values.W_List])
def list2vector(l):
return values_vector.W_Vector.fromelements(values.from_list(l))
# FIXME: make this work with chaperones/impersonators
@expose("vector->list", [values.W_MVector], simple=False)
def vector2list(v, env, cont):
from pycket.interpreter import return_value
if isinstance(v, values_vector.W_Vector):
# Fast path for unproxied vectors
result = values.vector_to_improper(v, values.w_null)
return return_value(result, env, cont)
return vector_to_list_loop(v, v.length() - 1, values.w_null, env, cont)
@loop_label
def vector_to_list_loop(vector, idx, acc, env, cont):
from pycket.interpreter import return_value
if idx < 0:
return return_value(acc, env, cont)
return vector.vector_ref(idx, env,
vector_to_list_read_cont(vector, idx, acc, env, cont))
@continuation
def vector_to_list_read_cont(vector, idx, acc, env, cont, _vals):
from pycket.interpreter import check_one_val, return_value
val = check_one_val(_vals)
acc = values.W_Cons.make(val, acc)
return vector_to_list_loop(vector, idx - 1, acc, env, cont)
# Unsafe pair ops
@expose("unsafe-car", [subclass_unsafe(values.W_Cons)])
def unsafe_car(p):
return p.car()
@expose("unsafe-mcar", [subclass_unsafe(values.W_MCons)])
def unsafe_mcar(p):
return p.car()
@expose("unsafe-cdr", [subclass_unsafe(values.W_Cons)])
def unsafe_cdr(p):
return p.cdr()
@expose("unsafe-mcdr", [subclass_unsafe(values.W_MCons)])
def unsafe_mcdr(p):
return p.cdr()
@continuation
def struct_port_loc_cont(input_huh, env, cont, _vals):
from pycket.interpreter import check_one_val, return_multi_vals
pr = check_one_val(_vals)
if not isinstance(pr, values.W_Port):
if input_huh:
# empty string input port is used for prop:input-port
pr = values.W_StringInputPort("")
else:
# a port that discards all data is used for prop:output-port
pr = values.W_StringOutputPort()
assert isinstance(pr, values.W_Port)
lin = pr.get_line()
col = pr.get_column()
pos = pr.get_position()
return return_multi_vals(values.Values.make([lin, col, pos]), env, cont)
@expose("port-next-location", [values.W_Object], simple=False)
def port_next_loc(p, env, cont):
from pycket.interpreter import return_multi_vals
lin = col = pos = values.w_false
if isinstance(p, values_struct.W_Struct):
i, o = struct_port_prop_huh(p)
if (i is None) and (o is None):
raise SchemeException("given struct doesn't have neither prop:input-port nor prop:output-port")
if i:
if isinstance(i, values.W_InputPort):
lin = i.get_line()
col = i.get_column()
pos = i.get_position()
elif isinstance(i, values.W_Fixnum):
port_index = i.value
return p.struct_type().accessor.call([p, values.W_Fixnum(port_index)], env, struct_port_loc_cont(True, env, cont))
else:
raise SchemeException("invalid value %s for prop:input-port of the given struct : %s" % (i, p.tostring()))
elif o:
if isinstance(o, values.W_OutputPort):
lin = o.get_line()
col = o.get_column()
pos = o.get_position()
elif isinstance(o, values.W_Fixnum):
port_index = o.value
return p.struct_type().accessor.call([p, values.W_Fixnum(port_index)], env, struct_port_loc_cont(False, env, cont))
else:
raise SchemeException("invalid value %s for prop:output-port of the given struct : %s" % (o, p.tostring()))
else:
assert isinstance(p, values.W_Port)
lin = p.get_line()
col = p.get_column()
pos = p.get_position()
return return_multi_vals(values.Values.make([lin, col, pos]), env, cont)
@expose("port-writes-special?", [values.W_Object])
def port_writes_special(v):
return values.w_false
@expose("port-writes-atomic?", [values.W_Object])
def port_writes_atomic(v):
return values.w_false
@expose("port-provides-progress-evts?", [values.W_Object])
def port_ppe(v):
return values.w_false
@expose("file-position*", [values.W_Object])
def file_pos_star(v):
return values.w_false
@expose("symbol-unreadable?", [values.W_Symbol])
def sym_unreadable(v):
if v.unreadable:
return values.w_true
return values.w_false
@expose("symbol-interned?", [values.W_Symbol])
def symbol_interned(v):
return values.W_Bool.make(v.is_interned())
@expose("symbol<?", arity=Arity.geq(1))
def symbol_lt(args):
name = "symbol<?"
if len(args) < 2:
raise SchemeException(name + ": requires at least 2 arguments")
head = args[0]
if not isinstance(head, values.W_Symbol):
raise SchemeException(name + ": not given a string")
for i in range(1, len(args)):
t = args[i]
if not isinstance(t, values.W_Symbol):
raise SchemeException(name + ": not given a string")
# FIXME: shouldn't need to convert to W_String
# but this is much easier than recreating the logic
if string.symbol_to_string_impl(head).cmp(string.symbol_to_string_impl(t)) >= 0:
return values.w_false
head = t
return values.w_true
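# The chained-comparison pattern symbol<? uses, sketched on plain Python
# strings for illustration: the whole chain holds iff every adjacent pair
# is strictly ordered.
def _chain_lt_sketch(names):
    return all(a < b for a, b in zip(names, names[1:]))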
@expose("immutable?", [values.W_Object])
def immutable(v):
return values.W_Bool.make(v.immutable())
@expose("make-thread-cell",
[values.W_Object, default(values.W_Bool, values.w_false)])
def make_thread_cell(v, pres):
return values.W_ThreadCell(v, False if pres is values.w_false else True)
@expose("thread-cell-ref", [values.W_ThreadCell])
def thread_cell_ref(cell):
return cell.value
@expose("thread-cell-set!", [values.W_ThreadCell, values.W_Object])
def thread_cell_set(cell, v):
cell.value = v
return values.w_void
@expose("current-preserved-thread-cell-values",
[default(values.W_ThreadCellValues, None)])
def current_preserved_thread_cell_values(v):
# Generate a new thread-cell-values object
if v is None:
return values.W_ThreadCellValues()
# Otherwise, we restore the values
for cell, val in v.assoc.iteritems():
assert cell.preserved
cell.value = val
return values.w_void
@expose("place-enabled?")
def do_is_place_enabled(args):
return values.w_false
@expose("gensym", [default(values.W_Object, values.W_Symbol.make("g"))])
def gensym(init):
from pycket.interpreter import Gensym
if not isinstance(init, values.W_Symbol) and not isinstance(init, values_string.W_String):
raise SchemeException("gensym exptected a string or symbol but got : %s" % init.tostring())
gensym_key = init.tostring()
return Gensym.gensym(gensym_key)
@expose("keyword<?", [values.W_Keyword, values.W_Keyword])
def keyword_less_than(a_keyword, b_keyword):
return values.W_Bool.make(a_keyword.value < b_keyword.value)
initial_env_vars = values.W_EnvVarSet({}, True)
expose_val("current-environment-variables", values_parameter.W_Parameter(initial_env_vars))
@expose("environment-variables-ref", [values.W_EnvVarSet, values.W_Bytes])
def env_var_ref(set, name):
r = set.get(name.as_str())
if r is None:
return values.w_false
else:
return values.W_Bytes.from_string(r)
@expose("environment-variables-set!", [values.W_EnvVarSet, values.W_Bytes, values.W_Bytes, default(values.W_Object, None)])
def env_var_set(set, name, val, fail):
return set.set(name.as_str(), val.as_str())
@expose("make-environment-variables")
def make_env_var(args):
return values.W_EnvVarSet({}, False)
@expose("environment-variables-names", [values.W_EnvVarSet])
def env_var_names(set):
names = set.get_names()
return values.to_list([values.W_Bytes.from_string(n) for n in names])
@expose("check-for-break", [])
def check_for_break():
return values.w_false
@expose("find-system-path", [values.W_Symbol], simple=True)
def find_sys_path(kind):
return racket_sys_paths.get_path(kind)
@expose("find-main-collects", [])
def find_main_collects():
return values.w_false
@expose("module-path-index-join",
[values.W_Object, values.W_Object, default(values.W_Object, None)], only_old=True)
def mpi_join(a, b, c):
return values.W_ModulePathIndex()
@expose("module-path-index-resolve",
[values.W_ModulePathIndex], only_old=True)
def mpi_resolve(a):
return values.W_ResolvedModulePath(values.W_Path("."))
# Loading
# FIXME: Proper semantics.
@expose("load", [values_string.W_String], simple=False, only_old=True)
def load(lib, env, cont):
from pycket.expand import ensure_json_ast_run
lib_name = lib.tostring()
json_ast = ensure_json_ast_run(lib_name)
if json_ast is None:
raise SchemeException(
"can't gernerate load-file for %s " % lib.tostring())
#ast = load_json_ast_rpython(json_ast)
raise NotImplementedError(
"would crash anyway when trying to interpret the Module")
#return ast, env, cont
expose_val("current-load-relative-directory", values_parameter.W_Parameter(values.w_false))
expose_val("current-write-relative-directory", values_parameter.W_Parameter(values.w_false))
initial_security_guard = values.W_SecurityGuard()
expose_val("current-security-guard", values_parameter.W_Parameter(initial_security_guard))
@expose("make-security-guard", [values.W_SecurityGuard, values.W_Procedure, values.W_Procedure, default(values.W_Procedure, values.w_false)])
def make_security_guard(parent, file, network, link):
return values.W_SecurityGuard()
@expose("unsafe-make-security-guard-at-root")
def unsafe_make_sec_guard(args):
return values.W_SecurityGuard()
@make_procedure("current-directory-guard", [values.W_Object], simple=False)
def current_directory_guard(path, env, cont):
from pycket.interpreter import return_value
# "cd"s at the os level
if not (isinstance(path, values_string.W_String) or isinstance(path, values.W_Path)):
raise SchemeException("current-directory: exptected a path-string? as argument 0, but got : %s" % path.tostring())
path_str = input_output.extract_path(path)
# if path is a complete-path?, set it
if path_str[0] == os.path.sep:
new_current_dir = path_str
else: # relative to the current one
current_dir = current_directory_param.get(cont)
current_path_str = input_output.extract_path(current_dir)
# let's hope that there's no symbolic links etc.
new_current_dir = os.path.normpath(os.path.sep.join([current_path_str, path_str]))
try:
os.chdir(new_current_dir)
except OSError:
raise SchemeException("path doesn't exist : %s" % path_str)
out_port = input_output.current_out_param.get(cont)
assert isinstance(out_port, values.W_OutputPort)
out_port.write("; now in %s\n" % new_current_dir)
return return_value(values.W_Path(new_current_dir), env, cont)
current_directory_param = values_parameter.W_Parameter(values.W_Path(os.getcwd()), current_directory_guard)
expose_val("current-directory", current_directory_param)
w_unix_sym = values.W_Symbol.make("unix")
w_windows_sym = values.W_Symbol.make("windows")
w_macosx_sym = values.W_Symbol.make("macosx")
_platform = sys.platform
def detect_platform():
if _platform == "darwin":
return w_macosx_sym
elif _platform in ['win32', 'cygwin']:
return w_windows_sym
else:
return w_unix_sym
w_system_sym = detect_platform()
w_os_sym = values.W_Symbol.make("os")
w_os_so_suffix = values.W_Symbol.make("so-suffix")
w_os_so_mode_sym = values.W_Symbol.make("so-mode")
w_fs_change_mode = values.W_Symbol.make("fs-change")
w_local_mode = values.W_Symbol.make("local")
w_unix_so_suffix = values.W_Bytes.from_string(".so")
w_word_sym = values.W_Symbol.make("word")
w_link_sym = values.W_Symbol.make("link")
w_vm_sym = values.W_Symbol.make("vm")
w_gc_sym = values.W_Symbol.make("gc")
w_machine_sym = values.W_Symbol.make("machine")
w_cross_sym = values.W_Symbol.make("cross")
w_fs_supported = values.W_Symbol.make("supported")
w_fs_scalable = values.W_Symbol.make("scalable")
w_fs_low_latency = values.W_Symbol.make("low-latency")
w_fs_file_level = values.W_Symbol.make("file-level")
w_target_machine_sym = values.W_Symbol.make("target-machine")
def system_type(w_what):
# os
if w_what is w_os_sym:
return w_system_sym
# word
if w_what is w_word_sym:
#return values.W_Fixnum(8*struct.calcsize("P"))
return values.W_Fixnum(64)
# vm
if w_what is w_vm_sym:
return values.W_Symbol.make("pycket")
# gc
if w_what is w_gc_sym:
return values.W_Symbol.make("3m") # ??
# link
#
# 'static (Unix)
# 'shared (Unix)
# 'dll (Windows)
# 'framework (Mac OS)
if w_what is w_link_sym:
return values.W_Symbol.make("static")
# machine
if w_what is w_machine_sym:
return values_string.W_String.make("further details about the current machine in a platform-specific format")
# so-suffix
if w_what is w_os_so_suffix:
return w_unix_so_suffix
# so-mode
if w_what is w_os_so_mode_sym:
return w_local_mode
# fs-change
if w_what is w_fs_change_mode:
from pycket.prims.vector import vector
w_f = values.w_false
# FIXME: Is there a way to get this info from sys or os?
if w_system_sym is w_unix_sym:
return vector([w_fs_supported, w_fs_scalable, w_f, w_fs_file_level])
else:
return vector([w_f, w_f, w_f, w_f])
# cross
if w_what is w_cross_sym:
return values.W_Symbol.make("infer")
# cross
if w_what is w_target_machine_sym:
return values.W_Symbol.make("pycket")
raise SchemeException("unexpected system-type symbol '%s" % w_what.utf8value)
expose("system-type", [default(values.W_Symbol, w_os_sym)])(system_type)
def system_path_convention_type():
if w_system_sym is w_windows_sym:
return w_windows_sym
else:
return w_unix_sym
expose("system-path-convention-type", [])(system_path_convention_type)
@expose("bytes->path", [values.W_Bytes, default(values.W_Symbol, system_path_convention_type())])
def bytes_to_path(bstr, typ):
# FIXME : ignores the type, won't work for windows
return values.W_Path(bstr.as_str())
major_gc_sym = values.W_Symbol.make("major")
minor_gc_sym = values.W_Symbol.make("minor")
incremental_gc_sym = values.W_Symbol.make("incremental")
@expose("collect-garbage", [default(values.W_Symbol, major_gc_sym)])
@jit.dont_look_inside
def do_collect_garbage(request):
from rpython.rlib import rgc
rgc.collect()
return values.w_void
@continuation
def vec2val_cont(vals, vec, n, s, l, env, cont, new_vals):
from pycket.interpreter import return_multi_vals, check_one_val
new = check_one_val(new_vals)
vals[n] = new
if s+n+1 == l:
return return_multi_vals(values.Values.make(vals), env, cont)
else:
return vec.vector_ref(s+n+1, env, vec2val_cont(vals, vec, n+1, s, l, env, cont))
@expose("vector->values", [values_vector.W_Vector,
default(values.W_Fixnum, values.W_Fixnum.ZERO),
default(values.W_Fixnum, None)],
simple=False)
def vector_to_values(v, start, end, env, cont):
from pycket.interpreter import return_multi_vals
l = end.value if end else v.length()
s = start.value
if s == l:
return return_multi_vals(values.Values.make([]), env, cont)
else:
vals = [None] * (l - s)
return v.vector_ref(s, env, vec2val_cont(vals, v, 0, s, l, env, cont))
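# Illustrative straight-line version of the protocol above: read slots
# start..end-1 one at a time into a fresh list. vec2val_cont threads this
# loop through the continuation chain so that chaperoned vectors can
# interpose on every single read.
def _vector_to_values_sketch(slots, start, end):
    vals = [None] * (end - start)
    for n in range(end - start):
        vals[n] = slots[start + n]
    return vals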
class ReaderGraphBuilder(object):
def __init__(self):
self.state = {}
def reader_graph_loop_cons(self, v):
assert isinstance(v, values.W_Cons)
p = values.W_WrappedConsMaybe(values.w_unsafe_undefined, values.w_unsafe_undefined)
self.state[v] = p
car = self.reader_graph_loop(v.car())
cdr = self.reader_graph_loop(v.cdr())
p._car = car
p._cdr = cdr
# FIXME: should change this to say if it's a proper list now ...
return p
    def reader_graph_loop_vector(self, v):
        assert isinstance(v, values_vector.W_Vector)
        length = v.length()  # avoid shadowing the len() builtin
        p = values_vector.W_Vector.fromelement(values.w_false, length)
        self.state[v] = p
        for i in range(length):
            vi = v.ref(i)
            p.set(i, self.reader_graph_loop(vi))
        return p
def reader_graph_loop_struct(self, v):
assert isinstance(v, values_struct.W_Struct)
type = v.struct_type()
if not type.isprefab:
return v
size = v._get_size_list()
p = values_struct.W_Struct.make_n(size, type)
self.state[v] = p
for i in range(size):
val = self.reader_graph_loop(v._ref(i))
p._set_list(i, val)
return p
def reader_graph_loop_proxy(self, v):
assert v.is_proxy()
inner = self.reader_graph_loop(v.get_proxied())
p = v.replace_proxied(inner)
self.state[v] = p
return p
def reader_graph_loop_equal_hash(self, v):
from pycket.hash.equal import W_EqualHashTable
assert isinstance(v, W_EqualHashTable)
empty = v.make_empty()
self.state[v] = empty
for key, val in v.hash_items():
key = self.reader_graph_loop(key)
val = self.reader_graph_loop(val)
empty._set(key, val)
return empty
def reader_graph_loop(self, v):
assert v is not None
from pycket.hash.equal import W_EqualHashTable
if v in self.state:
return self.state[v]
if v.is_proxy():
return self.reader_graph_loop_proxy(v)
if isinstance(v, values.W_Cons):
return self.reader_graph_loop_cons(v)
if isinstance(v, values_vector.W_Vector):
return self.reader_graph_loop_vector(v)
if isinstance(v, values_struct.W_Struct):
return self.reader_graph_loop_struct(v)
if isinstance(v, W_EqualHashTable):
return self.reader_graph_loop_equal_hash(v)
if isinstance(v, values.W_Placeholder):
return self.reader_graph_loop(v.value)
# XXX FIXME: doesn't handle stuff
return v
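# Sketch (plain Python, illustrative names) of the allocate-then-fill
# technique the builder uses for cyclic data: register a placeholder in the
# state table *before* recursing, so back-references to a node still under
# construction resolve to that placeholder instead of looping forever.
def _rebuild_cycles_sketch(v, state=None):
    state = {} if state is None else state
    if id(v) in state:
        return state[id(v)]
    if isinstance(v, list) and len(v) == 2:   # treat [car, cdr] as a cons cell
        p = [None, None]
        state[id(v)] = p                      # register before recursing
        p[0] = _rebuild_cycles_sketch(v[0], state)
        p[1] = _rebuild_cycles_sketch(v[1], state)
        return p
    return v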
@expose("make-reader-graph", [values.W_Object])
@jit.dont_look_inside
def make_reader_graph(v):
from rpython.rlib.nonconst import NonConstant
builder = ReaderGraphBuilder()
if NonConstant(False):
# XXX JIT seems be generating questionable code when the argument of
# make-reader-graph is a virtual cons cell. The car and cdr fields get
# set by the generated code after the call, causing reader_graph_loop to
        # crash. I suspect the problem has to do with the translator's effect analysis.
# Example:
# p29 = new_with_vtable(descr=<SizeDescr 24>)
# p31 = call_r(ConstClass(make_reader_graph), p29, descr=<Callr 8 r EF=5>)
# setfield_gc(p29, p15, descr=<FieldP pycket.values.W_WrappedCons.inst__car 8 pure>)
# setfield_gc(p29, ConstPtr(ptr32), descr=<FieldP pycket.values.W_WrappedCons.inst__cdr 16 pure>)
if isinstance(v, values.W_WrappedCons):
print v._car.tostring()
print v._cdr.tostring()
return builder.reader_graph_loop(v)
@expose("procedure-specialize", [procedure])
def procedure_specialize(proc):
from pycket.ast_visitor import copy_ast
# XXX This is the identity function simply for compatibility.
# Another option is to wrap closures in a W_PromotableClosure, which might
# get us a similar effect from the RPython JIT.
if not isinstance(proc, values.W_Closure1AsEnv):
return proc
code = copy_ast(proc.caselam)
vals = proc._get_full_list()
new_closure = values.W_Closure1AsEnv.make(vals, code, proc._prev)
    return proc  # identity for now (see XXX above); new_closure is computed but not returned
@expose("processor-count", [])
def processor_count():
return values.W_Fixnum.ONE
cached_values = {}
@continuation
def thunk_cont(index, env, cont, _vals):
from pycket.interpreter import check_one_val, return_value
val = check_one_val(_vals)
cached_values[index] = val
return return_value(val, env, cont)
@expose("cache-configuration", [values.W_Fixnum, values.W_Object], simple=False)
def cache_configuration(index, proc, env, cont):
from pycket.interpreter import return_value
if index in cached_values:
return return_value(cached_values[index], env, cont)
return proc.call([], env, thunk_cont(index, env, cont))
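# cache-configuration is a memoized thunk table: the first call for an index
# runs proc and thunk_cont records its single result in cached_values; later
# calls short-circuit. A direct-style sketch of the same idea (the default
# argument serves as the module-level cache):
def _cache_configuration_sketch(index, thunk, _cache={}):
    if index not in _cache:
        _cache[index] = thunk()
    return _cache[index]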
@expose("make-readtable", [values.W_Object, values.W_Character, values.W_Symbol, procedure], only_old=True)
def make_readtable(parent, char, sym, proc):
print "making readtable", [parent, char, sym, proc]
return values.W_ReadTable(parent, char, sym, proc)
@expose("read/recursive", only_old=True)
def read_recursive(args):
return values.w_false
def make_stub_predicates(names):
    def make_stub_predicate(name):
        # Build each stub in its own scope so the closure captures the right
        # message (a closure created directly in the loop would see only the
        # last iteration's value).
        message = "%s: not yet implemented" % name
        @expose(name, [values.W_Object])
        def predicate(obj):
            if not objectmodel.we_are_translated():
                print message
            return values.w_false
        predicate.__name__ = "stub_predicate(%s)" % name
    for name in names:
        make_stub_predicate(name)
def make_stub_predicates_no_linklet():
STUB_PREDICATES_NO_LINKLET = ["namespace-anchor?",
"rename-transformer?",
"readtable?",
"liberal-define-context?",
"compiled-expression?",
"special-comment?",
"internal-definition-context?",
"namespace?",
"compiled-module-expression?"]
make_stub_predicates(STUB_PREDICATES_NO_LINKLET)
if not w_global_config.is_expander_loaded():
make_stub_predicates_no_linklet()
@expose("unsafe-start-atomic", [])
def unsafe_start_atomic():
return values.w_void
@expose("unsafe-start-breakable-atomic", [])
def unsafe_start_breakable_atomic():
return values.w_void
@expose("unsafe-end-breakable-atomic", [])
def unsafe_end_breakable_atomic():
return values.w_void
@expose("unsafe-end-atomic", [])
def unsafe_end_atomic():
return values.w_void
@expose("__dummy-function__", [])
def __dummy__():
from rpython.rlib.rbigint import ONERBIGINT
from rpython.rlib.runicode import str_decode_utf_8
ex = ONERBIGINT.touint()
print ex
@expose("primitive-table", [values.W_Object])
def primitive_table(v):
if v not in select_prim_table:
return values.w_false
if v in prim_table_cache:
return prim_table_cache[v]
expose_env = {}
for prim_name_sym in select_prim_table[v]:
if prim_name_sym in prim_env:
expose_env[prim_name_sym] = prim_env[prim_name_sym]
table = make_simple_immutable_table(W_EqImmutableHashTable,
expose_env.keys(),
expose_env.values())
prim_table_cache[v] = table
return table
@expose("unquoted-printing-string", [values_string.W_String])
def up_string(s):
return values.W_UnquotedPrintingString(s)
@expose("unquoted-printing-string-value", [values.W_UnquotedPrintingString])
def ups_val(v):
return v.string
# Any primitive on Pycket can use "w_global_config.is_debug_active()"
# to control debug outputs (or breakpoints in the interpreter) (with
# an even greater output control with the console_log with verbosity
# levels)
@expose("pycket:activate-debug", [])
def activate_debug():
w_global_config.activate_debug()
@expose("pycket:deactivate-debug", [])
def deactivate_debug():
w_global_config.deactivate_debug()
@expose("pycket:is-debug-active", [])
def debug_status():
return values.W_Bool.make(w_global_config.is_debug_active())
# Maybe we should do it with just one Racket level parameter
@expose("pycket:get-verbosity", [])
def get_verbosity():
lvl = w_global_config.get_config_val('verbose')
return values.W_Fixnum(lvl)
@expose("pycket:set-verbosity", [values.W_Fixnum])
def set_verbosity(v):
w_global_config.set_config_val('verbose', v.value)
@expose("pycket:activate-keyword", [values.W_Symbol])
def activate_debug_keyword(v):
w_global_config.activate_keyword(v.variable_name())
@expose("pycket:deactivate-keyword", [values.W_Symbol])
def deactivate_debug_keyword(v):
w_global_config.deactivate_keyword(v.variable_name())
@expose("pycket:report-undefined-prims", [])
def report_undefined_prims():
from pycket.prims.primitive_tables import report_undefined_prims
report_undefined_prims()
addr_sym = values.W_Symbol.make("mem-address")
@expose("pycket:print", [values.W_Object, default(values.W_Symbol, addr_sym)])
def pycket_print(o, sym):
from pycket.util import console_log
if sym is addr_sym:
console_log("PYCKET:PRINT : %s" % o, debug=True)
else:
console_log("PYCKET:PRINT : %s" % o.tostring(), debug=True)
@expose("pycket:eq?", [values.W_Object, values.W_Object])
def pycket_eq(o1, o2):
return values.W_Bool.make(o1 is o2)
expose_val("error-print-width", values_parameter.W_Parameter(values.W_Fixnum.make(256)))
@expose("banner", [])
def banner():
from pycket.env import w_version
version = w_version.get_version()
return values_string.W_String.make("Welcome to Pycket %s.\n"%version)
executable_yield_handler = values_parameter.W_Parameter(do_void.w_prim)
expose_val("executable-yield-handler", executable_yield_handler)
current_load_extension = values_parameter.W_Parameter(do_void.w_prim)
expose_val("current-load-extension", current_load_extension)
@expose("system-language+country", [])
def lang_country():
return values_string.W_String.make("en_US.UTF-8")
@expose("unsafe-add-post-custodian-shutdown", [values.W_Object])
def add_post(p):
return values.w_void
@expose("make-will-executor", [])
def make_will_exec():
return values.W_WillExecutor()
@expose("will-register", [values.W_WillExecutor, values.W_Object, values.W_Object])
def will_register(w, v, p):
return values.w_void
@expose("will-execute", [values.W_WillExecutor])
def will_exec(w):
return values.w_void
@expose("will-try-execute", [values.W_WillExecutor, default(values.W_Object, values.w_false)])
def will_try_exec(w, v):
return v
@expose("thread", [values.W_Object])
def thread(p):
return values.W_Thread()
@expose("thread/suspend-to-kill", [values.W_Object])
def thread_susp(p):
return values.W_Thread()
@expose("make-channel", [])
def make_channel():
return values.W_Channel()
@expose("primitive-lookup", [values.W_Symbol], simple=True)
def primitive_lookup(sym):
return prim_env.get(sym, values.w_false)
|
pycket/pycket
|
pycket/prims/general.py
|
Python
|
mit
| 75,231
|
class Item(object):
def __init__(self, path, name):
self.path = path
self.name = name
|
nickw444/MediaBrowser
|
Item.py
|
Python
|
mit
| 106
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
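def _example_build_list_request():
    # Hedged usage sketch (the IDs below are placeholders, not real Azure
    # resources): the builder only assembles an HttpRequest; the client
    # pipeline in the list() operation further down is what actually signs
    # and sends it.
    request = build_list_request(
        subscription_id="00000000-0000-0000-0000-000000000000",
        resource_group_name="example-rg",
        account_name="example-account",
    )
    return request.method, request.url  # ("GET", ".../backupPolicies?api-version=2021-08-01")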
def build_get_request(
subscription_id: str,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str'),
"backupPolicyName": _SERIALIZER.url("backup_policy_name", backup_policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_request_initial(
subscription_id: str,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str'),
"backupPolicyName": _SERIALIZER.url("backup_policy_name", backup_policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str'),
"backupPolicyName": _SERIALIZER.url("backup_policy_name", backup_policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str'),
"backupPolicyName": _SERIALIZER.url("backup_policy_name", backup_policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
class BackupPoliciesOperations(object):
"""BackupPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.netapp.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> Iterable["_models.BackupPoliciesList"]:
"""List backup policies.
        List backup policies for the NetApp account.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BackupPoliciesList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.netapp.models.BackupPoliciesList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPoliciesList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("BackupPoliciesList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
**kwargs: Any
) -> "_models.BackupPolicy":
"""Get a backup Policy.
Get a particular backup Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
        :param backup_policy_name: Backup policy name that uniquely identifies the backup policy.
:type backup_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackupPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.netapp.models.BackupPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
def _create_initial(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
body: "_models.BackupPolicy",
**kwargs: Any
) -> Optional["_models.BackupPolicy"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.BackupPolicy"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'BackupPolicy')
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
@distributed_trace
def begin_create(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
body: "_models.BackupPolicy",
**kwargs: Any
) -> LROPoller["_models.BackupPolicy"]:
"""Create a backup policy.
        Create a backup policy for the NetApp account.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
        :param backup_policy_name: Backup policy name that uniquely identifies the backup policy.
:type backup_policy_name: str
:param body: Backup policy object supplied in the body of the operation.
:type body: ~azure.mgmt.netapp.models.BackupPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BackupPolicy or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.netapp.models.BackupPolicy]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
def _update_initial(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
body: "_models.BackupPolicyPatch",
**kwargs: Any
) -> "_models.BackupPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'BackupPolicyPatch')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
body: "_models.BackupPolicyPatch",
**kwargs: Any
) -> LROPoller["_models.BackupPolicy"]:
"""Patch a backup policy.
        Patch a backup policy for the NetApp account.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
        :param backup_policy_name: Backup policy name that uniquely identifies the backup policy.
:type backup_policy_name: str
:param body: Backup policy object supplied in the body of the operation.
:type body: ~azure.mgmt.netapp.models.BackupPolicyPatch
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BackupPolicy or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.netapp.models.BackupPolicy]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Delete a backup policy.
        Delete the specified backup policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
        :param backup_policy_name: Backup policy name that uniquely identifies the backup policy.
:type backup_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
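# Illustrative usage sketch (not part of the generated client; the variable
# names below are hypothetical): assuming a NetAppManagementClient was
# constructed elsewhere with valid credentials, deleting a backup policy and
# waiting on the long-running operation would look roughly like this:
#
#     poller = client.backup_policies.begin_delete(
#         resource_group_name="myResourceGroup",
#         account_name="myNetAppAccount",
#         backup_policy_name="myBackupPolicy",
#     )
#     poller.result()  # blocks until the delete LRO completes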
|
Azure/azure-sdk-for-python
|
sdk/netapp/azure-mgmt-netapp/azure/mgmt/netapp/operations/_backup_policies_operations.py
|
Python
|
mit
| 31,820
|
from sys import platform as sys_plat
import platform
import os
from ctypes import *
if sys_plat == "win32":
def find_win_dll(arch):
""" Finds the highest versioned windows dll for the specified architecture. """
dlls = []
filename = 'VimbaC.dll'
# look in local working directory first
if os.path.isfile(filename):
dlls.append(filename)
if not dlls:
if 'VIMBA_HOME' in os.environ:
candidate = os.environ['VIMBA_HOME'] + r'\VimbaC\Bin\Win%i\VimbaC.dll' % (arch)
if os.path.isfile(candidate):
dlls.append(candidate)
if not dlls:
bases = [
r'C:\Program Files\Allied Vision Technologies\AVTVimba_%i.%i\VimbaC\Bin\Win%i\VimbaC.dll',
r'C:\Program Files\Allied Vision\Vimba_%i.%i\VimbaC\Bin\Win%i\VimbaC.dll'
]
for base in bases:
for major in range(4):
for minor in range(10):
candidate = base % (major, minor, arch)
if os.path.isfile(candidate):
dlls.append(candidate)
if not dlls:
raise IOError("VimbaC.dll not found.")
return dlls[-1]
if '64' in platform.architecture()[0]:
vimbaC_path = find_win_dll(64)
else:
vimbaC_path = find_win_dll(32)
dll_loader = windll
else:
dll_loader = cdll
    def find_so(arch_dir, genicam_path):
        vimbaC_found = False
        for tlPath in [p for p in os.environ.get(genicam_path).split(":") if p]:
            vimba_dir = "/".join(tlPath.split("/")[1:-3])
            vimbaC_path = "/" + vimba_dir + "/VimbaC/DynamicLib/" + arch_dir + "/libVimbaC.so"
if os.path.isfile(vimbaC_path):
vimbaC_found = True
break
if not vimbaC_found:
raise OSError('No libVimbaC.so found')
return vimbaC_path
    if 'x86_64' in os.uname()[4]:
        assert os.environ.get("GENICAM_GENTL64_PATH"), \
            "you need the GENICAM_GENTL64_PATH environment variable set. Make sure you have Vimba installed and have loaded the /etc/profile.d/ scripts"
        vimbaC_path = find_so('x86_64bit', "GENICAM_GENTL64_PATH")
    elif 'x86_32' in os.uname()[4]:
        print("Warning: x86_32 reached!")
        assert os.environ.get("GENICAM_GENTL32_PATH"), \
            "you need the GENICAM_GENTL32_PATH environment variable set. Make sure you have Vimba installed and have loaded the /etc/profile.d/ scripts"
        vimbaC_path = find_so('x86_32bit', 'GENICAM_GENTL32_PATH')
    elif 'arm' in os.uname()[4]:
        assert os.environ.get("GENICAM_GENTL32_PATH"), \
            "you need the GENICAM_GENTL32_PATH environment variable set. Make sure you have Vimba installed and have loaded the /etc/profile.d/ scripts"
        vimbaC_path = find_so('arm_32bit', 'GENICAM_GENTL32_PATH')
    elif 'aarch64' in os.uname()[4]:
        assert os.environ.get("GENICAM_GENTL64_PATH"), \
            "you need the GENICAM_GENTL64_PATH environment variable set. Make sure you have Vimba installed and have loaded the /etc/profile.d/ scripts"
        vimbaC_path = find_so('arm_64bit', "GENICAM_GENTL64_PATH")
else:
raise ValueError("Pymba currently doesn't support %s" % os.uname()[4])
# Callback function type
if sys_plat == "win32":
CALLBACK_FUNCTYPE = WINFUNCTYPE
else:
CALLBACK_FUNCTYPE = CFUNCTYPE
class NiceStructure(Structure):
def __repr__(self):
field_names = (field[0] for field in self._fields_)
return '{}({})'.format(
type(self).__name__,
", ".join("=".join((field, str(getattr(self, field))))
for field in field_names)
)
class VmbVersionInfo(NiceStructure):
_fields_ = [
('major', c_uint32),
('minor', c_uint32),
('patch', c_uint32)]
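# Example (added for illustration): because VmbVersionInfo inherits
# NiceStructure's __repr__, instances print with named fields, e.g.
# repr(VmbVersionInfo(1, 8, 0)) == 'VmbVersionInfo(major=1, minor=8, patch=0)'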
class VmbInterfaceInfo(NiceStructure):
_fields_ = [
# Unique identifier for each interface
('interfaceIdString', c_char_p),
# Interface type, see VmbInterfaceType
('interfaceType', c_uint32),
# Interface name, given by the transport layer
('interfaceName', c_char_p),
# Serial number
('serialString', c_char_p),
# Used access mode, see VmbAccessModeType
('permittedAccess', c_uint32)]
class VmbCameraInfo(NiceStructure):
_fields_ = [
# Unique identifier for each camera
('cameraIdString', c_char_p),
# Name of the camera
('cameraName', c_char_p),
# Model name
('modelName', c_char_p),
# Serial number
('serialString', c_char_p),
# Used access mode, see VmbAccessModeType
('permittedAccess', c_uint32),
# Unique value for each interface or bus
('interfaceIdString', c_char_p)]
class VmbFeatureInfo(NiceStructure):
_fields_ = [
('name', c_char_p),
('featureDataType', c_uint32),
('featureFlags', c_uint32),
('category', c_char_p),
('displayName', c_char_p),
('pollingTime', c_uint32),
('unit', c_char_p),
('representation', c_char_p),
('visibility', c_uint32),
('tooltip', c_char_p),
('description', c_char_p),
('sfncNamespace', c_char_p),
('isStreamable', c_bool),
('hasAffectedFeatures', c_bool),
('hasSelectedFeatures', c_bool)]
class VmbFrame(Structure):
_fields_ = [
# ---- IN ----
# Comprises image and ancillary data
('buffer', c_void_p),
# Size of the data buffer
('bufferSize', c_uint32),
# User context filled during queuing
('context', c_void_p * 4),
# ---- OUT ----
# Resulting status of the receive operation
('receiveStatus', c_int32),
# Resulting flags of the receive operation
('receiveFlags', c_uint32),
# Size of the image data inside the data buffer
('imageSize', c_uint32),
# Size of the ancillary data inside the data buffer
('ancillarySize', c_uint32),
# Pixel format of the image
('pixelFormat', c_uint32),
# Width of an image
('width', c_uint32),
# Height of an image
('height', c_uint32),
# Horizontal offset of an image
('offsetX', c_uint32),
# Vertical offset of an image
('offsetY', c_uint32),
# Unique ID of this frame in this stream
('frameID', c_uint64),
# Timestamp of the data transfer
('timestamp', c_uint64)]
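# Buffer-attachment sketch (added for illustration; camera_handle is a
# hypothetical, already-opened camera): a frame buffer is typically sized
# from the camera's standard PayloadSize feature before announcing it.
#
#     payload_size = c_int64()
#     vmb_feature_int_get(camera_handle, b'PayloadSize', byref(payload_size))
#     frame = VmbFrame()
#     buf = create_string_buffer(payload_size.value)
#     frame.buffer = cast(buf, c_void_p)
#     frame.bufferSize = payload_size.value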
class VimbaFeaturePersistSettings(NiceStructure):
_fields_ = [
('persistType', c_uint32),
('maxIterations', c_uint32),
('loggingLevel', c_uint32)]
_vimba_lib = dll_loader.LoadLibrary(vimbaC_path)
# ----- The below function signatures are defined in VimbaC.h -----
# callback for frame queue
vmb_frame_callback_func = CALLBACK_FUNCTYPE(None,
c_void_p,
POINTER(VmbFrame))
# Callback for Invalidation events
vmb_feature_invalidation_callback_fun = CALLBACK_FUNCTYPE(None,
c_void_p, # const VmbHandle_t handle
c_char_p, # const char* name
c_void_p) # void* pUserContext
vmb_version_query = _vimba_lib.VmbVersionQuery
vmb_version_query.restype = c_int32
vmb_version_query.argtypes = (POINTER(VmbVersionInfo),
c_uint32)
vmb_startup = _vimba_lib.VmbStartup
vmb_startup.restype = c_int32
vmb_shutdown = _vimba_lib.VmbShutdown
vmb_cameras_list = _vimba_lib.VmbCamerasList
vmb_cameras_list.restype = c_int32
vmb_cameras_list.argtypes = (POINTER(VmbCameraInfo),
c_uint32,
POINTER(c_uint32),
c_uint32)
vmb_camera_info_query = _vimba_lib.VmbCameraInfoQuery
vmb_camera_info_query.restype = c_int32
vmb_camera_info_query.argtypes = (c_char_p,
POINTER(VmbCameraInfo),
c_uint32)
vmb_camera_open = _vimba_lib.VmbCameraOpen
vmb_camera_open.restype = c_int32
vmb_camera_open.argtypes = (c_char_p,
c_uint32,
c_void_p)
vmb_camera_close = _vimba_lib.VmbCameraClose
vmb_camera_close.restype = c_int32
vmb_camera_close.argtypes = (c_void_p,)
vmb_features_list = _vimba_lib.VmbFeaturesList
vmb_features_list.restype = c_int32
vmb_features_list.argtypes = (c_void_p,
POINTER(VmbFeatureInfo),
c_uint32,
POINTER(c_uint32),
c_uint32)
vmb_feature_info_query = _vimba_lib.VmbFeatureInfoQuery
vmb_feature_info_query.restype = c_int32
vmb_feature_info_query.argtypes = (c_void_p,
c_char_p,
POINTER(VmbFeatureInfo),
c_uint32)
# todo VmbFeatureListAffected
# todo VmbFeatureListSelected
# todo VmbFeatureAccessQuery
vmb_feature_int_get = _vimba_lib.VmbFeatureIntGet
vmb_feature_int_get.restype = c_int32
vmb_feature_int_get.argtypes = (c_void_p,
c_char_p,
POINTER(c_int64))
vmb_feature_int_set = _vimba_lib.VmbFeatureIntSet
vmb_feature_int_set.restype = c_int32
vmb_feature_int_set.argtypes = (c_void_p,
c_char_p,
c_int64)
vmb_feature_int_range_query = _vimba_lib.VmbFeatureIntRangeQuery
vmb_feature_int_range_query.restype = c_int32
vmb_feature_int_range_query.argtypes = (c_void_p,
c_char_p,
POINTER(c_int64),
POINTER(c_int64))
# todo VmbFeatureIntIncrementQuery
vmb_feature_float_get = _vimba_lib.VmbFeatureFloatGet
vmb_feature_float_get.restype = c_int32
vmb_feature_float_get.argtypes = (c_void_p,
c_char_p,
POINTER(c_double))
vmb_feature_float_set = _vimba_lib.VmbFeatureFloatSet
vmb_feature_float_set.restype = c_int32
vmb_feature_float_set.argtypes = (c_void_p,
c_char_p,
c_double)
vmb_feature_float_range_query = _vimba_lib.VmbFeatureFloatRangeQuery
vmb_feature_float_range_query.restype = c_int32
vmb_feature_float_range_query.argtypes = (c_void_p,
c_char_p,
POINTER(c_double),
POINTER(c_double))
# todo VmbFeatureFloatIncrementQuery
vmb_feature_enum_get = _vimba_lib.VmbFeatureEnumGet
vmb_feature_enum_get.restype = c_int32
vmb_feature_enum_get.argtypes = (c_void_p,
c_char_p,
POINTER(c_char_p))
vmb_feature_enum_set = _vimba_lib.VmbFeatureEnumSet
vmb_feature_enum_set.restype = c_int32
vmb_feature_enum_set.argtypes = (c_void_p,
c_char_p,
c_char_p)
vmb_feature_enum_range_query = _vimba_lib.VmbFeatureEnumRangeQuery
vmb_feature_enum_range_query.restype = c_int32
vmb_feature_enum_range_query.argtypes = (c_void_p,
c_char_p,
POINTER(c_char_p),
c_uint32,
POINTER(c_uint32))
# todo VmbFeatureEnumIsAvailable
# todo VmbFeatureEnumAsInt
# todo VmbFeatureEnumAsString
# todo VmbFeatureEnumEntryGet
vmb_feature_string_get = _vimba_lib.VmbFeatureStringGet
vmb_feature_string_get.restype = c_int32
vmb_feature_string_get.argtypes = (c_void_p,
c_char_p,
c_char_p,
c_uint32,
POINTER(c_uint32))
vmb_feature_string_set = _vimba_lib.VmbFeatureStringSet
vmb_feature_string_set.restype = c_int32
vmb_feature_string_set.argtypes = (c_void_p,
c_char_p,
c_char_p)
# todo VmbFeatureStringMaxlengthQuery
vmb_feature_bool_get = _vimba_lib.VmbFeatureBoolGet
vmb_feature_bool_get.restype = c_int32
vmb_feature_bool_get.argtypes = (c_void_p,
c_char_p,
POINTER(c_bool))
vmb_feature_bool_set = _vimba_lib.VmbFeatureBoolSet
vmb_feature_bool_set.restype = c_int32
vmb_feature_bool_set.argtypes = (c_void_p,
c_char_p,
c_bool)
vmb_feature_command_run = _vimba_lib.VmbFeatureCommandRun
vmb_feature_command_run.restype = c_int32
vmb_feature_command_run.argtypes = (c_void_p,
c_char_p)
vmb_feature_command_is_done = _vimba_lib.VmbFeatureCommandIsDone
vmb_feature_command_is_done.restype = c_int32
vmb_feature_command_is_done.argtypes = (c_void_p,
c_char_p,
POINTER(c_bool))
# todo VmbFeatureRawGet
# todo VmbFeatureRawSet
# todo VmbFeatureRawLengthQuery
vmb_feature_invalidation_register = _vimba_lib.VmbFeatureInvalidationRegister
vmb_feature_invalidation_register.restype = c_int32
vmb_feature_invalidation_register.argtypes = (c_void_p,
c_char_p,
vmb_feature_invalidation_callback_fun,
c_void_p)
vmb_feature_invalidation_unregister = _vimba_lib.VmbFeatureInvalidationUnregister
vmb_feature_invalidation_unregister.restype = c_int32
vmb_feature_invalidation_unregister.argtypes = (c_void_p,
c_char_p,
vmb_feature_invalidation_callback_fun)
vmb_frame_announce = _vimba_lib.VmbFrameAnnounce
vmb_frame_announce.restype = c_int32
vmb_frame_announce.argtypes = (c_void_p,
POINTER(VmbFrame),
c_uint32)
vmb_frame_revoke = _vimba_lib.VmbFrameRevoke
vmb_frame_revoke.restype = c_int32
vmb_frame_revoke.argtypes = (c_void_p,
POINTER(VmbFrame))
vmb_frame_revoke_all = _vimba_lib.VmbFrameRevokeAll
vmb_frame_revoke_all.restype = c_int32
vmb_frame_revoke_all.argtypes = (c_void_p,)
vmb_capture_start = _vimba_lib.VmbCaptureStart
vmb_capture_start.restype = c_int32
vmb_capture_start.argtypes = (c_void_p,)
vmb_capture_end = _vimba_lib.VmbCaptureEnd
vmb_capture_end.restype = c_int32
vmb_capture_end.argtypes = (c_void_p,)
vmb_capture_frame_queue = _vimba_lib.VmbCaptureFrameQueue
vmb_capture_frame_queue.restype = c_int32
vmb_capture_frame_queue.argtypes = (c_void_p,
POINTER(VmbFrame),
c_void_p)
vmb_capture_frame_wait = _vimba_lib.VmbCaptureFrameWait
vmb_capture_frame_wait.restype = c_int32
vmb_capture_frame_wait.argtypes = (c_void_p,
POINTER(VmbFrame),
c_uint32)
vmb_capture_queue_flush = _vimba_lib.VmbCaptureQueueFlush
vmb_capture_queue_flush.restype = c_int32
vmb_capture_queue_flush.argtypes = (c_void_p,)
vmb_interfaces_list = _vimba_lib.VmbInterfacesList
vmb_interfaces_list.restype = c_int32
vmb_interfaces_list.argtypes = (POINTER(VmbInterfaceInfo),
c_uint32,
POINTER(c_uint32),
c_uint32)
vmb_interface_open = _vimba_lib.VmbInterfaceOpen
vmb_interface_open.restype = c_int32
vmb_interface_open.argtypes = (c_char_p,
c_void_p)
vmb_interface_close = _vimba_lib.VmbInterfaceClose
vmb_interface_close.restype = c_int32
vmb_interface_close.argtypes = (c_void_p,)
vmb_ancillary_data_open = _vimba_lib.VmbAncillaryDataOpen
vmb_ancillary_data_open.restype = c_int32
vmb_ancillary_data_open.argtypes = (POINTER(VmbFrame),
                                    POINTER(c_void_p))
vmb_ancillary_data_close = _vimba_lib.VmbAncillaryDataClose
vmb_ancillary_data_close.restype = c_int32
vmb_ancillary_data_close.argtypes = (c_void_p,)
# todo VmbMemoryRead
# todo VmbMemoryWrite
vmb_registers_read = _vimba_lib.VmbRegistersRead
vmb_registers_read.restype = c_int32
vmb_registers_read.argtypes = (c_void_p,
c_uint32,
POINTER(c_uint64),
POINTER(c_uint64),
POINTER(c_uint32))
vmb_registers_write = _vimba_lib.VmbRegistersWrite
vmb_registers_write.restype = c_int32
vmb_registers_write.argtypes = (c_void_p,
c_uint32,
POINTER(c_uint64),
POINTER(c_uint64),
POINTER(c_uint32))
vmb_camera_settings_load = _vimba_lib.VmbCameraSettingsLoad
vmb_camera_settings_load.restype = c_int32
vmb_camera_settings_load.argtypes = (c_void_p,
c_char_p,
POINTER(VimbaFeaturePersistSettings),
c_uint32)
vmb_camera_settings_save = _vimba_lib.VmbCameraSettingsSave
vmb_camera_settings_save.restype = c_int32
vmb_camera_settings_save.argtypes = (c_void_p,
c_char_p,
POINTER(VimbaFeaturePersistSettings),
c_uint32)
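# Minimal smoke-test sketch (added for illustration; guarded so importing the
# module stays side-effect free). Per the Vimba docs, VmbVersionQuery may be
# called before VmbStartup; a return value of 0 is VmbErrorSuccess.
if __name__ == '__main__':
    _version = VmbVersionInfo()
    if vmb_version_query(byref(_version), sizeof(_version)) == 0:
        print(_version)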
|
morefigs/pymba
|
pymba/vimba_c.py
|
Python
|
mit
| 17,849
|
#!/usr/bin/env python2
import sys
sys.path.append('../fml/')
import os
import numpy as np
import fml
import random
t_width = np.pi / 4.0 # 0.7853981633974483
d_width = 0.2
cut_distance = 6.0
r_width = 1.0
c_width = 0.5
PTP = {\
1 :[1,1] ,2: [1,8]#Row1
,3 :[2,1] ,4: [2,2]#Row2\
,5 :[2,3] ,6: [2,4] ,7 :[2,5] ,8 :[2,6] ,9 :[2,7] ,10 :[2,8]\
,11 :[3,1] ,12: [3,2]#Row3\
,13 :[3,3] ,14: [3,4] ,15 :[3,5] ,16 :[3,6] ,17 :[3,7] ,18 :[3,8]\
,19 :[4,1] ,20: [4,2]#Row4\
,31 :[4,3] ,32: [4,4] ,33 :[4,5] ,34 :[4,6] ,35 :[4,7] ,36 :[4,8]\
,21 :[4,9] ,22: [4,10],23 :[4,11],24 :[4,12],25 :[4,13],26 :[4,14],27 :[4,15],28 :[4,16],29 :[4,17],30 :[4,18]\
,37 :[5,1] ,38: [5,2]#Row5\
,49 :[5,3] ,50: [5,4] ,51 :[5,5] ,52 :[5,6] ,53 :[5,7] ,54 :[5,8]\
,39 :[5,9] ,40: [5,10],41 :[5,11],42 :[5,12],43 :[5,13],44 :[5,14],45 :[5,15],46 :[5,16],47 :[5,17],48 :[5,18]\
,55 :[6,1] ,56: [6,2]#Row6\
,81 :[6,3] ,82: [6,4] ,83 :[6,5] ,84 :[6,6] ,85 :[6,7] ,86 :[6,8]
,72: [6,10],73 :[6,11],74 :[6,12],75 :[6,13],76 :[6,14],77 :[6,15],78 :[6,16],79 :[6,17],80 :[6,18]\
,57 :[6,19],58: [6,20],59 :[6,21],60 :[6,22],61 :[6,23],62 :[6,24],63 :[6,25],64 :[6,26],65 :[6,27],66 :[6,28],67 :[6,29],68 :[6,30],69 :[6,31],70 :[6,32],71 :[6,33]\
,87 :[7,1] ,88: [7,2]#Row7\
,113:[7,3] ,114:[7,4] ,115:[7,5] ,116:[7,6] ,117:[7,7] ,118:[7,8]\
,104:[7,10],105:[7,11],106:[7,12],107:[7,13],108:[7,14],109:[7,15],110:[7,16],111:[7,17],112:[7,18]\
        ,89 :[7,19],90: [7,20],91 :[7,21],92 :[7,22],93 :[7,23],94 :[7,24],95 :[7,25],96 :[7,26],97 :[7,27],98 :[7,28],99 :[7,29],100:[7,30],101:[7,31],102:[7,32],103:[7,33]}
def periodic_distance(a, b):
ra = PTP[int(a)][0]
rb = PTP[int(b)][0]
ca = PTP[int(a)][1]
cb = PTP[int(b)][1]
return (r_width**2 + (ra - rb)**2) * (c_width**2 + (ca - cb)**2)
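# Worked example (added for illustration): H (row 1, col 1) vs. Li (row 2,
# col 1) gives (1.0**2 + (1 - 2)**2) * (0.5**2 + (1 - 1)**2) = 2.0 * 0.25,
# i.e. periodic_distance(1, 3) == 0.5.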
def ksi(x):
return 1.0 - np.sin(np.pi * x/(2.0 * cut_distance))
def aras_scalar(atom1, atom2, q1, q2, n1, n2, qall1, qall2):
ksi1 = ksi(atom1[0,:n1])
ksi2 = ksi(atom2[0,:n2])
a = 1.0 / (np.sum(ksi1) * np.sum(ksi2)) \
* np.sqrt(np.pi) * d_width * r_width**4 * c_width**4 \
/ periodic_distance(q1, q2)
b = 0.0
for i in range(n1):
c = 0.0
for j in range(n2):
d = 0.0
for k in range(n1):
e = 0.0
for l in range(n2):
e += ksi2[l] * np.exp(-(atom1[i+3,k] - atom2[j+3,l])**2 / (4.0 * t_width**2))
d += e * ksi1[k]
c += d * np.exp(-(atom1[0,i] - atom2[0,j])**2 / (4.0 * d_width**2)) * ksi2[j] \
/ periodic_distance(qall1[i], qall2[j])
b += c * ksi1[i]
dist = a * b
return dist
def aras_distance(mol1, mol2, max_size=30):
d = np.zeros((mol1.natoms, mol2.natoms))
aa = np.zeros((mol1.natoms))
bb = np.zeros((mol2.natoms))
for i in range(mol1.natoms):
atom1 = mol1.aras_descriptor[i]
aa[i] = aras_scalar(atom1, atom1,
mol1.nuclear_charges[i],
mol1.nuclear_charges[i],
mol1.natoms, mol1.natoms,
mol1.nuclear_charges, mol1.nuclear_charges)
for i in range(mol2.natoms):
atom2 = mol2.aras_descriptor[i]
bb[i] = aras_scalar(atom2, atom2,
mol2.nuclear_charges[i],
mol2.nuclear_charges[i],
mol2.natoms, mol2.natoms,
mol2.nuclear_charges, mol2.nuclear_charges)
for i in range(mol1.natoms):
atom1 = mol1.aras_descriptor[i]
for j in range(mol2.natoms):
atom2 = mol2.aras_descriptor[j]
ab = aras_scalar(atom1, atom2,
mol1.nuclear_charges[i],
mol2.nuclear_charges[j],
mol1.natoms, mol2.natoms,
mol1.nuclear_charges, mol2.nuclear_charges)
d[i,j] = aa[i] + bb[j] - 2.0 * ab
return d
def get_kernel(mols1, mols2, sigma, max_size=30):
K = np.zeros((len(mols1), len(mols2)))
for i, mol1 in enumerate(mols1):
for j, mol2 in enumerate(mols2):
print i, j
d = aras_distance(mol1, mol2)
d *= -0.5 / (sigma**2)
np.exp(d, d)
K[i,j] = np.sum(d)
return K
def gen_pd(emax=20):
pd = np.zeros((emax,emax))
for i in range(emax):
for j in range(emax):
pd[i,j] = 1.0 / periodic_distance(i+1, j+1)
return pd
def fdist(mol1, mol2):
from faras import faras_molecular_distance as fd
pd = gen_pd()
amax = 11
x1 = np.array(mol1.aras_descriptor).reshape((1,amax,3+amax,amax))
x2 = np.array(mol2.aras_descriptor).reshape((1,amax,3+amax,amax))
q1 = np.array(mol1.nuclear_charges, \
dtype = np.int32).reshape(1,mol1.natoms)
q2 = np.array(mol2.nuclear_charges, \
dtype = np.int32).reshape(1,mol2.natoms)
n1 = np.array([mol1.natoms], dtype=np.int32)
n2 = np.array([mol2.natoms], dtype=np.int32)
nm1 = 1
nm2 = 1
d = fd(x1, x2, q1, q2, n1, n2, nm1, nm2, amax, pd)
return d
def fdists(mols1, mols2):
from faras import faras_molecular_distance as fd
pd = gen_pd()
amax = mols1[0].aras_descriptor.shape[0]
nm1 = len(mols1)
nm2 = len(mols2)
x1 = np.array([mol.aras_descriptor for mol in mols1]).reshape((nm1,amax,3+amax,amax))
x2 = np.array([mol.aras_descriptor for mol in mols2]).reshape((nm2,amax,3+amax,amax))
    q1 = np.zeros((nm1,amax), dtype=np.int32)
    q2 = np.zeros((nm2,amax), dtype=np.int32)
    for a in range(nm1):
        for i, charge in enumerate(mols1[a].nuclear_charges):
            q1[a,i] = int(charge)
    for b in range(nm2):
        for j, charge in enumerate(mols2[b].nuclear_charges):
            q2[b,j] = int(charge)
n1 = np.array([mol.natoms for mol in mols1], dtype=np.int32)
n2 = np.array([mol.natoms for mol in mols2], dtype=np.int32)
d = fd(x1, x2, q1, q2, n1, n2, nm1, nm2, amax, pd)
return d
if __name__ == "__main__":
mols = []
path = "xyz/"
filenames = os.listdir(path)
np.set_printoptions(linewidth=99999999999999999)
print "Generating ARAS descriptors from FML interface ..."
for filename in sorted(filenames):
mol = fml.Molecule()
mol.read_xyz(path + filename)
mol.generate_aras_descriptor(size=11)
mols.append(mol)
train = mols[:10]
test = mols[-10:]
a = 1
b = 1
d = aras_distance(mols[a], mols[b])
print d
d2 = fdist(mols[a], mols[b])
print d2[0,0,:mols[a].natoms,:mols[b].natoms]
|
andersx/fml
|
tests/test_aras.py
|
Python
|
mit
| 6,751
|
"""
This file implements a wrapper for facilitating domain randomization over
robosuite environments.
"""
import numpy as np
from robosuite.utils.mjmod import CameraModder, DynamicsModder, LightingModder, TextureModder
from robosuite.wrappers import Wrapper
DEFAULT_COLOR_ARGS = {
"geom_names": None, # all geoms are randomized
"randomize_local": True, # sample nearby colors
"randomize_material": True, # randomize material reflectance / shininess / specular
"local_rgb_interpolation": 0.2,
"local_material_interpolation": 0.3,
"texture_variations": ["rgb", "checker", "noise", "gradient"], # all texture variation types
"randomize_skybox": True, # by default, randomize skybox too
}
DEFAULT_CAMERA_ARGS = {
"camera_names": None, # all cameras are randomized
"randomize_position": True,
"randomize_rotation": True,
"randomize_fovy": True,
"position_perturbation_size": 0.01,
"rotation_perturbation_size": 0.087,
"fovy_perturbation_size": 5.0,
}
DEFAULT_LIGHTING_ARGS = {
"light_names": None, # all lights are randomized
"randomize_position": True,
"randomize_direction": True,
"randomize_specular": True,
"randomize_ambient": True,
"randomize_diffuse": True,
"randomize_active": True,
"position_perturbation_size": 0.1,
"direction_perturbation_size": 0.35,
"specular_perturbation_size": 0.1,
"ambient_perturbation_size": 0.1,
"diffuse_perturbation_size": 0.1,
}
DEFAULT_DYNAMICS_ARGS = {
# Opt parameters
"randomize_density": True,
"randomize_viscosity": True,
"density_perturbation_ratio": 0.1,
"viscosity_perturbation_ratio": 0.1,
# Body parameters
"body_names": None, # all bodies randomized
"randomize_position": True,
"randomize_quaternion": True,
"randomize_inertia": True,
"randomize_mass": True,
"position_perturbation_size": 0.0015,
"quaternion_perturbation_size": 0.003,
"inertia_perturbation_ratio": 0.02,
"mass_perturbation_ratio": 0.02,
# Geom parameters
"geom_names": None, # all geoms randomized
"randomize_friction": True,
"randomize_solref": True,
"randomize_solimp": True,
"friction_perturbation_ratio": 0.1,
"solref_perturbation_ratio": 0.1,
"solimp_perturbation_ratio": 0.1,
# Joint parameters
"joint_names": None, # all joints randomized
"randomize_stiffness": True,
"randomize_frictionloss": True,
"randomize_damping": True,
"randomize_armature": True,
"stiffness_perturbation_ratio": 0.1,
"frictionloss_perturbation_size": 0.05,
"damping_perturbation_size": 0.01,
"armature_perturbation_size": 0.01,
}
class DomainRandomizationWrapper(Wrapper):
"""
Wrapper that allows for domain randomization mid-simulation.
Args:
env (MujocoEnv): The environment to wrap.
seed (int): Integer used to seed all randomizations from this wrapper. It is
used to create a np.random.RandomState instance to make sure samples here
are isolated from sampling occurring elsewhere in the code. If not provided,
will default to using global random state.
randomize_color (bool): if True, randomize geom colors and texture colors
randomize_camera (bool): if True, randomize camera locations and parameters
randomize_lighting (bool): if True, randomize light locations and properties
        randomize_dynamics (bool): if True, randomize dynamics parameters
color_randomization_args (dict): Color-specific randomization arguments
camera_randomization_args (dict): Camera-specific randomization arguments
lighting_randomization_args (dict): Lighting-specific randomization arguments
        dynamics_randomization_args (dict): Dynamics-specific randomization arguments
randomize_on_reset (bool): if True, randomize on every call to @reset. This, in
conjunction with setting @randomize_every_n_steps to 0, is useful to
generate a new domain per episode.
randomize_every_n_steps (int): determines how often randomization should occur. Set
to 0 if randomization should happen manually (by calling @randomize_domain)
"""
def __init__(
self,
env,
seed=None,
randomize_color=True,
randomize_camera=True,
randomize_lighting=True,
randomize_dynamics=True,
color_randomization_args=DEFAULT_COLOR_ARGS,
camera_randomization_args=DEFAULT_CAMERA_ARGS,
lighting_randomization_args=DEFAULT_LIGHTING_ARGS,
dynamics_randomization_args=DEFAULT_DYNAMICS_ARGS,
randomize_on_reset=True,
randomize_every_n_steps=1,
):
super().__init__(env)
self.seed = seed
if seed is not None:
self.random_state = np.random.RandomState(seed)
else:
self.random_state = None
self.randomize_color = randomize_color
self.randomize_camera = randomize_camera
self.randomize_lighting = randomize_lighting
self.randomize_dynamics = randomize_dynamics
self.color_randomization_args = color_randomization_args
self.camera_randomization_args = camera_randomization_args
self.lighting_randomization_args = lighting_randomization_args
self.dynamics_randomization_args = dynamics_randomization_args
self.randomize_on_reset = randomize_on_reset
self.randomize_every_n_steps = randomize_every_n_steps
self.step_counter = 0
self.modders = []
if self.randomize_color:
self.tex_modder = TextureModder(
sim=self.env.sim, random_state=self.random_state, **self.color_randomization_args
)
self.modders.append(self.tex_modder)
if self.randomize_camera:
self.camera_modder = CameraModder(
sim=self.env.sim,
random_state=self.random_state,
**self.camera_randomization_args,
)
self.modders.append(self.camera_modder)
if self.randomize_lighting:
self.light_modder = LightingModder(
sim=self.env.sim,
random_state=self.random_state,
**self.lighting_randomization_args,
)
self.modders.append(self.light_modder)
if self.randomize_dynamics:
self.dynamics_modder = DynamicsModder(
sim=self.env.sim,
random_state=self.random_state,
**self.dynamics_randomization_args,
)
self.modders.append(self.dynamics_modder)
self.save_default_domain()
def reset(self):
"""
Extends superclass method to reset the domain randomizer.
Returns:
OrderedDict: Environment observation space after reset occurs
"""
# undo all randomizations
self.restore_default_domain()
# normal env reset
ret = super().reset()
# save the original env parameters
self.save_default_domain()
# reset counter for doing domain randomization at a particular frequency
self.step_counter = 0
# update sims
for modder in self.modders:
modder.update_sim(self.env.sim)
if self.randomize_on_reset:
# domain randomize + regenerate observation
self.randomize_domain()
ret = self.env._get_observations()
return ret
def step(self, action):
"""
Extends vanilla step() function call to accommodate domain randomization
Returns:
4-tuple:
- (OrderedDict) observations from the environment
- (float) reward from the environment
- (bool) whether the current episode is completed or not
- (dict) misc information
"""
# Step the internal randomization state
self.step_randomization()
return super().step(action)
def step_randomization(self):
"""
Steps the internal randomization state
"""
# functionality for randomizing at a particular frequency
if self.randomize_every_n_steps > 0:
if self.step_counter % self.randomize_every_n_steps == 0:
self.randomize_domain()
self.step_counter += 1
def randomize_domain(self):
"""
Runs domain randomization over the environment.
"""
for modder in self.modders:
modder.randomize()
def save_default_domain(self):
"""
Saves the current simulation model parameters so
that they can be restored later.
"""
for modder in self.modders:
modder.save_defaults()
def restore_default_domain(self):
"""
Restores the simulation model parameters saved
in the last call to @save_default_domain.
"""
for modder in self.modders:
modder.restore_defaults()
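# Illustrative usage sketch (not part of the original module): wrapping an
# environment so that a fresh domain is sampled once per episode and never
# mid-episode; the robosuite.make arguments are just an example.
#
#     import robosuite as suite
#     env = DomainRandomizationWrapper(
#         suite.make("Lift", robots="Panda"),
#         seed=0,
#         randomize_on_reset=True,
#         randomize_every_n_steps=0,  # 0 disables per-step randomization
#     )
#     obs = env.reset()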
|
ARISE-Initiative/robosuite
|
robosuite/wrappers/domain_randomization_wrapper.py
|
Python
|
mit
| 9,076
|
#!/usr/bin/env python3
from sfmutils.api_client import ApiClient
import argparse
import logging
import sys
log = logging.getLogger(__name__)
def main(sys_argv):
# Arguments
parser = argparse.ArgumentParser(description="Return WARC filepaths for passing to other commandlines.")
parser.add_argument("--harvest-start", help="ISO8601 datetime after which harvest was performed. For example, "
"2015-02-22T14:49:07Z")
parser.add_argument("--harvest-end", help="ISO8601 datetime before which harvest was performed. For example, "
"2015-02-22T14:49:07Z")
parser.add_argument("--warc-start", help="ISO8601 datetime after which WARC was created. For example, "
"2015-02-22T14:49:07Z")
parser.add_argument("--warc-end", help="ISO8601 datetime before which WARC was created. For example, "
"2015-02-22T14:49:07Z")
default_api_base_url = "http://api:8080"
parser.add_argument("--api-base-url", help="Base url of the SFM API. Default is {}.".format(default_api_base_url),
default=default_api_base_url)
parser.add_argument("--debug", type=lambda v: v.lower() in ("yes", "true", "t", "1"), nargs="?",
default="False", const="True")
parser.add_argument("--newline", action="store_true", help="Separates WARCs by newline instead of space.")
parser.add_argument("collection", nargs="+", help="Limit to WARCs of this collection. "
"Truncated collection ids may be used.")
# Explicitly using sys.argv so that can mock out for testing.
args = parser.parse_args(sys_argv[1:])
# Logging
logging.basicConfig(format='%(asctime)s: %(name)s --> %(message)s',
level=logging.DEBUG if args.debug else logging.INFO)
logging.getLogger("requests").setLevel(logging.DEBUG if args.debug else logging.INFO)
api_client = ApiClient(args.api_base_url)
collection_ids = []
for collection_id_part in args.collection:
log.debug("Looking up collection id part %s", collection_id_part)
if len(collection_id_part) == 32:
collection_ids.append(collection_id_part)
else:
collections = list(api_client.collections(collection_id_startswith=collection_id_part))
if len(collections) == 0:
print("No matching collections for {}".format(collection_id_part))
sys.exit(1)
elif len(collections) > 1:
print("Multiple matching collections for {}".format(collection_id_part))
sys.exit(1)
else:
collection_ids.append(collections[0]["collection_id"])
warc_filepaths = set()
for collection_id in collection_ids:
log.debug("Looking up warcs for %s", collection_id)
warcs = api_client.warcs(collection_id=collection_id, harvest_date_start=args.harvest_start,
harvest_date_end=args.harvest_end,
created_date_start=args.warc_start, created_date_end=args.warc_end)
for warc in warcs:
warc_filepaths.add(warc["path"])
sep = "\n" if args.newline else " "
return sep.join(sorted(warc_filepaths))
if __name__ == "__main__":
print(main(sys.argv))
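# Example invocation (illustrative; the truncated collection id and date are
# made up). The space-separated output is designed for piping into other
# command lines:
#
#     python find_warcs.py --harvest-start 2016-01-01T00:00:00Z 4f4d1 | xargs ls -lh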
|
gwu-libraries/sfm-utils
|
sfmutils/find_warcs.py
|
Python
|
mit
| 3,455
|
NAME="Phone Alert Status"
|
brettchien/PyBLEWrapper
|
pyble/const/profile/phone_alert_status.py
|
Python
|
mit
| 26
|
import requests, urllib, httplib, base64
from flask import Flask, render_template, request, jsonify
app = Flask(__name__)
@app.route("/")
def hello():
return render_template("index.html")
@app.route("/search", methods=['POST', 'GET'])
def callAPI():
error = None
_url = 'https://api.projectoxford.ai/vision/v1.0/ocr' #https://api.projectoxford.ai/vision/v1.0/ocr[?language][&detectOrientation ]
_key = "f8968ffd96d2475cb7ec347c51f24e3e" #Here you have to paste your primary key it is a header
_maxNumRetries = 10
bodyURL = request.args.get('uri','')
print(bodyURL)
headersIn = {
"Content-Type": "application/json",
"Host": "api.projectoxford.ai",
"Ocp-Apim-Subscription-Key": _key
}
paramsIn = urllib.urlencode({
"language": "en",
"detectOrientation": "false"
})
data={"url":"https://csgsarchitects.files.wordpress.com/2011/12/111_new-blog.jpg"}
try:
        r = requests.post(_url, json=data, params=paramsIn, headers=headersIn)
        result = r.json()
        print result
        return jsonify(data=result)
#
# conn = httplib.HTTPSConnection('api.projectoxford.ai')
# conn.request("POST", "/vision/v1.0/ocr?%s" % paramsIn, "{body}", headersIn)
# response = conn.getresponse()
# data = response.read()
# print(data)
# conn.close()
#
# print 'hello'
# conn.request("POST", "/vision/v1.0/ocr?%s" % params, {"url":"http://example.com/images/test.jpg"}, headers)
# response = conn.getresponse()
# data = response.read()
# print(data)
# conn.close()
except Exception as e:
print(e)
        return str(e)
if __name__ == "__main__":
app.run(host='0.0.0.0')
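# Example request against the running app (illustrative):
#
#     curl "http://localhost:5000/search?uri=http://example.com/image.jpg"
#
# Note that the handler currently posts a hard-coded image URL to the OCR
# endpoint; the uri query parameter is read and printed but otherwise unused.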
|
USCSoftwareEngineeringClub/pyceratOpsRecs
|
src/interface/hello.py
|
Python
|
mit
| 1,646
|