| code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
"""
Given an array of integers nums, calculate the pivot index of this array.
The pivot index is the index where the sum of all the numbers strictly to the left of the index is equal to the sum of all the numbers strictly to the index's right.
If the index is on the left edge of the array, then the left sum is 0 because there are no elements to the left. This also applies to the right edge of the array.
Return the leftmost pivot index. If no such index exists, return -1.
https://leetcode.com/problems/find-pivot-index/
Date: 12/15/21
"""
def pivot(nums: list) -> int:
    # Check every index, including 0 and the last one; the sum of an empty side is 0.
    for i in range(len(nums)):
        print(f'i={i}, left = {nums[:i]}, right = {nums[(i+1):]}')
        if sum(nums[:i]) == sum(nums[(i+1):]):
            return i
    return -1


if __name__ == '__main__':
    nums = [1, 7, 3, 6, 5, 6]
    # pivot(nums)
    print(pivot(nums))  # expected: 3
| entrepidea/projects | python/tutorials/algo/leetcode/easy/pivot_index.py | Python | gpl-3.0 | 820 |
"""Module which contains the Loader."""
import os
class Loader():
    """Load external files into tamandua."""

    __basedir = os.path.abspath(os.path.dirname(__file__))

    @classmethod
    def load_js(cls, file: str) -> str:
        """
        Load a js file and return its content.
        path separator is '.' and the '.js' extension is automatically added.
        Eg.: if 'file' is 'mongo_js.mapper' Loader will load
        the 'mongo_js/mapper.js' file.
        """
        path = file.replace('.', os.path.sep) + '.js'
        with open(os.path.join(cls.__basedir, path), 'r') as f:
            content = f.read()
        return content
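
# Usage sketch: following the docstring's convention above, the dotted name
# 'mongo_js.mapper' resolves to 'mongo_js/mapper.js' relative to this module's
# directory, and its contents are returned as a string:
#
#   js_source = Loader.load_js('mongo_js.mapper')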
| realmar/Tamandua | src/repository/js.py | Python | gpl-3.0 | 681 |
# This file is part of MAUS: http://micewww.pp.rl.ac.uk/projects/maus
#
# MAUS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAUS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAUS. If not, see <http://www.gnu.org/licenses/>.
#pylint: disable=E1101
"""
SocketManager wrapper wraps the ROOT TServerSocket
"""
import sys
import time
import threading
import Queue
import ROOT
from docstore.root_document_store._socket_error import SocketError
from docstore.root_document_store._control_message import ControlMessage
class SocketManager:
"""
SocketManager handles socket set up and communications.
"""
def __init__(self, listen_port_list, timeout, retry_time,
max_send_attempts=100):
self.retry_time = retry_time
self.socket_queue = Queue.Queue() # Fifo queue for easy iterating
self.recv_message_queue = Queue.Queue() # Fifo queue for easy iterating
self.send_message_queue = Queue.Queue()
self.error_queue = Queue.Queue()
self.max_send_attempts = max_send_attempts
self.processing = False
for listen_port in listen_port_list:
if listen_port > 65535 or listen_port <= 0: # segv in ROOT
raise SocketError("Port "+str(listen_port)+" out of range")
self.accept_connection(listen_port, timeout, retry_time)
def check_error_queue(self):
"""
Check the error queue; raise an exception if there is a waiting error
"""
try:
raise self.error_queue.get_nowait()
except Queue.Empty:
pass
def start_processing(self):
"""
start the socket processing, looking for events to process
"""
self._start_message_handler()
self.processing = True
def close_all(self, force):
"""
close the socket
- force: bool, if True then force the socket to close, otherwise wait
for any existing messages to get picked up
"""
for socket in self._loop_over_sockets_unsafe():
if force:
socket.Close("force")
else:
socket.Close("")
def send_message(self, port, message):
"""
Send a message through the socket
- port: integer corresponding to the port over which the message should
be sent. Raises SocketError if no socket is open on the target
port.
- message: object of type ControlMessage; raises SocketError if not
a TMessage.
"""
if not self.processing:
raise SocketError("Can't post a message until start_processing()")
if type(message) == type(ControlMessage()):
try:
root_message = message.root_repr()
except TypeError as exc:
raise SocketError(*exc.args)
else:
raise SocketError("Message must be a ControlMessage - got "+\
str(type(message)))
try:
self.send_message_queue.put_nowait((port, root_message, 0))
except SocketError:
raise SocketError("SocketManager knows nothing about sockets on "+\
"port "+str(port))
def connect(self, url, port, timeout, retry_time):
"""
Connect to a given port
- url: url of the target machine
- port: port on the target machine to connect to
- timeout: give up after timeout seconds
- retry_time: attempt to reconnect every retry_time seconds
"""
return self._request_socket(url, port, timeout, retry_time)
def close_connection(self, port, force):
"""
close the a socket on a given port
- port: the port on which the socket is open.
- force: bool, if True then force the socket to close, otherwise wait
for any existing messages to get picked up
"""
socket = self._get_socket(port)
if force:
socket.Close("force")
else:
socket.Close("")
def accept_connection(self, listen_port, timeout, retry_time):
"""
Accept a connection on a given port
- listen_port: the port on which to listen
- timeout: give up after timeout seconds
- retry_time: retry every retry_time seconds
"""
my_thread = threading.Thread(target=self._accept_socket,
args=(listen_port, timeout, retry_time))
my_thread.daemon = True
my_thread.start()
def port_list(self):
"""
Return a list of ports (integers) on which sockets are open.
"""
return [socket.GetLocalPort() for socket in self._loop_over_sockets()]
def socket_list(self):
"""
Return a list of sockets metadata.
@returns list of dictionaries, each containing following keys:
- local_port: host port on which the socket is open
- local_address: host address
- remote_port: remote port on which the socket is open
- remote_address: remote address
- valid: true if the port is able to send and receive messages.
"""
my_list = []
for socket in self._loop_over_sockets():
my_list.append({
"local_port":socket.GetLocalPort(),
"local_address":socket.GetLocalInetAddress().GetHostAddress(),
"remote_port":socket.GetPort(),
"remote_address":socket.GetInetAddress().GetHostAddress(),
"valid":socket.IsValid()
})
return my_list
def _get_socket(self, port):
"""
Get a socket from the socket queue; caller now owns the socket and must
put it back when finished with it
- port: integer corresponding to the socket LocalPort
Throws a KeyError if the socket is not in the queue. Note that if
another thread is using a socket, it may not be in the queue - even if
it has been opened by the socket manager
"""
for socket in self._loop_over_sockets():
if socket.GetLocalPort() == port:
return socket
raise SocketError("Could not find socket in socket_queue")
def _put_socket(self, socket):
"""
Put a socket on the socket queue. Socket queue now owns the socket.
- socket: the socket to put
"""
# list() the generator so the membership test actually compares sockets
if socket.IsValid() and socket not in list(self._loop_over_sockets()):
self.socket_queue.put_nowait(socket)
else:
raise SocketError("Socket was not valid")
def _loop_over_sockets(self):
"""
Loop over sockets in the socket queue; put them back in the queue after
caller is done
"""
return self._loop_over_queue(self.socket_queue)
def _loop_over_sockets_unsafe(self):
"""
Loop over sockets in the socket queue; dont put them back in the queue
after caller is done
"""
return self._loop_over_queue_unsafe(self.socket_queue)
def _loop_over_queue(self, queue):
"""
Loop over sockets in the socket queue; put them back in the queue after
caller is done
"""
for item in self._loop_over_queue_unsafe(queue):
yield item
queue.put_nowait(item)
@classmethod
def _loop_over_queue_unsafe(cls, queue):
"""
Loop over sockets in the socket queue; dont put them back in the queue
after caller is done
"""
item_list = []
while True:
try:
item = queue.get_nowait()
except Queue.Empty:
raise StopIteration("No items in queue")
if item in item_list: # we have looped once
queue.put_nowait(item)
raise StopIteration("Looped over the queue")
yield item
item_list.append(item)
def _start_message_handler(self):
"""
Start the message handler
"""
my_thread = threading.Thread(target=self._message_handler,
args=(self.retry_time,))
my_thread.daemon = True
my_thread.start()
def _message_handler(self, retry_time): # pylint: disable=R0912
"""
Message handler; send any messages on the send queue; receive any
incoming messages
"""
max_sends = self.max_send_attempts
while True:
try:
# Try to send all messages
for socket in self._loop_over_sockets():
sys.stdout.flush()
for item in \
self._loop_over_queue_unsafe(self.send_message_queue):
port, message, sends = item
if socket.GetLocalPort() == port:
socket.Send(message)
else:
self.send_message_queue.put(item)
# Increment sent count on messages that did not send
# Remove them if they have exceeded max_sends
for item in \
self._loop_over_queue_unsafe(self.send_message_queue):
port, message, sends = item
if sends >= max_sends and max_sends > 0:
raise SocketError("Failed to send message after "+\
str(sends)+" attempts")
else:
self.send_message_queue.put((port, message, sends+1))
# Try to receive any queued messages
for socket in self._loop_over_sockets():
message = ROOT.TMessage()
if socket.IsValid():
try_again = True
while try_again:
socket.Recv(message)
try_again = self._queue_received_message(
socket.GetLocalPort(),
message)
# Drop any invalid sockets
for socket in self._loop_over_sockets_unsafe():
if socket.IsValid():
self.socket_queue.put(socket)
time.sleep(retry_time)
except Exception as exc: # pylint: disable=W0703
sys.excepthook(*sys.exc_info())
self.error_queue.put_nowait(exc)
sys.stdout.flush()
time.sleep(retry_time)
def _queue_received_message(self, port, message):
"""
Receive a message and put it on the recv_message_queue for reading later
"""
if message == None or message.GetClass() == None:
# waiting for message
return False
elif message.GetClass() == ROOT.TObjArray().Class():
# control message
ctrl_message = ControlMessage.new_from_root_repr(message)
self.recv_message_queue.put((port, ctrl_message), False)
return True
else:
raise SocketError( # bad type
"Malformed message - should be ROOT.TObjArray type")
def _request_socket(self, url, port, timeout, retry_time):
"""
Request a socket on a remote machine
"""
if port > 65535 or port <= 0: # segv in ROOT
raise SocketError("Port "+str(port)+" out of range")
start_time = time.time()
valid = False
while not valid and \
(timeout < 0 or time.time() - start_time < timeout) and \
port not in self.port_list():
tmp_socket = ROOT.TSocket(url, port)
tmp_socket.SetOption(ROOT.TSocket.kNoBlock, 1)
valid = tmp_socket.IsValid()
if not valid:
time.sleep(retry_time)
if valid:
port = tmp_socket.GetLocalPort()
self._put_socket(tmp_socket)
return port
else:
raise SocketError("Failed to connect to "+str(url)+":"+str(port))
def _accept_socket(self, port, timeout, retry_time):
"""
Accept a socket request
"""
start_time = time.time()
server_socket = ROOT.TServerSocket(port, True)
server_socket.SetOption(ROOT.TServerSocket.kNoBlock, 1)
tcp_socket_index = server_socket.GetDescriptor()
accepted_socket_index = ROOT.gSystem.AcceptConnection(tcp_socket_index)
while True:
while accepted_socket_index < 0 and \
(timeout < 0 or time.time()-start_time < timeout):
sys.stdout.flush()
try:
accepted_socket_index = \
ROOT.gSystem.AcceptConnection(tcp_socket_index)
except AttributeError:
accepted_socket_index = -1
sys.stdout.flush()
if accepted_socket_index < 0:
time.sleep(retry_time)
sys.stdout.flush()
if accepted_socket_index < 0:
raise SocketError("Failed to accept connection on port "+\
str(port))
ROOT.gSystem.SetSockOpt(accepted_socket_index, ROOT.kReuseAddr, 1)
socket = ROOT.TSocket(accepted_socket_index)
socket.SetOption(ROOT.TSocket.kNoBlock, 1)
socket.fAddress = ROOT.gSystem.GetPeerName(accepted_socket_index)
socket.fSecContext = 0
if socket.GetDescriptor() >= 0:
ROOT.gROOT.GetListOfSockets().Add(socket)
self._put_socket(socket)
sys.stdout.flush()
accepted_socket_index = \
ROOT.gSystem.AcceptConnection(tcp_socket_index)
time.sleep(retry_time)
return
| mice-software/maus | src/common_py/docstore/root_document_store/_socket_manager.py | Python | gpl-3.0 | 14,450 |
# -*- coding: utf-8 -*-
# -*- Channel wikiseries -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://www.wikiseriesonline.nu/'
list_language = ['Latino', 'Español', 'VOSE', 'VO']
list_quality = []
list_servers = ['openload']
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
return data
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist =[]
itemlist.append(
Item(channel=item.channel, title="Nuevos Capitulos", action="list_all", url=host + 'category/episode',
thumbnail=get_thumb('new episodes', auto=True)))
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'category/serie',
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="genres",
url=host + 'latest-episodes', thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
thumbnail=get_thumb('search', auto=True)))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '39;src=.*?(http.*?)style=display:.*?one-line href=(.*?) title=.*?>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = scrapedurl
scrapedtitle = scrapedtitle.replace('×','x')
contentSerieName = scrapedtitle
action = 'seasons'
if 'episode' in item.url:
scrapedtitle, season, episode = scrapertools.find_single_match(scrapedtitle,
'(.*?) (\d+).*?(?:x|X).*?(\d+)')
contentSerieName = scrapedtitle
scrapedtitle = '%sx%s - %s' % (season, episode, scrapedtitle)
action='findvideos'
thumbnail = scrapedthumbnail
new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
thumbnail=thumbnail, contentSerieName=contentSerieName, action=action,
context=filtertools.context(item, list_language, list_quality))
if 'episode' in item.url:
new_item.contentSeasonNumber = season
new_item.contentepisodeNumber = episode
new_item.context = []
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
next_page = scrapertools.find_single_match(data, 'rel=next href=(.*?)>»</a>')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
url=next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
type=item.type))
return itemlist
def genres(item):
itemlist = []
data = get_source(host)
patron = '<li> <a href=(/category/.*?)>(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if scrapedtitle != 'Series':
itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=host + scrapedurl, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'data-season-num=1>(.*?)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedseason in matches:
contentSeasonNumber = scrapedseason
title = 'Temporada %s' % scrapedseason
infoLabels['season'] = contentSeasonNumber
itemlist.append(Item(channel=item.channel, action='episodesxseason', url=item.url, title=title,
contentSeasonNumber=contentSeasonNumber, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
extra1='library'))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
data = get_source(item.url)
season = item.contentSeasonNumber
patron = '<li class=ep-list-item id=s%se(\d+)>.*?<a href=(.*?) >.*?name>(.*?)<.*?class=lgn (.*?)</a>' % season
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedepi, scrapedurl, scrapedtitle, languages in matches:
url = scrapedurl
language = scrapertools.find_multiple_matches(languages, 'title=(.*?)>')
contentEpisodeNumber = scrapedepi
title = '%sx%s - %s %s' % (season, contentEpisodeNumber, scrapedtitle, language)
infoLabels['episode'] = contentEpisodeNumber
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
contentSerieName=item.contentSerieName, contentEpisodeNumber=contentEpisodeNumber,
language=language, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search(item, text):
logger.info()
item.url = item.url + text
item.text = text
item.type = 'search'
if text != '':
#return list_all(item)
return search_results(item)
def search_results(item):
import urllib
itemlist = []
headers={"Origin": "http://www.wikiseriesonline.nu",
"Accept-Encoding": "gzip, deflate", "Host": "www.wikiseriesonline.nu",
"Accept-Language": "es-ES,es;q=0.8,en;q=0.6",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Accept": "*/*", "Referer": item.url,
"X-Requested-With": "XMLHttpRequest", "Connection": "keep-alive", "Content-Length": "7"}
post = {"n":item.text}
post = urllib.urlencode(post)
url = host + 'wp-content/themes/wikiSeries/searchajaxresponse.php'
data = httptools.downloadpage(url, post=post, headers=headers).data
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
patron = "<!-- .Posts -->.*?<a href=(.*?)>.*?src=(.*?) .*?titleinst>(.*?)<"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
if item.text.lower() in scrapedtitle.lower():
itemlist.append(Item(channel=item.channel, title=scrapedtitle, contentSerieName=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, action='seasons',
context=filtertools.context(item, list_language, list_quality)))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
itemlist = []
data=get_source(item.url)
patron = '<a href=(/reproductor.*?)target'
matches = re.compile(patron, re.DOTALL).findall(data)
for link in matches:
video_data = get_source(host+link)
language = ''
if 'latino' in link.lower():
language='Latino'
elif 'espaÑol' in link.lower():
language = 'Español'
elif 'subtitulado' in link.lower():
language = 'VOSE'
elif 'vo' in link.lower():
language = 'VO'
url = scrapertools.find_single_match(video_data, '<iframe src=(.*?) scrolling')
title = '%s [%s]'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=language,
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
| alfa-jor/addon | plugin.video.alfa/channels/wikiseries.py | Python | gpl-3.0 | 9,162 |
from flask_wtf import Form
from ..models import User, Player
from wtforms.validators import Required, Length, Email, Regexp
from wtforms import StringField, SubmitField, SelectField, BooleanField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
class PlayerForm(Form):
    """ This class represents a form to create a new player"""
    name = StringField('Name:', validators=[Required()])
    email = StringField('Mail', validators=[Required()])
    submit = SubmitField('Submit')


class EditProfileForm(Form):
    """ This class represents a form to edit a user's profile name"""
    name = StringField('Real name', validators=[Length(0, 64)])
    submit = SubmitField('Submit')


class EditPlayerForm(Form):
    """ This class represents a form to edit a player"""
    name = StringField('Edit New Player´s name:', validators=[Required()])
    email = StringField('Edit New Player´s Mail', validators=[Required()])
    submit = SubmitField('Submit')


class TeamForm(Form):
    """ This class represents a form to create a new team"""

    def fill_field():
        """ This function simply returns
        a query object for a QuerySelectField
        """
        return Player.query

    player_a = QuerySelectField(query_factory=fill_field)
    player_b = QuerySelectField(query_factory=fill_field)
    submit = SubmitField('Add')


class TournamentForm(Form):
    """ This class represents a form to create or alter a tournament"""
    name = StringField('Name:', validators=[Required()])
    modus = StringField('Modus:', validators=[Required()])
    set_count = StringField('Count of Sets:', validators=[Required()])
    max_phase = StringField('Max Phase:', validators=[Required()])
    submit = SubmitField('Submit')


class KoTournamentForm(Form):
    """ This class represents a form to create or alter a tournament
    with a knock out elimination
    """
    name = StringField('Name:', validators=[Required()])
    modus = StringField('Modus:', validators=[Required()])
    set_count = StringField('Count of Sets:', validators=[Required()])
    submit = SubmitField('Submit')
| haup/totoro | totoro/app/main/forms.py | Python | gpl-3.0 | 2,118 |
from __future__ import division
import numpy as np
import queue
import threading
from chainer.dataset.iterator import Iterator
def queue_worker(index_queue, batch_queue, dataset, xp):
    while True:
        batch_begin, batch_end = index_queue.get()
        batches = xp.array(dataset[batch_begin:batch_end])
        batch_anc = batches[xp.arange(0, len(batches), 3)]
        batch_pos = batches[xp.arange(1, len(batches), 3)]
        batch_neg = batches[xp.arange(2, len(batches), 3)]
        batch_queue.put((batch_anc, batch_pos, batch_neg))


class TripletIterator(Iterator):
    def __init__(self, dataset, batch_size, repeat=False, xp=np):
        self.dataset = dataset
        self.len_data = len(dataset)
        self.batch_size = batch_size
        self.repeat = repeat
        self.xp = xp
        self.indices = queue.Queue()
        self.batches = queue.Queue(maxsize=6)
        self.current_position = 0
        self.epoch = 0
        self.fill_queue()
        self.queue_worker = threading.Thread(target=queue_worker, kwargs={
            "index_queue": self.indices,
            "batch_queue": self.batches,
            "dataset": self.dataset,
            "xp": self.xp
        })
        self.queue_worker.start()

    def fill_queue(self):
        for i in range(0, self.len_data, 3 * self.batch_size):
            i_end = i + 3 * self.batch_size
            self.indices.put((i, i_end))

    def __next__(self):
        if self.indices.empty() and self.batches.empty():
            self.current_position = 0
            self.epoch += 1
            self.fill_queue()
            if not self.repeat:
                raise StopIteration
        # simulate progress for ProgressBar extension
        self.current_position += 3 * self.batch_size
        return self.batches.get(timeout=2)

    next = __next__

    @property
    def epoch_detail(self):
        return self.epoch + self.current_position / self.len_data

    def serialize(self, serializer):
        self.current_position = serializer('current_position',
                                           self.current_position)
        self.epoch = serializer('epoch', self.epoch)
        self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
        if self._order is not None:
            serializer('_order', self._order)
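
# Usage sketch (hypothetical data): the iterator expects triplets stored as
# consecutive (anchor, positive, negative) rows, so len(dataset) should be a
# multiple of 3; batch_size counts triplets per yielded batch.
#
#   data = np.random.rand(30, 64).astype(np.float32)  # 10 triplets of 64-d vectors
#   it = TripletIterator(data, batch_size=5)
#   anc, pos, neg = next(it)                          # each array has shape (5, 64)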
| hrantzsch/signature-embedding | triplet_iterator.py | Python | gpl-3.0 | 2,320 |
'''Utils'''
import re
from typing import Tuple, List, Union, Iterable
from wpull.errors import ServerError
import datetime
from wpull.protocol.ftp.ls.listing import FileEntry
class ReplyCodes(object):
command_okay = 200
syntax_error_command_unrecognized = 500
syntax_error_in_parameters_or_arguments = 501
command_not_implemented_superfluous_at_this_site = 202
command_not_implemented = 502
bad_sequence_of_commands = 503
command_not_implemented_for_that_parameter = 504
restart_marker_reply = 110
system_status_or_system_help_reply = 211
directory_status = 212
file_status = 213
help_message = 214
name_system_type = 215
service_ready_in_nnn_minutes = 120
service_ready_for_new_user = 220
service_closing_control_connection = 221
service_not_available_closing_control_connection = 421
data_connection_already_open_transfer_starting = 125
data_connection_open_no_transfer_in_progress = 225
cant_open_data_connection = 425
closing_data_connection = 226
connection_closed_transfer_aborted = 426
entering_passive_mode = 227
user_logged_in_proceed = 230
not_logged_in = 530
user_name_okay_need_password = 331
need_account_for_login = 332
need_account_for_storing_files = 532
file_status_okay_about_to_open_data_connection = 150
requested_file_action_okay_completed = 250
pathname_created = 257
requested_file_action_pending_further_information = 350
requested_file_action_not_taken = 450
requested_action_not_taken_file_unavailable = 550
requested_action_aborted_local_error_in_processing = 451
requested_action_aborted_page_type_unknown = 551
requested_action_not_taken_insufficient_storage_space = 452
requested_file_action_aborted = 552
requested_action_not_taken_file_name_not_allowed = 553
class FTPServerError(ServerError):
@property
def reply_code(self):
'''Return reply code.'''
if len(self.args) >= 2 and isinstance(self.args[1], int):
return self.args[1]
def parse_address(text: str) -> Tuple[str, int]:
'''Parse PASV address.'''
match = re.search(
r'\('
r'(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*'
r'\)',
text)
if match:
return (
'{0}.{1}.{2}.{3}'.format(int(match.group(1)),
int(match.group(2)),
int(match.group(3)),
int(match.group(4))
),
int(match.group(5)) << 8 | int(match.group(6))
)
else:
raise ValueError('No address found')
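
# Worked example: a typical RFC 959 passive-mode reply such as
# '227 Entering Passive Mode (192,168,1,2,19,137)' yields the host plus the
# port rebuilt from the high/low bytes (19 * 256 + 137 = 5001):
#
#   parse_address('227 Entering Passive Mode (192,168,1,2,19,137)')
#   # -> ('192.168.1.2', 5001)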
def reply_code_tuple(code: int) -> Tuple[int, int, int]:
'''Return the reply code as a tuple.
Args:
code: The reply code.
Returns:
Each item in the tuple is the digit.
'''
return code // 100, code // 10 % 10, code % 10
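
# Worked example: reply_code_tuple(227) -> (2, 2, 7); callers typically only
# need the first digit to tell preliminary/completion/intermediate/error
# replies apart.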
def parse_machine_listing(text: str, convert: bool=True, strict: bool=True) -> \
List[dict]:
'''Parse machine listing.
Args:
text: The listing.
convert: Convert sizes and dates.
strict: Method of handling errors. ``True`` will raise
``ValueError``. ``False`` will ignore rows with errors.
Returns:
list: A list of dict of the facts defined in RFC 3659.
The key names must be lowercase. The filename uses the key
``name``.
'''
# TODO: this function should be moved into the 'ls' package
listing = []
for line in text.splitlines(False):
facts = line.split(';')
row = {}
filename = None
for fact in facts:
name, sep, value = fact.partition('=')
if sep:
name = name.strip().lower()
value = value.strip().lower()
if convert:
try:
value = convert_machine_list_value(name, value)
except ValueError:
if strict:
raise
row[name] = value
else:
if name[0:1] == ' ':
# Is a filename
filename = name[1:]
else:
name = name.strip().lower()
row[name] = ''
if filename:
row['name'] = filename
listing.append(row)
elif strict:
raise ValueError('Missing filename.')
return listing
def convert_machine_list_value(name: str, value: str) -> \
Union[datetime.datetime, str, int]:
'''Convert sizes and time values.
Size will be ``int`` while time value will be :class:`datetime.datetime`.
'''
if name == 'modify':
return convert_machine_list_time_val(value)
elif name == 'size':
return int(value)
else:
return value
def convert_machine_list_time_val(text: str) -> datetime.datetime:
'''Convert RFC 3659 time-val to datetime objects.'''
# TODO: implement fractional seconds
text = text[:14]
if len(text) != 14:
raise ValueError('Time value not 14 chars')
year = int(text[0:4])
month = int(text[4:6])
day = int(text[6:8])
hour = int(text[8:10])
minute = int(text[10:12])
second = int(text[12:14])
return datetime.datetime(year, month, day, hour, minute, second,
tzinfo=datetime.timezone.utc)
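
# Worked example: the 14-character RFC 3659 time-val '20170315143005' parses
# to 2017-03-15 14:30:05 UTC; fractional seconds, if present, are truncated.
#
#   convert_machine_list_time_val('20170315143005')
#   # -> datetime.datetime(2017, 3, 15, 14, 30, 5, tzinfo=datetime.timezone.utc)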
def machine_listings_to_file_entries(listings: Iterable[dict]) -> \
Iterable[FileEntry]:
'''Convert results from parsing machine listings to FileEntry list.'''
for listing in listings:
yield FileEntry(
listing['name'],
type=listing.get('type'),
size=listing.get('size'),
date=listing.get('modify')
)
| chfoo/wpull | wpull/protocol/ftp/util.py | Python | gpl-3.0 | 5,974 |
'''
Pyazo is my final project for CS230 (Computing I)
More info in the README
'''
#import all the things
import sys
sys.path.append('./modules')
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QPainter, QColor, QFont, QCursor
from PyQt5.QtCore import Qt, QRect, QTimer
#Doing this the OOP way..
class Gui(QWidget):
def __init__(self):
self.__eventHandlers = []
#Initialize the QApp/QWidget things
super().__init__()
#Add a default rectangle
self.__rectangle = QRect(0, 0, 0, 0)
self.__relativeX = 0
self.__relativeY = 0
#Build the window in a method to keep the init clean
self.buildWindow()
#Custom event handling
#Add events
def on(self, eventName, handler):
self.__eventHandlers.append([eventName, handler])
#Fire events
def __fire(self, eventName, *args):
for event in self.__eventHandlers:
if(event[0] == eventName):
event[1](*args)
#Build the window
def buildWindow(self):
#Set the window title even though it will not be seen
self.setWindowTitle('Pyazo')
#Make the window transparent
self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
#Maximize the window
self.resize(1920, 1080)
#Enable mouse tracking
self.setMouseTracking(True)
#Render the window
self.show()
#Paint things
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
#Paint the rectangle
rectangleColor = QColor(200, 200, 200, 100)
qp.setBrush(rectangleColor)
qp.setPen(rectangleColor)
qp.drawRect(self.__rectangle)
qp.end()
#Handle the mouse events below
#press
def mousePressEvent(self, event):
# 'Mouse Click'
#update rectangle coords
self.__rectangle.setCoords(event.x(), event.y(), event.x(), event.y())
self.__relativeX = event.x()
self.__relativeY = event.y()
#repaint
self.repaint()
#release
def mouseReleaseEvent(self, event):
# 'Mouse Release'
#Get the corners of our rectangle for use when actually taking the image from the screen
x = self.__rectangle.left()
y = self.__rectangle.top()
width = self.__rectangle.width()
height = self.__rectangle.height()
self.__rectangle.setCoords(0, 0, 0, 0)
self.update()
#Hide the GUI after the button is released
self.setVisible(False)
#Fire our 'release' event, use the handler we defined, call it after we hide the GUI (so we don't get an image of the GUI)
#Use a timer to create this effect, executing our handler in the QT event loop
#also use a lambda function because singleShot requires anonymity
QTimer.singleShot(0, lambda: self.__fire('release', x, y, width, height))
#drag
def mouseMoveEvent(self, event):
if(event.buttons() == Qt.LeftButton):
# 'Dragging'
#update rectangle bottom left corner to the mouse pos
if(event.x() > self.__relativeX):
self.__rectangle.setRight(event.x())
self.__rectangle.setLeft(self.__relativeX)
elif(event.x() < self.__relativeX):
self.__rectangle.setLeft(event.x())
self.__rectangle.setRight(self.__relativeX)
if(event.y() < self.__relativeY):
self.__rectangle.setTop(event.y())
self.__rectangle.setBottom(self.__relativeY)
elif(event.y() > self.__relativeY):
self.__rectangle.setBottom(event.y())
self.__rectangle.setTop(self.__relativeY)
#repaint
self.repaint()
#Main function
def main():
#Instantiate our app and Gui stuff.
app = QApplication(sys.argv)
gui = Gui()
#Make the cursor the "cross cursor" for effect
app.setOverrideCursor(QCursor(Qt.CrossCursor))
#Exit when our app exits
sys.exit(app.exec_())
#That one thing that we write for funzies (not really, its important)
if(__name__ == '__main__'):
main()
| PorterK/pyazo | Gui.py | Python | gpl-3.0 | 4,131 |
# -*- coding: utf-8 -*-
"""
Copyright © 2017 - Alexandre Machado <axmachado@gmail.com>
This file is part of Simple POS Compiler.
Simnple POS Compiler is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3
of the License, or (at your option) any later version.
Simple POS Compiler is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Simple POS Compiler. If not, see <http://www.gnu.org/licenses/>.
@author: Alexandre Machado <axmachado@gmail.com>
"""
from ..objfile.functions import ApiFunction, VoidApiFunction
from ..objfile.typedefs import STRING, INT
def initApiFunctions():
functions = []
fcn = ApiFunction(INT, "menu", ("options", STRING, False),
returnAttribute="variable")
functions.append(fcn)
fcn = ApiFunction(STRING, "menuwithheader",
("header", STRING, False),
("timeoutheader", INT, False),
("options", STRING, False),
("timeout", INT, False),
returnAttribute="variablereturn"
)
functions.append(fcn)
fcn = ApiFunction(INT, "displaybitmap",
("filename", STRING, False),
returnAttribute="variablereturn")
functions.append(fcn)
fcn = VoidApiFunction("display",
("line", INT, False),
("column", INT, False),
("message", STRING, False))
functions.append(fcn)
fcn = VoidApiFunction("cleandisplay")
functions.append(fcn)
fcn = ApiFunction(INT, "gettouch", ("axisx", INT, True),
("axisy", INT, True),
tagName="system.gettouchscreen",
returnAttribute="variablereturn")
functions.append(fcn)
fcn = ApiFunction(STRING, "inputfloat", ("line", INT, False),
("column", INT, False),
("message", STRING, False),
tagName="inputfloat", returnAttribute="variable")
functions.append(fcn)
fcn = ApiFunction(STRING, "inputformat", ("line", INT, False),
("column", INT, False),
("message", STRING, False),
("format", STRING, False), tagName="inputformat",
returnAttribute="variable")
functions.append(fcn)
fcn = ApiFunction(INT, "inputint", ("line", INT, False),
("column", INT, False),
("message", STRING, False),
("minimum", INT, False),
("maximum", INT, False), tagName="inputinteger",
returnAttribute="variable")
functions.append(fcn)
fcn = ApiFunction(INT, "inputoption", ("line", INT, False),
("column", INT, False),
("message", STRING, False),
("minimum", INT, False),
("maximum", INT, False), tagName="inputoption",
returnAttribute="variable")
functions.append(fcn)
fcn = ApiFunction(INT, "inputmoney", ("line", INT, False),
("column", INT, False),
("message", STRING, False),
tagName="inputmoney", returnAttribute="variable")
functions.append(fcn)
return functions
| axmachado/simplepos | simplepos/api/ui.py | Python | gpl-3.0 | 3,780 |
import mutagen.mp4
from mutagen import Metadata
from mutagen._util import DictMixin, dict_match, utf8
from mutagen.mp4 import MP4, MP4Tags, error, delete
__all__ = ["EasyMP4Tags", "EasyMP4", "delete", "error"]
class EasyMP4KeyError(error, KeyError, ValueError):
pass
class EasyMP4Tags(DictMixin, Metadata):
"""A file with MPEG-4 iTunes metadata.
Like Vorbis comments, EasyMP4Tags keys are case-insensitive ASCII
strings, and values are a list of Unicode strings (and these lists
are always of length 0 or 1). If you need access to the full MP4
metadata feature set, you should use MP4, not EasyMP4.
"""
Set = {}
Get = {}
Delete = {}
List = {}
def __init__(self, *args, **kwargs):
self.__mp4 = MP4Tags(*args, **kwargs)
self.load = self.__mp4.load
self.save = self.__mp4.save
self.delete = self.__mp4.delete
filename = property(
lambda s: s.__mp4.filename, lambda s, fn: setattr(s.__mp4, "filename", fn)
)
def RegisterKey(cls, key, getter=None, setter=None, deleter=None, lister=None):
"""Register a new key mapping.
A key mapping is four functions, a getter, setter, deleter,
and lister. The key may be either a string or a glob pattern.
The getter, deleter, and lister receive an MP4Tags instance
and the requested key name. The setter also receives the
desired value, which will be a list of strings.
The getter, setter, and deleter are used to implement __getitem__,
__setitem__, and __delitem__.
The lister is used to implement keys(). It should return a
list of keys that are actually in the MP4 instance, provided
by its associated getter.
"""
key = key.lower()
if getter is not None:
cls.Get[key] = getter
if setter is not None:
cls.Set[key] = setter
if deleter is not None:
cls.Delete[key] = deleter
if lister is not None:
cls.List[key] = lister
RegisterKey = classmethod(RegisterKey)
def RegisterTextKey(cls, key, atomid):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 atom name to EasyMP4Tags key, then you can use this
function:
EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
"""
def getter(tags, key):
return tags[atomid]
def setter(tags, key, value):
tags[atomid] = value
def deleter(tags, key):
del tags[atomid]
cls.RegisterKey(key, getter, setter, deleter)
RegisterTextKey = classmethod(RegisterTextKey)
def RegisterIntKey(cls, key, atomid, min_value=0, max_value=2 ** 16 - 1):
"""Register a scalar integer key.
"""
def getter(tags, key):
return map(unicode, tags[atomid])
def setter(tags, key, value):
clamp = lambda x: int(min(max(min_value, x), max_value))
tags[atomid] = map(clamp, map(int, value))
def deleter(tags, key):
del tags[atomid]
cls.RegisterKey(key, getter, setter, deleter)
RegisterIntKey = classmethod(RegisterIntKey)
def RegisterIntPairKey(cls, key, atomid, min_value=0, max_value=2 ** 16 - 1):
def getter(tags, key):
ret = []
for (track, total) in tags[atomid]:
if total:
ret.append(u"%d/%d" % (track, total))
else:
ret.append(unicode(track))
return ret
def setter(tags, key, value):
clamp = lambda x: int(min(max(min_value, x), max_value))
data = []
for v in value:
try:
tracks, total = v.split("/")
tracks = clamp(int(tracks))
total = clamp(int(total))
except (ValueError, TypeError):
tracks = clamp(int(v))
total = min_value
data.append((tracks, total))
tags[atomid] = data
def deleter(tags, key):
del tags[atomid]
cls.RegisterKey(key, getter, setter, deleter)
RegisterIntPairKey = classmethod(RegisterIntPairKey)
def RegisterFreeformKey(cls, key, name, mean="com.apple.iTunes"):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 freeform atom (----) and name to EasyMP4Tags key, then
you can use this function:
EasyMP4Tags.RegisterFreeformKey(
"musicbrainz_artistid", "MusicBrainz Artist Id")
"""
atomid = "----:%s:%s" % (mean, name)
def getter(tags, key):
return [s.decode("utf-8", "replace") for s in tags[atomid]]
def setter(tags, key, value):
tags[atomid] = map(utf8, value)
def deleter(tags, key):
del tags[atomid]
cls.RegisterKey(key, getter, setter, deleter)
RegisterFreeformKey = classmethod(RegisterFreeformKey)
def __getitem__(self, key):
key = key.lower()
func = dict_match(self.Get, key)
if func is not None:
return func(self.__mp4, key)
else:
raise EasyMP4KeyError("%r is not a valid key" % key)
def __setitem__(self, key, value):
key = key.lower()
if isinstance(value, basestring):
value = [value]
func = dict_match(self.Set, key)
if func is not None:
return func(self.__mp4, key, value)
else:
raise EasyMP4KeyError("%r is not a valid key" % key)
def __delitem__(self, key):
key = key.lower()
func = dict_match(self.Delete, key)
if func is not None:
return func(self.__mp4, key)
else:
raise EasyMP4KeyError("%r is not a valid key" % key)
def keys(self):
keys = []
for key in self.Get.keys():
if key in self.List:
keys.extend(self.List[key](self.__mp4, key))
elif key in self:
keys.append(key)
return keys
def pprint(self):
"""Print tag key=value pairs."""
strings = []
for key in sorted(self.keys()):
values = self[key]
for value in values:
strings.append("%s=%s" % (key, value))
return "\n".join(strings)
for atomid, key in {
"\xa9nam": "title",
"\xa9alb": "album",
"\xa9ART": "artist",
"aART": "albumartist",
"\xa9day": "date",
"\xa9cmt": "comment",
"desc": "description",
"\xa9grp": "grouping",
"\xa9gen": "genre",
"cprt": "copyright",
"soal": "albumsort",
"soaa": "albumartistsort",
"soar": "artistsort",
"sonm": "titlesort",
"soco": "composersort",
}.items():
EasyMP4Tags.RegisterTextKey(key, atomid)
for name, key in {
"MusicBrainz Artist Id": "musicbrainz_artistid",
"MusicBrainz Track Id": "musicbrainz_trackid",
"MusicBrainz Album Id": "musicbrainz_albumid",
"MusicBrainz Album Artist Id": "musicbrainz_albumartistid",
"MusicIP PUID": "musicip_puid",
"MusicBrainz Album Status": "musicbrainz_albumstatus",
"MusicBrainz Album Type": "musicbrainz_albumtype",
"MusicBrainz Release Country": "releasecountry",
}.items():
EasyMP4Tags.RegisterFreeformKey(key, name)
for name, key in {"tmpo": "bpm"}.items():
EasyMP4Tags.RegisterIntKey(key, name)
for name, key in {"trkn": "tracknumber", "disk": "discnumber"}.items():
EasyMP4Tags.RegisterIntPairKey(key, name)
class EasyMP4(MP4):
"""Like MP4, but uses EasyMP4Tags for tags."""
MP4Tags = EasyMP4Tags
Get = EasyMP4Tags.Get
Set = EasyMP4Tags.Set
Delete = EasyMP4Tags.Delete
List = EasyMP4Tags.List
RegisterTextKey = EasyMP4Tags.RegisterTextKey
RegisterKey = EasyMP4Tags.RegisterKey
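
# Usage sketch (hypothetical file name): EasyMP4 exposes the keys registered
# above through a dict-like interface, so tags can be edited without touching
# raw MP4 atom names.
#
#   audio = EasyMP4("song.m4a")
#   audio["title"] = "Example Title"
#   audio["tracknumber"] = "3/12"
#   audio.save()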
| hzlf/openbroadcast.org | website/tools/mutagen-v1.20.1/easymp4.py | Python | gpl-3.0 | 7,960 |
from django.conf import settings
from haystack import site
from haystack.indexes import *
from django.contrib.comments.models import Comment
import models
#This needs to be double checked
class TorrentIndex(RealTimeSearchIndex):
    text = CharField(document=True, use_template=True)
    description = CharField(model_attr='description')
    title = CharField(model_attr='title')
    user = CharField(model_attr='user__username')
    added = DateTimeField(model_attr='added')
    category = CharField(model_attr='category__title')
    seeders = IntegerField(model_attr='seeders')
    leechers = IntegerField(model_attr='leechers')
    downloaded = IntegerField(model_attr='downloaded')

    if 'django.contrib.comments' in settings.INSTALLED_APPS:
        num_comments = IntegerField()

        def prepare_num_comments(self, obj):
            return Comment.objects.for_model(obj).count()


site.register(models.Torrent, TorrentIndex)
| twoolie/ProjectNarwhal | narwhal/core/torrent/search_indexes.py | Python | gpl-3.0 | 979 |
# -*- coding: utf-8 -*-
# Derived work from Facebook's tornado server.
"""Utilities for working with multiple processes."""
import os, sys, time, errno
import pluggdapps.utils as h
_task_id = None
def fork_processes( num_processes, max_restarts ):
"""Starts multiple listener cum worker processes.
If ``num_processes`` is None or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If ``num_processes`` is given and > 0, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
module (or the debug=True option to `Platform`).
When using multiple processes, no HTTPIOLoops can be created or
referenced until after the call to ``fork_processes``.
In each child process, ``fork_processes`` returns its *task id*, a
number between 0 and ``num_processes``. Processes that exit
abnormally (due to a signal or non-zero exit status) are restarted
with the same id (up to ``max_restarts`` times). In the parent
process, ``fork_processes`` returns None if all child processes
have exited normally, but will otherwise only exit by throwing an
exception.
"""
global _task_id
assert _task_id is None
if num_processes is None or num_processes <= 0:
num_processes = h.cpu_count()
children = {}
def start_child(i):
#log.info( "Starting http connection process process, taskid %s", i )
pid = os.fork()
if pid == 0:
# child process
h.reseed_random()
global _task_id
_task_id = i
return i
else:
children[pid] = i
return None
for i in range(num_processes):
id = start_child(i)
if id is not None: # Return from child process
return id
# continue with spawning.
num_restarts = 0
while children :
try:
pid, status = os.wait()
except OSError as e:
if e.errno == errno.EINTR :
continue
raise
if pid not in children :
continue
id = children.pop(pid)
if os.WIFSIGNALED(status):
#log.warning( "child %d (pid %d) killed by signal %d, restarting",
# id, pid, os.WTERMSIG(status) )
pass
elif os.WEXITSTATUS(status) != 0:
#log.warning( "child %d (pid %d) exited with status %d, restarting",
# id, pid, os.WEXITSTATUS(status) )
pass
else:
#log.info( "child %d (pid %d) exited normally", id, pid )
continue
num_restarts += 1
if num_restarts > max_restarts:
raise RuntimeError("Too many child restarts, giving up")
new_id = start_child(id)
if new_id is not None:
return new_id
# All child processes exited cleanly, so exit the master process
# instead of just returning to right after the call to
# fork_processes (which will probably just start up another HTTPIOLoop
# unless the caller checks the return value).
sys.exit(0)
def task_id():
"""Returns the current task id, if any.
Returns None if this process was not created by `fork_processes`.
"""
global _task_id
return _task_id
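
# Usage sketch: fork four workers; each child returns from fork_processes with
# a task id in 0..3 and continues below, while the parent waits and restarts
# abnormally-exited children up to max_restarts times.
#
#   worker_id = fork_processes(4, max_restarts=10)
#   # ... per-process server setup, using worker_id / task_id() ...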
| prataprc/pluggdapps | pluggdapps/.Attic/evserver/process.py | Python | gpl-3.0 | 3,484 |
import datetime
def main(le):
    return sum(range(le + 1)) ** 2 - sum(x ** 2 for x in range(le + 1))


try:
    para = int(input())
except:
    para = 100
beg = datetime.datetime.now()
ans = main(para)
end = datetime.datetime.now()
print("answer:", ans)
print("time:", end - beg)
| nowsword/ProjectEuler | p006.py | Python | gpl-3.0 | 283 |
import os
import sys
import rospy
import rosnode
import json
import pygame
import pygame.display
from nips2016.srv import *
from nips2016.msg import *
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import Joy
from std_msgs.msg import Bool
from pypot.creatures import PoppyErgoJr
from rospkg import RosPack
from os.path import join
from .button import Button
os.environ["SDL_VIDEODRIVER"] = "dummy"
try:
pygame.display.init()
except pygame.error:
raise pygame.error("Can't connect to the console, from ssh enable -X forwarding")
pygame.joystick.init()
class Ergo(object):
def __init__(self):
self.rospack = RosPack()
with open(join(self.rospack.get_path('nips2016'), 'config', 'ergo.json')) as f:
self.params = json.load(f)
self.button = Button(self.params)
self.rate = rospy.Rate(self.params['publish_rate'])
self.eef_pub = rospy.Publisher('/nips2016/ergo/end_effector_pose', PoseStamped, queue_size=1)
self.state_pub = rospy.Publisher('/nips2016/ergo/state', CircularState, queue_size=1)
self.button_pub = rospy.Publisher('/nips2016/ergo/button', Bool, queue_size=1)
self.joy_pub = rospy.Publisher('/nips2016/ergo/joysticks/1', Joy, queue_size=1)
self.joy_pub2 = rospy.Publisher('/nips2016/ergo/joysticks/2', Joy, queue_size=1)
self.srv_reset = None
self.ergo = None
self.extended = False
self.standby = False
self.last_activity = rospy.Time.now()
if pygame.joystick.get_count() < 2:
rospy.logerr("Ergo: Expecting 2 joysticks but found only {}, exiting".format(pygame.joystick.get_count()))
sys.exit(0)
else:
self.joystick = pygame.joystick.Joystick(0)
self.joystick2 = pygame.joystick.Joystick(1)
self.joystick.init()
self.joystick2.init()
if self.params['useless_joystick_id'] != int(self.joystick2.get_name()[-1]):
useless_joy = self.joystick
self.joystick = self.joystick2
self.joystick2 = useless_joy
rospy.loginfo('Initialized Joystick 1: {}'.format(self.joystick.get_name()))
rospy.loginfo('Initialized Joystick 2: {}'.format(self.joystick2.get_name()))
def force_speeds(self):
for m in self.ergo.motors:
m.moving_speed = 100
def go_to_start(self, slow=True):
self.go_to([0.0, -15.4, 35.34, -8.06, -15.69, 71.99], 4 if slow else 1)
def go_to_extended(self):
extended = {'m2': 60, 'm3': -37, 'm5': -50, 'm6': 96}
self.ergo.goto_position(extended, 0.5)
self.extended = True
def go_to_rest(self):
rest = {'m2': -26, 'm3': 59, 'm5': -30, 'm6': 78}
self.ergo.goto_position(rest, 0.5)
self.extended = False
def is_controller_running(self):
return len([node for node in rosnode.get_node_names() if 'controller' in node]) > 0
def go_or_resume_standby(self):
recent_activity = rospy.Time.now() - self.last_activity < rospy.Duration(self.params['auto_standby_duration'])
if recent_activity and self.standby:
rospy.loginfo("Ergo is resuming from standby")
self.ergo.compliant = False
self.standby = False
elif not self.standby and not recent_activity:
rospy.loginfo("Ergo is entering standby mode")
self.standby = True
self.ergo.compliant = True
if self.is_controller_running():
self.last_activity = rospy.Time.now()
def go_to(self, motors, duration):
self.ergo.goto_position(dict(zip(['m1', 'm2', 'm3', 'm4', 'm5', 'm6'], motors)), duration)
rospy.sleep(duration)
def run(self, dummy=False):
try:
self.ergo = PoppyErgoJr(use_http=True, simulator='poppy-simu' if dummy else None, camera='dummy')
except IOError as e:
rospy.logerr("Ergo hardware failed to init: {}".format(e))
return None
self.ergo.compliant = False
self.go_to_start()
self.last_activity = rospy.Time.now()
self.srv_reset = rospy.Service('/nips2016/ergo/reset', Reset, self._cb_reset)
rospy.loginfo('Ergo is ready and starts joystick servoing...')
self.force_speeds()
while not rospy.is_shutdown():
self.go_or_resume_standby()
pygame.event.get()
x = self.joystick.get_axis(0)
y = self.joystick.get_axis(1)
self.servo_robot(y, x)
self.publish_eef()
self.publish_state()
self.publish_button()
# Publishers
self.publish_joy(x, y, self.joy_pub)
x = self.joystick2.get_axis(0)
y = self.joystick2.get_axis(1)
self.publish_joy(x, y, self.joy_pub2)
self.rate.sleep()
self.ergo.compliant = True
self.ergo.close()
def servo_axis_rotation(self, x):
x = x if abs(x) > self.params['sensitivity_joy'] else 0
p = self.ergo.motors[0].goal_position
min_x = self.params['bounds'][0][0] + self.params['bounds'][3][0]
max_x = self.params['bounds'][0][1] + self.params['bounds'][3][1]
new_x = min(max(min_x, p + self.params['speed']*x/self.params['publish_rate']), max_x)
if new_x > self.params['bounds'][0][1]:
new_x_m3 = new_x - self.params['bounds'][0][1]
elif new_x < self.params['bounds'][0][0]:
new_x_m3 = new_x - self.params['bounds'][0][0]
else:
new_x_m3 = 0
new_x_m3 = max(min(new_x_m3, self.params['bounds'][3][1]), self.params['bounds'][3][0])
self.ergo.motors[0].goto_position(new_x, 1.1/self.params['publish_rate'])
self.ergo.motors[3].goto_position(new_x_m3, 1.1/self.params['publish_rate'])
def servo_axis_elongation(self, x):
if x > self.params['min_joy_elongation']:
self.go_to_extended()
else:
self.go_to_rest()
def servo_axis(self, x, id):
x = x if abs(x) > self.params['sensitivity_joy'] else 0
p = self.ergo.motors[id].goal_position
new_x = p + self.params['speed']*x/self.params['publish_rate']
if self.params['bounds'][id][0] < new_x < self.params['bounds'][id][1]:
self.ergo.motors[id].goto_position(new_x, 1.1/self.params['publish_rate'])
def servo_robot(self, x, y):
self.servo_axis_rotation(-x)
self.servo_axis_elongation(y)
def publish_eef(self):
pose = PoseStamped()
pose.header.frame_id = 'ergo_base'
eef_pose = self.ergo.chain.end_effector
pose.header.stamp = rospy.Time.now()
pose.pose.position.x = eef_pose[0]
pose.pose.position.y = eef_pose[1]
pose.pose.position.z = eef_pose[2]
self.eef_pub.publish(pose)
def publish_button(self):
self.button_pub.publish(Bool(data=self.button.pressed))
def publish_state(self):
# TODO We might want a better state here, get the arena center, get EEF and do the maths as in environment/get_state
angle = self.ergo.motors[0].present_position + self.ergo.motors[3].present_position
self.state_pub.publish(CircularState(angle=angle, extended=self.extended))
def publish_joy(self, x, y, publisher):
# Update the last activity
if abs(x) > self.params['min_joy_activity'] or abs(y) > self.params['min_joy_activity']:
self.last_activity = rospy.Time.now()
joy = Joy()
joy.header.stamp = rospy.Time.now()
joy.axes.append(x)
joy.axes.append(y)
publisher.publish(joy)
def _cb_reset(self, request):
rospy.loginfo("Resetting Ergo...")
self.go_to_start(request.slow)
return ResetResponse()
| sebastien-forestier/NIPS2016 | ros/nips2016/src/nips2016/ergo/ergo.py | Python | gpl-3.0 | 7,815 |
# coding=utf-8
from popular_proposal.filters import (ProposalWithoutAreaFilter,
ProposalWithAreaFilter,
filterable_areas,
ProposalGeneratedAtFilter
)
from popular_proposal.tests import ProposingCycleTestCaseBase
from popular_proposal.models import PopularProposal
from elections.models import Area
from popular_proposal.forms.form_texts import TOPIC_CHOICES
from django.core.management import call_command
import haystack
from datetime import timedelta
from django.utils import timezone
from django.test import override_settings
from django.core.urlresolvers import reverse
one_day_ago = timezone.now() - timedelta(days=1)
two_days_ago = timezone.now() - timedelta(days=2)
three_days_ago = timezone.now() - timedelta(days=3)
@override_settings(FILTERABLE_AREAS_TYPE=['Comuna'])
class PopularProposalFilterTestCase(ProposingCycleTestCaseBase):
def setUp(self):
super(PopularProposalFilterTestCase, self).setUp()
self.algarrobo = Area.objects.get(id=1)
self.p1 = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'P1',
clasification=TOPIC_CHOICES[1][0]
)
self.p2 = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'P2',
clasification=TOPIC_CHOICES[2][0]
)
self.p3 = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'P3',
clasification=TOPIC_CHOICES[3][0]
)
def test_filter_proposals(self):
data = {'clasification': TOPIC_CHOICES[1][0]}
f = ProposalWithoutAreaFilter(data=data)
self.assertIn(self.p1, f.qs)
self.assertNotIn(self.p2, f.qs)
self.assertNotIn(self.p2, f.qs)
def test_filter_with_area(self):
data = {'clasification': TOPIC_CHOICES[1][0],
'area': self.algarrobo.id}
f = ProposalWithAreaFilter(data=data)
self.assertIn(self.p1, f.qs)
self.assertNotIn(self.p2, f.qs)
self.assertNotIn(self.p2, f.qs)
data = {'area': self.algarrobo.id}
f = ProposalWithAreaFilter(data=data)
self.assertIn(self.p1, f.qs)
self.assertIn(self.p2, f.qs)
self.assertIn(self.p2, f.qs)
def test_filter_where_generated_area(self):
chonchi = Area.objects.create(name="Chonchi", classification="Comuna")
p = PopularProposal.objects.create(proposer=self.fiera,
data=self.data,
title=u'P2',
generated_at=chonchi,
clasification=TOPIC_CHOICES[2][0]
)
data = {'generated_at': chonchi.id}
f = ProposalGeneratedAtFilter(data=data)
self.assertIn(p, f.qs)
self.assertNotIn(self.p1, f.qs)
self.assertNotIn(self.p2, f.qs)
self.assertNotIn(self.p2, f.qs)
def test_filterable_areas(self):
laja = Area.objects.create(name="Laja", classification="Comuna")
rm = Area.objects.create(name="region metropolitana",
classification=u"Región")
osorno = Area.objects.create(name="Osorno", classification="Comuna")
areas = filterable_areas("This is a request")
p = PopularProposal.objects.create(proposer=self.fiera,
data=self.data,
title=u'P2',
generated_at=rm,
clasification=TOPIC_CHOICES[2][0]
)
p2 = PopularProposal.objects.create(proposer=self.fiera,
data=self.data,
title=u'P2',
generated_at=laja,
clasification=TOPIC_CHOICES[2][0]
)
self.assertIn(laja, areas)
self.assertNotIn(osorno, areas)
self.assertNotIn(rm, areas)
def test_filters_by_text(self):
for key, opts in haystack.connections.connections_info.items():
haystack.connections.reload(key)
call_command('update_index', interactive=False, verbosity=0)
data = {'text': 'P2'
}
f = ProposalWithAreaFilter(data=data)
self.assertTrue(f.qs.count())
self.assertIn(self.p2, f.qs)
@override_settings(FILTERABLE_AREAS_TYPE=['Comuna'])
class OrderingFormTestCase(ProposingCycleTestCaseBase):
def setUp(self):
super(OrderingFormTestCase, self).setUp()
self.algarrobo = Area.objects.get(id=1)
def test_order_by_in_form(self):
url = reverse('popular_proposals:home')
response = self.client.get(url)
form = response.context['form']
self.assertIn('order_by', form.fields.keys())
def test_ordered_by_time_descending(self):
p1 = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'P1',
clasification=TOPIC_CHOICES[1][0]
)
p1.created = two_days_ago
p1.save()
p2 = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'P2',
clasification=TOPIC_CHOICES[2][0]
)
p2.created = three_days_ago
p2.save()
p3 = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'P3',
clasification=TOPIC_CHOICES[3][0]
)
p3.created = one_day_ago
p3.save()
data = {'order_by': '-created'}
url = reverse('popular_proposals:home')
response = self.client.get(url, data)
qs = response.context['popular_proposals']
self.assertEquals(qs[0], p3)
self.assertEquals(qs[1], p1)
self.assertEquals(qs[2], p2)
def test_filtered_by_area(self):
peru = Area.objects.create(name=u"Perú")
p1 = PopularProposal.objects.create(proposer=self.fiera,
area=peru,
data=self.data,
title=u'P1',
clasification=TOPIC_CHOICES[1][0]
)
p1.created = two_days_ago
p1.save()
p2 = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'P2',
clasification=TOPIC_CHOICES[2][0]
)
p2.created = three_days_ago
p2.save()
p3 = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'P3',
clasification=TOPIC_CHOICES[3][0]
)
p3.created = one_day_ago
p3.save()
data = {'area': self.algarrobo.id}
url = reverse('popular_proposals:home')
response = self.client.get(url, data)
qs = response.context['popular_proposals']
self.assertIn(p2, qs)
self.assertIn(p3, qs)
self.assertNotIn(p1, qs)
data = {'area': "non-existing"}
url = reverse('popular_proposals:home')
response = self.client.get(url, data)
qs = response.context['popular_proposals']
self.assertIn(p2, qs)
self.assertIn(p3, qs)
self.assertIn(p1, qs)
|
ciudadanointeligente/votainteligente-portal-electoral
|
popular_proposal/tests/filter_tests.py
|
Python
|
gpl-3.0
| 9,392
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__all__ = ['MatchWildCard']
# The main function that checks if two given strings match.
# The uMatchWithWildCard string may contain wildcard characters
def MatchWildCard(*,uValue:str,uMatchWithWildCard:str):
# If we reach at the end of both strings, we are done
if len(uMatchWithWildCard) == 0 and len(uValue) == 0:
return True
# Make sure that the characters after '*' are present
# in uValue string. This function assumes that the uMatchWithWildCard
# string will not contain two consecutive '*'
if len(uMatchWithWildCard) > 1 and uMatchWithWildCard[0] == '*' and len(uValue) == 0:
return False
    # If the uMatchWithWildCard string starts with '?' (which matches exactly one
    # character of uValue), or the current characters of both strings match,
    # consume one character from each string and recurse
    if len(uMatchWithWildCard) != 0 and len(uValue) != 0 and (uMatchWithWildCard[0] == '?' or uMatchWithWildCard[0] == uValue[0]):
return MatchWildCard(uMatchWithWildCard=uMatchWithWildCard[1:], uValue=uValue[1:])
# If there is *, then there are two possibilities
# a) We consider current character of uValue string
# b) We ignore current character of uValue string.
if len(uMatchWithWildCard) != 0 and uMatchWithWildCard[0] == '*':
return MatchWildCard(uMatchWithWildCard=uMatchWithWildCard[1:], uValue=uValue) or MatchWildCard(uMatchWithWildCard=uMatchWithWildCard, uValue=uValue[1:])
return False
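# A minimal usage sketch (added for illustration; the sample strings are
# assumptions, not part of the original module). MatchWildCard takes
# keyword-only arguments, so both names must be spelled out:
#
#   MatchWildCard(uValue="orca-remote", uMatchWithWildCard="orca*")    # True
#   MatchWildCard(uValue="orca-remote", uMatchWithWildCard="*remote")  # True
#   MatchWildCard(uValue="orca-remote", uMatchWithWildCard="remote*")  # False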
|
thica/ORCA-Remote
|
src/ORCA/utils/Wildcard.py
|
Python
|
gpl-3.0
| 2,302
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-24 15:15
from __future__ import unicode_literals
from django.db import migrations, models
def populate_thesis_providers(apps, schema_editor):
ThesisProvider = apps.get_model('erudit', 'ThesisProvider')
Collection = apps.get_model('erudit', 'Collection')
Thesis = apps.get_model('erudit', 'Thesis')
collection_ids = Thesis.objects.values_list('collection_id', flat=True)
collections = Collection.objects.filter(id__in=collection_ids)
for collection in collections.all():
tp = ThesisProvider.objects.create(
code=collection.code,
name=collection.name,
solr_name=collection.name,
logo=collection.logo,
)
class Migration(migrations.Migration):
dependencies = [
('erudit', '0088_remove_article_copyrights'),
]
operations = [
migrations.CreateModel(
name='ThesisProvider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=10, unique=True, verbose_name='Code')),
('name', models.CharField(max_length=200, verbose_name='Nom')),
('solr_name', models.CharField(db_index=True, max_length=200, verbose_name='Nom dans Solr')),
('logo', models.ImageField(blank=True, verbose_name='Logo')),
],
options={
'verbose_name_plural': 'Éditeurs de thèses',
'verbose_name': 'Éditeur de thèses',
},
),
migrations.RunPython(populate_thesis_providers, reverse_code=migrations.RunPython.noop),
]
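# Illustrative invocation (not part of the migration itself); the app label and
# migration name are taken from this file's location:
#
#   python manage.py migrate erudit 0089_thesisprovider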
|
erudit/zenon
|
eruditorg/erudit/migrations/0089_thesisprovider.py
|
Python
|
gpl-3.0
| 1,746
|
from MappingItem import *
# tuto de list:
# http://effbot.org/zone/python-list.htm
class KeyMapReader:
    def __init__(self):
        # instance attributes (avoids sharing one mutable dict across instances)
        self.m_mapFileName = None
        self.m_hasRead = False
        self.m_keyArgumentMap = {}  # contains all the valid MappingItems, it is a dictionary
def setMapFileName(self, fileName):
self.m_mapFileName = fileName
# reads every line of the mapping file
def processMapFile(self):
try:
# open the file
with open(self.m_mapFileName) as fp:
for line in fp:
# read a line
currentLine = line.strip()
if(len(currentLine)>0 and currentLine[0] != "#" ): # the line is not a comment...
tempMappingItem = MappingItem(currentLine)
# adding the map item
if(tempMappingItem.isValid()):
self.m_keyArgumentMap[tempMappingItem.getMapKey()] = tempMappingItem.getMapAgument()
else:
tempMappingItem = None
except IOError, e:
print("ERROR: the file does not exist or is corrupted")
if(len(self.m_keyArgumentMap) == 0):
print("WARNING: the mapping file does not contain any instructions.")
else:
print("The mapping file contains " + str(len(self.m_keyArgumentMap)) + " instructions.")
#print(self.m_keyArgumentMap)
def getKeyArgumentMap(self):
return self.m_keyArgumentMap
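# A minimal usage sketch (added for illustration; the file name and printed
# format are assumptions, not part of the original module):
#
#   reader = KeyMapReader()
#   reader.setMapFileName("keymap.txt")
#   reader.processMapFile()
#   for key, argument in reader.getKeyArgumentMap().items():
#       print(key + " -> " + str(argument))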
|
jonathanlurie/MidiCombo
|
midiCombo/KeyMapReader.py
|
Python
|
gpl-3.0
| 1,329
|
# -*- coding: utf-8 -*-
#
# rbf documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 17 13:38:21 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rbf'
copyright = u'2015, dandyvica'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'rbfdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'rbf.tex', u'rbf Documentation',
u'dandyvica', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rbf', u'rbf Documentation',
[u'dandyvica'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'rbf', u'rbf Documentation',
u'dandyvica', 'rbf', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
dandyvica/rbfpython
|
doc/conf.py
|
Python
|
gpl-3.0
| 8,168
|
#!/usr/bin/python
# -*- coding: utf8 -*-
#
# RDFohloh <http://rdfohloh.googlecode.com/>
# GetNT, a simple script to get triples from a RDF/XML file
#
# Copyright (C) 2008 Sergio Fernández
#
# This file is part of RDFohloh, a RDF wrapper of Ohloh.
#
# RDFohloh is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RDFohloh is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RDFohloh. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
from rdflib.Graph import ConjunctiveGraph
class GetNT:
def __init__(self, path):
self.path = path
self.graph = self.parse()
print self.get_triples()
def parse(self):
g = ConjunctiveGraph()
g.parse(self.path)
return g
def get_triples(self):
return self.graph.serialize(format="nt")
def usage():
print """
GetNT usage:
$ python getnt.py file
"""
sys.exit()
if __name__ == "__main__":
try:
args = sys.argv[1:]
if (len(args)>0 and os.path.exists(args[0])):
path = args[0]
GetNT(path)
else:
usage()
except KeyboardInterrupt:
print "Received Ctrl+C or another break signal. Exiting..."
|
wikier/rdfohloh
|
tools/rod/getnt.py
|
Python
|
gpl-3.0
| 1,644
|
import ipp
s = ipp.from_s("130.216.3.10")
print "s = %s" % s
print "version = %d, addr = %s, length = %d\n" % (
s.version, s.addr, s.length)
iph4 = {}
iph6 = {}
def add_to_iph(pref_s):
global iph4, iph6
s = ipp.from_s(pref_s)
if s.version == 4:
v = iph4.get(s)
if v:
iph4[s] += 1
else:
iph4[s] = 1
else:
v = iph6.get(s)
if v:
iph6[s] += 1
else:
iph6[s] = 1
add_to_iph("130.216.3.10")
add_to_iph("130.216.3.11")
add_to_iph("130.216.3.10")
add_to_iph("130.216.5.10")
add_to_iph("130.216.5.10")
add_to_iph("130.216.3.10/24")
add_to_iph("130.216.3.11/24")
add_to_iph("130.216.3.10/23")
add_to_iph("130.216.3.11/23")
add_to_iph("2001:df0:0:321:1:2:3:4/128")
add_to_iph("2001:df0:0:abcd::1")
add_to_iph("2001:df0:0:321:1:2:3:4/64")
sk = sorted(iph4)
for dk in sk:
print("%3d: %s" % (iph4[dk], dk))
print
sk = sorted(iph6)
for dk in sk:
print("%3d: %s" % (iph6[dk], dk))
print
p32 = ipp.from_s("130.216.38.3")
print("p32 = %s" % p32)
p24 = p32.network(24)
print("p24 = %s" % p24)
p16 = p24.network(16)
print("p16 = %s" % p16)
print("p32=%s, p24=%s, p16=%s\n" % (p32, p24, p16))
|
nevil-brownlee/pypy-libtrace
|
lib/ipp/test-ipp-hash.py
|
Python
|
gpl-3.0
| 1,218
|
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''Dialog to edit metadata in bulk'''
import re, os
from collections import namedtuple, defaultdict
from threading import Thread
from PyQt5.Qt import Qt, QDialog, QGridLayout, QVBoxLayout, QFont, QLabel, \
pyqtSignal, QDialogButtonBox, QInputDialog, QLineEdit, \
QDateTime, QCompleter
from calibre.gui2.dialogs.metadata_bulk_ui import Ui_MetadataBulkDialog
from calibre.gui2.dialogs.tag_editor import TagEditor
from calibre.ebooks.metadata import string_to_authors, authors_to_string, title_sort
from calibre.ebooks.metadata.book.formatter import SafeFormat
from calibre.gui2.custom_column_widgets import populate_metadata_page
from calibre.gui2 import error_dialog, ResizableDialog, UNDEFINED_QDATETIME, \
gprefs, question_dialog, FunctionDispatcher
from calibre.gui2.progress_indicator import ProgressIndicator
from calibre.gui2.metadata.basic_widgets import CalendarWidget
from calibre.utils.config import dynamic, JSONConfig
from calibre.utils.titlecase import titlecase
from calibre.utils.icu import sort_key, capitalize
from calibre.utils.config import prefs, tweaks
from calibre.utils.magick.draw import identify_data
from calibre.utils.date import qt_to_dt
from calibre.db import _get_next_series_num_for_list
def get_cover_data(stream, ext): # {{{
from calibre.ebooks.metadata.meta import get_metadata
old = prefs['read_file_metadata']
if not old:
prefs['read_file_metadata'] = True
cdata = area = None
try:
with stream:
mi = get_metadata(stream, ext)
if mi.cover and os.access(mi.cover, os.R_OK):
cdata = open(mi.cover).read()
elif mi.cover_data[1] is not None:
cdata = mi.cover_data[1]
if cdata:
width, height, fmt = identify_data(cdata)
area = width*height
except:
cdata = area = None
if old != prefs['read_file_metadata']:
prefs['read_file_metadata'] = old
return cdata, area
# }}}
Settings = namedtuple('Settings', 'remove_all remove add au aus do_aus rating pub do_series do_autonumber do_remove_format '
'remove_format do_swap_ta do_remove_conv do_auto_author series do_series_restart series_start_value '
'do_title_case cover_action clear_series clear_pub pubdate adddate do_title_sort languages clear_languages restore_original comments')
null = object()
class MyBlockingBusy(QDialog): # {{{
all_done = pyqtSignal()
def __init__(self, args, ids, db, refresh_books, cc_widgets, s_r_func, do_sr, sr_calls, parent=None, window_title=_('Working')):
QDialog.__init__(self, parent)
self._layout = l = QVBoxLayout()
self.setLayout(l)
self.msg = QLabel(_('Processing %d books, please wait...') % len(ids))
self.font = QFont()
self.font.setPointSize(self.font.pointSize() + 8)
self.msg.setFont(self.font)
self.pi = ProgressIndicator(self)
self.pi.setDisplaySize(100)
self._layout.addWidget(self.pi, 0, Qt.AlignHCenter)
self._layout.addSpacing(15)
self._layout.addWidget(self.msg, 0, Qt.AlignHCenter)
self.setWindowTitle(window_title + '...')
self.setMinimumWidth(200)
self.resize(self.sizeHint())
self.error = None
self.all_done.connect(self.on_all_done, type=Qt.QueuedConnection)
self.args, self.ids = args, ids
self.db, self.cc_widgets = db, cc_widgets
self.s_r_func = FunctionDispatcher(s_r_func)
self.do_sr = do_sr
self.sr_calls = sr_calls
self.refresh_books = refresh_books
def accept(self):
pass
def reject(self):
pass
def on_all_done(self):
if not self.error:
# The cc widgets can only be accessed in the GUI thread
try:
for w in self.cc_widgets:
w.commit(self.ids)
except Exception as err:
import traceback
self.error = (err, traceback.format_exc())
self.pi.stopAnimation()
QDialog.accept(self)
def exec_(self):
self.thread = Thread(target=self.do_it)
self.thread.start()
self.pi.startAnimation()
return QDialog.exec_(self)
def do_it(self):
try:
self.do_all()
except Exception as err:
import traceback
try:
err = unicode(err)
except:
err = repr(err)
self.error = (err, traceback.format_exc())
self.all_done.emit()
def do_all(self):
cache = self.db.new_api
args = self.args
# Title and authors
if args.do_swap_ta:
title_map = cache.all_field_for('title', self.ids)
authors_map = cache.all_field_for('authors', self.ids)
def new_title(authors):
ans = authors_to_string(authors)
return titlecase(ans) if args.do_title_case else ans
new_title_map = {bid:new_title(authors) for bid, authors in authors_map.iteritems()}
new_authors_map = {bid:string_to_authors(title) for bid, title in title_map.iteritems()}
cache.set_field('authors', new_authors_map)
cache.set_field('title', new_title_map)
if args.do_title_case and not args.do_swap_ta:
title_map = cache.all_field_for('title', self.ids)
cache.set_field('title', {bid:titlecase(title) for bid, title in title_map.iteritems()})
if args.do_title_sort:
lang_map = cache.all_field_for('languages', self.ids)
title_map = cache.all_field_for('title', self.ids)
def get_sort(book_id):
if args.languages:
lang = args.languages[0]
else:
try:
lang = lang_map[book_id][0]
except (KeyError, IndexError, TypeError, AttributeError):
lang = 'eng'
return title_sort(title_map[book_id], lang=lang)
cache.set_field('sort', {bid:get_sort(bid) for bid in self.ids})
if args.au:
authors = string_to_authors(args.au)
cache.set_field('authors', {bid:authors for bid in self.ids})
if args.do_auto_author:
aus_map = cache.author_sort_strings_for_books(self.ids)
cache.set_field('author_sort', {book_id:' & '.join(aus_map[book_id]) for book_id in aus_map})
if args.aus and args.do_aus:
cache.set_field('author_sort', {bid:args.aus for bid in self.ids})
# Covers
if args.cover_action == 'remove':
cache.set_cover({bid:None for bid in self.ids})
elif args.cover_action == 'generate':
from calibre.ebooks.covers import generate_cover
for book_id in self.ids:
mi = self.db.get_metadata(book_id, index_is_id=True)
cdata = generate_cover(mi)
cache.set_cover({book_id:cdata})
elif args.cover_action == 'fromfmt':
for book_id in self.ids:
fmts = cache.formats(book_id, verify_formats=False)
if fmts:
covers = []
for fmt in fmts:
fmtf = cache.format(book_id, fmt, as_file=True)
if fmtf is None:
continue
cdata, area = get_cover_data(fmtf, fmt)
if cdata:
covers.append((cdata, area))
covers.sort(key=lambda x: x[1])
if covers:
cache.set_cover({book_id:covers[-1][0]})
elif args.cover_action == 'trim':
from calibre.utils.magick import Image
for book_id in self.ids:
cdata = cache.cover(book_id)
if cdata:
im = Image()
im.load(cdata)
im.trim(tweaks['cover_trim_fuzz_value'])
cdata = im.export('jpg')
cache.set_cover({book_id:cdata})
elif args.cover_action == 'clone':
cdata = None
for book_id in self.ids:
cdata = cache.cover(book_id)
if cdata:
break
if cdata:
cache.set_cover({bid:cdata for bid in self.ids if bid != book_id})
# Formats
if args.do_remove_format:
cache.remove_formats({bid:(args.remove_format,) for bid in self.ids})
if args.restore_original:
for book_id in self.ids:
formats = cache.formats(book_id)
originals = tuple(x.upper() for x in formats if x.upper().startswith('ORIGINAL_'))
for ofmt in originals:
cache.restore_original_format(book_id, ofmt)
# Various fields
if args.rating != -1:
cache.set_field('rating', {bid:args.rating*2 for bid in self.ids})
if args.clear_pub:
cache.set_field('publisher', {bid:'' for bid in self.ids})
if args.pub:
cache.set_field('publisher', {bid:args.pub for bid in self.ids})
if args.clear_series:
cache.set_field('series', {bid:'' for bid in self.ids})
if args.pubdate is not None:
cache.set_field('pubdate', {bid:args.pubdate for bid in self.ids})
if args.adddate is not None:
cache.set_field('timestamp', {bid:args.adddate for bid in self.ids})
if args.do_series:
sval = args.series_start_value if args.do_series_restart else cache.get_next_series_num_for(args.series, current_indices=True)
cache.set_field('series', {bid:args.series for bid in self.ids})
if not args.series:
cache.set_field('series_index', {bid:1.0 for bid in self.ids})
else:
def next_series_num(bid, i):
if args.do_series_restart:
return sval + i
next_num = _get_next_series_num_for_list(sorted(sval.itervalues()), unwrap=False)
sval[bid] = next_num
return next_num
smap = {bid:next_series_num(bid, i) for i, bid in enumerate(self.ids)}
if args.do_autonumber:
cache.set_field('series_index', smap)
elif tweaks['series_index_auto_increment'] != 'no_change':
cache.set_field('series_index', {bid:1.0 for bid in self.ids})
if args.comments is not null:
cache.set_field('comments', {bid:args.comments for bid in self.ids})
if args.do_remove_conv:
cache.delete_conversion_options(self.ids)
if args.clear_languages:
cache.set_field('languages', {bid:() for bid in self.ids})
elif args.languages:
cache.set_field('languages', {bid:args.languages for bid in self.ids})
if args.remove_all:
cache.set_field('tags', {bid:() for bid in self.ids})
if args.add or args.remove:
self.db.bulk_modify_tags(self.ids, add=args.add, remove=args.remove)
if self.do_sr:
for book_id in self.ids:
self.s_r_func(book_id)
if self.sr_calls:
for field, book_id_val_map in self.sr_calls.iteritems():
self.refresh_books.update(self.db.new_api.set_field(field, book_id_val_map))
# }}}
class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
s_r_functions = {'' : lambda x: x,
_('Lower Case') : lambda x: icu_lower(x),
_('Upper Case') : lambda x: icu_upper(x),
_('Title Case') : lambda x: titlecase(x),
_('Capitalize') : lambda x: capitalize(x),
}
s_r_match_modes = [_('Character match'),
_('Regular Expression'),
]
s_r_replace_modes = [_('Replace field'),
_('Prepend to field'),
_('Append to field'),
]
def __init__(self, window, rows, model, tab, refresh_books):
ResizableDialog.__init__(self, window)
Ui_MetadataBulkDialog.__init__(self)
self.model = model
self.db = model.db
self.refresh_book_list.setChecked(gprefs['refresh_book_list_on_bulk_edit'])
self.refresh_book_list.toggled.connect(self.save_refresh_booklist)
self.ids = [self.db.id(r) for r in rows]
self.first_title = self.db.title(self.ids[0], index_is_id=True)
self.cover_clone.setToolTip(unicode(self.cover_clone.toolTip()) + ' (%s)' % self.first_title)
self.box_title.setText('<p>' +
_('Editing meta information for <b>%d books</b>') %
len(rows))
self.write_series = False
self.changed = False
self.refresh_books = refresh_books
self.comments = null
self.comments_button.clicked.connect(self.set_comments)
all_tags = self.db.all_tags()
self.tags.update_items_cache(all_tags)
self.remove_tags.update_items_cache(all_tags)
self.initialize_combos()
for f in sorted(self.db.all_formats()):
self.remove_format.addItem(f)
self.remove_format.setCurrentIndex(-1)
self.series.currentIndexChanged[int].connect(self.series_changed)
self.series.editTextChanged.connect(self.series_changed)
self.tag_editor_button.clicked.connect(self.tag_editor)
self.autonumber_series.stateChanged[int].connect(self.auto_number_changed)
self.pubdate.setMinimumDateTime(UNDEFINED_QDATETIME)
self.pubdate_cw = CalendarWidget(self.pubdate)
self.pubdate.setCalendarWidget(self.pubdate_cw)
pubdate_format = tweaks['gui_pubdate_display_format']
if pubdate_format is not None:
self.pubdate.setDisplayFormat(pubdate_format)
self.pubdate.setSpecialValueText(_('Undefined'))
self.clear_pubdate_button.clicked.connect(self.clear_pubdate)
self.pubdate.dateTimeChanged.connect(self.do_apply_pubdate)
self.adddate.setDateTime(QDateTime.currentDateTime())
self.adddate.setMinimumDateTime(UNDEFINED_QDATETIME)
adddate_format = tweaks['gui_timestamp_display_format']
if adddate_format is not None:
self.adddate.setDisplayFormat(adddate_format)
self.adddate.setSpecialValueText(_('Undefined'))
self.clear_adddate_button.clicked.connect(self.clear_adddate)
self.adddate.dateTimeChanged.connect(self.do_apply_adddate)
if len(self.db.custom_field_keys(include_composites=False)) == 0:
self.central_widget.removeTab(1)
else:
self.create_custom_column_editors()
self.prepare_search_and_replace()
self.button_box.clicked.connect(self.button_clicked)
self.button_box.button(QDialogButtonBox.Apply).setToolTip(_(
'Immediately make all changes without closing the dialog. '
'This operation cannot be canceled or undone'))
self.do_again = False
self.central_widget.setCurrentIndex(tab)
geom = gprefs.get('bulk_metadata_window_geometry', None)
if geom is not None:
self.restoreGeometry(bytes(geom))
ct = gprefs.get('bulk_metadata_window_tab', 0)
self.central_widget.setCurrentIndex(ct)
self.languages.init_langs(self.db)
self.languages.setEditText('')
self.authors.setFocus(Qt.OtherFocusReason)
self.exec_()
def set_comments(self):
from calibre.gui2.dialogs.comments_dialog import CommentsDialog
d = CommentsDialog(self, '' if self.comments is null else (self.comments or ''), _('Comments'))
if d.exec_() == d.Accepted:
self.comments = d.textbox.html
b = self.comments_button
b.setStyleSheet('QPushButton { font-weight: bold }')
if unicode(b.text())[-1] != '*':
b.setText(unicode(b.text()) + ' *')
def save_refresh_booklist(self, *args):
gprefs['refresh_book_list_on_bulk_edit'] = bool(self.refresh_book_list.isChecked())
def save_state(self, *args):
gprefs['bulk_metadata_window_geometry'] = \
bytearray(self.saveGeometry())
gprefs['bulk_metadata_window_tab'] = self.central_widget.currentIndex()
def do_apply_pubdate(self, *args):
self.apply_pubdate.setChecked(True)
def clear_pubdate(self, *args):
self.pubdate.setDateTime(UNDEFINED_QDATETIME)
def do_apply_adddate(self, *args):
self.apply_adddate.setChecked(True)
def clear_adddate(self, *args):
self.adddate.setDateTime(UNDEFINED_QDATETIME)
def button_clicked(self, which):
if which == self.button_box.button(QDialogButtonBox.Apply):
self.do_again = True
self.accept()
# S&R {{{
def prepare_search_and_replace(self):
self.search_for.initialize('bulk_edit_search_for')
self.replace_with.initialize('bulk_edit_replace_with')
self.s_r_template.initialize('bulk_edit_template')
self.test_text.initialize('bulk_edit_test_test')
self.all_fields = ['']
self.writable_fields = ['']
fm = self.db.field_metadata
for f in fm:
if (f in ['author_sort'] or
(fm[f]['datatype'] in ['text', 'series', 'enumeration', 'comments']
and fm[f].get('search_terms', None)
and f not in ['formats', 'ondevice', 'series_sort']) or
(fm[f]['datatype'] in ['int', 'float', 'bool', 'datetime'] and
f not in ['id', 'timestamp'])):
self.all_fields.append(f)
self.writable_fields.append(f)
if fm[f]['datatype'] == 'composite':
self.all_fields.append(f)
self.all_fields.sort()
self.all_fields.insert(1, '{template}')
self.writable_fields.sort()
self.search_field.setMaxVisibleItems(25)
self.destination_field.setMaxVisibleItems(25)
self.testgrid.setColumnStretch(1, 1)
self.testgrid.setColumnStretch(2, 1)
offset = 10
self.s_r_number_of_books = min(10, len(self.ids))
for i in range(1,self.s_r_number_of_books+1):
w = QLabel(self.tabWidgetPage3)
w.setText(_('Book %d:')%i)
self.testgrid.addWidget(w, i+offset, 0, 1, 1)
w = QLineEdit(self.tabWidgetPage3)
w.setReadOnly(True)
name = 'book_%d_text'%i
setattr(self, name, w)
self.book_1_text.setObjectName(name)
self.testgrid.addWidget(w, i+offset, 1, 1, 1)
w = QLineEdit(self.tabWidgetPage3)
w.setReadOnly(True)
name = 'book_%d_result'%i
setattr(self, name, w)
self.book_1_text.setObjectName(name)
self.testgrid.addWidget(w, i+offset, 2, 1, 1)
ident_types = sorted(self.db.get_all_identifier_types(), key=sort_key)
self.s_r_dst_ident.setCompleter(QCompleter(ident_types))
try:
self.s_r_dst_ident.setPlaceholderText(_('Enter an identifier type'))
except:
pass
self.s_r_src_ident.addItems(ident_types)
self.main_heading = _(
'<b>You can destroy your library using this feature.</b> '
'Changes are permanent. There is no undo function. '
'You are strongly encouraged to back up your library '
'before proceeding.<p>'
'Search and replace in text fields using character matching '
'or regular expressions. ')
self.character_heading = _(
'In character mode, the field is searched for the entered '
'search text. The text is replaced by the specified replacement '
'text everywhere it is found in the specified field. After '
'replacement is finished, the text can be changed to '
'upper-case, lower-case, or title-case. If the case-sensitive '
'check box is checked, the search text must match exactly. If '
'it is unchecked, the search text will match both upper- and '
'lower-case letters'
)
self.regexp_heading = _(
'In regular expression mode, the search text is an '
'arbitrary python-compatible regular expression. The '
'replacement text can contain backreferences to parenthesized '
'expressions in the pattern. The search is not anchored, '
'and can match and replace multiple times on the same string. '
'The modification functions (lower-case etc) are applied to the '
'matched text, not to the field as a whole. '
'The destination box specifies the field where the result after '
'matching and replacement is to be assigned. You can replace '
'the text in the field, or prepend or append the matched text. '
'See <a href="http://docs.python.org/library/re.html"> '
'this reference</a> for more information on python\'s regular '
'expressions, and in particular the \'sub\' function.'
)
self.search_mode.addItems(self.s_r_match_modes)
self.search_mode.setCurrentIndex(dynamic.get('s_r_search_mode', 0))
self.replace_mode.addItems(self.s_r_replace_modes)
self.replace_mode.setCurrentIndex(0)
self.s_r_search_mode = 0
self.s_r_error = None
self.s_r_obj = None
self.replace_func.addItems(sorted(self.s_r_functions.keys()))
self.search_mode.currentIndexChanged[int].connect(self.s_r_search_mode_changed)
self.search_field.currentIndexChanged[int].connect(self.s_r_search_field_changed)
self.destination_field.currentIndexChanged[int].connect(self.s_r_destination_field_changed)
self.replace_mode.currentIndexChanged[int].connect(self.s_r_paint_results)
self.replace_func.currentIndexChanged[str].connect(self.s_r_paint_results)
self.search_for.editTextChanged[str].connect(self.s_r_paint_results)
self.replace_with.editTextChanged[str].connect(self.s_r_paint_results)
self.test_text.editTextChanged[str].connect(self.s_r_paint_results)
self.comma_separated.stateChanged.connect(self.s_r_paint_results)
self.case_sensitive.stateChanged.connect(self.s_r_paint_results)
self.s_r_src_ident.currentIndexChanged[int].connect(self.s_r_identifier_type_changed)
self.s_r_dst_ident.textChanged.connect(self.s_r_paint_results)
self.s_r_template.lost_focus.connect(self.s_r_template_changed)
self.central_widget.setCurrentIndex(0)
self.search_for.completer().setCaseSensitivity(Qt.CaseSensitive)
self.replace_with.completer().setCaseSensitivity(Qt.CaseSensitive)
self.s_r_template.completer().setCaseSensitivity(Qt.CaseSensitive)
self.s_r_search_mode_changed(self.search_mode.currentIndex())
self.multiple_separator.setFixedWidth(30)
self.multiple_separator.setText(' ::: ')
self.multiple_separator.textChanged.connect(self.s_r_separator_changed)
self.results_count.valueChanged[int].connect(self.s_r_display_bounds_changed)
self.starting_from.valueChanged[int].connect(self.s_r_display_bounds_changed)
self.save_button.clicked.connect(self.s_r_save_query)
self.remove_button.clicked.connect(self.s_r_remove_query)
self.queries = JSONConfig("search_replace_queries")
self.saved_search_name = ''
self.query_field.addItem("")
self.query_field_values = sorted([q for q in self.queries], key=sort_key)
self.query_field.addItems(self.query_field_values)
self.query_field.currentIndexChanged[str].connect(self.s_r_query_change)
self.query_field.setCurrentIndex(0)
self.search_field.setCurrentIndex(0)
self.s_r_search_field_changed(0)
def s_r_sf_itemdata(self, idx):
if idx is None:
idx = self.search_field.currentIndex()
return unicode(self.search_field.itemData(idx) or '')
def s_r_df_itemdata(self, idx):
if idx is None:
idx = self.destination_field.currentIndex()
return unicode(self.destination_field.itemData(idx) or '')
def s_r_get_field(self, mi, field):
if field:
if field == '{template}':
v = SafeFormat().safe_format(
unicode(self.s_r_template.text()), mi, _('S/R TEMPLATE ERROR'), mi)
return [v]
fm = self.db.metadata_for_field(field)
if field == 'sort':
val = mi.get('title_sort', None)
elif fm['datatype'] == 'datetime':
val = mi.format_field(field)[1]
else:
val = mi.get(field, None)
if isinstance(val, (int, float, bool)):
val = str(val)
elif fm['is_csp']:
# convert the csp dict into a list
id_type = unicode(self.s_r_src_ident.currentText())
if id_type:
val = [val.get(id_type, '')]
else:
val = [u'%s:%s'%(t[0], t[1]) for t in val.iteritems()]
if val is None:
val = [] if fm['is_multiple'] else ['']
elif not fm['is_multiple']:
val = [val]
elif fm['datatype'] == 'composite':
val = [v2.strip() for v2 in val.split(fm['is_multiple']['ui_to_list'])]
elif field == 'authors':
val = [v2.replace('|', ',') for v2 in val]
else:
val = []
if not val:
val = ['']
return val
def s_r_display_bounds_changed(self, i):
self.s_r_search_field_changed(self.search_field.currentIndex())
def s_r_template_changed(self):
self.s_r_search_field_changed(self.search_field.currentIndex())
def s_r_identifier_type_changed(self, idx):
self.s_r_search_field_changed(self.search_field.currentIndex())
self.s_r_paint_results(idx)
def s_r_search_field_changed(self, idx):
self.s_r_template.setVisible(False)
self.template_label.setVisible(False)
self.s_r_src_ident_label.setVisible(False)
self.s_r_src_ident.setVisible(False)
if idx == 1: # Template
self.s_r_template.setVisible(True)
self.template_label.setVisible(True)
elif self.s_r_sf_itemdata(idx) == 'identifiers':
self.s_r_src_ident_label.setVisible(True)
self.s_r_src_ident.setVisible(True)
for i in range(0, self.s_r_number_of_books):
w = getattr(self, 'book_%d_text'%(i+1))
mi = self.db.get_metadata(self.ids[i], index_is_id=True)
src = self.s_r_sf_itemdata(idx)
t = self.s_r_get_field(mi, src)
if len(t) > 1:
t = t[self.starting_from.value()-1:
self.starting_from.value()-1 + self.results_count.value()]
w.setText(unicode(self.multiple_separator.text()).join(t))
if self.search_mode.currentIndex() == 0:
self.destination_field.setCurrentIndex(idx)
else:
self.s_r_destination_field_changed(self.destination_field.currentIndex())
self.s_r_paint_results(None)
def s_r_destination_field_changed(self, idx):
self.s_r_dst_ident_label.setVisible(False)
self.s_r_dst_ident.setVisible(False)
txt = self.s_r_df_itemdata(idx)
if not txt:
txt = self.s_r_sf_itemdata(None)
if txt and txt in self.writable_fields:
if txt == 'identifiers':
self.s_r_dst_ident_label.setVisible(True)
self.s_r_dst_ident.setVisible(True)
self.destination_field_fm = self.db.metadata_for_field(txt)
self.s_r_paint_results(None)
def s_r_search_mode_changed(self, val):
self.search_field.clear()
self.destination_field.clear()
if val == 0:
for f in self.writable_fields:
self.search_field.addItem(f if f != 'sort' else 'title_sort', f)
self.destination_field.addItem(f if f != 'sort' else 'title_sort', f)
self.destination_field.setCurrentIndex(0)
self.destination_field.setVisible(False)
self.destination_field_label.setVisible(False)
self.replace_mode.setCurrentIndex(0)
self.replace_mode.setVisible(False)
self.replace_mode_label.setVisible(False)
self.comma_separated.setVisible(False)
self.s_r_heading.setText('<p>'+self.main_heading + self.character_heading)
else:
self.search_field.blockSignals(True)
self.destination_field.blockSignals(True)
for f in self.all_fields:
self.search_field.addItem(f if f != 'sort' else 'title_sort', f)
for f in self.writable_fields:
self.destination_field.addItem(f if f != 'sort' else 'title_sort', f)
self.search_field.blockSignals(False)
self.destination_field.blockSignals(False)
self.destination_field.setVisible(True)
self.destination_field_label.setVisible(True)
self.replace_mode.setVisible(True)
self.replace_mode_label.setVisible(True)
self.comma_separated.setVisible(True)
self.s_r_heading.setText('<p>'+self.main_heading + self.regexp_heading)
self.s_r_paint_results(None)
def s_r_separator_changed(self, txt):
self.s_r_search_field_changed(self.search_field.currentIndex())
def s_r_set_colors(self):
if self.s_r_error is not None:
col = 'rgb(255, 0, 0, 20%)'
self.test_result.setText(self.s_r_error.message)
else:
col = 'rgb(0, 255, 0, 20%)'
self.test_result.setStyleSheet('QLineEdit { color: black; '
'background-color: %s; }'%col)
for i in range(0,self.s_r_number_of_books):
getattr(self, 'book_%d_result'%(i+1)).setText('')
def s_r_func(self, match):
rfunc = self.s_r_functions[unicode(self.replace_func.currentText())]
rtext = unicode(self.replace_with.text())
rtext = match.expand(rtext)
return rfunc(rtext)
def s_r_do_regexp(self, mi):
src_field = self.s_r_sf_itemdata(None)
src = self.s_r_get_field(mi, src_field)
result = []
rfunc = self.s_r_functions[unicode(self.replace_func.currentText())]
for s in src:
t = self.s_r_obj.sub(self.s_r_func, s)
if self.search_mode.currentIndex() == 0:
t = rfunc(t)
result.append(t)
return result
def s_r_do_destination(self, mi, val):
src = self.s_r_sf_itemdata(None)
if src == '':
return ''
dest = self.s_r_df_itemdata(None)
if dest == '':
if (src == '{template}' or
self.db.metadata_for_field(src)['datatype'] == 'composite'):
raise Exception(_('You must specify a destination when source is '
'a composite field or a template'))
dest = src
dest_mode = self.replace_mode.currentIndex()
if self.destination_field_fm['is_csp']:
dest_ident = unicode(self.s_r_dst_ident.text())
if not dest_ident or (src == 'identifiers' and dest_ident == '*'):
raise Exception(_('You must specify a destination identifier type'))
if self.destination_field_fm['is_multiple']:
if self.comma_separated.isChecked():
splitter = self.destination_field_fm['is_multiple']['ui_to_list']
res = []
for v in val:
res.extend([x.strip() for x in v.split(splitter) if x.strip()])
val = res
else:
val = [v.replace(',', '') for v in val]
if dest_mode != 0:
dest_val = mi.get(dest, '')
if self.db.metadata_for_field(dest)['is_csp']:
dst_id_type = unicode(self.s_r_dst_ident.text())
if dst_id_type:
dest_val = [dest_val.get(dst_id_type, '')]
else:
# convert the csp dict into a list
dest_val = [u'%s:%s'%(t[0], t[1]) for t in dest_val.iteritems()]
if dest_val is None:
dest_val = []
elif not isinstance(dest_val, list):
dest_val = [dest_val]
else:
dest_val = []
if dest_mode == 1:
val.extend(dest_val)
elif dest_mode == 2:
val[0:0] = dest_val
return val
def s_r_replace_mode_separator(self):
if self.comma_separated.isChecked():
return ','
return ''
def s_r_paint_results(self, txt):
self.s_r_error = None
self.s_r_set_colors()
if self.case_sensitive.isChecked():
flags = 0
else:
flags = re.I
flags |= re.UNICODE
try:
stext = unicode(self.search_for.text())
if not stext:
raise Exception(_('You must specify a search expression in the "Search for" field'))
if self.search_mode.currentIndex() == 0:
self.s_r_obj = re.compile(re.escape(stext), flags)
else:
self.s_r_obj = re.compile(stext, flags)
except Exception as e:
self.s_r_obj = None
self.s_r_error = e
self.s_r_set_colors()
return
try:
self.test_result.setText(self.s_r_obj.sub(self.s_r_func,
unicode(self.test_text.text())))
except Exception as e:
self.s_r_error = e
self.s_r_set_colors()
return
for i in range(0,self.s_r_number_of_books):
mi = self.db.get_metadata(self.ids[i], index_is_id=True)
wr = getattr(self, 'book_%d_result'%(i+1))
try:
result = self.s_r_do_regexp(mi)
t = self.s_r_do_destination(mi, result)
if len(t) > 1 and self.destination_field_fm['is_multiple']:
t = t[self.starting_from.value()-1:
self.starting_from.value()-1 + self.results_count.value()]
t = unicode(self.multiple_separator.text()).join(t)
else:
t = self.s_r_replace_mode_separator().join(t)
wr.setText(t)
except Exception as e:
self.s_r_error = e
self.s_r_set_colors()
break
def do_search_replace(self, book_id):
source = self.s_r_sf_itemdata(None)
if not source or not self.s_r_obj:
return
dest = self.s_r_df_itemdata(None)
if not dest:
dest = source
dfm = self.db.field_metadata[dest]
mi = self.db.new_api.get_proxy_metadata(book_id)
val = self.s_r_do_regexp(mi)
val = self.s_r_do_destination(mi, val)
if dfm['is_multiple']:
if dfm['is_csp']:
# convert the colon-separated pair strings back into a dict,
# which is what set_identifiers wants
dst_id_type = unicode(self.s_r_dst_ident.text())
if dst_id_type and dst_id_type != '*':
v = ''.join(val)
ids = mi.get(dest)
ids[dst_id_type] = v
val = ids
else:
try:
val = dict([(t.split(':')) for t in val])
except:
raise Exception(_('Invalid identifier string. It must be a '
'comma-separated list of pairs of '
'strings separated by a colon'))
else:
val = self.s_r_replace_mode_separator().join(val)
if dest == 'title' and len(val) == 0:
val = _('Unknown')
self.set_field_calls[dest][book_id] = val
# }}}
def create_custom_column_editors(self):
w = self.central_widget.widget(1)
layout = QGridLayout()
self.custom_column_widgets, self.__cc_spacers = \
populate_metadata_page(layout, self.db, self.ids, parent=w,
two_column=False, bulk=True)
w.setLayout(layout)
self.__custom_col_layouts = [layout]
ans = self.custom_column_widgets
for i in range(len(ans)-1):
w.setTabOrder(ans[i].widgets[-1], ans[i+1].widgets[1])
for c in range(2, len(ans[i].widgets), 2):
w.setTabOrder(ans[i].widgets[c-1], ans[i].widgets[c+1])
def initialize_combos(self):
        self.initialize_authors()
self.initialize_series()
self.initialize_publisher()
for x in ('authors', 'publisher', 'series'):
x = getattr(self, x)
x.setSizeAdjustPolicy(x.AdjustToMinimumContentsLengthWithIcon)
x.setMinimumContentsLength(25)
    def initialize_authors(self):
all_authors = self.db.all_authors()
all_authors.sort(key=lambda x : sort_key(x[1]))
self.authors.set_separator('&')
self.authors.set_space_before_sep(True)
self.authors.set_add_separator(tweaks['authors_completer_append_separator'])
self.authors.update_items_cache(self.db.all_author_names())
self.authors.show_initial_value('')
def initialize_series(self):
all_series = self.db.all_series()
all_series.sort(key=lambda x : sort_key(x[1]))
self.series.set_separator(None)
self.series.update_items_cache([x[1] for x in all_series])
self.series.show_initial_value('')
def initialize_publisher(self):
all_publishers = self.db.all_publishers()
all_publishers.sort(key=lambda x : sort_key(x[1]))
self.publisher.set_separator(None)
self.publisher.update_items_cache([x[1] for x in all_publishers])
self.publisher.show_initial_value('')
def tag_editor(self, *args):
d = TagEditor(self, self.db, None)
d.exec_()
if d.result() == QDialog.Accepted:
tag_string = ', '.join(d.tags)
self.tags.setText(tag_string)
self.tags.update_items_cache(self.db.all_tags())
self.remove_tags.update_items_cache(self.db.all_tags())
def auto_number_changed(self, state):
if state:
self.series_numbering_restarts.setEnabled(True)
self.series_start_number.setEnabled(True)
else:
self.series_numbering_restarts.setEnabled(False)
self.series_numbering_restarts.setChecked(False)
self.series_start_number.setEnabled(False)
self.series_start_number.setValue(1)
def reject(self):
self.save_state()
ResizableDialog.reject(self)
def accept(self):
self.save_state()
if len(self.ids) < 1:
return QDialog.accept(self)
try:
source = self.s_r_sf_itemdata(None)
except:
source = ''
do_sr = source and self.s_r_obj
if self.s_r_error is not None and do_sr:
error_dialog(self, _('Search/replace invalid'),
_('Search pattern is invalid: %s')%self.s_r_error.message,
show=True)
return False
self.changed = bool(self.ids)
# Cache values from GUI so that Qt widgets are not used in
# non GUI thread
for w in getattr(self, 'custom_column_widgets', []):
w.gui_val
remove_all = self.remove_all_tags.isChecked()
remove = []
if not remove_all:
remove = unicode(self.remove_tags.text()).strip().split(',')
add = unicode(self.tags.text()).strip().split(',')
au = unicode(self.authors.text())
aus = unicode(self.author_sort.text())
do_aus = self.author_sort.isEnabled()
rating = self.rating.value()
pub = unicode(self.publisher.text())
do_series = self.write_series
clear_series = self.clear_series.isChecked()
clear_pub = self.clear_pub.isChecked()
series = unicode(self.series.currentText()).strip()
do_autonumber = self.autonumber_series.isChecked()
do_series_restart = self.series_numbering_restarts.isChecked()
series_start_value = self.series_start_number.value()
do_remove_format = self.remove_format.currentIndex() > -1
remove_format = unicode(self.remove_format.currentText())
do_swap_ta = self.swap_title_and_author.isChecked()
do_remove_conv = self.remove_conversion_settings.isChecked()
do_auto_author = self.auto_author_sort.isChecked()
do_title_case = self.change_title_to_title_case.isChecked()
do_title_sort = self.update_title_sort.isChecked()
clear_languages = self.clear_languages.isChecked()
restore_original = self.restore_original.isChecked()
languages = self.languages.lang_codes
pubdate = adddate = None
if self.apply_pubdate.isChecked():
pubdate = qt_to_dt(self.pubdate.dateTime())
if self.apply_adddate.isChecked():
adddate = qt_to_dt(self.adddate.dateTime())
cover_action = None
if self.cover_remove.isChecked():
cover_action = 'remove'
elif self.cover_generate.isChecked():
cover_action = 'generate'
elif self.cover_from_fmt.isChecked():
cover_action = 'fromfmt'
elif self.cover_trim.isChecked():
cover_action = 'trim'
elif self.cover_clone.isChecked():
cover_action = 'clone'
args = Settings(remove_all, remove, add, au, aus, do_aus, rating, pub, do_series,
do_autonumber, do_remove_format, remove_format, do_swap_ta,
do_remove_conv, do_auto_author, series, do_series_restart,
series_start_value, do_title_case, cover_action, clear_series, clear_pub,
pubdate, adddate, do_title_sort, languages, clear_languages,
restore_original, self.comments)
self.set_field_calls = defaultdict(dict)
bb = MyBlockingBusy(args, self.ids, self.db, self.refresh_books,
getattr(self, 'custom_column_widgets', []),
self.do_search_replace, do_sr, self.set_field_calls, parent=self)
# The metadata backup thread causes database commits
# which can slow down bulk editing of large numbers of books
self.model.stop_metadata_backup()
try:
bb.exec_()
finally:
self.model.start_metadata_backup()
bb.thread = bb.db = bb.cc_widgets = None
if bb.error is not None:
return error_dialog(self, _('Failed'),
bb.error[0], det_msg=bb.error[1],
show=True)
dynamic['s_r_search_mode'] = self.search_mode.currentIndex()
self.db.clean()
return QDialog.accept(self)
def series_changed(self, *args):
self.write_series = bool(unicode(self.series.currentText()).strip())
self.autonumber_series.setEnabled(True)
def s_r_remove_query(self, *args):
if self.query_field.currentIndex() == 0:
return
if not question_dialog(self, _("Delete saved search/replace"),
_("The selected saved search/replace will be deleted. "
"Are you sure?")):
return
item_id = self.query_field.currentIndex()
item_name = unicode(self.query_field.currentText())
self.query_field.blockSignals(True)
self.query_field.removeItem(item_id)
self.query_field.blockSignals(False)
self.query_field.setCurrentIndex(0)
if item_name in self.queries.keys():
del(self.queries[item_name])
self.queries.commit()
def s_r_save_query(self, *args):
names = ['']
names.extend(self.query_field_values)
try:
dex = names.index(self.saved_search_name)
except:
dex = 0
name = ''
while not name:
name, ok = QInputDialog.getItem(self, _('Save search/replace'),
_('Search/replace name:'), names, dex, True)
if not ok:
return
if not name:
error_dialog(self, _("Save search/replace"),
_("You must provide a name."), show=True)
new = True
name = unicode(name)
if name in self.queries.keys():
if not question_dialog(self, _("Save search/replace"),
_("That saved search/replace already exists and will be overwritten. "
"Are you sure?")):
return
new = False
query = {}
query['name'] = name
query['search_field'] = unicode(self.search_field.currentText())
query['search_mode'] = unicode(self.search_mode.currentText())
query['s_r_template'] = unicode(self.s_r_template.text())
query['s_r_src_ident'] = unicode(self.s_r_src_ident.currentText())
query['search_for'] = unicode(self.search_for.text())
query['case_sensitive'] = self.case_sensitive.isChecked()
query['replace_with'] = unicode(self.replace_with.text())
query['replace_func'] = unicode(self.replace_func.currentText())
query['destination_field'] = unicode(self.destination_field.currentText())
query['s_r_dst_ident'] = unicode(self.s_r_dst_ident.text())
query['replace_mode'] = unicode(self.replace_mode.currentText())
query['comma_separated'] = self.comma_separated.isChecked()
query['results_count'] = self.results_count.value()
query['starting_from'] = self.starting_from.value()
query['multiple_separator'] = unicode(self.multiple_separator.text())
self.queries[name] = query
self.queries.commit()
if new:
self.query_field.blockSignals(True)
self.query_field.clear()
self.query_field.addItem('')
self.query_field_values = sorted([q for q in self.queries], key=sort_key)
self.query_field.addItems(self.query_field_values)
self.query_field.blockSignals(False)
self.query_field.setCurrentIndex(self.query_field.findText(name))
def s_r_query_change(self, item_name):
if not item_name:
self.s_r_reset_query_fields()
self.saved_search_name = ''
return
item = self.queries.get(unicode(item_name), None)
if item is None:
self.s_r_reset_query_fields()
return
self.saved_search_name = item_name
def set_text(attr, key):
try:
attr.setText(item[key])
except:
pass
def set_checked(attr, key):
try:
attr.setChecked(item[key])
except:
attr.setChecked(False)
def set_value(attr, key):
try:
attr.setValue(int(item[key]))
except:
attr.setValue(0)
def set_index(attr, key):
try:
attr.setCurrentIndex(attr.findText(item[key]))
except:
attr.setCurrentIndex(0)
set_index(self.search_mode, 'search_mode')
set_index(self.search_field, 'search_field')
set_text(self.s_r_template, 's_r_template')
self.s_r_template_changed() # simulate gain/loss of focus
set_index(self.s_r_src_ident, 's_r_src_ident')
set_text(self.s_r_dst_ident, 's_r_dst_ident')
set_text(self.search_for, 'search_for')
set_checked(self.case_sensitive, 'case_sensitive')
set_text(self.replace_with, 'replace_with')
set_index(self.replace_func, 'replace_func')
set_index(self.destination_field, 'destination_field')
set_index(self.replace_mode, 'replace_mode')
set_checked(self.comma_separated, 'comma_separated')
set_value(self.results_count, 'results_count')
set_value(self.starting_from, 'starting_from')
set_text(self.multiple_separator, 'multiple_separator')
def s_r_reset_query_fields(self):
# Don't reset the search mode. The user will probably want to use it
# as it was
self.search_field.setCurrentIndex(0)
self.s_r_src_ident.setCurrentIndex(0)
self.s_r_template.setText("")
self.search_for.setText("")
self.case_sensitive.setChecked(False)
self.replace_with.setText("")
self.replace_func.setCurrentIndex(0)
self.destination_field.setCurrentIndex(0)
self.s_r_dst_ident.setText('')
self.replace_mode.setCurrentIndex(0)
self.comma_separated.setChecked(True)
self.results_count.setValue(999)
self.starting_from.setValue(1)
self.multiple_separator.setText(" ::: ")
|
sharad/calibre
|
src/calibre/gui2/dialogs/metadata_bulk.py
|
Python
|
gpl-3.0
| 49,960
|
# coding=utf-8
import unittest
"""1027. Longest Arithmetic Sequence
https://leetcode.com/problems/longest-arithmetic-sequence/description/
Given an array `A` of integers, return the **length** of the longest
arithmetic subsequence in `A`.
Recall that a _subsequence_ of `A` is a list `A[i_1], A[i_2], ..., A[i_k]`
with `0 <= i_1 < i_2 < ... < i_k <= A.length - 1`, and that a sequence `B` is
_arithmetic_ if `B[i+1] - B[i]` are all the same value (for `0 <= i < B.length
- 1`).
**Example 1:**
**Input:** [3,6,9,12]
**Output:** 4
**Explanation:**
The whole array is an arithmetic sequence with steps of length = 3.
**Example 2:**
**Input:** [9,4,7,2,10]
**Output:** 3
**Explanation:**
The longest arithmetic subsequence is [4,7,10].
**Example 3:**
**Input:** [20,1,15,3,10,5,8]
**Output:** 4
**Explanation:**
The longest arithmetic subsequence is [20,15,10,5].
**Note:**
1. `2 <= A.length <= 2000`
2. `0 <= A[i] <= 10000`
Similar Questions:
"""
class Solution(object):
def longestArithSeqLength(self, A):
"""
:type A: List[int]
:rtype: int
"""
class T(unittest.TestCase):
def test(self):
pass
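        # Sanity check using Example 1 from the problem statement above.
        self.assertEqual(Solution().longestArithSeqLength([3, 6, 9, 12]), 4)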
if __name__ == "__main__":
unittest.main()
|
openqt/algorithms
|
leetcode/python/lc1027-longest-arithmetic-sequence.py
|
Python
|
gpl-3.0
| 1,327
|
from typing import List, Tuple, Callable
import gdspy
import numpy as np
from numpy import diff, floor, ceil, zeros, hstack, newaxis
import pickle
import warnings
import copy
from spins.gridlock.float_raster import raster_1D, raster_2D
from spins.gridlock import GridError, Direction, GridType
from spins.gridlock._helpers import is_scalar
class Grid:
"""
Simulation grid generator intended for electromagnetic simulations.
Can be used to generate non-uniform rectangular grids (the entire grid
is generated based on the coordinates of the boundary points). Also does
straightforward natural <-> grid unit conversion.
The Grid object must be specified with shifts that generate the primary grid and a
complementary grid from grid specified by the coordinates of the boundary points. In the
context of EM simulations, the primary grid is the E-field grid and the complementary
grid is the H-field grid. More formally, the primary grid should have vertices at the
body-centers of the complementary grid and vice-versa. This relationship between the
primary and complementary grid is assumed while aliasing the drawn structures onto the grid
Objects on the grid can be drawn via the draw_ functions (e.g. draw_cuboid, draw_cylinder,
draw_slab). Once all the objects have been drawn on the grid, the render() function can be
called to raster the drawn objects on the grid. It is assumed that the object drawn latest is
the correct object and should replace any of the older objects being drawn in case of an intersection
with the older objects.
self.grids[i][a,b,c] contains the value of epsilon for the cell located at
(xyz[0][a]+dxyz[0][a]*shifts[i, 0],
xyz[1][b]+dxyz[1][b]*shifts[i, 1],
xyz[2][c]+dxyz[2][c]*shifts[i, 2]).
You can get raw edge coordinates (exyz),
center coordinates (xyz),
cell sizes (dxyz),
from the properties named as above, or get them for a given grid by using the
self.shifted_*xyz(which_shifts) functions.
It is tricky to determine the size of the right-most cell after shifting,
since its right boundary should shift by shifts[i][a] * dxyz[a][dxyz[a].size],
where the dxyz element refers to a cell that does not exist.
Because of this, we either assume this 'ghost' cell is the same size as the last
real cell, or, if self.periodic[a] is set to True, the same size as the first cell.
"""
# Intended for use as static constants
Yee_Shifts_E = 0.5 * np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
dtype=float) # type: np.ndarray
Yee_Shifts_H = 0.5 * np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]],
dtype=float) # type: np.ndarray
@property
def dxyz(self) -> List[np.ndarray]:
"""
Cell sizes for each axis, no shifts applied
:return: List of 3 ndarrays of cell sizes
"""
return [diff(self.exyz[a]) for a in range(3)]
@property
def xyz(self) -> List[np.ndarray]:
"""
Cell centers for each axis, no shifts applied
:return: List of 3 ndarrays of cell edges
"""
return [self.exyz[a][:-1] + self.dxyz[a] / 2.0 for a in range(3)]
@property
def shape(self) -> np.ndarray:
"""
The number of cells in x, y, and z
:return: ndarray [x_centers.size, y_centers.size, z_centers.size]
"""
        # Subtract one because we keep track of edges.
return np.array([coord.size - 1 for coord in self.exyz], dtype=int)
@property
def dxyz_with_ghost(self) -> List[np.ndarray]:
"""
Gives dxyz with an additional 'ghost' cell at the end, whose value depends
on whether or not the axis has periodic boundary conditions. See main description
above to learn why this is necessary.
If periodic, final edge shifts same amount as first
Otherwise, final edge shifts same amount as second-to-last
:return: list of [dxs, dys, dzs] with each element same length as elements of self.xyz
"""
el = [0 if p else -1 for p in self.periodic]
return [
hstack((self.dxyz[a], self.dxyz[a][e]))
for a, e in zip(range(3), el)
]
@property
def center(self) -> np.ndarray:
"""
Center position of the entire grid, no shifts applied
:return: ndarray [x_center, y_center, z_center]
"""
# center is just average of first and last xyz, which is just the average of the
# first two and last two exyz
centers = [(self.exyz[a][0] + self.exyz[a][1] + self.exyz[a][-2] +
self.exyz[a][-1]) / 4.0 for a in range(3)]
return np.array(centers, dtype=float)
@property
def dxyz_limits(self) -> Tuple[np.ndarray, np.ndarray]:
"""
Returns the minimum and maximum cell size for each axis, as a tuple of two 3-element
ndarrays. No shifts are applied, so these are extreme bounds on these values (as a
weighted average is performed when shifting).
:return: List of 2 ndarrays, d_min=[min(dx), min(dy), min(dz)] and d_max=[...]
"""
d_min = np.array([min(self.dxyz[a]) for a in range(3)], dtype=float)
d_max = np.array([max(self.dxyz[a]) for a in range(3)], dtype=float)
return d_min, d_max
def shifted_exyz(self,
which_shifts: int or None,
which_grid: GridType = GridType.PRIM) -> List[np.ndarray]:
"""
Returns edges for which_shifts.
:param which_shifts: Which grid (which shifts) to use, or None for unshifted
        :param which_grid: GridType.PRIM for the primary grid and GridType.COMP for the complementary grid
:return: List of 3 ndarrays of cell edges
"""
if which_shifts is None:
return self.exyz
dxyz = self.dxyz_with_ghost
if which_grid.value == 0:
shifts = self.shifts[which_shifts, :]
sexyz = [self.exyz[a] + dxyz[a] * shifts[a] for a in range(3)]
else:
shifts = self.comp_shifts[which_shifts, :]
            # Adding ghost cell to the beginning of the complementary grid
sexyz = [
np.append(self.exyz[a][0] - dxyz[a][-1] * shifts[a],
self.exyz[a] + dxyz[a] * shifts[a]) for a in range(3)
]
            # Removing the ghost cell if the complementary grid is not shifted in a particular direction
sexyz = [
sexyz[a][1:] if shifts[a] == 0 else sexyz[a] for a in range(3)
]
return sexyz
def shifted_dxyz(self,
which_shifts: int or None,
which_grid: GridType = GridType.PRIM) -> List[np.ndarray]:
"""
Returns cell sizes for which_shifts.
:param which_shifts: Which grid (which shifts) to use, or None for unshifted
:param which_grid: GridType.PRIM for the primary grid and GridType.COMP for complementary grid
:return: List of 3 ndarrays of cell sizes
"""
if which_shifts is None:
return self.dxyz
dxyz = self.dxyz_with_ghost
if which_grid.value == 0:
shifts = self.shifts[which_shifts, :]
sdxyz = [(dxyz[a][:-1] * (1 - shifts[a]) + dxyz[a][1:] * shifts[a])
for a in range(3)]
else:
shifts = self.comp_shifts[which_shifts, :]
# Adding ghost cell to the beginning of the complementary grid
sdxyz = [
np.append(
dxyz[a][-1] * (1 - shifts[a]) + dxyz[a][0] * shifts[a],
dxyz[a][:-1] * (1 - shifts[a]) + dxyz[a][1:] * shifts[a])
for a in range(3)
]
# Removing the ghost cell if the complementary grid is not shifted in a particular direction
sdxyz = [
sdxyz[a][1:] if shifts[a] == 0 else sdxyz[a] for a in range(3)
]
return sdxyz
def shifted_xyz(self,
which_shifts: int or None,
which_grid: GridType = GridType.PRIM) -> List[np.ndarray]:
"""
Returns cell centers for which_shifts.
:param which_shifts: Which grid (which shifts) to use, or None for unshifted
        :param which_grid: GridType.PRIM for the primary grid and GridType.COMP for the complementary grid
:return: List of 3 ndarrays of cell centers
"""
if which_shifts is None:
return self.xyz
exyz = self.shifted_exyz(which_shifts, which_grid)
dxyz = self.shifted_dxyz(which_shifts, which_grid)
return [exyz[a][:-1] + dxyz[a] / 2.0 for a in range(3)]
def autoshifted_dxyz(self):
"""
Return cell widths, with each dimension shifted by the corresponding shifts.
:return: [grid.shifted_dxyz(which_shifts=a)[a] for a in range(3)]
"""
return [
self.shifted_dxyz((a + 1) % 3, GridType.COMP)[a][:-1]
for a in range(3)
]
def ind2pos(self,
ind: np.ndarray or List,
which_shifts: int or None,
which_grid: GridType = GridType.PRIM,
round_ind: bool = True,
check_bounds: bool = True) -> np.ndarray:
"""
Returns the natural position corresponding to the specified indices.
The resulting position is clipped to the bounds of the grid
(to cell centers if round_ind=True, or cell outer edges if round_ind=False)
:param ind: Indices of the position. Can be fractional. (3-element ndarray or list)
:param which_shifts: which grid number (shifts) to use
:param round_ind: Whether to round ind to the nearest integer position before indexing
(default True)
:param check_bounds: Whether to raise an GridError if the provided ind is outside of
the grid, as defined above (centers if round_ind, else edges) (default True)
:return: 3-element ndarray specifying the natural position
:raises: GridError
"""
if which_shifts is not None and which_shifts >= self.shifts.shape[0]:
raise GridError('Invalid shifts')
ind = np.array(ind, dtype=float)
if check_bounds:
if round_ind:
low_bound = 0.0
high_bound = -1
else:
low_bound = -0.5
high_bound = -0.5
if (ind < low_bound).any() or (ind > self.shape - high_bound).any():
raise GridError('Position outside of grid: {}'.format(ind))
if round_ind:
rind = np.clip(np.round(ind), 0, self.shape - 1)
sxyz = self.shifted_xyz(which_shifts, which_grid)
            position = [sxyz[a][int(rind[a])] for a in range(3)]
else:
sexyz = self.shifted_exyz(which_shifts, which_grid)
position = [
np.interp(ind[a],
np.arange(sexyz[a].size) - 0.5, sexyz[a])
for a in range(3)
]
return np.array(position, dtype=float)
def pos2ind(self,
r: np.ndarray or List,
which_shifts: int or None,
which_grid: GridType = GridType.PRIM,
round_ind: bool = True,
check_bounds: bool = True) -> np.ndarray:
"""
Returns the indices corresponding to the specified natural position.
The resulting position is clipped to within the outer centers of the grid.
:param r: Natural position that we will convert into indices (3-element ndarray or list)
:param which_shifts: which grid number (shifts) to use
:param round_ind: Whether to round the returned indices to the nearest integers.
:param check_bounds: Whether to throw an GridError if r is outside the grid edges
:return: 3-element ndarray specifying the indices
:raises: GridError
"""
r = np.squeeze(r)
if r.size != 3:
raise GridError('r must be 3-element vector: {}'.format(r))
if (which_shifts is not None) and (which_shifts >=
self.shifts.shape[0]):
            raise GridError('Invalid shifts')
sexyz = self.shifted_exyz(which_shifts, which_grid)
if check_bounds:
for a in range(3):
if self.shape[a] > 1 and (r[a] < sexyz[a][0] or
r[a] > sexyz[a][-1]):
raise GridError('Position[{}] outside of grid!'.format(a))
grid_pos = zeros((3,))
for a in range(3):
xi = np.digitize(r[a],
sexyz[a]) - 1 # Figure out which cell we're in
xi_clipped = np.clip(
xi, 0, sexyz[a].size - 2) # Clip back into grid bounds
# No need to interpolate if round_ind is true or we were outside the grid
if round_ind or xi != xi_clipped:
grid_pos[a] = xi_clipped
else:
# Interpolate
x = self.shifted_exyz(which_shifts, which_grid)[a][xi]
dx = self.shifted_dxyz(which_shifts, which_grid)[a][xi]
f = (r[a] - x) / dx
# Clip to centers
grid_pos[a] = np.clip(xi + f, 0, sexyz[a].size - 1)
return grid_pos
def coord2ind(self,
r: float,
axis: Direction,
which_shifts: int,
which_grid: GridType = GridType.PRIM,
round_ind: bool = True,
check_bounds: bool = True):
'''
Converts a single coordinate to index
'''
point_3D = np.array(
[r if a == axis.value else self.center[a] for a in range(3)])
ind_3D = self.pos2ind(point_3D, which_shifts, which_grid, round_ind,
check_bounds)
return ind_3D[axis.value]
def __init__(self,
pixel_edge_coordinates: List[np.ndarray],
ext_dir: Direction = Direction.z,
shifts: np.ndarray or List = Yee_Shifts_E,
comp_shifts: np.ndarray or List = Yee_Shifts_H,
initial: float or np.ndarray or List[float] or
List[np.ndarray] = (1.0,) * 3,
num_grids: int = None,
periodic: bool or List[bool] = False):
        # Background permittivity and fraction of background permittivity in the grid
self.grids_bg = [] # type: List[np.ndarray]
self.frac_bg = [] # type: List[np.ndarray]
# [[x0 y0 z0], [x1 y1 z1], ...] offsets for primary grid 0,1,...
self.exyz = [np.unique(pixel_edge_coordinates[i]) for i in range(3)]
for i in range(3):
if len(self.exyz[i]) != len(pixel_edge_coordinates[i]):
warnings.warn(
'Dimension {} had duplicate edge coordinates'.format(i))
if is_scalar(periodic):
self.periodic = [periodic] * 3
else:
self.periodic = [False] * 3
self.shifts = np.array(shifts, dtype=float)
self.comp_shifts = np.array(comp_shifts, dtype=float)
        if self.shifts.shape[1] != 3:
            raise GridError(
                'Misshapen shifts on the primary grid; second axis size should be 3,'
                ' shape is {}'.format(self.shifts.shape))
        if self.comp_shifts.shape[1] != 3:
            raise GridError(
                'Misshapen shifts on the complementary grid: second axis size should be 3,'
                ' shape is {}'.format(self.comp_shifts.shape))
        if self.comp_shifts.shape[0] != self.shifts.shape[0]:
            raise GridError(
                'Inconsistent number of shifts in the primary and complementary grid')
        if not ((self.shifts >= 0).all() and (self.comp_shifts >= 0).all()):
            raise GridError(
                'Shifts are required to be non-negative for both primary and complementary grid')
num_shifts = self.shifts.shape[0]
if num_grids is None:
num_grids = num_shifts
elif num_grids > num_shifts:
raise GridError(
'Number of grids exceeds number of shifts (%u)' % num_shifts)
grids_shape = hstack((num_grids, self.shape))
if is_scalar(initial):
self.grids_bg = np.full(grids_shape, initial, dtype=complex)
else:
if len(initial) < num_grids:
raise GridError('Too few initial grids specified!')
self.grids_bg = [None] * num_grids
for i in range(num_grids):
if is_scalar(initial[i]):
if initial[i] is not None:
self.grids_bg[i] = np.full(
self.shape, initial[i], dtype=complex)
else:
if not np.array_equal(initial[i].shape, self.shape):
raise GridError(
'Initial grid sizes must match given coordinates')
self.grids_bg[i] = initial[i]
if isinstance(ext_dir, Direction):
self.ext_dir = ext_dir.value
elif is_scalar(ext_dir):
if ext_dir in range(3):
self.ext_dir = ext_dir
else:
raise GridError('Invalid extrusion direction')
else:
raise GridError('Invalid extrusion direction')
self.grids = np.full(
grids_shape, 0.0,
dtype=complex) # contains the rendering of objects on the grid
self.frac_bg = np.full(
grids_shape, 1.0,
dtype=complex) # contains the fraction of background permittivity
self.planar_dir = np.delete(range(3), self.ext_dir)
self.list_polygons = [
] # List of polygons corresponding to each block specified by user
self.layer_polygons = [
] # List of polygons after bifurcating extrusion direction into layers
self.reduced_layer_polygons = [
] # List of polygons after removing intersections
self.list_z = [
] # List of z coordinates of the different blocks specified by user
self.layer_z = [
] # List of z coordinates of the different distinct layers
@staticmethod
def load(filename: str) -> 'Grid':
"""
Load a grid from a file
:param filename: Filename to load from.
"""
with open(filename, 'rb') as f:
tmp_dict = pickle.load(f)
g = Grid([[-1, 1]] * 3)
g.__dict__.update(tmp_dict)
return g
def save(self, filename: str):
"""
Save to file.
:param filename: Filename to save to.
"""
with open(filename, 'wb') as f:
pickle.dump(self.__dict__, f, protocol=2)
def copy(self):
"""
Return a deep copy of the grid.
:return: Deep copy of the grid.
"""
return copy.deepcopy(self)
def draw_polygon(self, center: np.ndarray, polygon: np.ndarray,
thickness: float, eps: float or List[float]):
"""
Draws a polygon with coordinates in polygon and thickness
Note on order of coordinates in polygon -
        If ext_dir = x, then polygon has coordinates of form (y,z)
           ext_dir = y, then polygon has coordinates of form (x,z)
           ext_dir = z, then polygon has coordinates of form (x,y)
"""
center = np.array(center)
polygon = np.array(polygon)
# Validating input arguments
if polygon.ndim != 2 or polygon.shape[1] != 2:
raise GridError(
'Invalid format for specifying polygon - must be a Nx2 array')
if polygon.shape[0] <= 2:
raise GridError(
'Malformed Polygon - must contain more than two points')
if center.ndim != 1 or center.size != 3:
raise GridError('Invalid format for the polygon center')
if (not is_scalar(thickness)) or thickness <= 0:
raise GridError('Invalid thickness')
if is_scalar(eps):
eps = np.ones(self.shifts.shape[0]) * eps
        elif eps.ndim != 1 or eps.size != self.shifts.shape[0]:
            raise GridError(
'Invalid permittivity - must be scalar or vector of length equalling number of grids'
)
# Translating polygon by its center
polygon_translated = polygon + np.tile(center[self.planar_dir],
(polygon.shape[0], 1))
self.list_polygons.append((polygon_translated, eps))
        # Adding the z-coordinates of the added layers
self.list_z.append([
center[self.ext_dir] - 0.5 * thickness,
center[self.ext_dir] + 0.5 * thickness
])
def draw_cuboid(self, center: np.ndarray, extent: np.ndarray, eps: float or
List[float]):
"""
Draw a cuboid with permittivity epsilon
"""
center = np.array(center)
extent = np.array(extent)
# Validating input parameters
if center.ndim != 1 or center.size != 3:
raise GridError('Invalid center coordinate')
if extent.ndim != 1 or extent.size != 3:
raise GridError('Invalid cuboid lengths')
if is_scalar(eps):
eps = np.ones(self.shifts.shape[0]) * eps
if eps.ndim != 1 or eps.size != self.shifts.shape[0]:
raise GridError(
'Invalid permittivity - must be scalar or vector of length equalling number of grids'
)
# Calculating the polygon corresponding to the drawn cuboid
polygon = 0.5 * np.array(
[[-extent[self.planar_dir[0]], extent[self.planar_dir[1]]],
[extent[self.planar_dir[0]], extent[self.planar_dir[1]]],
[extent[self.planar_dir[0]], -extent[self.planar_dir[1]]],
[-extent[self.planar_dir[0]], -extent[self.planar_dir[1]]]],
dtype=float)
thickness = extent[self.ext_dir]
# Drawing polygon
self.draw_polygon(center, polygon, thickness, eps)
def draw_cylinder(self, center: np.ndarray, radius: float, thickness: float,
num_points: int, eps: float or np.ndarray):
"""
Draw a cylinder with permittivity epsilon. By default, the axis of the cylinder
is assumed to be along the extrusion direction
"""
center = np.array(center)
# Validating input parameters
if center.ndim != 1 or center.size != 3:
raise GridError('Invalid center coordinate')
if is_scalar(eps):
eps = np.ones(self.shifts.shape[0]) * eps
if eps.ndim != 1 or eps.size != self.shifts.shape[0]:
raise GridError(
                'Invalid permittivity - must be scalar or vector of length equalling number of grids'
)
if not is_scalar(thickness):
raise GridError('Invalid thickness')
if not is_scalar(num_points):
raise GridError('Invalid number of points on the cylinder')
# Approximating the drawn cylinder with a polygon with number of vertices = num_points
theta = np.linspace(0, 2.0 * np.pi, num_points)
x = radius * np.sin(theta)
y = radius * np.cos(theta)
polygon = np.vstack((x, y)).T
# Drawing polygon
self.draw_polygon(center, polygon, thickness, eps)
def draw_slab(self, dir_slab: Direction or float, center: float,
thickness: float, eps: float or np.ndarray):
"""
Draw a slab
"""
# Validating input arguments
if isinstance(dir_slab, Direction):
dir_slab = dir_slab.value
elif not is_scalar(dir_slab):
raise GridError('Invalid slab direction')
elif not dir_slab in range(3):
raise GridError('Invalid slab direction')
if not is_scalar(center):
raise GridError('Invalid slab center')
if is_scalar(eps):
eps = np.ones(self.shifts.shape[0]) * eps
if eps.ndim != 1 or eps.size != self.shifts.shape[0]:
raise GridError(
'Invalid permittivity - must be a scalar or vector of length equalling number of grids'
)
dir_slab_par = np.delete(range(3), dir_slab)
cuboid_cen = np.array(
[self.center[a] if a != dir_slab else center for a in range(3)])
cuboid_extent = np.array([2*np.abs(self.exyz[a][-1]-self.exyz[a][0]) if a !=dir_slab \
else thickness for a in range(3)])
self.draw_cuboid(cuboid_cen, cuboid_extent, eps)
def fill_cuboid(self, fill_dir: Direction, fill_pol: int,
surf_center: np.ndarray, surf_extent: np.ndarray,
eps: float or np.ndarray):
'''
INPUTS:
1. surf_extent - array of size 2 corresponding to the extent of the surface. If the fill direction
is x, then the two elements correspond to y,z, if it is y then x,z and if it is z then x,y
'''
surf_center = np.array(surf_center)
surf_extent = np.array(surf_extent)
# Validating input arguments
if isinstance(fill_dir, Direction):
fill_dir = fill_dir.value
        elif not is_scalar(fill_dir):
            raise GridError('Invalid fill direction')
        elif fill_dir not in range(3):
            raise GridError('Invalid fill direction')
if not is_scalar(fill_pol):
raise GridError('Invalid polarity')
if not fill_pol in [-1, 1]:
raise GridError('Invalid polarity')
if surf_center.ndim != 1 or surf_center.size != 3:
raise GridError('Invalid surface center')
if surf_extent.ndim != 1 or surf_extent.size != 2:
raise GridError('Invalid surface extent')
edge_lim = self.exyz[fill_dir][0] if fill_pol == -1 else self.exyz[
fill_dir][-1]
cuboid_extent = np.insert(surf_extent, fill_dir,
2 * np.abs(edge_lim - surf_center[fill_dir]))
cuboid_center = np.array([surf_center[a] if a != fill_dir else \
(surf_center[a]+0.5*fill_pol*cuboid_extent[a]) for a in range(3)])
self.draw_cuboid(cuboid_center, cuboid_extent, eps)
def fill_slab(self, fill_dir: Direction, fill_pol: int, surf_center: float,
eps: float or np.ndarray):
# Validating input arguments
if isinstance(fill_dir, Direction):
fill_dir = fill_dir.value
        elif not is_scalar(fill_dir):
            raise GridError('Invalid fill direction')
        elif fill_dir not in range(3):
            raise GridError('Invalid fill direction')
if not is_scalar(fill_pol):
raise GridError('Invalid polarity')
if not fill_pol in [-1, 1]:
raise GridError('Invalid polarity')
if not is_scalar(surf_center):
raise GridError('Invalid surface center')
edge_lim = self.exyz[fill_dir][0] if fill_pol == -1 else self.exyz[
fill_dir][-1]
slab_thickness = 2 * np.abs(edge_lim - surf_center)
slab_center = surf_center + 0.5 * fill_pol * slab_thickness
self.draw_slab(fill_dir, slab_center, slab_thickness, eps)
def compute_layers(self):
"""
Function to break the structure into different layers
OUTPUT: Takes the set of polygons, which may be drawn at displaced z coordinates
        and breaks them into layers which can then be rendered separately
"""
# Calculating the layer coordinates
self.layer_z = np.sort(np.unique(np.array(self.list_z).flatten('F')))
self.layer_polygons = [[] for i in range(self.layer_z.size - 1)]
        # Assigning polygons to layers
for i in range(len(self.list_polygons)):
ind_bottom = np.searchsorted(self.layer_z, self.list_z[i][0])
ind_top = np.searchsorted(self.layer_z, self.list_z[i][1])
for k in range(ind_bottom, ind_top):
self.layer_polygons[k].append(self.list_polygons[i])
def remove_intersection(self):
"""
Function to remove polygon intersections
We assume that the material drawn at the end is the desired material
OUTPUT: Converts the set of objects specified by the user into another
set of objects which do NOT intersect with each other
"""
def check_bounding_box(polygon_1, polygon_2):
'''
Helper function to perform a simple check if the bounding box of
polygon_1 and polygon_2 do not intersect
This is mainly to avoid computing intersections if the two polygons
are very far from each other, in order to speed up the reduction process
'''
r1_max = np.max(polygon_1, axis=0)
r2_max = np.max(polygon_2, axis=0)
r1_min = np.min(polygon_1, axis=0)
r2_min = np.min(polygon_2, axis=0)
            if r1_max[0] < r2_min[0] or r2_max[0] < r1_min[0]:
                return False
            elif r1_max[1] < r2_min[1] or r2_max[1] < r1_min[1]:
                return False
else:
return True
def compute_intersection(polygon_1, polygon_2):
'''
Wrapper function around the gdspy module to take as input
two polygons and return polygon_1-polygon_2
            Explicit NOT operation is only performed if the bounding boxes
            of the two polygons overlap.
'''
if check_bounding_box(polygon_1, polygon_2):
gds_poly1 = gdspy.Polygon(polygon_1, 0)
gds_poly2 = gdspy.Polygon(polygon_2, 0)
gds_poly = gdspy.fast_boolean(
gds_poly1, gds_poly2, 'not', layer=1)
if gds_poly is None:
return []
else:
return gds_poly.polygons
else:
return [polygon_1]
num_layers = len(self.layer_polygons)
self.reduced_layer_polygons = []
for layer_i_polygons in self.layer_polygons:
# In each layer we remove the polygons added later from the
# polygons added earlier
if layer_i_polygons:
red_layer_i_polygons = [layer_i_polygons[0]]
num_polygons = len(layer_i_polygons)
for n in range(1, num_polygons):
temp_layer_i_polygons = []
for red_layer_i_polygon in red_layer_i_polygons:
polygon_inter = compute_intersection(
red_layer_i_polygon[0], layer_i_polygons[n][0])
if polygon_inter:
polygon_inter_eps = [(polygon,
red_layer_i_polygon[1])
for polygon in polygon_inter]
temp_layer_i_polygons = temp_layer_i_polygons + polygon_inter_eps
temp_layer_i_polygons.append(layer_i_polygons[n])
red_layer_i_polygons = copy.deepcopy(temp_layer_i_polygons)
else:
red_layer_i_polygons = []
self.reduced_layer_polygons.append(
copy.deepcopy(red_layer_i_polygons))
def render_polygon(self, polygon: np.ndarray, z_extent: np.ndarray,
eps: np.ndarray):
"""
Function to render grid with contribution due to polygon 'polygon'.
INPUTS:
polygon - list of (x,y) vertices of the polygon being rendered
z_extent - extent (z_1, z_2) along the extrusion direction of the polygon being rendered
eps - permittivity of the polygon being rendered
OUTPUTS:
updates self.grids with the properly aliased polygon permittivity
reduces self.frac_bg by the fraction of space occupied by the polygon 'polygon'
"""
# Validating input arguments
if polygon.ndim != 2 or polygon.shape[1] != 2:
raise GridError(
'Invalid format for specifying polygon - must be a Nx2 array')
if polygon.shape[0] <= 2:
raise GridError(
'Malformed Polygon - must contain more than two points')
if z_extent.ndim != 1 or z_extent.size != 2:
raise GridError(
'Invalid format for specifying z-extent - must be a vector of length 2'
)
def to_3D(vector: List or np.ndarray,
z: float = 0.5 * (z_extent[0] + z_extent[1])):
return np.insert(vector, self.ext_dir, z)
def get_zi(z: float, which_shifts: float):
pos_3D = to_3D([0, 0], z)
grid_coords = self.pos2ind(
pos_3D,
which_shifts,
which_grid=GridType.PRIM,
check_bounds=False)
return grid_coords[self.ext_dir]
# Calculating slice affected by polygon
pbd_min = polygon.min(axis=0)
pbd_max = polygon.max(axis=0)
z_min = z_extent.min()
z_max = z_extent.max()
for n, grid in enumerate(self.grids):
'''
Computing the in-plane pixel values
'''
# Shape of the complementary grid
comp_shape = np.array([self.shape[a]+2 if self.comp_shifts[n][a] == 0 else self.shape[a]+1 \
for a in range(3)])
# Calculating the indices of the maximum and minimum polygon coordinate
ind_xy_min = self.pos2ind(
to_3D(pbd_min),
which_shifts=n,
which_grid=GridType.PRIM,
round_ind=True,
check_bounds=False)
ind_xy_max = self.pos2ind(
to_3D(pbd_max),
which_shifts=n,
which_grid=GridType.PRIM,
round_ind=True,
check_bounds=False)
# Calculating the points on the grid that are affected by the drawn polygons
corner_xy_min = ind_xy_min[self.planar_dir].astype(int)
corner_xy_max = np.minimum(
ind_xy_max[self.planar_dir] + 1,
self.shape[self.planar_dir] - 1).astype(int)
# Calculating the points of the complementary grid that need to be passed
comp_corner_xy_min = corner_xy_min.astype(int)
comp_corner_xy_max = np.minimum(
corner_xy_max + 1, comp_shape[self.planar_dir] - 1).astype(int)
# Setting up slices
edge_slice_xy = [
np.s_[j:f + 1]
for j, f in zip(comp_corner_xy_min, comp_corner_xy_max)
]
# Calling the rastering function
aa_x, aa_y = (self.shifted_exyz(which_shifts = n, which_grid = GridType.COMP)[a][s] \
for a,s in zip(self.planar_dir, edge_slice_xy))
w_xy = raster_2D(polygon.T, aa_x, aa_y)
'''
Computing the pixel value along the surface normal
'''
# Calculating the indices of the start and stop point
ind_z_min = get_zi(z_min, which_shifts=n)
ind_z_max = get_zi(z_max, which_shifts=n)
corner_z_min = ind_z_min.astype(int)
corner_z_max = np.minimum(ind_z_max + 1,
self.shape[self.ext_dir] - 1).astype(int)
comp_corner_z_min = corner_z_min.astype(int)
comp_corner_z_max = np.minimum(
corner_z_max + 1, comp_shape[self.ext_dir] - 1).astype(int)
edge_slice_z = np.s_[comp_corner_z_min:comp_corner_z_max + 1]
aa_z = self.shifted_exyz(
which_shifts=n,
which_grid=GridType.COMP)[self.ext_dir][edge_slice_z]
w_z = raster_1D(z_extent, aa_z)
# Combining the extrusion and planar area calculation
w = (w_xy[:, :, np.newaxis] * w_z).transpose(
np.insert([0, 1], self.ext_dir, (2,)))
# Adding to the grid
center_slice = [None for a in range(3)]
center_slice[self.ext_dir] = np.s_[corner_z_min:corner_z_max + 1]
for i in range(2):
center_slice[self.planar_dir[i]] = np.s_[corner_xy_min[i]:
corner_xy_max[i] + 1]
# Updating permittivity
self.grids[n][tuple(center_slice)] += eps[n] * w
self.frac_bg[n][tuple(center_slice)] -= w
def clear(self):
'''
Function to clear the existing polygons in the grid object
Following the clear command, new structures can be added to the grid
object and subsequently rendered
'''
self.list_polygons = [
] # List of polygons corresponding to each block specified by user
self.layer_polygons = [
] # List of polygons after bifurcating extrusion direction into layers
self.reduced_layer_polygons = [
] # List of polygons after removing intersections
self.list_z = [
] # List of z coordinates of the different blocks specified by user
self.layer_z = [
] # List of z coordinates of the different distinct layers
def render(self, disable_intersection: bool = False):
"""
Function to render the added polygons to the specified grid
INPUTS:
1. disable_intersection - set this flag to True if you are
sure that the polygons that you draw do not intersect with each other.
The intersection removal process will not be performed, and direct rastering of
the polygons onto the grid will be performed. Note that one polygon completely
being inside the other counts as an intersection. This might speed up the
drawing functions if you are drawing a large number of polygons (for e.g. in a
photonic crystal)
OUTPUTS: Renders all the drawn polygons onto the grid
There are three steps to rendering the grid
1. Computing the layers along the z-direction
2. Simplify polygon intersection at each layer (done only if disable_intersection is False)
3. Use the rastering functions to compute areas and add back background permittivities
NOTE: The rendering function CAN be called more than once - if you draw a bunch of objects, render,
visualize and do some operations on the resulting grid, and want to edit the grid by
adding more objects, you can continue to add the polygons on the same object and
render again.
"""
# Begin by setting the grid.grids to 0 and frac_bg to 1 -
# This handles the case if it is not the first call to the render function
self.frac_bg = np.ones_like(self.grids_bg)
self.grids = np.zeros_like(self.grids_bg)
# Computing layers involved in the problem
self.compute_layers()
# Removing intersections involved in the problem
if disable_intersection:
self.reduced_layer_polygons = self.layer_polygons
else:
self.remove_intersection()
# Now all the layers and polygons should not intersect with each other and can be aliased on the grids
for i, polygons in enumerate(self.reduced_layer_polygons):
for j, polygon in enumerate(self.reduced_layer_polygons[i]):
# Iterating over each layer and rendering each polygon
if polygon is not None:
self.render_polygon(
polygon[0],
z_extent=np.array(
[self.layer_z[i], self.layer_z[i + 1]]),
eps=polygon[1])
# Finally, adding the background permittivity
for i in range(0, self.shifts.shape[0]):
self.grids[i] = self.grids[i] + self.grids_bg[i] * self.frac_bg[i]
def get_slice(self,
surface_normal: Direction or int,
center: float,
which_shifts: int = 0,
sample_period: int = 1) -> np.ndarray:
"""
Retrieve a slice of a grid.
Interpolates if given a position between two planes.
:param surface_normal: Axis normal to the plane we're displaying. Can be a Direction or
integer in range(3)
:param center: Scalar specifying position along surface_normal axis.
:param which_shifts: Which grid to display. Default is the first grid (0).
:param sample_period: Period for down-sampling the image. Default 1 (disabled)
:return Array containing the portion of the grid.
"""
        if not (is_scalar(center) and np.isreal(center)):
raise GridError('center must be a real scalar')
sp = round(sample_period)
if sp <= 0:
raise GridError('sample_period must be positive')
if not is_scalar(which_shifts) or which_shifts < 0:
raise GridError('Invalid which_shifts')
# Turn surface_normal into its integer representation
if isinstance(surface_normal, Direction):
surface_normal = surface_normal.value
if surface_normal not in range(3):
raise GridError('Invalid surface_normal direction')
surface = np.delete(range(3), surface_normal)
# Extract indices and weights of planes
center3 = np.insert([0, 0], surface_normal, (center,))
center_index = self.pos2ind(
center3, which_shifts, round_ind=False,
check_bounds=False)[surface_normal]
centers = np.unique([floor(center_index),
ceil(center_index)]).astype(int)
if len(centers) == 2:
fpart = center_index - floor(center_index)
w = [1 - fpart, fpart] # longer distance -> less weight
else:
w = [1]
c_min, c_max = (self.xyz[surface_normal][i] for i in [0, -1])
if center < c_min or center > c_max:
raise GridError(
'Coordinate of selected plane must be within simulation domain')
# Extract grid values from planes above and below visualized slice
sliced_grid = zeros(self.shape[surface])
for ci, weight in zip(centers, w):
s = tuple(
ci if a == surface_normal else np.s_[::sp] for a in range(3))
sliced_grid += weight * self.grids[which_shifts][tuple(s)]
# Remove extra dimensions
sliced_grid = np.squeeze(sliced_grid)
return sliced_grid
def visualize_slice(self,
surface_normal: Direction or int,
center: float,
which_shifts: int = 0,
sample_period: int = 1,
finalize: bool = True):
"""
Visualize a slice of a grid.
Interpolates if given a position between two planes.
:param surface_normal: Axis normal to the plane we're displaying. Can be a Direction or
integer in range(3)
:param center: Scalar specifying position along surface_normal axis.
:param which_shifts: Which grid to display. Default is the first grid (0).
:param sample_period: Period for down-sampling the image. Default 1 (disabled)
:param finalize: Whether to call pyplot.show() after constructing the plot. Default True
"""
from matplotlib import pyplot
# Set surface normal to its integer value
if isinstance(surface_normal, Direction):
surface_normal = surface_normal.value
grid_slice = self.get_slice(
surface_normal=surface_normal,
center=center,
which_shifts=which_shifts,
sample_period=sample_period)
surface = np.delete(range(3), surface_normal)
x, y = (self.shifted_exyz(which_shifts)[a] for a in surface)
xmesh, ymesh = np.meshgrid(x, y, indexing='ij')
x_label, y_label = ('xyz' [a] for a in surface)
if (len(grid_slice.shape) == 1):
grid_slice = np.transpose(np.array([grid_slice]))
pyplot.figure()
pyplot.pcolormesh(xmesh, ymesh, grid_slice)
pyplot.colorbar()
pyplot.gca().set_aspect('equal', adjustable='box')
pyplot.xlabel(x_label)
pyplot.ylabel(y_label)
if finalize:
pyplot.show()
def visualize_isosurface(self,
level: float = None,
which_shifts: int = 0,
sample_period: int = 1,
show_edges: bool = True,
finalize: bool = True):
"""
Draw an isosurface plot of the device.
:param level: Value at which to find isosurface. Default (None) uses mean value in grid.
:param which_shifts: Which grid to display. Default is the first grid (0).
:param sample_period: Period for down-sampling the image. Default 1 (disabled)
:param show_edges: Whether to draw triangle edges. Default True
:param finalize: Whether to call pyplot.show() after constructing the plot. Default True
"""
from matplotlib import pyplot
import skimage.measure
# Claims to be unused, but needed for subplot(projection='3d')
from mpl_toolkits.mplot3d import Axes3D
# Get data from self.grids
grid = self.grids[which_shifts][::sample_period, ::sample_period, ::
sample_period]
if level is None:
level = grid.mean()
# Find isosurface with marching cubes
verts, faces = skimage.measure.marching_cubes(grid, level)
# Convert vertices from index to position
pos_verts = np.array([
self.ind2pos(verts[i, :], which_shifts, round_ind=False)
for i in range(verts.shape[0])
],
dtype=float)
xs, ys, zs = (pos_verts[:, a] for a in range(3))
# Draw the plot
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
if show_edges:
ax.plot_trisurf(xs, ys, faces, zs)
else:
ax.plot_trisurf(xs, ys, faces, zs, edgecolor='none')
# Add a fake plot of a cube to force the axes to be equal lengths
max_range = np.array(
[xs.max() - xs.min(),
ys.max() - ys.min(),
zs.max() - zs.min()],
dtype=float).max()
mg = np.mgrid[-1:2:2, -1:2:2, -1:2:2]
xbs = 0.5 * max_range * mg[0].flatten() + 0.5 * (xs.max() + xs.min())
ybs = 0.5 * max_range * mg[1].flatten() + 0.5 * (ys.max() + ys.min())
zbs = 0.5 * max_range * mg[2].flatten() + 0.5 * (zs.max() + zs.min())
# Comment or uncomment following both lines to test the fake bounding box:
for xb, yb, zb in zip(xbs, ybs, zbs):
ax.plot([xb], [yb], [zb], 'w')
if finalize:
pyplot.show()
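# Usage sketch (hypothetical coordinates, not taken from this repository): build a
# uniform grid over a 2 x 2 x 2 cell, draw a centered cuboid and rasterize it.
#   edge_coords = [np.linspace(-1.0, 1.0, 51) for _ in range(3)]
#   grid = Grid(edge_coords, ext_dir=Direction.z)
#   grid.draw_cuboid(center=[0, 0, 0], extent=[1.0, 1.0, 1.0], eps=12.25)
#   grid.render()
#   eps_slice = grid.get_slice(surface_normal=Direction.z, center=0.0)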
|
stanfordnqp/spins-b
|
spins/gridlock/grid.py
|
Python
|
gpl-3.0
| 47,797
|
from os.path import isfile as os_isfile
from lzma import compress as lzma_compress
from lzma import decompress as lzma_decompress
from json import loads as json_loads
from json import dumps as json_dumps
import codecs
import getpass
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random
def encrypt(key: bytes, plaintext: str) -> bytes:
""" encrypt(key, plaintext) -> Encrypts plaintext using key.
"""
block_size = AES.block_size
valid_key = SHA256.new(key.encode()).digest()
iv = Random.new().read(AES.block_size)
# Pads '\x00' (null) bytes to the end of the plaintext to make it a
# multiple of the block_size.
pad_len = block_size - (len(plaintext) % block_size)
padded_plaintext = plaintext.encode() + b'\x00' * pad_len
encrypt_obj = AES.new(valid_key, AES.MODE_CBC, iv)
# Put the iv at the start of the ciphertext so when it needs to be
# decrypted the same iv can be used.
ciphertext = iv + encrypt_obj.encrypt(padded_plaintext)
return ciphertext
def decrypt(key: bytes, ciphertext: bytes) -> str:
""" decrypt(key, ciphertext) -> Decrypts the ciphertext using the key.
"""
block_size = AES.block_size
valid_key = SHA256.new(key.encode()).digest()
# iv is the first block_size of data at the start of ciphertext.
iv = ciphertext[:block_size]
real_ciphertext = ciphertext[block_size:]
decrypt_obj = AES.new(valid_key, AES.MODE_CBC, iv)
padded_plaintext = decrypt_obj.decrypt(real_ciphertext)
try:
# Remove the padding from the plaintext.
plaintext = padded_plaintext.strip(b'\x00').decode()
except UnicodeDecodeError:
print("There was an error. Maybe the wrong password was given.")
return ''
return plaintext
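# Round-trip sketch: for any plaintext without trailing null bytes,
# decrypt(key, encrypt(key, plaintext)) == plaintext, because the null padding
# added in encrypt() is removed by strip(b'\x00') above.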
def list_to_dict(key_val_list: list) -> dict:
""" Turns a ['key=val'] list into a dictionary.
"""
# Return an empty dictionary if key_val_list is empty.
if not key_val_list: return {}
# Split the list values at the '=' into a tuple.
split_list = [i.split('=') for i in key_val_list]
return dict(split_list)
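# Example: list_to_dict(['user=alice', 'host=example.com'])
#          == {'user': 'alice', 'host': 'example.com'}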
def bytes_to_str(bytes_obj: bytes) -> str:
""" Encodes the bytes object using base64, and returns that string value.
"""
return codecs.encode(bytes_obj, 'base64').decode()
def str_to_bytes(str_obj: str) -> bytes:
""" Decodes a base64 string into a bytes object.
"""
return codecs.decode(str_obj.encode(), 'base64')
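# bytes_to_str and str_to_bytes are inverses:
# str_to_bytes(bytes_to_str(b'data')) == b'data'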
def write_file(filename: str, accounts_dict: dict):
""" Compresses and writes the accounts_dict to the file at filename.
"""
json_data = json_dumps(accounts_dict)
lzma_data = lzma_compress(json_data.encode())
with open(filename, 'wb') as pass_file:
pass_file.write(lzma_data)
def crypt_to_dict(crypt_data: str) -> dict:
""" Decrypts crypt_data and returns the json.loads dictionary.
"""
# Get the password to decrypt the data.
password = getpass.getpass('Password to use for Decryption: ')
# Convert the data to a bytes object and decrypt it.
json_data = decrypt(password, str_to_bytes(crypt_data))
# Load the decrypted data with json and return the resulting
# dictionary.
try:
return json_loads(json_data)
except:
return {}
def dict_to_crypt(data_dict: dict) -> str:
""" Returns the encrypted json dump of data_dict.
"""
# Dump the data_dict into json data.
json_data = json_dumps(data_dict)
# Get the password to encrypt the data.
password = getpass.getpass('Password to use for Encryption: ')
# Return the string encoded encrypted json dump.
return bytes_to_str(encrypt(password, json_data))
def dict_to_str(data_dict: dict) -> str:
""" Returns a formated string of the (key, value) items in the supplied
dictionary.
"""
str_list = ['\n']
max_key_len = max(len(key) for key in data_dict.keys())
for key, value in data_dict.items():
if key == 'Account Name':
str_list.insert(1, "{1:<{0}} -> {2}".format(max_key_len,
key, value))
continue
# Format the info in a list as follows:
        # key (left aligned to max_key_len): value
str_list.append("{1:<{0}} -> {2}".format(max_key_len,
key.lower().capitalize(),
value))
return '\n'.join(str_list)
def main(args: dict) -> int:
""" Read the password file, decrypt it and print the requested info.
"""
filename = args.pop('filename')
account = args.pop('account')
remove_account = args.pop('remove_account')
if account:
# Get the sha256 hash of the account name.
hashed_account = bytes_to_str(SHA256.new(account.encode()).digest())
# Create the information dictionary from the info list supplied
# by the user.
info_dict = list_to_dict(args.pop('info_list'))
if info_dict:
# Put the non hashed account name in the info dict so it is
# not lost.
info_dict['Account Name'] = account
# Get the secret information.
for key, value in info_dict.items():
if value == '{secret}':
info_dict[key] = getpass.getpass('Enter the %s: ' % key)
else:
# No account name was given.
hashed_account = ''
password = ''
# Create the file if it doesn't exist.
if not os_isfile(filename):
open_mode = 'w+b'
else:
open_mode = 'rb'
with open(filename, open_mode) as pass_file:
# Read all the data from the file.
lzma_data = pass_file.read()
        # Get the json data out of the file data, or an empty json dict if
        # the file was empty.
if lzma_data:
json_data = lzma_decompress(lzma_data).decode()
else:
json_data = '{}'
# Load the json data into a dictionary.
accounts_dict = json_loads(json_data)
if not hashed_account:
            # List all accounts if none were given, but a list was requested.
if args.get('list_account_info', False):
account_str = ''
for account_data in accounts_dict.values():
account_dict = crypt_to_dict(account_data)
if account_dict:
account_str += '\n' + dict_to_str(account_dict)
print(account_str)
return 0
else:
# Pop the requested account out of the dictionary, so it can be
# modified or removed or just printed to stdout.
account_data = accounts_dict.pop(hashed_account, '')
# Don't do anything with the account_data if it is to be
# removed.
if remove_account:
write_file(filename, accounts_dict)
return 0
# If there was account data put the decrypted dictionary in
# account_dict otherwise put an empty dictionary.
account_dict = crypt_to_dict(account_data) if account_data else {}
# Update the account info if new data was supplied.
if info_dict:
account_dict.update(info_dict)
# Encrypt the account_dict.
account_data = dict_to_crypt(account_dict)
# Print the account info.
if args.get('list_account_info', False) and account_dict:
print(dict_to_str(account_dict))
# Put the accounts data back into the dictionary.
accounts_dict[hashed_account] = account_data
# Write accounts_dict to the password file.
write_file(filename, accounts_dict)
return 0
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description="Password manager")
parser.add_argument('-i', '--info', dest='info_list', action='append',
help='Set account info. Use {secret} to input \
secrets e.g. (Question={secret})')
parser.add_argument('-r', '--remove', dest='remove_account',
action='store_true', default=False,
help='Remove account')
parser.add_argument('-f', '--filename', dest='filename', action='store',
help='Account details file.')
parser.add_argument('-l', '--list', dest='list_account_info',
action='store_true',
help='Print out the account information.')
parser.add_argument('-a', '--account', dest='account', action='store',
help='The account to operate on')
args = parser.parse_args()
main(args.__dict__)
|
zepto/lessinfo
|
old/crypttest.py
|
Python
|
gpl-3.0
| 8,690
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Paweł Krawczyk'
import hashlib
import hmac
from flask import make_response, redirect
from settings import CSRF_KEY, DEVELOPER_MACHINE
__author__ = 'Paweł Krawczyk'
def login_response(owner_id):
"""
Perform actual login action which is currently limited to setting owner_id cookie
and XSRF-TOKEN cookie.
:param owner_id:
:return:
"""
token = hmac.new(bytes(CSRF_KEY, 'ascii'), bytes(owner_id, 'ascii'), hashlib.sha512).hexdigest()
resp = make_response(redirect('/static/#/analysis'))
resp.set_cookie('XSRF-TOKEN', token, secure=(not DEVELOPER_MACHINE))
resp.set_cookie('owner_id', owner_id, secure=(not DEVELOPER_MACHINE))
print('login_response setting token cookie {}'.format(token))
return resp
def verify_csrf_token(req):
"""
Utility function to verify CSRF token on API calls. Uses secret configured in .ini file
and the owner_id from request.
:return: True if token correct, False if incorrect
"""
request_token = req.headers.get('X-XSRF-TOKEN')
owner_id = req.cookies.get('owner_id')
print('verify_csrf_token owner_id={} request_token={}'.format(owner_id, request_token))
    if not (owner_id and request_token):
print('verify_csrf_token missing owner_id or request token')
return False
expected_token = hmac.new(bytes(CSRF_KEY, 'ascii'), bytes(owner_id, 'ascii'), hashlib.sha512).hexdigest()
if hmac.compare_digest(request_token, expected_token):
return True
print('verify_csrf_token token mismatch expected_token={}'.format(expected_token))
return False
|
kravietz/cspbuilder
|
apihelpers/auth.py
|
Python
|
gpl-3.0
| 1,656
|
# _*_ coding: utf-8 _*_
__author__ = 'bobby'
__date__ = '2017/6/8 10:58'
from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ['name', 'email', 'url', 'text']
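# View-side usage sketch (hypothetical view code, not part of this file):
#   form = CommentForm(request.POST)
#   if form.is_valid():
#       form.save()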
|
TwocatWhelp/lizhen
|
comments/forms.py
|
Python
|
gpl-3.0
| 255
|
from collections import OrderedDict as OD
from copy import deepcopy
from math import ceil
from .callbacks import spi_efc_cmd_cb
from .ADF4350 import columns
from ..regs import RegsData, regs_cb
def Fout_src_cb(data, val):
REFin = float(data.get_value('REFin'))
B = float(data.get_value('B'))
R = float(data.get_value('R'))
return '%.3f' % (REFin * B / R)
hex_data = '''
R1|000015|R COUNTER LATCH
R0|000020|CONTROL LATCH
R2|000802|N COUNTER LATCH
'''
bin_data = '''
R1|0|Control bits|1:1|
R1|2|R-counter||
R1|16|Anti-backlash pulse width||
R1|18|Lock-detect precision||
R1|19|Test-mode bit||
R1|20|Band select clock||
R1|22|Reserved|1|
R0|0|Control bits|1:0|
R0|2|Core power level||
R0|4|Counter reset||
R0|5|Muxout control||001 - LD'
R0|8|Phase detector polarity||
R0|9|CP three-state||
R0|10|CP gain||
R0|11|Mute-til-ld||
R0|12|Output power level||
R0|14|Current setting 1||
R0|17|Current setting 2||
R0|20|POWER DOWN 1||
R0|21|POWER DOWN 2||
R0|22|Reserved|1|
R2|0|Control bits|1:2|
R2|2|Reserved|1|
R2|8|B-counter||
R2|21|CP-gain||
R2|22|Reserved|1|
'''
def get_menu(dev):
return OD([('Registers', regs_cb)])
def get_regs(dev):
data = RegsData(sz=24)
data.add_page('calc0')
data.add('label1', label='Fvco = REFin * B / R')
data.add_page('calc1')
data.add('Fout', wdgt='entry', state='readonly', src=Fout_src_cb, msg='Fout')
data.add('REFin', wdgt='entry', state='readonly', src=lambda d,v: d.dev_src('refin'), msg='REFin')
spn = RegsData.spn
data.add('B', wdgt='spin', value=spn(3, 8191), src=lambda d,v:d.bits_src('R2', 8, 20, v, minimum=3), msg='B')
data.add('R', wdgt='spin', value=spn(1, 16383), src=lambda d,v:d.bits_src('R1', 2, 15, v, minimum=1), msg='R')
cmd_cb = lambda dev, cmd, val=None: spi_efc_cmd_cb(dev, cmd, val, ncpha='1', cpol='0')
data.add_hex_data(hex_data, cmd_cb)
data.add_bin_data(bin_data)
return data
|
ivanovev/sg
|
gui/ADF4360_8.py
|
Python
|
gpl-3.0
| 1,908
|
from ..documents.constants import pubkey_regex
from ..documents.constants import hash_regex
from pypeg2 import *
class Pubkey(str):
regex = re.compile(pubkey_regex)
class Hash(str):
regex = re.compile(hash_regex)
class Int(str):
regex = re.compile(r"[0-9]+")
class SIG(str):
grammar = "SIG(", attr('pubkey', Pubkey), ")"
@classmethod
def token(cls, pubkey):
sig = cls()
sig.pubkey = pubkey
return sig
def compose(self, parser, grammar=None, attr_of=None):
return "SIG({0})".format(self.pubkey)
class CSV(str):
grammar = "CSV(", attr('time', Int), ")"
@classmethod
def token(cls, time):
csv = cls()
csv.time = str(time)
return csv
def compose(self, parser, grammar=None, attr_of=None):
return "CSV({0})".format(self.time)
class CLTV(str):
grammar = "CLTV(", attr('timestamp', Int), ")"
@classmethod
def token(cls, timestamp):
cltv = cls()
cltv.timestamp = str(timestamp)
return cltv
def compose(self, parser, grammar=None, attr_of=None):
return "CLTV({0})".format(self.timestamp)
class XHX(str):
grammar = "XHX(", attr('sha_hash', Hash), ")"
@classmethod
def token(cls, sha_hash):
xhx = cls()
xhx.sha_hash = sha_hash
return xhx
def compose(self, parser, grammar=None, attr_of=None):
return "XHX({0})".format(self.sha_hash)
class Operator(Keyword):
grammar = Enum(K("&&"), K("||"), K("AND"), K("OR"))
regex = re.compile(r"[&&|\|\||\w]+")
@classmethod
def token(cls, keyword):
op = cls(keyword)
return op
def compose(self, parser, grammar=None, attr_of=None):
return "{0}".format(self.name)
class Condition(str):
@classmethod
def token(cls, left, op=None, right=None):
condition = cls()
condition.left = left
if op:
condition.op = op
if right:
condition.right = right
return condition
def compose(self, parser, grammar=None, attr_of=None):
if type(self.left) is Condition:
left = "({0})".format(parser.compose(self.left, grammar=grammar, attr_of=attr_of))
else:
left = parser.compose(self.left, grammar=grammar, attr_of=attr_of)
if getattr(self, 'op', None):
if type(self.right) is Condition:
right = "({0})".format(parser.compose(self.right, grammar=grammar, attr_of=attr_of))
else:
right = parser.compose(self.right, grammar=grammar, attr_of=attr_of)
op = parser.compose(self.op, grammar=grammar, attr_of=attr_of)
result = "{0} {1} {2}".format(left, op, right)
else:
result = left
return result
Condition.grammar = contiguous(attr('left', [SIG, XHX, CSV, CLTV, ('(', Condition, ')')]),
maybe_some(whitespace, attr('op', Operator), whitespace,
attr('right', [SIG, XHX, CSV, CLTV, ('(', Condition, ')')])))
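# Usage sketch with pypeg2 (the pubkey below is hypothetical, for illustration only):
#   cond = parse("SIG(HgTTJLAQ5sqfknMq7yLPZbehtuLSsKj9CxWN7k8QvYJd)", Condition)
#   compose(cond)  # reproduces the original expression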
|
vtexier/duniter-python-api
|
duniterpy/grammars/output.py
|
Python
|
gpl-3.0
| 3,068
|
import numpy as np
from gpaw.utilities import erf
class DipoleCorrection:
"""Dipole-correcting wrapper around another PoissonSolver."""
def __init__(self, poissonsolver, direction):
"""Construct dipole correction object.
poissonsolver is a GPAW Poisson solver.
direction is 0, 1 or 2 and specifies x, y or z.
"""
self.corrector = DipoleCorrector(direction)
self.poissonsolver = poissonsolver
def get_stencil(self):
return self.poissonsolver.get_stencil()
def set_grid_descriptor(self, gd):
for c in range(3):
if c == self.corrector.c:
if gd.pbc_c[c]:
raise ValueError('System must be non-periodic along '
'dipole correction axis')
else:
pass
# XXX why was the below restriction deemed desirable?
#if not gd.pbc_c[c]:
# raise ValueError('System must be periodic along axes '
# 'perpendicular to dipole correction')
self.poissonsolver.set_grid_descriptor(gd)
self.description = (
self.poissonsolver.description +
            '\nDipole correction along %s-axis' % 'xyz'[self.corrector.c])
def initialize(self):
self.poissonsolver.initialize()
def solve(self, phi, rho, **kwargs):
gd = self.poissonsolver.gd
drho, dphi = self.corrector.get_dipole_correction(gd, rho)
phi -= dphi
iters = self.poissonsolver.solve(phi, rho + drho, **kwargs)
phi += dphi
return iters
def estimate_memory(self, mem):
self.poissonsolver.estimate_memory(mem)
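# Usage sketch (hypothetical wrapping of an existing GPAW solver): correct the
# dipole of a slab system along z by passing direction=2, e.g.
#   corrected = DipoleCorrection(PoissonSolver(), direction=2)
# solve() subtracts the error-function potential before calling the wrapped
# solver and adds it back afterwards.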
class DipoleCorrector:
def __init__(self, direction):
self.c = direction
def get_dipole_correction(self, gd, rhot_g):
"""Get dipole corrections to charge and potential.
Returns arrays drhot_g and dphit_g such that if rhot_g has the
potential phit_g, then rhot_g + drhot_g has the potential
phit_g + dphit_g, where dphit_g is an error function.
The error function is chosen so as to be largely constant at the
cell boundaries and beyond.
"""
# This implementation is not particularly economical memory-wise
c = self.c
# Right now the dipole correction must be along one coordinate
# axis and orthogonal to the two others. The two others need not
# be orthogonal to each other.
for c1 in range(3):
if c1 != c:
if np.vdot(gd.cell_cv[c], gd.cell_cv[c1]) > 1e-12:
raise ValueError('Dipole correction axis must be '
'orthogonal to the two other axes.')
moment = gd.calculate_dipole_moment(rhot_g)[c]
if abs(moment) < 1e-12:
return gd.zeros(), gd.zeros()
r_g = gd.get_grid_point_coordinates()[c]
cellsize = abs(gd.cell_cv[c, c])
sr_g = 2.0 / cellsize * r_g - 1.0 # sr ~ 'scaled r'
alpha = 12.0 # should perhaps be variable
drho_g = sr_g * np.exp(-alpha * sr_g**2)
moment2 = gd.calculate_dipole_moment(drho_g)[c]
factor = -moment / moment2
drho_g *= factor
phifactor = factor * (np.pi / alpha)**1.5 * cellsize**2 / 4.0
dphi_g = -phifactor * erf(sr_g * np.sqrt(alpha))
return drho_g, dphi_g
|
ajylee/gpaw-rtxs
|
gpaw/dipole_correction.py
|
Python
|
gpl-3.0
| 3,480
|
# -*- coding: utf-8 -*-
from __future__ import print_function
class Cube(object):
def __init__(self, Connection, APIOutput):
APIOutput = APIOutput.split(';')
self.Sid = Connection.Sid
self.Client = Connection.Client
self.getUrlResult = Connection.getUrlResult
self.getDatabaseUrlRequest = Connection.getDatabaseUrlRequest
self.ServerRoot = Connection.ServerRoot
self.getCube = Connection.getCube
self.__DataBaseID = Connection.getID()
self.__ID = APIOutput[0]
self.__Name = APIOutput[1][1:-1]
self.__NumDimensions = APIOutput[2]
self.__DimensionsIDList = APIOutput[3].split(',')
self.__DimensionsList = {}
self.__DimensionsListByName = {}
self.__NumCells = APIOutput[4]
self.__NumFilledCells = APIOutput[5]
self.__Status = APIOutput[6]
self.__Type = APIOutput[7]
self.__Token = APIOutput[8]
self.__Rules = {}
self.isSystemCube = True if ('#_' in self.__Name) else False
self.getDimensionByID = Connection._getFullLoadedDimension
self.LoadRules()
self.LoadCubeDimensions()
def getDataBaseID(self):
return self.__DataBaseID
def getID(self):
return self.__ID
def getName(self):
return self.__Name
def getNumDimensions(self):
return self.__NumDimensions
def getDimensionsList(self):
return self.__DimensionsList
def getDimensionsIDList(self):
return self.__DimensionsIDList
def getDimensionsNameList(self):
return [self.__DimensionsList[DimID].getName() for DimID in self.__DimensionsIDList]
def getDimensionByName(self, Name):
ID = self.__DimensionsListByName[Name]
return self.__DimensionsList[ID]
def DimensionExists(self, Name):
return Name in self.__DimensionsListByName
def getNumCells(self):
return self.__NumCells
def getNumFilledCells(self):
return self.__NumFilledCells
def getStatus(self):
return self.__Status
def getType(self):
return self.__Type
def getToken(self):
return self.__Token
def getAttrDimension(self):
if self.isSystemCube:
AttrDimName = self.getName() + '_'
return self.getDimensionByName(AttrDimName)
else:
return False
def getCubeUrlRequest(self, CMD, Param = {}):
Param['cube'] = self.getID()
return self.getDatabaseUrlRequest(CMD, Param)
def LoadRules(self):
CMD = 'cube/rules'
Url = self.getCubeUrlRequest(CMD)
r = self.Client.request('GET', Url)
for Row in r.data.decode('utf-8').split('\n')[:-1]:
ID, Definition, ExtID = Row.split(';')[:3]
self.__Rules[str(ID)] = Definition[1:-1]
def LoadCubeDimensions(self):
for ID in self.__DimensionsIDList:
self.__DimensionsList[ID] = self.getDimensionByID(ID)
self.__DimensionsListByName[self.getDimensionByID(ID).getName()] = ID
def getRules(self):
return self.__Rules
def getRuleID(self, Definition):
for ID, R in self.getRules().items():
if R == Definition:
return ID
def CreateRule(self, Definition):
CMD = 'rule/create'
Param = {'definition': Definition.replace('""', '"')}
Url = self.getCubeUrlRequest(CMD, Param)
r = self.Client.request('GET', Url)
return r.data.decode('utf-8')
def ParseRule(self, Definition):
CMD = 'rule/parse'
Param = {'definition': Definition.replace('""', '"')}
Url = self.getCubeUrlRequest(CMD, Param)
r = self.Client.request('GET', Url)
return r.data.decode('utf-8')
def Load(self):
CMD = 'cube/load'
Url = self.getCubeUrlRequest(CMD)
r = self.Client.request('GET', Url)
return r.data.decode('utf-8')
def Unload(self):
CMD = 'cube/unload'
Url = self.getCubeUrlRequest(CMD)
r = self.Client.request('GET', Url)
return r.data.decode('utf-8')
def Destroy(self):
CMD = 'cube/destroy'
Url = self.getCubeUrlRequest(CMD)
r = self.Client.request('GET', Url)
return r.data.decode('utf-8')
def Save(self):
CMD = 'cube/save'
Url = self.getCubeUrlRequest(CMD)
r = self.Client.request('GET', Url)
return r.data.decode('utf-8')
def getCellPath(self, Coordinates):
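        """Translate a tuple of element names into a comma-separated path of element IDs."""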
CellPath = []
if not type(Coordinates) == tuple:
Coordinates = (Coordinates, )
for i, Coord in enumerate(Coordinates):
ID = self.getDimensionsIDList()[i]
Dim = self.getDimensionByID(ID)
CellPath.append(str(Dim.getElementID(Coord)))
return ','.join(CellPath)
def getAreaPath(self, Coordinates):
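        """Translate per-dimension selections ('*', one element name, or a tuple of names)
        into an area path string; returns False if a single named element cannot be resolved."""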
ElementsID = []
AreaPath = []
for i, Elements in enumerate(Coordinates):
ID = self.getDimensionsIDList()[i]
Dim = self.getDimensionByID(ID)
ElementsID = []
if not type(Elements) == tuple:
if Elements == '*':
AreaPath.append(Elements)
else:
ElementsID = Dim.getElementID(Elements)
if not ElementsID == False:
AreaPath.append(str(ElementsID))
else:
return False
# AreaPath.append(str(Dim.getElementID(Elements)))
else:
for E in Elements:
IDElem = Dim.getElementID(E)
if not IDElem == False:
ElementsID.append(str(IDElem))
AreaPath.append(':'.join(ElementsID))
return ','.join(AreaPath)
def getValuePath(self, Values):
return ':'.join([str(Value) for Value in Values])
def Replace(self, Coordinate, Value, Splash = 0):
CMD = 'cell/replace'
Path = self.getCellPath(Coordinate)
Param = {'path': Path,
'value': str(Value),
'splash': Splash}
Url = self.getCubeUrlRequest(CMD, Param)
try:
r = self.Client.request('GET', Url)
except:
return False
self.Save()
return r.data.decode('utf-8')
def ReplaceBulk(self, Coordinates, Values, Splash = 0):
CMD = 'cell/replace_bulk'
Paths = self.getAreaPath(Coordinates)
Values = self.getValuePath(Values)
Param = {'paths': Paths,
'values': Values,
'splash': Splash}
Url = self.getCubeUrlRequest(CMD, Param)
try:
r = self.Client.request('GET', Url)
except:
return False
return r.data.decode('utf-8')
def Clear(self, Coordinates):
CMD = 'cube/clear'
Path = self.getAreaPath(Coordinates)
if Path == False:
return False
Param = {'area': Path}
Url = self.getCubeUrlRequest(CMD, Param)
r = self.Client.request('GET', Url)
return r.data.decode('utf-8')
def getValue(self, Coordinates):
CMD = 'cell/value'
Path = self.getCellPath(Coordinates)
Param = {'path': Path}
Url = self.getCubeUrlRequest(CMD, Param)
r = self.Client.request('GET', Url)
return r.data.decode('utf-8').split(';')[2]
def getValueByID(self, CoordinatesID):
CMD = 'cell/value'
Path = ','.join([str(ID) for ID in CoordinatesID])
Param = {'path': Path}
Url = self.getCubeUrlRequest(CMD, Param)
r = self.Client.request('GET', Url)
return r.data.decode('utf-8').split(';')[2].replace('"', '')
def getArea(self, Coordinates):
CMD = 'cell/area'
Path = self.getAreaPath(Coordinates)
Param = {'area': Path}
Url = self.getCubeUrlRequest(CMD, Param)
r = self.Client.request('GET', Url)
return r.data.decode('utf-8').split('\n')[:-1]
def getValues(self, Coordinates, ForceEmptyCells = False):
CMD = 'cell/values'
Paths = ''
for CellPath in self.getArea(Coordinates):
CellTuple = CellPath.split(';')
if CellTuple[2] != '' or ForceEmptyCells == True:
Paths = Paths + CellTuple[-2] + ':'
Paths = Paths[:-1]
Param = {'paths': Paths}
Url = self.getCubeUrlRequest(CMD, Param)
r = self.Client.request('GET', Url)
Values = []
        for Rec in r.data.decode('utf-8').split('\n')[:-1]:
            Value = Rec.split(';')[-2].replace('"', '')
            Values.append(Value)
return Values
# return [Rec.split(';')[-2].replace('"', '') for Rec in Res.data.split('\n')[:-1]]
def Export(self, Coord = None, Condition = None, BlockSize = 1000000):
CMD = 'cell/export'
Param = {'blocksize': BlockSize,
'use_rules': 1}
if Coord:
Param ['area'] = self.getAreaPath(Coord)
if Condition:
Param ['condition'] = Condition
Url = self.getCubeUrlRequest(CMD, Param)
r = self.Client.request('GET', Url)
return r.data.decode('utf-8').split('\n')[:-2]
def Dump(self, Coord = None, Condition = None):
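        """Export the cube and resolve element IDs to names; each returned row holds
        the element names of every dimension followed by the cell value."""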
Res = []
Export = self.Export(Coord, Condition)
for Cell in Export:
NewRec = []
CellRec = Cell.split(';')[:-1]
CellValue = CellRec[2]
CellCoord = CellRec[3].split(',')
for DimID, ElementID in zip(self.getDimensionsIDList(), CellCoord):
NewRec.append(self.getDimensionByID(DimID).getElementName(ElementID))
NewRec.append(CellValue)
Res.append(NewRec)
return Res
def DumpCell(self, Coord = None, Condition = None, UseKeyDWIfExists = True):
return CubeDumpIterator(self, self.Export(Coord, Condition), UseKeyDWIfExists)
def DumpCellAsObject(self, Coord = None, Condition = None, UseKeyDWIfExists = True):
return CubeDumpIteratorObject(self, self.Export(Coord, Condition), UseKeyDWIfExists)
## ##
class CubeDumpCell():
def __init__(self, MethodList):
self.MethodList = MethodList
self.MethodList.append('Value')
def __repr__(self):
ReprStr = ''
for Method in self.MethodList:
ReprStr = ReprStr + getattr(self, Method) + ', '
        return ReprStr[:-2]
## Iterator for the Dump function ##
class CubeDumpIterator():
def __init__(self, CubeObj, Export, UseKeyDWIfExists):
self.Export = Export
self.index = 0
self.loop = len(Export) -1
self.CubeObj = CubeObj
self.UseKeyDWIfExists = UseKeyDWIfExists
if UseKeyDWIfExists:
self._setDumpSchema()
def __iter__(self):
return self
def next(self):
if len(self.Export) == 0:
raise(StopIteration)
if self.index == self.loop:
raise(StopIteration)
if self.UseKeyDWIfExists:
Record = self._getRecord()
while not Record:
Record = self.next()
return Record
else:
return self._getSimpleRecord()
def _setDumpSchema(self):
if len(self.Export) == 0:
return False
Record = self.Export[0].split(';')[:-1][3].split(',')
AttributeValues = {}
for DimID, ElementID in zip(self.CubeObj.getDimensionsIDList(), Record):
Dim = self.CubeObj.getDimensionByID(DimID)
AttrCubeName = Dim.getAttributeCubeName()
AttrCube = self.CubeObj.getCube(AttrCubeName)
AttributeValues[DimID] = dict(zip(Dim.getElementsIDList(), Dim.getElementsNameList()))
            ## Override the AttributeValues dictionary for attribute and system cubes
if AttrCube and AttrCube.isSystemCube:
AttrDim = AttrCube.getAttrDimension()
if AttrDim.KeyDataWarehouseExists():
AttributeValues[DimID] = {}
CubeExport = AttrCube.Export(((AttrDim.getKeyDWName()), ('*')), None)
for Cell in CubeExport:
CellRec = Cell.split(';')[:-1]
Value = str(CellRec[2]).replace('"', '')
Skip, ElementID = CellRec[3].split(',')
AttributeValues[DimID][ElementID] = Value
self.AttributeValues = AttributeValues
self.DimensionsIDList = self.CubeObj.getDimensionsIDList()
def _getSimpleRecord(self):
Rec = []
Coord, Value = self._getCoordAndValue()
for DimID, ElementID in zip(self.CubeObj.getDimensionsIDList(), Coord):
Dim = self.CubeObj.getDimensionByID(DimID)
Rec.append(Dim.getElementName(ElementID))
Rec.append(Value)
return Rec
def _getRecord(self):
Rec = []
Coord, Value = self._getCoordAndValue()
for DimID, ElementID in zip(self.DimensionsIDList, Coord):
if ElementID in self.AttributeValues[DimID]:
Rec.append(self.AttributeValues[DimID][ElementID])
else:
return False
Rec.append(Value)
return Rec
def _getCoordAndValue(self):
Cell = self.Export[self.index]
self.index = self.index + 1
CellRec = Cell.split(';')[:-1]
Value = CellRec[2]
Coord = CellRec[3].split(',')
return Coord, Value
class CubeDumpIteratorObject(CubeDumpIterator):
def __init__(self, CubeObj, Export, UseKeyDWIfExists):
CubeDumpIterator.__init__(self, CubeObj, Export, UseKeyDWIfExists)
self.CubeCell = CubeDumpCell(CubeObj.getDimensionsNameList())
def _getSimpleRecord(self):
Coord, Value = self._getCoordAndValue()
for DimID, ElementID in zip(self.CubeObj.getDimensionsIDList(), Coord):
Dim = self.CubeObj.getDimensionByID(DimID)
ElementValue = Dim.getElementName(ElementID)
setattr(self.CubeCell, Dim.getName(), ElementValue)
setattr(self.CubeCell, 'Value', Value)
return self.CubeCell
    ## New version
def _getRecord(self):
Coord, Value = self._getCoordAndValue()
for DimID, ElementID in zip(self.DimensionsIDList, Coord):
Dim = self.CubeObj.getDimensionByID(DimID)
if ElementID in self.AttributeValues[DimID]:
ElementValue = self.AttributeValues[DimID][ElementID]
setattr(self.CubeCell, Dim.getName(), ElementValue)
else:
                print("The following coordinates do not exist: ", DimID, ElementID)
return False
setattr(self.CubeCell, 'Value', Value)
return self.CubeCell
    ## Original version
## def _getRecord(self):
## Coord, Value = self._getCoordAndValue()
## for DimID, ElementID in zip(self.DimensionsIDList, Coord):
## Dim = self.CubeObj.getDimensionByID(DimID)
## try:
## ElementValue = self.AttributeValues[DimID][ElementID]
## except:
## print DimID, ElementID
## setattr(self.CubeCell, Dim.getName(), ElementValue)
## setattr(self.CubeCell, 'Value', Value)
## return self.CubeCell
|
opnmind/python-olapdb-palo-web
|
src/py3/PyJedoxWebApi/Cube.py
|
Python
|
gpl-3.0
| 15,820
|
# -*- coding: utf-8 -*-
"""project settings"""
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
from zengine.settings import *
import os.path
__author__ = 'Evren Esat Ozkan'
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
# Localization/Internationalization settings defaults
DEFAULT_LANG = 'tr'
DEFAULT_LOCALIZATION_FORMAT = 'tr_TR'
# Available translations
TRANSLATIONS = ['tr', 'en']
# Locale options that should be given to the users for formatting of dates, numbers etc.
LOCALIZATION_FORMATS = ['en_US', 'en_GB', 'tr_TR']
# The directory containing the translations
TRANSLATIONS_DIR = os.path.join(BASE_DIR, 'locale')
# The language of messages in Ulakbus
TRANSLATION_DOMAINS['messages'] = 'tr'
TRANSLATION_DOMAINS['students'] = 'tr'
# Default mail address
MAIL_ADDRESS = 'postmaster@mg.ulakbus.net'
# Demo main url
DEMO_URL = 'http://ulakbus.net'
# path of the activity modules which will be invoked by workflow tasks
ACTIVITY_MODULES_IMPORT_PATHS.extend(['ulakbus.views', 'ulakbus.tasks'])
# absolute path to the workflow packages
WORKFLOW_PACKAGES_PATHS.append(os.path.join(BASE_DIR, 'diagrams'))
SERVICE_PACKAGES_PATH = os.path.join(BASE_DIR, 'services')
LOG_FILE = os.environ.get('LOG_FILE', './ulakbus.log')
AUTH_BACKEND = 'ulakbus.models.auth.AuthBackend'
WF_INITIAL_VALUES = 'ulakbus.lib.view_helpers.WFValues'
PERMISSION_MODEL = 'ulakbus.models.auth.Permission'
USER_MODEL = 'ulakbus.models.auth.User'
ROLE_MODEL = 'ulakbus.models.auth.Role'
UNIT_MODEL = 'ulakbus.models.auth.Unit'
ABSTRACT_ROLE_MODEL = 'ulakbus.models.auth.AbstractRole'
# # left blank to use StreamHandler aka stderr
# LOG_HANDLER = os.environ.get('LOG_HANDLER', 'file')
#
# # logging dir for file handler
# LOG_DIR = os.environ.get('LOG_DIR', '/tmp/')
# DEFAULT_CACHE_EXPIRE_TIME = 99999999 # seconds
# diagrams that do not require a logged-in user
ANONYMOUS_WORKFLOWS.extend(
['login', 'logout', 'parolami_unuttum', 'yeni_parola_belirle', 'bap_firma_kayit',
'bap_duyurulari_goruntule', 'bap_makine_techizat_ara', 'bap_anasayfa',
'bap_komisyon_uyeleri', 'bap_iletisim', 'bap_proje_arama', 'bap_hakkinda',
'bap_takvim_goruntule', 'bap_yardim', 'bap_belgeler', 'bap_mevzuat', 'bap_raporlari',
'bap_satin_alma_duyurulari_listeleme',
])
#: Commonly used workflows
COMMON_WORKFLOWS.extend(['profil_sayfasi_goruntuleme', 'e_posta_degistir', 'kullanici_adi_degistir',
'parola_degistir'])
# #PYOKO SETTINGS
DEFAULT_BUCKET_TYPE = os.environ.get('DEFAULT_BUCKET_TYPE', 'models')
DATE_DEFAULT_FORMAT = "%d.%m.%Y"
DATETIME_DEFAULT_FORMAT = "%d.%m.%Y %H:%M:%S"
DEFAULT_WF_CATEGORY_NAME = 'Genel'
DEFAULT_OBJECT_CATEGORY_NAME = 'Seçime Uygun Görevler'
OBJECT_MENU = {
# 'personel|ogrenci|personeller|ogrenciler': [{'name':'ModelName',
# 'field':'field_name',
# 'verbose_name': 'verbose_name',
# 'category': 'Genel'
# 'wf':'crud'}]
# 'field' defaults to 'personel' or 'ogrenci'
# verbose_name can be specified to override the model's verbose_name_plural
'other': [
# {'name': 'Personel', 'category': 'Genel'},
# {'name': 'Ogrenci', 'category': 'Genel'},
# {'name': 'Okutman', 'category': 'Genel'},
# {'name': 'HariciOkutman', 'category': 'Genel'},
# {'name': 'Donem', 'category': 'Genel'},
# {'name': 'Program', 'category': 'Genel'},
# {'name': 'Ders', 'category': 'Genel'},
# {'name': 'Campus', 'category': 'Genel'},
# {'name': 'Building', 'category': 'Genel'},
# {'name': 'Room', 'category': 'Genel'},
# {'name': 'AkademikTakvim', 'category': 'Genel'},
# {'name': 'OgrenciProgram', 'category': 'Genel'},
],
'personel': [
{'name': 'Personel', 'wf': 'personel_bilgileri',
'verbose_name': 'Personel Kartı', 'field': 'object_id'},
{'name': 'Personel', 'wf': 'kimlik_ve_iletisim_bilgileri',
'verbose_name': 'Kimlik ve Iletisim Bilgileri Düzenle', 'field': 'object_id'},
{'name': 'Izin', 'wf': 'izin', 'verbose_name': 'İzin İşlemleri', 'field': 'personel_id'},
{'name': 'UcretsizIzin', 'wf': 'ucretsiz_izin', 'verbose_name': 'Ücretsiz İzin İşlemleri',
'field': 'personel_id'},
{'name': 'AdresBilgileri', 'verbose_name': 'Adres Bilgileri', 'field': 'personel_id'},
{'name': 'Atama', 'verbose_name': 'Atama İşlemleri', "wf": 'personel_atama',
'field': 'personel_id'},
# {'name': 'Izin', 'verbose_name': 'İzin Başvuru', 'wf': 'izin_basvuru',
# 'field': 'personel_id'},
{'name': 'Personel', 'verbose_name': 'Akademik Personel Görev Süresi Uzatma',
'wf': 'gorev_suresi_uzatma', 'field': 'personel_id'},
{'name': 'Personel', 'verbose_name': 'Görevlendirme', 'wf': 'gorevlendirme',
'field': 'personel_id'},
{'name': 'Ceza', 'verbose_name': 'İdari Ceza Takibi', 'field': 'personel_id',
'wf': 'idari_cezalar_takibi'},
{'name': 'Personel', 'verbose_name': 'Personel İşten Ayrılma', 'field': 'personel_id',
'wf': 'personel_isten_ayrilma'},
{'name': 'SaglikRaporu', 'verbose_name': 'Sağlık Raporu', 'field': 'personel_id',
'wf': 'saglik_raporu_olusturma'},
# Hitap İşlemleri
{'name': 'HizmetKayitlari', 'verbose_name': 'Hizmet Cetveli', 'field': 'personel_id',
'category': 'Hitap İşlemleri', 'wf': 'personel_hizmet_cetveli'},
{'name': 'HizmetKurs', 'verbose_name': 'Kurs Bilgileri', 'field': 'personel_id',
'category': 'Hitap İşlemleri', 'wf': 'crud_hitap'},
{'name': 'HizmetOkul', 'verbose_name': 'Okul Bilgileri', 'field': 'personel_id',
'category': 'Hitap İşlemleri', 'wf': 'crud_hitap'},
{'name': 'HizmetMahkeme', 'verbose_name': 'Mahkeme Bilgileri', 'field': 'personel_id',
'category': 'Hitap İşlemleri', 'wf': 'crud_hitap'},
{'name': 'HizmetBirlestirme', 'verbose_name': 'Hizmet Birleştirme', 'field': 'personel_id',
'category': 'Hitap İşlemleri', 'wf': 'crud_hitap'},
{'name': 'HizmetTazminat', 'verbose_name': 'Tazminat Bilgileri', 'field': 'personel_id',
'category': 'Hitap İşlemleri', 'wf': 'crud_hitap'},
{'name': 'HizmetUnvan', 'verbose_name': 'Ünvan', 'field': 'personel_id',
'category': 'Hitap İşlemleri', 'wf': 'crud_hitap'},
{'name': 'HizmetAcikSure', 'verbose_name': 'Açık Süre Bilgileri', 'field': 'personel_id',
'category': 'Hitap İşlemleri', 'wf': 'crud_hitap'},
{'name': 'HizmetBorclanma', 'verbose_name': 'Borçlanma Bilgileri', 'field': 'personel_id',
'category': 'Hitap İşlemleri', 'wf': 'crud_hitap'},
{'name': 'HizmetIHS', 'verbose_name': 'İtibari Hizmet', 'field': 'personel_id',
'category': 'Hitap İşlemleri', 'wf': 'crud_hitap'},
{'name': 'HizmetIstisnaiIlgi', 'verbose_name': 'İstisnai İlgi', 'field': 'personel_id',
'category': 'Hitap İşlemleri', 'wf': 'crud_hitap'},
{'name': 'AskerlikKayitlari', 'verbose_name': 'Askerlik Kayıtları', 'field': 'personel_id',
'category': 'Hitap İşlemleri', 'wf': 'crud_hitap'},
],
'ogrenci': [
{'name': 'Borc', 'verbose_name': 'Harç Bilgileri', 'field': 'ogrenci_id'},
{'name': 'OgrenciProgram', 'verbose_name': 'Öğrenci Mezuniyet', 'wf': 'ogrenci_mezuniyet',
'field': 'ogrenci_id'},
{'name': 'DegerlendirmeNot', 'field': 'ogrenci_id'},
{'name': 'OgrenciDersi', 'field': 'ogrenci_id'},
{'name': 'Ogrenci', 'field': 'object_id', 'wf': 'ogrenci_kimlik_bilgileri',
'verbose_name': 'Kimlik Bilgileri'},
{'name': 'Ogrenci', 'field': 'object_id', 'wf': 'ogrenci_iletisim_bilgileri',
'verbose_name': 'İletişim Bilgileri'},
{'name': 'OncekiEgitimBilgisi', 'verbose_name': 'Önceki Eğitim Bilgileri',
'field': 'ogrenci_id'},
# {'name': 'OgrenciProgram', 'field': 'ogrenci_id', 'wf': 'ders_ekle',
# 'verbose_name': 'Ders Ekle'},
{'name': 'OgrenciProgram', 'field': 'ogrenci_id', 'wf': 'danisman_atama',
'verbose_name': 'Danışman Atama'},
{'name': 'DondurulmusKayit', 'verbose_name': 'Kayıt Dondurma', 'wf': 'kayit_dondur',
'field': 'ogrenci_id'},
{'name': 'OgrenciProgram', 'verbose_name': 'Mazaretli Öğrenci',
'wf': 'mazeretli_ders_kaydi', 'field': 'ogrenci_id'},
{'name': 'DegerlendirmeNot', 'verbose_name': 'Not Düzenleme',
'wf': 'ogrenci_isleri_not_duzenleme',
'field': 'ogrenci_id'},
{'name': 'OgrenciProgram', 'verbose_name': 'Kayıt Sil', 'wf': 'kayit_sil',
'field': 'ogrenci_id'},
{'name': 'OgrenciDersi', 'verbose_name': 'Ders Ekle', 'wf': 'ogrenci_ders_atama',
'field': 'ogrenci_id'}
],
}
AUTO_IMPORT_MODULES.extend([
'ulakbus.views.system',
])
ZATO_SERVER = os.environ.get('ZATO_SERVER', 'http://localhost:11223')
ENABLE_SIMPLE_CRUD_MENU = False
ALLOWED_ORIGINS += [
'http://ulakbus.net',
'http://www.ulakbus.net',
'http://dev.zetaops.io',
'http://nightly.zetaops.io',
'http://nightly.ulakbus.net'
]
# University ID
UID = 173500
UNIVERSITY_NAME = os.environ.get('UNIVERSITY_NAME', 'ULAKBUS')
UNIVERSITY_LOGO = os.environ.get('UNIVERSITY_LOGO', None)
FILE_MANAGER = 'ulakbus.lib.s3_file_manager.S3FileManager'
ALLOWED_FILE_TYPES = {
'png': ('image/png', 'png'),
'txt': ('text/plain', 'txt'),
'jpg': ('image/jpeg', 'jpg'),
'jpeg': ('image/jpeg', 'jpg'),
'pdf': ('application/pdf', 'pdf'),
'doc': ('application/msword', 'doc'),
'xls': ('application/vnd.ms-excel', 'xls'),
'ppt': ('application/vnd.ms-powerpoint', 'ppt'),
'pptx': ('application/vnd.openxmlformats-officedocument.presentationml.presentation', 'pptx'),
'xlsx': ('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'xlsx'),
'docx': ('application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'docx'),
'csv': ('text/csv', 'csv'),
'zip': ('application/zip', 'zip')
}
S3_PROXY_URL = os.environ.get('S3_PROXY_URL')
S3_ACCESS_KEY = os.environ.get('S3_ACCESS_KEY')
S3_SECRET_KEY = os.environ.get('S3_SECRET_KEY')
S3_PUBLIC_URL = os.environ.get('S3_PUBLIC_URL')
S3_PROXY_PORT = os.environ.get('S3_PROXY_PORT', '80')
S3_BUCKET_NAME = 'ulakbus'
QUICK_MENU = [
'kadro_islemleri',
# 'izin',
'akademik_takvim',
'ders_hoca_sube_atama',
'ders_ekle',
'Birim',
'Ders',
'Program'
]
MAX_NUM_DROPDOWN_LINKED_MODELS = 20
PERMISSION_PROVIDER = 'ulakbus.models.auth.ulakbus_permissions'
ERROR_MESSAGE_500 = "DEMO Sisteminde güncelleme nedeniyle kesinti ve hata olabilir. " \
"Şimdi bunlardan birini görüyorsunuz. Lütfen daha sonra tekrar deneyiniz"
SICIL_PREFIX = "KON"
#: User search method of messaging subsystem will work on these fields
MESSAGING_USER_SEARCH_FIELDS = ['name', 'surname']
#: Unit search method of messaging subsystem will work on these fields
MESSAGING_UNIT_SEARCH_FIELDS = ['name', ]
MESSAGES = {
'lane_change_invite_title': 'Etkinlik gerekiyor!',
'lane_change_invite_body': 'adlı iş akışı sizin etkinliğinizi gerektiriyor, '
'görev yöneticinizden ilgili iş akışına ulaşabilirsiniz.',
'lane_change_message_title': 'Teşekkürler!',
'lane_change_message_body': 'Bu iş akışında şuan için gerekli adımları tamamladınız. '
'İlgili kişiler, iş akışına katılmaları için haberdar edildiler.',
}
DATA_GRID_PAGE_SIZE = 100
# Personnel travel allowance and per diem coefficients change every year.
# Current travel allowance and per diem coefficients for 2017
EK_GOSTERGE_8K = 48.25
EK_GOSTERGE_5800_8K = 45
EK_GOSTERGE_3K_5800 = 42.25
DERECE_1_4 = 37.25
DERECE_5_15 = 36.25
KM_KATSAYISI = 5 / 100.0
|
zetaops/ulakbus
|
ulakbus/settings.py
|
Python
|
gpl-3.0
| 12,167
|
# -*- coding: utf-8 -*-
# crunchyfrog - a database schema browser and query tool
# Copyright (C) 2008 Andi Albrecht <albrecht.andi@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Core objects"""
import gobject
import sys
import os
from inspect import isclass
import imp
import zipimport
import logging
log = logging.getLogger("PLUGINS")
from gettext import gettext as _
PLUGIN_TYPE_GENERIC = 0
PLUGIN_TYPE_EXPORT = 2
PLUGIN_TYPE_EDITOR = 3
from cf import USER_PLUGIN_DIR, PLUGIN_DIR
from cf.plugins.mixins import InstanceMixin, MenubarMixin, EditorMixin
from cf.plugins.mixins import UserDBMixin
class GenericPlugin(gobject.GObject):
"""Plugin base class"""
id = None
name = None
description = None
long_description = None
icon = None
author = None
license = None
homepage = None
version = None
has_custom_options = False
plugin_type = PLUGIN_TYPE_GENERIC
INIT_ERROR = None
def __init__(self, app):
"""
The constructor of this class takes one argument:
:Parameter:
app
`CFApplication`_ instance
.. _CFApplication: cf.app.CFApplication.html
"""
self.app = app
self.__gobject_init__()
@classmethod
def run_custom_options_dialog(cls, app):
"""Runs a preferences dialog
If ``has_custom_options`` is ``True`` this method will
be called if the user clicks on the *Configure plugin* button
in the preferences dialog.
:Parameter:
app
`CFApplication`_ instance
.. _CFApplication: cf.app.CFApplication.html
"""
pass
def shutdown(self):
"""Called when the plugin is deactivated."""
pass
class BottomPanePlugin(GenericPlugin):
"""A plugin that lives in the bottom pane."""
class ExportPlugin(GenericPlugin):
"""Export filter base class"""
icon = "gtk-save-as"
file_filter_name = None
file_filter_mime = []
file_filter_pattern = []
has_options = False
plugin_type = PLUGIN_TYPE_EXPORT
def __init__(self, app):
GenericPlugin.__init__(self, app)
def export(self, description, rows, options=dict()):
raise NotImplementedError
def show_options(self, description, rows):
return dict()
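# A minimal sketch of an export filter subclass, assuming `description` holds the
# column names of the result set; the id/name metadata and the CSV behaviour are
# illustrative and do not correspond to an existing plugin.
class CSVExportExample(ExportPlugin):
    id = "crunchyfrog.export.csv_example"
    name = "CSV export (example)"
    description = "Writes a result set to a CSV file"
    file_filter_name = "CSV files"
    file_filter_pattern = ["*.csv"]
    def export(self, description, rows, options=dict()):
        import csv
        filename = options.get("filename", "export.csv")
        with open(filename, "wb") as fp:
            writer = csv.writer(fp)
            writer.writerow(description)  # assumed to be the column names
            writer.writerows(rows)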
# Key: plugin type
# Value: 2-tuple (label, expected class)
PLUGIN_TYPES_MAP = {
PLUGIN_TYPE_GENERIC : (_(u"Miscellaneous"), GenericPlugin),
PLUGIN_TYPE_EXPORT : (_(u"Export filter"), ExportPlugin),
PLUGIN_TYPE_EDITOR : (_(u"Editor"), GenericPlugin),
}
class PluginManager(gobject.GObject):
"""Plugin manager
An instance of this class is accessible through the ``plugins``
attribute of an `CFApplication`_ instance.
:Signals:
plugin-added
``def callback(manager, plugin, user_param1, ...)``
Emitted when a plugin was added to the registry.
plugin-removed
``def callback(manager, plugin, user_param1, ...)``
Emitted when a plugin is removed from the registry.
plugin-active
``def callback(manager, plugin, active, user_param1, ...)``
Emitted when a plugin is activated or deactivated. `active`
is either ``True`` or ``False``.
.. _CFApplication: cf.app.CFApplication.html
"""
plugin_types = PLUGIN_TYPES_MAP
__gsignals__ = {
"plugin-added" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,)),
"plugin-removed" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,)),
"plugin-active" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT, bool)),
}
def __init__(self, app):
"""
The constructor of this class takes one argument:
:Parameter:
app
`CFApplication`_ instance
.. _CFApplication: cf.app.CFApplication.html
"""
self.app = app
self.__gobject_init__()
self.__plugins = dict()
self.__active_plugins = dict()
self.app.register_shutdown_task(self.on_app_shutdown, "")
self.app.cb.connect("instance-created", self.on_instance_created)
self.refresh()
self._first_run()
def on_app_shutdown(self):
for plugin in self.__active_plugins.values():
plugin.shutdown()
def on_instance_created(self, cb, instance):
for plugin in self.__active_plugins.values():
if isinstance(plugin, InstanceMixin):
self.init_instance_mixins(plugin, instance)
def _first_run(self):
if not self.app.options.first_run:
return
def get_plugins(self, plugin_type, active_only=False):
"""Returns a list of plugins.
:Parameter:
plugin_type
a ``PLUGIN_TYPE_*`` constant
active_only
If set to ``True`` only activated plugins are returned.
:Returns:
List of `plugins`_
.. _plugins: cf.plugins.core.GenericPlugin.html
"""
ret = list()
if active_only:
plugins = self.__active_plugins.values()
else:
plugins = self.__plugins.values()
for item in plugins:
if item.plugin_type == plugin_type:
ret.append(item)
return ret
def _get_modules(self, path):
modules = []
if not os.access(path, os.R_OK) or not os.path.exists(path):
logging.warning('Plugin path %s missing', path)
return modules
if path not in sys.path:
sys.path.insert(0, path)
for item in os.listdir(path):
if item.startswith(".") or item.startswith("_"):
continue
name, ext = os.path.splitext(item)
if ext and ext not in [".zip", ".py"]:
continue
elif not ext and not os.path.isfile(os.path.join(path, item,
'__init__.py')):
continue
if ext == ".zip":
sys.path.insert(0, os.path.join(path, item))
importer = zipimport.zipimporter(os.path.join(path, item))
importer = importer.find_module(name)
if not importer:
continue
mod = importer.load_module(name)
modules.append(mod)
else:
try:
modinfo = imp.find_module(name)
except ImportError, err:
logging.error('Failed to load module %s: %s', name, err)
continue
try:
mod = imp.load_module(name, *modinfo)
except Exception, err:
logging.error('Failed to load module %s: %s', name, err)
try: del sys.modules[name]
except KeyError: pass
continue
modules.append(mod)
return modules
def _get_plugins(self, module):
plugins = []
for name in dir(module):
obj = getattr(module, name)
if isclass(obj) and issubclass(obj, GenericPlugin) \
and obj not in [GenericPlugin, ExportPlugin]:
plugins.append(obj)
return plugins
    def refresh(self):
        """Refreshes the plugin registry.
        This method is called when the contents of a plugin folder
        change.
"""
from cf.plugins import builtin
modules = [builtin]
for path in [PLUGIN_DIR, USER_PLUGIN_DIR]:
modules += self._get_modules(path)
plugins = []
for module in modules:
plugins += self._get_plugins(module)
ids_found = []
for plugin in plugins:
if not self.__plugins.has_key(plugin.id):
self.__plugins[plugin.id] = plugin
l = self.app.config.get("plugins.active", [])
if plugin.id in l and not plugin.INIT_ERROR:
self.set_active(plugin, True)
elif plugin.id in l and plugin.INIT_ERROR:
self.set_active(plugin, False)
self.emit("plugin-added", plugin)
ids_found.append(plugin.id)
for id, plugin in self.__plugins.items():
if id not in ids_found:
l = self.app.config.get("plugins.active", [])
if id in l:
self.set_active(plugin, False)
del self.__plugins[id]
self.emit("plugin-removed", plugin)
def set_active(self, plugin, active, instance=None):
"""Activates / deactivates a plugin
:Parameter:
plugin
Plugin to activate / deactivate
active
If ``True`` the plugin gets activated, otherwise it will
be deactivated.
"""
id = None
for key, value in self.__plugins.items():
if value == plugin:
id = key
break
if not id:
return
l = self.app.config.get("plugins.active", [])
if active:
x = plugin(self.app)
self.__active_plugins[plugin] = x
if isinstance(x, UserDBMixin):
x.userdb_set(self.app.userdb)
x.userdb_init()
if isinstance(x, InstanceMixin):
for instance in self.app.get_instances():
self.init_instance_mixins(x, instance)
if id not in l:
l.append(id)
elif not active:
x = self.__active_plugins.get(plugin, None)
if x:
if isinstance(x, InstanceMixin):
for instance in self.app.get_instances():
self.unload_instance_mixins(x, instance)
x.shutdown()
del self.__active_plugins[plugin]
if id in l:
l.remove(id)
self.app.config.set("plugins.active", l)
self.emit("plugin-active", plugin, active)
def is_active(self, plugin):
"""Returns ``True`` if the plugin is active
:Parameter:
plugin
A plugin
:Returns: ``True`` if the plugin is active.
"""
if isinstance(plugin, GenericPlugin):
plugin = plugin.__class__
return self.__active_plugins.has_key(plugin)
def by_id(self, id, active_only=True):
"""Returns a plugin by its id
:Parameter:
id
Plugin ID
active_only
If ``True`` only active plugins are returned.
:Returns: `Plugin`_ or ``None``
        .. _Plugin: cf.plugins.core.GenericPlugin.html
"""
plugins = self.__active_plugins.values()
if not active_only:
plugins += self.__plugins.values()
for plugin in plugins:
if plugin.id == id:
return plugin
return None
def init_instance_mixins(self, plugin, instance):
plugin.init_instance(instance)
def unload_instance_mixins(self, plugin, instance):
if isinstance(plugin, MenubarMixin):
plugin.menubar_unload(instance.xml.get_widget("menubar"), instance)
def editor_notify(self, editor, instance):
"""Called by an instance when the current editor has changed
:Parameter:
editor
an editor or ``None``
instance
an instance
"""
for plugin in self.__active_plugins.values():
if isinstance(plugin, EditorMixin):
plugin.set_editor(editor, instance)
|
angvp/angelvelasquez-crunchyfrog
|
cf/plugins/core.py
|
Python
|
gpl-3.0
| 12,547
|
"""
IPS ROM patch file format
Implemented according to http://zerosoft.zophar.net/ips.php
"""
import struct
from collections import namedtuple
MAGIC = b'PATCH'
EOF = b'EOF'
EntryHeader = namedtuple('EntryHeader', [
'size',
'rle_size'])
EntryHeader.format = '2H'
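# A minimal reader sketch for the record layout described in the spec linked above
# (3-byte big-endian offset, 2-byte size, and a 2-byte RLE run length plus one
# repeated byte when size == 0). The function name and the (offset, payload) output
# format are assumptions, not part of the original stub.
def iter_records(data):
    if not data.startswith(MAGIC):
        raise ValueError('not an IPS patch: missing PATCH magic')
    pos = len(MAGIC)
    while data[pos:pos + 3] != EOF:
        (offset,) = struct.unpack('>I', b'\x00' + data[pos:pos + 3])
        (size,) = struct.unpack('>H', data[pos + 3:pos + 5])
        pos += 5
        if size:
            payload = data[pos:pos + size]
            pos += size
        else:
            # RLE record: run length followed by a single byte to repeat
            (rle_size,) = struct.unpack('>H', data[pos:pos + 2])
            payload = data[pos + 2:pos + 3] * rle_size
            pos += 3
        yield offset, payload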
|
Schala/format-scripts
|
ips.py
|
Python
|
gpl-3.0
| 267
|
import datetime
import json
import logging
import re
import time
from multiprocessing import Process, Queue
import requests
class TheTVDBAPI:
API_KEY = "87EF0C7BB9CA4283"
url = 'https://api.thetvdb.com'
def __init__(self, test=False):
logging.getLogger("requests").setLevel(logging.WARNING)
self.jwt_token = None
if not test:
self.jwt_token = self.login()
self.headers = {'Accept-Language': 'en', 'Accept': 'application/json',
'Authorization': 'Bearer {}'.format(self.jwt_token)}
def login(self):
logging.debug('Authenticating against TVDB-API...')
data = json.dumps({'apikey': self.API_KEY})
headers = {'Content-Type': 'application/json'}
try:
ret = requests.post(self.url + "/login", data=data, headers=headers, timeout=10)
if ret.ok:
logging.debug(' Authenticated!')
ret_j = json.loads(ret.text)
return ret_j.get('token')
if 400 < ret.status_code < 499:
logging.error(' Authentication failed! Invalid API-Key?')
except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError):
logging.error(' Authentication failed! API down?')
def get_shows_by_search(self, search, year=None):
logging.debug('Getting shows matching "{}"...'.format(search))
response = self._make_request('/search/series?name={}'.format(search))
responses = self._validate_response_to_json(response)
if year:
responses = self._filter_search_by_year(responses, year)
if len(responses) > 1:
return [s for s in self._jsons_to_show_threaded(responses) if s]
return [self._json_to_show(r) for r in responses]
def get_show_by_imdb_id(self, imdb_id):
logging.debug('Getting show by imdb_id "{}"...'.format(imdb_id))
response = self._make_request('/search/series?imdbId={}'.format(imdb_id))
responses = self._validate_response_to_json(response)
return self._json_to_show(responses[0]) if responses else None
def _json_to_show(self, response):
        show = TVDBShow(response, self) if response else None
        if show:
            show.fill_data()
        return show
def _jsons_to_show_threaded(self, responses):
queue = Queue()
process_list = []
for response in responses:
proc_ = Process(target=self._json_to_show_threaded, args=(response, queue))
proc_.start()
process_list.append(proc_)
[p.join(5) for p in process_list]
return [queue.get_nowait() for _ in range(queue.qsize())]
def _json_to_show_threaded(self, response, queue):
queue.put(self._json_to_show(response))
def _filter_search_by_year(self, responses, year):
return [r for r in responses if str(self.get_airdate(r).year) == str(year)]
@staticmethod
def _validate_response_to_json(response):
if response is not None and response.ok:
ret_j = json.loads(response.text)
return ret_j.get('data', []) if not ret_j.get('errors') else ret_j.get('errors')
return {}
def _make_request(self, url, data=None):
""" Do a request, take care of timeouts and exceptions """
if data is None:
data = {}
for _ in range(3):
try:
return requests.get(self.url + url, data=data, headers=self.headers, timeout=10)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
time.sleep(1)
except Exception as e:
logging.debug(
'Caught Exception "{}" while making a get-request to "{}"'.format(e.__class__, url))
return
def get_episode_data(self, tvdb_show, page=1):
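        """Fetch all episode records for the show, following the API's pagination links."""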
if page == 1:
logging.debug('Getting episode_data for show "{}"...'.format(repr(tvdb_show)))
data = []
response = self._make_request('/series/{}/episodes?page={}'.format(tvdb_show.tvdb_id, page))
if response is not None and response.ok:
ret_j = json.loads(response.text)
data.extend(ret_j.get('data', []))
errors = ret_j.get('errors')
if errors and not data:
logging.warning('Getting episode_data for show "{}" showed errors: {}'.format(tvdb_show, errors))
return []
next_ = ret_j.get('links', {}).get('next')
if next_ is not None:
data.extend(self.get_episode_data(tvdb_show, page=next_))
return data
def get_imdb_id_from_tvdb_id(self, tvdb_id):
response = self._make_request('/series/{}'.format(tvdb_id))
response_j = self._validate_response_to_json(response)
return response_j.get('imdbId', 'tt0')
@staticmethod
def get_airdate(data):
try:
return datetime.datetime.strptime(data.get('firstAired'), '%Y-%m-%d').date()
except (ValueError, TypeError):
return datetime.datetime.fromtimestamp(0).date()
def __bool__(self):
return bool(self.jwt_token)
class TVDBShow:
def __init__(self, json_result, api):
self.raw = json_result
self.api = api
self.aired = self.api.get_airdate(json_result)
self.name = json_result.get('seriesName', '')
self.overview = json_result.get('overview', '') if json_result.get('overview') else ''
self.tvdb_id = json_result.get('id', 0)
self.imdb_id = ''
self.seasons = {}
self.episodes = []
def fill_data(self):
if self.imdb_id:
return
self.imdb_id = self.api.get_imdb_id_from_tvdb_id(self.tvdb_id)
for ep_j in self.api.get_episode_data(self):
self._add_episode(Episode(self, ep_j))
if not self.seasons:
logging.error('Could not get show data of "{}"!'.format(self.name))
def _add_episode(self, episode):
if episode:
self.episodes.append(episode)
if episode.season in self.seasons.keys():
self.seasons[episode.season].add_episode(episode)
else:
self.seasons[episode.season] = Season(self, episode)
def get_newest_episode(self):
today = datetime.date.today()
episodes_ = [ep for ep in self.episodes if ep.date <= today]
return max(episodes_, key=lambda ep: ep.date) if episodes_ else {}
def get_episodes_since(self, date):
today = datetime.date.today()
return [ep for ep in self.episodes if date <= ep.date <= today]
def __repr__(self):
return '{} [{}]'.format(self.name, self.imdb_id) if self.imdb_id else self.name
def __str__(self):
return self.name
def str_verbose(self):
return "{}\n{}".format(str(self), '\t\n'.join(map(lambda f: f.str_verbose(), self.seasons.values())))
def __bool__(self):
return bool(self.name) and bool(self.seasons)
def get_storage_name(self):
return '{} [{}]'.format(self.name, self.imdb_id)
def get_brief(self):
return '{:25} [{:9}] | {} | {:3} episodes | {}'.format(self.name[:25], self.imdb_id, self.aired.year,
len(self.episodes), self.overview[:40])
def get_search_query(self):
""" Get the show name to use for searching torrents.
Remove the year of the show, like Doctor Who (2005), for that."""
return re.sub(r'\(\d{4}\)(?<=$)', '', self.name).strip()
def __len__(self):
return len(self.episodes)
class Season:
FORMAT = "Season {}"
def __init__(self, show, init_episode):
self.show = show
self.number = init_episode.season
self.episodes = [init_episode]
def add_episode(self, episode):
if episode not in self.episodes:
self.episodes.append(episode)
def get_aired_episodes(self):
today = datetime.date.today()
return [ep for ep in self.episodes if ep.date <= today]
def get_season_from_string(self, string_):
match = re.match(self.FORMAT.format('(\d+)'), string_)
return int(match.group(1)) if match else 0
def get_regex(self):
return re.compile(r'(?i)s?0*(?P<season>{})(?=\D)'.format(self.number))
def __repr__(self):
return self.FORMAT.format(self.number)
def str_short(self):
return "s{:02}".format(self.number)
def str_verbose(self):
return "{}: {}".format(str(self), ', '.join(map(lambda f: f.str_verbose(), self.episodes)))
class Episode:
def __init__(self, show, json_data):
self.show = show
self.season = self._get_int_if_true(json_data, 'airedSeason', 0)
self.episode = self._get_int_if_true(json_data, 'airedEpisodeNumber', 0)
self.name = json_data.get('episodeName', '')
self.date = self.show.api.get_airdate(json_data)
self.absolute_episode_number = self._get_int_if_true(json_data, 'absoluteNumber', 0)
@staticmethod
def _get_int_if_true(data, value, default):
item = data.get(value, default)
if type(item) is int:
return item
if type(item) is str and item.isdigit():
return int(item)
return default
def get_regex(self):
return re.compile(r'(?i)s?0*(?P<season>{s.season})\W?[ex]0*(?P<episode>{s.episode})(?!\d)'.format(s=self))
def __repr__(self):
return "s{s.season:02}e{s.episode:02}".format(s=self)
def str_short(self):
return self.__repr__()
def str_verbose(self):
return "{}: {} [#{}]".format(str(self), self.name, self.absolute_episode_number)
def __bool__(self):
return bool(self.name)
def __eq__(self, other):
return (other and self.season == other.season and self.episode == other.episode and
self.absolute_episode_number == other.absolute_episode_number and self.name == other.name)
if __name__ == '__main__':
import sys
api_ = TheTVDBAPI()
show_ = api_.get_show_by_imdb_id(sys.argv[1])
print(show_.get_newest_episode())
|
sistason/show_archive_manager
|
a_argument_to_show/thetvdb_api.py
|
Python
|
gpl-3.0
| 10,199
|
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
("tutorialv2", "0009_publishedcontent_authors"),
]
operations = [
migrations.AddField(
model_name="publishedcontent",
name="sizes",
field=models.CharField(
default=b"{}", max_length=512, verbose_name=b"Tailles des fichiers t\xc3\xa9l\xc3\xa9chargeables"
),
preserve_default=True,
),
]
|
ChantyTaguan/zds-site
|
zds/tutorialv2/migrations/0010_publishedcontent_sizes.py
|
Python
|
gpl-3.0
| 502
|
import tensorflow as tf
# 2D convolutional neural network which can be mixed with fully-connected layers
class ConvNet:
#input_shape [rows columns]
def __init__(self, input_shape, layer_list):
self.session = tf.InteractiveSession()
self.input_shape = input_shape
for layer in layer_list:
assert isinstance(layer, Layer), "the item in the layer list is not layer"
self.layer_list = layer_list
self.input_tensor = tf.placeholder(tf.float32, shape=[None, self.input_shape[0]*self.input_shape[1]])
self.output = self.forward_computation()
    # generate the operation for the forward computation
def forward_computation(self):
forward_tensor = self.input_tensor
if len(forward_tensor.get_shape())!=4:
assert len(forward_tensor.get_shape())==2, 'the input data dimension is wrong'
forward_tensor = tf.reshape(forward_tensor, [-1,self.input_shape[0],self.input_shape[1],1])
for layer in self.layer_list:
if isinstance(layer, FullyLayer):
forward_tensor = tf.reshape(forward_tensor, [-1, layer.get_weight_shape()[0].value])
forward_tensor = layer.process(forward_tensor)
return forward_tensor
    # next_batch: a function which returns a feed_dict containing a batch of the training set
    #   (a 2d matrix in which each row is an input vector)
    # loss_function: a function which creates the operation producing the loss value
    # logging: a function which may be called every 100 steps; the loss value will be fed in
def train(self, loss_function, steps ,next_batch , logging):
loss_value = loss_function(self)
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss_value)
self.session.run(tf.initialize_all_variables())
for i in range(steps):
batch = next_batch(self)
train_step.run(batch)
if i%100==0:
logging(self, loss_value.eval(batch))
print 'train has finished'
# the base class, representing a conv layer or fully-connected layer
class Layer:
def __init__(self):
pass
#initialize the weight randomly
def weight_variable(self,shape):
initial = tf.truncated_normal(shape, stddev = 0.1)
return tf.Variable(initial)
#initialize the bias randomly
def bias_variable(self,shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
    # process the output from the previous layer and return this layer's output
def process(self,x):
pass
#the convolutional layer
class ConvLayer(Layer):
# weight_shape[rows columns input_feature_number output_feature_number]
# pooling_strides[1 rows columns 1]
def __init__(self, weight_shape, pooling_strides, activation_func=tf.nn.relu):
self.__weight=Layer.weight_variable(self,weight_shape)
self.__bias=Layer.bias_variable(self,[weight_shape[3]])
self.__pooling_strides = pooling_strides
self.__activation_func = activation_func
    # convolve x with W
    # x: a 4d input tensor
    # W: the weights
def conv2d(self, x, W, strides=[1, 1, 1, 1]):
return tf.nn.conv2d(x, W, strides, padding='SAME')
    # pool the feature map; the kernel size and pooling strides depend on the __pooling_strides member
    # x: a 4d input tensor, the feature maps
def pooling(self, x, pooling_type='MAX'):
if pooling_type=='MAX':
return tf.nn.max_pool(x, self.__pooling_strides, self.__pooling_strides, padding='SAME')
def process(self,x):
assert len(x.get_shape())==4, "the input shape dimension %s do not fit the requirements of the 2d convlayer"% len(x.get_shape())
assert x.get_shape()[3]==self.__weight.get_shape()[2], "the input feature dimension %s do not fit the features of the 2d convlayer"% x.get_shape()[3]
feature_map = self.__activation_func(self.conv2d(x,self.__weight)+self.__bias)
return self.pooling(feature_map)
#the fully-connected layer
class FullyLayer(Layer):
def __init__(self, weight_shape, activation_func=tf.nn.sigmoid):
self.__weight=Layer.weight_variable(self,weight_shape)
self.__bias=Layer.bias_variable(self,[weight_shape[1]])
self.__activation_func=activation_func
def get_weight_shape(self):
return self.__weight.get_shape()
def process(self, x):
assert len(x.get_shape())==2, "the input shape dimension %s do not fit the requirements of the fully-connected layer"%len(x.get_shape())
assert x.get_shape()[1]==self.__weight.get_shape()[0], "the input feature dimension %s do not fit the weight number of the fully-connected layer"%x.get_shape()[1]
return self.__activation_func(tf.matmul(x, self.__weight) + self.__bias)
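# A minimal usage sketch for the classes above; the 28x28 input, layer shapes, batch
# size and random data are illustrative assumptions, and the old-style TensorFlow API
# (InteractiveSession, initialize_all_variables) used by the classes is assumed to be
# available.
if __name__ == '__main__':
    import numpy as np
    layers = [ConvLayer([5, 5, 1, 32], [1, 2, 2, 1]),
              ConvLayer([5, 5, 32, 64], [1, 2, 2, 1]),
              FullyLayer([7 * 7 * 64, 10], tf.nn.softmax)]
    net = ConvNet([28, 28], layers)
    labels = tf.placeholder(tf.float32, shape=[None, 10])
    def cross_entropy(network):
        # loss_function receives the ConvNet instance and returns a scalar op
        return -tf.reduce_sum(labels * tf.log(network.output + 1e-10))
    def next_batch(network):
        # next_batch must return a feed_dict; random data stands in for a dataset
        xs = np.random.rand(8, 28 * 28)
        ys = np.eye(10)[np.random.randint(0, 10, 8)]
        return {network.input_tensor: xs, labels: ys}
    def logging(network, loss_value):
        print 'loss: %s' % loss_value
    net.train(cross_entropy, 200, next_batch, logging)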
|
kumkee/SURF2016
|
src/network.py
|
Python
|
gpl-3.0
| 4,798
|
#! /usr/bin/python
from com.smartitengineering.cms.api.content.template import VariationGenerator
class MyVar(VariationGenerator):
def getVariationForField(self, field, params):
return field.value.value
if __name__ == "__main__":
rep = MyVar
|
SmartITEngineering/smart-cms
|
spi-modules/content-spi-template-providers-impl/src/test/resources/scripts/python/var-script.py
|
Python
|
gpl-3.0
| 256
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from xbmctorrent import plugin
from xbmctorrent.scrapers import scraper
from xbmctorrent.ga import tracked
from xbmctorrent.caching import cached_route, shelf
from xbmctorrent.utils import ensure_fanart
from xbmctorrent.library import library_context
BASE_URL = "%s/" % plugin.get_setting("base_eztv")
HEADERS = {
"Referer": BASE_URL,
}
SHOW_LIST_CACHE_TTL = 24 * 3600 # 24 hours caching
NEW_EPISODES = "New Episodes"
# Logo found on http://thesimurg.deviantart.com/art/Logo-for-EZTV-57874544
@scraper("EZTV - Series", "http://i.imgur.com/XcH6WOg.jpg")
@plugin.route("/eztv")
@ensure_fanart
@tracked
def eztv_index():
import string
for letter in [NEW_EPISODES, "1", "2", "3", "4", "5", "6", "7", "8", "9"] + list(string.ascii_uppercase):
yield {
"label": letter,
"path": plugin.url_for("eztv_shows_by_letter", letter=letter),
"is_playable": False,
}
@plugin.route("/eztv/shows/<letter>")
@cached_route(ttl=SHOW_LIST_CACHE_TTL, content_type="tvshows")
@ensure_fanart
@tracked
def eztv_shows_by_letter(letter):
import re
import xbmc
import xbmcgui
from bs4 import BeautifulSoup
from contextlib import nested, closing
from itertools import izip, groupby
from concurrent import futures
from xbmctorrent.scrapers import ungenerate
from xbmctorrent.utils import terminating, url_get, SafeDialogProgress
from xbmctorrent import tvdb
with shelf("it.eztv.shows") as eztv_shows:
if not eztv_shows:
response = url_get("%s/showlist/" % BASE_URL, headers=HEADERS)
soup = BeautifulSoup(response, "html5lib")
nodes = soup.findAll("a", "thread_link")
for node in nodes:
show_id, show_named_id = node["href"].split("/")[2:4]
show_name = node.text
#skip this node
if show_name == "":
continue
show_first_letter = show_name[0].lower()
eztv_shows.setdefault(show_first_letter, {}).update({
show_id: {
"id": show_id,
"named_id": show_named_id,
"name": node.text,
}
})
try:
if letter == NEW_EPISODES:
pass
else:
shows_list = sorted(eztv_shows[letter.lower()].values(), key=lambda x: x["name"].lower())
except:
shows_list = ""
if letter == NEW_EPISODES:
pass
else:
with closing(SafeDialogProgress(delay_close=0)) as dialog:
dialog.create(plugin.name)
dialog.update(percent=0, line1="Fetching serie information...", line2="", line3="")
state = {"done": 0}
def on_serie(future):
data = future.result()
state["done"] += 1
dialog.update(
percent=int(state["done"] * 100.0 / len(shows_list)),
line2=data and data["seriesname"] or "",
)
with futures.ThreadPoolExecutor(max_workers=5) as pool_tvdb:
tvdb_list = [pool_tvdb.submit(tvdb.search, show["name"], True) for show in shows_list]
[future.add_done_callback(on_serie) for future in tvdb_list]
while not all(job.done() for job in tvdb_list):
if dialog.iscanceled():
return
xbmc.sleep(100)
if letter == NEW_EPISODES:
#A bit of a hack, but what the hell ;)
response = url_get("%s/sort/100/" % (BASE_URL), headers=HEADERS)
soup = BeautifulSoup(response, "html5lib")
items = soup.findAll("tr", "forum_header_border")
for item in items :
item_string = str(item)
MAGNET_CONST = "magnet:"
start_pos_of_magnet = item_string.find(MAGNET_CONST , 0)
if start_pos_of_magnet > 0:
end_pos_of_magnet = item_string.find('"' , start_pos_of_magnet + len(MAGNET_CONST) + 1)
magnet_link = item_string[start_pos_of_magnet:end_pos_of_magnet]
TITLE_CONST = "title="
start_pos_of_first_title = item_string.find(TITLE_CONST , 0)
#looking for the second "title="
start_pos_of_second_title = item_string.find(TITLE_CONST , start_pos_of_first_title + len(TITLE_CONST) + 1)
end_pos_of_second_title = item_string.find('"' , start_pos_of_second_title + len(TITLE_CONST) + 1)
title = item_string[start_pos_of_second_title + len(TITLE_CONST) + 1:end_pos_of_second_title]
item = {
"label": title,
}
item.setdefault("info", {}).update({
"tvshowtitle": "",
"title": title,
})
stream_info = {}
item.update({
"path": plugin.url_for("play", uri=magnet_link),
"stream_info": {"video": stream_info},
"is_playable": True,
})
yield item
else:
tvdb_list = [job.result() for job in tvdb_list]
for i, (eztv_show, tvdb_show) in enumerate(izip(shows_list, tvdb_list)):
if tvdb_show:
item = tvdb.get_list_item(tvdb_show)
item.update({
"path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
})
yield item
else:
yield {
"label": eztv_show["name"],
"path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
}
def get_episode_data_from_name(name):
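    """Return the season and episode numbers parsed from a name like 'S02E05' or '2x05'; 0, 0 if neither pattern matches."""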
import re
res = re.search("S(\d+)E(\d+)", name)
if res:
return map(int, res.groups())
res = re.search("(\d+)x(\d+)", name)
if res:
return map(int, res.groups())
return 0, 0
@plugin.route("/eztv/shows/<show_id>/seasons")
@ensure_fanart
@tracked
def eztv_get_show_seasons(show_id):
import random
from bs4 import BeautifulSoup
from itertools import groupby
from concurrent import futures
from xbmctorrent.utils import first, terminating, url_get
from xbmctorrent import tvdb
plugin.set_content("seasons")
tvdb_id = first(plugin.request.args.get("tvdb_id"))
with futures.ThreadPoolExecutor(max_workers=2) as pool:
def _eztv_get_show():
plugin.log.info("Getting show")
            # Rebinding show_id inside this closure would make it a local name and
            # raise UnboundLocalError, so use a separate name for the fallback id.
            current_show_id = 17 if show_id == NEW_EPISODES else show_id
            response = url_get("%s/shows/%s/" % (BASE_URL, current_show_id), headers=HEADERS)
plugin.log.info("Got show")
return BeautifulSoup(response, "html5lib")
soup = pool.submit(_eztv_get_show)
if tvdb_id:
tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])
soup = soup.result()
fanarts = []
if tvdb_id:
tvdb_show = tvdb_show.result()
fanarts = list([banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"])
random.shuffle(fanarts)
seasons = {}
for node_episode in soup.findAll("a", "epinfo"):
season, episode = get_episode_data_from_name(node_episode.text)
seasons.setdefault(season, {})[episode] = True
for i, season in enumerate(reversed(sorted(seasons.keys()))):
item = tvdb_id and tvdb.get_season_list_item(tvdb_show, season) or {}
item.update({
"label": "Season %d [%d episodes]" % (season, len(seasons[season])),
"path": plugin.url_for("eztv_get_episodes_for_season", show_id=show_id, season=season, tvdb_id=tvdb_id),
})
if fanarts:
item.setdefault("properties", {}).update({
"fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
})
yield item
@plugin.route("/eztv/shows/<show_id>/<season>/episodes")
@library_context
@ensure_fanart
@tracked
def eztv_get_episodes_for_season(show_id, season):
import copy
import random
from bs4 import BeautifulSoup
from itertools import izip
from concurrent import futures
from xbmctorrent.utils import first, terminating, url_get
from xbmctorrent import tvdb
plugin.set_content("episodes")
season = int(season)
tvdb_id = first(plugin.request.args.get("tvdb_id"))
with futures.ThreadPoolExecutor(max_workers=2) as pool:
def _eztv_get_show():
return BeautifulSoup(url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS), "html5lib")
soup = pool.submit(_eztv_get_show)
if tvdb_id:
tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])
soup = soup.result()
items = []
fanarts = []
if tvdb_id:
tvdb_show = tvdb_show.result()
fanarts = list([banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"])
random.shuffle(fanarts)
items = list(tvdb.build_episode_list_items(tvdb_show, int(season)))
text_nodes = soup.findAll("a", "epinfo")
href_nodes = soup.findAll("a", "magnet")
season_nodes = izip(text_nodes, href_nodes)
season_nodes = filter(lambda x: get_episode_data_from_name(x[0].text)[0] == season, season_nodes)
for i, (node_text, node_magnet) in enumerate(season_nodes):
season, episode = get_episode_data_from_name(node_text.text)
if tvdb_id and episode >= 0:
item = copy.deepcopy(items[int(episode) - 1])
for pattern, suffix in (("720p", "(HD)"), ("1080p", "(FullHD)"), ("repack", "(REPACK)"), ("proper", "(PROPER)")):
if pattern in node_text.text.lower():
item["label"] = "%s %s" % (item["label"], suffix)
else:
item = {
"label": node_text.text,
}
item.setdefault("info", {}).update({
"tvshowtitle": node_text.text,
"title": item["label"],
})
stream_info = {}
if "x264" in node_text.text:
stream_info["codec"] = item["info"]["video_codec"] = "h264"
if "xvid" in node_text.text.lower():
stream_info["codec"] = item["info"]["video_codec"] = "xvid"
if "720p" in node_text.text:
stream_info["width"] = 1280
stream_info["height"] = 720
if "1080p" in node_text.text:
stream_info["width"] = 1920
stream_info["height"] = 1080
item.update({
"path": plugin.url_for("play", uri=node_magnet["href"]),
"stream_info": {"video": stream_info},
"is_playable": True,
})
if fanarts:
item.setdefault("properties", {}).update({
"fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
})
yield item
|
skipmodea1/plugin.video.xbmctorrent
|
resources/site-packages/xbmctorrent/scrapers/eztv.py
|
Python
|
gpl-3.0
| 11,348
|
class Matrix:
def __init__(self,y_size_or_matrix,x_size=None,inital_value=None):
import pandas
import random
self.pandas = pandas
self.random = random
if x_size != None and inital_value != None:
self.y_size = y_size_or_matrix
self.x_size = x_size
self.data = []
for i in range(self.y_size):
self.data.append([])
for j in range(self.x_size):
self.data[i].append(inital_value)
elif x_size != None:
self.y_size = y_size_or_matrix
self.x_size = x_size
self.data = []
for i in range(self.y_size):
self.data.append([])
for j in range(self.x_size):
self.data[i].append(0)
else:
if type(y_size_or_matrix) == list:
self.x_size = 1
self.y_size = len(y_size_or_matrix)
self.data = []
for i in range(len(y_size_or_matrix)):
self.data.append([y_size_or_matrix[i]])
else:
self.x_size = y_size_or_matrix.x_size
self.y_size = y_size_or_matrix.y_size
self.data = []
for i in range(self.y_size):
self.data.append([])
for j in range(self.x_size):
self.data[i].append(y_size_or_matrix.data[i][j])
def toArray(self):
output_array = []
for i in range(self.y_size):
for j in range(self.x_size):
output_array.append(self.data[i][j])
return output_array
def clone(self,matrixvar):
self.x_size = (matrixvar.x_size)
self.y_size = (matrixvar.y_size)
self.data = []
        for i in range(self.y_size):
            self.data.append([])
            for j in range(self.x_size):
                self.data[i].append(matrixvar.data[i][j])
def get_val(self,y_pos,x_pos):
        # data is stored row-major: data[y][x]
        return self.data[y_pos][x_pos]
def randomise(self):
for i in range(self.y_size):
for j in range(self.x_size):
self.data[i][j] = (self.random.random()*2) - 1
def set_val(self,var):
for i in range(self.y_size):
for j in range(self.x_size):
self.data[i][j] = var
def printdata(self):
print()
print(self.pandas.DataFrame(self.data))
print()
def add(self,var):
for i in range(self.y_size):
for j in range(self.x_size):
self.data[i][j] += var
def sub(self,var):
for i in range(self.y_size):
for j in range(self.x_size):
self.data[i][j] -= var
def mult(self,var):
for i in range(self.y_size):
for j in range(self.x_size):
self.data[i][j] *= var
def div(self,var):
for i in range(self.y_size):
for j in range(self.x_size):
self.data[i][j] /= var
def div_safe(self,var):
        if var == 0:
            self.set_val(0)
        else:
            for i in range(self.y_size):
                for j in range(self.x_size):
                    self.data[i][j] /= var
def round(self):
for i in range(self.y_size):
for j in range(self.x_size):
self.data[i][j] = round(self.data[i][j])
def map_values(self,old_min,old_max,new_min,new_max):
new_scale = (new_max - new_min) / (old_max - old_min)
for i in range(self.y_size):
for j in range(self.x_size):
self.data[i][j] = new_min + ((self.data[i][j] - old_min) * new_scale)
def add_matrix(self,matrixvar):
output = True
tempdata = self.data
if self.y_size == matrixvar.y_size and self.x_size == matrixvar.x_size:
for i in range(self.y_size):
if output:
for j in range(self.x_size):
if output:
try:
tempdata[i][j] += matrixvar.data[i][j]
except:
output = False
else:
output = False
if output:
self.data = tempdata
return output
def sub_matrix(self,matrixvar):
output = True
tempdata = self.data
if self.y_size == matrixvar.y_size and self.x_size == matrixvar.x_size:
for i in range(self.y_size):
if output:
for j in range(self.x_size):
if output:
try:
tempdata[i][j] -= matrixvar.data[i][j]
except:
output = False
else:
output = False
if output:
self.data = tempdata
return output
def mult_matrix(self,matrixvar):
output = True
tempdata = self.data
if self.y_size == matrixvar.y_size and self.x_size == matrixvar.x_size:
for i in range(self.y_size):
if output:
for j in range(self.x_size):
if output:
try:
tempdata[i][j] *= matrixvar.data[i][j]
except:
output = False
else:
output = False
if output:
self.data = tempdata
return output
def transpose(self):
tempdata = []
y_size = self.x_size
x_size = self.y_size
for i in range(y_size):
tempdata.append([])
for j in range(x_size):
tempdata[i].append(self.data[j][i])
self.data = tempdata
self.y_size = y_size
self.x_size = x_size
def apply_funct(self,funct,arguments=None):
output = True
tempdata = self.data
if arguments != None:
for i in range(self.y_size):
if output:
for j in range(self.x_size):
if output:
try:
tempdata[i][j] = funct(tempdata[i][j],arguments)
except:
output = False
else:
for i in range(self.y_size):
if output:
for j in range(self.x_size):
if output:
try:
tempdata[i][j] = funct(tempdata[i][j])
except:
output = False
if output:
self.data = tempdata
return output
def apply_funct_matrix(self,funct,matrixvar,arguments=None):
output = True
tempdata = self.data
if self.y_size == matrixvar.y_size and self.x_size == matrixvar.x_size:
if arguments != None:
for i in range(self.y_size):
if output:
for j in range(self.x_size):
if output:
try:
tempdata[i][j] = funct(tempdata[i][j],matrixvar.data[i][j],arguments)
except:
output = False
else:
for i in range(self.y_size):
if output:
for j in range(self.x_size):
if output:
try:
tempdata[i][j] = funct(tempdata[i][j],matrixvar.data[i][j])
except:
output = False
else:
output = False
if output:
self.data = tempdata
return output
    # NOTE: this definition shadows the element-wise mult_matrix above, so
    # m1.mult_matrix(m2) returns the true matrix product of m1 and m2.
    def mult_matrix(matrix1,matrix2):
if matrix1.x_size == matrix2.y_size:
output = Matrix(matrix1.y_size,matrix2.x_size,0)
for i in range(output.y_size):
for j in range(output.x_size):
value_sum = 0
for k in range(matrix1.x_size):
value_sum += matrix1.data[i][k] * matrix2.data[k][j]
output.data[i][j] = value_sum
return output
else:
return None
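# --- Illustrative usage (appended sketch, not part of the original file) ---
# Minimal example of exercising the Matrix helpers above. It assumes the
# constructor signature Matrix(y_size, x_size, fill_value), inferred from the
# call Matrix(matrix1.y_size, matrix2.x_size, 0) in mult_matrix, and that
# mult_matrix is the module-level product helper defined directly above.
if __name__ == '__main__':
    a = Matrix(2, 3, 1.0)       # 2x3 matrix filled with 1.0
    b = Matrix(3, 2, 2.0)       # 3x2 matrix filled with 2.0
    a.add(1.0)                  # elementwise add: every entry becomes 2.0
    c = mult_matrix(a, b)       # standard matrix product -> 2x2 result
    if c is not None:
        c.printdata()           # pretty-print via the pandas DataFrame wired up in __init__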
|
TNT-Samuel/Coding-Projects
|
Neural Networks/MatrixMath.py
|
Python
|
gpl-3.0
| 8,601
|
from builtins import range
import numpy as np
def affine_forward(x, w, b):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
examples, where each example x[i] has shape (d_1, ..., d_k). We will
reshape each input into a vector of dimension D = d_1 * ... * d_k, and
then transform it to an output vector of dimension M.
Inputs:
- x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
- w: A numpy array of weights, of shape (D, M)
- b: A numpy array of biases, of shape (M,)
Returns a tuple of:
- out: output, of shape (N, M)
- cache: (x, w, b)
"""
out = None
N = np.shape(x)[0]
###########################################################################
# TODO: Implement the affine forward pass. Store the result in out. You #
# will need to reshape the input into rows. #
###########################################################################
out = np.dot(np.reshape(x, (N, -1)), w) + b
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (x, w, b)
return out, cache
def affine_backward(dout, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
- dout: Upstream derivative, of shape (N, M)
- cache: Tuple of:
- x: Input data, of shape (N, d_1, ... d_k)
      - w: Weights, of shape (D, M)
      - b: Biases, of shape (M,)
Returns a tuple of:
- dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
- dw: Gradient with respect to w, of shape (D, M)
- db: Gradient with respect to b, of shape (M,)
"""
x, w, b = cache
N = np.shape(x)[0]
dx, dw, db = None, None, None
###########################################################################
# TODO: Implement the affine backward pass. #
###########################################################################
dw = np.dot(np.reshape(x, (N, -1)).T, dout)
db = np.sum(dout, axis=0)
dx = np.reshape(np.dot(dout, w.T), x.shape)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dw, db
def relu_forward(x):
"""
Computes the forward pass for a layer of rectified linear units (ReLUs).
Input:
- x: Inputs, of any shape
Returns a tuple of:
- out: Output, of the same shape as x
- cache: x
"""
out = None
###########################################################################
# TODO: Implement the ReLU forward pass. #
###########################################################################
out = np.maximum(0, x)
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = x
return out, cache
def relu_backward(dout, cache):
"""
Computes the backward pass for a layer of rectified linear units (ReLUs).
Input:
- dout: Upstream derivatives, of any shape
- cache: Input x, of same shape as dout
Returns:
- dx: Gradient with respect to x
"""
dx, x = None, cache
###########################################################################
# TODO: Implement the ReLU backward pass. #
###########################################################################
dx = dout * (x > 0)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx
def batchnorm_forward(x, gamma, beta, bn_param):
"""
Forward pass for batch normalization.
During training the sample mean and (uncorrected) sample variance are
computed from minibatch statistics and used to normalize the incoming data.
During training we also keep an exponentially decaying running mean of the
mean and variance of each feature, and these averages are used to normalize
data at test-time.
At each timestep we update the running averages for mean and variance using
an exponential decay based on the momentum parameter:
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
Note that the batch normalization paper suggests a different test-time
behavior: they compute sample mean and variance for each feature using a
large number of training images rather than using a running average. For
this implementation we have chosen to use running averages instead since
they do not require an additional estimation step; the torch7
implementation of batch normalization also uses running averages.
Input:
- x: Data of shape (N, D)
- gamma: Scale parameter of shape (D,)
    - beta: Shift parameter of shape (D,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: of shape (N, D)
- cache: A tuple of values needed in the backward pass
"""
mode = bn_param['mode']
eps = bn_param.get('eps', 1e-5)
momentum = bn_param.get('momentum', 0.9)
N, D = x.shape
running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
out, cache = None, {}
if mode == 'train':
#######################################################################
# TODO: Implement the training-time forward pass for batch norm. #
# Use minibatch statistics to compute the mean and variance, use #
# these statistics to normalize the incoming data, and scale and #
# shift the normalized data using gamma and beta. #
# #
# You should store the output in the variable out. Any intermediates #
# that you need for the backward pass should be stored in the cache #
# variable. #
# #
# You should also use your computed sample mean and variance together #
# with the momentum variable to update the running mean and running #
# variance, storing your result in the running_mean and running_var #
# variables. #
#######################################################################
sample_mean = np.mean(x, axis=0, keepdims=True)
sample_var = np.mean(np.square(x - sample_mean), axis=0, keepdims=True)
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
_x = (x - sample_mean) / np.sqrt(sample_var + eps)
out = _x * gamma + beta
cache["_x"] = _x
cache["x"] = x
cache["gamma"] = gamma
cache["beta"] = beta
cache["sample_mean"] = sample_mean
cache["sample_var"] = sample_var
cache["eps"] = eps
#######################################################################
# END OF YOUR CODE #
#######################################################################
elif mode == 'test':
#######################################################################
# TODO: Implement the test-time forward pass for batch normalization. #
# Use the running mean and variance to normalize the incoming data, #
# then scale and shift the normalized data using gamma and beta. #
# Store the result in the out variable. #
#######################################################################
_x = (x - running_mean) / np.sqrt(running_var + eps)
out = _x * gamma + beta
#######################################################################
# END OF YOUR CODE #
#######################################################################
else:
raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
# Store the updated running means back into bn_param
bn_param['running_mean'] = running_mean
bn_param['running_var'] = running_var
return out, cache
def batchnorm_backward(dout, cache):
"""
Backward pass for batch normalization.
For this implementation, you should write out a computation graph for
batch normalization on paper and propagate gradients backward through
intermediate nodes.
Inputs:
- dout: Upstream derivatives, of shape (N, D)
- cache: Variable of intermediates from batchnorm_forward.
Returns a tuple of:
- dx: Gradient with respect to inputs x, of shape (N, D)
- dgamma: Gradient with respect to scale parameter gamma, of shape (D,)
- dbeta: Gradient with respect to shift parameter beta, of shape (D,)
"""
dx, dgamma, dbeta = None, None, None
###########################################################################
# TODO: Implement the backward pass for batch normalization. Store the #
# results in the dx, dgamma, and dbeta variables. #
###########################################################################
var, mean, gamma, beta, x, _x, eps = cache["sample_var"], cache["sample_mean"], \
cache["gamma"], cache["beta"], cache["x"], cache["_x"], cache["eps"]
N, D = x.shape
dgamma = np.sum(dout * _x, axis=0)
dbeta = np.sum(dout, axis=0)
d_x = dout * gamma
dvar = np.sum(d_x * (x - mean) * (-1/2) * ((var + eps)**(-3/2)), axis=0)
dmean = np.sum(d_x * (-1/np.sqrt(var+eps)) + dvar * (-2/N) * (x - mean), axis=0)
dx = dmean * (1/N) * np.ones_like(x) + dvar * (2/N) * (x - mean) + d_x * (1/np.sqrt(var+eps))
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dgamma, dbeta
def batchnorm_backward_alt(dout, cache):
"""
Alternative backward pass for batch normalization.
For this implementation you should work out the derivatives for the batch
normalizaton backward pass on paper and simplify as much as possible. You
should be able to derive a simple expression for the backward pass.
Note: This implementation should expect to receive the same cache variable
as batchnorm_backward, but might not use all of the values in the cache.
Inputs / outputs: Same as batchnorm_backward
"""
dx, dgamma, dbeta = None, None, None
###########################################################################
# TODO: Implement the backward pass for batch normalization. Store the #
# results in the dx, dgamma, and dbeta variables. #
# #
# After computing the gradient with respect to the centered inputs, you #
# should be able to compute gradients with respect to the inputs in a #
# single statement; our implementation fits on a single 80-character line.#
###########################################################################
var, mean, gamma, beta, x, _x, eps = cache["sample_var"], cache["sample_mean"], \
cache["gamma"], cache["beta"], cache["x"], cache["_x"], cache["eps"]
N, D = x.shape
dgamma = np.sum(dout * _x, axis=0)
dbeta = np.sum(dout, axis=0)
    dxhat = dout * gamma
    dx = (N * dxhat - np.sum(dxhat, axis=0) - _x * np.sum(dxhat * _x, axis=0)) / (N * np.sqrt(var + eps))
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dgamma, dbeta
def dropout_forward(x, dropout_param):
"""
Performs the forward pass for (inverted) dropout.
Inputs:
- x: Input data, of any shape
- dropout_param: A dictionary with the following keys:
- p: Dropout parameter. We drop each neuron output with probability p.
- mode: 'test' or 'train'. If the mode is train, then perform dropout;
if the mode is test, then just return the input.
- seed: Seed for the random number generator. Passing seed makes this
function deterministic, which is needed for gradient checking but not
in real networks.
Outputs:
- out: Array of the same shape as x.
- cache: tuple (dropout_param, mask). In training mode, mask is the dropout
mask that was used to multiply the input; in test mode, mask is None.
"""
p, mode = dropout_param['p'], dropout_param['mode']
if 'seed' in dropout_param:
np.random.seed(dropout_param['seed'])
mask = None
out = None
if mode == 'train':
#######################################################################
# TODO: Implement training phase forward pass for inverted dropout. #
# Store the dropout mask in the mask variable. #
#######################################################################
mask = np.random.uniform(size=x.shape) > p
out = x * mask / (1-p)
#######################################################################
# END OF YOUR CODE #
#######################################################################
elif mode == 'test':
#######################################################################
# TODO: Implement the test phase forward pass for inverted dropout. #
#######################################################################
out = x
#######################################################################
# END OF YOUR CODE #
#######################################################################
cache = (dropout_param, mask)
out = out.astype(x.dtype, copy=False)
return out, cache
def dropout_backward(dout, cache):
"""
Perform the backward pass for (inverted) dropout.
Inputs:
- dout: Upstream derivatives, of any shape
- cache: (dropout_param, mask) from dropout_forward.
"""
dropout_param, mask = cache
mode = dropout_param['mode']
dx = None
if mode == 'train':
#######################################################################
# TODO: Implement training phase backward pass for inverted dropout #
#######################################################################
dx = dout * mask / (1-dropout_param['p'])
#######################################################################
# END OF YOUR CODE #
#######################################################################
elif mode == 'test':
dx = dout
return dx
def conv_forward_naive(x, w, b, conv_param):
"""
A naive implementation of the forward pass for a convolutional layer.
The input consists of N data points, each with C channels, height H and
width W. We convolve each input with F different filters, where each filter
    spans all C channels and has height HH and width WW.
Input:
- x: Input data of shape (N, C, H, W)
- w: Filter weights of shape (F, C, HH, WW)
- b: Biases, of shape (F,)
- conv_param: A dictionary with the following keys:
- 'stride': The number of pixels between adjacent receptive fields in the
horizontal and vertical directions.
- 'pad': The number of pixels that will be used to zero-pad the input.
Returns a tuple of:
- out: Output data, of shape (N, F, H', W') where H' and W' are given by
H' = 1 + (H + 2 * pad - HH) / stride
W' = 1 + (W + 2 * pad - WW) / stride
- cache: (x, w, b, conv_param)
"""
out = None
###########################################################################
# TODO: Implement the convolutional forward pass. #
# Hint: you can use the function np.pad for padding. #
###########################################################################
N, C, H, W = x.shape
F, C, HH, WW = w.shape
stride, pad = conv_param["stride"], conv_param["pad"]
_H = 1 + (H + 2 * pad - HH) // stride
_W = 1 + (W + 2 * pad - WW) // stride
out = np.zeros((N, F, _H, _W))
_x = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), "constant")
def conv(f, i, j):
return np.sum(_x[:, :, i:i+HH, j:j+WW] * w[f], axis=(1, 2, 3)) + b[f]
for f in range(F):
for i in range(_H):
for j in range(_W):
out[:, f, i, j] = conv(f, i*stride, j*stride)
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (x, w, b, conv_param)
return out, cache
def conv_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a convolutional layer.
Inputs:
- dout: Upstream derivatives.
- cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive
Returns a tuple of:
- dx: Gradient with respect to x
- dw: Gradient with respect to w
- db: Gradient with respect to b
"""
dx, dw, db = None, None, None
###########################################################################
# TODO: Implement the convolutional backward pass. #
###########################################################################
x, w, b, conv_param = cache
N, C, H, W = x.shape
F, C, HH, WW = w.shape
N, F, _H, _W = dout.shape
stride, pad = conv_param["stride"], conv_param["pad"]
_x = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), "constant")
dw = np.zeros_like(w)
db = np.zeros_like(b)
dx = np.zeros_like(_x)
for f in range(F):
for i in range(_H):
for j in range(_W):
_i = i*stride
_j = j*stride
dw[f] += np.sum(_x[:, :, _i:_i+HH, _j:_j+WW]*np.reshape(dout[:, f, i, j], (-1, 1, 1, 1)), axis=0)
db[f] += np.sum(dout[:, f, i, j], axis=0)
dx[:, :, _i:_i+HH, _j:_j+WW] += np.reshape(w[f], (1, C, HH, WW))*np.reshape(dout[:, f, i, j], (-1, 1, 1, 1))
dx = dx[:, :, pad:-pad, pad:-pad]
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dw, db
def max_pool_forward_naive(x, pool_param):
"""
A naive implementation of the forward pass for a max pooling layer.
Inputs:
- x: Input data, of shape (N, C, H, W)
- pool_param: dictionary with the following keys:
- 'pool_height': The height of each pooling region
- 'pool_width': The width of each pooling region
- 'stride': The distance between adjacent pooling regions
Returns a tuple of:
- out: Output data
- cache: (x, pool_param)
"""
out = None
###########################################################################
# TODO: Implement the max pooling forward pass #
###########################################################################
pool_height = pool_param["pool_height"]
pool_width = pool_param["pool_width"]
stride = pool_param["stride"]
N, C, H, W = x.shape
out = np.zeros((N, C, H//pool_height, W//pool_width))
for n in range(N):
for c in range(C):
for i in range(H//pool_height):
for j in range(W//pool_width):
out[n, c, i, j] = np.amax(x[n, c, i*stride:i*stride+pool_height, j*stride:j*stride+pool_width])
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (x, pool_param)
return out, cache
def max_pool_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a max pooling layer.
Inputs:
- dout: Upstream derivatives
- cache: A tuple of (x, pool_param) as in the forward pass.
Returns:
- dx: Gradient with respect to x
"""
dx = None
###########################################################################
# TODO: Implement the max pooling backward pass #
###########################################################################
x, pool_param = cache
pool_height = pool_param["pool_height"]
pool_width = pool_param["pool_width"]
stride = pool_param["stride"]
N, C, H, W = x.shape
dx = np.zeros_like(x)
for n in range(N):
for c in range(C):
for i in range(H//pool_height):
for j in range(W//pool_width):
_x = x[n, c, i*stride:i*stride+pool_height, j*stride:j*stride+pool_width]
dx[n, c, i*stride:i*stride+pool_height, j*stride:j*stride+pool_width] += \
(np.amax(_x)==_x) * dout[n, c, i, j]
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
"""
Computes the forward pass for spatial batch normalization.
Inputs:
- x: Input data of shape (N, C, H, W)
- gamma: Scale parameter, of shape (C,)
- beta: Shift parameter, of shape (C,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance. momentum=0 means that
old information is discarded completely at every time step, while
momentum=1 means that new information is never incorporated. The
default of momentum=0.9 should work well in most situations.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: Output data, of shape (N, C, H, W)
- cache: Values needed for the backward pass
"""
out, cache = None, None
###########################################################################
# TODO: Implement the forward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should#
# be very short; ours is less than five lines. #
###########################################################################
N, C, H, W = x.shape
_x = np.swapaxes(x, 1, 3)
_x = np.reshape(_x, (-1, C))
out, cache = batchnorm_forward(_x, gamma, beta, bn_param)
out = np.reshape(out, (N, W, H, C))
out = np.swapaxes(out, 1, 3)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return out, cache
def spatial_batchnorm_backward(dout, cache):
"""
Computes the backward pass for spatial batch normalization.
Inputs:
- dout: Upstream derivatives, of shape (N, C, H, W)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient with respect to inputs, of shape (N, C, H, W)
- dgamma: Gradient with respect to scale parameter, of shape (C,)
- dbeta: Gradient with respect to shift parameter, of shape (C,)
"""
dx, dgamma, dbeta = None, None, None
###########################################################################
# TODO: Implement the backward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should#
# be very short; ours is less than five lines. #
###########################################################################
N, C, H, W = dout.shape
_dout = np.swapaxes(dout, 1, 3)
_dout = np.reshape(_dout, (-1, C))
_dx, dgamma, dbeta = batchnorm_backward(_dout, cache)
dx = np.reshape(_dx, (N, W, H, C))
dx = np.swapaxes(dx, 1, 3)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dgamma, dbeta
def svm_loss(x, y):
"""
Computes the loss and gradient using for multiclass SVM classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth
class for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
N = x.shape[0]
correct_class_scores = x[np.arange(N), y]
margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
margins[np.arange(N), y] = 0
loss = np.sum(margins) / N
num_pos = np.sum(margins > 0, axis=1)
dx = np.zeros_like(x)
dx[margins > 0] = 1
dx[np.arange(N), y] -= num_pos
dx /= N
return loss, dx
def softmax_loss(x, y):
"""
Computes the loss and gradient for softmax classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth
class for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
shifted_logits = x - np.max(x, axis=1, keepdims=True)
Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)
log_probs = shifted_logits - np.log(Z)
probs = np.exp(log_probs)
N = x.shape[0]
loss = -np.sum(log_probs[np.arange(N), y]) / N
dx = probs.copy()
dx[np.arange(N), y] -= 1
dx /= N
return loss, dx
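# --- Illustrative check (appended sketch, not part of the original assignment file) ---
# Typical way the layers above are sanity-checked: compare the analytic gradient
# from affine_backward against a central finite-difference estimate. The helper
# _numeric_grad below is defined here for illustration and is not part of the
# cs231n codebase.
def _numeric_grad(f, x, h=1e-5):
    """Central-difference gradient of scalar-valued f() with respect to array x."""
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        old = x[ix]
        x[ix] = old + h
        fp = f()
        x[ix] = old - h
        fm = f()
        x[ix] = old
        grad[ix] = (fp - fm) / (2 * h)
        it.iternext()
    return grad

if __name__ == '__main__':
    np.random.seed(0)
    x = np.random.randn(4, 5)
    w = np.random.randn(5, 3)
    b = np.random.randn(3)
    dout = np.random.randn(4, 3)
    out, cache = affine_forward(x, w, b)
    dx, dw, db = affine_backward(dout, cache)
    dw_num = _numeric_grad(lambda: np.sum(affine_forward(x, w, b)[0] * dout), w)
    rel_err = np.max(np.abs(dw - dw_num) / (np.abs(dw) + np.abs(dw_num) + 1e-12))
    print('dw relative error:', rel_err)  # should be on the order of 1e-8 or smaller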
|
ThyrixYang/LearningNotes
|
MOOC/stanford_cnn_cs231n/assignment2/cs231n/layers.py
|
Python
|
gpl-3.0
| 28,227
|
"""
Created on Feb 15, 2014
@author: alex
"""
from sqlalchemy import Column
from sqlalchemy.types import Float
from sqlalchemy.types import Integer
from sqlalchemy.types import SmallInteger
from sqlalchemy.types import Unicode
from sqlalchemy.orm import relationship
from farmgui.models import Base
from farmgui.models import InterpolationKnot
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
class SetpointInterpolation(Base):
"""
classdocs
"""
__tablename__ = 'SetpointInterpolations'
_id = Column(SmallInteger, primary_key=True, autoincrement=True, nullable=False, unique=True)
name = Column(Unicode(250))
order = Column(SmallInteger, nullable=False)
start_value = Column(Float(), nullable=False)
end_time = Column(Integer(), nullable=False)
end_value = Column(Float(), nullable=True)
description = Column(Unicode(250), nullable=True)
knots = relationship('InterpolationKnot', backref='interpolation')
f = None
def __init__(self, name, order, start_value, end_time, end_value, description):
self.name = name
self.order = order
self.start_value = start_value
self.end_time = end_time
self.end_value = end_value
self.description = description
@property
def id(self):
return self._id
def plot(self, y_axis_name, filename):
fig = plt.figure(figsize=(5, 3))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8])
ax.set_xlabel('Time')
ax.set_ylabel(y_axis_name, rotation='horizontal')
ax.xaxis.grid(color='gray', linestyle='dashed')
ax.yaxis.grid(color='gray', linestyle='dashed')
x = []
y = []
x.append(0)
y.append(self.start_value)
for knot in self.knots:
x.append(knot.time*self.end_time)
y.append(knot.value)
x.append(self.end_time)
y.append(self.end_value)
while len(x) <= self.order:
# not enough knots
x.append(self.end_time * len(x)/5.0)
y.append(self.end_value)
x_inter = np.linspace(0, self.end_time, 100)
if self.order < 4:
f = interpolate.interp1d(x, y, kind=self.order)
y_inter = f(x_inter)
else:
f = interpolate.splrep(x, y)
y_inter = interpolate.splev(x_inter, f)
ax.set_xlim(0, self.end_time)
ax.set_ylim(y_inter.min()-1, y_inter.max()+1)
ax.plot(x, y, 'o', x_inter, y_inter, '-')
fig.savefig(filename)
def calculate_interpolation(self):
x = []
y = []
x.append(0)
y.append(self.start_value)
for knot in self.knots:
x.append(knot.time)
y.append(knot.value)
x.append(self.end_time)
y.append(self.end_value)
if self.order < 4:
self.f = interpolate.interp1d(x, y, kind=self.order)
else:
self.f = interpolate.splrep(x, y)
def get_value_at(self, interpolation_time):
if self.f is None:
self.calculate_interpolation()
if self.order < 4:
y = self.f([interpolation_time])[0]
else:
y = interpolate.splev([interpolation_time], self.f)[0]
return round(y.item(), 2)
def init_setpoint_interpolations(db_session):
h = 3600
m = 60
new_inter = SetpointInterpolation('Temperature Interpolation (long day)', 1, 20, 86400, 20, '...')
new_inter.knots.append(InterpolationKnot(new_inter, 6*h, 20))
new_inter.knots.append(InterpolationKnot(new_inter, 8*h, 25))
new_inter.knots.append(InterpolationKnot(new_inter, 22*h, 25))
db_session.add(new_inter)
new_inter = SetpointInterpolation('Humidity Interpolation (long day)', 1, 70, 86400, 70, '...')
new_inter.knots.append(InterpolationKnot(new_inter, 6*h, 70))
new_inter.knots.append(InterpolationKnot(new_inter, 8*h, 50))
new_inter.knots.append(InterpolationKnot(new_inter, 22*h, 50))
db_session.add(new_inter)
new_inter = SetpointInterpolation('Red Light Interpolation (long day)', 1, 0, 86400, 0, '...')
new_inter.knots.append(InterpolationKnot(new_inter, 3*h, 0))
new_inter.knots.append(InterpolationKnot(new_inter, 3*h+30*m, 100))
new_inter.knots.append(InterpolationKnot(new_inter, 20*h+30*m, 100))
new_inter.knots.append(InterpolationKnot(new_inter, 21*h, 0))
db_session.add(new_inter)
new_inter = SetpointInterpolation('Test Interpolation', 1, 0, 86400, 0, '...')
new_inter.knots.append(InterpolationKnot(new_inter, 4*h, 100))
new_inter.knots.append(InterpolationKnot(new_inter, 8*h, 0))
new_inter.knots.append(InterpolationKnot(new_inter, 12*h, 100))
new_inter.knots.append(InterpolationKnot(new_inter, 16*h, 0))
new_inter.knots.append(InterpolationKnot(new_inter, 20*h, 100))
db_session.add(new_inter)
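# --- Illustrative sketch (appended, not part of the original module) ---
# Standalone version of the interpolation pattern used by
# calculate_interpolation()/get_value_at(): low orders go through
# interpolate.interp1d, higher orders fall back to a B-spline via
# splrep/splev. The knot times and values below are made up for illustration.
if __name__ == '__main__':
    times = [0, 6 * 3600, 8 * 3600, 22 * 3600, 86400]    # seconds over one day
    values = [20.0, 20.0, 25.0, 25.0, 20.0]              # e.g. a temperature profile
    order = 1
    if order < 4:
        f = interpolate.interp1d(times, values, kind=order)
        print(round(float(f([12 * 3600])[0]), 2))        # setpoint at noon
    else:
        tck = interpolate.splrep(times, values)
        print(round(float(interpolate.splev([12 * 3600], tck)[0]), 2))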
|
AlexanderLang/OpenAutomatedFarm
|
FarmGUI/farmgui/models/SetpointInterpolation.py
|
Python
|
gpl-3.0
| 4,921
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import division, with_statement
import unicodedata
from django.db import models
from ox.django import fields
import ox
from item import utils
import item.models
import managers
import tasks
def get_name_sort(name, sortname=None):
name = unicodedata.normalize('NFKD', name).strip()
if name:
person, created = Person.objects.get_or_create(name=name)
if created:
if sortname:
person.sortname = sortname
person.save()
sortname = unicodedata.normalize('NFKD', person.sortname)
else:
sortname = u''
return sortname
class Person(models.Model):
name = models.CharField(max_length=200, unique=True)
sortname = models.CharField(max_length=200)
sortsortname = models.CharField(max_length=200)
edited = models.BooleanField(default=False)
numberofnames = models.IntegerField(default=0)
#FIXME: how to deal with aliases
aliases = fields.TupleField(default=[])
imdbId = models.CharField(max_length=7, blank=True)
wikipediaId = models.CharField(max_length=1000, blank=True)
objects = managers.PersonManager()
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if not self.sortname:
self.sortname = ox.get_sort_name(self.name)
self.sortname = unicodedata.normalize('NFKD', self.sortname)
self.sortsortname = utils.sort_string(self.sortname)
self.numberofnames = len(self.name.split(' '))
super(Person, self).save(*args, **kwargs)
tasks.update_itemsort.delay(self.id)
def update_itemsort(self):
item.models.Facet.objects.filter(
key__in=item.models.Item.person_keys + ['name'],
value=self.name
).exclude(
sortvalue=self.sortname
).update(
sortvalue=self.sortname
)
for i in item.models.Item.objects.filter(facets__in=item.models.Facet.objects.filter(
key__in=item.models.Item.person_keys + ['name'],
value=self.name)
).distinct():
i.update_sort()
def get_or_create(model, name, imdbId=None):
if imdbId:
q = model.objects.filter(name=name, imdbId=imdbId)
else:
q = model.objects.all().filter(name=name)
if q.count() > 0:
o = q[0]
else:
o = model.objects.create(name=name)
if imdbId:
o.imdbId = imdbId
o.save()
return o
get_or_create = classmethod(get_or_create)
def get_id(self):
return ox.toAZ(self.id)
def json(self, keys=None, user=None):
j = {
'id': self.get_id(),
'name': self.name,
'sortname': self.sortname,
'numberofnames': self.numberofnames,
}
if keys:
for key in j.keys():
if key not in keys:
del j[key]
return j
def update_sort_name():
for p in Person.objects.all():
_sortname = ox.get_sort_name(p.name).lower()
_sortname = unicodedata.normalize('NFKD', _sortname)
if (not p.edited and _sortname != p.sortname) or \
(p.edited and _sortname == p.sortname):
p.sortname = _sortname
p.edited = False
p.save()
|
maysara/pandora_image
|
pandora/person/models.py
|
Python
|
gpl-3.0
| 3,404
|
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication
from gui.implement import MainWindow
if __name__ == '__main__':
QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
app = QApplication(sys.argv)
master = MainWindow()
master.show()
sys.exit(app.exec_())
|
tucuprum/jjal_downloader
|
main.py
|
Python
|
gpl-3.0
| 348
|
from contrib.rfc2460 import no_next_header_after_extension_header as suite
from scapy.all import *
from veripy.testability import ComplianceTestTestCase
class NoNextHeaderAfterExtensionHeaderEndNodeTestCase(ComplianceTestTestCase):
def test_no_next_header_after_extension_header(self):
o = self.get_outcome(suite.NoNextHeaderAfterExtensionHeaderEndNodeTestCase)
self.assertCheckPasses(o)
def test_no_next_header_after_extension_header_reply(self):
self.ifx.replies_with(IPv6(src=str(self.ifx.global_ip()), dst=str(self.tn1.global_ip()))/ICMPv6EchoReply())
o = self.get_outcome(suite.NoNextHeaderAfterExtensionHeaderEndNodeTestCase)
self.assertCheckFails(o)
class NoNextHeaderAfterExtensionHeaderIntermediateNodeTestCase(ComplianceTestTestCase):
def test_packet_forwarded_intact(self):
self.ify.replies_with(IPv6(src=str(self.tn4.global_ip()), dst=str(self.tn1.global_ip()))/IPv6ExtHdrDestOpt(nh=59, len=0, options=[PadN(otype='PadN', optlen=4)])/ICMPv6EchoRequest(), to=self.ifx)
o = self.get_outcome(suite.NoNextHeaderAfterExtensionHeaderIntermediateNodeTestCase)
self.assertCheckPasses(o)
def test_packet_not_forwarded_on_link_b(self):
o = self.get_outcome(suite.NoNextHeaderAfterExtensionHeaderIntermediateNodeTestCase)
self.assertCheckFails(o)
def test_packet_forwarded_but_octects_after_header_changed(self):
self.ify.replies_with(IPv6(src=str(self.tn4.global_ip()), dst=str(self.tn1.global_ip()))/IPv6ExtHdrDestOpt(nh=59, len=0, options=[PadN(otype='PadN', optlen=4)])/ICMPv6EchoReply(data='IPv6'), to=self.ifx)
o = self.get_outcome(suite.NoNextHeaderAfterExtensionHeaderIntermediateNodeTestCase)
self.assertCheckFails(o)
|
mwrlabs/veripy
|
contrib/rfc2460/tests/no_next_header_after_extension_header_test_case.py
|
Python
|
gpl-3.0
| 1,789
|
from mumax2 import *
# Standard Problem 4
Nx = 128
Ny = 32
Nz = 1
setgridsize(Nx, Ny, Nz)
setcellsize(500e-9/Nx, 125e-9/Ny, 3e-9/Nz)
load('micromagnetism')
load('anisotropy/uniaxial')
load('micromag/energy')
setv('Msat', 800e3)
setv('Aex', 1.3e-11)
setv('alpha', 1)
setv('dt', 1e-15)
setv('m_maxerror', 1./100)
setv('Ku', 500)
setv('anisU', [0, 1, 0])
m=[ [[[1]]], [[[1]]], [[[0]]] ]
setarray('m', m)
savegraph("graph.png")
#run(2e-9) #relax
run_until_smaller('maxtorque', 1e-2 * gets('gamma') * 800e3)
setv('alpha', 0.02)
setv('dt', 1e-15)
setv('t', 0)
autosave("m", "omf", ["Text"], 200e-12)
autotabulate(["t", "<m>", "m_error", "m_peakerror", "badsteps", "dt", "maxtorque"], "m.txt", 10e-12)
autotabulate(["t", "E_zeeman"], "Ezeeman.txt", 10e-12)
autotabulate(["t", "E_ex"], "Eex.txt", 10e-12)
autotabulate(["t", "E_demag"], "Edemag.txt", 10e-12)
autotabulate(["t", "E_anis"], "Eanis.txt", 10e-12)
autotabulate(["t", "E"], "E.txt", 10e-12)
Bx = -24.6E-3
By = 4.3E-3
Bz = 0
setv('B_ext', [Bx, By, Bz])
run(1e-9)
printstats()
|
mumax/2
|
tests/energy.py
|
Python
|
gpl-3.0
| 1,049
|
###########################################################
# SPEpy - simplified parquet equation solver for SIAM #
# Copyright (C) 2019 Vladislav Pokorny; pokornyv@fzu.cz #
# homepage: github.com/pokornyv/SPEpy #
# siam_parquet.py - solver for SPE #
# method described in Phys. Rev. B 100, 195114 (2019). #
###########################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import scipy as sp
from scipy.integrate import simps
from scipy.optimize import brentq
from sys import argv,exit,version_info
from os import listdir
from time import ctime,time
from parlib import *
from parlib2 import *
t = time()
hashes = '#'*80
## python version
ver = str(version_info[0])+'.'+str(version_info[1])+'.'+str(version_info[2])
## header for files so we store the parameters along with data
parline = '# U = {0: .5f}, Delta = {1: .5f}, ed = {2: .5f}, h = {3: .5f}, T = {4: .5f}'\
.format(U,Delta,ed,h,T)
parfname = str(GFtype)+'_U'+str(U)+'eps'+str(ed)+'T'+str(T)+'h'+str(h)
## print the header #######################################
if chat:
print(hashes+'\n# generated by '+str(argv[0])+', '+str(ctime()))
print('# python version: '+str(ver)+', SciPy version: '+str(sp.version.version))
print('# energy axis: [{0: .5f} ..{1: .5f}], step = {2: .5f}, length = {3: 3d}'\
.format(En_A[0],En_A[-1],dE,len(En_A)))
print(parline)
print('# Kondo temperature from Bethe ansatz: Tk ~{0: .5f}'\
.format(float(KondoTemperature(U,Delta,ed))))
if SC: print('# using partial self-consistency scheme for the self-energy')
elif FSC: print('# using full self-consistency scheme for the self-energy')
else: print('# using no self-consistency scheme for the self-energy')
if SC and FSC: SC = False
if SCsolver == 'fixed':
print('# using Steffensen fixed-point algorithm to calculate Lambda vertex')
elif SCsolver == 'root':
print('# using MINPACK root to calculate Lambda vertex')
else:
print('# using iteration algorithm to calculate Lambda vertex, mixing parameter alpha = {0: .5f}'\
.format(float(alpha)))
###########################################################
## inicialize the non-interacting Green function ##########
if GFtype == 'lor':
if chat: print('# using Lorentzian non-interacting DoS')
GFlambda = lambda x: GreensFunctionLorenz(x,Delta)
DensityLambda = lambda x: DensityLorentz(x,Delta)
elif GFtype == 'semi':
if chat: print('# using semielliptic non-interacting DoS')
W = Delta ## half-bandwidth
GFlambda = lambda x: GreensFunctionSemi(x,W)
DensityLambda = lambda x: DensitySemi(x,W)
elif GFtype == 'gauss':
if chat: print('# using Gaussian non-interacting DoS')
GFlambda = lambda x: GreensFunctionGauss(x,Delta)
DensityLambda = lambda x: DensityGauss(x,Delta)
else:
print('# Error: DoS type "'+GFtype+'" not implemented.')
exit(1)
## using the Lambda from the older method as a starting point
if not Lin:
if chat: print('# calculating the fully static vertex at half-filling as a starting point:')
GFzero_A = GFlambda(En_A)
Bubble_A = TwoParticleBubble(GFzero_A,GFzero_A,'eh')
Lambda0 = CalculateLambda(Bubble_A,GFzero_A,GFzero_A)
if chat: print('# - Lambda0 = {0: .8f}'.format(Lambda0))
else:
if chat: print('# Initial guess for Lambda: {0: .6f}'.format(LIn))
########################################################
## calculate filling of the thermodynamic Green function
if chat: print('#\n# calculating the initial thermodynamic Green function:')
[nTup,nTdn] = [0.5,0.5]
[nTupOld,nTdnOld] = [1e8,1e8]
k = 1
while any([sp.fabs(nTupOld-nTup) > epsn, sp.fabs(nTdnOld-nTdn) > epsn]):
[nTupOld,nTdnOld] = [nTup,nTdn]
if T == 0.0:
nup_dens = lambda x: DensityLambda(ed+U/2.0*(x+nTdn-1.0)-h) - x
ndn_dens = lambda x: DensityLambda(ed+U/2.0*(nTup+x-1.0)+h) - x
else:
nup_dens = lambda x: Filling(GFlambda(En_A-ed-U/2.0*(x+nTdn-1.0)+h)) - x
ndn_dens = lambda x: Filling(GFlambda(En_A-ed-U/2.0*(nTup+x-1.0)-h)) - x
nTup = brentq(nup_dens,0.0,1.0,xtol = epsn)
nTdn = brentq(ndn_dens,0.0,1.0,xtol = epsn)
if chat: print('# - - {0: 3d}: nUp: {1: .8f}, nDn: {2: .8f}'.format(k,nTup,nTdn))
k += 1
## fill the Green functions
GFTup_A = GFlambda(En_A-ed-U/2.0*(nTup+nTdn-1.0)+h)
GFTdn_A = GFlambda(En_A-ed-U/2.0*(nTup+nTdn-1.0)-h)
## write non-interacting GF to a file, development only
#WriteFileX([GFTup_A,GFTdn_A],WriteMax,WriteStep,parline,'GFTzero.dat')
if chat: print('# - norm[GTup]: {0: .8f}, n[GTup]: {1: .8f}'\
.format(float(IntDOS(GFTup_A)),float(nTup)))
if chat: print('# - norm[GTdn]: {0: .8f}, n[GTdn]: {1: .8f}'\
.format(float(IntDOS(GFTdn_A)),float(nTdn)))
if chat: print('# - nT = {0: .8f}, mT = {1: .8f}'.format(float(nTup+nTdn),float(nTup-nTdn)))
###########################################################
## calculate the Lambda vertex ############################
if chat:
if FSC: print('#\n# calculating the full self-energy using FSC scheme:')
else: print('#\n# calculating the Hartree-Fock self-energy:')
if Lin: ## reading initial values from command line
Lambda = LIn
else: ## using the static guess
Lambda = Lambda0
[nTupOld,nTdnOld] = [1e8,1e8]
[Sigma0,Sigma1] = [U*(nTup+nTdn-1.0)/2.0,Lambda*(nTdn-nTup)/2.0]
k = 1
sumsq = 1e8 if FSC else 0.0 ## convergence criterion for the FSC scheme
while any([sp.fabs(nTupOld-nTup) > epsn, sp.fabs(nTdnOld-nTdn) > epsn, sumsq > 0.01]):
if chat: print('#\n# Iteration {0: 3d}'.format(k))
[nTupOld,nTdnOld] = [nTup,nTdn]
if FSC:
GFTupOld_A = sp.copy(GFTup_A)
## Lambda vertex
if chat: print('# - calculating Lambda vertex:')
Lambda = CalculateLambdaD(GFTup_A,GFTdn_A,Lambda)
if chat: print('# - - Lambda vertex: Lambda: {0: .8f}'.format(Lambda))
if True: ## print auxiliary functions, development only
# if False:
K = KvertexD(Lambda,GFTup_A,GFTdn_A)
if chat: print('# - - K vertex: K: {0: .8f}'.format(K))
## check the integrals:
XD = ReBDDFDD(GFTup_A,GFTdn_A,0)
if chat: print('# - - aux. integral: X: {0: .8f}'.format(XD))
## HF self-energy
if chat: print('# - calculating static self-energy:')
[Sigma0,Sigma1] = CalculateSigmaT(Lambda,Sigma0,Sigma1,GFlambda,DensityLambda)
if chat: print('# - - static self-energy: normal: {0: .8f}, anomalous: {1: .8f}'.format(Sigma0,Sigma1))
GFTup_A = GFlambda(En_A-ed-Sigma0+(h-Sigma1))
GFTdn_A = GFlambda(En_A-ed-Sigma0-(h-Sigma1))
## symmetrize the Green function if possible
if h == 0.0:
if chat: print('# - h = 0, averaging Green functions over spin to avoid numerical errors')
GFTup_A = sp.copy((GFTup_A+GFTdn_A)/2.0)
GFTdn_A = sp.copy((GFTup_A+GFTdn_A)/2.0)
Sigma1 = 0.0
## recalculate filling and magnetization
if any([ed!=0.0,h!=0.0]):
if T == 0.0:
nTup = DensityLambda(ed+Sigma0-(h-Sigma1))
nTdn = DensityLambda(ed+Sigma0+(h-Sigma1))
else:
nTup = Filling(GFTup_A)
nTdn = Filling(GFTdn_A)
else: ## ed = 0 and h = 0
nTup = nTdn = 0.5
## this is to convert complex to float, the warning is just a sanity check
if any([sp.fabs(sp.imag(nTup))>1e-6,sp.fabs(sp.imag(nTdn))>1e-6,]):
print('# Warning: non-zero imaginary part of nT, up: {0: .8f}, dn: {1: .8f}.'\
.format(sp.imag(nTup),sp.imag(nTdn)))
[nTup,nTdn] = [sp.real(nTup),sp.real(nTdn)]
if FSC:
## spectral self-energy ###################################
SigmaUp_A = SelfEnergyD2(GFTup_A,GFTdn_A,Lambda,'up')
SigmaDn_A = SelfEnergyD2(GFTup_A,GFTdn_A,Lambda,'dn')
Sigma_A = (SigmaUp_A+SigmaDn_A)/2.0
## interacting Green function #############################
GFTup_A = GFlambda(En_A-ed-U/2.0*(nTup+nTdn-1.0)+(h-Sigma1)-Sigma_A)
GFTdn_A = GFlambda(En_A-ed-U/2.0*(nTup+nTdn-1.0)-(h-Sigma1)-Sigma_A)
## print output for given iteration
if chat:
print('# - thermodynamic Green function filling: nTup = {0: .8f}, nTdn = {1: .8f}'.format(nTup,nTdn))
print('# - ed = {0: .4f}, h = {1: .4f}: nT = {2: .8f}, mT = {3: .8f}'.format(ed,h,nTup+nTdn,nTup-nTdn))
print('{0: 3d}\t{1: .8f}\t{2: .8f}\t{3: .8f}\t{4: .8f}'.format(k,nTup,nTdn,nTup+nTdn,nTup-nTdn))
if FSC:
sumsq = sp.sum(sp.imag(GFTupOld_A-GFTup_A)[int(0.5*Nhalf):int(1.5*Nhalf)]**2)
if chat: print('# Sum of squares: {0: .8f}'.format(sumsq))
k+=1
if chat:
	if FSC: print('# - Calculation of the full spectral self-energy finished after {0: 3d} iterations.'.format(int(k-1)))
	else: print('# - Calculation of the Hartree-Fock self-energy finished after {0: 3d} iterations.'.format(int(k-1)))
Det_A = DeterminantGD(Lambda,GFTup_A,GFTdn_A)
Dzero = Det_A[int((len(En_A)-1)/2)]
if chat: print('# - determinant at zero energy: {0: .8f} {1:+8f}i'.format(sp.real(Dzero),sp.imag(Dzero)))
## write the determinant to a file, for development only
#WriteFileX([GFTup_A,GFTdn_A,Det_A],WriteMax,WriteStep,parline,'DetG.dat')
if SC: ## partial self-consistency between Sigma and G:
if chat: print('#\n# calculating the spectral self-energy:')
parfname = 'SC_'+ parfname
k = 1
sumsq = 1e8
GFintUp_A = sp.copy(GFTup_A)
GFintDn_A = sp.copy(GFTdn_A)
[nUp,nDn] = [nTup,nTdn]
while sumsq > 0.06:
GFintUpOld_A = sp.copy(GFintUp_A)
## spectral self-energy ###################################
if chat: print('#\n# Iteration {0: 3d}'.format(k))
SigmaUp_A = SelfEnergyD_sc(GFintUp_A,GFintDn_A,GFTup_A,GFTdn_A,Lambda,'up')
SigmaDn_A = SelfEnergyD_sc(GFintUp_A,GFintDn_A,GFTup_A,GFTdn_A,Lambda,'dn')
Sigma_A = (SigmaUp_A+SigmaDn_A)/2.0
## interacting Green function #############################
GFintUp_A = GFlambda(En_A-ed-U/2.0*(nUp+nDn-1.0)+(h-Sigma1)-Sigma_A)
GFintDn_A = GFlambda(En_A-ed-U/2.0*(nUp+nDn-1.0)-(h-Sigma1)-Sigma_A)
if any([ed!=0.0,h!=0.0]):
[nUp,nDn] = [Filling(GFintUp_A),Filling(GFintDn_A)]
else: ## ed = 0 and h = 0
[nUp,nDn] = [0.5,0.5]
if chat: print('# densities: nUp: {1: .8f}, nDn: {2: .8f}'.format(k,nUp,nDn))
sumsq = sp.sum(sp.imag(GFintUpOld_A-GFintUp_A)[int(0.5*Nhalf):int(1.5*Nhalf)]**2)
if chat: print('# Sum of squares: {0: .8f}'.format(sumsq))
k+=1
elif FSC: ## full self-consistency between Sigma and G:
parfname = 'FSC_'+ parfname
GFintUp_A = sp.copy(GFTup_A)
GFintDn_A = sp.copy(GFTdn_A)
if any([ed!=0.0,h!=0.0]): [nUp,nDn] = [Filling(GFintUp_A),Filling(GFintDn_A)]
else: [nUp,nDn] = [0.5,0.5]
else:
## spectral self-energy ###################################
if chat: print('#\n# calculating the spectral self-energy')
SigmaUp_A = SelfEnergyD2(GFTup_A,GFTdn_A,Lambda,'up')
SigmaDn_A = SelfEnergyD2(GFTup_A,GFTdn_A,Lambda,'dn')
Sigma_A = (SigmaUp_A+SigmaDn_A)/2.0
## interacting Green function #############################
if chat: print('#\n# calculating the spectral Green function:')
if chat: print('# - iterating the final density:')
[nUp,nDn] = [nTup,nTdn]
[nUpOld,nDnOld] = [1e8,1e8]
k = 1
while any([sp.fabs(nUpOld-nUp) > epsn, sp.fabs(nDnOld-nDn) > epsn]):
[nUpOld,nDnOld] = [nUp,nDn]
nup_dens = lambda x: Filling(GFlambda(En_A-ed-U/2.0*(x+nDn-1.0)+(h-Sigma1)-Sigma_A)) - x
ndn_dens = lambda x: Filling(GFlambda(En_A-ed-U/2.0*(nUp+x-1.0)-(h-Sigma1)-Sigma_A)) - x
nUp = brentq(nup_dens,0.0,1.0,xtol = epsn)
nDn = brentq(ndn_dens,0.0,1.0,xtol = epsn)
if chat: print('# - - {0: 3d}: nUp: {1: .8f}, nDn: {2: .8f}'.format(k,nUp,nDn))
k += 1
GFintUp_A = GFlambda(En_A-ed-U/2.0*(nUp+nDn-1.0)+(h-Sigma1)-Sigma_A)
GFintDn_A = GFlambda(En_A-ed-U/2.0*(nUp+nDn-1.0)-(h-Sigma1)-Sigma_A)
###########################################################
## calculate properties ###################################
## quasiparticle weights
[Zup,dReSEupdw] = QuasiPWeight(sp.real(SigmaUp_A))
[Zdn,dReSEdndw] = QuasiPWeight(sp.real(SigmaDn_A))
[Z,dReSEdw] = QuasiPWeight(sp.real(Sigma_A))
if chat: print('# quasiparticle weight:')
if chat: print('# - Z = {0: .8f}, DReSE/dw[0] = {1: .8f}, m*/m = {2: .8f}'\
.format(float(Z),float(dReSEdw),float(1.0/Z)))
if chat and h!=0.0:
print('# - up spin: Z = {0: .8f}, DReSE/dw[0] = {1: .8f}, m*/m = {2: .8f}'\
.format(float(Zup),float(dReSEupdw),float(1.0/Zup)))
print('# - dn spin: Z = {0: .8f}, DReSE/dw[0] = {1: .8f}, m*/m = {2: .8f}'\
.format(float(Zdn),float(dReSEdndw),float(1.0/Zdn)))
## DoS at Fermi energy
DOSFup = -sp.imag(GFintUp_A[int(N/2)])/sp.pi
DOSFdn = -sp.imag(GFintDn_A[int(N/2)])/sp.pi
## filling
[nUp,nDn] = [Filling(GFintUp_A),Filling(GFintDn_A)]
if chat:
print('# - spectral Green function filling: nUp = {0: .8f}, nDn = {1: .8f}'.format(nUp,nDn))
print('# - ed = {0: .4f}, h = {1: .4f}: n = {2: .8f}, m = {3: .8f}'.format(ed,h,nUp+nDn,nUp-nDn))
## HWHM of the spectral function
[HWHMup,DOSmaxUp,wmaxUp] = CalculateHWHM(GFintUp_A)
[HWHMdn,DOSmaxDn,wmaxDn] = CalculateHWHM(GFintDn_A)
if any([HWHMup == 0.0,HWHMdn == 0.0]) and chat:
print('# - Warning: HWHM cannot be calculated, setting it to zero.')
elif any([HWHMup < dE,HWHMdn < dE]):
print('# - Warning: HWHM smaller than energy resolution.')
if chat: print('# - spin-up: DOS[0] = {0: .8f}, maximum of DoS: {1: .8f} at w = {2: .8f}'\
.format(float(DOSFup),float(DOSmaxUp),float(wmaxUp)))
if h!=0.0 and chat:
print('# - spin-dn: DOS[0] = {0: .8f}, maximum of DoS: {1: .8f} at w = {2: .8f}'\
.format(float(DOSFdn),float(DOSmaxDn),float(wmaxDn)))
if chat: print('# - HWHM: spin-up: {0: .8f}, spin-dn: {1: .8f}'.format(float(HWHMup),float(HWHMdn)))
## zero-field susceptibility
if h==0.0:
ChiT = sp.real(SusceptibilityTherm(Dzero,GFTup_A))
ChiS = sp.real(SusceptibilitySpecD(Lambda,ChiT,GFintUp_A))
if chat: print('# - thermodynamic susceptibility: {0: .8f}'.format(ChiT))
if chat: print('# - spectral susceptibility: {0: .8f}'.format(ChiS))
else:
ChiS = ChiT = 0.0
###########################################################
## write the output files #################################
if WriteGF:
header = parline+'\n# E\t\tRe GF0\t\tIm GF0\t\tRe SE\t\tIm SE\t\tRe GF\t\tIm GF'
filename = 'gfUp_'+parfname+'.dat'
WriteFileX([GFTup_A,SigmaUp_A,GFintUp_A],WriteMax,WriteStep,header,filename)
#WriteFileX([GFTup_A,SigmaUp_A,(GFintUp_A+sp.flipud(GFintUp_A))/2.0],WriteMax,WriteStep,header,'symmGF.dat')
if h!=0.0:
filename = 'gfDn_'+parfname+'.dat'
WriteFileX([GFTdn_A,SigmaDn_A,GFintDn_A],WriteMax,WriteStep,header,filename)
filename = 'gfMag_'+parfname+'.dat'
WriteFileX([GFintUp_A,GFintDn_A,Sigma_A],WriteMax,WriteStep,header,filename)
## write data to standard output
## use awk 'NR%2==0', awk 'NR%2==1' to separate the output into two blocks
print('{0: .4f}\t{1: .4f}\t{2: .4f}\t{3: .4f}\t{4: .6f}\t{5: .6f}\t{6: .6f}\t{7: .6f}\t{8: .6f}'\
.format(U,ed,T,h,sp.real(Lambda),HWHMup,Z,DOSFup,sp.real(Dzero)))
print('{0: .4f}\t{1: .4f}\t{2: .4f}\t{3: .4f}\t{4: .6f}\t{5: .6f}\t{6: .6f}\t{7: .6f}\t{8: .6f}\t{9: .6f}'\
.format(U,ed,T,h,nTup,nTdn,nUp,nDn,ChiT,ChiS))
if chat: print('# '+argv[0]+' DONE after {0: .2f} seconds.'.format(float(time()-t)))
## siam_parquet.py end ###
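## --- Illustrative note (appended, not part of the original solver) ---
## The density loops above all follow the same pattern: solve n = F(n) by
## bracketing the root of F(n) - n on [0,1] with scipy.optimize.brentq and
## iterating until two successive (nUp, nDn) pairs agree within epsn. A toy
## version with a made-up filling function would look like:
##
## nUp, nDn, nUpOld, nDnOld, eps = 0.5, 0.5, 1e8, 1e8, 1e-6
## filling = lambda shift: 0.5 - 0.1 * shift      # stand-in for Filling(GFlambda(...))
## while abs(nUpOld - nUp) > eps or abs(nDnOld - nDn) > eps:
##     nUpOld, nDnOld = nUp, nDn
##     nUp = brentq(lambda x: filling(x + nDn - 1.0) - x, 0.0, 1.0, xtol=eps)
##     nDn = brentq(lambda x: filling(nUp + x - 1.0) - x, 0.0, 1.0, xtol=eps)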
|
pokornyv/SPEpy
|
siam_parquet.py
|
Python
|
gpl-3.0
| 15,368
|
#!/usr/bin/python
# This script generates a list of testsuites that should be run as part of
# the Samba 4 test suite.
# The output of this script is parsed by selftest.pl, which then decides
# which of the tests to actually run. It will, for example, skip all tests
# listed in selftest/skip or only run a subset during "make quicktest".
# The idea is that this script outputs all of the tests of Samba 4, not
# just those that are known to pass, and list those that should be skipped
# or are known to fail in selftest/skip or selftest/knownfail. This makes it
# very easy to see what functionality is still missing in Samba 4 and makes
# it possible to run the testsuite against other servers, such as Samba 3 or
# Windows that have a different set of features.
# The syntax for a testsuite is "-- TEST --" on a single line, followed
# by the name of the test, the environment it needs and the command to run, all
# three separated by newlines. All other lines in the output are considered
# comments.
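# For illustration only (this block is a comment, nothing is emitted here):
# based on the description above, one entry in the generated stream has
# roughly this shape:
#
#   -- TEST --
#   samba4.example.testsuite
#   ad_dc_ntvfs
#   <command line that runs the test>
#
# selftest.pl parses such entries and decides which ones to run or skip.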
import os, sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../selftest"))
import selftesthelpers
from selftesthelpers import *
print >>sys.stderr, "OPTIONS %s" % " ".join(smbtorture4_options)
def plansmbtorture4testsuite(name, env, options, modname=None):
return selftesthelpers.plansmbtorture4testsuite(name, env, options,
target='samba4', modname=modname)
samba4srcdir = source4dir()
samba4bindir = bindir()
validate = os.getenv("VALIDATE", "")
if validate:
validate_list = [validate]
else:
validate_list = []
nmblookup4 = binpath('nmblookup4')
smbclient4 = binpath('smbclient4')
bbdir = os.path.join(srcdir(), "testprogs/blackbox")
# Simple tests for LDAP and CLDAP
for options in ['-U"$USERNAME%$PASSWORD" --option=socket:testnonblock=true', '-U"$USERNAME%$PASSWORD"', '-U"$USERNAME%$PASSWORD" -k yes', '-U"$USERNAME%$PASSWORD" -k no', '-U"$USERNAME%$PASSWORD" -k no --sign', '-U"$USERNAME%$PASSWORD" -k no --encrypt', '-U"$USERNAME%$PASSWORD" -k yes --encrypt', '-U"$USERNAME%$PASSWORD" -k yes --sign']:
plantestsuite("samba4.ldb.ldap with options %s(ad_dc_ntvfs)" % options, "ad_dc_ntvfs", "%s/test_ldb.sh ldap $SERVER %s" % (bbdir, options))
# see if we support ADS on the Samba3 side
try:
config_h = os.environ["CONFIG_H"]
except KeyError:
config_h = os.path.join(samba4bindir, "default/include/config.h")
# see if we support ldaps
f = open(config_h, 'r')
try:
have_tls_support = ("ENABLE_GNUTLS 1" in f.read())
finally:
f.close()
if have_tls_support:
for options in ['-U"$USERNAME%$PASSWORD"']:
plantestsuite("samba4.ldb.ldaps with options %s(ad_dc_ntvfs)" % options, "ad_dc_ntvfs",
"%s/test_ldb.sh ldaps $SERVER_IP %s" % (bbdir, options))
for options in ['-U"$USERNAME%$PASSWORD"']:
plantestsuite("samba4.ldb.ldapi with options %s(ad_dc_ntvfs:local)" % options, "ad_dc_ntvfs:local",
"%s/test_ldb.sh ldapi $PREFIX_ABS/ad_dc_ntvfs/private/ldapi %s" % (bbdir, options))
for t in smbtorture4_testsuites("ldap."):
plansmbtorture4testsuite(t, "ad_dc_ntvfs", '-U"$USERNAME%$PASSWORD" //$SERVER_IP/_none_')
ldbdir = os.path.join(srcdir(), "lib/ldb")
# Don't run LDB tests when using system ldb, as we won't have ldbtest installed
if os.path.exists(os.path.join(samba4bindir, "ldbtest")):
plantestsuite("ldb.base", "none", "%s/tests/test-tdb-subunit.sh %s" % (ldbdir, samba4bindir))
else:
skiptestsuite("ldb.base", "Using system LDB, ldbtest not available")
# Tests for RPC
# add tests to this list as they start passing, so we test
# that they stay passing
ncacn_np_tests = ["rpc.schannel", "rpc.join", "rpc.lsa", "rpc.dssetup", "rpc.altercontext", "rpc.netlogon", "rpc.netlogon.admin", "rpc.handles", "rpc.samsync", "rpc.samba3-sessionkey", "rpc.samba3-getusername", "rpc.samba3-lsa", "rpc.samba3-bind", "rpc.samba3-netlogon", "rpc.asyncbind", "rpc.lsalookup", "rpc.lsa-getuser", "rpc.schannel2", "rpc.authcontext"]
ncalrpc_tests = ["rpc.schannel", "rpc.join", "rpc.lsa", "rpc.dssetup", "rpc.altercontext", "rpc.netlogon", "rpc.drsuapi", "rpc.asyncbind", "rpc.lsalookup", "rpc.lsa-getuser", "rpc.schannel2", "rpc.authcontext"]
drs_rpc_tests = smbtorture4_testsuites("drs.rpc")
ncacn_ip_tcp_tests = ["rpc.schannel", "rpc.join", "rpc.lsa", "rpc.dssetup", "rpc.netlogon", "rpc.asyncbind", "rpc.lsalookup", "rpc.lsa-getuser", "rpc.schannel2", "rpc.authcontext", "rpc.samr.passwords.validate"] + drs_rpc_tests
slow_ncacn_np_tests = ["rpc.samlogon", "rpc.samr.users", "rpc.samr.large-dc", "rpc.samr.users.privileges", "rpc.samr.passwords", "rpc.samr.passwords.pwdlastset", "rpc.samr.passwords.lockout", "rpc.samr.passwords.badpwdcount"]
slow_ncacn_ip_tcp_tests = ["rpc.samr", "rpc.cracknames"]
all_rpc_tests = ncalrpc_tests + ncacn_np_tests + ncacn_ip_tcp_tests + slow_ncacn_np_tests + slow_ncacn_ip_tcp_tests + ["rpc.lsa.secrets", "rpc.pac", "rpc.samba3-sharesec", "rpc.countcalls"]
# Make sure all tests get run
rpc_tests = smbtorture4_testsuites("rpc.")
auto_rpc_tests = filter(lambda t: t not in all_rpc_tests, rpc_tests)
for bindoptions in ["seal,padcheck"] + validate_list + ["bigendian"]:
for transport in ["ncalrpc", "ncacn_np", "ncacn_ip_tcp"]:
env = "ad_dc_ntvfs"
if transport == "ncalrpc":
tests = ncalrpc_tests
env = "ad_dc_ntvfs:local"
elif transport == "ncacn_np":
tests = ncacn_np_tests
elif transport == "ncacn_ip_tcp":
tests = ncacn_ip_tcp_tests
else:
raise AssertionError("invalid transport %r"% transport)
for t in tests:
plansmbtorture4testsuite(t, env, ["%s:$SERVER[%s]" % (transport, bindoptions), '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.%s on %s with %s" % (t, transport, bindoptions))
plansmbtorture4testsuite('rpc.samba3-sharesec', env, ["%s:$SERVER[%s]" % (transport, bindoptions), '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN', '--option=torture:share=tmp'], "samba4.rpc.samba3.sharesec on %s with %s" % (transport, bindoptions))
#Plugin S4 DC tests (confirms named pipe auth forwarding). This can be expanded once kerberos is supported in the plugin DC
#
for bindoptions in ["seal,padcheck"] + validate_list + ["bigendian"]:
for t in ncacn_np_tests:
env = "ad_dc"
transport = "ncacn_np"
plansmbtorture4testsuite(t, env, ["%s:$SERVER[%s]" % (transport, bindoptions), '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.%s with %s" % (t, bindoptions))
for bindoptions in [""] + validate_list + ["bigendian"]:
for t in auto_rpc_tests:
plansmbtorture4testsuite(t, "ad_dc_ntvfs", ["$SERVER[%s]" % bindoptions, '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.%s with %s" % (t, bindoptions))
t = "rpc.countcalls"
plansmbtorture4testsuite(t, "ad_dc_ntvfs:local", ["$SERVER[%s]" % bindoptions, '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], modname="samba4.%s" % t)
for transport in ["ncacn_np", "ncacn_ip_tcp"]:
env = "ad_dc_ntvfs"
if transport == "ncacn_np":
tests = slow_ncacn_np_tests
elif transport == "ncacn_ip_tcp":
tests = slow_ncacn_ip_tcp_tests
else:
raise AssertionError("Invalid transport %r" % transport)
for t in tests:
plansmbtorture4testsuite(t, env, ["%s:$SERVER" % transport, '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.%s on %s" % (t, transport))
# Tests for the DFS referral calls implementation
for t in smbtorture4_testsuites("dfs."):
plansmbtorture4testsuite(t, "ad_dc_ntvfs", '//$SERVER/ipc\$ -U$USERNAME%$PASSWORD')
plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/ipc\$ -U$USERNAME%$PASSWORD')
# Tests for the NET API (net.api.become.dc tested below against all the roles)
net_tests = filter(lambda x: "net.api.become.dc" not in x, smbtorture4_testsuites("net."))
for t in net_tests:
plansmbtorture4testsuite(t, "ad_dc_ntvfs", '$SERVER[%s] -U$USERNAME%%$PASSWORD -W$DOMAIN' % validate)
# Tests for session keys and encryption of RPC pipes
# FIXME: Integrate these into a single smbtorture test
transport = "ncacn_np"
for env in ["ad_dc_ntvfs", "nt4_dc"]:
for ntlmoptions in [
"-k no --option=usespnego=yes",
"-k no --option=usespnego=yes --option=ntlmssp_client:128bit=no",
"-k no --option=usespnego=yes --option=ntlmssp_client:56bit=yes",
"-k no --option=usespnego=yes --option=ntlmssp_client:56bit=no",
"-k no --option=usespnego=yes --option=ntlmssp_client:128bit=no --option=ntlmssp_client:56bit=yes",
"-k no --option=usespnego=yes --option=ntlmssp_client:128bit=no --option=ntlmssp_client:56bit=no",
"-k no --option=usespnego=yes --option=clientntlmv2auth=yes",
"-k no --option=usespnego=yes --option=clientntlmv2auth=yes --option=ntlmssp_client:128bit=no",
"-k no --option=usespnego=yes --option=clientntlmv2auth=yes --option=ntlmssp_client:128bit=no --option=ntlmssp_client:56bit=yes",
"-k no --option=usespnego=no --option=clientntlmv2auth=yes",
"-k no --option=gensec:spnego=no --option=clientntlmv2auth=yes",
"-k no --option=usespnego=no"]:
name = "rpc.lsa.secrets on %s with with %s" % (transport, ntlmoptions)
plansmbtorture4testsuite('rpc.lsa.secrets', env, ["%s:$SERVER[]" % (transport), ntlmoptions, '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN', '--option=gensec:target_hostname=$NETBIOSNAME'], "samba4.%s" % name)
plantestsuite("samba.blackbox.pdbtest(%s)" % env, "%s:local" % env, [os.path.join(bbdir, "test_pdbtest.sh"), '$SERVER', "$PREFIX", "pdbtest", smbclient4, '$SMB_CONF_PATH', configuration])
plantestsuite("samba.blackbox.pdbtest.winbind(%s)" % env, "%s:local" % env, [os.path.join(bbdir, "test_pdbtest.sh"), '$SERVER', "$PREFIX", "pdbtest2", smbclient4, '$SMB_CONF_PATH', configuration + " --option='authmethods=wbc'"])
plantestsuite("samba.blackbox.pdbtest.s4winbind(ad_dc_ntvfs)", "ad_dc_ntvfs:local", [os.path.join(bbdir, "test_pdbtest.sh"), '$SERVER', "$PREFIX", "pdbtest3", smbclient4, '$SMB_CONF_PATH', configuration + " --option='authmethods=samba4:winbind'"])
plantestsuite("samba.blackbox.pdbtest.s4winbind_wbclient(ad_dc_ntvfs)", "ad_dc_ntvfs:local", [os.path.join(bbdir, "test_pdbtest.sh"), '$SERVER', "$PREFIX", "pdbtest4", smbclient4, '$SMB_CONF_PATH', configuration + " --option='authmethods=samba4:winbind_wbclient'"])
transports = ["ncacn_np", "ncacn_ip_tcp"]
#Kerberos varies between functional levels, so it is important to check this on all of them
for env in ["ad_dc_ntvfs", "fl2000dc", "fl2003dc", "fl2008r2dc", "ad_dc"]:
transport = "ncacn_np"
plansmbtorture4testsuite('rpc.pac', env, ["%s:$SERVER[]" % (transport, ), '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.rpc.pac on %s" % (transport,))
plansmbtorture4testsuite('rpc.lsa.secrets', env, ["%s:$SERVER[]" % (transport, ), '-k', 'yes', '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN', '--option=gensec:target_hostname=$NETBIOSNAME', 'rpc.lsa.secrets'], "samba4.rpc.lsa.secrets on %s with Kerberos" % (transport,))
plansmbtorture4testsuite('rpc.lsa.secrets', env, ["%s:$SERVER[]" % (transport, ), '-k', 'yes', '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN', "--option=clientusespnegoprincipal=yes", '--option=gensec:target_hostname=$NETBIOSNAME'], "samba4.rpc.lsa.secrets on %s with Kerberos - use target principal" % (transport,))
plansmbtorture4testsuite('rpc.lsa.secrets', env, ["%s:$SERVER[target_principal=dcom/$NETBIOSNAME]" % (transport, ), '-k', 'yes', '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.rpc.lsa.secrets on %s with Kerberos - netbios name principal dcom" % (transport,))
plansmbtorture4testsuite('rpc.lsa.secrets', env, ["%s:$SERVER[target_principal=$NETBIOSNAME\$]" % (transport, ), '-k', 'yes', '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.rpc.lsa.secrets on %s with Kerberos - netbios name principal dollar" % (transport,))
plansmbtorture4testsuite('rpc.lsa.secrets', env, ["%s:$SERVER[target_principal=$NETBIOSNAME]" % (transport, ), '-k', 'yes', '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.rpc.lsa.secrets on %s with Kerberos - netbios name principal" % (transport,))
plansmbtorture4testsuite('rpc.lsa.secrets.none*', env, ["%s:$SERVER" % transport, '-k', 'yes', '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN', "--option=gensec:fake_gssapi_krb5=yes", '--option=gensec:gssapi_krb5=no', '--option=gensec:target_hostname=$NETBIOSNAME'], "samba4.rpc.lsa.secrets on %s with Kerberos - use Samba3 style login" % transport)
plansmbtorture4testsuite('rpc.lsa.secrets.none*', env, ["%s:$SERVER" % transport, '-k', 'yes', '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN', "--option=clientusespnegoprincipal=yes", '--option=gensec:fake_gssapi_krb5=yes', '--option=gensec:gssapi_krb5=no', '--option=gensec:target_hostname=$NETBIOSNAME'], "samba4.rpc.lsa.secrets on %s with Kerberos - use Samba3 style login, use target principal" % transport)
# Winreg tests test bulk Kerberos encryption of DCE/RPC
    # We test rpc.winreg here too, because the winreg interface is
    # handled by the source3/rpc_server code.
for bindoptions in ["connect", "krb5", "krb5,sign", "krb5,seal", "spnego", "spnego,sign", "spnego,seal"]:
plansmbtorture4testsuite('rpc.winreg', env, ["%s:$SERVER[%s]" % (transport, bindoptions), '-k', 'yes', '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.rpc.winreg on %s with %s" % (transport, bindoptions))
for transport in transports:
plansmbtorture4testsuite('rpc.echo', env, ["%s:$SERVER[]" % (transport,), '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.rpc.echo on %s" % (transport, ))
# Echo tests test bulk Kerberos encryption of DCE/RPC
for bindoptions in ["connect", "krb5", "krb5,sign", "krb5,seal", "spnego", "spnego,sign", "spnego,seal"] + validate_list + ["padcheck", "bigendian", "bigendian,seal"]:
echooptions = "--option=socket:testnonblock=True --option=torture:quick=yes -k yes"
plansmbtorture4testsuite('rpc.echo', env, ["%s:$SERVER[%s]" % (transport, bindoptions), echooptions, '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.rpc.echo on %s with %s and %s" % (transport, bindoptions, echooptions))
plansmbtorture4testsuite("net.api.become.dc", env, '$SERVER[%s] -U$USERNAME%%$PASSWORD -W$DOMAIN' % validate)
for bindoptions in ["sign", "seal"]:
plansmbtorture4testsuite('rpc.backupkey', "ad_dc_ntvfs", ["ncacn_np:$SERVER[%s]" % ( bindoptions), '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.rpc.backupkey with %s" % (bindoptions))
for transport in transports:
for bindoptions in ["sign", "seal"]:
for ntlmoptions in [
"--option=ntlmssp_client:ntlm2=yes --option=torture:quick=yes",
"--option=ntlmssp_client:ntlm2=no --option=torture:quick=yes",
"--option=ntlmssp_client:ntlm2=yes --option=ntlmssp_client:128bit=no --option=torture:quick=yes",
"--option=ntlmssp_client:ntlm2=no --option=ntlmssp_client:128bit=no --option=torture:quick=yes",
"--option=ntlmssp_client:ntlm2=yes --option=ntlmssp_client:keyexchange=no --option=torture:quick=yes",
"--option=ntlmssp_client:ntlm2=no --option=ntlmssp_client:keyexchange=no --option=torture:quick=yes",
"--option=clientntlmv2auth=yes --option=ntlmssp_client:keyexchange=no --option=torture:quick=yes",
"--option=clientntlmv2auth=yes --option=ntlmssp_client:128bit=no --option=ntlmssp_client:keyexchange=yes --option=torture:quick=yes",
"--option=clientntlmv2auth=yes --option=ntlmssp_client:128bit=no --option=ntlmssp_client:keyexchange=no --option=torture:quick=yes"]:
if transport == "ncalrpc":
env = "ad_dc_ntvfs:local"
else:
env = "ad_dc_ntvfs"
plansmbtorture4testsuite('rpc.echo', env, ["%s:$SERVER[%s]" % (transport, bindoptions), ntlmoptions, '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.rpc.echo on %s with %s and %s" % (transport, bindoptions, ntlmoptions))
plansmbtorture4testsuite('rpc.echo', "ad_dc_ntvfs", ['ncacn_np:$SERVER[smb2]', '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.rpc.echo on ncacn_np over smb2")
plansmbtorture4testsuite('ntp.signd', "ad_dc_ntvfs:local", ['ncacn_np:$SERVER', '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], "samba4.ntp.signd")
nbt_tests = smbtorture4_testsuites("nbt.")
for t in nbt_tests:
plansmbtorture4testsuite(t, "ad_dc_ntvfs", "//$SERVER/_none_ -U\"$USERNAME%$PASSWORD\"")
# Tests against the NTVFS POSIX backend
ntvfsargs = ["--option=torture:sharedelay=100000", "--option=torture:oplocktimeout=3", "--option=torture:writetimeupdatedelay=500000"]
smb2 = smbtorture4_testsuites("smb2.")
#The QFILEINFO-IPC test needs to be on ipc$
raw = filter(lambda x: "raw.qfileinfo.ipc" not in x, smbtorture4_testsuites("raw."))
base = smbtorture4_testsuites("base.")
netapi = smbtorture4_testsuites("netapi.")
libsmbclient = smbtorture4_testsuites("libsmbclient.")
for t in base + raw + smb2 + netapi + libsmbclient:
plansmbtorture4testsuite(t, "ad_dc_ntvfs", ['//$SERVER/tmp', '-U$USERNAME%$PASSWORD'] + ntvfsargs)
plansmbtorture4testsuite("raw.qfileinfo.ipc", "ad_dc_ntvfs", '//$SERVER/ipc\$ -U$USERNAME%$PASSWORD')
for t in smbtorture4_testsuites("rap."):
plansmbtorture4testsuite(t, "ad_dc_ntvfs", '//$SERVER/IPC\$ -U$USERNAME%$PASSWORD')
# Tests against the NTVFS CIFS backend
for t in base + raw:
plansmbtorture4testsuite(t, "ad_dc_ntvfs", ['//$NETBIOSNAME/cifs', '-U$USERNAME%$PASSWORD', '--kerberos=yes'] + ntvfsargs, modname="samba4.ntvfs.cifs.krb5.%s" % t)
# Test NTVFS CIFS backend with S4U2Self and S4U2Proxy
t = "base.unlink"
plansmbtorture4testsuite(t, "ad_dc_ntvfs", ['//$NETBIOSNAME/cifs', '-U$USERNAME%$PASSWORD', '--kerberos=no'] + ntvfsargs, "samba4.ntvfs.cifs.ntlm.%s" % t)
plansmbtorture4testsuite(t, "rpc_proxy", ['//$NETBIOSNAME/cifs_to_dc', '-U$DC_USERNAME%$DC_PASSWORD', '--kerberos=yes'] + ntvfsargs, "samba4.ntvfs.cifs.krb5.%s" % t)
plansmbtorture4testsuite(t, "rpc_proxy", ['//$NETBIOSNAME/cifs_to_dc', '-U$DC_USERNAME%$DC_PASSWORD', '--kerberos=no'] + ntvfsargs, "samba4.ntvfs.cifs.ntlm.%s" % t)
plansmbtorture4testsuite('echo.udp', 'ad_dc_ntvfs:local', '//$SERVER/whatever')
# Local tests
for t in smbtorture4_testsuites("local."):
#The local.resolve test needs a name to look up using real system (not emulated) name routines
plansmbtorture4testsuite(t, "none", "ncalrpc:localhost")
# Confirm these tests with the system iconv too
for t in ["local.convert_string_handle", "local.convert_string", "local.ndr"]:
options = "ncalrpc: --option='iconv:use_builtin_handlers=false'"
plansmbtorture4testsuite(t, "none", options,
modname="samba4.%s.system.iconv" % t)
tdbtorture4 = binpath("tdbtorture")
if os.path.exists(tdbtorture4):
plantestsuite("tdb.stress", "none", valgrindify(tdbtorture4))
else:
skiptestsuite("tdb.stress", "Using system TDB, tdbtorture not available")
plansmbtorture4testsuite("drs.unit", "none", "ncalrpc:")
# Pidl tests
for f in sorted(os.listdir(os.path.join(samba4srcdir, "../pidl/tests"))):
if f.endswith(".pl"):
planperltestsuite("pidl.%s" % f[:-3], os.path.normpath(os.path.join(samba4srcdir, "../pidl/tests", f)))
# DNS tests
planpythontestsuite("fl2003dc:local", "samba.tests.dns")
for t in smbtorture4_testsuites("dns_internal."):
plansmbtorture4testsuite(t, "ad_dc_ntvfs:local", '//$SERVER/whavever')
# Local tests
for t in smbtorture4_testsuites("dlz_bind9."):
    # The dlz_bind9 tests need to look at the DNS database
plansmbtorture4testsuite(t, "chgdcpass:local", ["ncalrpc:$SERVER", '-U$USERNAME%$PASSWORD'])
planpythontestsuite("nt4_dc", "samba.tests.libsmb_samba_internal");
# Blackbox Tests:
# tests that interact directly with the command-line tools rather than using
# the API. These mainly test that the various command-line options of commands
# work correctly.
for env in ["ad_member", "s4member", "ad_dc_ntvfs", "chgdcpass"]:
plantestsuite("samba4.blackbox.smbclient(%s:local)" % env, "%s:local" % env, [os.path.join(samba4srcdir, "utils/tests/test_smbclient.sh"), '$SERVER', '$SERVER_IP', '$USERNAME', '$PASSWORD', '$DOMAIN', smbclient4])
plantestsuite("samba4.blackbox.samba_tool(ad_dc_ntvfs:local)", "ad_dc_ntvfs:local", [os.path.join(samba4srcdir, "utils/tests/test_samba_tool.sh"), '$SERVER', '$SERVER_IP', '$USERNAME', '$PASSWORD', '$DOMAIN', smbclient4])
plantestsuite("samba4.blackbox.pkinit(ad_dc_ntvfs:local)", "ad_dc_ntvfs:local", [os.path.join(bbdir, "test_pkinit.sh"), '$SERVER', '$USERNAME', '$PASSWORD', '$REALM', '$DOMAIN', '$PREFIX/ad_dc_ntvfs', "aes256-cts-hmac-sha1-96", smbclient4, configuration])
plantestsuite("samba4.blackbox.kinit(ad_dc_ntvfs:local)", "ad_dc_ntvfs:local", [os.path.join(bbdir, "test_kinit.sh"), '$SERVER', '$USERNAME', '$PASSWORD', '$REALM', '$DOMAIN', '$PREFIX', "aes256-cts-hmac-sha1-96", smbclient4, configuration])
plantestsuite("samba4.blackbox.kinit(fl2000dc:local)", "fl2000dc:local", [os.path.join(bbdir, "test_kinit.sh"), '$SERVER', '$USERNAME', '$PASSWORD', '$REALM', '$DOMAIN', '$PREFIX', "arcfour-hmac-md5", smbclient4, configuration])
plantestsuite("samba4.blackbox.kinit(fl2008r2dc:local)", "fl2008r2dc:local", [os.path.join(bbdir, "test_kinit.sh"), '$SERVER', '$USERNAME', '$PASSWORD', '$REALM', '$DOMAIN', '$PREFIX', "aes256-cts-hmac-sha1-96", smbclient4, configuration])
plantestsuite("samba4.blackbox.kinit_trust(fl2008r2dc:local)", "fl2008r2dc:local", [os.path.join(bbdir, "test_kinit_trusts.sh"), '$SERVER', '$USERNAME', '$PASSWORD', '$REALM', '$DOMAIN', '$TRUST_SERVER', '$TRUST_USERNAME', '$TRUST_PASSWORD', '$TRUST_REALM', '$TRUST_DOMAIN', '$PREFIX', "forest", "aes256-cts-hmac-sha1-96"])
plantestsuite("samba4.blackbox.kinit_trust(fl2003dc:local)", "fl2003dc:local", [os.path.join(bbdir, "test_kinit_trusts.sh"), '$SERVER', '$USERNAME', '$PASSWORD', '$REALM', '$DOMAIN', '$TRUST_SERVER', '$TRUST_USERNAME', '$TRUST_PASSWORD', '$TRUST_REALM', '$TRUST_DOMAIN', '$PREFIX', "external", "arcfour-hmac-md5"])
plantestsuite("samba4.blackbox.trust_utils(fl2008r2dc:local)", "fl2008r2dc:local", [os.path.join(bbdir, "test_trust_utils.sh"), '$SERVER', '$USERNAME', '$PASSWORD', '$REALM', '$DOMAIN', '$TRUST_SERVER', '$TRUST_USERNAME', '$TRUST_PASSWORD', '$TRUST_REALM', '$TRUST_DOMAIN', '$PREFIX', "forest"])
plantestsuite("samba4.blackbox.trust_utils(fl2003dc:local)", "fl2003dc:local", [os.path.join(bbdir, "test_trust_utils.sh"), '$SERVER', '$USERNAME', '$PASSWORD', '$REALM', '$DOMAIN', '$TRUST_SERVER', '$TRUST_USERNAME', '$TRUST_PASSWORD', '$TRUST_REALM', '$TRUST_DOMAIN', '$PREFIX', "external"])
plantestsuite("samba4.blackbox.ktpass(ad_dc_ntvfs)", "ad_dc_ntvfs", [os.path.join(bbdir, "test_ktpass.sh"), '$PREFIX/ad_dc_ntvfs'])
plantestsuite("samba4.blackbox.passwords(ad_dc_ntvfs:local)", "ad_dc_ntvfs:local", [os.path.join(bbdir, "test_passwords.sh"), '$SERVER', '$USERNAME', '$PASSWORD', '$REALM', '$DOMAIN', "$PREFIX/ad_dc_ntvfs", smbclient4])
plantestsuite("samba4.blackbox.export.keytab(ad_dc_ntvfs:local)", "ad_dc_ntvfs:local", [os.path.join(bbdir, "test_export_keytab.sh"), '$SERVER', '$USERNAME', '$REALM', '$DOMAIN', "$PREFIX", smbclient4])
plantestsuite("samba4.blackbox.cifsdd(ad_dc_ntvfs)", "ad_dc_ntvfs", [os.path.join(samba4srcdir, "client/tests/test_cifsdd.sh"), '$SERVER', '$USERNAME', '$PASSWORD', "$DOMAIN"])
plantestsuite("samba4.blackbox.nmblookup(ad_dc_ntvfs)", "ad_dc_ntvfs", [os.path.join(samba4srcdir, "utils/tests/test_nmblookup.sh"), '$NETBIOSNAME', '$NETBIOSALIAS', '$SERVER', '$SERVER_IP', nmblookup4])
plantestsuite("samba4.blackbox.locktest(ad_dc_ntvfs)", "ad_dc_ntvfs", [os.path.join(samba4srcdir, "torture/tests/test_locktest.sh"), '$SERVER', '$USERNAME', '$PASSWORD', '$DOMAIN', '$PREFIX'])
plantestsuite("samba4.blackbox.masktest", "ad_dc_ntvfs", [os.path.join(samba4srcdir, "torture/tests/test_masktest.sh"), '$SERVER', '$USERNAME', '$PASSWORD', '$DOMAIN', '$PREFIX'])
plantestsuite("samba4.blackbox.gentest(ad_dc_ntvfs)", "ad_dc_ntvfs", [os.path.join(samba4srcdir, "torture/tests/test_gentest.sh"), '$SERVER', '$USERNAME', '$PASSWORD', '$DOMAIN', "$PREFIX"])
plantestsuite("samba4.blackbox.rfc2307_mapping(ad_dc_ntvfs:local)", "ad_dc_ntvfs:local", [os.path.join(samba4srcdir, "../nsswitch/tests/test_rfc2307_mapping.sh"), '$DOMAIN', '$USERNAME', '$PASSWORD', "$SERVER", "$UID_RFC2307TEST", "$GID_RFC2307TEST", configuration])
plantestsuite("samba4.blackbox.chgdcpass", "chgdcpass", [os.path.join(bbdir, "test_chgdcpass.sh"), '$SERVER', "CHGDCPASS\$", '$REALM', '$DOMAIN', '$PREFIX', "aes256-cts-hmac-sha1-96", '$SELFTEST_PREFIX/chgdcpass', smbclient4])
plantestsuite("samba4.blackbox.samba_upgradedns(chgdcpass:local)", "chgdcpass:local", [os.path.join(bbdir, "test_samba_upgradedns.sh"), '$SERVER', '$REALM', '$PREFIX', '$SELFTEST_PREFIX/chgdcpass'])
plantestsuite_loadlist("samba4.rpc.echo against NetBIOS alias", "ad_dc_ntvfs", [valgrindify(smbtorture4), "$LISTOPT", "$LOADLIST", 'ncacn_np:$NETBIOSALIAS', '-U$DOMAIN/$USERNAME%$PASSWORD', 'rpc.echo'])
# Tests using the "Simple" NTVFS backend
for t in ["base.rw1"]:
plansmbtorture4testsuite(t, "ad_dc_ntvfs", ["//$SERVER/simple", '-U$USERNAME%$PASSWORD'], modname="samba4.ntvfs.simple.%s" % t)
# Domain S4member Tests
plansmbtorture4testsuite('rpc.echo', "s4member", ['ncacn_np:$NETBIOSNAME', '-U$NETBIOSNAME/$USERNAME%$PASSWORD'], "samba4.rpc.echo against s4member server with local creds")
plansmbtorture4testsuite('rpc.echo', "s4member", ['ncacn_np:$NETBIOSNAME', '-U$DOMAIN/$DC_USERNAME%$DC_PASSWORD'], "samba4.rpc.echo against s4member server with domain creds")
plansmbtorture4testsuite('rpc.samr', "s4member", ['ncacn_np:$NETBIOSNAME', '-U$NETBIOSNAME/$USERNAME%$PASSWORD'], "samba4.rpc.samr against s4member server with local creds")
plansmbtorture4testsuite('rpc.samr.users', "s4member", ['ncacn_np:$NETBIOSNAME', '-U$NETBIOSNAME/$USERNAME%$PASSWORD'], "samba4.rpc.samr.users against s4member server with local creds",)
plansmbtorture4testsuite('rpc.samr.passwords', "s4member", ['ncacn_np:$NETBIOSNAME', '-U$NETBIOSNAME/$USERNAME%$PASSWORD'], "samba4.rpc.samr.passwords against s4member server with local creds")
plantestsuite("samba4.blackbox.smbclient against s4member server with local creds", "s4member", [os.path.join(samba4srcdir, "client/tests/test_smbclient.sh"), '$NETBIOSNAME', '$USERNAME', '$PASSWORD', '$NETBIOSNAME', '$PREFIX', smbclient4])
# RPC Proxy
plansmbtorture4testsuite("rpc.echo", "rpc_proxy", ['ncacn_ip_tcp:$NETBIOSNAME', '-U$DOMAIN/$DC_USERNAME%$DC_PASSWORD'], modname="samba4.rpc.echo against rpc proxy with domain creds")
# Tests SMB signing
for mech in [
"-k no",
"-k no --option=usespnego=no",
"-k no --option=gensec:spengo=no",
"-k yes",
"-k yes --option=gensec:fake_gssapi_krb5=yes --option=gensec:gssapi_krb5=no"]:
for signing in ["--signing=on", "--signing=required"]:
signoptions = "%s %s" % (mech, signing)
name = "smb.signing on with %s" % signoptions
plansmbtorture4testsuite('base.xcopy', "ad_dc_ntvfs", ['//$NETBIOSNAME/xcopy_share', signoptions, '-U$USERNAME%$PASSWORD'], modname="samba4.%s" % name)
for mech in [
"-k no",
"-k no --option=usespnego=no",
"-k no --option=gensec:spengo=no",
"-k yes"]:
signoptions = "%s --signing=off" % mech
name = "smb.signing disabled on with %s" % signoptions
plansmbtorture4testsuite('base.xcopy', "s4member", ['//$NETBIOSNAME/xcopy_share', signoptions, '-U$DC_USERNAME%$DC_PASSWORD'], "samba4.%s domain-creds" % name)
plansmbtorture4testsuite('base.xcopy', "ad_member", ['//$NETBIOSNAME/xcopy_share', signoptions, '-U$DC_USERNAME%$DC_PASSWORD'], "samba4.%s domain-creds" % name)
plansmbtorture4testsuite('base.xcopy', "ad_dc", ['//$NETBIOSNAME/xcopy_share', signoptions, '-U$USERNAME%$PASSWORD'], "samba4.%s" % name)
plansmbtorture4testsuite('base.xcopy', "ad_dc",
['//$NETBIOSNAME/xcopy_share', signoptions, '-U$DC_USERNAME%$DC_PASSWORD'], "samba4.%s administrator" % name)
plantestsuite("samba4.blackbox.bogusdomain", "ad_member", ["testprogs/blackbox/bogus.sh", "$NETBIOSNAME", "xcopy_share", '$USERNAME', '$PASSWORD', '$DC_USERNAME', '$DC_PASSWORD', smbclient4])
for mech in [
"-k no",
"-k no --option=usespnego=no",
"-k no --option=gensec:spengo=no"]:
signoptions = "%s --signing=off" % mech
plansmbtorture4testsuite('base.xcopy', "s4member", ['//$NETBIOSNAME/xcopy_share', signoptions, '-U$NETBIOSNAME/$USERNAME%$PASSWORD'], modname="samba4.smb.signing on with %s local-creds" % signoptions)
plansmbtorture4testsuite('base.xcopy', "ad_dc_ntvfs", ['//$NETBIOSNAME/xcopy_share', '-k', 'no', '--signing=yes', '-U%'], modname="samba4.smb.signing --signing=yes anon")
plansmbtorture4testsuite('base.xcopy', "ad_dc_ntvfs", ['//$NETBIOSNAME/xcopy_share', '-k', 'no', '--signing=required', '-U%'], modname="samba4.smb.signing --signing=required anon")
plansmbtorture4testsuite('base.xcopy', "s4member", ['//$NETBIOSNAME/xcopy_share', '-k', 'no', '--signing=no', '-U%'], modname="samba4.smb.signing --signing=no anon")
wb_opts_default = ["--option=\"torture:strict mode=no\"", "--option=\"torture:timelimit=1\"", "--option=\"torture:winbindd_separator=/\"", "--option=\"torture:winbindd_netbios_name=$SERVER\"", "--option=\"torture:winbindd_netbios_domain=$DOMAIN\""]
winbind_ad_client_tests = smbtorture4_testsuites("winbind.struct") + smbtorture4_testsuites("winbind.pac")
winbind_wbclient_tests = smbtorture4_testsuites("winbind.wbclient")
for env in ["ad_dc", "ad_dc_ntvfs", "s4member", "ad_member", "nt4_member"]:
wb_opts = wb_opts_default[:]
if env in ["ad_member"]:
wb_opts += ["--option=\"torture:winbindd_domain_without_prefix=$DOMAIN\""]
for t in winbind_ad_client_tests:
plansmbtorture4testsuite(t, "%s:local" % env, wb_opts + ['//$SERVER/tmp', '--realm=$REALM', '--machine-pass', '--option=torture:addc=$DC_SERVER'])
for env in ["nt4_dc", "fl2003dc"]:
for t in winbind_wbclient_tests:
plansmbtorture4testsuite(t, "%s:local" % env, '//$SERVER/tmp -U$DC_USERNAME%$DC_PASSWORD')
for env in ["nt4_dc", "nt4_member", "ad_dc", "ad_dc_ntvfs", "ad_member", "s4member", "chgdcpass"]:
tests = ["--ping", "--separator",
"--own-domain",
"--all-domains",
"--trusted-domains",
"--domain-info=BUILTIN",
"--domain-info=$DOMAIN",
"--online-status",
"--online-status --domain=BUILTIN",
"--online-status --domain=$DOMAIN",
"--check-secret --domain=$DOMAIN",
"--change-secret --domain=$DOMAIN",
"--check-secret --domain=$DOMAIN",
"--online-status --domain=$DOMAIN",
"--domain-users",
"--domain-groups",
"--name-to-sid=$DC_USERNAME",
"--name-to-sid=$DOMAIN/$DC_USERNAME",
"--user-info=$DOMAIN/$DC_USERNAME",
"--user-groups=$DOMAIN/$DC_USERNAME",
"--authenticate=$DOMAIN/$DC_USERNAME%$DC_PASSWORD",
"--allocate-uid",
"--allocate-gid"]
for t in tests:
plantestsuite("samba.wbinfo_simple.(%s:local).%s" % (env, t), "%s:local" % env, [os.path.join(srcdir(), "nsswitch/tests/test_wbinfo_simple.sh"), t])
plantestsuite(
"samba.wbinfo_sids2xids.(%s:local)" % env, "%s:local" % env,
[os.path.join(samba3srcdir, "script/tests/test_wbinfo_sids2xids.sh")])
plantestsuite(
"samba.ntlm_auth.diagnostics(%s:local)" % env, "%s:local" % env,
[os.path.join(samba3srcdir, "script/tests/test_ntlm_auth_diagnostics.sh"), ntlm_auth3, '$DOMAIN', '$DC_USERNAME', '$DC_PASSWORD', configuration])
plantestsuite("samba.ntlm_auth.(%s:local)" % env, "%s:local" % env, [os.path.join(samba3srcdir, "script/tests/test_ntlm_auth_s3.sh"), valgrindify(python), samba3srcdir, ntlm_auth3, '$DOMAIN', '$DC_USERNAME', '$DC_PASSWORD', configuration])
nsstest4 = binpath("nsstest")
for env in ["ad_dc:local", "ad_dc_ntvfs:local", "s4member:local", "nt4_dc:local", "ad_member:local", "nt4_member:local"]:
if os.path.exists(nsstest4):
plantestsuite("samba.nss.test using winbind(%s)" % env, env, [os.path.join(bbdir, "nsstest.sh"), nsstest4, os.path.join(samba4bindir, "shared/libnss_wrapper_winbind.so.2")])
else:
skiptestsuite("samba.nss.test using winbind(%s)" % env, "nsstest not available")
subunitrun = valgrindify(python) + " " + os.path.join(samba4srcdir, "scripting/bin/subunitrun")
def planoldpythontestsuite(env, module, name=None, extra_path=[], environ={}, extra_args=[]):
environ = dict(environ)
py_path = list(extra_path)
if py_path:
environ["PYTHONPATH"] = ":".join(["$PYTHONPATH"] + py_path)
args = ["%s=%s" % item for item in environ.iteritems()]
args += [subunitrun, "$LISTOPT", "$LOADLIST", module]
args += extra_args
if name is None:
name = module
plantestsuite_loadlist(name, env, args)
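# For illustration only (the module name below is hypothetical): a call like
#   planoldpythontestsuite("none", "samba.tests.example", environ={"SOME_VAR": "1"})
# plans a loadlist test whose command is assembled as
#   ['SOME_VAR=1', subunitrun, '$LISTOPT', '$LOADLIST', 'samba.tests.example']
# i.e. the environment assignments come first, then the subunitrun wrapper,
# the loadlist placeholders and finally the module under test.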
planoldpythontestsuite("ad_dc_ntvfs:local", "samba.tests.gensec", extra_args=['-U"$USERNAME%$PASSWORD"'])
planoldpythontestsuite("none", "simple", extra_path=["%s/lib/tdb/python/tests" % srcdir()], name="tdb.python")
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.dcerpc.sam")
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.dsdb")
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.dcerpc.bare")
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.dcerpc.unix")
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.dcerpc.srvsvc")
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.samba_tool.timecmd")
# We run this test against both AD DC implementations because it is
# the only test we have of GPO get/set behaviour, and this involves
# the file server as well as the LDAP server.
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.samba_tool.gpo")
planpythontestsuite("ad_dc:local", "samba.tests.samba_tool.gpo")
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.samba_tool.processes")
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.samba_tool.user")
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.samba_tool.group")
planpythontestsuite("ad_dc:local", "samba.tests.samba_tool.ntacl")
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.dcerpc.rpcecho")
planoldpythontestsuite("ad_dc_ntvfs:local", "samba.tests.dcerpc.registry", extra_args=['-U"$USERNAME%$PASSWORD"'])
planoldpythontestsuite("ad_dc_ntvfs", "samba.tests.dcerpc.dnsserver", extra_args=['-U"$USERNAME%$PASSWORD"'])
planoldpythontestsuite("ad_dc", "samba.tests.dcerpc.dnsserver", extra_args=['-U"$USERNAME%$PASSWORD"'])
plantestsuite_loadlist("samba4.ldap.python(ad_dc_ntvfs)", "ad_dc_ntvfs", [python, os.path.join(samba4srcdir, "dsdb/tests/python/ldap.py"), '$SERVER', '-U"$USERNAME%$PASSWORD"', '--workgroup=$DOMAIN', '$LOADLIST', '$LISTOPT'])
plantestsuite_loadlist("samba4.tokengroups.python(ad_dc_ntvfs)", "ad_dc_ntvfs:local", [python, os.path.join(samba4srcdir, "dsdb/tests/python/token_group.py"), '$SERVER', '-U"$USERNAME%$PASSWORD"', '--workgroup=$DOMAIN', '$LOADLIST', '$LISTOPT'])
plantestsuite("samba4.sam.python(ad_dc_ntvfs)", "ad_dc_ntvfs", [python, os.path.join(samba4srcdir, "dsdb/tests/python/sam.py"), '$SERVER', '-U"$USERNAME%$PASSWORD"', '--workgroup=$DOMAIN'])
plantestsuite("samba4.user_account_control.python(ad_dc_ntvfs)", "ad_dc_ntvfs", [python, os.path.join(samba4srcdir, "dsdb/tests/python/user_account_control.py"), '$SERVER', '-U"$USERNAME%$PASSWORD"', '--workgroup=$DOMAIN'])
planoldpythontestsuite("ad_dc_ntvfs", "dsdb_schema_info",
extra_path=[os.path.join(samba4srcdir, 'dsdb/tests/python')],
name="samba4.schemaInfo.python(ad_dc_ntvfs)",
extra_args=['-U"$DOMAIN/$DC_USERNAME%$DC_PASSWORD"'])
plantestsuite_loadlist("samba4.urgent_replication.python(ad_dc_ntvfs)", "ad_dc_ntvfs:local", [python, os.path.join(samba4srcdir, "dsdb/tests/python/urgent_replication.py"), '$PREFIX_ABS/ad_dc_ntvfs/private/sam.ldb', '$LOADLIST', '$LISTOPT'])
plantestsuite_loadlist("samba4.ldap.dirsync.python(ad_dc_ntvfs)", "ad_dc_ntvfs", [python, os.path.join(samba4srcdir, "dsdb/tests/python/dirsync.py"), '$SERVER', '-U"$USERNAME%$PASSWORD"', '--workgroup=$DOMAIN', '$LOADLIST', '$LISTOPT'])
plantestsuite_loadlist("samba4.ldap.match_rules.python", "ad_dc_ntvfs", [python, os.path.join(srcdir(), "lib/ldb-samba/tests/match_rules.py"), '$SERVER', '-U"$USERNAME%$PASSWORD"', '--workgroup=$DOMAIN', '$LOADLIST', '$LISTOPT'])
plantestsuite_loadlist("samba4.ldap.sites.python(ad_dc_ntvfs)", "ad_dc_ntvfs", [python, os.path.join(samba4srcdir, "dsdb/tests/python/sites.py"), '$SERVER', '-U"$USERNAME%$PASSWORD"', '--workgroup=$DOMAIN', '$LOADLIST', '$LISTOPT'])
for env in ["ad_dc_ntvfs", "fl2000dc", "fl2003dc", "fl2008r2dc"]:
plantestsuite_loadlist("samba4.ldap_schema.python(%s)" % env, env, [python, os.path.join(samba4srcdir, "dsdb/tests/python/ldap_schema.py"), '$SERVER', '-U"$USERNAME%$PASSWORD"', '--workgroup=$DOMAIN', '$LOADLIST', '$LISTOPT'])
plantestsuite("samba4.ldap.possibleInferiors.python(%s)" % env, env, [python, os.path.join(samba4srcdir, "dsdb/samdb/ldb_modules/tests/possibleinferiors.py"), "ldap://$SERVER", '-U"$USERNAME%$PASSWORD"', "-W$DOMAIN"])
plantestsuite_loadlist("samba4.ldap.secdesc.python(%s)" % env, env, [python, os.path.join(samba4srcdir, "dsdb/tests/python/sec_descriptor.py"), '$SERVER', '-U"$USERNAME%$PASSWORD"', '--workgroup=$DOMAIN', '$LOADLIST', '$LISTOPT'])
plantestsuite_loadlist("samba4.ldap.acl.python(%s)" % env, env, [python, os.path.join(samba4srcdir, "dsdb/tests/python/acl.py"), '$SERVER', '-U"$USERNAME%$PASSWORD"', '--workgroup=$DOMAIN', '$LOADLIST', '$LISTOPT'])
if env != "fl2000dc":
# This test makes excessive use of the "userPassword" attribute which
# isn't available on DCs with Windows 2000 domain function level -
# therefore skip it in that configuration
plantestsuite_loadlist("samba4.ldap.passwords.python(%s)" % env, env, [python, os.path.join(samba4srcdir, "dsdb/tests/python/passwords.py"), "$SERVER", '-U"$USERNAME%$PASSWORD"', "-W$DOMAIN", '$LOADLIST', '$LISTOPT'])
plantestsuite_loadlist("samba4.ldap.password_lockout.python(%s)" % env, env, [python, os.path.join(samba4srcdir, "dsdb/tests/python/password_lockout.py"), "$SERVER", '-U"$USERNAME%$PASSWORD"', "-W$DOMAIN", "--realm=$REALM", '$LOADLIST', '$LISTOPT'])
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.upgradeprovisionneeddc")
planpythontestsuite("ad_dc:local", "samba.tests.posixacl")
planpythontestsuite("ad_dc_no_nss:local", "samba.tests.posixacl")
plantestsuite_loadlist("samba4.deletetest.python(ad_dc_ntvfs)", "ad_dc_ntvfs", [python, os.path.join(samba4srcdir, "dsdb/tests/python/deletetest.py"),
'$SERVER', '-U"$USERNAME%$PASSWORD"', '--workgroup=$DOMAIN', '$LOADLIST', '$LISTOPT'])
plantestsuite("samba4.blackbox.samba3dump", "none", [os.path.join(samba4srcdir, "selftest/test_samba3dump.sh")])
plantestsuite("samba4.blackbox.upgrade", "none", ["PYTHON=%s" % python, os.path.join(samba4srcdir, "setup/tests/blackbox_s3upgrade.sh"), '$PREFIX/provision'])
plantestsuite("samba4.blackbox.provision.py", "none", ["PYTHON=%s" % python, os.path.join(samba4srcdir, "setup/tests/blackbox_provision.sh"), '$PREFIX/provision'])
plantestsuite("samba4.blackbox.upgradeprovision.current", "none", ["PYTHON=%s" % python, os.path.join(samba4srcdir, "setup/tests/blackbox_upgradeprovision.sh"), '$PREFIX/provision'])
plantestsuite("samba4.blackbox.setpassword.py", "none", ["PYTHON=%s" % python, os.path.join(samba4srcdir, "setup/tests/blackbox_setpassword.sh"), '$PREFIX/provision'])
plantestsuite("samba4.blackbox.newuser.py", "none", ["PYTHON=%s" % python, os.path.join(samba4srcdir, "setup/tests/blackbox_newuser.sh"), '$PREFIX/provision'])
plantestsuite("samba4.blackbox.group.py", "none", ["PYTHON=%s" % python, os.path.join(samba4srcdir, "setup/tests/blackbox_group.sh"), '$PREFIX/provision'])
plantestsuite("samba4.blackbox.spn.py(ad_dc_ntvfs:local)", "ad_dc_ntvfs:local", ["PYTHON=%s" % python, os.path.join(samba4srcdir, "setup/tests/blackbox_spn.sh"), '$PREFIX/ad_dc_ntvfs'])
plantestsuite_loadlist("samba4.ldap.bind(ad_dc_ntvfs)", "ad_dc_ntvfs", [python, os.path.join(srcdir(), "auth/credentials/tests/bind.py"), '$SERVER', '-U"$USERNAME%$PASSWORD"', '$LOADLIST', '$LISTOPT'])
# This makes sure we test the rid allocation code
t = "rpc.samr.large-dc"
plansmbtorture4testsuite(t, "vampire_dc", ['$SERVER', '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], modname=("samba4.%s.one" % t))
plansmbtorture4testsuite(t, "vampire_dc", ['$SERVER', '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], modname="samba4.%s.two" % t)
# some RODC testing
for env in ['rodc']:
plansmbtorture4testsuite('rpc.echo', env, ['ncacn_np:$SERVER', "-k", "yes", '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN'], modname="samba4.rpc.echo")
plansmbtorture4testsuite('rpc.echo', "%s:local" % env, ['ncacn_np:$SERVER', "-k", "yes", '-P', '--workgroup=$DOMAIN'], modname="samba4.rpc.echo")
plantestsuite("samba4.blackbox.provision-backend", "none", ["PYTHON=%s" % python, os.path.join(samba4srcdir, "setup/tests/blackbox_provision-backend.sh"), '$PREFIX/provision'])
# Test renaming the DC
plantestsuite("samba4.blackbox.renamedc.sh", "none", ["PYTHON=%s" % python, os.path.join(bbdir, "renamedc.sh"), '$PREFIX/provision'])
# Demote the vampire DC, it must be the last test on the VAMPIRE DC
for env in ['vampire_dc', 'promoted_dc']:
# DRS python tests
planoldpythontestsuite(env, "samba.tests.blackbox.samba_tool_drs",
environ={'DC1': '$DC_SERVER', 'DC2': '$%s_SERVER' % env.upper()},
extra_args=['-U$DOMAIN/$DC_USERNAME%$DC_PASSWORD'])
planoldpythontestsuite("%s:local" % env, "replica_sync",
extra_path=[os.path.join(samba4srcdir, 'torture/drs/python')],
name="samba4.drs.replica_sync.python(%s)" % env,
environ={'DC1': '$DC_SERVER', 'DC2': '$%s_SERVER' % env.upper()},
extra_args=['-U$DOMAIN/$DC_USERNAME%$DC_PASSWORD'])
planoldpythontestsuite(env, "delete_object",
extra_path=[os.path.join(samba4srcdir, 'torture/drs/python')],
name="samba4.drs.delete_object.python(%s)" % env,
environ={'DC1': '$DC_SERVER', 'DC2': '$%s_SERVER' % env.upper()},
extra_args=['-U$DOMAIN/$DC_USERNAME%$DC_PASSWORD'])
planoldpythontestsuite(env, "fsmo",
name="samba4.drs.fsmo.python(%s)" % env,
extra_path=[os.path.join(samba4srcdir, 'torture/drs/python')],
environ={'DC1': "$DC_SERVER", 'DC2': '$%s_SERVER' % env.upper()},
extra_args=['-U$DOMAIN/$DC_USERNAME%$DC_PASSWORD'])
planoldpythontestsuite(env, "repl_schema",
extra_path=[os.path.join(samba4srcdir, 'torture/drs/python')],
name="samba4.drs.repl_schema.python(%s)" % env,
environ={'DC1': "$DC_SERVER", 'DC2': '$%s_SERVER' % env.upper()},
extra_args=['-U$DOMAIN/$DC_USERNAME%$DC_PASSWORD'])
plantestsuite("samba4.blackbox.samba_tool_demote(%s)" % env, env, [os.path.join(samba4srcdir, "utils/tests/test_demote.sh"), '$SERVER', '$SERVER_IP', '$USERNAME', '$PASSWORD', '$DOMAIN', '$DC_SERVER', '$PREFIX/%s' % env, smbclient4])
for env in ["ad_dc_ntvfs", "s4member", "rodc", "promoted_dc", "ad_dc", "ad_member"]:
plantestsuite("samba.blackbox.wbinfo(%s:local)" % env, "%s:local" % env, [os.path.join(samba4srcdir, "../nsswitch/tests/test_wbinfo.sh"), '$DOMAIN', '$DC_USERNAME', '$DC_PASSWORD', env])
for env in ["ad_dc_ntvfs", "rodc", "promoted_dc", "ad_dc", "fl2000dc", "fl2003dc", "fl2008r2dc"]:
if env == "rodc":
extra_options = ['--option=torture:expect_rodc=true']
else:
extra_options = []
plansmbtorture4testsuite('krb5.kdc', env, ['ncacn_np:$SERVER_IP', "-k", "yes", '-U$USERNAME%$PASSWORD', '--workgroup=$DOMAIN', '--realm=$REALM'] + extra_options,
"samba4.krb5.kdc with specified account")
plansmbtorture4testsuite('krb5.kdc', env, ['ncacn_np:$SERVER_IP', "-k", "yes", '-Utestdenied%$PASSWORD', '--workgroup=$DOMAIN', '--realm=$REALM', '--option=torture:krb5-upn=testdenied_upn@$REALM.upn'] + extra_options,
"samba4.krb5.kdc with account DENIED permission to replicate to an RODC")
# These last two tests are for users cached at the RODC
if env == "rodc":
extra_options = ['--option=torture:expect_rodc=true', '--option=torture:expect_cached_at_rodc=true']
else:
extra_options = []
plansmbtorture4testsuite('krb5.kdc', "%s:local" % env, ['ncacn_np:$SERVER_IP', "-k", "yes", '-P',
'--workgroup=$DOMAIN', '--realm=$REALM',
'--option=torture:krb5-hostname=$SERVER',
'--option=torture:run_removedollar_test=true',
'--option=torture:expect_machine_account=true'] + extra_options,
"samba4.krb5.kdc with machine account")
plansmbtorture4testsuite('krb5.kdc', env, ['ncacn_np:$SERVER_IP', "-k", "yes", '-Utestallowed\ account%$PASSWORD',
'--workgroup=$DOMAIN', '--realm=$REALM',
'--option=torture:expect_machine_account=true',
'--option=torture:krb5-upn=testallowed\ upn@$REALM',
'--option=torture:krb5-hostname=testallowed'] + extra_options,
"samba4.krb5.kdc with account ALLOWED permission to replicate to an RODC")
# TODO: Verifying the databases really should be a part of the
# environment teardown.
# check the databases are all OK. PLEASE LEAVE THIS AS THE LAST TEST
for env in ["ad_dc_ntvfs", "ad_dc", "fl2000dc", "fl2003dc", "fl2008r2dc", 'vampire_dc', 'promoted_dc']:
plantestsuite("samba4.blackbox.dbcheck(%s)" % env, env + ":local" , ["PYTHON=%s" % python, os.path.join(bbdir, "dbcheck.sh"), '$PREFIX/provision', configuration])
for env in [
'vampire_dc',
'promoted_dc']:
planoldpythontestsuite(env, "samba.tests.kcc",
name="samba.tests.kcc",
environ={'TEST_SERVER': '$SERVER', 'TEST_USERNAME': '$USERNAME',
'TEST_PASSWORD': '$PASSWORD',
'TEST_ENV': env
},
extra_path=[os.path.join(srcdir(), "samba/python"), ]
)
|
starrybeam/samba
|
source4/selftest/tests.py
|
Python
|
gpl-3.0
| 46,933
|
#!/usr/bin/python
#-------------------------------------------------------------------------------------------------------------------
# Used to monitor DC sensors based on the ACS758
# It uses the MCP3008 Analogue-to-digital converter
# It uploads values to m2mlight.com platform
#
# Credits:
# - http://henrysbench.capnfatz.com/henrys-bench/arduino-current-measurements/acs758-arduino-current-sensor-tutorial/
# - http://www.raspberrypi-spy.co.uk/2013/10/analogue-sensors-on-the-raspberry-pi-using-an-mcp3008/
#---------------------------------------------------------------------------------------------------------------------
import spidev
import time
import os
import urllib
# Open SPI bus
spi = spidev.SpiDev()
spi.open(0,0)
# Function to read SPI data from MCP3008 chip
# Channel must be an integer 0-7
def ReadChannel(channel):
  # MCP3008 protocol: byte 1 carries the start bit, byte 2 selects
  # single-ended mode plus the channel number in its top nibble, and the
  # third byte just clocks the conversion result back out.
  adc = spi.xfer2([1,(8+channel)<<4,0])
  # The 10-bit result arrives split across the last two bytes.
  data = ((adc[1]&3) << 8) + adc[2]
  return data
# Function to convert data to voltage level,
# rounded to specified number of decimal places.
def ConvertVolts(data,places):
volts = (data * 3.3) / float(1023)
volts = round(volts,places)
return volts
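# Worked example (numbers are illustrative only): a raw ADC reading of 512
# gives ConvertVolts(512, 2) == round((512 * 3.3) / 1023.0, 2) == 1.65,
# i.e. roughly half of the 3.3 V reference voltage.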
# Return the current in amps for a sensor based on the ACS758
def ACS758_Current_Measurement(RawValue, mVperAmp, ACSoffset):
Voltage = 0
Amps = 0
Voltage = (RawValue / 1023.0) * 5000
Amps = ((Voltage - ACSoffset) / mVperAmp)
return Amps
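# Worked example (numbers are illustrative only): for a 20 mV/A sensor with a
# 2500 mV zero-current offset and a raw ADC reading of 614,
#   Voltage = (614 / 1023.0) * 5000 ~= 3001 mV
#   Amps    = (3001 - 2500) / 20   ~= 25 A
# Note the function expects the raw 0-1023 ADC value, not a converted voltage.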
# Define sensor channels
dc_solar_channel = 0
dc_wind_channel = 1
# Define delay between readings
delay = 15
while True:
# Read the DC sensor data - solar panels
light_level0 = ReadChannel(dc_solar_channel)
light_volts0 = ConvertVolts(light_level0,2)
# Read the DC sensor data - wind turbine
light_level1 = ReadChannel(dc_wind_channel)
light_volts1 = ConvertVolts(light_level1,2)
  # Current sensor of 100 Amp - solar (20 mV/A); pass the raw ADC reading,
  # since ACS758_Current_Measurement rescales the 0-1023 value itself
  current_value_solar = ACS758_Current_Measurement(light_level0, 20, 2500)
  # Current sensor of 50 Amp - wind (40 mV/A)
  current_value_wind = ACS758_Current_Measurement(light_level1, 40, 2500)
# Print out results
print "--------------------------------------------"
print("Solar : {} ({}V) {}".format(light_level0,light_volts0, current_value_solar))
print("Wind : {} ({}V) {}".format(light_level1,light_volts1, current_value_wind))
  # sensor and alert api_key values obtained from m2mlight.com
  api_key_solar = "56pLJotkFX"
  api_key_wind = "578i6BKAlY"
  # Upload values to m2mlight.com platform using the api_keys
data1 = urllib.urlencode({"api_key":api_key_solar,"value":current_value_solar})
u1 = urllib.urlopen("http://m2mlight.com/iot/send_sensor_value?%s" % data1)
data2 = urllib.urlencode({"api_key":api_key_wind,"value":current_value_wind})
u2 = urllib.urlopen("http://m2mlight.com/iot/send_sensor_value?%s" % data2)
# Wait before repeating loop
time.sleep(delay)
|
m2mlight/m2mData
|
example_server_functions_with_a_sensor_v2.py
|
Python
|
gpl-3.0
| 2,818
|
# -*- coding: utf-8 -*-
import bpy
from bpy.types import PropertyGroup
from bpy.props import BoolProperty, EnumProperty, FloatProperty, FloatVectorProperty, IntProperty, StringProperty
from mmd_tools import register_wrap
from mmd_tools.core import material
from mmd_tools.core.material import FnMaterial
from mmd_tools.core.model import Model
from mmd_tools import utils
def _updateAmbientColor(prop, context):
FnMaterial(prop.id_data).update_ambient_color()
def _updateDiffuseColor(prop, context):
FnMaterial(prop.id_data).update_diffuse_color()
def _updateAlpha(prop, context):
FnMaterial(prop.id_data).update_alpha()
def _updateSpecularColor(prop, context):
FnMaterial(prop.id_data).update_specular_color()
def _updateShininess(prop, context):
FnMaterial(prop.id_data).update_shininess()
def _updateIsDoubleSided(prop, context):
FnMaterial(prop.id_data).update_is_double_sided()
def _updateSphereMapType(prop, context):
FnMaterial(prop.id_data).update_sphere_texture_type(context.active_object)
def _updateToonTexture(prop, context):
FnMaterial(prop.id_data).update_toon_texture()
def _updateDropShadow(prop, context):
FnMaterial(prop.id_data).update_drop_shadow()
def _updateSelfShadowMap(prop, context):
FnMaterial(prop.id_data).update_self_shadow_map()
def _updateSelfShadow(prop, context):
FnMaterial(prop.id_data).update_self_shadow()
def _updateEnabledToonEdge(prop, context):
FnMaterial(prop.id_data).update_enabled_toon_edge()
def _updateEdgeColor(prop, context):
FnMaterial(prop.id_data).update_edge_color()
def _updateEdgeWeight(prop, context):
FnMaterial(prop.id_data).update_edge_weight()
def _getNameJ(prop):
return prop.get('name_j', '')
def _setNameJ(prop, value):
old_value = prop.get('name_j')
prop_value = value
if prop_value and prop_value != old_value:
root = Model.findRoot(bpy.context.active_object)
if root:
rig = Model(root)
prop_value = utils.uniqueName(value, [mat.mmd_material.name_j for mat in rig.materials() if mat])
else:
prop_value = utils.uniqueName(value, [mat.mmd_material.name_j for mat in bpy.data.materials])
prop['name_j'] = prop_value
#===========================================
# Property classes
#===========================================
@register_wrap
class MMDMaterial(PropertyGroup):
""" マテリアル
"""
name_j = StringProperty(
name='Name',
description='Japanese Name',
default='',
set=_setNameJ,
get=_getNameJ,
)
name_e = StringProperty(
name='Name(Eng)',
description='English Name',
default='',
)
material_id = IntProperty(
name='Material ID',
description='Unique ID for the reference of material morph',
default=-1,
min=-1,
)
ambient_color = FloatVectorProperty(
name='Ambient Color',
description='Ambient color',
subtype='COLOR',
size=3,
min=0,
max=1,
precision=3,
step=0.1,
default=[0.4, 0.4, 0.4],
update=_updateAmbientColor,
)
diffuse_color = FloatVectorProperty(
name='Diffuse Color',
description='Diffuse color',
subtype='COLOR',
size=3,
min=0,
max=1,
precision=3,
step=0.1,
default=[0.8, 0.8, 0.8],
update=_updateDiffuseColor,
)
alpha = FloatProperty(
name='Alpha',
description='Alpha transparency',
min=0,
max=1,
precision=3,
step=0.1,
default=1.0,
update=_updateAlpha,
)
specular_color = FloatVectorProperty(
name='Specular Color',
description='Specular color',
subtype='COLOR',
size=3,
min=0,
max=1,
precision=3,
step=0.1,
default=[0.625, 0.625, 0.625],
update=_updateSpecularColor,
)
shininess = FloatProperty(
name='Reflect',
description='Sharpness of reflected highlights',
min=0,
soft_max=512,
step=100.0,
default=50.0,
update=_updateShininess,
)
is_double_sided = BoolProperty(
name='Double Sided',
description='Both sides of mesh should be rendered',
default=False,
update=_updateIsDoubleSided,
)
enabled_drop_shadow = BoolProperty(
name='Ground Shadow',
description='Display ground shadow',
default=True,
update=_updateDropShadow,
)
enabled_self_shadow_map = BoolProperty(
name='Self Shadow Map',
description='Object can become shadowed by other objects',
default=True,
update=_updateSelfShadowMap,
)
enabled_self_shadow = BoolProperty(
name='Self Shadow',
description='Object can cast shadows',
default=True,
update=_updateSelfShadow,
)
enabled_toon_edge = BoolProperty(
name='Toon Edge',
description='Use toon edge',
default=False,
update=_updateEnabledToonEdge,
)
edge_color = FloatVectorProperty(
name='Edge Color',
description='Toon edge color',
subtype='COLOR',
size=4,
min=0,
max=1,
precision=3,
step=0.1,
default=[0, 0, 0, 1],
update=_updateEdgeColor,
)
edge_weight = FloatProperty(
name='Edge Weight',
description='Toon edge size',
min=0,
max=100,
soft_max=2,
step=1.0,
default=1.0,
update=_updateEdgeWeight,
)
sphere_texture_type = EnumProperty(
name='Sphere Map Type',
description='Choose sphere texture blend type',
items = [
(str(material.SPHERE_MODE_OFF), 'Off', '', 1),
(str(material.SPHERE_MODE_MULT), 'Multiply', '', 2),
(str(material.SPHERE_MODE_ADD), 'Add', '', 3),
(str(material.SPHERE_MODE_SUBTEX), 'SubTexture', '', 4),
],
update=_updateSphereMapType,
)
is_shared_toon_texture = BoolProperty(
name='Use Shared Toon Texture',
description='Use shared toon texture or custom toon texture',
default=False,
update=_updateToonTexture,
)
toon_texture = StringProperty(
name='Toon Texture',
subtype='FILE_PATH',
description='The file path of custom toon texture',
default='',
update=_updateToonTexture,
)
shared_toon_texture = IntProperty(
name='Shared Toon Texture',
description='Shared toon texture id (toon01.bmp ~ toon10.bmp)',
default=0,
min=0,
max=9,
update=_updateToonTexture,
)
comment = StringProperty(
name='Comment',
description='Comment',
)
def is_id_unique(self):
return self.material_id < 0 or not next((m for m in bpy.data.materials if m.mmd_material != self and m.mmd_material.material_id == self.material_id), None)
|
powroupi/blender_mmd_tools
|
mmd_tools/properties/material.py
|
Python
|
gpl-3.0
| 7,178
|
import json
import psycopg2
from planetwoo.tiletree import load_cutter, load_renderer
import planetwoo.tiletree.mapserver as mapserver
import planetwoo.tiletree.label as label
import planetwoo.tiletree.postgres as postgres
import planetwoo.tiletree.composite as composite
import planetwoo.tiletree.memcached as memcached
def load_config(config_path, conn_str, force_create, recreate_layers, memcache):
config = json.loads(open(config_path, 'r').read())
render_infos = {}
postgres_conn = psycopg2.connect(conn_str)
for layer_name in config['layer_order']:
layer = config['layers'][layer_name]
#apply the dynamic override settings
layer.update(layer.get('dynamic_override', {}))
cutter = load_cutter(layer)
renderer = load_renderer(layer)
#have all of the storage_managers use the same connection so we don't overwhelm postgres
storage_manager = postgres.PostgresStorageManager(None, layer['tree_table'],
layer['image_table'], postgres_conn)
render_infos[layer_name] = composite.RenderInfo(layer_name, storage_manager, renderer, cutter,
layer.get('check_full', True), layer.get('min_zoom', None), layer.get('max_zoom', None))
if(layer.get('renderer_type', '') == 'label'):
renderer.storage_manager = storage_manager
if(force_create or layer_name in recreate_layers or not storage_manager.do_tables_exist()):
print 'Recreating', storage_manager.node_table, storage_manager.image_table
storage_manager.recreate_tables()
compositor = composite.TileCompositor(render_infos, config['layer_order'], config['map_extent'])
if(memcache != None):
compositor = memcached.MCDStorageManager(compositor, memcache, config['layer_order'])
return (compositor, config['layer_groups'])
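# A minimal config sketch inferred from the keys read above (layer names,
# table names and values are illustrative, not from a real deployment):
#
# {
#     "layer_order": ["water"],
#     "layer_groups": {"base": ["water"]},
#     "map_extent": ...,                  # in whatever form TileCompositor expects
#     "layers": {
#         "water": {
#             "tree_table": "water_tree",
#             "image_table": "water_image",
#             "check_full": true,
#             "min_zoom": 0,
#             "max_zoom": 10
#         }
#     }
# }
#
# Each layer entry also carries whatever keys load_cutter() and load_renderer()
# expect; those are defined elsewhere in the planetwoo.tiletree package.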
|
blc56/PlanetWoo
|
www/__init__.py
|
Python
|
gpl-3.0
| 1,722
|
import os
import bpy
class LIObjectExportSettingsOperator(bpy.types.Operator):
bl_idname = "wm.lipsofsuna_object_export_settings"
bl_label = "Object Exporting Settings"
    bl_description = 'Control what data is exported from this object'
li_export = bpy.props.EnumProperty(name='Export mode', description='The type of data exported from the object', items=[\
('particle', 'Particle animations', ''),\
('shape', 'Collision shape', ''),\
('render', 'Graphics', ''),\
('none', 'Disabled', '')],
default='none')
li_file = bpy.props.StringProperty(name='File name', description='Target file name without path or extension')
li_shape = bpy.props.StringProperty(name='Shape name', description='The name of the exported collision shape')
def draw(self, context):
layout = self.layout
box = layout.column()
box.prop(self, 'li_export')
box.prop(self, 'li_file')
box.prop(self, 'li_shape')
def execute(self, context):
def setprop(obj, name, value):
try:
if value:
obj[name] = value
else:
del obj[name]
except:
pass
if not len(self.li_file):
try:
path,name = os.path.split(bpy.data.filepath)
self.li_file = os.path.splitext(name)[0]
except:
self.li_file = 'unnamed'
if self.li_export == 'render':
setprop(context.object, 'file', self.li_file)
setprop(context.object, 'render', None)
setprop(context.object, 'shape', None)
setprop(context.object, 'export', None)
elif self.li_export == 'shape':
setprop(context.object, 'file', self.li_file)
setprop(context.object, 'render', 'false')
setprop(context.object, 'shape', self.li_shape or 'default')
setprop(context.object, 'export', None)
elif self.li_export == 'particle':
setprop(context.object, 'file', self.li_file)
setprop(context.object, 'render', 'false')
setprop(context.object, 'shape', None)
setprop(context.object, 'export', None)
elif self.li_export == 'none':
setprop(context.object, 'export', 'false')
else:
setprop(context.object, 'file', None)
setprop(context.object, 'render', None)
setprop(context.object, 'shape', None)
return {'FINISHED'}
def invoke(self, context, event):
def getprop(obj, name):
try:
v = str(obj[name])
if len(v):
return v
return None
except:
return None
target = getprop(context.object, 'file')
if not target:
self.li_export = 'none'
self.li_file = ''
self.li_shape = ''
elif getprop(context.object, 'render') == 'false':
shape = getprop(context.object, 'shape')
if shape:
self.li_export = 'shape'
self.li_file = target
self.li_shape = shape
else:
self.li_export = 'particle'
self.li_file = target
self.li_shape = ''
else:
self.li_export = 'render'
self.li_file = target
self.li_shape = ''
return context.window_manager.invoke_props_dialog(self)
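# A minimal registration sketch (assumption: the add-on's real register() hook
# lives elsewhere in the lipsofsuna_export package, so this is illustrative only):
#
# def register():
#     bpy.utils.register_class(LIObjectExportSettingsOperator)
#
# def unregister():
#     bpy.utils.unregister_class(LIObjectExportSettingsOperator)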
|
bsmr-games/lipsofsuna
|
tool/lipsofsuna_export/object_export_settings_operator.py
|
Python
|
gpl-3.0
| 2,837
|
"""
This package contains all analyses modules. Analysis classes are
imported into a dictionary (similarly as with the 'devices'
package). Keys are the device types. So to create a new device just
use:
analyses.anClass['analysisType']
----------------------------------------------------------------------
This file is part of the cardoon electronic circuit simulator.
Cardoon is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 or later:
http://www.gnu.org/licenses/gpl.html
"""
# Regular 'analysis' modules listed here
analysisList = ['testdev', 'op', 'dc', 'ac', 'tran']
# Add here any modules to be imported in addition to analysisList
__all__ = analysisList
from analysis import AnalysisError
# Dictionary with existing analysis types. Key is netlist name.
anClass = {}
# This is a list of valid request types. Each analysis module must
# define a list with allowed types (reqTypes).
validReqTypes = list()
for modname in analysisList:
module = __import__(modname, globals(), locals())
anClass[module.aClass.anType] = module.aClass
try:
validReqTypes += module.reqTypes
except AttributeError:
# do nothing if attribute does not exist
pass
#---------------------------------------------------------------------
class OutRequest:
"""
Holds a set of output variables requests
Output request consist in:
1. Type of of request (``type``): dc, ac_*, tran, etc. These are
defined by each analysis.
2. List of variables (``varlist``): for external terminals,
these are strings with terminal name. For internal terminals,
a list with device and terminal names.
After initialization the circuit adds a list of terminals in the
``termlist`` attribute.
"""
def __init__(self, reqtype, varlist):
if reqtype not in validReqTypes:
raise CircuitError(
'Not a valid output request type: {0}'.format(reqtype)
+ '\nValid types: {0}'.format(validReqTypes))
self.type = reqtype
self.varlist = varlist
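# Illustrative usage (assumes the 'op' module listed in analysisList registers
# itself under the netlist name 'op', that 'dc' is one of the request types the
# analysis modules put in validReqTypes, and that 'vout' is a hypothetical
# external terminal name):
#
# opClass = anClass['op']          # netlist name -> analysis class
# req = OutRequest('dc', ['vout']) # output request for one external terminal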
|
cechrist/cardoon
|
cardoon/analyses/__init__.py
|
Python
|
gpl-3.0
| 2,196
|
# -*- coding: utf-8 -*-
import ply.yacc as yacc
from lexLDM import tokens
import AST
def p_programme(p):
"""programme : bloc"""
p[0] = AST.ProgramNode(p[1])
def p_bloc(p):
"""bloc : BLOC_START instructions BLOC_END """
p[0] = AST.BlocNode(p[2])
def p_instructions(p):
"""instructions : instruction
| instruction instructions"""
try:
try:
            # 1st attempt: p[2] is a list
p[0] = [p[1]] + p[2]
except:
            # 2nd attempt: p[2] is not a list
p[0] = [p[1]] + [p[2]]
except:
        # 3rd attempt: p[2] does not exist
p[0] = p[1]
def p_instruction_assign(p):
"""instruction : expression ASSIGN_OP IDENTIFIANT ENDL """
p[0] = AST.AssignNode([AST.IdNumNode(p[3]), p[1]])
def p_instruction_assign_str(p):
"""instruction : chaines ASSIGN_OP IDENTIFIANT_STR ENDL """
p[0] = AST.AssignNode([AST.IdStrNode(p[3]), p[1]])
def p_instruction_print(p):
"""instruction : PRINT EXPR_START expression EXPR_END ENDL
| PRINT EXPR_START chaines EXPR_END ENDL """
p[0] = AST.PrintNode(p[3])
def p_instruction_while(p):
"""instruction : WHILE EXPR_START condition EXPR_END bloc"""
p[0] = AST.WhileNode([p[3], p[5]])
def p_expression_token(p):
"""expression : NUMBER"""
p[0] = AST.NumNode(p[1])
def p_expression_token_id_num(p):
"""expression : IDENTIFIANT """
p[0] = AST.IdNumNode(p[1])
def p_expression_op(p):
"""expression : expression ADD_OP expression
| expression MULT_OP expression """
p[0] = AST.OpNode(p[2], [p[1], p[3]])
def p_expression_par(p):
"""expression : EXPR_START expression EXPR_END """
p[0] = p[2]
def p_minus(p):
"""expression : ADD_OP expression %prec UMINUS"""
p[0] = AST.OpNode(p[1], [p[2]])
def p_condition(p):
"""condition : expression """
p[0] = AST.CondNode(p[1])
def p_if(p):
"""instruction : IF EXPR_START condition EXPR_END IF_FALSE bloc IF_TRUE bloc
| IF EXPR_START condition EXPR_END IF_FALSE bloc """
try:
p[0] = AST.IfNode([p[3], p[6], p[8]])
except:
p[0] = AST.IfNode([p[3], p[6]])
def p_for(p):
"""instruction : FOR EXPR_START IDENTIFIANT FOR_SEP expression FOR_SEP expression EXPR_END bloc """
p[0] = AST.ForNode([AST.IdNumNode(p[3]), p[5], p[7], p[9]])
def p_chaine(p):
"""chaine : STR"""
p[0] = AST.StringNode(p[1])
def p_chaines_str(p):
"""chaines : IDENTIFIANT_STR"""
p[0] = AST.IdStrNode(p[1])
def p_chaines(p):
"""chaines : chaine
| expression
| chaines STR_CONCAT chaines """
try:
if isinstance(p[1], AST.StringGroupNode):
gauche = p[1].children
else:
gauche = [p[1]]
if isinstance(p[3], AST.StringGroupNode):
droite = p[3].children
else:
droite = [p[3]]
p[0] = AST.StringGroupNode(gauche + droite)
except:
p[0] = p[1]
def p_error(p):
#print("Syntax error in line %d" % p.lineno)
print("Syntax error")
print(p)
yacc.errok()
precedence = (
('left', 'ADD_OP'),
('left', 'MULT_OP'),
('right', 'UMINUS'),
)
def parse(program):
return yacc.parse(program)
yacc.yacc(outputdir='generated')
def parseFile(file, generateTree = False):
import tools
prog = tools.getFileContent(file)
ast = yacc.parse(prog, debug=0)
if generateTree:
name = tools.changeExtension(file, "pdf")
graph = ast.makegraphicaltree()
try:
import os
os.remove(name)
except:
pass
graph.write_pdf(name)
return [ast, name]
else:
pass
return [ast, None]
if __name__ == "__main__":
import tools
name = tools.getFileNameFromArg("test1.txt")
result, name = parseFile(name, True)
print("Wrote ast to " , name)
|
fa18swiss/LDM
|
parserLDM.py
|
Python
|
gpl-3.0
| 3,837
|
import pystan._chains as _chains
def ess(sim, n):
"""Calculate effective sample size
Parameters
----------
sim : chains
n : int
Parameter index starting from 0
"""
return _chains.effective_sample_size(sim, n)
def splitrhat(sim, n):
"""Calculate rhat
Parameters
----------
sim : chains
n : int
Parameter index starting from 0
"""
return _chains.split_potential_scale_reduction(sim, n)
def ess_and_splitrhat(sim, n):
"""Calculate ess and rhat
This saves time by creating just one stan::mcmc::chains instance.
"""
# FIXME: does not yet save time
return (ess(sim, n), splitrhat(sim, n))
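# Illustrative usage (assumption: `fit` is a StanFit object returned by
# pystan.stan() and its `sim` attribute is the chains structure these thin
# wrappers expect):
#
# n_eff = ess(fit.sim, 0)      # effective sample size of the first parameter
# rhat = splitrhat(fit.sim, 0) # split R-hat for the same parameter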
|
chendaniely/pystan
|
pystan/chains.py
|
Python
|
gpl-3.0
| 684
|
"""erudit URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.views.generic import RedirectView
from django.views.generic import TemplateView
from . import urls_compat
urlpatterns = i18n_patterns(
url('^', include('django.contrib.auth.urls')),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^upload/', include('plupload.urls', namespace='plupload')),
# The PDF viewer exposes a PDF.js template
url(r'^pdf-viewer\.html$',
TemplateView.as_view(template_name='pdf_viewer.html'), name='pdf-viewer'),
# Apps
url(_(r'^espace-utilisateur/'), include('apps.userspace.urls', namespace='userspace')),
url(r'^', include('apps.public.urls', namespace='public')),
# Compatibility URLs
url('^', include(urls_compat.urlpatterns)),
# Catchall
url(r'', RedirectView.as_view(url="/espace-utilisateur/", permanent=False)),
)
# In DEBUG mode, serve media files through Django.
if settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.static import serve
urlpatterns += staticfiles_urlpatterns()
# Remove leading and trailing slashes so the regex matches.
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += [
url(r'^%s/(?P<path>.*)$' % media_url, serve, {'document_root': settings.MEDIA_ROOT}),
]
|
thetoine/eruditorg
|
erudit/base/urls.py
|
Python
|
gpl-3.0
| 2,176
|
################################################################################
## ##
## This file is a part of TADEK. ##
## ##
## TADEK - Test Automation in a Distributed Environment ##
## (http://tadek.comarch.com) ##
## ##
## Copyright (C) 2011 Comarch S.A. ##
## All rights reserved. ##
## ##
## TADEK is free software for non-commercial purposes. For commercial ones ##
## we offer a commercial license. Please check http://tadek.comarch.com for ##
## details or write to tadek-licenses@comarch.com ##
## ##
## You can redistribute it and/or modify it under the terms of the ##
## GNU General Public License as published by the Free Software Foundation, ##
## either version 3 of the License, or (at your option) any later version. ##
## ##
## TADEK is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with TADEK bundled with this file in the file LICENSE. ##
## If not, see http://www.gnu.org/licenses/. ##
## ##
## Please notice that Contributor Agreement applies to any contribution ##
## you make to TADEK. The Agreement must be completed, signed and sent ##
## to Comarch before any contribution is made. You should have received ##
## a copy of Contribution Agreement along with TADEK bundled with this file ##
## in the file CONTRIBUTION_AGREEMENT.pdf or see http://tadek.comarch.com ##
## or write to tadek-licenses@comarch.com ##
## ##
################################################################################
from PySide import QtCore
from PySide import QtGui
from tadek.core import log
from tadek.core import settings
from tadek.engine import testresult
from tadek.engine import channels
from tadek.engine.runner import TestRunner
import icons
from view import View
from tests import Tests
from devices import Device
from utils import viewName
from dialogs import runWarning, runQuestion
from testdialogs import ReportDialog
from progresschannel import ProgressChannel, ProgressChannelHelper
class TestDeviceList(QtCore.QObject):
'''
Manages a device appearance in the view.
'''
deviceChecked = QtCore.Signal(Device)
deviceUnchecked = QtCore.Signal(Device)
def __init__(self, tree):
'''
Initializes the devices view and assigns the reference to TreeWidget
instance.
'''
QtCore.QObject.__init__(self)
self._tree = tree
self._tree.itemChanged.connect(self._emitSignals)
self._dict = {}
self._warning = False
self._silent = False
def _emitSignals(self, item, column):
'''
Emits 'deviceChecked' or 'deviceUnchecked' signal based on the state of
the given item.
'''
if self._silent:
return
if item is not self._tree.currentItem():
return
if item.checkState(0) == QtCore.Qt.Checked:
self.deviceChecked.emit(self._dict[item.text(0)])
if item.checkState(0) == QtCore.Qt.Unchecked:
if (not self.getChecked() and self._warning and
not runQuestion("Excluding this device will stop execution of "
"tests. Are you sure?")):
self._silent = True
item.setCheckState(0, QtCore.Qt.Checked)
QtGui.qApp.processEvents()
self._silent = False
else:
self.deviceUnchecked.emit(self._dict[item.text(0)])
def add(self, device, check):
'''
Adds given device to the list. If check is True, then the new device
item will be checked.
'''
address, port = device.address
log.debug("Adding '%s' device to view" % device.name)
item = QtGui.QTreeWidgetItem(self._tree)
if check:
item.setCheckState(0, QtCore.Qt.Checked)
else:
item.setCheckState(0, QtCore.Qt.Unchecked)
item.setText(0, device.name)
item.setText(1, "%s:%d" % (address, port))
self._tree.resizeColumnToContents(0)
self._tree.resizeColumnToContents(1)
self._dict[device.name] = device
def remove(self, device):
'''
Removes given device from the list.
'''
if device.name not in self._dict:
return
log.debug("Removing device %s from view" % device.name)
del self._dict[device.name]
item = self._tree.findItems(device.name, QtCore.Qt.MatchFlags())[0]
item = self._tree.takeTopLevelItem(self._tree.indexOfTopLevelItem(item))
if item.checkState(0) == QtCore.Qt.Checked:
item.setCheckState(0, QtCore.Qt.Unchecked)
self._tree.resizeColumnToContents(0)
self._tree.resizeColumnToContents(1)
def getChecked(self):
'''
Returns a list of currently checked devices.
'''
devices = []
for i in range(self._tree.topLevelItemCount()):
item = self._tree.topLevelItem(i)
if item.checkState(0) == QtCore.Qt.Checked:
devices.append(self._dict[item.text(0)])
return devices
def setWarning(self, enabled):
'''
        Enables or disables displaying of a warning before the last checked
        device is unchecked.
'''
self._warning = enabled
class Test(View):
'''
A view for running test cases.
'''
NAME = viewName()
_UI_FILE = "test_view.ui"
_ICON_FILE = ":/test/icons/system-run.png"
_checkOnConnect = settings.get(NAME, "options", "check_on_connect",
default="Yes", force=True)
# Menus and Tool bar
_menuFile = (
"actionAdd",
"actionRemove",
None
)
_menuEdit = (
"actionExpand",
"actionExpandAll",
"actionCollapse",
"actionCollapseAll",
None
)
_menuView = (
"actionRefresh",
None
)
_toolBar = (
"actionAdd",
"actionRemove",
None,
(
"actionExpand",
"actionExpandAll"
),
(
"actionCollapse",
"actionCollapseAll"
),
"actionRefresh",
None,
"actionStart",
"actionPause",
"actionResume",
"actionStop"
)
def __init__(self, parent):
View.__init__(self, parent)
self._devices = TestDeviceList(self._elements["treeWidgetDevices"])
self._actionStart = self._elements["actionStart"]
self._actionStop = self._elements["actionStop"]
self._actionPause = self._elements["actionPause"]
self._actionResume = self._elements["actionResume"]
self._actionStart.triggered.connect(self._startTests)
self._actionStop.triggered.connect(self._stopTests)
self._actionPause.triggered.connect(self._pauseTests)
self._actionResume.triggered.connect(self._resumeTests)
self._actionStart.setVisible(True)
self._actionStop.setVisible(False)
self._actionPause.setVisible(False)
self._actionResume.setVisible(False)
# Summary channel
channels.add("SummaryChannel", "_ui_summary")
# Progress channel
pBar = QtGui.QProgressBar()
pBar.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
font = pBar.font()
font.setBold(True)
pBar.setFont(font)
self._parent.getStatusBar().addPermanentWidget(pBar, 1)
self._progress = ProgressChannelHelper(pBar)
channels.add(ProgressChannel, "_ui_progress", progress=self._progress)
self._progress.testStarted.connect(self._onTestStarted)
self._progress.testStopped.connect(self._onTestStopped)
self._progress.stopped.connect(self._onStopped)
self._tests = Tests(self._elements["treeWidgetLocations"],
self._elements["treeWidgetTests"],
self._elements["treeWidgetModels"])
self._elements["actionAdd"].triggered.connect(self._tests.addLocation)
self._elements["actionRemove"].triggered.connect(
self._tests.removeLocation)
self._elements["actionExpand"].triggered.connect(
self._tests.expandSelected)
self._elements["actionExpandAll"].triggered.connect(
self._tests.expandAll)
self._elements["actionCollapse"].triggered.connect(
self._tests.collapseSelected)
self._elements["actionCollapseAll"].triggered.connect(
self._tests.collapseAll)
self._elements["actionRefresh"].triggered.connect(self._tests.refresh)
# Initialize private test variables
self._suiteRuns = 0
self._todoSuites = 0
self._testResult = None
self._testRunner = None
# Public methods:
def saveState(self):
'''
Saves the view's state to configuration.
'''
View.saveState(self)
self._tests.saveState()
def loadState(self):
'''
Loads the view's state from configuration.
'''
View.loadState(self)
self._tests.loadState()
# Slots:
#@QtCore.Slot(Device)
def _deviceConnected(self, device):
'''
Adds a device to list.
'''
self._devices.add(device, check=self._checkOnConnect.getBool())
#@QtCore.Slot(Device)
def _deviceDisconnected(self, device, error):
'''
Removes given device from list. The error parameter can be set to True
to indicate that the device was disconnected due to an error.
'''
self._devices.remove(device)
#@QtCore.Slot()
def _startTests(self):
'''
Starts execution of tests.
'''
log.debug("Starting tests")
self._actionStart.setVisible(False)
devices = self._devices.getChecked()
if not devices:
runWarning("Select some devices first")
self._actionStart.setVisible(True)
return
tests = self._tests.getCheckedTests()
if not tests:
self._actionStart.setVisible(True)
return
if sum([test.count() for test in tests]) == 0:
runWarning("Selected test suites do not contain any test cases")
self._actionStart.setVisible(True)
return
self._suiteRuns = 0
self._todoSuites = len(tests)
self._testResult = testresult.TestResult()
self._testRunner = TestRunner(devices, tests, self._testResult)
self._devices.deviceChecked.connect(self._testRunner.addDevice)
self._devices.deviceUnchecked.connect(self._testRunner.removeDevice)
self._devices.setWarning(True)
self._testRunner.start()
self._actionStop.setVisible(True)
self._actionPause.setVisible(True)
#@QtCore.Slot()
def _stopTests(self):
'''
Stops execution of tests.
'''
log.debug("Stopping tests")
self._actionStart.setVisible(True)
self._actionStop.setVisible(False)
self._actionPause.setVisible(False)
self._actionResume.setVisible(False)
self._testRunner.stop()
#@QtCore.Slot()
def _pauseTests(self):
'''
Pauses execution of tests.
'''
log.debug("Pausing tests")
self._actionStart.setVisible(False)
self._actionStop.setVisible(True)
self._actionPause.setVisible(False)
self._actionResume.setVisible(True)
self._testRunner.pause()
#@QtCore.Slot()
def _resumeTests(self):
'''
Resumes execution of tests.
'''
log.debug("Resuming tests")
self._actionStart.setVisible(False)
self._actionStop.setVisible(True)
self._actionPause.setVisible(True)
self._actionResume.setVisible(False)
self._testRunner.resume()
#@QtCore.Slot(testresult.TestResultBase, testresult.DeviceExecResult)
def _onTestStarted(self, result, device):
'''
        Handles the start of execution of the test represented by the given
        result.
'''
if isinstance(result, testresult.TestCaseResult):
log.debug("Began execution of test case: %s" % result.id)
# If it is a top-level test suite result then increase the counter of
# running top-level test suites
if result.parent is None:
self._suiteRuns += 1
#@QtCore.Slot(testresult.TestResultBase, testresult.DeviceExecResult)
def _onTestStopped(self, result, device):
'''
        Handles the end of execution of the test represented by the given
        result.
'''
if isinstance(result, testresult.TestCaseResult):
log.debug("Finished execution of test case: %s" % result.id)
# If it is a top-level test suite result then decrease the counters of
# running top-level test suites and to do test suites.
if result.parent is None:
self._suiteRuns -= 1
self._todoSuites -= 1
# If all top-level test suites are done then join() the test runner
if self._suiteRuns == 0 and self._todoSuites <= 0:
self._testRunner.join()
#@QtCore.Slot()
def _onStopped(self):
'''
Shows summary dialog after finishing test executions.
'''
log.debug("All tests finished")
self._actionStart.setVisible(True)
self._actionStop.setVisible(False)
self._actionPause.setVisible(False)
self._actionResume.setVisible(False)
self._devices.deviceChecked.disconnect(self._testRunner.addDevice)
self._devices.deviceUnchecked.disconnect(self._testRunner.removeDevice)
self._devices.setWarning(False)
files = []
for c in self._testRunner.result.get():
if isinstance(c, channels.TestResultFileChannel) and c.isActive():
files.append((c.name, c.filePath()))
dialog = ReportDialog(
self._testResult.get(name='_ui_summary')[0].getSummary(),
files, len(self._devices.getChecked()) > 0)
dialog.closed.connect(self._progress.reset,
type=QtCore.Qt.DirectConnection)
dialog.runAgainClicked.connect(self._startTests,
type=QtCore.Qt.QueuedConnection)
dialog.showDetailsClicked.connect(self._showDetails)
dialog.run()
#@QtCore.Slot()
def _showDetails(self):
'''
Shows execution result in Result view.
'''
resultView = self._parent.getView("result")
if resultView is not None:
log.debug("Showing details in Result view")
resultView.activate()
resultView.showLastResult()
|
tadek-project/tadek-ui
|
src/test/testview.py
|
Python
|
gpl-3.0
| 16,336
|
# Copyright (C) 2010-2019 Dzhelil S. Rufat. All Rights Reserved.
import numpy as np
import pybindcpp.ext.ufunc as uf
from pybindcpp.helper import eq
def test_1():
x = np.linspace(0, 1, 5)
x = x.astype('d')
assert eq(np.cos(x), uf.cos(x))
x = x.astype('f')
assert eq(np.sin(x), uf.sin(x))
def test_2():
fn = np.vectorize(lambda N, x: N * x)
for N, x in ((1, 2.0),
([[1], [2]], [1.0, 2.0, 3.0, 4.0]),
(1, [[1.0, 2.0], [3.0, 4.0]]),
([[1], [2]], [[1.0, 2.0], [3.0, 4.0]])):
assert eq(fn(N, x), uf.fn(N, x))
N = 1
for x in ([1, 2, 3, 4],
np.ones(100),
np.zeros(100)):
assert eq(fn(N, x), uf.fn(N, x))
def test_3():
assert eq(uf.add_one([1, 2, 3]), [2, 3, 4])
assert eq(uf.add_one([3, 2, 1]), [4, 3, 2])
if __name__ == '__main__':
test_1()
test_2()
test_3()
|
drufat/pybindcpp
|
pybindcpp/ext/ufunc_test.py
|
Python
|
gpl-3.0
| 917
|
# Copyright (C) 2011-2016 2ndQuadrant Italia Srl
#
# This file is part of Barman.
#
# Barman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barman. If not, see <http://www.gnu.org/licenses/>.
"""
This module represents a backup.
"""
import datetime
import logging
import os
import shutil
import time
from glob import glob
import dateutil.parser
import dateutil.tz
from barman import output, xlog
from barman.backup_executor import RsyncBackupExecutor, SshCommandException
from barman.command_wrappers import DataTransferFailure
from barman.compression import CompressionIncompatibility, CompressionManager
from barman.config import BackupOptions
from barman.hooks import (AbortedRetryHookScript, HookScriptRunner,
RetryHookScriptRunner)
from barman.infofile import BackupInfo, UnknownBackupIdException, WalFileInfo
from barman.recovery_executor import RecoveryExecutor
from barman.remote_status import RemoteStatusMixin
from barman.utils import fsync_dir, human_readable_timedelta, pretty_size
_logger = logging.getLogger(__name__)
class DuplicateWalFile(Exception):
"""
A duplicate WAL file has been found
"""
pass
class MatchingDuplicateWalFile(DuplicateWalFile):
"""
A duplicate WAL file has been found, but it's identical to the one we
already have.
"""
pass
class BackupManager(RemoteStatusMixin):
"""Manager of the backup archive for a server"""
DEFAULT_STATUS_FILTER = (BackupInfo.DONE,)
def __init__(self, server):
"""
Constructor
"""
super(BackupManager, self).__init__()
self.name = "default"
self.server = server
self.config = server.config
self._backup_cache = None
self.compression_manager = CompressionManager(self.config, server.path)
self.executor = None
try:
self.executor = RsyncBackupExecutor(self)
except SshCommandException as e:
self.config.disabled = True
self.config.msg_list.append(str(e).strip())
def get_available_backups(self, status_filter=DEFAULT_STATUS_FILTER):
"""
Get a list of available backups
:param status_filter: default DEFAULT_STATUS_FILTER. The status of
the backup list returned
"""
# If the filter is not a tuple, create a tuple using the filter
if not isinstance(status_filter, tuple):
status_filter = tuple(status_filter,)
# Load the cache if necessary
if self._backup_cache is None:
self._load_backup_cache()
# Filter the cache using the status filter tuple
backups = {}
for key, value in self._backup_cache.items():
if value.status in status_filter:
backups[key] = value
return backups
def _load_backup_cache(self):
"""
Populate the cache of the available backups, reading information
from disk.
"""
self._backup_cache = {}
# Load all the backups from disk reading the backup.info files
for filename in glob("%s/*/backup.info" %
self.config.basebackups_directory):
backup = BackupInfo(self.server, filename)
self._backup_cache[backup.backup_id] = backup
def backup_cache_add(self, backup_info):
"""
Register a BackupInfo object to the backup cache.
NOTE: Initialise the cache - in case it has not been done yet
:param barman.infofile.BackupInfo backup_info: the object we want to
register in the cache
"""
# Load the cache if needed
if self._backup_cache is None:
self._load_backup_cache()
# Insert the BackupInfo object into the cache
self._backup_cache[backup_info.backup_id] = backup_info
def backup_cache_remove(self, backup_info):
"""
Remove a BackupInfo object from the backup cache
This method _must_ be called after removing the object from disk.
:param barman.infofile.BackupInfo backup_info: the object we want to
remove from the cache
"""
# Nothing to do if the cache is not loaded
if self._backup_cache is None:
return
# Remove the BackupInfo object from the backups cache
del self._backup_cache[backup_info.backup_id]
def get_backup(self, backup_id):
"""
Return the backup information for the given backup id.
        If the backup_id is None or the backup.info file doesn't exist,
it returns None.
:param str|None backup_id: the ID of the backup to return
:rtype: BackupInfo|None
"""
if backup_id is not None:
# Get all the available backups from the cache
available_backups = self.get_available_backups(
BackupInfo.STATUS_ALL)
# Return the BackupInfo if present, or None
return available_backups.get(backup_id)
return None
def get_previous_backup(self, backup_id,
status_filter=DEFAULT_STATUS_FILTER):
"""
Get the previous backup (if any) in the catalog
:param status_filter: default DEFAULT_STATUS_FILTER. The status of
the backup returned
"""
if not isinstance(status_filter, tuple):
status_filter = tuple(status_filter)
backup = BackupInfo(self.server, backup_id=backup_id)
available_backups = self.get_available_backups(status_filter +
(backup.status,))
ids = sorted(available_backups.keys())
try:
current = ids.index(backup_id)
while current > 0:
res = available_backups[ids[current - 1]]
if res.status in status_filter:
return res
current -= 1
return None
except ValueError:
raise UnknownBackupIdException('Could not find backup_id %s' %
backup_id)
def get_next_backup(self, backup_id, status_filter=DEFAULT_STATUS_FILTER):
"""
Get the next backup (if any) in the catalog
:param status_filter: default DEFAULT_STATUS_FILTER. The status of
the backup returned
"""
if not isinstance(status_filter, tuple):
status_filter = tuple(status_filter)
backup = BackupInfo(self.server, backup_id=backup_id)
available_backups = self.get_available_backups(status_filter +
(backup.status,))
ids = sorted(available_backups.keys())
try:
current = ids.index(backup_id)
while current < (len(ids) - 1):
res = available_backups[ids[current + 1]]
if res.status in status_filter:
return res
current += 1
return None
except ValueError:
raise UnknownBackupIdException('Could not find backup_id %s' %
backup_id)
def get_last_backup_id(self, status_filter=DEFAULT_STATUS_FILTER):
"""
        Get the id of the latest/last backup in the catalog (if it exists)
:param status_filter: The status of the backup to return,
default to DEFAULT_STATUS_FILTER.
:return string|None: ID of the backup
"""
available_backups = self.get_available_backups(status_filter)
if len(available_backups) == 0:
return None
ids = sorted(available_backups.keys())
return ids[-1]
def get_first_backup_id(self, status_filter=DEFAULT_STATUS_FILTER):
"""
        Get the id of the oldest/first backup in the catalog (if it exists)
:param status_filter: The status of the backup to return,
default to DEFAULT_STATUS_FILTER.
:return string|None: ID of the backup
"""
available_backups = self.get_available_backups(status_filter)
if len(available_backups) == 0:
return None
ids = sorted(available_backups.keys())
return ids[0]
def delete_backup(self, backup):
"""
Delete a backup
:param backup: the backup to delete
"""
available_backups = self.get_available_backups()
minimum_redundancy = self.server.config.minimum_redundancy
# Honour minimum required redundancy
if backup.status == BackupInfo.DONE and \
minimum_redundancy >= len(available_backups):
output.warning("Skipping delete of backup %s for server %s "
"due to minimum redundancy requirements "
"(minimum redundancy = %s, "
"current redundancy = %s)",
backup.backup_id,
self.config.name,
len(available_backups),
minimum_redundancy)
return
output.info("Deleting backup %s for server %s",
backup.backup_id, self.config.name)
previous_backup = self.get_previous_backup(backup.backup_id)
next_backup = self.get_next_backup(backup.backup_id)
# Delete all the data contained in the backup
try:
self.delete_backup_data(backup)
except OSError as e:
output.error("Failure deleting backup %s for server %s.\n%s",
backup.backup_id, self.config.name, e)
return
# Check if we are deleting the first available backup
if not previous_backup:
# In the case of exclusive backup (default), removes any WAL
# files associated to the backup being deleted.
# In the case of concurrent backup, removes only WAL files
# prior to the start of the backup being deleted, as they
# might be useful to any concurrent backup started immediately
# after.
remove_until = None # means to remove all WAL files
if next_backup:
remove_until = next_backup
elif BackupOptions.CONCURRENT_BACKUP in self.config.backup_options:
remove_until = backup
output.info("Delete associated WAL segments:")
for name in self.remove_wal_before_backup(remove_until):
output.info("\t%s", name)
# As last action, remove the backup directory,
# ending the delete operation
try:
self.delete_basebackup(backup)
except OSError as e:
output.error("Failure deleting backup %s for server %s.\n%s\n"
"Please manually remove the '%s' directory",
backup.backup_id, self.config.name, e,
backup.get_basebackup_directory())
return
self.backup_cache_remove(backup)
output.info("Done")
def retry_backup_copy(self, target_function, *args, **kwargs):
"""
Execute the target backup copy function, retrying the configured
number of times
:param target_function: the base backup target function
:param args: args for the target function
:param kwargs: kwargs of the target function
:return: the result of the target function
"""
attempts = 0
while True:
try:
                # if this is not the first attempt, output the retry number
if attempts >= 1:
output.warning("Copy of base backup: retry #%s", attempts)
# execute the target function for backup copy
return target_function(*args, **kwargs)
# catch rsync errors
except DataTransferFailure as e:
# exit condition: if retry number is lower than configured
# retry limit, try again; otherwise exit.
if attempts < self.config.basebackup_retry_times:
# Log the exception, for debugging purpose
_logger.exception("Failure in base backup copy: %s", e)
output.warning(
"Copy of base backup failed, waiting for next "
"attempt in %s seconds",
self.config.basebackup_retry_sleep)
# sleep for configured time. then try again
time.sleep(self.config.basebackup_retry_sleep)
attempts += 1
else:
# if the max number of attempts is reached and
# there is still an error, exit re-raising the exception.
raise
def backup(self):
"""
Performs a backup for the server
"""
_logger.debug("initialising backup information")
self.executor.init()
backup_info = None
try:
# Create the BackupInfo object representing the backup
backup_info = BackupInfo(
self.server,
backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))
backup_info.save()
self.backup_cache_add(backup_info)
output.info(
"Starting backup for server %s in %s",
self.config.name,
backup_info.get_basebackup_directory())
# Run the pre-backup-script if present.
script = HookScriptRunner(self, 'backup_script', 'pre')
script.env_from_backup_info(backup_info)
script.run()
# Run the pre-backup-retry-script if present.
retry_script = RetryHookScriptRunner(
self, 'backup_retry_script', 'pre')
retry_script.env_from_backup_info(backup_info)
retry_script.run()
# Do the backup using the BackupExecutor
self.executor.backup(backup_info)
# Compute backup size and fsync it on disk
self.backup_fsync_and_set_sizes(backup_info)
# Mark the backup as DONE
backup_info.set_attribute("status", "DONE")
# Use BaseException instead of Exception to catch events like
            # KeyboardInterrupt (e.g.: CTRL-C)
except BaseException as e:
msg_lines = str(e).strip().splitlines()
if backup_info:
# Use only the first line of exception message
# in backup_info error field
backup_info.set_attribute("status", "FAILED")
# If the exception has no attached message use the raw
# type name
if len(msg_lines) == 0:
msg_lines = [type(e).__name__]
backup_info.set_attribute(
"error",
"failure %s (%s)" % (
self.executor.current_action, msg_lines[0]))
output.error("Backup failed %s.\nDETAILS: %s\n%s",
self.executor.current_action, msg_lines[0],
'\n'.join(msg_lines[1:]))
else:
output.info("Backup end at xlog location: %s (%s, %08X)",
backup_info.end_xlog,
backup_info.end_wal,
backup_info.end_offset)
output.info("Backup completed")
# Create a restore point after a backup
target_name = 'barman_%s' % backup_info.backup_id
self.server.postgres.create_restore_point(target_name)
finally:
if backup_info:
backup_info.save()
# Run the post-backup-retry-script if present.
try:
retry_script = RetryHookScriptRunner(
self, 'backup_retry_script', 'post')
retry_script.env_from_backup_info(backup_info)
retry_script.run()
except AbortedRetryHookScript as e:
# Ignore the ABORT_STOP as it is a post-hook operation
_logger.warning("Ignoring stop request after receiving "
"abort (exit code %d) from post-backup "
"retry hook script: %s",
e.hook.exit_status, e.hook.script)
# Run the post-backup-script if present.
script = HookScriptRunner(self, 'backup_script', 'post')
script.env_from_backup_info(backup_info)
script.run()
output.result('backup', backup_info)
def recover(self, backup_info, dest, tablespaces=None, target_tli=None,
target_time=None, target_xid=None, target_name=None,
exclusive=False, remote_command=None):
"""
Performs a recovery of a backup
:param barman.infofile.BackupInfo backup_info: the backup to recover
:param str dest: the destination directory
:param dict[str,str]|None tablespaces: a tablespace name -> location
map (for relocation)
:param str|None target_tli: the target timeline
:param str|None target_time: the target time
:param str|None target_xid: the target xid
:param str|None target_name: the target name created previously with
pg_create_restore_point() function call
:param bool exclusive: whether the recovery is exclusive or not
:param str|None remote_command: default None. The remote command
to recover the base backup, in case of remote backup.
"""
# Archive every WAL files in the incoming directory of the server
self.server.archive_wal(verbose=False)
# Delegate the recovery operation to a RecoveryExecutor object
executor = RecoveryExecutor(self)
recovery_info = executor.recover(backup_info,
dest, tablespaces,
target_tli, target_time,
target_xid, target_name,
exclusive, remote_command)
# Output recovery results
output.result('recovery', recovery_info['results'])
def archive_wal(self, verbose=True):
"""
Executes WAL maintenance operations, such as archiving and compression
If verbose is set to False, outputs something only if there is
at least one file
:param bool verbose: report even if no actions
"""
with self.server.xlogdb('a') as fxlogdb:
for archiver in self.server.archivers:
archiver.archive(fxlogdb, verbose)
def cron_retention_policy(self):
"""
Retention policy management
"""
if (self.server.enforce_retention_policies and
self.config.retention_policy_mode == 'auto'):
available_backups = self.get_available_backups(
BackupInfo.STATUS_ALL)
retention_status = self.config.retention_policy.report()
for bid in sorted(retention_status.keys()):
if retention_status[bid] == BackupInfo.OBSOLETE:
output.info(
"Enforcing retention policy: removing backup %s for "
"server %s" % (bid, self.config.name))
self.delete_backup(available_backups[bid])
def delete_basebackup(self, backup):
"""
Delete the basebackup dir of a given backup.
:param barman.infofile.BackupInfo backup: the backup to delete
"""
backup_dir = backup.get_basebackup_directory()
_logger.debug("Deleting base backup directory: %s" % backup_dir)
shutil.rmtree(backup_dir)
def delete_backup_data(self, backup):
"""
Delete the data contained in a given backup.
:param barman.infofile.BackupInfo backup: the backup to delete
"""
if backup.tablespaces:
if backup.backup_version == 2:
tbs_dir = backup.get_basebackup_directory()
else:
tbs_dir = os.path.join(backup.get_data_directory(),
'pg_tblspc')
for tablespace in backup.tablespaces:
rm_dir = os.path.join(tbs_dir, str(tablespace.oid))
if os.path.exists(rm_dir):
_logger.debug("Deleting tablespace %s directory: %s" %
(tablespace.name, rm_dir))
shutil.rmtree(rm_dir)
pg_data = backup.get_data_directory()
if os.path.exists(pg_data):
_logger.debug("Deleting PGDATA directory: %s" % pg_data)
shutil.rmtree(pg_data)
def delete_wal(self, wal_info):
"""
Delete a WAL segment, with the given WalFileInfo
:param barman.infofile.WalFileInfo wal_info: the WAL to delete
"""
try:
os.unlink(wal_info.fullpath(self.server))
try:
os.removedirs(os.path.dirname(wal_info.fullpath(self.server)))
            except OSError:
                # This is not an error condition: we always try to remove the
                # trailing directories; an OSError here simply means the
                # containing hash directory is not empty yet.
                pass
except OSError:
_logger.warning('Expected WAL file %s not found during delete',
wal_info.name, exc_info=1)
def check(self, check_strategy):
"""
This function does some checks on the server.
:param CheckStrategy check_strategy: the strategy for the management
of the results of the various checks
"""
# Check compression_setting parameter
if self.config.compression and not self.compression_manager.check():
check_strategy.result(self.config.name,
'compression settings', False)
else:
status = True
try:
self.compression_manager.get_compressor()
except CompressionIncompatibility as field:
check_strategy.result(self.config.name,
'%s setting' % field, False)
status = False
check_strategy.result(self.config.name,
'compression settings', status)
# Failed backups check
failed_backups = self.get_available_backups((BackupInfo.FAILED,))
status = len(failed_backups) == 0
check_strategy.result(
self.config.name,
'failed backups',
status,
'there are %s failed backups' % (len(failed_backups,))
)
# Minimum redundancy checks
no_backups = len(self.get_available_backups())
# Check minimum_redundancy_requirements parameter
if no_backups < int(self.config.minimum_redundancy):
status = False
else:
status = True
check_strategy.result(
self.config.name,
'minimum redundancy requirements', status,
'have %s backups, expected at least %s' % (
no_backups, self.config.minimum_redundancy))
# TODO: Add a check for the existence of ssh and of rsync
# Execute additional checks defined by the BackupExecutor
if self.executor:
self.executor.check(check_strategy)
def status(self):
"""
        This function shows the server status
"""
# get number of backups
no_backups = len(self.get_available_backups())
output.result('status', self.config.name,
"backups_number",
"No. of available backups", no_backups)
output.result('status', self.config.name,
"first_backup",
"First available backup",
self.get_first_backup_id())
output.result('status', self.config.name,
"last_backup",
"Last available backup",
self.get_last_backup_id())
        # Minimum redundancy check: if the number of backups is lower than
        # the minimum redundancy, fail.
if no_backups < self.config.minimum_redundancy:
output.result('status', self.config.name,
"minimum_redundancy",
"Minimum redundancy requirements",
"FAILED (%s/%s)" % (
no_backups,
self.config.minimum_redundancy))
else:
output.result('status', self.config.name,
"minimum_redundancy",
"Minimum redundancy requirements",
"satisfied (%s/%s)" % (
no_backups,
self.config.minimum_redundancy))
# Output additional status defined by the BackupExecutor
if self.executor:
self.executor.status()
def fetch_remote_status(self):
"""
Build additional remote status lines defined by the BackupManager.
This method does not raise any exception in case of errors,
but set the missing values to None in the resulting dictionary.
:rtype: dict[str, None|str]
"""
if self.executor:
return self.executor.get_remote_status()
else:
return {}
def rebuild_xlogdb(self):
"""
Rebuild the whole xlog database guessing it from the archive content.
"""
from os.path import isdir, join
output.info("Rebuilding xlogdb for server %s", self.config.name)
root = self.config.wals_directory
default_compression = self.config.compression
wal_count = label_count = history_count = 0
        # lock the xlogdb as we are about to replace it completely
with self.server.xlogdb('w') as fxlogdb:
xlogdb_new = fxlogdb.name + ".new"
with open(xlogdb_new, 'w') as fxlogdb_new:
for name in sorted(os.listdir(root)):
# ignore the xlogdb and its lockfile
if name.startswith(self.server.XLOG_DB):
continue
fullname = join(root, name)
if isdir(fullname):
# all relevant files are in subdirectories
hash_dir = fullname
for wal_name in sorted(os.listdir(hash_dir)):
fullname = join(hash_dir, wal_name)
if isdir(fullname):
_logger.warning(
'unexpected directory '
'rebuilding the wal database: %s',
fullname)
else:
if xlog.is_wal_file(fullname):
wal_count += 1
elif xlog.is_backup_file(fullname):
label_count += 1
else:
_logger.warning(
'unexpected file '
'rebuilding the wal database: %s',
fullname)
continue
wal_info = WalFileInfo.from_file(
fullname,
default_compression=default_compression)
fxlogdb_new.write(wal_info.to_xlogdb_line())
else:
# only history files are here
if xlog.is_history_file(fullname):
history_count += 1
wal_info = WalFileInfo.from_file(
fullname,
default_compression=default_compression)
fxlogdb_new.write(wal_info.to_xlogdb_line())
else:
_logger.warning(
'unexpected file '
'rebuilding the wal database: %s',
fullname)
os.fsync(fxlogdb_new.fileno())
shutil.move(xlogdb_new, fxlogdb.name)
fsync_dir(os.path.dirname(fxlogdb.name))
output.info('Done rebuilding xlogdb for server %s '
'(history: %s, backup_labels: %s, wal_file: %s)',
self.config.name, history_count, label_count, wal_count)
def remove_wal_before_backup(self, backup_info):
"""
Remove WAL files which have been archived before the start of
the provided backup.
If no backup_info is provided delete all available WAL files
:param BackupInfo|None backup_info: the backup information structure
:return list: a list of removed WAL files
"""
removed = []
with self.server.xlogdb() as fxlogdb:
xlogdb_new = fxlogdb.name + ".new"
with open(xlogdb_new, 'w') as fxlogdb_new:
for line in fxlogdb:
wal_info = WalFileInfo.from_xlogdb_line(line)
if not xlog.is_any_xlog_file(wal_info.name):
output.error(
"invalid xlog segment name %r\n"
"HINT: Please run \"barman rebuild-xlogdb %s\" "
"to solve this issue",
wal_info.name, self.config.name)
continue
# Keeps the WAL segment if it is a history file or later
# than the given backup (the first available)
if (xlog.is_history_file(wal_info.name) or
(backup_info and
wal_info.name >= backup_info.begin_wal)):
fxlogdb_new.write(wal_info.to_xlogdb_line())
continue
else:
self.delete_wal(wal_info)
removed.append(wal_info.name)
fxlogdb_new.flush()
os.fsync(fxlogdb_new.fileno())
shutil.move(xlogdb_new, fxlogdb.name)
fsync_dir(os.path.dirname(fxlogdb.name))
return removed
def validate_last_backup_maximum_age(self, last_backup_maximum_age):
"""
Evaluate the age of the last available backup in a catalogue.
If the last backup is older than the specified time interval (age),
the function returns False. If within the requested age interval,
the function returns True.
        :param datetime.timedelta last_backup_maximum_age: time interval
representing the maximum allowed age for the last backup
in a server catalogue
:return tuple: a tuple containing the boolean result of the check and
auxiliary information about the last backup current age
"""
# Get the ID of the last available backup
backup_id = self.get_last_backup_id()
if backup_id:
# Get the backup object
backup = BackupInfo(self.server, backup_id=backup_id)
now = datetime.datetime.now(dateutil.tz.tzlocal())
# Evaluate the point of validity
validity_time = now - last_backup_maximum_age
# Pretty print of a time interval (age)
msg = human_readable_timedelta(now - backup.end_time)
# If the backup end time is older than the point of validity,
# return False, otherwise return true
if backup.end_time < validity_time:
return False, msg
else:
return True, msg
else:
# If no backup is available return false
return False, "No available backups"
def backup_fsync_and_set_sizes(self, backup_info):
"""
Fsync all files in a backup and set the actual size on disk
of a backup.
Also evaluate the deduplication ratio and the deduplicated size if
applicable.
:param barman.infofile.BackupInfo backup_info: the backup to update
"""
# Calculate the base backup size
self.executor.current_action = "calculating backup size"
_logger.debug(self.executor.current_action)
backup_size = 0
deduplicated_size = 0
backup_dest = backup_info.get_basebackup_directory()
for dir_path, _, file_names in os.walk(backup_dest):
# execute fsync() on the containing directory
fsync_dir(dir_path)
# execute fsync() on all the contained files
for filename in file_names:
file_path = os.path.join(dir_path, filename)
file_fd = os.open(file_path, os.O_RDONLY)
file_stat = os.fstat(file_fd)
backup_size += file_stat.st_size
                # Files reused via hard links (st_nlink > 1) count toward the
                # backup size but not toward the deduplicated size
if file_stat.st_nlink == 1:
deduplicated_size += file_stat.st_size
os.fsync(file_fd)
os.close(file_fd)
# Save size into BackupInfo object
backup_info.set_attribute('size', backup_size)
backup_info.set_attribute('deduplicated_size', deduplicated_size)
if backup_info.size > 0:
deduplication_ratio = 1 - (float(
backup_info.deduplicated_size) / backup_info.size)
else:
deduplication_ratio = 0
if self.config.reuse_backup == 'link':
output.info(
"Backup size: %s. Actual size on disk: %s"
" (-%s deduplication ratio)." % (
pretty_size(backup_info.size),
pretty_size(backup_info.deduplicated_size),
'{percent:.2%}'.format(percent=deduplication_ratio)
))
else:
output.info("Backup size: %s" %
pretty_size(backup_info.size))
|
hareevs/pgbarman
|
barman/backup.py
|
Python
|
gpl-3.0
| 35,032
|
import colander
from colander import MappingSchema
from colander import SchemaNode
from colander import String
@colander.deferred
def deferred_name_default(node, kw):
if 'log_diagram' in kw:
name = kw['log_diagram'].name
if name is None:
return colander.null
return name
return colander.null
@colander.deferred
def deferred_description_default(node, kw):
if 'log_diagram' in kw:
desc = kw['log_diagram'].description
if desc is None:
return colander.null
return desc
return colander.null
class DisplaySchema(MappingSchema):
name = SchemaNode(typ=String(),
title='Name',
default=deferred_name_default)
description = SchemaNode(typ=String(),
title='Description',
default=deferred_description_default,
missing=None)
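# Minimal binding sketch (illustrative only; ``log_diagram`` is assumed to be
# any object exposing ``name`` and ``description`` attributes). The deferred
# defaults above are resolved when the schema is bound:
#
# schema = DisplaySchema().bind(log_diagram=log_diagram)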
|
AlexanderLang/OpenAutomatedFarm
|
FarmGUI/farmgui/schemas/DisplaySchemas.py
|
Python
|
gpl-3.0
| 945
|
import os
from zipfile import ZipFile
from pupa.scrape import VoteEvent, Scraper
class NCVoteScraper(Scraper):
def scrape(self, chamber=None, session=None):
if not session:
session = self.latest_session()
self.info('no session specified, using %s', session)
chambers = [chamber] if chamber else ['upper', 'lower']
for chamber in chambers:
yield from self.scrape_chamber(chamber, session)
def scrape_chamber(self, chamber, session):
# special sessions need a ftp_session set
if 'E' in session:
ftp_session = session.replace('E', '_E')
else:
ftp_session = session
# Unfortunately, you now have to request access to FTP.
        # This method of retrieving votes needs to be changed, or we need to
        # fall back to traditional web scraping.
if session == '2009':
# 2009 files have a different delimiter and naming scheme.
vote_data_url = 'ftp://www.ncleg.net/Bill_Status/Vote Data 2009.zip'
naming_scheme = '{session}{file_label}.txt'
delimiter = ";"
else:
vote_data_url = 'ftp://www.ncleg.net/Bill_Status/Votes%s.zip' % ftp_session
naming_scheme = '{file_label}_{session}.txt'
delimiter = "\t"
try:
fname, resp = self.urlretrieve(vote_data_url)
except Exception as e:
self.error('Error retrieving {}: {}'.format(vote_data_url, e))
lines = self.get('ftp://www.ncleg.net/Bill_Status/').content.splitlines()
for line in lines:
if 'Votes' in str(line):
self.info('FTP directory includes: {}'.format(line))
return
zf = ZipFile(fname)
chamber_code = 'H' if chamber == 'lower' else 'S'
# Members_YYYY.txt: tab separated
# 0: id (unique only in chamber)
# 1: H or S
# 2: member name
# 3-5: county, district, party
# 6: mmUserId
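        # e.g. an (invented) row: "12\tH\tDoe, Jane\tWake\t34\tDEM\t4242"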
member_file = zf.open(naming_scheme.format(file_label='Members', session=ftp_session))
members = {}
for line in member_file.readlines():
data = line.decode().split(delimiter)
if data[1] == chamber_code:
members[data[0]] = data[2]
# Votes_YYYY.txt
# 0: sequence number
# 1: chamber (S/H)
# 2: date
# 3: prefix
# 4: bill_id
# 5: yes votes
# 6: no votes
# 7: excused absences
# 8: excused votes
# 9: didn't votes
# 10: total yes+no
# 11: sponsor
# 12: reading info
# 13: info
# 20: PASSED/FAILED
# 21: legislative day
vote_file = zf.open(naming_scheme.format(file_label='Votes', session=ftp_session))
bill_chambers = {'H': 'lower', 'S': 'upper'}
votes = {}
for line in vote_file.readlines():
data = line.decode().split(delimiter)
if len(data) < 24:
self.warning('line too short %s', data)
continue
if data[1] == chamber_code:
date = data[2][:19]
if data[3][0] not in bill_chambers:
# skip votes that aren't on bills
self.info('skipping vote %s' % data[0])
continue
ve = VoteEvent(chamber=chamber,
start_date=date.replace(' ', 'T'),
motion_text=data[13],
result='pass' if 'PASS' in data[20] else 'fail',
bill_chamber=bill_chambers[data[3][0]],
bill=data[3] + data[4],
legislative_session=session,
classification='passage',
)
ve.set_count('yes', int(data[5]))
ve.set_count('no', int(data[6]))
ve.set_count('absent', int(data[7]))
ve.set_count('excused', int(data[8]))
ve.set_count('not voting', int(data[9]))
votes[data[0]] = ve
member_vote_file = zf.open(naming_scheme.format(file_label='MemberVotes',
session=ftp_session))
# 0: member id
# 1: chamber (S/H)
# 2: vote id
# 3: vote chamber (always same as 1)
# 4: vote (Y,N,E,X)
# 5: pair ID (member)
# 6: pair order
# If a vote is paired then it should be counted as an 'other'
for line in member_vote_file.readlines():
data = line.decode().split(delimiter)
if data[1] == chamber_code:
try:
member_voting = members[data[0]]
except KeyError:
self.debug('Member %s not found.' % data[0])
continue
try:
vote = votes[data[2]]
except KeyError:
self.debug('Vote %s not found.' % data[2])
continue
# -1 votes are Lt. Gov, not included in count, so we use a hacky way to
# increment the counts
if data[4] == 'Y' and not data[5]:
if data[0] == '-1':
                        for c in vote.counts:
if c['option'] == 'yes':
c['count'] += 1
vote.yes(member_voting)
elif data[4] == 'N' and not data[5]:
if data[0] == '-1':
                        for c in vote.counts:
if c['option'] == 'no':
c['count'] += 1
vote.no(member_voting)
else:
# for some reason other_count is high for paired votes so we use the hack
# to decrement counts
if data[5]:
                        for c in vote.counts:
if c['option'] == 'other':
c['count'] -= 1
# is either E: excused, X: no vote, or paired (doesn't count)
vote_type = {'E': 'excused', 'X': 'not voting', 'V': 'other'}[data[4]]
vote.vote(vote_type, member_voting)
for vote in votes.values():
vote.add_source(vote_data_url)
yield vote
# remove file
zf.close()
os.remove(fname)
|
votervoice/openstates
|
openstates/nc/votes.py
|
Python
|
gpl-3.0
| 6,583
|
from registry.config.defaults import LOG_CONF
LOG_CONF['loggers']['root']['level'] = 'DEBUG'
|
arsgeografica/kinderstadt-registry
|
registry/config/development.py
|
Python
|
gpl-3.0
| 94
|
"""
Copyright 2000, 2001 Astrolabe by William McClain
Forked in 2013 to Astronomia
Copyright 2013 Astronomia by Tim Cera
This file is part of Astronomia.
Astronomia is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Astronomia is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Astronomia; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
"""Global values.
These can be set directly, or there is a routine astronomia.util.load_params()
which will assign them based on values in a parameter text file.
"""
#
# Abbreviation for standard timezone (e.g., "CST" for North American
# Central Standard Time)
#
standard_timezone_name = "UT"
#
# Time in fractional days to be subtracted from UT to calculate the standard
# time zone offset. Locations east of Greenwich should use negative values.
#
standard_timezone_offset = 0.0
#
# Abbreviation for daylight savings timezone (e.g., "CDT" for North American
# Central Daylight Time)
#
# This is optional. If set to None, no daylight savings conversions
# will be performed.
#
daylight_timezone_name = None
#
# Time in fractional days to be subtracted from UT to calculate the daylight
# savings time zone offset. Locations east of Greenwich should use negative
# values.
#
# This value is not used unless "daylight_timezone_name" has an value other
# than None.
#
daylight_timezone_offset = None
#
# Observer's longitude in radians, measured positive west of Greenwich,
# negative to the east. Should be between -pi...pi.
#
longitude = 0.0
#
# Observer's latitude in radians, measured positive north of the equator,
# negative to the south. Should be between -pi/2...pi/2.
#
latitude = 0.0
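#
# For example (illustrative values only): an observer at 82.5 degrees west,
# 29.7 degrees north would set
#
#     import math
#     longitude = math.radians(82.5)
#     latitude = math.radians(29.7)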
#
# Month names. There must be twelve. The default is three-character
# abbreviations so that listings line up.
#
month_names = ("jan",
"feb",
"mar",
"apr",
"may",
"jun",
"jul",
"aug",
"sep",
"oct",
"nov",
"dec")
#
# Season names. There must be four. These are used to characterize the
# equinoxes and solstices.
#
season_names = ("spring", "summer", "autumn", "winter")
|
webplate/astrini
|
astronomia/globals.py
|
Python
|
gpl-3.0
| 2,736
|
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
import MySQLdb
from flask import _request_ctx_stack
class MySQL(object):
def __init__(self, app=None, **connect_args):
self.connect_args = connect_args
if app is not None:
self.app = app
self.init_app(self.app)
else:
self.app = None
def init_app(self, app):
self.app = app
self.app.config.setdefault('MYSQL_DATABASE_HOST', 'localhost')
self.app.config.setdefault('MYSQL_DATABASE_PORT', 3306)
self.app.config.setdefault('MYSQL_DATABASE_USER', None)
self.app.config.setdefault('MYSQL_DATABASE_PASSWORD', None)
self.app.config.setdefault('MYSQL_DATABASE_DB', None)
self.app.config.setdefault('MYSQL_DATABASE_CHARSET', 'utf8')
self.app.config.setdefault('MYSQL_USE_UNICODE', True)
self.app.teardown_request(self.teardown_request)
self.app.before_request(self.before_request)
def connect(self):
if self.app.config['MYSQL_DATABASE_HOST']:
self.connect_args['host'] = self.app.config['MYSQL_DATABASE_HOST']
if self.app.config['MYSQL_DATABASE_PORT']:
self.connect_args['port'] = self.app.config['MYSQL_DATABASE_PORT']
if self.app.config['MYSQL_DATABASE_USER']:
self.connect_args['user'] = self.app.config['MYSQL_DATABASE_USER']
if self.app.config['MYSQL_DATABASE_PASSWORD']:
self.connect_args['passwd'] = self.app.config['MYSQL_DATABASE_PASSWORD']
if self.app.config['MYSQL_DATABASE_DB']:
self.connect_args['db'] = self.app.config['MYSQL_DATABASE_DB']
if self.app.config['MYSQL_DATABASE_CHARSET']:
self.connect_args['charset'] = self.app.config['MYSQL_DATABASE_CHARSET']
if self.app.config['MYSQL_USE_UNICODE']:
self.connect_args['use_unicode'] = self.app.config['MYSQL_USE_UNICODE']
return MySQLdb.connect(**self.connect_args)
def before_request(self):
ctx = _request_ctx_stack.top
ctx.mysql_db = self.connect()
def teardown_request(self, exception):
ctx = _request_ctx_stack.top
if hasattr(ctx, "mysql_db"):
ctx.mysql_db.close()
def get_db(self):
ctx = _request_ctx_stack.top
if ctx is not None:
return ctx.mysql_db
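# Minimal usage sketch (illustrative only; the app name, credentials and query
# below are assumptions, not part of this extension):
#
# from flask import Flask
# from flaskext.mysql import MySQL
#
# app = Flask(__name__)
# app.config['MYSQL_DATABASE_USER'] = 'user'
# app.config['MYSQL_DATABASE_PASSWORD'] = 'secret'
# app.config['MYSQL_DATABASE_DB'] = 'example_db'
# mysql = MySQL(app)
#
# @app.route('/')
# def index():
#     cursor = mysql.get_db().cursor()
#     cursor.execute('SELECT VERSION()')
#     return str(cursor.fetchone())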
|
yelongyu/chihu
|
venv/lib/python2.7/site-packages/flaskext/mysql.py
|
Python
|
gpl-3.0
| 2,365
|
# process Forsythe extTransects
import arcpy, time, os, pythonaddins, sys, math
sys.path.append(r"\\Mac\Home\Documents\scripting\TransectExtraction") # path to TransectExtraction module
from TransectExtraction import *
arcpy.env.overwriteOutput = True # Overwrite output?
arcpy.CheckOutExtension("Spatial") # Checkout Spatial Analysis extension
# INPUTS - same as TE
SiteYear_strings = {'site': 'Forsythe',
'year': '2014',
'region': 'NewJersey'}
arcpy.env.workspace = home = r'T:\Commons_DeepDive\DeepDive\{region}\{site}\{year}\{site}{year}.gdb'.format(**SiteYear_strings)
# INPUTS - not in TE
fill = -99999
old_transects = '{region}N_LT'.format(**SiteYear_strings)
extTransects = 'Forsythe_extTrans'
new_extTrans = 'Forsythe_extTrans_v2'
in_file = extTransects
"""
PROCESSING
"""
# Loops through each of the fields,
# creates a FieldMap object that retrieves the old value for each field,
# and adds the FieldMap to the FieldMappings
fieldlist = arcpy.ListFields(extTransects)[2:18]
for i in range(len(fieldlist)):
fieldlist[i] = fieldlist[i].name
fms = arcpy.FieldMappings()
for field in fieldlist:
    print field  # fieldlist holds field names (strings) after the loop above
fm = arcpy.FieldMap()
fm.addInputField(old_transects, field)
fms.addFieldMap(fm)
arcpy.SpatialJoin_analysis(extTransects, old_transects, new_extTrans, 'JOIN_ONE_TO_ONE',
join_type='KEEP_ALL', field_mapping=fms, match_option='WITHIN_A_DISTANCE',search_radius='5 METERS')
# Replace false attributes with None values
# Select features that did not have spatial match in old_transects
arcpy.SelectLayerByAttribute_management(new_extTrans,"ADD_TO_SELECTION","Join_Count=0")
# Overwrite values with fill in selected features
for field in fieldlist:
arcpy.CalculateField_management(new_extTrans, field, fill)
# Replace fills with Null values
ReplaceValueInFC(new_extTrans,fields=[],oldvalue=-99999,newvalue=None)
# delete fields used in processing
arcpy.DeleteField_management(new_extTrans, ['Join_Count','TARGET_FID'])
# Transect processing
in_fc = 'Forsythe_extTrans_v2'
base_fc = 'Forsythe_extTrans_v3'
sortfield = 'trans_sort'
sort_corner='LL'
arcpy.CreateFeatureclass_management(home,'sort_line1', "POLYLINE", projCR)
arcpy.CopyFeatures_management('sort_line1','{}\\sort_line2'.format(home))
sort_line_list = ['sort_line1','sort_line2']
SortTransectsFromSortLines(in_fc, base_fc, sort_line_list, sortfield='trans_sort',sort_corner='LL')
|
esturdivant-usgs/plover_transect_extraction
|
TransectExtraction/archive/pre_prepandas/Forsythe_transects.py
|
Python
|
gpl-3.0
| 2,430
|
#!/bin/python
#
# Chisne ShengJi card game
# Song, Qiang <keeyang@ustc.edu>
#
# player action and player strategies
from Card import Card
class Player:
def __init__(self, _id = 0):
self.hands = dict({"S":list(), "D":list(), "H":list(),
"C":list(), "Z":list()})
self.playedcards = list()
self.id = _id;
def receive_card(self, card, game):
if card.suit in ["S", "D", "H", "C"] and card.number != game.level:
self.hands[card.suit].append(card)
self.hands[card.suit].sort(key = lambda c: c.number)
if len(self.hands[card.suit]) >= 6 \
and Card(card.suit, game.level) in self.hands["Z"]:
game.set_trump(Card(card.suit, game.level))
else:
self.hands["Z"].append(card)
self.hands["Z"].sort(key = lambda c: c.number)
def remove_card(self, card, game):
if card.number == game.level or card.suit == "J":
self.hands["Z"].remove(card)
else:
self.hands[card.suit].remove(card)
def has_better_card_single(self, card, game):
if (card.suit == game.trump or card.number == game.level):
if self.hands["Z"]:
for c in self.hands["Z"]:
if not game.cmp_card_single(card, c):
return True
else:
if self.hands[card.suit] \
and not game.cmp_card_single(card, self.hands[card.suit][-1]):
return True
return False
def has_better_card_pair(self, cards, game):
if (cards[0].suit == game.trump or cards[0].number == game.level):
if self.hands["Z"]:
for i in range(len(self.hands["Z"]) - 1):
if self.hands["Z"][i].is_pair(self.hands["Z"][i+1]) and \
not game.cmp_card(cards[0], self.hands["Z"][i]):
return True
else:
if self.hands[cards[0].suit]:
for i in range(len(self.hands[cards[0].suit]) - 1):
if self.hands[cards[0].suit][i].is_pair(self.hands[cards[0].suit][i+1]) and \
not game.cmp_card(cards[0], self.hands[cards[0].suit][i]):
return True
return False
def has_better_card_tuolaji(self, cards, game):
if (cards[0].suit == game.trump or cards[0].number == game.level):
print "NOT IMPLEMENTED YET"
else:
if self.hands[cards[0].suit]:
for i in range(len(self.hands[cards[0].suit]) - 3):
if game.is_tuolaji(self.hands[cards[0].suit][i:(i+4)]) and \
not game.cmp_card(cards[0], self.hands[cards[0].suit][i]):
return True
return False
def has_better_card(self, cards, game):
cards.sort()
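        # Scan the sorted cards left to right, classifying each run as a
        # tuolaji (two consecutive pairs), a plain pair, or a single, and
        # check whether the hand holds something that beats that component.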
i = 0
while i < len(cards):
if game.is_pair(cards[i:(i+2)]):
if game.is_tuolaji(cards[i:(i+4)]):
if self.has_better_card_tuolaji(cards[i:(i+4)], game):
return True
else:
i += 4
elif self.has_better_card_pair(cards[i:(i+2)], game):
return True
else:
i += 2
elif self.has_better_card_single(cards[i], game):
return True
else:
i += 1
return False
def play_card(self, card, game):
self.playedcards.append([card])
if card.number == game.level or card.suit == "J":
self.hands["Z"].remove(card)
else:
self.hands[card.suit].remove(card)
def play_card_first(self, game):
var = raw_input("Player {:d} Which card to play (First play: enter to delegate): ".format(self.id))
if var:
card = Card().construct_from_str(var)
self.play_card(card, game)
return
for suit in ["H", "S", "C", "D"]:
if game.trump != suit and self.hands[suit] \
and self.hands[suit][-1].number >= 11:
self.playedcards.append([self.hands[suit][-1]])
del self.hands[suit][-1]
return
if self.hands[game.trump]:
self.playedcards.append([self.hands[game.trump][0]])
del self.hands[game.trump][0]
return
if self.hands["Z"]:
self.playedcards.append([self.hands["Z"][0]])
del self.hands["Z"][0]
return
for suit in ["H", "S", "C", "D"]:
if game.trump != suit and self.hands[suit]:
self.playedcards.append([self.hands[suit][0]])
del self.hands[suit][0]
return
def play_card_second(self, game):
var = raw_input("Player {:d} Which card to play (enter to delegate): ".format(self.id))
if var:
card = Card().construct_from_str(var)
self.play_card(card, game)
return
last_card = game.players[game.last_player].playedcards[-1][0]
if last_card.suit == "J" or last_card.suit == game.trump \
or last_card.number == game.level:
best = Card("Z", -1)
worst = Card("Z", 15)
if self.hands["Z"]:
best = self.hands["Z"][0]
worst = self.hands["Z"][0]
elif self.hands[game.trump]:
best = self.hands[game.trump][0]
worst = self.hands[game.trump][0]
if best.number != -1:
for card in self.hands["Z"]:
if game.cmp_card_single(card, best):
best = card
if game.cmp_card_single(worst, card):
worst = card
if not game.cmp_card_single(last_card, best):
self.playedcards.append([best])
self.remove_card(best, game)
else:
self.playedcards.append([worst])
self.remove_card(worst, game)
else:
for cards in self.hands.values():
for c in cards:
if worst.number > c.number:
worst = c
self.playedcards.append([worst])
self.remove_card(worst, game)
else:
if self.hands[last_card.suit]:
card = self.hands[last_card.suit][-1] if not game.cmp_card_single(last_card, self.hands[last_card.suit][-1]) else self.hands[last_card.suit][0]
self.playedcards.append([card])
self.remove_card(card, game)
elif self.hands[game.trump]:
card = self.hands[game.trump][0]
self.playedcards.append([card])
self.remove_card(card, game)
elif self.hands["Z"]:
card = self.hands["Z"][0]
self.playedcards.append([card])
self.remove_card(card, game)
else:
worst = Card("Z", 15)
for cards in self.hands.values():
for c in cards:
if worst.number > c.number:
worst = c
self.playedcards.append([worst])
self.remove_card(worst, game)
def play_card_third(self, game):
var = raw_input("Player {:d} Which card to play (enter to delegate): ".format(self.id))
if var:
card = Card().construct_from_str(var)
self.play_card(card, game)
return
dealer_card = game.players[game.dealer].playedcards[-1][0]
prev_card = game.players[game.last_player].playedcards[-1][0]
if dealer_card.suit == "J" or dealer_card.suit == game.trump \
or dealer_card.number == game.level:
if self.hands["Z"] or self.hands[game.trump]:
## dealer wants to score
if game.cmp_card_single(dealer_card, prev_card):
if dealer_card.suit == "J" \
or dealer_card.number == game.level \
or dealer_card.number >= 10:
# add more score
score_5_cards = [card for card in self.hands["Z"] \
+ self.hands[game.trump] \
if card.number == 5]
score_10_cards = [card for card in self.hands["Z"] \
+ self.hands[game.trump] \
if card.number == 10]
score_K_cards = [card for card in self.hands["Z"] \
+ self.hands[game.trump] \
if card.number == 13]
if score_5_cards or score_10_cards or score_K_cards:
score_card = score_10_cards[0] if score_10_cards else (score_K_cards[0] if score_K_cards else score_5_cards[0])
self.play_card(score_card, game)
return
## dealer wants to turn over control
best_card = Card("Z", -1)
worst_card = Card("Z", 15)
if self.hands["Z"]:
best_card = self.hands["Z"][0]
worst_card = self.hands["Z"][0]
elif self.hands[game.trump]:
best_card = self.hands[game.trump][0]
worst_card = self.hands[game.trump][0]
for card in self.hands["Z"]:
if game.cmp_card_single(card, best_card):
best_card = card
if game.cmp_card([worst_card], [card]):
worst_card = card
if not game.cmp_card([prev_card], [best_card]):
self.playedcards.append([best_card])
self.remove_card(best_card, game)
else:
self.playedcards.append([worst_card])
self.remove_card(worst_card, game)
else:
## dealer wants to score
if game.cmp_card([dealer_card], [prev_card]):
if dealer_card.suit == "J" \
or dealer_card.number == game.level \
or dealer_card.number >= 10:
# add more score
score_5_cards = [card for card in self.hands["H"] \
+ self.hands["S"] + self.hands["C"] \
+ self.hands["D"] if card.number == 5]
score_10_cards = [card for card in self.hands["H"] \
+ self.hands["S"] + self.hands["C"] \
+ self.hands["D"] if card.number == 10]
score_K_cards = [card for card in self.hands["H"] \
+ self.hands["S"] + self.hands["C"] \
+ self.hands["D"] if card.number == 13]
                        if score_5_cards or score_10_cards or score_K_cards:
                            score_card = score_10_cards[0] if score_10_cards else (score_K_cards[0] if score_K_cards else score_5_cards[0])
                            self.play_card(score_card, game)
                            return
worst_card = Card("Z", 15)
for cards in self.hands.values():
for c in cards:
if worst_card.number > c.number:
worst_card = c
self.playedcards.append([worst_card])
self.remove_card(worst_card, game)
else:
if self.hands[dealer_card.suit]:
card = self.hands[dealer_card.suit][-1] if not game.cmp_card_single(dealer_card, self.hands[dealer_card.suit][-1]) else self.hands[dealer_card.suit][0]
self.playedcards.append([card])
self.remove_card(card, game)
elif self.hands[game.trump]:
card = self.hands[game.trump][0]
self.playedcards.append([card])
self.remove_card(card, game)
elif self.hands["Z"]:
card = self.hands["Z"][0]
self.playedcards.append([card])
self.remove_card(card, game)
else:
worst = Card("Z", 15)
for cards in self.hands.values():
for c in cards:
if worst.number > c.number:
worst = c
self.playedcards.append([worst])
self.remove_card(worst, game)
def play_card_fourth(self, game):
var = raw_input("Player {:d} Which card to play (enter to delegate): ".format(self.id))
if var:
card = Card().construct_from_str(var)
self.playedcards.append([card])
self.remove_card(card, game)
return
last_card = game.players[game.last_player].playedcards[-1][0]
if last_card.suit == "J" or last_card.suit == game.trump \
or last_card.number == game.level:
best = Card("Z", -1)
worst = Card("Z", 15)
if self.hands["Z"]:
best = self.hands["Z"][0]
worst = self.hands["Z"][0]
elif self.hands[game.trump]:
best = self.hands[game.trump][0]
worst = self.hands[game.trump][0]
if best.number != -1:
for card in self.hands["Z"]:
if game.cmp_card([card], [best]):
best = card
if game.cmp_card([worst], [card]):
worst = card
if not game.cmp_card([last_card], [best]):
self.playedcards.append([best])
self.remove_card(best, game)
else:
self.playedcards.append([worst])
self.remove_card(worst, game)
else:
for cards in self.hands.values():
for c in cards:
if worst.number > c.number:
worst = c
self.playedcards.append([worst])
self.remove_card(worst, game)
else:
if self.hands[last_card.suit]:
card = self.hands[last_card.suit][-1] if not game.cmp_card_single(last_card, self.hands[last_card.suit][-1]) else self.hands[last_card.suit][0]
self.playedcards.append([card])
self.remove_card(card, game)
elif self.hands[game.trump]:
card = self.hands[game.trump][0]
self.playedcards.append([card])
self.remove_card(card, game)
elif self.hands["Z"]:
card = self.hands["Z"][0]
self.playedcards.append([card])
self.remove_card(card, game)
else:
worst = Card("Z", 15)
for cards in self.hands.values():
for c in cards:
if worst.number > c.number:
worst = c
self.playedcards.append([worst])
self.remove_card(worst, game)
def __str__(self):
s = ""
for v in self.hands.values():
if v: s += ", ".join([str(e) for e in v]) + "; "
return s
def print_hands(self):
s = ""
for v in self.hands.values():
if v: s += ", ".join([str(e) for e in v]) + "\n"
print s
def print_played_cards(self):
print ", ".join(["|".join([str(e) for e in v]) for v in self.playedcards])
|
songqiang/ShengJi
|
Player.py
|
Python
|
gpl-3.0
| 16,083
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# attribute_selection_test.py
# Copyright (C) 2014 Fracpete (pythonwekawrapper at gmail dot com)
import os
import sys
import traceback
import weka.core.jvm as jvm
import wekaexamples.helper as helper
from weka.core.converters import Loader
from weka.core.classes import Random
from weka.attribute_selection import ASSearch, ASEvaluation, AttributeSelection
from weka.classifiers import Classifier, Evaluation
from weka.filters import Filter
def use_classifier(data):
"""
Uses the meta-classifier AttributeSelectedClassifier for attribute selection.
:param data: the dataset to use
:type data: Instances
"""
print("\n1. Meta-classifier")
classifier = Classifier(classname="weka.classifiers.meta.AttributeSelectedClassifier")
aseval = ASEvaluation(classname="weka.attributeSelection.CfsSubsetEval")
assearch = ASSearch(classname="weka.attributeSelection.GreedyStepwise", options=["-B"])
base = Classifier(classname="weka.classifiers.trees.J48")
# setting nested options is always a bit tricky, getting all the escaped double quotes right
# simply using the bean property for setting Java objects is often easier and less error prone
classifier.set_property("classifier", base.jobject)
classifier.set_property("evaluator", aseval.jobject)
classifier.set_property("search", assearch.jobject)
evaluation = Evaluation(data)
evaluation.crossvalidate_model(classifier, data, 10, Random(1))
print(evaluation.summary())
def use_filter(data):
"""
Uses the AttributeSelection filter for attribute selection.
:param data: the dataset to use
:type data: Instances
"""
print("\n2. Filter")
flter = Filter(classname="weka.filters.supervised.attribute.AttributeSelection")
aseval = ASEvaluation(classname="weka.attributeSelection.CfsSubsetEval")
assearch = ASSearch(classname="weka.attributeSelection.GreedyStepwise", options=["-B"])
flter.set_property("evaluator", aseval.jobject)
flter.set_property("search", assearch.jobject)
flter.inputformat(data)
filtered = flter.filter(data)
print(str(filtered))
def use_low_level(data):
"""
Uses the attribute selection API directly.
:param data: the dataset to use
:type data: Instances
"""
print("\n3. Low-level")
attsel = AttributeSelection()
aseval = ASEvaluation(classname="weka.attributeSelection.CfsSubsetEval")
assearch = ASSearch(classname="weka.attributeSelection.GreedyStepwise", options=["-B"])
attsel.jwrapper.setEvaluator(aseval.jobject)
attsel.jwrapper.setSearch(assearch.jobject)
attsel.select_attributes(data)
indices = attsel.selected_attributes
print("selected attribute indices (starting with 0):\n" + str(indices.tolist()))
def main(args):
"""
Performs attribute selection on the specified dataset (uses vote UCI dataset if no dataset specified). Last
attribute is assumed to be the class attribute. Used: CfsSubsetEval, GreedyStepwise, J48
:param args: the commandline arguments
:type args: list
"""
# load a dataset
if len(args) <= 1:
data_file = helper.get_data_dir() + os.sep + "vote.arff"
else:
data_file = args[1]
helper.print_info("Loading dataset: " + data_file)
loader = Loader(classname="weka.core.converters.ArffLoader")
data = loader.load_file(data_file)
data.class_is_last()
use_classifier(data)
use_filter(data)
use_low_level(data)
if __name__ == "__main__":
try:
jvm.start()
main(sys.argv)
except Exception, e:
print(traceback.format_exc())
finally:
jvm.stop()
|
fracpete/python-weka-wrapper-examples
|
src/wekaexamples/attribute_selection/attribute_selection_test.py
|
Python
|
gpl-3.0
| 4,279
|
import shelve
class Recoder:
def __init__(self):
print("Recoder Message: Recoder start")
# file = shelve.open("./record/list.db", protocol=2, flag='c')
# file['num'] = 0;
# file['dict'] = {0:['title'], 1:['id'], 2:['date'],3:['subtitle_en'], 4:['subtitle_cn'], 5:['download_video'], 6:['compos_en'], 7:['upload_en'], 8:['compos_cn'], 9:['upload_cn']}
# file.sync()
# file.close()
def latest(self):
file = shelve.open("./record/list.db", protocol=2, flag='c')
num = file['num']
out_dict = {}
for i in range(0,7):
out_dict[i] = file['dict'][i][num]
file.close()
return out_dict
def add_new_video(self, title, id, date,):
file = shelve.open("./record/list.db", writeback=True)
num = file['num'] + 1
file['dict'][0].append(title)
file['dict'][1].append(id)
file['dict'][2].append(date)
for i in range(3,10):
temp = file['dict'][i]
temp.append('null')
file['dict'][i] = temp
file['num'] = num
file.sync()
print('Recoder Message: the dict is : \n')
print(file['dict'])
file.close()
print("Recoder Message: add new video: " + title)
def check_id(self, id):
file = shelve.open("./record/list.db", protocol=2, flag='c')
id_latest = file['dict'][1][file['num']]
print("Recoder Message: latest id is " + id_latest)
file.close()
if id_latest == id:
return True
else:
return False
def check_sub(self, lang):
if lang == 'en':
file = shelve.open("./record/list.db", protocol=2, flag='c')
num = file['num']
sub_latest = file['dict'][3][num]
file.close()
return sub_latest
elif lang == 'cn':
file = shelve.open("./record/list.db", protocol=2, flag='c')
num = file['num']
sub_latest = file['dict'][4][num]
file.close()
return sub_latest
def check_download(self):
file = shelve.open("./record/list.db", protocol=2, flag='c')
num = file['num']
download_latest = file['dict'][5][num]
file.close()
return download_latest
def check_compos(self):
file = shelve.open("./record/list.db", protocol=2, flag='c')
num = file['num']
compos_latest = file['dict'][6][num]
file.close()
return compos_latest
def check_upload(self):
file = shelve.open("./record/list.db", protocol=2, flag='c')
num = file['num']
upload_latest = file['dict'][7][num]
file.close()
return upload_latest
def add_content(self, item, content):
file = shelve.open("./record/list.db", writeback=True)
num = file['num']
file['dict'][item][num] = content
file.sync()
file.close()
def simple_judge(self):
file = shelve.open("./record/list.db", writeback=True)
num = file['num']
sub_latest = file['dict'][3][num]
download_latest = file['dict'][5][num]
compos_latest = file['dict'][6][num]
upload_latest = file['dict'][7][num]
for each in sub_latest,download_latest,compos_latest,upload_latest:
if each == 'null': # if not finished, do schedule again
return False
elif each == 'running':
return False
return True
if __name__ == '__main__':
recoder = Recoder()
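
    # --- Usage sketch (illustrative only) ---
    # A hedged example of how this recorder might be driven, assuming
    # ./record/list.db was initialised with the structure shown in the
    # commented-out __init__ above (a 'num' counter plus a 'dict' of lists).
    # The title/id/date values below are hypothetical.
    #
    # recoder.add_new_video('Some video title', 'abc123', '2016-01-01')
    # print(recoder.check_id('abc123'))   # True once it is the latest entry
    # recoder.add_content(5, 'done')      # mark download_video (item 5) as done
    # print(recoder.simple_judge())       # True only when no step is 'null' or 'running'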
|
fxt0706/YouTools
|
Recorder.py
|
Python
|
gpl-3.0
| 3,593
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
from ulakbus.lib.common import is_akisini_belli_bir_adimdan_aktif_et
from ulakbus.models import BAPButcePlani, BAPRapor, Okutman, Permission, User, BAPSatinAlma, \
BAPGenel
from zengine.lib.translation import gettext as _
from datetime import datetime, timedelta
from zengine.models import WFInstance, BPMNWorkflow, TaskInvitation
import json
gundem_kararlari = {
1: {'tip_adi': 'proje_basvurusu',
'kararlar': [('kabul', 'Kabul'), ('red', 'Red'), ('revizyon', 'Revizyon')],
'default': 'kabul'},
2: {'tip_adi': 'ek_butce_talebi',
'kararlar': [('kabul', 'Kabul'), ('red', 'Red')],
'default': 'kabul'},
3: {'tip_adi': 'fasil_aktarim_talebi',
'kararlar': [('kabul', 'Kabul'), ('red', 'Red')],
'default': 'kabul'},
4: {'tip_adi': 'ek_sure_talebi',
'kararlar': [('kabul', 'Kabul'), ('red', 'Red')],
'default': 'kabul'},
5: {'tip_adi': 'proje_sonuc_raporu',
'kararlar': [('basarili', 'Başarılı'), ('basarisiz', 'Başarısız')],
'default': 'basarili'},
6: {'tip_adi': 'proje_donem_raporu',
'kararlar': [('basarili', 'Başarılı'), ('basarisiz', 'Başarısız')],
'default': 'basarili'},
7: {'tip_adi': 'proje_iptal_talebi',
'kararlar': [('kabul', 'Kabul'), ('red', 'Red')],
'default': 'kabul'},
8: {'tip_adi': 'yurutucu_degisikligi',
'kararlar': [('kabul', 'Kabul'), ('red', 'Red')],
'default': 'kabul'},
9: {'tip_adi': 'etkinlik_basvuru',
'kararlar': [('kabul', 'Kabul'), ('red', 'Red')],
'default': 'kabul'},
}
class KomisyonKarariSonrasiAdimlar():
def __init__(self, obj, user):
self.object = obj
self.user = user
def proje_basvurusu_kabul(self):
"""
Projenin durumu komisyon tarafından onaylandı anlamına gelen 5 yapılır.
"""
eylem = "Onaylandı"
aciklama = "Proje, komisyon tarafından {} karar numarası ile onaylandı."
self.islem_gecmisi_guncelle(eylem, aciklama, durum=5)
self.butce_kalemleri_durum_degistir(durum=2)
bildirim = _(
u"%s adlı projeniz %s karar numarası ile komisyon tarafından onaylanmıştır.") % (
self.object.proje.ad, self.object.karar_no)
self.bildirim_gonder(bildirim)
self.butce_fisi_is_akisini_tetikle()
def proje_basvurusu_red(self):
"""
Projenin durumu komisyon tarafından reddedildi anlamına gelen 6 yapılır.
"""
eylem = "Reddedildi"
aciklama = "Proje komisyon tarafından {} karar numarası ile reddedildi."
self.islem_gecmisi_guncelle(eylem, aciklama, durum=6)
self.butce_kalemleri_durum_degistir(durum=3)
bildirim = _(
u"%s adlı projeniz %s karar numarası ile komisyon tarafından reddedilmiştir. Gerekçe:"
u" %s") % (self.object.proje.ad, self.object.karar_no, self.object.karar_gerekcesi)
self.bildirim_gonder(bildirim)
def proje_basvurusu_revizyon(self):
"""
Projenin durumu komisyon tarafından revizyon istendi anlamına gelen 7 yapılır.
Öğretim üyesine davet gönderilerek, projesini revize etmesi sağlanır.
"""
eylem = "Revizyon"
aciklama = "Proje, komisyon tarafindan {} karar numarası ile revizyona gonderildi."
self.islem_gecmisi_guncelle(eylem, aciklama, durum=7)
role = self.object.proje.basvuru_rolu
data = {'karar': 'revizyon',
'revizyon_gerekce': self.object.karar_gerekce}
step = '"bap_revizyon_noktasi", 1'
title = _(u"Proje Revizyon İsteği")
message = _(u"""%s adlı başvurunuza komisyon tarafından %s karar numarası ile
revizyon istenmiştir. Görev yöneticinizden ilgili isteğe ulaşabilir, proje revizyonunu
gerçekleştirebilirsiniz.""" % self.object.ad, self.object.karar_no)
sender = self.user
is_akisini_belli_bir_adimdan_aktif_et(role, self.object, data, step, title, message, sender)
def ek_butce_talebi_kabul(self):
"""
Ek bütçe talebinin kabul edilmesi halinde, değişen kalemler güncellenir.
Silinecek olan ve yeni eklenen kalemlerin durumları değiştirilir.
"""
ek_butce_bilgileri = json.loads(self.object.gundem_ekstra_bilgiler)
for kalem_id, data in ek_butce_bilgileri['ek_butce'].items():
if data['durum'] == 4:
continue
kalem = BAPButcePlani.objects.get(kalem_id)
if data['durum'] == 3:
kalem.birim_fiyat = data['yeni_birim_fiyat']
kalem.toplam_fiyat = data['yeni_toplam_fiyat']
kalem.adet = data['yeni_adet']
kalem.gerekce = data['gerekce']
kalem.muhasebe_kod_genel = data['muhasebe_kod_genel']
kalem.proje_durum = 2 if data['durum'] == 1 else 4
kalem.save()
genel = BAPGenel.get()
mevcut_taahhut_farki = ek_butce_bilgileri['toplam'] - ek_butce_bilgileri['mevcut_toplam']
genel.toplam_taahhut += mevcut_taahhut_farki
genel.save()
eylem = "Ek Bütçe Talebi Kabulü"
aciklama = "Ek bütçe talebi komisyon tarafından {} karar numarası ile kabul edildi."
self.islem_gecmisi_guncelle(eylem, aciklama)
        bildirim = _(
            u"%s adlı projeniz için yapmış olduğunuz ek bütçe talebi %s karar numarası ile "
            u"komisyon tarafından onaylanmıştır.") % (self.object.proje.ad, self.object.karar_no)
self.bildirim_gonder(bildirim)
def ek_butce_talebi_red(self):
"""
Ek bütçe talebinin reddi halinde, değişen kalemler güncellenir.
Silinecek olan ve yeni eklenen kalemlerin durumları değiştirilir.
"""
yeni_kalemler = BAPButcePlani.objects.filter(ilgili_proje=self.object.proje, proje_durum=1)
self.butce_kalemleri_durum_degistir(durum=3, kalemler=yeni_kalemler)
eylem = "Ek Bütçe Talebi Reddi"
aciklama = "Ek bütçe talebi komisyon tarafından {} karar numarası ile reddedildi."
self.islem_gecmisi_guncelle(eylem, aciklama)
bildirim = _(
u"%s adlı projeniz için yapmış olduğunuz ek bütçe talebi %s karar numarası ile "
u"komisyon tarafından reddedilmiştir. Gerekçe: %s") % (
self.object.proje.ad, self.object.karar_no, self.object.karar_gerekcesi)
self.bildirim_gonder(bildirim)
def fasil_aktarim_talebi_kabul(self):
"""
Öğretim üyesinin fasıl aktarım talebi kabul edildiğinde, değişiklik yapılan bütçe kalemleri
güncellenir. Proje işlem geçmişi güncellenir.
"""
fasil_bilgileri = json.loads(self.object.gundem_ekstra_bilgiler)
for kalem_id, data in fasil_bilgileri['fasil_islemleri'].items():
if data['durum'] == 2:
continue
else:
kalem = BAPButcePlani.objects.get(kalem_id)
kalem.birim_fiyat = data['yeni_birim_fiyat']
kalem.toplam_fiyat = data['yeni_toplam_fiyat']
kalem.adet = data['yeni_adet']
kalem.gerekce = data['gerekce']
kalem.muhasebe_kod_genel = data['muhasebe_kod_genel']
kalem.save()
genel = BAPGenel.get()
mevcut_taahhut_farki = fasil_bilgileri['yeni_toplam'] - fasil_bilgileri['mevcut_toplam']
genel.toplam_taahhut += mevcut_taahhut_farki
genel.save()
eylem = "Fasıl Aktarım Talebi Kabulü"
aciklama = "Fasıl aktarımı talebi komisyon tarafından {} karar numarası ile kabul edildi."
self.islem_gecmisi_guncelle(eylem, aciklama)
bildirim = _(
u"%s adlı projeniz için yapmış olduğunuz fasıl aktarımı talebi %s karar numarası ile "
u"komisyon tarafından kabul edilmiştir.") % (self.object.proje.ad, self.object.karar_no)
self.bildirim_gonder(bildirim)
def fasil_aktarim_talebi_red(self):
"""
Fasıl aktarımı talebi reddedildiğinde proje geçmişi
güncellenir ve öğretim üyesi karar hakkında bilgilendirilir.
"""
eylem = "Fasıl Aktarım Talebi Reddi"
aciklama = "Fasıl aktarımı talebi komisyon tarafından {} karar numarası ile reddedildi."
self.islem_gecmisi_guncelle(eylem, aciklama)
bildirim = _(
u"%s adlı projeniz için yapmış olduğunuz fasıl aktarımı talebi %s karar numarası ile "
u"komisyon tarafından reddedilmiştir. Gerekçe: %s") % (
self.object.proje.ad, self.object.karar_no, self.object.karar_gerekcesi)
self.bildirim_gonder(bildirim)
def ek_sure_talebi_kabul(self):
"""
Ek süre talebi kabul edildiğinde projenin süresi talep edilen ek süre eklenerek
güncellenir. Proje geçmişi güncellenir, iilgili öğretim üyesi bilgilendirilir.
"""
ek_sure_bilgileri = json.loads(self.object.gundem_ekstra_bilgiler)
self.object.proje.sure += int(ek_sure_bilgileri['ek_sure'])
self.object.proje.save()
eylem = 'Ek Süre Talebi Kabul'
aciklama = ', '.join(
["Ek süre talebi komisyon tarafından {} karar numarası ile kabul edildi.",
ek_sure_bilgileri['aciklama']])
self.islem_gecmisi_guncelle(eylem, aciklama)
bildirim = _(
u"{} adlı projeniz için yapmış olduğunuz ek süre talebi {} karar numarası ile "
u"komisyon tarafından kabul edilmiştir.".format(self.object.proje.ad,
self.object.karar_no))
self.bildirim_gonder(bildirim)
def ek_sure_talebi_red(self):
"""
Ek süre talebi reddedildiğinde proje geçmişi
güncellenir ve öğretim üyesi karar hakkında bilgilendirilir.
"""
eylem = 'Ek Süre Talebi Red'
aciklama = "Ek süre talebi komisyon tarafından {} karar numarası ile reddedildi."
self.islem_gecmisi_guncelle(eylem, aciklama)
bildirim = _(
u"%s adlı projeniz için yapmış olduğunuz ek süre talebi %s karar numarası ile "
u"komisyon tarafından reddedilmiştir. Gerekçe: %s") % (
self.object.proje.ad, self.object.karar_no, self.object.karar_gerekcesi)
self.bildirim_gonder(bildirim)
def proje_sonuc_raporu_basarili(self):
"""
Proje sonuç raporu kabul edildiğinde, proje raporunun durumu değiştirilir.
Proje geçmişi güncellenir ve öğretim üyesi karar hakkında bilgilendirilir.
"""
self.rapor_durum_degistir(2)
eylem = 'Proje Sonuç Raporu Kabulü'
aciklama = "Sonuç raporu komisyon tarafından {} karar numarası ile kabul edildi."
self.islem_gecmisi_guncelle(eylem, aciklama)
bildirim = _(
u"%s adlı projeniz için sunduğunuz sonuç raporu %s karar numarası ile "
u"komisyon tarafından kabul edilmiştir.") % (self.object.proje.ad, self.object.karar_no)
self.bildirim_gonder(bildirim)
def proje_sonuc_raporu_basarisiz(self):
"""
Proje sonuç raporu reddedildiğinde, proje raporunun durumu değiştirilir.
Proje geçmişi güncellenir ve öğretim üyesi karar hakkında bilgilendirilir.
"""
self.rapor_durum_degistir(3)
eylem = 'Proje Sonuç Raporu Reddi'
aciklama = "Sonuç raporu komisyon tarafından {} karar numarası ile reddedildi."
self.islem_gecmisi_guncelle(eylem, aciklama)
bildirim = _(
u"%s adlı projeniz için sunduğunuz sonuç raporu %s karar numarası ile "
u"komisyon tarafından reddedilmiştir. Gerekçe: %s") % (
self.object.proje.ad, self.object.karar_no, self.object.karar_gerekcesi)
self.bildirim_gonder(bildirim)
def proje_donem_raporu_basarili(self):
"""
Proje dönem raporu kabul edildiğinde, proje raporunun durumu değiştirilir.
Proje geçmişi güncellenir ve öğretim üyesi karar hakkında bilgilendirilir.
"""
self.rapor_durum_degistir(2)
eylem = 'Proje Dönem Raporu Kabulü'
aciklama = "Dönem raporu komisyon tarafından {} karar numarası ile kabul edildi."
self.islem_gecmisi_guncelle(eylem, aciklama)
bildirim = _(
u"%s adlı projeniz için sunduğunuz dönem raporu %s karar numarası ile komisyon "
u"tarafından kabul edilmiştir.") % (self.object.proje.ad, self.object.karar_no)
self.bildirim_gonder(bildirim)
def proje_donem_raporu_basarisiz(self):
"""
Proje dönem raporu reddedildiğinde, proje raporunun durumu değiştirilir.
Proje geçmişi güncellenir ve öğretim üyesi karar hakkında bilgilendirilir.
"""
self.rapor_durum_degistir(3)
eylem = 'Proje Dönem Raporu Reddi'
aciklama = "Dönem raporu komisyon tarafından {} karar numarası ile rededildi."
self.islem_gecmisi_guncelle(eylem, aciklama)
bildirim = _(
u"%s adlı projeniz için sunduğunuz dönem raporu %s karar numarası ile "
u"komisyon tarafından reddedilmiştir. Gerekçe: %s") % (
self.object.proje.ad, self.object.karar_no, self.object.karar_gerekcesi)
self.bildirim_gonder(bildirim)
def proje_iptal_talebi_kabul(self):
"""
Öğretim üyesinin proje iptal talebinin kabulünde, projenin durumu iptal anlamına gelen
8 yapılır. Projeye ait onaylanmış bütçe kalemlerinin durumu iptal edildi anlamına gelen
4 yapılır. Proje ile ilgili olan teklife açık satın alma duyurularının durumu iptal
anlamına gelen 4 yapılır.
"""
mevcut_butce = BAPButcePlani.mevcut_butce(proje=self.object.proje)
genel = BAPGenel.get()
genel.toplam_taahhut += mevcut_butce
genel.save()
eylem = "İptal Talebi Kabulü"
aciklama = "Projenin iptal talebi komisyon tarafından {} karar numarası ile kabul edildi."
self.islem_gecmisi_guncelle(eylem, aciklama, durum=8)
self.butce_kalemleri_durum_degistir(4)
for duyuru in BAPSatinAlma.objects.filter(ilgili_proje=self.object.proje, teklif_durum=1):
duyuru.teklif_durum = 4
duyuru.save()
bildirim = _(
u"%s adlı projeniz için yapmış olduğunuz iptal talebi %s karar numarası ile komisyon "
u"tarafından kabul edilmiştir.") % (self.object.proje.ad, self.object.karar_no)
self.bildirim_gonder(bildirim)
def proje_iptal_talebi_red(self):
"""
Öğretim üyesinin proje iptal talebinin reddinde, projenin işlem geçmişi güncellenir.
Öğretim üyesi bilgilendirilir.
"""
eylem = "İptal Talebi Reddi"
aciklama = "Projenin iptal talebi komisyon tarafından {} karar numarası ile reddedildi"
self.islem_gecmisi_guncelle(eylem, aciklama)
bildirim = _(
u"%s adlı projeniz için yapmış olduğunuz iptal talebi %s karar numarası ile komisyon "
u"tarafından reddedilmiştir. Gerekçe: %s") % (
self.object.proje.ad, self.object.karar_no, self.object.karar_gerekcesi)
self.bildirim_gonder(bildirim)
def yurutucu_degisikligi_kabul(self):
"""
Yürütücü değişikliği kabulünde, projenin yürütücüsü, talep edilen
öğretim üyesi ile güncellenir. Proje işlem geçmişi güncellenir,
talep eden öğretim üyesine bildirim gönderilir.
"""
yurutucu_bilgileri = json.loads(self.object.gundem_ekstra_bilgiler)
yeni_yurutucu_id = yurutucu_bilgileri['yeni_yurutucu_id']
yeni_yurutucu = Okutman.objects.get(yeni_yurutucu_id)
self.object.proje.yurutucu = yeni_yurutucu
self.object.proje.yurutucu.save()
eylem = "Yürütücü Değişikliği Talebi Kabulü"
aciklama = "Yürütücü değişikliği talebi komisyon " \
"tarafından {} karar numarası ile kabul edildi"
self.islem_gecmisi_guncelle(eylem, aciklama)
eski_yurutucu_bildirim = _(
u"%s adlı projeniz için yapmış olduğunuz yürütücü değişikliği talebi %s karar numarası "
u"ile komisyon tarafından kabul edilmiştir. Yeni yürütücü: %s") % (
self.object.proje.ad, self.object.karar_no,
            yeni_yurutucu.__unicode__())
self.bildirim_gonder(eski_yurutucu_bildirim)
        yeni_yurutucu_bildirim = _(
            u"%s karar numarası ile %s adlı projenin yeni yürütücüsü olmanız komisyon tarafından "
            u"onay verilmiştir.") % (
            self.object.karar_no,
            self.object.proje.ad)
yeni_yurutucu.personel.user.send_notification(title='Komisyon Kararı',
message=yeni_yurutucu_bildirim,
sender=self.user)
def yurutucu_degisikligi_red(self):
"""
Yürütücü değişikliği reddinde proje işlem geçmişi güncellenir,
talep eden öğretim üyesine bildirim gönderilir.
"""
eylem = "Yürütücü Değişikliği Talebi Reddi"
aciklama = "Yürütücü değişikliği talebi komisyon tarafından {} karar numarası ile reddedildi"
self.islem_gecmisi_guncelle(eylem, aciklama)
bildirim = _(
u"%s adlı projeniz için yapmış olduğunuz yürütücü değişikliği talebi %s karar numarası "
u"ile komisyon tarafından reddedilmiştir. Gerekçe: %s") % (
self.object.proje.ad, self.object.karar_no, self.object.karar_gerekcesi)
self.bildirim_gonder(bildirim)
def etkinlik_basvuru_kabul(self):
"""
Etkinlik başvuru kabulünde başvuru yapan öğretim üyesine bildirim gönderilir. Durumu
onaylandı anlamına gelen 2 yapılır.
"""
self.object.etkinlik.durum = 2
self.object.save()
bildirim = _(
u"Yapmış olduğunuz etkinlik başvurusu talebi %s karar numarası "
u"ile komisyon tarafından kabul edilmiştir.") % (self.object.karar_no)
self.object.etkinlik.basvuru_yapan.personel.user.send_notification(
title="Etkinlik Başvurusu Komisyon Kararı",
message=bildirim)
def etkinlik_basvuru_red(self):
"""
Etkinlik başvuru kabulünde başvuru yapan öğretim üyesine bildirim gönderilir. Durumu
reddedildi anlamına gelen 3 yapılır.
"""
self.object.etkinlik.durum = 3
self.object.save()
bildirim = _(
u"Yapmış olduğunuz etkinlik başvurusu talebi %s karar numarası "
u"ile komisyon tarafından reddedilmiştir. Gerekçe: %s") % (self.object.karar_no,
self.object.karar_gerekcesi)
self.object.etkinlik.basvuru_yapan.personel.user.send_notification(
title="Etkinlik Başvurusu Komisyon Kararı",
message=bildirim)
def islem_gecmisi_guncelle(self, eylem, aciklama, durum=None):
"""
Gönderilen eylem ve açıklama ile, seçilmiş gündemin projesinin işlem geçmişini günceller.
Args:
eylem(str): İşlem ana başlığı
aciklama(str): İşlemin içeriği
"""
self.object.proje.ProjeIslemGecmisi(aciklama=aciklama.format(self.object.karar_no),
eylem=eylem, tarih=datetime.now())
if durum:
self.object.proje.durum = durum
self.object.proje.save()
def bildirim_gonder(self, bildirim, role=None):
"""
Gönderilen bildirim mesajı ile, seçilmiş gündemin projesinin yürütücüsüne bildirim gönderir.
Args:
bildirim(str): Gönderilecek bildirim mesajı.
"""
role = role or self.object.proje.basvuru_rolu
role.send_notification(
title=_(u"Komisyon Kararı"),
message=bildirim,
sender=self.user)
def butce_kalemleri_durum_degistir(self, durum, kalemler=None):
"""
Eğer var ise değişiklik istenen kalemler ile yoksa default olarak
gündemin projesinin bütçe kalemleri istenen durum ile değiştirilir.
Args:
durum(int): Kalemlerin durumu (1: Onaylandi, 2: Reddedildi gibi.)
kalemler(list): durumu değiştirilmesi istenen kalemler.
"""
kalemler = kalemler or BAPButcePlani.objects.filter(ilgili_proje=self.object.proje)
for kalem in kalemler:
kalem.proje_durum = durum
kalem.save()
def rapor_durum_degistir(self, durum):
"""
Gönderilen durum ile raporun durumu güncellenir.
Args:
durum(int): Raporun durumu
"""
rapor_id = json.loads(self.object.gundem_ekstra_bilgiler).get('rapor')
rapor = BAPRapor.objects.get(rapor_id)
rapor.durum = durum
rapor.save()
def butce_fisi_is_akisini_tetikle(self):
"""
Projenin kabulü sonrası, bütçe fişi iş akışını çalıştırma izini olan personele davet
yollanır.
"""
wf = BPMNWorkflow.objects.get(name='bap_butce_fisi')
perm = Permission.objects.get('bap_butce_fisi')
sistem_user = User.objects.get(username='sistem_bilgilendirme')
today = datetime.today()
for role in perm.get_permitted_roles():
wfi = WFInstance(
wf=wf,
current_actor=role,
task=None,
name=wf.name,
wf_object=self.object.proje.key
)
wfi.data = {'bap_proje_id': self.object.proje.key}
wfi.pool = {}
wfi.blocking_save()
role.send_notification(title=_(u"{} | {} | Bütçe Fişi İş Akışı".format(
self.object.proje.yurutucu.__unicode__(),
self.object.proje.ad)),
message=_(u"""{} adlı onaylanmış projenin bütçe fişi kesilmesi gerekmektedir.
Görev yöneticinizden ilgili isteğe ulaşabilir,
iş akışını çalıştırabilirsiniz.""".format(self.object.proje.ad)),
typ=1,
sender=sistem_user
)
inv = TaskInvitation(
instance=wfi,
role=role,
wf_name=wfi.wf.name,
progress=30,
start_date=today,
finish_date=today + timedelta(15)
)
inv.title = _(u"{} | {} | Bütçe Fişi İş Akışı".format(
self.object.proje.yurutucu.__unicode__(),
self.object.proje.ad))
inv.save()
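
# --- Kullanım taslağı (usage sketch, illustrative only) ---
# A hedged example of how a commission decision might be dispatched to the
# matching handler above; `gundem`, `user` and the `tip` attribute are
# hypothetical placeholders for the workflow's actual objects.
#
# adimlar = KomisyonKarariSonrasiAdimlar(gundem, user)
# tip_adi = gundem_kararlari[gundem.tip]['tip_adi']      # e.g. 'proje_basvurusu'
# karar = gundem_kararlari[gundem.tip]['default']        # e.g. 'kabul'
# getattr(adimlar, '{}_{}'.format(tip_adi, karar))()     # calls proje_basvurusu_kabul()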
|
zetaops/ulakbus
|
ulakbus/lib/komisyon_sonrasi_adimlar.py
|
Python
|
gpl-3.0
| 23,613
|
#!/usr/bin/env python
#coding:utf-8
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from grappelli.dashboard import modules, Dashboard
# from grappelli.dashboard.utils import get_admin_site_name
class AllRecentActions(modules.DashboardModule):
"""
Module that lists the recent actions for the current user.
"""
title = _('All Recent Actions')
template = 'grappelli/dashboard/modules/recent_actions_all.html'
limit = 10
include_list = None
exclude_list = None
def __init__(self, title=None, limit=10, include_list=None,
exclude_list=None, **kwargs):
self.include_list = include_list or []
self.exclude_list = exclude_list or []
kwargs.update({'limit': limit})
super(AllRecentActions, self).__init__(title, **kwargs)
def init_with_context(self, context):
if self._initialized:
return
from django.db.models import Q
from django.contrib.admin.models import LogEntry
        def get_qset(contenttypes):
            qset = None
            for contenttype in contenttypes:
if isinstance(contenttype, ContentType):
current_qset = Q(content_type__id=contenttype.id)
else:
try:
app_label, model = contenttype.split('.')
except:
raise ValueError(
'Invalid contenttype: "%s"' % contenttype)
current_qset = Q(
content_type__app_label=app_label,
content_type__model=model
)
if qset is None:
qset = current_qset
else:
qset = qset | current_qset
return qset
qs = LogEntry.objects.all()
if self.include_list:
qs = qs.filter(get_qset(self.include_list))
if self.exclude_list:
qs = qs.exclude(get_qset(self.exclude_list))
self.children = qs.select_related('content_type', 'user')[:self.limit]
self._initialized = True
class CustomIndexDashboard(Dashboard):
"""
Custom index dashboard for www.
"""
def init_with_context(self, context):
# site_name = get_admin_site_name(context)
# append a group for "Administration" & "Applications"
self.children.append(modules.AppList(
u'Yönetim',
column=1,
collapsible=True,
css_classes=('grp-closed',),
models=('django.contrib.*',),
))
self.children.append(modules.ModelList(
title=u'Çerçi',
column=1,
models=('cerci_issue.*', 'cerci_content.*', 'cerci_newsletters.*')
))
# append a recent actions module
self.children.append(modules.RecentActions(
u'Benim İşlem Geçmişim',
limit=10,
collapsible=True,
column=2,
))
# append a recent actions module
self.children.append(AllRecentActions(
u'Tüm İşlem Geçmişi',
limit=10,
collapsible=True,
column=3,
))
self.children.append(modules.LinkList(
title='Linkler',
layout='inline',
column=2,
children=(
{
'title': 'Anasayfa',
'url': 'http://cercisanat.com',
'external': False,
'description': u'Çerçi Sanat',
},
{
'title': u'Çerçi e-posta',
'url': 'http://mail.cercisanat.com',
'external': True,
'description': u'Çerçi Sanat E-posta',
},
{
'title': u'Önbelleği sil',
'url': '/admin/cerci_cache/clear/?next=/admin/',
'external': False,
                    'description': u'Önbelleği sil'
}
)
))
|
cercisanat/cercisanat.com
|
cerci/dashboard.py
|
Python
|
gpl-3.0
| 4,170
|
from .network import SaveProtocol
class Capture:
"""PlanetaryImager capture manager.
Handles start, stop and pause recording, and gets information about fps and saved/dropped frames.
Can also be configured with callbacks to be executed when these events occur.
You can set callbacks by adding the proper key to the dictionary `callbacks`.
Callbacks to be called when capturing events occur.
Key: string (name of callback).
Value: function.
Supported callbacks:
- on_recording_started: Sets a callback function to be invoked when recording starts.
- Callback signature: function(filename)
- on_recording_finished: Sets a callback function to be invoked when recording ends.
- Callback signature: function()
- on_save_fps: Sets a callback function to be invoked when receiving save fps info.
- Callback signature: function(float)
- on_save_mean_fps: Sets a callback function to be invoked when receiving mean save fps info.
- Callback signature: function(float)
- on_saved_frames: Sets a callback function to be invoked when receiving saved frames info.
- Callbacks signature: function(int)
- on_dropped_frames: Sets a callback function to be invoked when receiving dropped frames info.
- Callbacks signature: function(int)
"""
def __init__(self, client):
"""Mean fps on saving frames."""
self.mean_save_fps = 0
"""Current fps on saving frames."""
self.save_fps = 0
"""Total frames saved."""
self.saved_frames = 0
"""Total frames dropped."""
self.dropped_frames = 0
"""Boolean flag to indicate if Planetary Imager is currently recording."""
self.is_recording = False
self.__recording_filename = None
self.callbacks = {}
self.__saveprotocol = SaveProtocol(client)
self.__saveprotocol.on_signal_recording(self.__handle_signal_start_recording)
self.__saveprotocol.on_signal_end_recording(self.__handle_signal_end_recording)
self.__saveprotocol.on_signal_mean_fps(self.__handle_mean_fps)
self.__saveprotocol.on_signal_save_fps(self.__handle_save_fps)
self.__saveprotocol.on_signal_saved_frames(self.__handle_saved_frames)
self.__saveprotocol.on_signal_dropped_frames(self.__handle_dropped_frames)
def start_recording(self):
self.__saveprotocol.start_recording()
def end_recording(self):
self.__saveprotocol.end_recording()
def pause(self):
self.__saveprotocol.set_paused(True)
def resume(self):
self.__saveprotocol.set_paused(False)
@property
def recording_filename(self):
return self.__recording_filename
def __handle_signal_start_recording(self, filename):
self.is_recording = True
self.mean_save_fps = 0
self.save_fps = 0
self.saved_frames = 0
self.dropped_frames = 0
self.__recording_filename = filename
self.__invoke_callback('on_recording_started', filename)
def __handle_signal_end_recording(self):
self.is_recording = False
self.__recording_filename = None
self.__invoke_callback('on_recording_finished')
def __handle_mean_fps(self, fps):
self.mean_save_fps = fps
self.__invoke_callback('on_save_mean_fps', fps)
def __handle_save_fps(self, fps):
self.save_fps = fps
self.__invoke_callback('on_save_fps', fps)
def __handle_saved_frames(self, frames):
self.saved_frames = frames
self.__invoke_callback('on_saved_frames', frames)
def __handle_dropped_frames(self, frames):
self.dropped_frames = frames
self.__invoke_callback('on_dropped_frames', frames)
def __invoke_callback(self, name, *args, **kwargs):
if name in self.callbacks:
self.callbacks[name](*args, **kwargs)
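
# --- Usage sketch (illustrative only) ---
# A hedged example of wiring callbacks into Capture; `client` stands for an
# already-connected PlanetaryImager scripting client and is hypothetical here.
#
# capture = Capture(client)
# capture.callbacks['on_recording_started'] = lambda filename: print('recording ->', filename)
# capture.callbacks['on_saved_frames'] = lambda n: print('saved frames:', n)
# capture.start_recording()
# ...                      # wait while frames are being saved
# capture.end_recording()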
|
GuLinux/PlanetaryImager
|
scripting_client/planetaryimager/capture.py
|
Python
|
gpl-3.0
| 3,976
|
import bpy
# -----------------------------------------------------------------------------
# Substance Project panel
# Draw the UI panel, only the Substance project options :
# - Create a new Substance Project
# - Remove from a Substance Project
# - Export and re-export
# -----------------------------------------------------------------------------
class SubstanceProjectPanel(bpy.types.Panel):
bl_idname = "OBJECT_PT_substance_project"
bl_label = "Substance Project"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Substances"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
scn = context.scene
obj = context.object
act = context.active_object
data = scn.sbs_project_settings
row = layout.row(align=True)
if act:
# Check if this object as an Sbs Project.
if act.get('substance_project') is not None:
sbs_obj = bpy.context.active_object['substance_project']
scene_name = bpy.context.scene.name
scene = bpy.data.scenes[scene_name]['sbs_project_settings']
scene['prj_name'] = sbs_obj
row.prop(data, 'prj_name', text="")
# Panel when the selected object has no Substance Project
if obj.get("substance_project") is None:
icon = "ZOOMIN"
row.operator("sbs_painter.substance_name", text="", icon=icon)
# Not Substance Project in this blend-file
layout.label("Create a New Project")
# If the mesh use a Substance Project
else:
icon = "ZOOMIN"
row.operator("sbs_painter.substance_name", text="", icon=icon)
icon = "RESTRICT_SELECT_OFF"
row.operator("sbs_painter.selected_project", text="", icon=icon)
icon = "PANEL_CLOSE"
row.operator("sbs_painter.remove_from_project", text="", icon=icon)
name = "Export New Project"
ops = "substance.painter_export"
layout.operator(ops, name).project = False
data = scn.sbs_project_settings
layout.prop(data, 'path_spp', text="")
name = 'Export Update'
icon = 'FILE_REFRESH'
layout.operator(ops, name, icon=icon).project = True
def register():
bpy.utils.register_class(SubstanceProjectPanel)
def unregister():
bpy.utils.unregister_class(SubstanceProjectPanel)
|
stilobique/SubstanceBridge
|
views/substanceproject.py
|
Python
|
gpl-3.0
| 2,575
|
"""
This file is part of Uzu.
Uzu is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Uzu is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with Uzu. If not, see <http://www.gnu.org/licenses/>.
"""
from uzu.db.field.core import *
from uzu.db.field.relations import *
|
NoZip/uzu
|
uzu/db/field/__init__.py
|
Python
|
gpl-3.0
| 710
|
import libtcodpy as libtcod
import gameconfig
def target_tile(max_range=None):
global key, mouse
# returns x, y of a tile selected by a mouseclick
while True:
libtcod.console_flush()
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
render_all()
(x, y) = (mouse.cx, mouse.cy)
if (mouse.lbutton_pressed and libtcod.map_is_in_fov(fov_map, x, y) and
(max_range is None or player.distance(x, y) <= max_range)):
return(x, y)
if mouse.rbutton_pressed or key.vk == libtcod.KEY_ESCAPE:
return(None, None)
def target_npc(max_range=None):
# select NPC in range
while True:
(x, y) = target_tile(max_range)
if x is None:
return None
for obj in objects:
if obj.x == x and obj.y == y and obj.fighter and obj != player:
return obj
def get_names_under_mouse():
# return name of object under mouse pointer
global mouse
(x, y) = (mouse.cx, mouse.cy)
names = [obj.name for obj in objects
if obj.x == x and obj.y == y and libtcod.map_is_in_fov(fov_map, obj.x, obj.y)]
names = ', '.join(names)
return names.capitalize()
def handle_keys():
global playerx, playery, fov_recompute, key
# primary game controls
if key.vk == libtcod.KEY_ENTER and key.lalt:
#Alt+Enter: toggle fullscreen
libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
elif key.vk == libtcod.KEY_ESCAPE:
selected = 0
return('exit')
    # 8-directional movement: arrow keys or numpad
if key.vk == libtcod.KEY_UP or key.vk == libtcod.KEY_KP8:
player_move_or_attack(0, -1)
elif key.vk == libtcod.KEY_DOWN or key.vk == libtcod.KEY_KP2:
player_move_or_attack(0, 1)
elif key.vk == libtcod.KEY_LEFT or key.vk == libtcod.KEY_KP4:
player_move_or_attack(-1, 0)
elif key.vk == libtcod.KEY_RIGHT or key.vk == libtcod.KEY_KP6:
player_move_or_attack(1, 0)
elif key.vk == libtcod.KEY_KP7:
player_move_or_attack(-1, -1)
elif key.vk == libtcod.KEY_KP9:
player_move_or_attack(1, -1)
elif key.vk == libtcod.KEY_KP1:
player_move_or_attack(-1, 1)
elif key.vk == libtcod.KEY_KP3:
player_move_or_attack(1, 1)
elif key.vk == libtcod.KEY_KP5:
message('You wait a turn for the darkness to close in on you.', libtcod.white)
pass
else:
# additional game commands
key_char = chr(key.c)
# pick up an item
if key_char == 'g':
for obj in objects:
if obj.x == player.x and obj.y == player.y and obj.item:
obj.item.pick_up()
break
# go down stairs if player is on them
if key_char == ',' or key_char == '.':
if stairs.x == player.x and stairs.y == player.y:
next_level()
# display inventory
if key_char == 'i':
selection = -1
chosen_item = inventory_menu('Press the key next to an item to use it, or ESC to cancel\n')
if chosen_item is not None:
chosen_item.use()
# drop item
if key_char == 'd':
chosen_item = inventory_menu('Press the key next to an item to drop it.\n')
if chosen_item is not None:
chosen_item.drop()
# show character info
if key_char == 'c':
level_up_xp = LEVEL_UP_BASE + player.level * LEVEL_UP_FACTOR
message_box('Character Information\n\nLevel: ' + str(player.level) + '\nExperience: ' + str(player.fighter.xp) +
'\nExperience to level up: ' + str(level_up_xp) + '\n\nMaximum HP: ' + str(player.fighter.max_hp) +
'\nAttack: ' + str(player.fighter.power) + '\nDefense: ' + str(player.fighter.defense), 24)
# toggle fullscreen
if key_char == 'f':
libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
return('no turn') # nothing valid happened
return('playing') # carry on
|
nerdyLawman/pyhack
|
src/interface/controls.py
|
Python
|
gpl-3.0
| 4,117
|
# -*- coding: utf-8 -*-
# daemon/runner.py
# Part of ‘python-daemon’, an implementation of PEP 3143.
#
# Copyright © 2009–2016 Ben Finney <ben+python@benfinney.id.au>
# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
# Copyright © 2003 Clark Evans
# Copyright © 2002 Noah Spurrier
# Copyright © 2001 Jürgen Hermann
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Apache License, version 2.0 as published by the
# Apache Software Foundation.
# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
""" Daemon runner library.
"""
from __future__ import (absolute_import, unicode_literals)
import errno
import os
import signal
import sys
import warnings
import lockfile
from . import pidfile
from .daemon import DaemonContext
from .daemon import _chain_exception_from_existing_exception_context
from .daemon import (basestring, unicode)
try:
# Python 3 standard library.
ProcessLookupError
except NameError:
# No such class in Python 2.
ProcessLookupError = NotImplemented
__metaclass__ = type
warnings.warn(
"The ‘runner’ module is not a supported API for this library.",
PendingDeprecationWarning)
class DaemonRunnerError(Exception):
""" Abstract base class for errors from DaemonRunner. """
def __init__(self, *args, **kwargs):
self._chain_from_context()
super(DaemonRunnerError, self).__init__(*args, **kwargs)
def _chain_from_context(self):
_chain_exception_from_existing_exception_context(self, as_cause=True)
class DaemonRunnerInvalidActionError(DaemonRunnerError, ValueError):
""" Raised when specified action for DaemonRunner is invalid. """
def _chain_from_context(self):
# This exception is normally not caused by another.
_chain_exception_from_existing_exception_context(self, as_cause=False)
class DaemonRunnerStartFailureError(DaemonRunnerError, RuntimeError):
""" Raised when failure starting DaemonRunner. """
class DaemonRunnerStopFailureError(DaemonRunnerError, RuntimeError):
""" Raised when failure stopping DaemonRunner. """
class DaemonRunner:
""" Controller for a callable running in a separate background process.
The first command-line argument is the action to take:
* 'start': Become a daemon and call `app.run()`.
* 'stop': Exit the daemon process specified in the PID file.
* 'restart': Stop, then start.
"""
start_message = "started with pid {pid:d}"
def __init__(self, app):
""" Set up the parameters of a new runner.
:param app: The application instance; see below.
:return: ``None``.
The `app` argument must have the following attributes:
* `stdin_path`, `stdout_path`, `stderr_path`: Filesystem paths
to open and replace the existing `sys.stdin`, `sys.stdout`,
`sys.stderr`.
* `pidfile_path`: Absolute filesystem path to a file that will
be used as the PID file for the daemon. If ``None``, no PID
file will be used.
* `pidfile_timeout`: Used as the default acquisition timeout
value supplied to the runner's PID lock file.
* `run`: Callable that will be invoked when the daemon is
started.
"""
self.parse_args()
self.app = app
self.daemon_context = DaemonContext()
self.daemon_context.stdin = open(app.stdin_path, 'rt')
self.daemon_context.stdout = open(app.stdout_path, 'w+t')
self.daemon_context.stderr = open(
app.stderr_path, 'w+t', buffering=0)
self.pidfile = None
if app.pidfile_path is not None:
self.pidfile = make_pidlockfile(
app.pidfile_path, app.pidfile_timeout)
self.daemon_context.pidfile = self.pidfile
def _usage_exit(self, argv):
""" Emit a usage message, then exit.
:param argv: The command-line arguments used to invoke the
program, as a sequence of strings.
:return: ``None``.
"""
progname = os.path.basename(argv[0])
usage_exit_code = 2
action_usage = "|".join(self.action_funcs.keys())
message = "usage: {progname} {usage}".format(
progname=progname, usage=action_usage)
emit_message(message)
sys.exit(usage_exit_code)
def parse_args(self, argv=None):
""" Parse command-line arguments.
:param argv: The command-line arguments used to invoke the
program, as a sequence of strings.
:return: ``None``.
The parser expects the first argument as the program name, the
second argument as the action to perform.
If the parser fails to parse the arguments, emit a usage
message and exit the program.
"""
if argv is None:
argv = sys.argv
min_args = 2
if len(argv) < min_args:
self._usage_exit(argv)
self.action = unicode(argv[1])
if self.action not in self.action_funcs:
self._usage_exit(argv)
def _start(self):
""" Open the daemon context and run the application.
:return: ``None``.
:raises DaemonRunnerStartFailureError: If the PID file cannot
be locked by this process.
"""
if is_pidfile_stale(self.pidfile):
self.pidfile.break_lock()
try:
self.daemon_context.open()
except lockfile.AlreadyLocked:
error = DaemonRunnerStartFailureError(
"PID file {pidfile.path!r} already locked".format(
pidfile=self.pidfile))
raise error
pid = os.getpid()
message = self.start_message.format(pid=pid)
emit_message(message)
self.app.run()
def _terminate_daemon_process(self):
""" Terminate the daemon process specified in the current PID file.
:return: ``None``.
:raises DaemonRunnerStopFailureError: If terminating the daemon
fails with an OS error.
"""
pid = self.pidfile.read_pid()
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
error = DaemonRunnerStopFailureError(
"Failed to terminate {pid:d}: {exc}".format(
pid=pid, exc=exc))
raise error
def _stop(self):
""" Exit the daemon process specified in the current PID file.
:return: ``None``.
:raises DaemonRunnerStopFailureError: If the PID file is not
already locked.
"""
if not self.pidfile.is_locked():
error = DaemonRunnerStopFailureError(
"PID file {pidfile.path!r} not locked".format(
pidfile=self.pidfile))
raise error
if is_pidfile_stale(self.pidfile):
self.pidfile.break_lock()
else:
self._terminate_daemon_process()
def _restart(self):
""" Stop, then start.
"""
self._stop()
self._start()
action_funcs = {
'start': _start,
'stop': _stop,
'restart': _restart,
}
def _get_action_func(self):
""" Get the function for the specified action.
:return: The function object corresponding to the specified
action.
:raises DaemonRunnerInvalidActionError: if the action is
unknown.
The action is specified by the `action` attribute, which is set
during `parse_args`.
"""
try:
func = self.action_funcs[self.action]
except KeyError:
error = DaemonRunnerInvalidActionError(
"Unknown action: {action!r}".format(
action=self.action))
raise error
return func
def do_action(self):
""" Perform the requested action.
:return: ``None``.
The action is specified by the `action` attribute, which is set
during `parse_args`.
"""
func = self._get_action_func()
func(self)
def emit_message(message, stream=None):
""" Emit a message to the specified stream (default `sys.stderr`). """
if stream is None:
stream = sys.stderr
stream.write("{message}\n".format(message=message))
stream.flush()
def make_pidlockfile(path, acquire_timeout):
""" Make a PIDLockFile instance with the given filesystem path. """
if not isinstance(path, basestring):
error = ValueError("Not a filesystem path: {path!r}".format(
path=path))
raise error
if not os.path.isabs(path):
error = ValueError("Not an absolute path: {path!r}".format(
path=path))
raise error
lockfile = pidfile.TimeoutPIDLockFile(path, acquire_timeout)
return lockfile
def is_pidfile_stale(pidfile):
""" Determine whether a PID file is stale.
:return: ``True`` iff the PID file is stale; otherwise ``False``.
The PID file is “stale” if its contents are valid but do not
match the PID of a currently-running process.
"""
result = False
pidfile_pid = pidfile.read_pid()
if pidfile_pid is not None:
try:
os.kill(pidfile_pid, signal.SIG_DFL)
except ProcessLookupError:
# The specified PID does not exist.
result = True
except OSError as exc:
if exc.errno == errno.ESRCH:
# Under Python 2, process lookup error is an OSError.
# The specified PID does not exist.
result = True
return result
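
# --- Usage sketch (illustrative only) ---
# A minimal, hedged example of an application object for DaemonRunner; the
# class name, file paths and the trivial run() loop below are hypothetical.
#
# class ExampleApp(object):
#     stdin_path = '/dev/null'
#     stdout_path = '/tmp/example-daemon.log'
#     stderr_path = '/tmp/example-daemon.err'
#     pidfile_path = '/tmp/example-daemon.pid'
#     pidfile_timeout = 5
#
#     def run(self):
#         while True:
#             pass  # daemon work goes here
#
# runner = DaemonRunner(ExampleApp())
# runner.do_action()  # action comes from sys.argv[1]: start | stop | restart
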
# Local variables:
# coding: utf-8
# mode: python
# End:
# vim: fileencoding=utf-8 filetype=python :
|
dsu/orchestration-utilities
|
libs/daemon/runner.py
|
Python
|
gpl-3.0
| 10,099
|
# -*- coding: utf-8 -*-
from dec import dec
class Distribution():
def __init__(self, distd):
self.distd = distd
def calc(self, val, decimals=2):
sorted_keys = sorted(self.distd)
distlist = []
for el in sorted_keys:
distlist.append(dec(self.distd[el], decimals))
tmpl = []
val = dec(val, decimals)
tar = dec(sum(distlist), decimals)
for el in distlist:
tmpl.append(dec(val * el / tar , decimals))
nval = sum(tmpl)
dif = val - nval
if dif != 0:
tmpl[tmpl.index(max(tmpl))] += dif
dist = {}
for i, el in enumerate(sorted_keys):
dist[el] = tmpl[i]
return dist
def check(self, val, decimals=2):
ad = self.calc(val, decimals)
tmp = dec(0)
for el in ad.keys():
tmp += ad[el]
return (dec(val, decimals) == tmp)
@property
def xiliosta(self):
return self.calc(1000, 0)
@property
def xiliostast(self):
dic = self.xiliosta
st = ''
for key in sorted(dic.keys()):
st += '%20s : %20s\n' % (key, dic[key])
return st
if __name__ == '__main__':
asanser = {'a': 204, 'b': 159, 'c': 243, 'd': 120, 'e': 274}
disasans = Distribution(asanser)
print(disasans.xiliostast)
print(disasans.calc(34.467, 3))
print(disasans.check(34.467, 3))
|
tedlaz/pyted
|
functions/dist.py
|
Python
|
gpl-3.0
| 1,427
|
numbers = list(range(1, 10)) + list(range(11, 20)) + list(range(10, 100, 10))
numbers_by_name = {input(): x for x in numbers}
s = 0
N = int(input())
for k in range(N):
x = 0
line = input()
for word in line.split():
if word in numbers_by_name:
x += numbers_by_name[word]
else:
            x = 0  # invalid number word!
break
s += x
print(s)
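
# --- How this script is driven (explanatory note, hedged) ---
# The dict comprehension above reads 27 lines of number names via input(),
# in this order: names for 1..9, then 11..19, then 10, 20, ..., 90.
# It then reads N and N lines of spelled-out numbers; each line's word values
# are summed, and a line containing an unknown word contributes 0 to the total.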
|
tkhirianov/fox_inf10_2016
|
lesson28/written_numbers.py
|
Python
|
gpl-3.0
| 412
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Launcher for an external editor."""
import os
import tempfile
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QProcess
from qutebrowser.config import config
from qutebrowser.utils import message, log
from qutebrowser.misc import guiprocess
class ExternalEditor(QObject):
"""Class to simplify editing a text in an external editor.
Attributes:
_text: The current text before the editor is opened.
_oshandle: The OS level handle to the tmpfile.
_filehandle: The file handle to the tmpfile.
_proc: The GUIProcess of the editor.
_win_id: The window ID the ExternalEditor is associated with.
"""
editing_finished = pyqtSignal(str)
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._text = None
self._oshandle = None
self._filename = None
self._proc = None
self._win_id = win_id
def _cleanup(self):
"""Clean up temporary files after the editor closed."""
if self._oshandle is None or self._filename is None:
# Could not create initial file.
return
try:
os.close(self._oshandle)
os.remove(self._filename)
except OSError as e:
# NOTE: Do not replace this with "raise CommandError" as it's
# executed async.
message.error(self._win_id,
"Failed to delete tempfile... ({})".format(e))
def on_proc_closed(self, exitcode, exitstatus):
"""Write the editor text into the form field and clean up tempfile.
Callback for QProcess when the editor was closed.
"""
log.procs.debug("Editor closed")
if exitstatus != QProcess.NormalExit:
# No error/cleanup here, since we already handle this in
# on_proc_error.
return
try:
if exitcode != 0:
return
encoding = config.get('general', 'editor-encoding')
try:
with open(self._filename, 'r', encoding=encoding) as f:
text = ''.join(f.readlines()) # pragma: no branch
except OSError as e:
# NOTE: Do not replace this with "raise CommandError" as it's
# executed async.
message.error(self._win_id, "Failed to read back edited file: "
"{}".format(e))
return
log.procs.debug("Read back: {}".format(text))
self.editing_finished.emit(text)
finally:
self._cleanup()
@pyqtSlot(QProcess.ProcessError)
def on_proc_error(self, _err):
self._cleanup()
def edit(self, text):
"""Edit a given text.
Args:
text: The initial text to edit.
"""
if self._text is not None:
raise ValueError("Already editing a file!")
self._text = text
try:
self._oshandle, self._filename = tempfile.mkstemp(
text=True, prefix='qutebrowser-editor-')
if text:
encoding = config.get('general', 'editor-encoding')
with open(self._filename, 'w', encoding=encoding) as f:
f.write(text) # pragma: no branch
except OSError as e:
message.error(self._win_id, "Failed to create initial file: "
"{}".format(e))
return
self._proc = guiprocess.GUIProcess(self._win_id, what='editor',
parent=self)
self._proc.finished.connect(self.on_proc_closed)
self._proc.error.connect(self.on_proc_error)
editor = config.get('general', 'editor')
executable = editor[0]
args = [self._filename if arg == '{}' else arg for arg in editor[1:]]
log.procs.debug("Calling \"{}\" with args {}".format(executable, args))
self._proc.start(executable, args)
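# --- Editor's note: hypothetical usage sketch, not part of the original file. ---
# It assumes a running QApplication and an initialized qutebrowser config in
# which 'general -> editor' and 'general -> editor-encoding' are set; the
# function name below is made up for illustration.
def open_in_editor(win_id, initial_text):
    editor = ExternalEditor(win_id)
    # The edited text arrives asynchronously once the editor process exits.
    editor.editing_finished.connect(lambda text: print("edited text:", text))
    editor.edit(initial_text)
    return editor  # keep a reference so the QObject is not garbage-collected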
|
r8b7xy/qutebrowser
|
qutebrowser/misc/editor.py
|
Python
|
gpl-3.0
| 4,806
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-07 17:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0005_auto_20170326_1520'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='modified',
new_name='updated',
),
]
|
devolio/devolio
|
users/migrations/0006_auto_20170507_1704.py
|
Python
|
gpl-3.0
| 430
|
'''
SELECT DISTINCT * {
?a a <http://www.w3.org/2002/07/owl#Thing>.
?a a <http://dbpedia.org/class/yago/Anatomy106057539>.
?a a <http://dbpedia.org/ontology/AnatomicalStructure>.
?a ((!a)*/<http://dbpedia.org/ontology/drainsTo>)|(<http://dbpedia.org/ontology/drainsTo>/(!a)*) ?b.
?b a <http://www.w3.org/2002/07/owl#Thing>.
?b a <http://dbpedia.org/ontology/Vein>.
?b a <http://dbpedia.org/ontology/AnatomicalStructure>.
} LIMIT 100
'''
from RDFTypeSummary import *
import networkx as nx
mysum=RDFTypeSummary("/home/spyros/Documents/Paper-Evaluation-2/RDF-Type-Summary-graphs-new/benchmark_1.summary")
q_s=frozenset(["http://www.w3.org/2002/07/owl#Thing","http://dbpedia.org/class/yago/Anatomy106057539","http://dbpedia.org/ontology/AnatomicalStructure"])
q_t=frozenset(["http://www.w3.org/2002/07/owl#Thing","http://dbpedia.org/ontology/Vein","http://dbpedia.org/ontology/AnatomicalStructure"])
q_p="http://dbpedia.org/ontology/drainsTo"
q_l=2
mysum.execute_query(q_s,q_p,q_t,q_l)
|
SWRG/ESWC2015-paper-evaluation
|
test_2.py
|
Python
|
gpl-3.0
| 984
|
RIOT_API_KEY = "RGAPI-SOME-LONG-STRING"
|
patillacode/tilt-o-meter
|
flaskr/secrets.sample.py
|
Python
|
gpl-3.0
| 40
|
#!/usr/bin/env python2
import tools
tools.login("kevin", "", get_xslt=False, parse_and_transform=False, do_static=False)
tools.get_choix(1, "Root", want_static=False, want_background=False, parse_and_transform=False)
tools.get_a_page("Database?action=TRUNK", save=False, parse_and_transform=False)
page = tools.get_a_page("Database?action=IMPORT", save=False, parse_and_transform=False)
correct = "Export OK" in page
print "Correct ? %s" % correct
exit(0 if correct else 1)
|
wazari972/WebAlbums
|
WebAlbums-Downloader/git_bisect.py
|
Python
|
gpl-3.0
| 480
|
from __future__ import absolute_import
from .compat import _bytes, _str
from .VixHandle import VixHandle
from .VixError import VixError
from vix import _backend, API_ENCODING
import datetime
vix = _backend._vix
ffi = _backend._ffi
class VixJob(VixHandle):
    """Represents a VIX Job handle.
.. note:: Internal use.
"""
VIX_PROPERTY_JOB_RESULT_ERROR_CODE = 3000
VIX_PROPERTY_JOB_RESULT_VM_IN_GROUP = 3001
VIX_PROPERTY_JOB_RESULT_USER_MESSAGE = 3002
VIX_PROPERTY_JOB_RESULT_EXIT_CODE = 3004
VIX_PROPERTY_JOB_RESULT_COMMAND_OUTPUT = 3005
VIX_PROPERTY_JOB_RESULT_HANDLE = 3010
VIX_PROPERTY_JOB_RESULT_GUEST_OBJECT_EXISTS = 3011
VIX_PROPERTY_JOB_RESULT_GUEST_PROGRAM_ELAPSED_TIME = 3017
VIX_PROPERTY_JOB_RESULT_GUEST_PROGRAM_EXIT_CODE = 3018
VIX_PROPERTY_JOB_RESULT_ITEM_NAME = 3035
VIX_PROPERTY_JOB_RESULT_FOUND_ITEM_DESCRIPTION = 3036
VIX_PROPERTY_JOB_RESULT_SHARED_FOLDER_COUNT = 3046
VIX_PROPERTY_JOB_RESULT_SHARED_FOLDER_HOST = 3048
VIX_PROPERTY_JOB_RESULT_SHARED_FOLDER_FLAGS = 3049
VIX_PROPERTY_JOB_RESULT_PROCESS_ID = 3051
VIX_PROPERTY_JOB_RESULT_PROCESS_OWNER = 3052
VIX_PROPERTY_JOB_RESULT_PROCESS_COMMAND = 3053
VIX_PROPERTY_JOB_RESULT_FILE_FLAGS = 3054
VIX_PROPERTY_JOB_RESULT_PROCESS_START_TIME = 3055
VIX_PROPERTY_JOB_RESULT_VM_VARIABLE_STRING = 3056
VIX_PROPERTY_JOB_RESULT_PROCESS_BEING_DEBUGGED = 3057
VIX_PROPERTY_JOB_RESULT_SCREEN_IMAGE_SIZE = 3058
VIX_PROPERTY_JOB_RESULT_SCREEN_IMAGE_DATA = 3059
VIX_PROPERTY_JOB_RESULT_FILE_SIZE = 3061
VIX_PROPERTY_JOB_RESULT_FILE_MOD_TIME = 3062
VIX_PROPERTY_JOB_RESULT_EXTRA_ERROR_INFO = 3084
VIX_FILE_ATTRIBUTES_DIRECTORY = 0x0001
VIX_FILE_ATTRIBUTES_SYMLINK = 0x0002
STR_RESULT_TYPES = (
VIX_PROPERTY_JOB_RESULT_ITEM_NAME,
VIX_PROPERTY_JOB_RESULT_VM_VARIABLE_STRING,
VIX_PROPERTY_JOB_RESULT_COMMAND_OUTPUT,
VIX_PROPERTY_JOB_RESULT_PROCESS_OWNER,
VIX_PROPERTY_JOB_RESULT_PROCESS_COMMAND,
VIX_PROPERTY_JOB_RESULT_SHARED_FOLDER_HOST,
)
def __init__(self, handle):
super(VixJob, self).__init__(handle)
assert self.get_type() == VixHandle.VIX_HANDLETYPE_JOB, 'Expected VixJob handle.'
def wait(self, *args):
"""Waits for the job to complete and gets requested results.
        :param \*args: A list of properties to retrieve (VIX_PROPERTY_JOB_RESULT_*).
        :returns: A single value if one property was requested, otherwise a list of results.
:raises vix.VixError: If job failed.
"""
c_args = list()
ret_data = list()
for arg in args:
c_args.append(ffi.cast('VixPropertyType', arg))
# TODO: Check the arg type and allocate accordingly...
alloc = None
if arg in self.STR_RESULT_TYPES:
alloc = ffi.new('char**')
else:
alloc = ffi.new('int*')
ret_data.append(alloc)
c_args.append(alloc)
c_args.append(ffi.cast('VixPropertyType', self.VIX_PROPERTY_NONE))
error_code = vix.VixJob_Wait(self._handle, *c_args)
if error_code != VixError.VIX_OK:
raise VixError(error_code)
# deref data...
result = list()
for i in range(len(args)):
if args[i] == self.VIX_PROPERTY_NONE:
break
val = ret_data[i]
if args[i] in self.STR_RESULT_TYPES:
result.append(_str(ffi.string(val[0]), API_ENCODING))
vix.Vix_FreeBuffer(val[0])
else:
result.append(val[0])
return result[0] if len(result) == 1 else result
def is_done(self):
"""Checks if the job completed.
:returns: True if job completed, otherwise False.
:rtype: bool
:raises vix.VixError: If failed to get job state.
"""
result = ffi.new('Bool*')
error_code = vix.VixJob_CheckCompletion(self._handle, result)
if error_code != VixError.VIX_OK:
raise VixError(error_code)
return result[0]
def get_error(self):
"""Gets an exception object.
:returns: Exception object of job. The error may be VixError(VIX_OK).
:rtype: .VixError
"""
error_code = vix.VixJob_GetError(self._handle)
return VixError(error_code)
def _get_num_properties(self, property_id):
count = vix.VixJob_GetNumProperties(
self._handle,
property_id,
)
return int(count)
def _get_nth_properties(self, index, *args):
c_args = list()
for arg in args:
alloc = None
if arg in self.STR_RESULT_TYPES:
alloc = ffi.new('char**')
elif arg in (self.VIX_PROPERTY_JOB_RESULT_PROCESS_ID, self.VIX_PROPERTY_JOB_RESULT_FILE_SIZE, self.VIX_PROPERTY_JOB_RESULT_FILE_MOD_TIME):
alloc = ffi.new('uint64*')
else:
alloc = ffi.new('int*')
c_args.append(ffi.cast('VixPropertyType', arg))
c_args.append(alloc)
c_args.append(ffi.cast('VixPropertyType', self.VIX_PROPERTY_NONE))
error_code = vix.VixJob_GetNthProperties(
self._handle,
index,
*c_args
)
if error_code != VixError.VIX_OK:
raise VixError(error_code)
result = list()
for i in range(len(args)):
prop_id = int(c_args[i * 2])
prop_val = c_args[(i * 2) + 1]
value = None
if prop_id in self.STR_RESULT_TYPES:
value = _str(ffi.string(prop_val[0]), API_ENCODING)
vix.Vix_FreeBuffer(prop_val[0])
elif prop_id == self.VIX_PROPERTY_JOB_RESULT_PROCESS_BEING_DEBUGGED:
value = bool(ffi.cast('Bool', prop_val[0]))
elif prop_id in (self.VIX_PROPERTY_JOB_RESULT_PROCESS_START_TIME, self.VIX_PROPERTY_JOB_RESULT_FILE_MOD_TIME):
value = datetime.datetime.fromtimestamp(int(ffi.cast('int', prop_val[0])))
else:
value = int(ffi.cast('int', prop_val[0]))
result.append(value)
return tuple(result)
def get_properties(self, *args):
"""Get properties of a job result
:param \*args: properties to fetch.
        :returns: A list of tuples of the requested properties.
:rtype: list
:raises vix.VixError: On failure to fetch results.
"""
num = self._get_num_properties(args[0])
result = list()
for i in range(num):
result.append(self._get_nth_properties(i, *args))
return result
def __del__(self):
self.release()
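# --- Editor's note: hypothetical usage sketch, not part of the original file. ---
# `job` is assumed to be a VixJob handle returned by some other call in this
# wrapper (for example, an operation that lists processes in a guest VM).
def print_guest_processes(job):
    procs = job.get_properties(
        VixJob.VIX_PROPERTY_JOB_RESULT_PROCESS_ID,
        VixJob.VIX_PROPERTY_JOB_RESULT_PROCESS_COMMAND,
    )
    for pid, command in procs:
        print('{0}  {1}'.format(pid, command))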
|
naim94a/vix
|
vix/VixJob.py
|
Python
|
gpl-3.0
| 6,745
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 5 15:56:56 2018
@author: Han
"""
import numpy as np
from openpyxl import Workbook
def nparray2string(val):
'''
convert a numpy array to string
'''
x = np.array2string(val, threshold=np.nan,
max_line_width=np.inf,
formatter={'int': lambda x: '%d' % (x),
'float_kind':lambda x: "%.6e" % x}).replace('[', '').replace(']', '') + '\n'
if val.ndim == 1:
return x
else:
return ' ' + x
def py2dat(tdata,fname):
if not isinstance(tdata,dict):
raise TypeError
else:
with open(fname,'w',encoding='utf-8') as f:
# variables
f.write('VARIABLES = ')
for var in tdata['varnames'][:-1]:
f.write('"%s", '%(var,))
f.write('"%s"\n'%(tdata['varnames'][-1],))
nzone = -1
# write lines
if 'lines' in tdata.keys():
for iline,line in enumerate(tdata['lines']):
nzone += 1
x = np.asarray(line['x'])
lenx = x.size
data = x.reshape(1,lenx)
y = np.asarray(line['y']).reshape(1,lenx)
data = np.vstack((data,y))
if 'z' in line.keys():
if line['z'].size > 0:
z = np.asarray(line['z']).reshape(1,lenx)
data = np.vstack((data,z))
if 'v' in line.keys():
if line['v'].size > 0:
v = np.asarray(line['v'])
if v.ndim == 1:
v = v.reshape(1,lenx)
data = np.vstack((data,v))
if 'zonename' in line.keys():
if len(line['zonename']) == 0:
zonename = 'ZONE %d'%(iline,)
else:
zonename = line['zonename']
else:
zonename = 'ZONE %d'%(nzone,)
ivarloc = 0
f.write('ZONE I = %d T="%s" DATAPACKING=POINT\n'%(lenx,zonename))
f.write(' '+np.array2string(data.T,threshold=np.nan,max_line_width=np.inf).replace('[','').replace(']','') )
f.write('\n\n')
# write surfaces
if 'surfaces' in tdata.keys():
for isurf,surf in enumerate(tdata['surfaces']):
nzone += 1
# 0 for point, 1 for block
if 'datapacking' in surf.keys():
ipack = 'POINT' if surf['datapacking'] == 0 else 'BLOCK'
else:
ipack = 'POINT'
# 0 for nodal, 1 for center
if 'varloc' in surf.keys():
ivarloc = surf['varloc']
if isinstance(ivarloc, list):
ipack = 'BLOCK'
icen = []
inodal = []
for i, ii in enumerate(ivarloc):
if ii == 1:
icen.append(i+1)
else:
inodal.append(i+1)
else:
ivarloc = 0
# 3 for IJ order, 2 for IK order, 1 for JK order
if 'order' in surf.keys():
iorder = surf['order']
else:
iorder = 3
x = surf['x']
# x should be store in the following way
# x -----> i
# |
# |
# ^ j
y = surf['y']
if 'z' in surf.keys():
z = surf['z']
if 'v' in surf.keys():
v = surf['v']
                    if 'zonename' in surf.keys():
                        if len(surf['zonename']) == 0:
                            zonename = 'ZONE %d'%(nzone,)
                        else:
                            zonename = surf['zonename']
                    else:
                        zonename = 'ZONE %d'%(nzone,)
m, n = x.shape
f.write('ZONE I=%d, J=%d, T="%s", DATAPACKING=%s, '%(m,n,zonename,ipack))
if isinstance(ivarloc, list):
f.write('VARLOCATION=(%s=CELLCENTERED, %s=NODAL)\n'%(str(icen), str(inodal)))
elif ivarloc == 1:
f.write('VARLOCATION=([%d-%d]=CELLCENTERED)\n'%(1, len(tdata['varnames'])))
if ipack == 'BLOCK':
f.write(nparray2string(x.flatten()))
f.write(nparray2string(y.flatten()))
if 'z' in surf.keys():
f.write(nparray2string(z.flatten()))
if 'v' in surf.keys():
for vv in v:
f.write(nparray2string(vv.flatten()))
else:
data = x.flatten()
data = np.vstack((data,y.flatten()))
if 'z' in surf.keys():
data = np.vstack((data,z.flatten()))
                        if 'v' in surf.keys():
for vv in v:
data = np.vstack((data,vv.flatten()))
f.write(nparray2string(data.T))
f.write('\n\n')
def py2xls(tdata, fname, creator='Han Luo'):
wb = Workbook()
k =-1
for line in tdata['lines']:
k = k+1
if 'zonename' in line.keys():
if len(line['zonename']) == 0:
zn = 'Sheet %d'%(k+1,)
else:
zn = line['zonename']
else:
zn = 'Sheet %d'%(k+1,)
ws=wb.create_sheet(zn,k)
for i,j in enumerate(tdata['varnames']):
cellnum = chr(65+i)
ws[cellnum+'1'] = j
data = np.vstack((line['x'],line['y'],line['z'],line['v'])).T
for i,row in enumerate(data):
for j,ele in enumerate(row):
cellnum = chr(65+j)
cellid = cellnum+'%d'%(i+2,)
ws[cellid] = ele
wb.properties.creator = creator
wb.save(fname)
def py2dat2(tdata, fname):
with open(fname, 'w') as f:
# variables
f.write('VARIABLES = ')
for var in tdata['varnames'][:-1]:
f.write('"%s", '%(var,))
f.write('"%s"\n'%(tdata['varnames'][-1],))
# data
for zone in tdata['data']:
f.write('\n')
m, n = zone['data'].shape
f.write('ZONE I = %d, T = "%s"\n'%(m, zone['title']))
if 'passivevarlist' in zone.keys():
if zone['passivevarlist']:
f.write('PASSIVEVARLIST=%s\n'%(str(zone['passivevarlist']),))
f.write(nparray2string(zone['data']))
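# --- Editor's note: hypothetical usage sketch, not part of the original file. ---
# The 'varnames'/'lines' layout is inferred from how py2dat() reads its input.
# Note that the threshold=np.nan used by the writers above is only accepted by
# older NumPy releases; newer ones expect a finite value such as sys.maxsize.
if __name__ == '__main__':
    xs = np.linspace(0.0, 1.0, 11)
    tdata = {'varnames': ['x', 'y'],
             'lines': [{'x': xs, 'y': np.sin(xs), 'zonename': 'demo line'}]}
    py2dat(tdata, 'demo_line.dat')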
|
luohancfd/FluidDynamicTools
|
Tecplot_Tools/py2tecplot.py
|
Python
|
gpl-3.0
| 7,403
|
from operator import itemgetter
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required, user_passes_test
from teams.models import Team, TeamCodingAnswer, TeamMcqAnswer, UploadFileModel
from mcqs.models import Question as McqQuestion
from coding.models import InputCase, Question as CodingQuestion
def _get_teaminfo_list():
teaminfo_list = []
for team in Team.objects.all():
member_list = []
for member in team.teammember_set.all():
if member.full_name != '':
member_list.append({
'full_name': member.full_name,
'college_id': member.college_id,
'email': member.email,
'mobile_no': member.mobile_no,}
)
teaminfo_list.append({
'team_name': team.team_name,
'member_list': member_list,
'lang_pref': team.lang_pref,}
)
return teaminfo_list
def _get_mcq_score(team):
score = 0
for ans in team.teammcqanswer_set.all():
question = McqQuestion.objects.get(question_no=ans.question_no,
language=team.lang_pref)
if ans.choice_no == question.answer_choice_no:
score += 1
return score
def _get_mcq_evaluation(team):
score, evaluated_list = 0, []
for ques in McqQuestion.objects.filter(language=team.lang_pref):
try:
answer = team.teammcqanswer_set.get(question_no=ques.question_no)
selected_choice = ques.choice_set.get(
choice_no=answer.choice_no).choice_text
if answer.choice_no == ques.answer_choice_no:
score += 1
except TeamMcqAnswer.DoesNotExist:
selected_choice = ''
correct_choice = ques.choice_set.get(
choice_no=ques.answer_choice_no).choice_text
evaluated_list.append({
'question_no': ques.question_no,
'correct_choice': correct_choice,
'selected_choice': selected_choice,}
)
evaluated_list = sorted(evaluated_list, key=itemgetter('question_no'))
return (score, evaluated_list)
def _get_coding_score(team):
score = 0
for ans in team.teamcodinganswer_set.all():
question = CodingQuestion.objects.get(question_no=ans.question_no)
inputcase = InputCase.objects.get(question=question,
case_no=ans.inputcase_no)
if ans.output_text == inputcase.answer_case_text:
score += inputcase.points
return score
def _get_coding_evaluation(team):
score, evaluated_list = 0, []
for ques in CodingQuestion.objects.all():
inputcase_list = []
for inputcase in ques.inputcase_set.all():
try:
answer = team.teamcodinganswer_set.get(
question_no=ques.question_no,
inputcase_no=inputcase.case_no)
output_text = answer.output_text
# Do not include inputcase for empty answers
if output_text == '':
continue
if answer.output_text == inputcase.answer_case_text:
score += inputcase.points
inputcase_list.append({
'inputcase_no': inputcase.case_no,
'correct_output': inputcase.answer_case_text,
'answered_output': output_text,
'points': inputcase.points,}
)
except TeamCodingAnswer.DoesNotExist:
pass
evaluated_list.append({
'question_no': ques.question_no,
'inputcase_list': inputcase_list,}
)
evaluated_list = sorted(evaluated_list, key=itemgetter('question_no'))
return (score, evaluated_list)
@user_passes_test(lambda u: u.is_admin)
def index(request):
return render(request, 'scores/index.html', {
'team_list': _get_teaminfo_list,
'team_count': Team.objects.count(),
'mcqs_count': McqQuestion.objects.count(),
'coding_count': CodingQuestion.objects.count(),
'inputcase_count': InputCase.objects.count(),}
)
@user_passes_test(lambda u: u.is_admin)
def leaderboard(request, app):
teaminfo_list = _get_teaminfo_list()
if app == 'mcqs':
get_score_func = _get_mcq_score
elif app == 'coding':
get_score_func = _get_coding_score
for teaminfo in teaminfo_list:
team = Team.objects.get(team_name=teaminfo['team_name'])
teaminfo['score'] = get_score_func(team)
teaminfo_list = sorted(teaminfo_list, key=itemgetter('score'), reverse=True)
return render(request, 'scores/leaderboard.html', {
'team_list': teaminfo_list,
'app': app,}
)
@login_required
@user_passes_test(lambda u: u.is_admin)
def evaluate(request, team_name, app):
team = get_object_or_404(Team, team_name=team_name)
if app == 'mcqs':
score, evaluated_list = _get_mcq_evaluation(team)
template_name = 'scores/evaluate_mcqs.html'
elif app == 'coding':
score, evaluated_list = _get_coding_evaluation(team)
template_name = 'scores/evaluate_coding.html'
member_list = []
for member in team.teammember_set.all():
if member.full_name != '':
member_list.append({
'full_name': member.full_name,
'college_id': member.college_id,
'email': member.email,
'mobile_no': member.mobile_no,}
)
return render(request, template_name, {
'team_name': team.team_name,
'member_list': member_list,
'team_lang_pref': team.lang_pref,
'evaluated_list': evaluated_list,
'score': score,}
)
@login_required
@user_passes_test(lambda u: u.is_admin)
def display_file(request, team_name, question_no):
team = get_object_or_404(Team, team_name=team_name)
uploaded_file = get_object_or_404(UploadFileModel, team=team,
question_no=question_no)
if team.lang_pref == 'C':
language_class = 'c'
else:
language_class = 'java'
return render(request, 'scores/file_display.html', {
'question_no': question_no,
'file_url': uploaded_file.file.url,
'language_class': language_class,}
)
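# --- Editor's note: hypothetical URL wiring for the views above, not part of the
# original file; pattern strings and route names are made up for illustration,
# using the Django 1.10-era url() syntax that matches the migration in this dump.
from django.conf.urls import url

urlpatterns = [
    url(r'^$', index, name='index'),
    url(r'^leaderboard/(?P<app>mcqs|coding)/$', leaderboard, name='leaderboard'),
    url(r'^evaluate/(?P<team_name>[^/]+)/(?P<app>mcqs|coding)/$', evaluate, name='evaluate'),
    url(r'^file/(?P<team_name>[^/]+)/(?P<question_no>\d+)/$', display_file, name='display_file'),
]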
|
shivamMg/malvo
|
scores/views.py
|
Python
|
gpl-3.0
| 6,411
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
import django
django.setup()
from tsk.models import Provoz
from datetime import datetime
from elasticsearch.helpers import streaming_bulk
from elasticsearch_dsl import DocType, Object, String, GeoPoint, Date, Integer, Float
from elasticsearch_dsl.connections import connections
es = connections.create_connection(hosts=['http://31eccb709ebda3928dd01ae144d5379a.eu-west-1.aws.found.io:9200'])
class TrafficReport(DocType):
location = Object(properties={
'name': String(fields={'raw': String(index='not_analyzed')}),
'geo': GeoPoint()
})
timestamp = Date()
class Meta:
index = 'traffic'
def get_provoz():
cnt = Provoz.objects.count()
print 'Celkem %d zaznamu' % cnt
batch = 500000
i = 1
while i*batch < cnt:
for p in Provoz.objects.all().select_related('location')[i*batch:(i+1)*batch].iterator():
yield {
'location': {'name': p.location.name, 'geo': {'lat': 0, 'lon': 0}},
'timestamp': p.time_start,
'traffic': p.level,
'cas': float(p.time_start.strftime('%H')) + float(p.time_start.strftime('%M'))/60,
'den_v_tydnu': p.time_start.weekday(),
'_id': p.id
}
i += 1
def django_import():
#es.indices.delete(index='traffic', ignore=404)
#TrafficReport.init()
i = 0
for ok, info in streaming_bulk(es, get_provoz(), doc_type='traffic_report', index='traffic'):
i+= 1
if i % 1000 == 0:
print(i, 'dokumentu hotovo')
def search():
es.indices.refresh(index="traffic")
res = es.search(index="traffic", body={"query": {"match_all": {}}})
print("Got %d Hits:" % res['hits']['total'])
for hit in res['hits']['hits']:
print hit
#print("%(timestamp)s %(author)s: %(text)s" % hit["_source"])
django_import()
#search()
|
auto-mat/stupneprovozu
|
es.py
|
Python
|
gpl-3.0
| 1,920
|
#!/usr/bin/python
import rpm as rpm_mod
import os
import shutil
import bdb
import sh
import fs
import logging
logger = logging.getLogger("yum")
#-----------------------------------------------------------------------------
def mklist(value):
if isinstance(value, list):
return value
elif isinstance(value, tuple):
return list(value)
else:
return [value]
#-----------------------------------------------------------------------------
class YumConfig:
def __init__(self, chroot, repos = {}, env = None):
self.chroot = os.path.abspath(chroot)
self.repos = repos.copy() # shallow copy is enough
self.gpg_keys = os.path.join(self.chroot, 'yumbootstrap/RPM-GPG-KEYS')
self.pretend_has_keys = False
#self.multilib = False
self.env = env
def add_repository(self, name, url):
self.repos[name] = url
def add_key(self, path, pretend = False):
if pretend:
self.pretend_has_keys = True
else:
fs.touch(self.gpg_keys)
open(self.gpg_keys, 'a').write(open(path).read())
@property
def config_file(self):
return os.path.join(self.chroot, 'yumbootstrap/yum.conf')
@property
def root_dir(self):
return os.path.join(self.chroot, 'yumbootstrap')
def text(self):
if self.pretend_has_keys or os.path.exists(self.gpg_keys):
logger.info("GPG keys defined, adding them to repository configs")
gpgcheck = 1
def repo(name, url):
return \
'\n' \
'[%s]\n' \
'name = %s\n' \
'baseurl = %s\n' \
'gpgkey = file://%s\n' % (name, name, url, self.gpg_keys)
else:
logger.warn("no GPG keys defined, RPM signature verification disabled")
gpgcheck = 0
def repo(name, url):
return \
'\n' \
'[%s]\n' \
'name = %s\n' \
'baseurl = %s\n' % (name, name, url)
main = \
'[main]\n' \
'exactarch = 1\n' \
'obsoletes = 1\n' \
'#multilib_policy = all | best\n' \
'cachedir = /yumbootstrap/cache\n' \
'logfile = /yumbootstrap/log/yum.log\n'
main += 'gpgcheck = %d\n' % (gpgcheck)
    main += 'reposdir = %s/yumbootstrap/yum.repos.d\n' % (self.chroot)
repos = [repo(name, self.repos[name]) for name in sorted(self.repos)]
return main + ''.join(repos)
#-----------------------------------------------------------------------------
# TODO:
# * setarch
# * should `chroot' go through YumConfig?
class Yum:
def __init__(self, chroot, yum_conf = None, yum = '/usr/bin/yum',
interactive = False):
self.chroot = os.path.abspath(chroot)
if yum_conf is not None:
self.yum_conf = yum_conf
else:
self.yum_conf = YumConfig(chroot = chroot)
self.yum = yum # yum from host OS
self.interactive = interactive
self.rpmdb_fixed = False
# NOTE: writing yum.conf is delayed to the first operation
def _yum_call(self):
yum_conf = self.yum_conf.config_file
if not os.path.exists(yum_conf):
logger.info("%s doesn't exist, creating one", yum_conf)
fs.touch(yum_conf, text = self.yum_conf.text())
opts = [self.yum, '-c', yum_conf, '--installroot', self.chroot, '-y']
if self.interactive:
opts.extend(['-e', '1', '-d', '2'])
else:
opts.extend(['-e', '1', '-d', '1'])
return opts
def install(self, packages, exclude = []):
if self.rpmdb_fixed:
raise Exception("Can't install anything after RPM DB was fixed")
exclude_opts = ["--exclude=" + pkg for pkg in exclude]
sh.run(
self._yum_call() + exclude_opts + ['install'] + mklist(packages),
env = self.yum_conf.env,
)
def group_install(self, groups, exclude = []):
if self.rpmdb_fixed:
raise Exception("Can't install anything after RPM DB was fixed")
exclude_opts = ["--exclude=" + pkg for pkg in exclude]
sh.run(
self._yum_call() + exclude_opts + ['groupinstall'] + mklist(groups),
env = self.yum_conf.env,
)
def clean(self):
logger.info("removing directory %s", self.yum_conf.root_dir)
shutil.rmtree(self.yum_conf.root_dir, ignore_errors = True)
def fix_rpmdb(self, expected_rpmdb_dir = None,
db_load = 'db_load', rpm = 'rpm'):
logger.info("fixing RPM database for guest")
current_rpmdb_dir = rpm_mod.expandMacro('%{_dbpath}')
if expected_rpmdb_dir is None:
expected_rpmdb_dir = sh.run(
['python', '-c', 'import rpm; print rpm.expandMacro("%{_dbpath}")'],
chroot = self.chroot,
pipe = sh.READ,
env = self.yum_conf.env,
).strip()
# input directory
rpmdb_dir = os.path.join(self.chroot, current_rpmdb_dir.lstrip('/'))
logger.info('converting "Packages" file')
in_pkg_db = os.path.join(rpmdb_dir, 'Packages')
tmp_pkg_db = os.path.join(expected_rpmdb_dir, 'Packages.tmp')
out_pkg_db = os.path.join(expected_rpmdb_dir, 'Packages')
out_command = sh.run(
[db_load, tmp_pkg_db],
chroot = self.chroot, pipe = sh.WRITE,
env = self.yum_conf.env,
)
bdb.db_dump(in_pkg_db, out_command)
out_command.close()
os.rename(
os.path.join(self.chroot, tmp_pkg_db.lstrip('/')),
os.path.join(self.chroot, out_pkg_db.lstrip('/'))
)
logger.info('removing all the files except "Packages"')
for f in os.listdir(rpmdb_dir):
if f in ('.', '..', 'Packages'): continue
os.unlink(os.path.join(rpmdb_dir, f))
logger.info("running `rpm --rebuilddb'")
sh.run(
[rpm, '--rebuilddb'],
chroot = self.chroot,
env = self.yum_conf.env,
)
if current_rpmdb_dir != expected_rpmdb_dir:
# Red Hat under Debian; delete old directory (~/.rpmdb possibly)
logger.info("removing old RPM DB directory: $TARGET%s",
current_rpmdb_dir)
shutil.rmtree(os.path.join(self.chroot, current_rpmdb_dir.lstrip('/')))
self.rpmdb_fixed = True
#-----------------------------------------------------------------------------
# vim:ft=python
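# --- Editor's note: hypothetical usage sketch, not part of the original module. ---
# It assumes the yumbootstrap helper modules imported above (fs, sh, bdb) are on
# the path, that /usr/bin/yum exists on the host, and that it is run as root.
# The chroot path and repository URL are made up for illustration.
def example_bootstrap():
    conf = YumConfig(chroot='/srv/centos-chroot',
                     repos={'base': 'http://mirror.example.org/centos/7/os/x86_64/'})
    yum = Yum(chroot='/srv/centos-chroot', yum_conf=conf)
    yum.install(['bash', 'coreutils'])  # resolve and install into the chroot
    yum.fix_rpmdb()                     # rebuild the RPM DB for the guest's rpm
    yum.clean()                         # remove the /yumbootstrap work directory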
|
dozzie/yumbootstrap
|
lib/yumbootstrap/yum.py
|
Python
|
gpl-3.0
| 6,008
|
ERROR_USAGE = 1
ERROR_COMMAND_SYNTAX_ERROR = 2
ERROR_NO_SUCH_COMMAND = 3
ERROR_AGENT_NOT_FOUND = 10
ERROR_AGENT_NOT_PRIMARY = 11
ERROR_AGENT_NOT_CONNECTED = 12
ERROR_AGENT_NOT_ENABLED = 13
ERROR_AGENT_NOT_SPECIFIED = 14
ERROR_AGENT_NO_SPACE = 15
ERROR_BUSY = 20
ERROR_WRONG_STATE = 21
ERROR_INVALID_PORT = 30
ERROR_NOT_FOUND = 31
ERROR_BAD_VALUE = 32
ERROR_SOCKET_DISCONNECTED = 40
ERROR_COMMAND_FAILED = 50
ERROR_INVALID_XID = 51
ERROR_DOMAIN_INVALID = 60
ERROR_PERMISSION = 70
ERROR_INTERNAL = 99
ERROR_STRINGS = {
ERROR_USAGE: "Invalid usage",
ERROR_AGENT_NOT_FOUND: "Agent not found",
ERROR_AGENT_NOT_PRIMARY: "Agent not primary",
ERROR_BUSY: "Busy with another user request",
ERROR_AGENT_NOT_SPECIFIED: "No agent specified",
ERROR_WRONG_STATE: "wrong state for command",
ERROR_INTERNAL: "Internal error",
ERROR_PERMISSION: "Permission denied"
}
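# --- Editor's note: hypothetical helper, not part of the original module. ---
# One way the table above can be used: map an error code to a printable
# message, falling back to the numeric code when no text is defined for it.
def error_message(code):
    return ERROR_STRINGS.get(code, "Unknown error ({0})".format(code))
# e.g. error_message(ERROR_BUSY) -> "Busy with another user request"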
|
palette-software/palette
|
controller/controller/clierror.py
|
Python
|
gpl-3.0
| 891
|
import configparser
class Config(object):
def __init__(self):
self.certificate_path = None
self.certificate_name = None
self.channel = None
self.base_url = None
self.upload_url = None
self.server_url = None
def parse_config(config_file):
config = configparser.ConfigParser()
config.read(config_file)
data = Config()
updater_data = config['Updater']
data.base_url = updater_data['base-url']
data.certificate_name = updater_data['certificate-name']
data.certificate_path = updater_data['certificate-path']
data.channel = updater_data['channel']
data.upload_url = updater_data['upload-url']
data.server_url = updater_data["ServerURL"]
return data
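# --- Editor's note: hypothetical usage sketch, not part of the original file. ---
# The section and key names follow what parse_config() reads above; the file
# name, values and URLs are made up for illustration.
if __name__ == '__main__':
    sample = (
        '[Updater]\n'
        'base-url = https://updates.example.org\n'
        'certificate-name = example.pem\n'
        'certificate-path = /etc/updater/certs\n'
        'channel = daily\n'
        'upload-url = https://upload.example.org\n'
        'ServerURL = https://server.example.org\n'
    )
    with open('updater.ini', 'w') as sample_file:
        sample_file.write(sample)
    cfg = parse_config('updater.ini')
    print(cfg.channel, cfg.base_url, cfg.server_url)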
|
beppec56/core
|
bin/update/config.py
|
Python
|
gpl-3.0
| 746
|
class MainClass:
def r2_init(self):
self.additional_string = None
def on_receive(self, msg, outputObject, source):
outputObject.AddMetadata("TestHeader", "baz")
if msg.Payload.Has("text"):
outputObject.Payload = msg.Payload.text
if self.additional_string is not None:
outputObject.Payload = outputObject.Payload + self.additional_string
elif msg.Payload.Has("ob") and msg.Payload.ob.Has("bar"):
outputObject.Payload = {}
outputObject.Payload["foo"] = msg.Payload.ob.bar * 10
outputObject.Payload["bar"] = "baz"
else:
outputObject.Payload = "ARGH!!!"
return outputObject
main_class = MainClass()
|
TordWessman/r2Project
|
TestData/test_server.py
|
Python
|
gpl-3.0
| 639
|
"""
This is dota2. All dota2 related functions are in here.
"""
import datetime
from Download import download_xml as download_xml
import xml.etree.ElementTree as et
class DotaData(object):
def __init__(self, account_id):
self.account_id = account_id
def get_account_id(self):
return self.account_id
    def get_user_match_items(self, amount):
        """
        This function returns a list of all item ids for the given number of games.
"""
item_ids = []
amount = (amount*2)
x = 0
match_data = self.get_match_data()
while True:
steam_xml_file = download_xml(4, match_data[x])
for players in steam_xml_file:
for player in players:
find_my_id = str(player.find("account_id").text)
if find_my_id == str(self.account_id):
item_ids.append(player.find("item_0").text)
item_ids.append(player.find("item_1").text)
item_ids.append(player.find("item_2").text)
item_ids.append(player.find("item_3").text)
item_ids.append(player.find("item_4").text)
item_ids.append(player.find("item_5").text)
x += 2
            if x == amount:
break
return item_ids
def get_item_name(self, item_id):
"""
        This function returns the item name that matches the given item_id.
!!NEEDS WORK!!
"""
self.pass_static()
steam_xml_file = et.parse("item_info.xml")
steam_xml_root = steam_xml_file.getroot()
for items in steam_xml_root:
for item in items:
if item_id == str(item.find("id").text):
chosen_item = item.find("name").text
return chosen_item
    def convert_hero_items(self):
        """
        This function converts item ids into item names and prints them.
!!NEEDS WORK!!
"""
list_user_item_ids = self.get_user_match_items(self.account_id)
for x in list_user_item_ids:
print(str(self.get_item_name(x)).upper())
def get_match_result(self, match_id, side):
"""
        Returns this player's match result (win/loss) or side (Radiant or Dire).
"""
steam_xml_file = download_xml(4, match_id)
radiant_xml_result = str(steam_xml_file.find("radiant_win").text).upper()
player_side = ""
for players in steam_xml_file:
for player in players:
xml_account_id = player.find("account_id").text
if xml_account_id == self.account_id:
player_slot = player.find("player_slot").text
if int(player_slot) < 5:
player_side = "Radiant"
else:
player_side = "Dire"
if player_side == "Radiant" and radiant_xml_result == "TRUE":
player_result = 1
elif player_side == "Dire" and radiant_xml_result == "FALSE":
player_result = 1
else:
player_result = 0
if side == 0:
return player_result
elif side == 1:
return player_side
else:
return None
def list_results(self, amount, side):
"""
        This function loops through the results returned by get_match_result and adds them to a list.
        It then returns this list, limited to the amount specified by the user.
"""
i = 0
amount = (amount*2)
match_data = self.get_match_data()
result_list = []
        while i != amount:
result = self.get_match_result(match_data[i], side)
result_list.append(result)
i += 2
return result_list
def get_user_hero_id(self, amount):
"""
This functions returns a list containing the ID of the hero that the given account_id user, has played.
It fills up after completing all for loops, otherwise it will only remember the first input hero_id
"""
# MAGIC
steam_xml_file = download_xml(2, "")
steam_xml_root = steam_xml_file
steam_xml_matches = steam_xml_root.find('matches')
user_match_data_list = []
x = 0
for match in steam_xml_matches:
for match_info in match:
for player in match_info:
if player.find("account_id").text == self.account_id:
user_match_data_list.append(player.find('hero_id').text)
x += 1
                        if x == amount:
break
return user_match_data_list
def get_hero_information(self, hero_id):
"""
        This function returns the hero name as text, matched against a hero_id from the list produced by get_user_hero_id().
"""
# MAGIC
self.pass_static()
steam_xml_file = download_xml(1, "")
# steam_xml_parser = et.parse("herolog.xml")
steam_hero_root = steam_xml_file
steam_heroes_root = steam_hero_root.find('heroes')
hero_list = []
for all_heroes in steam_heroes_root:
if hero_id == all_heroes.find("id").text:
hero_list.append(all_heroes.find("localized_name").text)
# print(hero_id)
# print(all_heroes.find("localized_name").text)
for selected_hero in hero_list:
return selected_hero
def list_hero_amount(self, hero_id, amount):
hero_found = 0
x = 0
hero_list = self.get_user_hero_id(amount)
for heroes in hero_list:
if int(heroes) is int(hero_id):
hero_found += 1
return hero_found
def get_match_data(self):
"""
This function returns the match_id and time in timestamp.
Converting of timestamp happens when outputting.
"""
steam_xml_file = download_xml(2, str(self.account_id))
steam_xml_root = steam_xml_file
steam_xml_matches = steam_xml_root.find('matches')
match_data_list = []
for match in steam_xml_matches:
for m_id in match.findall('match_id'):
match_data_list.append(m_id.text)
match_data_list.append(match.find('start_time').text)
return match_data_list
def list_dota2_news(self):
"""
This function downloads the xml file and adds the chosen items into a list.
It then returns this list with current dota2 news.
"""
self.pass_static()
steam_xml_file = download_xml(3, "")
news_list = []
for news_items in steam_xml_file:
for news_item in news_items:
news_list.append(news_item.find("title").text)
news_list.append(news_item.find("url").text)
news_list.append(news_item.find("contents").text)
return news_list
def last_game_time(self):
match_data = self.get_match_data()
time_converted = datetime.datetime.fromtimestamp(int(match_data[1])).strftime('%Y-%m-%d %H:%M:%S')
return time_converted
def list_last_game_ids(self, amount):
amount = (amount*2)
id_list = []
match_data = self.get_match_data()
i = 0
while i < amount:
id_list.append(match_data[i])
i += 2
return id_list
def list_last_games(self, amount):
"""
This will be changed / removed.
"""
i = 0
amount = (amount*2)
user_match_data_list = self.get_user_hero_id(self.account_id)
match_data = self.get_match_data()
list_games = []
for info in user_match_data_list:
match_result = self.get_match_result(match_data[i], self.account_id)
chosen_hero = self.get_hero_information(str(info))
list_games.append(str(chosen_hero))
list_games.append(self.account_id)
list_games.append(match_data[i])
list_games.append(datetime.datetime.fromtimestamp(int(match_data[i + 1])).strftime('%Y-%m-%d %H:%M:%S'))
list_games.append(match_result)
i += 2
# modulo of 2 represent each game -> (i = 4) is 2 games, (i = 6) is 3 games, (i = 8) is 4 games and so on.
            if i == amount:
break
return list_games
def pass_static(self):
pass
|
nigh7fox/dota2_information_composition
|
Dota2.py
|
Python
|
gpl-3.0
| 8,863
|
#!/usr/bin/env python
# coding: utf-8
"""
Hotfile Downloader.
"""
import re
import time
import gobject
from guicavane.Utils.UrlOpen import UrlOpen
from guicavane.Paths import HOSTS_IMAGES_DIR, SEP
from CaptchaWindow import CaptchaWindow, CAPTCHA_IMAGE_PATH
from Base import BaseDownloader
RECAPTCHA_CHALLENGE_URL = "http://api.recaptcha.net/challenge?k="
RECAPTCHA_IMAGE_URL = "http://www.google.com/recaptcha/api/image?c="
TM_RE = re.compile(r'name=tm value=(.*?)>')
TMHASH_RE = re.compile(r'name=tmhash value=(.*?)>')
WAIT_RE = re.compile(r'<input type=hidden name=wait value=(\d*?)>')
WAITHASH_RE = re.compile(r'name=waithash value=(.*?)>')
UPIDHASH_RE = re.compile(r'name=upidhash value=(.*?)>')
CAPTCHA_ID_RE = re.compile(r'src="http://api.recaptcha.net/challenge\?k=(.*?)">')
CAPTCHA_ID2_RE = re.compile(r"challenge : '(.*?)',")
FILE_URL_RE = re.compile(r'href="(.*?)" class="click_download"')
MAIN_URL_OPEN = UrlOpen()
CAPTCHA_URL_OPEN = UrlOpen()
class Hotfile(BaseDownloader):
""" Hotfile's Downloader. """
name = "Hotfile"
icon_path = HOSTS_IMAGES_DIR + SEP + "hotfile.png"
accept_ranges = False
def __init__(self, gui_manager, url):
BaseDownloader.__init__(self, MAIN_URL_OPEN, gui_manager, url)
self.gui_manager = gui_manager
self.url = url
self.captcha_window = CaptchaWindow(gui_manager, self._on_captcha_ok)
CAPTCHA_URL_OPEN.add_headers({"referer": url})
def process_url(self, play_callback, file_path):
self.play_callback = play_callback
self.file_path = file_path
self.gui_manager.background_task(self.start_regular,
self.show_captcha_window, unfreeze=False)
def start_regular(self):
page_data = MAIN_URL_OPEN(self.url)
waiting_time = int(WAIT_RE.search(page_data).group(1))
tm = TM_RE.search(page_data).group(1)
tmhash = TMHASH_RE.search(page_data).group(1)
waithash = WAITHASH_RE.search(page_data).group(1)
upidhash = UPIDHASH_RE.search(page_data).group(1)
for i in range(waiting_time, 0, -1):
gobject.idle_add(self.gui_manager.set_status_message,
"Please wait %d second%s..." % (i, "s" * (i > 1)))
time.sleep(1)
data = {"action": "capt", "tm": tm, "tmhash": tmhash,
"wait": waiting_time, "waithash": waithash,
"upidhash": upidhash}
# Get the challenge id for the captcha request
page_data = MAIN_URL_OPEN(self.url, data=data)
captcha_id = CAPTCHA_ID_RE.search(page_data).group(1)
# Get the challenge id for the captcha image
page_data = CAPTCHA_URL_OPEN(RECAPTCHA_CHALLENGE_URL + captcha_id)
self.captcha_id2 = CAPTCHA_ID2_RE.search(page_data).group(1)
# Download the captcha image
page_data = CAPTCHA_URL_OPEN(RECAPTCHA_IMAGE_URL + self.captcha_id2)
filehandler = open(CAPTCHA_IMAGE_PATH, "wb")
filehandler.write(page_data)
filehandler.close()
def show_captcha_window(self, (is_error, result)):
if is_error:
self.gui_manager.report_error("Error: %s" % result)
return
self.captcha_window.show()
def send_captcha(self):
gobject.idle_add(self.gui_manager.set_status_message,
"Sending Captcha...")
response_text = self.captcha_window.get_input_text()
data = {"action": "checkcaptcha",
"recaptcha_challenge_field": self.captcha_id2,
"recaptcha_response_field": response_text}
page_data = MAIN_URL_OPEN(self.url, data=data)
self.file_url = FILE_URL_RE.search(page_data).group(1)
def _download_loop(self):
self.add_range(MAIN_URL_OPEN)
handler = MAIN_URL_OPEN(self.file_url, handle=True)
gobject.idle_add(self.gui_manager.set_status_message, "Loading...")
# Using the BaseDownloader download function
self.download_to(handler, self.file_path)
def _on_captcha_ok(self):
self.gui_manager.background_task(self.send_captcha,
self._on_captcha_finish)
def _on_captcha_finish(self, (is_error, result)):
if is_error:
self.gui_manager.report_error("Error sending captcha: %s" % result)
return
self.gui_manager.background_task(self._download_loop,
self._on_download_finish)
self.play_callback()
def _on_download_finish(self, (is_error, result)):
if is_error:
if "Requested Range Not Satisfiable" in str(result):
self.file_size = self.downloaded_size
else:
self.gui_manager.report_error("Download finish with error: %s" % result)
self.gui_manager.unfreeze()
|
j0hn/guicavane
|
guicavane/Downloaders/Hotfile.py
|
Python
|
gpl-3.0
| 4,791
|
# Eyegrade: grading multiple choice questions with a webcam
# Copyright (C) 2010-2021 Jesus Arias Fisteus
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
import os
import unittest
import numpy as np
import eyegrade.ocr.sample as sample
import eyegrade.ocr.classifiers as classifiers
class TestClassifier(unittest.TestCase):
def _get_test_file_path(self, filename):
dirname = os.path.dirname(os.path.abspath(__file__))
return os.path.join(dirname, filename)
def test_classify_digit(self):
image_path = self._get_test_file_path("digit.png")
corners = np.array([[0, 1], [19, 0], [0, 7], [21, 17]])
samp = sample.Sample(corners, image_filename=image_path)
classifier = classifiers.DefaultDigitClassifier()
label = classifier.classify(samp)
self.assertTrue(label in range(10))
def test_classify_cross(self):
image_path = self._get_test_file_path("cross.png")
corners = np.array([[0, 0], [27, 0], [1, 32], [29, 32]])
samp = sample.Sample(corners, image_filename=image_path)
classifier = classifiers.DefaultCrossesClassifier()
label = classifier.classify(samp)
self.assertTrue(label == 0 or label == 1)
|
jfisteus/eyegrade
|
tests/test_classifier.py
|
Python
|
gpl-3.0
| 1,817
|
#!/usr/bin/python3
import os
import sys
from web.libreScanWeb import LibreScanWeb
if __name__ == '__main__':
if len(sys.argv) < 2:
        print('You must provide at least one argument.')
print('Usted debe de proveer al menos un argumento.')
sys.exit(0)
if sys.argv[1] == 'web':
os.environ["LS_DEV_MODE"] = "False"
if len(sys.argv) > 2:
os.environ["LS_DEV_MODE"] = str(sys.argv[2] == '--dev')
app = LibreScanWeb()
app.run_app()
else:
print('Argument not valid.')
print('El argumento ingresado no es válido.')
|
LabExperimental-SIUA/LibreScan
|
src/main.py
|
Python
|
gpl-3.0
| 601
|
#!/usr/bin/python
import sys, pygame
import random
import time
pygame.init()
class GameData:
def __init__(self):
self.size = width, height = 640, 480
self.screen = pygame.display.set_mode(self.size)
self.background = pygame.image.load("background.jpg").convert()
self.backrect = self.background.get_rect()
def get_size(self):
return self.size
def get_window_width(self):
return self.size[0]
def get_window_height(self):
return self.size[1]
def get_screen(self):
return self.screen
def set_screen(self, x):
self.screen=x
def get_background(self):
return self.background
def set_background(self, x):
self.background = x
def get_backrect(self):
return self.backrect
def set_backrect(self, x):
self.backrect = x
screen = property(fget=get_screen, fset=set_screen)
background = property(fget=get_background, fset=set_background)
backrect = property(fget=get_backrect, fset=set_backrect)
class paddle:
def __init__(self,x,y):
self.width=45
self.height=10
self.xvelocity=0
self.XPos = x
self.YPos = y
self.rect = pygame.Rect(x, y, self.width, self.height)
self.paddlespeed = 5
def get_paddle_height(self):
return 10
def get_paddle_width(self):
return 32
def xvel(self,x):
self.xvelocity=x
return
def updatePos(self):
if ((self.XPos+self.xvelocity)>640-self.width):
self.XPos=640-self.width
elif ((self.XPos+self.xvelocity)<0):
self.XPos=0
else:
self.XPos=self.XPos+self.xvelocity
#change this to correctness
#self.rect=pygame.Rect(self.Position[0], self.Position[1], self.width, self.height)
self.rect=self.rect.move(self.XPos, self.YPos)
def get_rect(self):
return self.rect
def draw(self, screen):
self.updatePos()
screen.blit(pygame.Surface(self.rect.size).convert_alpha(), (self.XPos, self.YPos) )
class cball:
def __init__(self,x, y):
self.x=x
self.y=y
self.XSpeed = 2.5
self.YSpeed = 2.5
def draw(self, surface):
pygame.draw.circle(surface, (255,255,255), [int(self.x), int(self.y)], int(5), int(0))
class MainGame:
def __init__(self):
random.seed(time.time())
self.gamedata = GameData()
self.player = paddle(self.gamedata.get_window_width()/2, self.gamedata.get_window_height()-20)
self.ball = cball(self.player.XPos, self.player.YPos)
#back = background()
self.ai = paddle(self.gamedata.get_window_width()/2, 20)
def __drawscreen(self):
self.gamedata.screen.blit(self.gamedata.background, self.gamedata.backrect)
self.player.draw(self.gamedata.screen)
self.ai.draw(self.gamedata.screen)
self.ball.draw(self.gamedata.screen)
pygame.display.flip()
def __moveassets(self, ball, player, ai, gamedata):
rand = 0
# if not in x alignment with ball.x then keep moving
if ball.YSpeed>0:
if ai.XPos > (ball.x - rand):
ai.xvel(-ai.paddlespeed)
if ai.XPos < (ball.x + rand):
ai.xvel(ai.paddlespeed)
else:
rand=int(100*random.random())%30
ai.xvel(0)
if ai.XPos < 0: ai.XPos=3
if ai.XPos >= gamedata.get_window_width(): ai.XPos = gamedata.get_window_width()-30
ball.x = ball.x + ball.XSpeed
ball.y = ball.y + -ball.YSpeed
#if ball.ballrect.left < 0 or ball.ballrect.right > width:
if ball.x < 0 or ball.x > gamedata.get_window_width():
ball.XSpeed = -ball.XSpeed
#if ball.ballrect.top < 0 or ball.ballrect.bottom > height:
if ball.y < 0 or ball.y > gamedata.get_window_height():
ball.YSpeed = -ball.YSpeed
# check to see if hit player paddle
# and whether the ball is behind paddle or coming in front
# left of paddle is beginning of position x
# top of paddle is beginning of position y
if ball.YSpeed < 0:
if ball.y >= player.YPos and ball.y <= player.YPos + player.get_paddle_height():
if ball.x >= player.XPos-2 and ball.x <= player.XPos + player.get_paddle_width():
ball.YSpeed=-ball.YSpeed
print "player Paddle Hit"
#check to see if ai paddle hit
if ball.YSpeed > 0:
if ball.y >= ai.YPos and ball.y <= ai.YPos+player.get_paddle_height():
if ball.x >= ai.XPos-2 and ball.x <= ai.XPos + player.get_paddle_width():
ball.YSpeed=-ball.YSpeed
print "ai paddle hit"
def mainLoop(self):
self.__drawscreen()
framerate=60
framems = 1000 / framerate #calculate the length of each frame
while 1:
#when the frame starts
startms = pygame.time.get_ticks()
for event in pygame.event.get():
if event.type == pygame.QUIT:
print "QUIT Event"
sys.exit()
elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
print "K_ESCAPE Event"
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
# velocity to right
self.player.xvel(self.player.paddlespeed)
print "K_RIGHT Event"
elif event.key == pygame.K_LEFT:
#velocity to left
self.player.xvel(-self.player.paddlespeed)
print "K_LEFT Event"
elif event.type == pygame.KEYUP:
if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
self.player.xvel(0)
self.__moveassets(self.ball, self.player, self.ai, self.gamedata)
self.__drawscreen()
# when the frame ends
endms = pygame.time.get_ticks()
# how long to delay
delayms = framems - (endms - startms)
# delay processing
pygame.time.wait(delayms)
if __name__ == "__main__":
game = MainGame()
game.mainLoop()
|
majorsilence/PongLike
|
ponglike.py
|
Python
|
gpl-3.0
| 6,672
|
#!/bin/env python
# _*_ coding:utf-8 _*_
import pexpect
mypassword = "123456"
child = pexpect.spawn('ssh root@192.168.100.150')  # spawn starts the ssh child process
child.expect('password:')  # expect() waits for the child's output and checks whether it matches the pattern 'password:'
child.sendline(mypassword)  # once the prompt is matched, send the password as the response
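# --- Editor's note: hypothetical, more defensive variant of the snippet above;
# not part of the original file. It also handles the first-connection host-key
# prompt and a timeout before handing the session over to the user.
child2 = pexpect.spawn('ssh root@192.168.100.150')
index = child2.expect(['password:', 'continue connecting', pexpect.TIMEOUT], timeout=10)
if index == 1:                # first connection: accept the host key
    child2.sendline('yes')
    child2.expect('password:')
if index != 2:                # not a timeout: send the password and take over
    child2.sendline(mypassword)
    child2.interact()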
|
zhengjue/mytornado
|
study/5/demo.py
|
Python
|
gpl-3.0
| 347
|
# coding = utf-8
"""
5.9 特征选取
http://git.oschina.net/wizardforcel/sklearn-cb/blob/master/5.md
"""
import numpy as np
from sklearn import datasets
from sklearn import feature_selection
(x, y) = datasets.make_regression(n_samples=1000, n_features=10000)
(f, p) = feature_selection.f_regression(x, y)
print(f[:7])
print(p[:7])
# f is the F-score associated with each feature of the linear model
# We can then compare these features and, based on this comparison, filter them
# p is the p-value corresponding to each F value; in statistics, a p-value is the probability of observing a value at least this extreme
# We can see that many of the p-values are too large; we would rather have them small,
# so we keep the features whose p-values are below 0.05 -- these are the features used for the analysis
idx = np.arange(0, x.shape[1])
ftKeep = idx[p < 0.05]
print(ftKeep.shape)
# Another option is to use a VarianceThreshold object
varTh = feature_selection.VarianceThreshold(np.median(np.var(x, axis=1)))
print(varTh.fit_transform(x).shape)
# Plot the p-values to see which features are filtered out and which are kept
import matplotlib.pyplot as plt
plt.style.use("ggplot")
(fig, ax) = plt.subplots(figsize=(6, 4))
ax.bar(np.arange(100), p[:100])
ax.set_title("Distribution of p-values")
fig.tight_layout()
fig.show()
|
Ginkgo-Biloba/Misc-Python
|
sklearn/SKLearn5SelectFeature.py
|
Python
|
gpl-3.0
| 1,206
|
from __future__ import print_function, absolute_import
def is_center_ground_line(lbot):
gsensors = lbot.getGroundSensors()
if gsensors is not None and gsensors["left"] is not None and gsensors["right"] is not None:
return gsensors["left"]<50 and gsensors["right"]<50
return False
|
robocomp/learnbot
|
learnbot_dsl/functions/perceptual/groundsensors/is_center_ground_line.py
|
Python
|
gpl-3.0
| 286
|
# -*- coding: utf-8 -*-
import logging as log
import datetime as dt
from collections import defaultdict
def from_csv(file, separator=';', fmt=None):
check_string_type_of(separator)
format_of = fmt
number_of_transactions = 0
number_of_corrupted_records = 0
transactions = []
last_unique_items_of = defaultdict(lambda:
defaultdict(lambda: dt.datetime(1, 1, 1)))
def process_transaction_time():
try:
time = depending_on(format_of)(timestamp)
except (ValueError, OverflowError):
line_no = number_of_transactions + number_of_corrupted_records + 1
log.warning('Could not interpret timestamp on line {0}. '
'Skipping.'.format(line_no))
return 0
if time > last_unique_items_of[user][item]:
last_unique_items_of[user][item] = time
transactions.append((time.isoformat(), user, item))
return 1
def log_corrupted_transaction():
line_number = number_of_transactions + number_of_corrupted_records + 1
log.warning('Transaction on line {0} contains empty fields. '
'Skipping.'.format(line_number))
return 0
process = {True : process_transaction_time,
False: log_corrupted_transaction}
with open(file) as stream:
for transaction in stream:
try:
timestamp, user, item = transaction.rstrip().split(separator)
except ValueError:
number_of_corrupted_records += 1
line = number_of_transactions + number_of_corrupted_records
log.warning('Could not interpret transaction on line {0}. '
'Skipping.'.format(line))
else:
complete_record = all((timestamp, user, item))
success = process[complete_record]()
number_of_transactions += success
number_of_corrupted_records += 1 - success
return (number_of_transactions,
number_of_corrupted_records,
finalized(last_unique_items_of),
transactions)
def check_string_type_of(separator):
if not isinstance(separator, str):
log.error('Attempt to set separator argument to non-string type.')
raise TypeError('Separator argument must be a string!')
def depending_on(fmt=None):
def fromstring(timestamp):
try:
time = dt.datetime.strptime(timestamp, fmt)
except ValueError:
log.warning('Failed to read timestamp. Check that it adheres to '
'the given format "{0}".'.format(fmt))
raise ValueError
return time
def fromstamp(timestamp):
try:
time = int(timestamp)
except ValueError:
log.warning('Failed to convert UNIX epoch timestamp to integer.')
raise ValueError
try:
converted = dt.datetime.fromtimestamp(time)
except OverflowError:
log.warning('Integer is not a valid UNIX epoch timestamp.')
raise OverflowError
return converted
return fromstring if fmt else fromstamp
def finalized(last_unique):
last_unique_items = {user: {item: time.isoformat()
for item, time in article.items()}
for user, article in last_unique.items()}
return last_unique_items
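# --- Editor's note: hypothetical usage sketch, not part of the original module. ---
# from_csv() expects "timestamp;user;item" rows; with fmt=None the timestamp is
# interpreted as a UNIX epoch. The file name and rows are made up for illustration.
if __name__ == '__main__':
    with open('transactions.csv', 'w') as sample_file:
        sample_file.write('1331072795;user1;itemA\n'
                          '1331074425;user1;itemB\n'
                          '1331075000;user2;itemA\n')
    n_ok, n_bad, last_unique, transactions = from_csv('transactions.csv', separator=';')
    print(n_ok, 'transactions read,', n_bad, 'corrupted')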
|
yedivanseven/bestPy
|
datastructures/traintest/read/from_csv.py
|
Python
|
gpl-3.0
| 3,459
|
# -*- coding: iso-8859-15 -*-
# (c) 2011-2012 Roland Kindermann
#
# This file is part of the Aalto Timed Model Checker ATMOC.
#
# ATMOC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ATMOC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ATMOC. If not, see <http://www.gnu.org/licenses/>.
import random, readline, fractions
import expressions, byices, config, brind, states, signal, sys
DISCRETE_STEP = 0
ELAPSE_STEP = 1
class DelayBound(object):
def __init__(self, bound, strict=None):
assert bound == None or isinstance(bound, (int, long, fractions.Fraction))
assert strict == None or isinstance(strict, bool)
assert (bound == None) == (strict == None)
assert bound >= 0
assert bound != 0 or not strict
self.bound = bound
self.strict = strict
def __str__(self):
if self.bound == None:
return 'unbounded'
else:
return '%s %s unit%s' % ('less than' if self.strict else 'at most', str(self.bound), 's' if self.bound != 1 else '')
def satisfied(self, num):
return self.bound == None or num < self.bound or (num == self.bound and not self.strict)
def num_to_letters(num):
assert isinstance(num, (int, long))
assert num >= 0
if num == 0:
return 'A'
else:
ret = ''
while num > 0:
ret = chr(ord('A') + (num % 26)) + ret
num /= 26
return ret
def letters_to_num(strn):
assert isinstance(strn, basestring)
strn = strn.lower()
assert all((ord('a') <= ord(x) <= ord('z')) for x in strn)
return sum(((ord(x) - ord('a')) * (26**(len(strn) - 1 - i))) for i, x in enumerate(strn))
class Interactor(object):
def __init__(self, variables, clockmax, init, trans, invar):
assert isinstance(variables, set)
assert all(isinstance(x, expressions.Variable) for x in variables)
assert isinstance(clockmax, dict)
assert all(isinstance(x, expressions.Variable) for x in clockmax.keys())
assert all(isinstance(x, (int, long, fractions.Fraction)) for x in clockmax.values())
assert all(x.type == expressions.REAL for x in clockmax.keys())
assert isinstance(init, expressions.Expression)
self.varcount = len(variables)
self.clockcount = len(clockmax)
self.variables = [] #Soon to be filled
self.clockmax = []
self.init = init
self.trans = trans
self.invar = invar
# Variables
stfnc = lambda x, y : cmp(x.name, y.name)
for clk in sorted(clockmax, stfnc):
self.variables.append(clk)
self.clockmax.append(clockmax[clk])
self.clockcount = len(self.variables)
assert len(self.clockmax) == self.clockcount
for var in sorted(variables, stfnc):
self.variables.append(var)
self.varcount = len(self.variables)
### Initial stuff
def init_yices(self):
self.yi = byices.Yices(config.yicesfile)
assert len(self.yi.vars) == 0
for v in self.variables:
self.yi.add_var(v.name, v.type)
for v in self.variables:
self.yi.add_var(v.name + brind.PRIME_SUFFIX, v.type)
assert len(self.yi.vars) == len(self.variables) * 2
if self.clockmax:
self.deltaind = len(self.yi.vars)
self.yi.add_var(brind.DELTAVAR_NAME, expressions.REAL)
self.yi.encode(self.trans) # to get variables allocated
### State searching
def full_states(self, s):
'Replaces all None values with actual values'
s += [None] * (self.varcount - len(s))
# Find combinations
svals = []
for i in xrange(len(s)):
if s[i] == None:
if self.yi.vtypes[i] == expressions.BOOLEAN:
svals.append([False, True])
elif self.yi.vtypes[i] == expressions.REAL:
svals.append([0])
else:
raise Exception('Unsupported type: self.yi.vtypes[i]')
else:
svals.append([s[i]])
assert len(svals) == len(s)
assert len(svals) >= 1
# Products
comb = [[]]
for vals in svals:
ncomb = []
for c in comb:
for val in vals:
ncomb.append(c + [val])
comb = ncomb
return [states.State(x, False, self) for x in comb]
def get_all_states(self, primed):
ret = []
while self.yi.check():
# Get states
s = states.State(self.yi.get_model(), primed, self)
ret += self.full_states(s)
# Encode negation minus clocks
for i in xrange(self.clockcount):
s[i] = None
enc = s.encode_exact(self.yi, primed)
if len(enc) == 0: #'Anything possible'
break
elif len(enc) == 1:
enc = enc[0]
else:
enc = enc[0].mk_and(enc[1:])
self.yi.assertion(enc.mk_not())
return ret
def get_delay_bound(self):
'Returns delay bound for unbounded'
# Find relevant values
if not self.clockmax:
return None
relevant_values = [0]
for ci in xrange(self.clockcount):
mx = self.clockmax[ci]
v = self.state[ci]
assert v != None
rval = mx - v
while rval > 0:
relevant_values.append(rval)
rval -= 1
relevant_values = sorted(set(relevant_values))
# Check them
try:
self.yi.push()
self.yi.assertion(self.yi.encode(self.invar))
for i in xrange(len(relevant_values)):
# Values
rv = relevant_values[i]
last = i >= len(relevant_values) - 1
if last:
rvp = rv + fractions.Fraction(1, 2)
else:
rvp = (rv + relevant_values[i+1]) / fractions.Fraction(2)
# rv
try:
self.yi.push()
for enc in self.state.elapse(rv).encode_exact(self.yi, False):
self.yi.assertion(enc)
if not self.yi.check():
return DelayBound(rv, True)
finally:
self.yi.pop()
#rvp
try:
self.yi.push()
for enc in self.state.elapse(rvp).encode_exact(self.yi, False):
self.yi.assertion(enc)
if not self.yi.check():
return DelayBound(rv, False)
finally:
self.yi.pop()
finally:
self.yi.pop()
return DelayBound(None)
## Selection
def select_state(self, typ, states, delay_bound=None):
'Returns selected state (or None to quit)'
# Print options
if (not states) and ((delay_bound == None ) or (delay_bound.bound == 0)):
print 'DEADLOCK -- No %s state' % typ
sys.exit(0)
if states:
print 'Please select a%s %s state' % ('n' if typ[0].lower() in 'aiouey' else '', typ)
strns = [str(s) for s in states]
multiline = any(('\n' in x) for x in strns)
for i, strn in enumerate(strns):
print ' State %s: %s%s' % (num_to_letters(i), strn, '\n' if multiline else '')
if delay_bound != None and (delay_bound.bound == None or delay_bound.bound > 0):
print '%s perform a time elapse step by entering a number (%s)' % ('Or' if states else 'Please', str(delay_bound))
# State selection
try:
while True:
try:
print
inp = raw_input('Your choice: ')
except EOFError, e:
return None
oinp = inp
inp = inp.lower().strip()
if inp == '':
if states:
selected = random.randint(0, len(states) - 1)
print 'Selected state', num_to_letters(selected), 'randomly'
return states[selected]
elif all((ord('a') <= ord(c) <= ord('z')) for c in inp):
num = letters_to_num(inp)
if 0 <= num < len(states):
return states[num]
elif delay_bound != None:
try:
if '/' in inp:
inp = inp.split('/')
inp = fractions.Fraction(int(inp[0]), int(inp[1]))
elif '.' in inp:
[l, r] = inp.split('.')
den = 10**len(r)
if len(l) == 0:
l = '0'
inp = fractions.Fraction(int(l) * den + int(r), den) # Avoid using fractions to avoid rounding errors
else:
inp = int(inp)
if inp >= 0 and delay_bound.satisfied(inp):
return self.state.elapse(inp)
except ValueError, e:
pass
print 'INVALID SELECTION:', repr(oinp)
finally:
print
assert False
def select_successor(self):
# Delay bound
delay_bound = self.get_delay_bound()
# Successors
self.yi.push()
for enc in self.state.encode_exact(self.yi, False):
self.yi.assertion(enc)
self.yi.assertion(self.yi.encode(self.invar))
self.yi.assertion(self.yi.encode(self.trans))
if self.clockmax:
zero = self.yi.context.mk_num(0)
self.yi.assertion(self.yi.vars[self.deltaind].mk_eq(zero))
for i in xrange(self.clockcount):
self.yi.assertion(self.yi.vars[i].mk_ge(zero))
self.yi.assertion(self.yi.vars[i + self.varcount].mk_ge(zero))
successors = self.get_all_states(True)
self.yi.pop()
# Choice
self.state = self.select_state('successor', successors, delay_bound)
def get_initial_states(self):
self.yi.push()
self.yi.assertion(self.yi.encode(self.init))
self.yi.assertion(self.yi.encode(self.invar))
if self.clockmax:
# Note that all clocks are set to zero due to real valued variables being unused leading to trouble
zero = self.yi.context.mk_num(0)
for i in xrange(self.clockcount):
self.yi.assertion(self.yi.encode(expressions.AstExpression('=', self.variables[i], '0')))
self.yi.assertion(self.yi.vars[i + self.varcount].mk_eq(zero))
self.yi.assertion(self.yi.vars[self.deltaind].mk_eq(zero))
ret = self.get_all_states(False)
self.yi.pop()
return ret
### Control flow
def print_instructions(self):
print 'Commands:'
print ' <number> : Select option <number>. Supported formats: 1.5, 3/2'
print ' <enter> : Select random option'
print ' EOF : Quit'
print
def run(self):
prev_sig = signal.signal(signal.SIGINT, lambda _, __ : sys.exit(0))
try:
self.print_instructions()
self.init_yices()
inits = self.get_initial_states()
self.state = self.select_state('initial', inits)
while self.state != None:
print 'Current state:', self.state
print
self.select_successor()
finally:
signal.signal(signal.SIGINT, prev_sig)
if __name__ == '__main__':
for n in xrange(100):
strn = num_to_letters(n)
print n, strn, letters_to_num(strn)
|
dc-aalto/atmoc
|
interactive.py
|
Python
|
gpl-3.0
| 10,094
|