| repo_name (string, 5–92 chars) | path (string, 4–221 chars) | copies (string, 19 classes) | size (string, 4–6 chars) | content (string, 766–896k chars) | license (string, 15 classes) | hash (int64) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
MSLNZ/msl-equipment
|
msl/equipment/resources/thorlabs/kinesis/filter_flipper.py
|
1
|
16157
|
"""
This module provides all the functionality required to control a
Filter Flipper (MFF101, MFF102).
"""
from ctypes import byref, c_int64
from msl.equipment.resources import register
from msl.equipment.resources.utils import WORD, DWORD
from msl.equipment.resources.thorlabs.kinesis.motion_control import MotionControl
from msl.equipment.resources.thorlabs.kinesis.api_functions import FilterFlipper_FCNS
from msl.equipment.resources.thorlabs.kinesis.structs import FF_IOSettings
from msl.equipment.resources.thorlabs.kinesis.enums import FF_IOModes, FF_SignalModes
@register(manufacturer=r'Thorlabs', model=r'MFF10[1|2]')
class FilterFlipper(MotionControl):
MIN_TRANSIT_TIME = 300
MAX_TRANSIT_TIME = 2800
MIN_PULSE_WIDTH = 10
MAX_PULSE_WIDTH = 200
def __init__(self, record):
"""A wrapper around ``Thorlabs.MotionControl.FilterFlipper.dll``.
The :attr:`~msl.equipment.record_types.ConnectionRecord.properties`
for a FilterFlipper connection supports the following key-value pairs in the
:ref:`connections-database`::
'device_name': str, the device name found in ThorlabsDefaultSettings.xml [default: None]
Do not instantiate this class directly. Use the :meth:`~.EquipmentRecord.connect`
method to connect to the equipment.
Parameters
----------
record : :class:`~msl.equipment.record_types.EquipmentRecord`
A record from an :ref:`equipment-database`.
"""
name = record.connection.properties.get('device_name')
if name is None:
record.connection.properties['device_name'] = 'MFF Filter Flipper'
super(FilterFlipper, self).__init__(record, FilterFlipper_FCNS)
def open(self):
"""Open the device for communication.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_Open(self._serial)
def close(self):
"""Disconnect and close the device."""
self.sdk.FF_Close(self._serial)
def check_connection(self):
"""Check connection.
Returns
-------
:class:`bool`
Whether the USB is listed by the FTDI controller.
"""
return self.sdk.FF_CheckConnection(self._serial)
def identify(self):
"""Sends a command to the device to make it identify itself."""
self.sdk.FF_Identify(self._serial)
def get_hardware_info(self):
"""Gets the hardware information from the device.
Returns
-------
:class:`.structs.TLI_HardwareInformation`
The hardware information.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
return self._get_hardware_info(self.sdk.FF_GetHardwareInfo)
def get_firmware_version(self):
"""Gets version number of the device firmware.
Returns
-------
:class:`str`
The firmware version.
"""
return self.to_version(self.sdk.FF_GetFirmwareVersion(self._serial))
def get_software_version(self):
"""Gets version number of the device software.
Returns
-------
:class:`str`
The device software version.
"""
return self.to_version(self.sdk.FF_GetSoftwareVersion(self._serial))
def load_settings(self):
"""Update device with stored settings.
The settings are read from ``ThorlabsDefaultSettings.xml``, which
gets created when the Kinesis software is installed.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_LoadSettings(self._serial)
def load_named_settings(self, settings_name):
"""Update device with named settings.
Parameters
----------
settings_name : :class:`str`
The name of the device to load the settings for. Examples for the value
of `settings_name` can be found in ``ThorlabsDefaultSettings.xml``, which
gets created when the Kinesis software is installed.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_LoadNamedSettings(self._serial, settings_name.encode())
def persist_settings(self):
"""Persist the devices current settings.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_PersistSettings(self._serial)
def get_number_positions(self):
"""Get number of positions available from the device.
Returns
-------
:class:`int`
The number of positions.
"""
return self.sdk.FF_GetNumberPositions(self._serial)
def home(self):
"""Home the device.
Homing the device will set the device to a known state and determine
the home position.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_Home(self._serial)
def move_to_position(self, position):
"""Move the device to the specified position (index).
Parameters
----------
position : :class:`int`
The required position. Must be 1 or 2.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_MoveToPosition(self._serial, position)
def get_position(self):
"""Get the current position.
Returns
-------
:class:`int`
The position, 1 or 2 (can be 0 during a move).
"""
return self.sdk.FF_GetPosition(self._serial)
def get_io_settings(self):
"""Gets the I/O settings from filter flipper.
Returns
-------
:class:`~.structs.FF_IOSettings`
The Filter Flipper I/O settings.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
settings = FF_IOSettings()
self.sdk.FF_GetIOSettings(self._serial, byref(settings))
return settings
def request_io_settings(self):
"""Requests the I/O settings from the filter flipper.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_RequestIOSettings(self._serial)
def set_io_settings(self, transit_time=500,
oper1=FF_IOModes.FF_ToggleOnPositiveEdge, sig1=FF_SignalModes.FF_InputButton, pw1=200,
oper2=FF_IOModes.FF_ToggleOnPositiveEdge, sig2=FF_SignalModes.FF_OutputLevel, pw2=200):
"""
Sets the settings on filter flipper.
Parameters
----------
transit_time : :class:`int`, optional
Time taken to get from one position to other in milliseconds.
oper1 : :class:`~.enums.FF_IOModes`, optional
I/O 1 Operating Mode.
sig1 : :class:`~.enums.FF_SignalModes`, optional
I/O 1 Signal Mode.
pw1 : :class:`int`, optional
Digital I/O 1 pulse width in milliseconds.
oper2 : :class:`~.enums.FF_IOModes`, optional
I/O 2 Operating Mode.
sig2 : :class:`~.enums.FF_SignalModes`, optional
I/O 2 Signal Mode.
pw2 : :class:`int`, optional
Digital I/O 2 pulse width in milliseconds.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
if transit_time > self.MAX_TRANSIT_TIME or transit_time < self.MIN_TRANSIT_TIME:
msg = 'Invalid transit time value of {} ms; {} <= transit_time <= {}'.format(
transit_time, self.MIN_TRANSIT_TIME, self.MAX_TRANSIT_TIME)
self.raise_exception(msg)
if pw1 > self.MAX_PULSE_WIDTH or pw1 < self.MIN_PULSE_WIDTH:
msg = 'Invalid digital I/O 1 pulse width of {} ms; {} <= pw <= {}'.format(
pw1, self.MIN_PULSE_WIDTH, self.MAX_PULSE_WIDTH)
self.raise_exception(msg)
if pw2 > self.MAX_PULSE_WIDTH or pw2 < self.MIN_PULSE_WIDTH:
msg = 'Invalid digital I/O 2 pulse width of {} ms; {} <= pw <= {}'.format(
pw2, self.MIN_PULSE_WIDTH, self.MAX_PULSE_WIDTH)
self.raise_exception(msg)
settings = FF_IOSettings()
settings.transitTime = int(transit_time)
settings.digIO1OperMode = self.convert_to_enum(oper1, FF_IOModes, prefix='FF_')
settings.digIO1SignalMode = self.convert_to_enum(sig1, FF_SignalModes, prefix='FF_')
settings.digIO1PulseWidth = int(pw1)
settings.digIO2OperMode = self.convert_to_enum(oper2, FF_IOModes, prefix='FF_')
settings.digIO2SignalMode = self.convert_to_enum(sig2, FF_SignalModes, prefix='FF_')
settings.digIO2PulseWidth = int(pw2)
self.sdk.FF_SetIOSettings(self._serial, byref(settings))
def get_transit_time(self):
"""Gets the transit time.
Returns
-------
:class:`int`
The transit time in milliseconds.
"""
return self.sdk.FF_GetTransitTime(self._serial)
def set_transit_time(self, transit_time):
"""Sets the transit time.
Parameters
----------
transit_time : :class:`int`
The transit time in milliseconds.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
if transit_time > self.MAX_TRANSIT_TIME or transit_time < self.MIN_TRANSIT_TIME:
msg = 'Invalid transit time value of {} ms; {} <= transit_time <= {}'.format(
transit_time, self.MIN_TRANSIT_TIME, self.MAX_TRANSIT_TIME)
self.raise_exception(msg)
self.sdk.FF_SetTransitTime(self._serial, int(transit_time))
def request_status(self):
"""Request status bits.
This needs to be called to get the device to send its current status.
This is called automatically if Polling is enabled for the device using
:meth:`.start_polling`.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_RequestStatus(self._serial)
def get_status_bits(self):
"""Get the current status bits.
This returns the latest status bits received from the device. To get
new status bits, use :meth:`.request_status` or use the polling
function, :meth:`.start_polling`.
Returns
-------
:class:`int`
The status bits from the device.
"""
return self.sdk.FF_GetStatusBits(self._serial)
def start_polling(self, milliseconds):
"""Starts the internal polling loop.
This function continuously requests position and status messages.
Parameters
----------
milliseconds : :class:`int`
The polling rate, in milliseconds.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_StartPolling(self._serial, int(milliseconds))
def polling_duration(self):
"""Gets the polling loop duration.
Returns
-------
:class:`int`
The time between polls in milliseconds or 0 if polling is not active.
"""
return self.sdk.FF_PollingDuration(self._serial)
def stop_polling(self):
"""Stops the internal polling loop."""
self.sdk.FF_StopPolling(self._serial)
def time_since_last_msg_received(self):
"""Gets the time, in milliseconds, since tha last message was received.
This can be used to determine whether communications with the device are still good.
Returns
-------
:class:`int`
The time, in milliseconds, since the last message was received.
"""
ms = c_int64()
self.sdk.FF_TimeSinceLastMsgReceived(self._serial, byref(ms))
return ms.value
def enable_last_msg_timer(self, enable, msg_timeout=0):
"""Enables the last message monitoring timer.
This can be used to determine whether communications with the device are still good.
Parameters
----------
enable : :class:`bool`
:data:`True` to enable monitoring otherwise :data:`False` to disable.
msg_timeout : :class:`int`, optional
The last message error timeout in ms. Set to 0 to disable.
"""
self.sdk.FF_EnableLastMsgTimer(self._serial, enable, msg_timeout)
def has_last_msg_timer_overrun(self):
"""Queries if the time since the last message has exceeded the
``lastMsgTimeout`` set by :meth:`.enable_last_msg_timer`.
This can be used to determine whether communications with the device are still good.
Returns
-------
:class:`bool`
:data:`True` if last message timer has elapsed or
:data:`False` if monitoring is not enabled or if time of last message
received is less than ``msg_timeout``.
"""
return self.sdk.FF_HasLastMsgTimerOverrun(self._serial)
def request_settings(self):
"""Requests that all settings are downloaded from the device.
This function requests that the device upload all its settings to the
DLL.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_RequestSettings(self._serial)
def clear_message_queue(self):
"""Clears the device message queue."""
self.sdk.FF_ClearMessageQueue(self._serial)
def register_message_callback(self, callback):
"""Registers a callback on the message queue.
Parameters
----------
callback : :class:`~msl.equipment.resources.thorlabs.kinesis.callbacks.MotionControlCallback`
A function to be called whenever messages are received.
"""
self.sdk.FF_RegisterMessageCallback(self._serial, callback)
def message_queue_size(self):
"""Gets the size of the message queue.
Returns
-------
:class:`int`
The number of messages in the queue.
"""
return self.sdk.FF_MessageQueueSize(self._serial)
def get_next_message(self):
"""Get the next Message Queue item. See :mod:`.messages`.
Returns
-------
:class:`int`
The message type.
:class:`int`
The message ID.
:class:`int`
The message data.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
message_type = WORD()
message_id = WORD()
message_data = DWORD()
self.sdk.FF_GetNextMessage(self._serial, byref(message_type), byref(message_id), byref(message_data))
return message_type.value, message_id.value, message_data.value
def wait_for_message(self):
"""Wait for next Message Queue item. See :mod:`.messages`.
Returns
-------
:class:`int`
The message type.
:class:`int`
The message ID.
:class:`int`
The message data.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
message_type = WORD()
message_id = WORD()
message_data = DWORD()
self.sdk.FF_WaitForMessage(self._serial, byref(message_type), byref(message_id), byref(message_data))
return message_type.value, message_id.value, message_data.value
if __name__ == '__main__':
from msl.equipment.resources.thorlabs.kinesis import _print
_print(FilterFlipper, FilterFlipper_FCNS, 'Thorlabs.MotionControl.FilterFlipper.h')
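# Illustrative usage sketch (assumptions, not taken from this module): the record
# fields, serial number and DLL address below are placeholders showing how a
# FilterFlipper is typically obtained through the msl-equipment connect() workflow.
#
# from msl.equipment import Backend, ConnectionRecord, EquipmentRecord
#
# record = EquipmentRecord(
#     manufacturer='Thorlabs',
#     model='MFF101',                # matches the @register pattern MFF10[1|2]
#     serial='37000001',             # hypothetical serial number
#     connection=ConnectionRecord(
#         address='SDK::Thorlabs.MotionControl.FilterFlipper.dll',
#         backend=Backend.MSL,
#     ),
# )
#
# flipper = record.connect()         # returns a FilterFlipper instance
# flipper.home()
# flipper.move_to_position(2)
# print(flipper.get_position())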
|
mit
| -1,277,002,313,023,637,200
| 31.574597
| 111
| 0.594355
| false
| 4.123788
| false
| false
| false
|
Tailszefox/scrabblesolve
|
binaire.py
|
1
|
6312
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os.path
import sys
# Tree node holding one letter
class Noeud(object):
"""
Constructeur
lettre : lettre stockée dans le nœud
mot : True si le chemin représente un mot du dictionnaire, False sinon
fd : lettre située au même niveau et après dans l'ordre alphabétique
fm : lettre située au niveau suivant
"""
def __init__(self, lettre = None, motComplet = None):
self._lettre = None
self._mot = False
self._motComplet = None
self._fd = None
self._fm = None
self.lettre = lettre
self.motComplet = motComplet
# Getters and setters
def setLettre(self, lettre):
self._lettre = lettre
def getLettre(self):
return self._lettre
def setFd(self, fd):
self._fd = fd
def getFd(self):
return self._fd
def setFm(self, fm):
self._fm = fm
def getFm(self):
return self._fm
def setMot(self, mot):
self._mot = mot
def getMot(self):
return self._mot
def setMotComplet(self, motComplet):
self._motComplet = motComplet
def getMotComplet(self):
return self._motComplet
lettre = property(getLettre, setLettre)
estMot = property(getMot, setMot)
motComplet = property(getMotComplet, setMotComplet)
fd = property(getFd, setFd)
fm = property(getFm, setFm)
# Binary tree
class Arbre(object):
"""
Constructor
racine : root node of the tree
fichier : dictionary file
hash : hash table mapping word prefixes to their nodes
"""
def __init__(self, fichier = None):
self._racine = None
self._fichier = None
self._hash = None
self.charger(fichier)
# Getters and setters
def getFichier(self):
return self._fichier
def setFichier(self, fichier):
self._fichier = fichier
def getRacine(self):
return self._racine
def setRacine(self, racine):
self._racine = racine
def setHash(self, h):
self._hash = h
def getHash(self):
return self._hash
fichier = property(getFichier, setFichier)
racine = property(getRacine, setRacine)
hash = property(getHash, setHash)
""" Chargement d'un fichier de dictionnaire """
def charger(self, fichier):
if not os.path.exists(fichier):
sys.exit('Le dictionnaire ' + fichier + ' n\'existe pas.')
self.hash = {}
self.fichier = fichier
self.racine = self.chargerMots()
def chargerMots(self):
racine = None
# For each word in the dictionary
for mot in open(self.fichier):
# Strip the trailing \n
mot = mot[:-1]
noeud = None
i = 1
# Look for the longest prefix of the word that already exists in the hash table
motDecoupe = mot[0:-i]
while(motDecoupe and not noeud):
try:
noeud = self.hash[motDecoupe]
except:
# The prefix does not exist, drop one letter from it and try again
i += 1
motDecoupe = mot[0:-i]
# No prefix exists, insert the whole word
if(not motDecoupe):
racine = self.inserer(racine, mot, "")
# A prefix was found, start the insertion from the prefix node, adding only the part of the word that does not exist yet
else:
noeud.fm = self.inserer(noeud.fm, mot[-i:], motDecoupe)
return racine
"""
Insertion d'un nœud
noeud : noeud à partir duquel démarrer l'ajout
mot : mot à ajouter (si 'noeud' n'est pas la racine, il ne s'agit pas d'un mot entier)
chemin : chaine représentant le chemin parcouru pour arriver à 'noeud' (vide si noeud est la racine)
"""
def inserer(self, noeud, mot, chemin):
# The node does not exist, create it and add it to the hash table
if noeud is None:
chemin += mot[0]
noeud = Noeud(mot[0], chemin)
self.hash[chemin] = noeud
# We are on the node matching the current letter
if (mot[0] == noeud.lettre):
# The whole word has been inserted, estMot becomes True
if (len(mot) == 1):
noeud.estMot = True
# Insert the rest of the word
else:
noeud.fm = self.inserer(noeud.fm, mot[1:], chemin)
# Not on the node matching the current letter, continue the insertion to the right
else:
noeud.fd = self.inserer(noeud.fd, mot, chemin)
return noeud
""" Recherche d'un mot dans l'arbre """
def rechercher(self, mot, noeuds = None):
estMot = False
suivants = []
# If no starting node is given, start from the root
if(not noeuds):
noeuds = [self.racine]
# For each node from which the search should be started
for noeud in noeuds:
estMotActuel, suivant = self.rechercherMot(noeud, mot)
# If at least one word is found, estMot becomes the current node holding the complete word
if(estMotActuel is not False):
estMot = estMotActuel
# Extend the list of nodes from which to continue the search (with mot as prefix)
suivants += suivant
return estMot, suivants
def rechercherMot(self, noeudA, mot):
estMotM = False
estMotD = False
suivantM = []
suivantD = []
# If the node exists
if(noeudA):
lettre = noeudA.lettre
estMot = noeudA.estMot
fmA = noeudA.fm
fdA = noeudA.fd
# If the node matches the current letter (or the letter is a joker)
if (mot[0] == '.' or mot[0] == lettre):
# Found the node corresponding to the word
if(len(mot) == 1):
# This node has a child, keep it to start a later search from there
if(fmA):
suivantM.append(fmA)
# The path followed corresponds to a word of the dictionary
if(estMot):
estMotM = noeudA
# Continue searching the word with the next letter if we are not at the end
else:
if(fmA):
estMotM, suivantM = self.rechercherMot(fmA, mot[1:])
# If the node does not match the current letter (or the letter is a joker), continue the search to the right
if (mot[0] == '.' or mot[0] > lettre):
if(fdA):
estMotD, suivantD = self.rechercherMot(fdA, mot)
# Merge the two node lists (only useful when mot[0] is a joker)
suivant = suivantM + suivantD
# If a word was found to the right or in the middle (or both), keep the node corresponding to that word
if(estMotM):
estMot = estMotM
elif(estMotD):
estMot = estMotD
else:
estMot = False
return estMot, suivant
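# Illustrative usage sketch (assumptions, not taken from this file): 'dico.txt'
# is a hypothetical dictionary file with one word per line.
#
# arbre = Arbre('dico.txt')
# noeud, suivants = arbre.rechercher('maison')
# if noeud:
#     print(noeud.motComplet)        # 'maison' is in the dictionary
# # A '.' acts as a blank tile: rechercher('ma.son') matches any letter in that slot.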
|
cc0-1.0
| 6,933,392,178,072,535,000
| 23.625984
| 129
| 0.667306
| false
| 2.429126
| false
| false
| false
|
llvm/llvm-zorg
|
zorg/buildbot/builders/annotated/util.py
|
1
|
3453
|
from __future__ import print_function
import errno
import os
import re
import shutil
import subprocess
import sys
def clean_dir(path):
"""
Removes directory at path (and all its subdirectories) if it exists,
and creates an empty directory in its place.
"""
try:
rmtree(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
mkdirp(path)
def cmake_pjoin(*args):
"""
Join paths like safe_pjoin, but replace backslashes with forward
slashes on platforms where they are path separators. This prevents
CMake from choking when trying to decode what it thinks are escape
sequences in filenames.
"""
result = safe_pjoin(*args)
if os.sep == '\\':
return result.replace('\\', '/')
else:
return result
def report(msg):
sys.stderr.write(msg + '\n')
sys.stderr.flush()
def report_run_cmd(cmd, shell=False, *args, **kwargs):
"""
Print a command, then execute it using subprocess.check_call.
"""
report('Running: %s' % ((cmd if shell else shquote_cmd(cmd)),))
sys.stderr.flush()
subprocess.check_call(cmd, shell=shell, *args, **kwargs)
def mkdirp(path):
"""Create directory path if it does not already exist."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def rmtree(path):
"""
Remove directory path and all its subdirectories. This differs from
shutil.rmtree() in that it tries to adjust permissions so that deletion
will succeed.
"""
# Some files will not be deletable, so we set permissions that allow
# deletion before we try deleting files.
for root, dirs, files in os.walk(path):
os.chmod(root, 0o755)
for f in files:
p = os.path.join(root, f)
os.chmod(p, 0o644)
os.unlink(p)
# At this point, we should have a tree of deletable directories.
shutil.rmtree(path)
def safe_pjoin(dirname, *args):
"""
Join path components with os.path.join, skipping the first component
if it is None.
"""
if dirname is None:
return os.path.join(*args)
else:
return os.path.join(dirname, *args)
def _shquote_impl(txt, escaped_chars, quoted_chars):
quoted = re.sub(escaped_chars, r'\\\1', txt)
if len(quoted) == len(txt) and not quoted_chars.search(txt):
return txt
else:
return '"' + quoted + '"'
_SHQUOTE_POSIX_ESCAPEDCHARS = re.compile(r'(["`$\\])')
_SHQUOTE_POSIX_QUOTEDCHARS = re.compile('[|&;<>()\' \t\n]')
def shquote_posix(txt):
"""Return txt, appropriately quoted for POSIX shells."""
return _shquote_impl(
txt, _SHQUOTE_POSIX_ESCAPEDCHARS, _SHQUOTE_POSIX_QUOTEDCHARS)
_SHQUOTE_WINDOWS_ESCAPEDCHARS = re.compile(r'(["\\])')
_SHQUOTE_WINDOWS_QUOTEDCHARS = re.compile('[ \t\n]')
def shquote_windows(txt):
"""Return txt, appropriately quoted for Windows's cmd.exe."""
return _shquote_impl(
txt.replace('%', '%%'),
_SHQUOTE_WINDOWS_ESCAPEDCHARS, _SHQUOTE_WINDOWS_QUOTEDCHARS)
def shquote(txt):
"""Return txt, appropriately quoted for use in a shell command."""
if os.name in set(('nt', 'os2', 'ce')):
return shquote_windows(txt)
else:
return shquote_posix(txt)
def shquote_cmd(cmd):
"""Convert a list of shell arguments to an appropriately quoted string."""
return ' '.join(map(shquote, cmd))
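# Illustrative usage sketch (assumptions, not taken from this file):
#
# cmd = ['cmake', '-G', 'Ninja', cmake_pjoin('build', 'llvm')]
# print(shquote_cmd(cmd))    # -> cmake -G Ninja build/llvm (arguments quoted only if needed)
# report_run_cmd(cmd)        # logs "Running: ..." to stderr, then subprocess.check_call(cmd)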
|
apache-2.0
| -2,361,367,775,517,901,300
| 25.767442
| 78
| 0.631045
| false
| 3.638567
| false
| false
| false
|
manuvarkey/cmbautomiser
|
cmbautomiser/openpyxl/cell/read_only.py
|
1
|
3984
|
from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
import re
from openpyxl.compat import unicode, long
from openpyxl.cell import Cell
from openpyxl.utils import get_column_letter
from openpyxl.utils.datetime import from_excel
from openpyxl.styles import is_date_format
from openpyxl.styles.numbers import BUILTIN_FORMATS
FLOAT_REGEX = re.compile(r"\.|[E-e]")
def _cast_number(value):
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None:
return float(value)
return long(value)
class ReadOnlyCell(object):
__slots__ = ('parent', 'row', 'column', '_value', 'data_type', '_style_id')
def __init__(self, sheet, row, column, value, data_type='n', style_id=0):
self.parent = sheet
self._value = None
self.row = row
self.column = column
self.data_type = data_type
self.value = value
self._style_id = style_id
def __eq__(self, other):
for a in self.__slots__:
if getattr(self, a) != getattr(other, a):
return
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<ReadOnlyCell {0!r}.{1}>".format(self.parent.title, self.coordinate)
@property
def shared_strings(self):
return self.parent.shared_strings
@property
def base_date(self):
return self.parent.base_date
@property
def coordinate(self):
column = get_column_letter(self.column)
return "{1}{0}".format(self.row, column)
@property
def style_array(self):
return self.parent.parent._cell_styles[self._style_id]
@property
def number_format(self):
_id = self.style_array.numFmtId
if _id < 164:
return BUILTIN_FORMATS.get(_id, "General")
else:
return self.parent.parent._number_formats[_id - 164]
@property
def font(self):
_id = self.style_array.fontId
return self.parent.parent._fonts[_id]
@property
def fill(self):
_id = self.style_array.fillId
return self.parent.parent._fills[_id]
@property
def border(self):
_id = self.style_array.borderId
return self.parent.parent._borders[_id]
@property
def alignment(self):
_id = self.style_array.alignmentId
return self.parent.parent._alignments[_id]
@property
def protection(self):
_id = self.style_array.protectionId
return self.parent.parent._protections[_id]
@property
def is_date(self):
return self.data_type == 'n' and is_date_format(self.number_format)
@property
def internal_value(self):
return self._value
@property
def value(self):
if self._value is None:
return
if self.data_type == 'n':
if self.style_array:
if is_date_format(self.number_format):
return from_excel(self._value, self.base_date)
return self._value
if self.data_type == 'b':
return self._value == '1'
elif self.data_type in(Cell.TYPE_INLINE, Cell.TYPE_FORMULA_CACHE_STRING):
return unicode(self._value)
elif self.data_type == 's':
return unicode(self.shared_strings[int(self._value)])
return self._value
@value.setter
def value(self, value):
if self._value is not None:
raise AttributeError("Cell is read only")
if value is None:
self.data_type = 'n'
elif self.data_type == 'n':
value = _cast_number(value)
self._value = value
class EmptyCell(object):
__slots__ = ()
value = None
is_date = False
font = None
border = None
fill = None
number_format = None
alignment = None
data_type = 'n'
def __repr__(self):
return "<EmptyCell>"
EMPTY_CELL = EmptyCell()
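# Illustrative note (not taken from this file): _cast_number shows how numeric
# strings read from the worksheet XML are coerced before being returned as values.
#
# _cast_number('42')     # -> 42 (integer branch)
# _cast_number('3.14')   # -> 3.14
# _cast_number('1E-3')   # -> 0.001 (an exponent triggers the float branch)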
|
gpl-3.0
| 7,798,107,740,639,183,000
| 24.703226
| 84
| 0.591616
| false
| 3.702602
| false
| false
| false
|
enobayram/MHFlib
|
MHFPython/scripts/figures.py
|
1
|
3219
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 1 11:30:49 2012
@author: eba
"""
from numpy import *
from scipy import *
from matplotlib.pyplot import *
from MHFPython import *
from plotGaussians import *
def rotate(x,y,rot):
return [x*rot[0,0]+y*rot[0,1],x*rot[1,0]+y*rot[1,1]]
theta = arange(-pi,pi,0.01)
r = 1;
limit = 3.;
[x,y] = [r*cos(theta), r*sin(theta)]
rotOrig = array([[1.,0.],[1.,1.]])
[xorig,yorig] = rotate(x,y,rotOrig)
variances = [0.025,1]
stddevs = sqrt(variances)
rotMax = array([[stddevs[0],0],[0,stddevs[1]]])
[xmax,ymax] = rotate(x,y,rotMax)
figure(1)
hold(False)
Orig = plot(xorig,yorig)
hold(True)
Max = plot(xmax,ymax)
ylim([-limit,limit])
xlim([-limit,limit])
grid(True)
legend([Orig,Max],["Original Gaussian","Maximum Component Size"])
title("2D Gaussian to Split")
rotOrigScaled = inv(rotMax).dot(rotOrig)
[xorigs,yorigs] = rotate(x,y,rotOrigScaled)
rotMaxScaled = inv(rotMax).dot(rotMax)
[xmaxs,ymaxs] = rotate(x,y,rotMaxScaled)
figure(2)
hold(False)
OrigS = plot(xorigs,yorigs)
hold(True)
MaxS = plot(xmaxs,ymaxs)
ylim([-limit,limit])
xlim([-limit,limit])
grid(True)
legend([OrigS,MaxS],["Original Gaussian","Maximum Component Size"])
title("Scaled Coordinates")
POrigScaled = rotOrigScaled.dot(rotOrigScaled.transpose());
eigs,rotDecompose = eig(POrigScaled)
rotIndependent = inv(rotDecompose).dot(rotOrigScaled)
[xind,yind] = rotate(x,y,rotIndependent)
figure(3)
hold(False)
OrigI = plot(xind,yind)
hold(True)
MaxI = plot(xmaxs,ymaxs)
ylim([-limit,limit])
xlim([-limit,limit])
grid(True)
legend([OrigI,MaxI],["Original Gaussian","Maximum Component Size"])
table = SplitTable1('tables/kl1e-1table');
plotvar = eigs[0];
#1D plot of the table entries
lim1d = sqrt(plotvar)*4;
x = arange(-lim1d,lim1d,lim1d/500)
#y = 1/sqrt(2*pi*originalVar)*exp(-1/2*(x*x)/originalVar)
y = 1/sqrt(2*pi*plotvar)*exp(-x*x/(2*plotvar))
fig=figure(4)
hold(False)
orig1d = plot(x, y)
hold(True)
y = zeros_like(x)
entry = table.getUpperEntry(plotvar)
entryvar = entry.variance;
varRatio = plotvar/entryvar;
hyp1d = entry.hypotheses
for gh in hyp1d:
var = gh.cov(0)*varRatio;
mean = gh.mean(0)*sqrt(varRatio);
y=1/sqrt(2*pi*var)*exp(-(x-mean)*(x-mean)/(2*var))*gh.weight
components = plot(x, y, color = 'green')
#savefig('figures/split')
legend([OrigI,MaxI],["Original","Components"])
vO = rotOrig.dot(rotOrig.transpose())
original = GaussianHypothesis3();
assign(original.mean,[0,0,0]);
assign(original.cov,[vO[0,0],vO[0,1],0, vO[1,0],vO[1,1],0, 0,0,1]);
original.weight = 1;
variancesMat = MeanMatrix();
assign(variancesMat, [variances[0],variances[1],2]);
result = GH3list();
mhf = MultiHypDist3();
split(original, result, variancesMat, table);
[x,y] = [r*cos(theta), r*sin(theta)]
figure(5)
hold(False)
Orig = plot(xorig,yorig)
hold(True)
for gh in result:
mean = pyArray(gh.mean)
rotGh = cholesky(pyArray(gh.cov))
[xgh,ygh] = rotate(x,y,rotGh[0:2,0:2])
[xghm, yghm] = [xgh+mean[0], ygh+mean[1]]
plot(xghm,yghm, color='green')
ylim([-limit,limit])
xlim([-limit,limit])
legend([OrigI,MaxI],["Original","Components"])
grid(True)
steps = 100
plotGaussian(original,[0,1],limit,steps,6)
plotGaussians(result,[0,1], limit, steps, 7, 0.)
|
bsd-2-clause
| -8,608,220,335,965,278,000
| 21.047945
| 67
| 0.680646
| false
| 2.43679
| false
| false
| false
|
mocnik-science/osm-python-tools
|
OSMPythonTools/internal/cacheObject.py
|
1
|
4281
|
import hashlib
import os
import time
import ujson
import urllib.request
import OSMPythonTools
class CacheObject:
def __init__(self, prefix, endpoint, cacheDir='cache', waitBetweenQueries=None, jsonResult=True):
self._prefix = prefix
self._endpoint = endpoint
self.__cacheDir = cacheDir
self.__waitBetweenQueries = waitBetweenQueries
self.__lastQuery = None
self.__jsonResult = jsonResult
def query(self, *args, onlyCached=False, shallow=False, **kwargs):
queryString, hashString, params = self._queryString(*args, **kwargs)
filename = self.__cacheDir + '/' + self._prefix + '-' + self.__hash(hashString + ('????' + urllib.parse.urlencode(sorted(params.items())) if params else ''))
if not os.path.exists(self.__cacheDir):
os.makedirs(self.__cacheDir)
if os.path.exists(filename):
with open(filename, 'r') as file:
data = ujson.load(file)
elif onlyCached:
OSMPythonTools.logger.error('[' + self._prefix + '] data not cached: ' + queryString)
return None
elif shallow:
data = shallow
else:
OSMPythonTools.logger.warning('[' + self._prefix + '] downloading data: ' + queryString)
if self._waitForReady() == None:
if self.__lastQuery and self.__waitBetweenQueries and time.time() - self.__lastQuery < self.__waitBetweenQueries:
time.sleep(self.__waitBetweenQueries - time.time() + self.__lastQuery)
self.__lastQuery = time.time()
data = self.__query(queryString, params)
with open(filename, 'w') as file:
ujson.dump(data, file)
result = self._rawToResult(data, queryString, params, shallow=shallow)
if not self._isValid(result):
msg = '[' + self._prefix + '] error in result (' + filename + '): ' + queryString
OSMPythonTools.logger.exception(msg)
raise(Exception(msg))
return result
def deleteQueryFromCache(self, *args, **kwargs):
queryString, hashString, params = self._queryString(*args, **kwargs)
filename = self.__cacheDir + '/' + self._prefix + '-' + self.__hash(hashString + ('????' + urllib.parse.urlencode(sorted(params.items())) if params else ''))
if os.path.exists(filename):
OSMPythonTools.logger.info('[' + self._prefix + '] removing cached data: ' + queryString)
os.remove(filename)
def _queryString(self, *args, **kwargs):
raise(NotImplementedError('Subclass should implement _queryString'))
def _queryRequest(self, endpoint, queryString, params={}):
raise(NotImplementedError('Subclass should implement _queryRequest'))
def _rawToResult(self, data, queryString, params, shallow=False):
raise(NotImplementedError('Subclass should implement _rawToResult'))
def _isValid(self, result):
return True
def _waitForReady(self):
return None
def _userAgent(self):
return '%s/%s (%s)' % (OSMPythonTools.pkgName, OSMPythonTools.pkgVersion, OSMPythonTools.pkgUrl)
def __hash(self, x):
h = hashlib.sha1()
h.update(x.encode('utf-8'))
return h.hexdigest()
def __query(self, requestString, params):
request = self._queryRequest(self._endpoint, requestString, params=params)
if not isinstance(request, urllib.request.Request):
request = urllib.request.Request(request)
request.headers['User-Agent'] = self._userAgent()
try:
response = urllib.request.urlopen(request)
except urllib.request.HTTPError as err:
msg = 'The requested data could not be downloaded. ' + str(err)
OSMPythonTools.logger.exception(msg)
raise Exception(msg)
except:
msg = 'The requested data could not be downloaded. Please check whether your internet connection is working.'
OSMPythonTools.logger.exception(msg)
raise Exception(msg)
encoding = response.info().get_content_charset('utf-8')
r = response.read().decode(encoding)
return ujson.loads(r) if self.__jsonResult else r
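# Illustrative subclass sketch (assumptions, not taken from this module): the
# endpoint and query format are placeholders showing which hooks a concrete,
# cache-backed API wrapper is expected to override.
#
# class MyApi(CacheObject):
#     def __init__(self, **kwargs):
#         super().__init__('myapi', 'https://example.org/api', **kwargs)
#     def _queryString(self, q, **kwargs):
#         # (human-readable query, string hashed for the cache filename, GET params)
#         return q, q, {'q': q}
#     def _queryRequest(self, endpoint, queryString, params={}):
#         return endpoint + '?' + urllib.parse.urlencode(params)
#     def _rawToResult(self, data, queryString, params, shallow=False):
#         return data
#
# MyApi().query('hello')   # downloads once, then answers from the cache directory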
|
gpl-3.0
| -6,900,467,216,144,160,000
| 44.542553
| 165
| 0.61551
| false
| 4.176585
| false
| false
| false
|
blechta/dolfin-tape
|
dolfintape/hat_function.py
|
1
|
4970
|
# Copyright (C) 2015 Jan Blechta
#
# This file is part of dolfin-tape.
#
# dolfin-tape is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dolfin-tape is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with dolfin-tape. If not, see <http://www.gnu.org/licenses/>.
from dolfin import Expression, cpp, FiniteElement, jit, \
vertices, facets, Vertex, not_working_in_parallel
__all__ = ['hat_function_collection', 'hat_function', 'hat_function_grad']
def hat_function_collection(vertex_colors, color, element=None):
"""Return Expression on given element which takes values:
1 ... if vertex_colors[node] == color
0 ... at other nodes
This is well defined just on the Lagrange 1 element (default) and the Discontinuous
Lagrange 1 element.
NOTE: This expression provides a little hack as it lacks continuity across
MPI partition boundaries unless vertex_colors is compatible there. In fact,
this behaviour is needed in FluxReconstructor."""
assert isinstance(vertex_colors, cpp.VertexFunctionSizet)
mesh = vertex_colors.mesh()
if not element:
element = FiniteElement('Lagrange', mesh.ufl_cell(), 1)
assert element.family() in ['Lagrange', 'Discontinuous Lagrange']
assert element.degree() == 1
ufc_element, ufc_dofmap = jit(element, mpi_comm=mesh.mpi_comm())
dolfin_element = cpp.FiniteElement(ufc_element)
e = Expression(hats_cpp_code, element=element, domain=mesh)
e.vertex_colors = vertex_colors
e.color = color
e.dolfin_element = dolfin_element
return e
hats_cpp_code="""
class HatFunctionCollection : public Expression
{
public:
std::shared_ptr<VertexFunction<std::size_t> > vertex_colors;
std::size_t color;
std::shared_ptr<FiniteElement> dolfin_element;
HatFunctionCollection() : Expression() { }
void restrict(double* w, const FiniteElement& element,
const Cell& cell,
const double* vertex_coordinates,
const ufc::cell& ufc_cell) const
{
if ( cell.mesh_id() == vertex_colors->mesh()->id()
&& element.hash() == dolfin_element->hash() )
for (VertexIterator v(cell); !v.end(); ++v)
*(w++) = (*vertex_colors)[v->index()] == color ? 1.0 : 0.0;
else
restrict_as_ufc_function(w, element, cell, vertex_coordinates, ufc_cell);
}
};
"""
def hat_function(vertex):
"""Return Expression on Lagrange degree 1 element which is
1 ... at given 'vertex'
0 ... at other vertices
"""
assert isinstance(vertex, Vertex)
mesh = vertex.mesh()
element = FiniteElement('Lagrange', mesh.ufl_cell(), 1)
ufc_element, ufc_dofmap = jit(element, mpi_comm=mesh.mpi_comm())
ufc_element = make_ufc_finite_element(ufc_element)
dolfin_element = cpp.FiniteElement(ufc_element)
e = Expression(hat_cpp_code, element=element, domain=mesh)
e.vertex = vertex
e.dolfin_element = dolfin_element
return e
hat_cpp_code="""
#include <dolfin/mesh/Vertex.h>
namespace dolfin {
class HatFunction : public Expression
{
public:
MeshEntity vertex;
std::shared_ptr<FiniteElement> dolfin_element;
HatFunction() : Expression() { }
void restrict(double* w, const FiniteElement& element,
const Cell& cell,
const double* vertex_coordinates,
const ufc::cell& ufc_cell) const
{
if ( cell.mesh_id() == vertex.mesh_id()
&& element.hash() == dolfin_element->hash() )
for (VertexIterator v(cell); !v.end(); ++v)
*(w++) = *v == vertex ? 1.0 : 0.0;
else
restrict_as_ufc_function(w, element, cell, vertex_coordinates, ufc_cell);
}
};
}
"""
def hat_function_grad(vertex, cell):
"""Compute L^\infty-norm of gradient of hat function on 'cell'
and value 1 in 'vertex'."""
# TODO: fix using ghosted mesh
not_working_in_parallel("function 'hat_function_grad'")
assert vertex in vertices(cell), "vertex not in cell!"
# Find adjacent facet
f = [f for f in facets(cell) if not vertex in vertices(f)]
assert len(f) == 1, "Something strange with adjacent cell!"
f = f[0]
# Get unit normal
n = f.normal()
n /= n.norm()
# Pick some vertex on facet
# FIXME: Is it correct index in parallel?
facet_vertex_0 = Vertex(cell.mesh(), f.entities(0)[0])
# Compute signed distance from vertex to facet plane
d = (facet_vertex_0.point() - vertex.point()).dot(n)
# Return norm of gradient
assert d != 0.0, "Degenerate cell!"
return 1.0/abs(d)
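# Worked example (not taken from this file): for a P1 hat function the gradient is
# constant on each cell, with magnitude 1/|d| where d is the distance from 'vertex'
# to the opposite facet. On the unit triangle (0,0), (1,0), (0,1) with vertex (0,0),
# the hat function is 1 - x - y, the opposite facet lies on x + y = 1, so d = 1/sqrt(2)
# and hat_function_grad returns sqrt(2), matching |grad(1 - x - y)| = sqrt(2).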
|
gpl-3.0
| -4,066,114,395,403,508,000
| 30.858974
| 80
| 0.663179
| false
| 3.495077
| false
| false
| false
|
Acehaidrey/incubator-airflow
|
tests/cluster_policies/__init__.py
|
1
|
2187
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Callable, List
from airflow.configuration import conf
from airflow.exceptions import AirflowClusterPolicyViolation
from airflow.models.baseoperator import BaseOperator
# [START example_cluster_policy_rule]
def task_must_have_owners(task: BaseOperator):
if not task.owner or task.owner.lower() == conf.get('operators', 'default_owner'):
raise AirflowClusterPolicyViolation(
f'''Task must have non-None non-default owner. Current value: {task.owner}'''
)
# [END example_cluster_policy_rule]
# [START example_list_of_cluster_policy_rules]
TASK_RULES: List[Callable[[BaseOperator], None]] = [
task_must_have_owners,
]
def _check_task_rules(current_task: BaseOperator):
"""Check task rules for given task."""
notices = []
for rule in TASK_RULES:
try:
rule(current_task)
except AirflowClusterPolicyViolation as ex:
notices.append(str(ex))
if notices:
notices_list = " * " + "\n * ".join(notices)
raise AirflowClusterPolicyViolation(
f"DAG policy violation (DAG ID: {current_task.dag_id}, Path: {current_task.dag.filepath}):\n"
f"Notices:\n"
f"{notices_list}"
)
def cluster_policy(task: BaseOperator):
"""Ensure Tasks have non-default owners."""
_check_task_rules(task)
# [END example_list_of_cluster_policy_rules]
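# Illustrative usage sketch (assumptions, not taken from this module): DummyOperator,
# days_ago and the DAG below are placeholders; the configured default owner is
# typically "airflow".
#
# from airflow import DAG
# from airflow.operators.dummy import DummyOperator
# from airflow.utils.dates import days_ago
#
# with DAG(dag_id="example", start_date=days_ago(1)) as dag:
#     ok = DummyOperator(task_id="ok", owner="data-team")
#     bad = DummyOperator(task_id="bad")   # owner falls back to the default owner
#
# task_must_have_owners(ok)    # passes
# task_must_have_owners(bad)   # raises AirflowClusterPolicyViolation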
|
apache-2.0
| 1,157,787,360,719,644,000
| 33.171875
| 105
| 0.703704
| false
| 3.857143
| false
| false
| false
|
vondrejc/FFTHomPy
|
tutorials/03_exact_integration_simple.py
|
1
|
4521
|
from __future__ import division, print_function
print("""
Numerical homogenisation based on exact integration, which is described in
J. Vondrejc, Improved guaranteed computable bounds on homogenized properties
of periodic media by the Fourier-Galerkin method with exact integration,
Int. J. Numer. Methods Eng., 2016.
This is a self-contained tutorial implementing scalar problem in dim=2 or dim=3
on a unit periodic cell Y=(-0.5,0.5)**dim
with a square (2D) or cube (3D) inclusion of size 0.6 (side).
The material is identity I in matrix phase and 11*I in inclusion phase.
""")
import numpy as np
import itertools
from scipy.sparse.linalg import cg, LinearOperator
dim = 3 # number of spatial dimensions
N = dim*(5,) # number of discretization points
dN = tuple(2*np.array(N)-1) # double grid value
vec_shape=(dim,)+dN
# indicator function indicating the phase per grid point (square inclusion)
P = dim*(5,) # material resolution in each spatial dimension
phi = np.zeros(P, dtype='float')
if dim==2:
phi[1:4, 1:4] = 1
elif dim==3:
phi[1:4, 1:4, 1:4] = 1
# material coefficients at grid points
C = np.einsum('ij,...->ij...', 11*np.eye(dim), phi)
C += np.einsum('ij,...->ij...', 1*np.eye(dim), 1-phi)
# tensor products / (inverse) Fourier transform / frequencies
dot = lambda A, B: np.einsum('ij...,j...->i...', A, B)
fft = lambda x, N: np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(x), N))/np.prod(np.array(N))
ifft = lambda x, N: np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(x), N))*np.prod(np.array(N))
freq_fun = lambda N: np.arange(np.fix(-N/2.), np.fix(N/2.+0.5))
freq = [freq_fun(n) for n in dN]
def get_weights(h): # calculation of integral weights of rectangular function
Wphi = np.zeros(dN) # integral weights
for ind in itertools.product(*[range(n) for n in dN]):
Wphi[ind] = np.prod(h)
for ii in range(dim):
Wphi[ind] *= np.sinc(h[ii]*freq[ii][ind[ii]])
return Wphi
def decrease(val, dN): # auxiliary function to remove unnecessary Fourier frequencies
dN=np.array(dN)
N=np.array(val.shape[-dN.size:])
ibeg = np.array(np.fix((N-dN+(dN % 2))/2), dtype=np.int)
iend = np.array(np.fix((N+dN+(dN % 2))/2), dtype=np.int)
if dN.size==2:
return val[:,:,ibeg[0]:iend[0],ibeg[1]:iend[1]]
elif dN.size==3:
return val[:,:,ibeg[0]:iend[0],ibeg[1]:iend[1],ibeg[2]:iend[2]]
## GRID-BASED COMPOSITE ######### evaluate the matrix of Galerkin approximation
hC0 = np.prod(np.array(P))*fft(C, P)
if P == dN:
hCex = hC0
elif P > dN:
hCex = decrease(hC0, dN)
elif P < dN:
factor = np.max(np.ceil(np.array(dN) / np.array(P)))
hCper = np.tile(hC0, int(2*factor-1)*np.ones(dim, dtype=np.int))
hCex = decrease(hCper, dN)
Cex = ifft(np.einsum('ij...,...->ij...', hCex, get_weights(1./np.array(P))), dN).real
## INCLUSION-BASED COMPOSITE #### another expression of Cex
Wraw = get_weights(0.6*np.ones(dim))
"""HINT: the size 0.6 corresponds to the size of square inclusion; it is exactly
the size of topology generated by phi, i.e. 3x3 pixels in 5x5 image of PUC with
PUC size 1; then 0.6 = 3./5.
"""
char_square = ifft(Wraw, dN).real
Cex2 = np.einsum('ij...,...->ij...', 11*np.eye(dim), char_square)
Cex2 += np.einsum('ij...,...->ij...', 1*np.eye(dim), 1.-char_square)
## checking that the Cex2 is the same
print('zero check:', np.linalg.norm(Cex-Cex2))
Gamma = np.zeros((dim,dim)+ tuple(dN)) # zero initialize
for i,j in itertools.product(range(dim),repeat=2):
for ind in itertools.product(*[range(int((dN[k]-N[k])/2), int((dN[k]-N[k])/2+N[k])) for k in range(dim)]):
q = np.array([freq[ii][ind[ii]] for ii in range(dim)]) # frequency vector
if not q.dot(q) == 0: # zero freq. -> mean
Gamma[(i,j)+ind] = -(q[i]*q[j])/(q.dot(q))
# - convert to operators
G = lambda X: np.real(ifft(dot(Gamma, fft(X, dN)), dN)).reshape(-1)
A = lambda x: dot(Cex, x.reshape(vec_shape))
GA = lambda x: G(A(x))
# initiate strain/stress (2nd order tensor for each grid point)
X = np.zeros(vec_shape, dtype=np.float)
x = X.reshape(-1)
# macroscopic value
E = np.zeros_like(X); E[0] = 1.
b = -GA(E.reshape(-1))
# iterative solution of the linear system
Alinoper = LinearOperator(shape=(x.size, x.size), matvec=GA, dtype=np.float)
x, info = cg(A=Alinoper, b=b, x0=X.reshape(-1)) # conjugate gradients
state = x.reshape(vec_shape) + E
flux = dot(Cex, state)
AH_11 = np.sum(flux*state)/np.prod(np.array(dN)) # homogenised properties
print('homogenised coefficient (component 11) =', AH_11)
print('END')
|
mit
| -5,727,445,794,444,432,000
| 38.657895
| 110
| 0.652068
| false
| 2.673566
| false
| false
| false
|
srio/shadow3-scripts
|
script1_ID26.py
|
1
|
12878
|
import numpy
from orangecontrib.xoppy.util.xoppy_undulators import xoppy_calc_undulator_power_density, xoppy_calc_undulator_spectrum
from orangecontrib.xoppy.util.xoppy_xraylib_util import xpower_calc
from orangecontrib.xoppy.util.fit_gaussian2d import fit_gaussian2d, info_params, twoD_Gaussian
from srxraylib.plot.gol import plot, plot_image
import scipy.constants as codata
def calculate_line(photon_energy,undulator_period,N,K,thickness_diamond_mm,distance,slit_h,slit_v,coating,incident_angle_mrad,
do_plot=False):
print("######################### INPUTS ###################################")
print("photon_energy=",photon_energy)
print("undulator_period=",undulator_period)
print("N=",N)
print("K=",K)
print("thickness_diamond_mm=",thickness_diamond_mm)
print("distance=",distance)
print("slit_h=",slit_h)
print("coating=",coating)
print("incident_angle_mrad=",incident_angle_mrad)
print("#######################################################################")
out_dictionary = {}
#
# Spectrum simulation
#
#ULATTICEFILE S28D.mat
#UEPSILONX 1.3166e-10
#UEPSILONY 5e-12
#BETAX = 6.89997
#BETAY = 2.6447
SIGMAX = 30.1836 * 1e-6
SIGMAY = 3.63641 * 1e-6
SIGMAXP = 4.36821 * 1e-6
SIGMAYP = 1.37498 * 1e-6
METHOD = 2 # US=0 URGENT=1 SRW=2
print("\n\n Computing spectrum \n\n")
e, f, spectral_power, cumulated_power = \
xoppy_calc_undulator_spectrum(ELECTRONENERGY=6.0,ELECTRONENERGYSPREAD=0.001,ELECTRONCURRENT=0.2,\
ELECTRONBEAMSIZEH=SIGMAX,ELECTRONBEAMSIZEV=SIGMAY,\
ELECTRONBEAMDIVERGENCEH=SIGMAXP,ELECTRONBEAMDIVERGENCEV=SIGMAYP,\
PERIODID=undulator_period,NPERIODS=N,KV=K,DISTANCE=distance,GAPH=slit_h,GAPV=slit_v,\
PHOTONENERGYMIN=1000.0,PHOTONENERGYMAX=100000.0,PHOTONENERGYPOINTS=5000,METHOD=2,
USEEMITTANCES=1)
power_in_spectrum = f.sum()*1e3*codata.e*(e[1]-e[0])
out_dictionary["power_in_spectrum"] = power_in_spectrum
if do_plot:
plot(e,spectral_power,title="E = %d keV"%photon_energy)
#
# optical system
#
# """
# Apply reflectivities/transmittivities of optical elements on a source spectrum
#
# :param energies: the array with photon energies in eV
# :param source: the spectral intensity or spectral power
# :param substance: a list with descriptors of each optical element material
# :param flags: a list with 0 (filter or attenuator) or 1 (mirror) for all optical elements
# :param dens: a list with densities of o.e. materials. "?" is accepted for looking in the database
# :param thick: a list with the thickness in mm for all o.e.'s. Only applicable for filters
# :param angle: a list with the grazing angles in mrad for all o.e.'s. Only applicable for mirrors
# :param roughness:a list with the roughness RMS in A for all o.e.'s. Only applicable for mirrors
# :param output_file: name of the output file (default=None, no output file)
# :return: a dictionary with the results
# """
optical_system_dictionary = xpower_calc(energies=e,source=spectral_power,
substance=["C",coating,coating],flags=[0,1,1],dens=[3.53,2.33,2.33],
thick=[thickness_diamond_mm,1,1],
angle=[0,incident_angle_mrad,incident_angle_mrad],roughness=[0,0,0],
output_file=None)
for key in optical_system_dictionary.keys():
print(key)
print(optical_system_dictionary["info"])
for i,ilabel in enumerate(optical_system_dictionary["labels"]):
print(i,ilabel)
# 0 Photon Energy [eV]
# 1 Source
# 2 [oe 1] Total CS cm2/g
# 3 [oe 1] Mu cm^-1
# 4 [oe 1] Transmitivity
# 5 [oe 1] Absorption
# 6 Intensity after oe #1
# 7 [oe 2] 1-Re[n]=delta
# 8 [oe 2] Im[n]=beta
# 9 [oe 2] delta/beta
# 10 [oe 2] Reflectivity-s
# 11 [oe 2] Transmitivity
# 12 Intensity after oe #2
# 13 [oe 3] 1-Re[n]=delta
# 14 [oe 3] Im[n]=beta
# 15 [oe 3] delta/beta
# 16 [oe 3] Reflectivity-s
# 17 [oe 3] Transmitivity
# 18 Intensity after oe #3
print(optical_system_dictionary["data"].shape)
# I would be interested in:
#
# - Total Power [W] emitted in the slit aperture: power_in_spectrum
#
# - Absorbed Power [W] by Diamond Window: integral of col6-col1
#
# - Absorbed Power [W] for 1rst and 2nd mirrors: : integral of col112-col6 and integral of col18-col12
#
# - Fitted parameters from the power density distribution calculated in a 5*5 mm slit aperture:
#
# - Maximum value [W/mm2]
#
# - Gaussian Fit parameters for both axis: FWHM [mm]
I0 = numpy.trapz( optical_system_dictionary["data"][1,:], x=e, axis=-1)
I1 = numpy.trapz( optical_system_dictionary["data"][6,:], x=e, axis=-1)
I2 = numpy.trapz( optical_system_dictionary["data"][12,:], x=e, axis=-1)
I3 = numpy.trapz( optical_system_dictionary["data"][18,:], x=e, axis=-1)
print("Source power: ",I0)
print(" after diamond: ",I1)
print(" after M1: ",I2)
print(" after M2: ",I3)
out_dictionary["diamond_absorbed"] = I0-I1
out_dictionary["m1_absorbed"] = I1-I2
out_dictionary["m2_absorbed"] = I2-I3
#
# power density
#
h, v, p, code = xoppy_calc_undulator_power_density(ELECTRONENERGY=6.0,ELECTRONENERGYSPREAD=0.001,ELECTRONCURRENT=0.2,\
ELECTRONBEAMSIZEH=SIGMAX,ELECTRONBEAMSIZEV=SIGMAY,\
ELECTRONBEAMDIVERGENCEH=SIGMAXP,ELECTRONBEAMDIVERGENCEV=SIGMAYP,\
PERIODID=undulator_period,NPERIODS=N,KV=K,DISTANCE=distance,GAPH=5e-3,GAPV=5e-3,\
HSLITPOINTS=101,VSLITPOINTS=101,METHOD=2,USEEMITTANCES=1)
if do_plot:
plot_image(p,h,v,title="power density E = %d keV"%photon_energy)
#
# fit power density
#
print("============= Fitting power density to a 2D Gaussian. ==============\n")
print("Please use these results with care: check if the original data looks like a Gaussian.")
fit_parameters = fit_gaussian2d(p,h,v)
print(info_params(fit_parameters))
H,V = numpy.meshgrid(h,v)
data_fitted = twoD_Gaussian( (H,V), *fit_parameters)
power_in_spectrum = p.sum()*(h[1]-h[0])*(v[1]-v[0])
print(" Total power in the calculated data [W]: ",power_in_spectrum)
power_in_spectrum_fit = data_fitted.sum()*(h[1]-h[0])*(v[1]-v[0])
print(" Total power in the fitted data [W]: ",power_in_spectrum_fit)
# plot_image(data_fitted.reshape((h.size,v.size)),h, v,title="FIT")
print("====================================================\n")
if do_plot:
data_fitted.shape = (h.size,v.size)
plot_image(data_fitted,h,v,title="FITTED power density E = %d keV"%photon_energy)
out_dictionary["fit_parameters"] = fit_parameters
out_dictionary["fit_percent_difference"] = 100 * (power_in_spectrum_fit - power_in_spectrum) / power_in_spectrum
return out_dictionary
if __name__ == "__main__":
Energy_keV = [ 2.043 , 2.44 , 2.44 , 4 , 4 , 5.9 , 5.9 , 5.9 , 10 , 10 , 15 , 24 , 24 , 30 ]
#Undulator = [ U35 , U35 , U35 , U35 , U35 , U35 ,U35 , U35, U35 , U35, U35, U35, U35, U35 ]
lambda0_cm = [ 3.5 , 3.5 , 3.5 , 3.5 , 3.5 , 3.5 ,3.5 , 3.5, 3.5 , 3.5, 3.5, 3.5, 3.5, 3.5 ]
N = [ 47 , 47 , 47 , 47 , 47 , 47 ,47 , 47, 47 , 47, 47, 47, 47, 47 ]
K = [ 2.75 , 2.45 , 2.45 , 1.67 , 1.67 , 1.12 ,1.12 , 1.12 , 1.94 , 1.94 , 1.36 , 1.41 , 1.41 , 1.09 ]
Diamond_window_thickness_mm = [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0, 0.0 , 0.0 ]
Distance_from_source_m = [ 29.2 , 29.2 , 29.2 , 29.2 , 29.2 , 29.2 , 29.2 , 29.2, 29.2 , 29.2, 29.2, 29.2, 29.2, 29.2 ]
H_mm = [ 2.8 , 2.8 , 2.2 , 2.2 , 1.4 , 2.00 , 1.4 , 1.00, 1.00 , 1.00, 1.00, 1.00, 1.00, 1.00 ]
V_mm = [ 0.875 , 0.801 , 0.801 , 0.628 , 0.628 , 0.515 , 0.515 , 0.515, 0.403 , 0.403, 0.333, 0.268, 0.268, 0.243 ]
Coating = [ "Si" , "Cr" , "Si" , "Cr" , "Si" , "Cr" , "Cr" , "Si" , "Si" , "Pd" , "Pd" , "Pt", "Pd" , "Pt"]
Incident_angle_mrad = [ 7 , 7 , 5.5 , 5.5 , 3.5 , 5 , 3.5 , 2.5, 2.5 , 2.5, 2.5, 2.5, 2.5, 2.5 ]
#
# calculation loop
#
out_dictionaries = []
for i,photon_energy in enumerate(Energy_keV):
out_dictionary = calculate_line(photon_energy,1e-2*lambda0_cm[i],N[i],K[i],Diamond_window_thickness_mm[i],
Distance_from_source_m[i],1e-3*H_mm[i],1e-3*V_mm[i],Coating[i],Incident_angle_mrad[i],
do_plot=False)
out_dictionaries.append(out_dictionary)
#
# prepare text output
#
text_output = ""
titles = ["energy_kev","power_in_spectrum","diamond_absorbed","m1_absorbed","m2_absorbed"]
text_output += (" %20s %20s %20s %20s %20s \n")%(tuple(titles))
for i in range(len(out_dictionaries)):
text_output += ("%20d %20.3f %20.3f %20.3f %20.3f \n")%( Energy_keV[i],
out_dictionaries[i]["power_in_spectrum"],
out_dictionaries[i]["diamond_absorbed"],
out_dictionaries[i]["m1_absorbed"],
out_dictionaries[i]["m2_absorbed"])
text_fit = ""
titles_fit = ["energy_kev","Height A: ","center x0:","center y0","sigmax","sigmay","angle","offset","fit difference"]
text_fit += ("%20s %20s %20s %20s %20s %20s %20s %20s %20s\n")%(tuple(titles_fit))
for i in range(len(out_dictionaries)):
text_fit += ("%20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f \n")%(
Energy_keV[i],
out_dictionaries[i]["fit_parameters"][0],
out_dictionaries[i]["fit_parameters"][1],
out_dictionaries[i]["fit_parameters"][2],
out_dictionaries[i]["fit_parameters"][3],
out_dictionaries[i]["fit_parameters"][4],
out_dictionaries[i]["fit_parameters"][5],
out_dictionaries[i]["fit_parameters"][6],
out_dictionaries[i]["fit_percent_difference"])
text_full = ""
titles = ["energy_kev","power_in_spectrum","diamond_absorbed","m1_absorbed","m2_absorbed","Height A: ","sigmax","sigmay","offset","fit difference"]
text_full += (" %20s %20s %20s %20s %20s %20s %20s %20s %20s %20s \n")%(tuple(titles))
for i in range(len(out_dictionaries)):
text_full += ("%20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f \n")%( Energy_keV[i],
out_dictionaries[i]["power_in_spectrum"],
out_dictionaries[i]["diamond_absorbed"],
out_dictionaries[i]["m1_absorbed"],
out_dictionaries[i]["m2_absorbed"],
out_dictionaries[i]["fit_parameters"][0],
out_dictionaries[i]["fit_parameters"][3],
out_dictionaries[i]["fit_parameters"][4],
out_dictionaries[i]["fit_parameters"][6],
out_dictionaries[i]["fit_percent_difference"]
)
print(text_output)
print(text_fit)
print(text_full)
#
# dump to file
#
f = open("script1_ID26_v1.txt",'w')
#f.write(text_output)
#f.write("\n\n\n")
#f.write(text_fit)
#f.write("\n\n\n")
f.write(text_full)
f.close()
print("File written to disk: script1_ID26_v1.txt")
|
mit
| -5,591,580,236,130,774,000
| 44.34507
| 178
| 0.516773
| false
| 3.136386
| false
| false
| false
|
LxiaoGirl/sqlmapTamper
|
unicodetobypasswaf.py
|
1
|
1384
|
#!/usr/bin/env python
"""
Copyright (c) 2015 @xiaoL (http://xlixli.net/)
"""
import os
import string
from lib.core.enums import PRIORITY
from lib.core.common import singleTimeWarnMessage
__priority__ = PRIORITY.LOWEST
def dependencies():
singleTimeWarnMessage("tamper script '%s' is only meant to be run against WAF on IIS")
def tamper(payload, **kwargs):
"""
IIS Unicode-url-encodes
WideChar To MultiByte bypass weak web application firewalls
Requirement:
* IIS
Tested against:
* WAF
Reference:
* http://blog.sina.com.cn/s/blog_85e506df0102vo9s.html
Notes:
* Useful to bypass weak web application firewalls
tamper('SELECT FIELD%20FROM TABLE')
'S%u00F0L%u00F0C%u00DE FI%u00F0L%u00D0%20FR%u00BAM %u00DE%u00AABL%u00F0'
"""
change_char = {'1': 'B9', '2': 'B2', '3': 'B3', 'D': 'D0',
'T': 'DE', 'Y': 'DD', 'a': 'AA', 'e': 'F0',
'o': 'BA', 't': 'FE', 'y': 'FD', '|': 'A6',
'd': 'D0', 'A': 'AA', 'E': 'F0', 'O': 'BA'}
ret_val = payload
if payload:
ret_val = ""
i = 0
while i < len(payload):
if payload[i] in change_char.keys():
ret_val += "%%u00%s" % change_char.get(payload[i])
else:
ret_val += payload[i]
i += 1
return ret_val
|
gpl-2.0
| 6,264,521,854,367,625,000
| 23.280702
| 90
| 0.537572
| false
| 2.995671
| false
| false
| false
|
kebarr/Geist
|
geist/backends/windows.py
|
1
|
14357
|
from __future__ import division, absolute_import, print_function
import numpy
import subprocess
import shlex
import ctypes
from ctypes import (
byref,
WINFUNCTYPE,
c_ubyte,
sizeof,
POINTER,
Structure,
)
from ctypes.wintypes import (
POINT,
RECT,
DWORD,
LPARAM,
HWND,
BOOL,
WCHAR,
LONG,
WORD
)
from geist.finders import Location, LocationList
from ._common import BackendActionBuilder
from . import logger
class _ActionsTransaction(object):
def __init__(self, backend):
self._actions_builder = BackendActionBuilder(backend)
def __enter__(self):
return self._actions_builder
def __exit__(self, *args):
self._actions_builder.execute()
return False
_USER32 = ctypes.windll.USER32
_GDI32 = ctypes.windll.GDI32
class _RGBQUAD(Structure):
_fields_ = [
('rgbBlue', c_ubyte),
('rgbGreen', c_ubyte),
('rgbRed', c_ubyte),
('rgbReserved', c_ubyte),
]
class _BITMAPINFOHEADER(Structure):
_fields_ = [
('biSize', DWORD),
('biWidth', LONG),
('biHeight', LONG),
('biPlanes', WORD),
('biBitCount', WORD),
('biCompression', WORD),
('biSizeImage', DWORD),
('biXPelsPerMeter', LONG),
('biYPelsPerMeter', LONG),
('biClrUsed', DWORD),
('biClrImportant', DWORD)
]
class _BITMAPINFO(Structure):
_fields_ = [
('bmiHeader', _BITMAPINFOHEADER),
('bmiColors', (_RGBQUAD * 1))
]
_DIB_RGB_COLORS = 0
class GeistWindowsBackend(object):
SRCCOPY = 0xCC0020
SM_CXVIRTUALSCREEN, SM_CYVIRTUALSCREEN = 78, 79
BITSPIXEL = 12
def __init__(self, **kwargs):
self._mouse = _Mouse()
self._keyboard = _KeyBoard()
logger.info("Created GeistWindowsBackend")
def create_process(self, command):
dev_null = open('NUL', 'w')
process = subprocess.Popen(
shlex.split(command), stdout=dev_null, stderr=subprocess.STDOUT
)
return Process(process.pid)
def actions_transaction(self):
return _ActionsTransaction(self)
def capture_locations(self):
hwnd = _USER32.GetDesktopWindow()
width, height = (
_USER32.GetSystemMetrics(GeistWindowsBackend.SM_CXVIRTUALSCREEN),
_USER32.GetSystemMetrics(GeistWindowsBackend.SM_CYVIRTUALSCREEN)
)
desktop_dc = _USER32.GetWindowDC(hwnd)
capture_dc = _GDI32.CreateCompatibleDC(desktop_dc)
# Check that the screen has bit depth of 32
bits = _GDI32.GetDeviceCaps(desktop_dc, GeistWindowsBackend.BITSPIXEL)
if bits != 32:
raise NotImplementedError(
"Geist only supports displays with a bit depth of 32 (%d)"
% bits)
bmp = _GDI32.CreateCompatibleBitmap(desktop_dc, width, height)
_GDI32.SelectObject(capture_dc, bmp)
_GDI32.BitBlt(
capture_dc, 0, 0, width, height, desktop_dc, 0, 0,
GeistWindowsBackend.SRCCOPY
)
bmp_info = _BITMAPINFO()
bmp_info.bmiHeader.biSize = sizeof(bmp_info)
bmp_info.bmiHeader.biPlanes = 1
bmp_info.bmiHeader.biBitCount = 32
bmp_info.bmiHeader.biWidth = width
bmp_info.bmiHeader.biHeight = -height
memarray = numpy.ndarray((height, width), dtype='4B')
_GDI32.GetDIBits(
capture_dc,
bmp,
0,
height,
memarray.ctypes.data_as(POINTER(c_ubyte)),
byref(bmp_info),
_DIB_RGB_COLORS
)
_GDI32.DeleteObject(bmp)
_GDI32.DeleteDC(capture_dc)
_GDI32.DeleteDC(desktop_dc)
#strip alpha and reverse bgr to rgb
image = memarray[:, :, 2::-1]
return LocationList([Location(0, 0, width, height, image=image)])
def key_down(self, name):
self._keyboard.key_down(name)
def key_up(self, name):
self._keyboard.key_up(name)
def button_down(self, button_num):
self._mouse.button_down(button_num)
def button_up(self, button_num):
self._mouse.button_up(button_num)
def move(self, point):
self._mouse.move(point)
def cursor_position(self):
return self._mouse.cursor_position()
def close(self):
pass
def __del__(self):
self.close()
class _KeyBoard(object):
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_KEYDOWN = 0x0000
NAME_TO_VK_MAP = {
'page down': 0x22,
'page up': 0x21,
'end': 0x23,
'home': 0x24,
'shift': 0x10,
'menu': 0x12,
'control': 0x11,
'down': 0x28,
'up': 0x26,
'left': 0x25,
'right': 0x27,
'lshift': 0xA0,
'rshift': 0xA1,
'escape': 0x1B,
}
NAME_TO_CHAR_MAP = {
'return': '\r',
'space': ' ',
'tab': '\t',
'period': '.',
'minus': '-',
'colon': ':',
'backslash': '\\',
'underscore': '_',
'exclam': '!',
'fslash': '/',
'greaterthan':'>',
}
def _convert_keyname_to_virtual_key(self, name):
if name in _KeyBoard.NAME_TO_VK_MAP:
return _KeyBoard.NAME_TO_VK_MAP[name]
elif name in _KeyBoard.NAME_TO_CHAR_MAP:
char = _KeyBoard.NAME_TO_CHAR_MAP[name]
else:
char = name
assert len(char) == 1, "can not convert %r" % (char,)
return _USER32.VkKeyScanW(WCHAR(char)) & 0xFF
def _map_virtual_key(self, key):
return _USER32.MapVirtualKeyA(key & 0xff, 0)
def key_down(self, name):
vkey = self._convert_keyname_to_virtual_key(name)
scan = self._map_virtual_key(vkey)
_USER32.keybd_event(vkey, scan, _KeyBoard.KEYEVENTF_KEYDOWN, None)
def key_up(self, name):
vkey = self._convert_keyname_to_virtual_key(name)
scan = self._map_virtual_key(vkey)
_USER32.keybd_event(vkey, scan, _KeyBoard.KEYEVENTF_KEYUP, None)
class _Mouse(object):
MOUSEEVENTF_MOVE = 0x0001
MOUSEEVENTF_LEFTDOWN = 0x0002
MOUSEEVENTF_LEFTUP = 0x0004
MOUSEEVENTF_RIGHTDOWN = 0x0008
MOUSEEVENTF_RIGHTUP = 0x0010
MOUSEEVENTF_MIDDLEDOWN = 0x0020
MOUSEEVENTF_MIDDLEUP = 0x0040
MOUSEEVENTF_WHEEL = 0x0800
MOUSEEVENTF_VIRTUALDESK = 0x4000
MOUSEEVENTF_ABSOLUTE = 0x8000
SPI_SETMOUSE = 0x0004
SPI_SETMOUSESPEED = 0x0071
SM_CXSCREEN = 0
SM_CYSCREEN = 1
LEFT_BUTTON, MIDDLE_BUTTON, RIGHT_BUTTON = [1, 2, 3]
def _normalize_coords(self, point):
norm = 65535
x, y = point
w = _USER32.GetSystemMetrics(_Mouse.SM_CXSCREEN)
h = _USER32.GetSystemMetrics(_Mouse.SM_CYSCREEN)
return (int(x * (norm / w)), int(y * (norm/h)))
def move(self, point):
_USER32.SetCursorPos(*point)
def cursor_position(self):
point = POINT()
_USER32.GetCursorPos(byref(point))
return point.x, point.y
    def scroll(self, lines):
_USER32.mouse_event(
_Mouse.MOUSEEVENTF_WHEEL,
0,
0,
int(120 * lines),
None
)
def button_down(self, button):
_USER32.mouse_event(self._map_button_down(button), 0, 0, 0, None)
def button_up(self, button):
_USER32.mouse_event(self._map_button_up(button), 0, 0, 0, None)
def _map_button_down(self, button):
assert button in [
_Mouse.LEFT_BUTTON,
_Mouse.MIDDLE_BUTTON,
_Mouse.RIGHT_BUTTON
]
return [
_Mouse.MOUSEEVENTF_LEFTDOWN,
_Mouse.MOUSEEVENTF_MIDDLEDOWN,
_Mouse.MOUSEEVENTF_RIGHTDOWN
][button - 1]
def _map_button_up(self, button):
assert button in [
_Mouse.LEFT_BUTTON,
_Mouse.MIDDLE_BUTTON,
_Mouse.RIGHT_BUTTON
]
return [
_Mouse.MOUSEEVENTF_LEFTUP,
_Mouse.MOUSEEVENTF_MIDDLEUP,
_Mouse.MOUSEEVENTF_RIGHTUP
][button - 1]
_EnumWindowsProc = WINFUNCTYPE(BOOL, HWND, LPARAM)
def hwnd_to_pid(hwnd):
pid = DWORD()
_USER32.GetWindowThreadProcessId(int(hwnd), byref(pid))
return pid.value
class Process(object):
def __init__(self, pid):
self.__pid = int(pid)
@property
def pid(self):
return self.__pid
def destroy(self):
subprocess.call(
'taskkill /F /T /PID %d' % (self.pid),
shell=True
)
def get_window(self):
found_hwnd = []
def callback(hwnd, data):
found_pid = hwnd_to_pid(hwnd)
if found_pid == self.pid:
found_hwnd.append(hwnd)
return False
return True
_USER32.EnumWindows(_EnumWindowsProc(callback), 0)
if found_hwnd:
return Window(found_hwnd[0]).get_root()
return None
def get_all_windows():
windows = []
def callback(hwnd, data):
windows.append(Window(hwnd))
return True
_USER32.EnumDesktopWindows(None, _EnumWindowsProc(callback), 0)
return windows
def get_window_at(x, y):
point = POINT()
point.x, point.y = x, y
hwnd = ctypes.windll.user32.WindowFromPoint(point)
if not hwnd:
return None
else:
return Window(hwnd)
class Window(object):
SWP_NOOWNERZORDER = 0x0200
SWP_NOSENDCHANGING = 0x0400
SW_SHOWMAXIMIZED = 3
SW_SHOWMINIMIZED = 2
BITSPIXEL = 12
def __init__(self, hwnd):
self.__hwnd = int(hwnd)
def _rect(self):
rect = RECT()
_USER32.GetWindowRect(self.__hwnd, byref(rect))
return (
rect.left,
rect.top,
(rect.right - rect.left),
(rect.bottom - rect.top),
)
def set_position(self, rect):
x, y, w, h = rect
_USER32.SetWindowPos(
self.__hwnd,
0,
x,
y,
w,
h,
Window.SWP_NOSENDCHANGING | Window.SWP_NOOWNERZORDER
)
@property
def title(self):
max_len = 128
text = (WCHAR*max_len)()
_USER32.GetWindowTextW(self.__hwnd, text, max_len)
return text.value
@property
def classname(self):
max_len = 128
text = (WCHAR*max_len)()
_USER32.GetClassNameW(self.__hwnd, text, max_len)
return text.value
@property
def visible(self):
return bool(_USER32.IsWindowVisible(self.__hwnd))
def switch_to(self):
_USER32.SwitchToThisWindow(self.__hwnd, False)
def maximise(self):
"""Maximise the window and return True if previously visible"""
return bool(_USER32.ShowWindow(self.__hwnd, Window.SW_SHOWMAXIMIZED))
def minimise(self):
"""Minimise the window and return True if previously visible"""
return bool(_USER32.ShowWindow(self.__hwnd, Window.SW_SHOWMINIMIZED))
def get_process(self):
return Process(hwnd_to_pid(self.__hwnd))
def get_root(self):
hwnd = self.__hwnd
while _USER32.GetParent(hwnd):
hwnd = _USER32.GetParent(hwnd)
if hwnd == self.__hwnd:
return self
else:
return Window(hwnd)
def __hash__(self):
return self.__hwnd
def __eq__(self, other):
try:
return self.__hwnd == other.__hwnd
except:
return False
def capture_locations(self):
x, y, width, height = self._rect()
window_dc = _USER32.GetWindowDC(self.__hwnd)
capture_dc = _GDI32.CreateCompatibleDC(window_dc)
# Check that the screen has bit depth of 32
bits = _GDI32.GetDeviceCaps(window_dc, Window.BITSPIXEL)
if bits != 32:
raise NotImplementedError(
"Geist only supports displays with a bit depth of 32 (%d)"
% bits)
bmp = _GDI32.CreateCompatibleBitmap(window_dc, width, height)
_GDI32.SelectObject(capture_dc, bmp)
_USER32.PrintWindow(self.__hwnd, capture_dc, 0)
bmp_info = _BITMAPINFO()
bmp_info.bmiHeader.biSize = sizeof(bmp_info)
bmp_info.bmiHeader.biPlanes = 1
bmp_info.bmiHeader.biBitCount = 32
bmp_info.bmiHeader.biWidth = width
bmp_info.bmiHeader.biHeight = -height
memarray = numpy.ndarray((height, width), dtype='4B')
_GDI32.GetDIBits(
capture_dc,
bmp,
0,
height,
memarray.ctypes.data_as(POINTER(c_ubyte)),
byref(bmp_info),
_DIB_RGB_COLORS
)
_GDI32.DeleteObject(bmp)
_GDI32.DeleteDC(capture_dc)
_GDI32.DeleteDC(window_dc)
#strip alpha and reverse bgr to rgb
image = memarray[:, :, 2::-1]
return LocationList([Location(x, y, width, height, image=image)])
def get_child_window_at(self, x, y):
point = POINT()
point.x, point.y = x, y
child_hwnd = ctypes.windll.user32.RealChildWindowFromPoint(
self.__hwnd,
point
)
if not child_hwnd:
return None
else:
return Window(child_hwnd)
def client_area_rect(self):
client_rect = RECT()
win_rect = RECT()
offset = POINT()
_USER32.GetClientRect(self.__hwnd, byref(client_rect))
_USER32.GetWindowRect(self.__hwnd, byref(win_rect))
_USER32.ClientToScreen(self.__hwnd, byref(offset))
x = offset.x - win_rect.left
y = offset.y - win_rect.top
w = client_rect.right
h = client_rect.bottom
return x, y, w, h
def __repr__(self):
return "Window(hwnd=%r, title=%r, classname=%r)" % (
self.__hwnd, self.title, self.classname
)
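# --- Illustrative usage sketch; not part of the original backend module. ---
# One way the helpers above might be combined on a Windows machine: walk the
# top-level windows, pick the first visible one whose title contains a given
# fragment and grab its capture. The default fragment is only an example.
def _demo_find_and_capture(title_fragment="Notepad"):
    for window in get_all_windows():
        if window.visible and title_fragment in window.title:
            return window.capture_locations()
    return None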
|
mit
| -8,654,186,419,630,648,000
| 25.451243
| 78
| 0.543498
| false
| 3.578514
| false
| false
| false
|
brentd-smith/smolkinsite
|
songs/migrations/0001_initial.py
|
1
|
4209
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-13 19:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BookName',
fields=[
('name', models.CharField(max_length=64, primary_key=True, serialize=False)),
('display', models.CharField(max_length=64)),
('seq_number', models.PositiveSmallIntegerField()),
],
),
migrations.CreateModel(
name='HaftarahReading',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('extension', models.CharField(choices=[('mp3', 'mp3'), ('pdf', 'pdf'), ('jpg', 'jpg')], default='mp3', max_length=3)),
('s3_obj_key', models.CharField(max_length=2048)),
('seq_number', models.PositiveSmallIntegerField()),
('file_name', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='ParshaName',
fields=[
('name', models.CharField(max_length=64, primary_key=True, serialize=False)),
('display', models.CharField(max_length=64)),
('seq_number', models.PositiveSmallIntegerField()),
('book_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='songs.BookName')),
],
),
migrations.CreateModel(
name='ServiceName',
fields=[
('name', models.CharField(max_length=64, primary_key=True, serialize=False)),
('display', models.CharField(max_length=64)),
('seq_number', models.PositiveSmallIntegerField()),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('display', models.CharField(max_length=128)),
('s3_obj_key', models.CharField(max_length=2048)),
('extension', models.CharField(choices=[('mp3', 'mp3'), ('pdf', 'pdf'), ('jpg', 'jpg')], default='mp3', max_length=3)),
('page_number', models.PositiveSmallIntegerField()),
('seq_number', models.PositiveSmallIntegerField()),
('file_name', models.CharField(max_length=128)),
('service_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='songs.ServiceName')),
],
),
migrations.CreateModel(
name='TorahReading',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('triennial', models.CharField(choices=[(None, None), ('1st', '1st Triennial'), ('2nd', '2nd Triennial'), ('3rd', '3rd Triennial')], default='1st', max_length=3)),
('aliyah', models.CharField(choices=[(None, None), ('1st', '1st Aliyah'), ('2nd', '2nd Aliyah'), ('3rd', '3rd Aliyah'), ('4th', '4th Aliyah'), ('5th', '5th Aliyah'), ('6th', '6th Aliyah'), ('7th', '7th Aliyah'), ('Maftir', 'Maftir')], default='1st', max_length=6)),
('extension', models.CharField(choices=[('mp3', 'mp3'), ('pdf', 'pdf'), ('jpg', 'jpg')], default='mp3', max_length=3)),
('s3_obj_key', models.CharField(max_length=2048)),
('seq_number', models.PositiveSmallIntegerField()),
('file_name', models.CharField(max_length=128)),
('parsha', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='songs.ParshaName')),
],
),
migrations.AddField(
model_name='haftarahreading',
name='parsha',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='songs.ParshaName'),
),
]
|
gpl-3.0
| 6,483,843,645,224,168,000
| 49.107143
| 281
| 0.552388
| false
| 4.000951
| false
| false
| false
|
pkgw/pwkit
|
pwkit/bblocks.py
|
1
|
16546
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2014 Peter Williams <peter@newton.cx> and collaborators.
# Licensed under the MIT License.
"""pwkit.bblocks - Bayesian Blocks analysis, with a few extensions.
Bayesian Blocks analysis for the "time tagged" case described by Scargle+
2013. Inspired by the bayesian_blocks implementation by Jake Vanderplas in the
AstroML package, but that turned out to have some limitations.
We have iterative determination of the best number of blocks (using an ad-hoc
routine described in Scargle+ 2013) and bootstrap-based determination of
uncertainties on the block heights (ditto).
Functions are:
:func:`bin_bblock`
Bayesian Blocks analysis with counts and bins.
:func:`tt_bblock`
BB analysis of time-tagged events.
:func:`bs_tt_bblock`
Like :func:`tt_bblock` with bootstrap-based uncertainty assessment. NOTE:
the uncertainties are not very reliable!
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = str ('nlogn bin_bblock tt_bblock bs_tt_bblock').split ()
from six.moves import range
import numpy as np
from . import Holder
def nlogn (n, dt):
# I really feel like there must be a cleverer way to do this
# scalar-or-vector possible-bad-value masking.
if np.isscalar (n):
if n == 0:
return 0.
return n * (np.log (n) - np.log (dt))
n = np.asarray (n)
mask = (n == 0)
r = n * (np.log (np.where (mask, 1, n)) - np.log (dt))
return np.where (mask, 0, r)
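# --- Illustrative usage sketch; not part of the original module. ---
# nlogn() accepts either a scalar count or an array of counts; zero counts
# map to 0. instead of triggering log(0) warnings. The values are examples.
def _demo_nlogn ():
    scalar = nlogn (5, 2.)                      # 5 * (log 5 - log 2)
    vector = nlogn (np.array ([0, 5, 10]), 2.)  # first entry is exactly 0.
    return scalar, vector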
def bin_bblock (widths, counts, p0=0.05):
"""Fundamental Bayesian Blocks algorithm. Arguments:
widths - Array of consecutive cell widths.
counts - Array of numbers of counts in each cell.
p0=0.05 - Probability of preferring solutions with additional bins.
Returns a Holder with:
blockstarts - Start times of output blocks.
counts - Number of events in each output block.
finalp0 - Final value of p0, after iteration to minimize `nblocks`.
nblocks - Number of output blocks.
ncells - Number of input cells/bins.
origp0 - Original value of p0.
rates - Event rate associated with each block.
widths - Width of each output block.
"""
widths = np.asarray (widths)
counts = np.asarray (counts)
ncells = widths.size
origp0 = p0
if np.any (widths <= 0):
raise ValueError ('bin widths must be positive')
if widths.size != counts.size:
raise ValueError ('widths and counts must have same size')
if p0 < 0 or p0 >= 1.:
raise ValueError ('p0 must lie within [0, 1)')
vedges = np.cumsum (np.concatenate (([0], widths))) # size: ncells + 1
block_remainders = vedges[-1] - vedges # size: nedges = ncells + 1
ccounts = np.cumsum (np.concatenate (([0], counts)))
count_remainders = ccounts[-1] - ccounts
prev_blockstarts = None
best = np.zeros (ncells, dtype=np.float)
last = np.zeros (ncells, dtype=np.int)
for _ in range (10):
# Pluggable num-change-points prior-weight expression:
ncp_prior = 4 - np.log (p0 / (0.0136 * ncells**0.478))
for r in range (ncells):
tk = block_remainders[:r+1] - block_remainders[r+1]
nk = count_remainders[:r+1] - count_remainders[r+1]
# Pluggable fitness expression:
fit_vec = nlogn (nk, tk)
# This incrementally penalizes partitions with more blocks:
tmp = fit_vec - ncp_prior
tmp[1:] += best[:r]
imax = np.argmax (tmp)
last[r] = imax
best[r] = tmp[imax]
# different semantics than Scargle impl: our blockstarts is similar to
# their changepoints, but we always finish with blockstarts[0] = 0.
work = np.zeros (ncells, dtype=int)
workidx = 0
ind = last[-1]
while True:
work[workidx] = ind
workidx += 1
if ind == 0:
break
ind = last[ind - 1]
blockstarts = work[:workidx][::-1]
if prev_blockstarts is not None:
if (blockstarts.size == prev_blockstarts.size and
(blockstarts == prev_blockstarts).all ()):
break # converged
if blockstarts.size == 1:
break # can't shrink any farther
# Recommended ad-hoc iteration to favor fewer blocks above and beyond
# the value of p0:
p0 = 1. - (1. - p0)**(1. / (blockstarts.size - 1))
prev_blockstarts = blockstarts
assert blockstarts[0] == 0
nblocks = blockstarts.size
info = Holder ()
info.ncells = ncells
info.nblocks = nblocks
info.origp0 = origp0
info.finalp0 = p0
info.blockstarts = blockstarts
info.counts = np.empty (nblocks, dtype=np.int)
info.widths = np.empty (nblocks)
for iblk in range (nblocks):
cellstart = blockstarts[iblk]
if iblk == nblocks - 1:
cellend = ncells - 1
else:
cellend = blockstarts[iblk+1] - 1
info.widths[iblk] = widths[cellstart:cellend+1].sum ()
info.counts[iblk] = counts[cellstart:cellend+1].sum ()
info.rates = info.counts / info.widths
return info
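# --- Illustrative usage sketch; not part of the original module. ---
# 200 equal-width cells whose Poisson rate doubles halfway through;
# bin_bblock() should recover the change point. Purely synthetic data.
def _demo_bin_bblock ():
    widths = np.ones (200)
    counts = np.concatenate ((np.random.poisson (2., 100),
                              np.random.poisson (4., 100)))
    info = bin_bblock (widths, counts, p0=0.05)
    # info.nblocks is typically 2; info.rates holds the per-block rates.
    return info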
def tt_bblock (tstarts, tstops, times, p0=0.05, intersect_with_bins=False):
"""Bayesian Blocks for time-tagged events. Arguments:
*tstarts*
Array of input bin start times.
*tstops*
Array of input bin stop times.
*times*
Array of event arrival times.
*p0* = 0.05
Probability of preferring solutions with additional bins.
*intersect_with_bins* = False
If true, intersect bblock bins with input bins; can result in more bins
than bblocks wants; they will have the same rate values.
Returns a Holder with:
*counts*
Number of events in each output block.
*finalp0*
Final value of p0, after iteration to minimize `nblocks`.
*ledges*
Times of left edges of output blocks.
*midpoints*
Times of midpoints of output blocks.
*nblocks*
Number of output blocks.
*ncells*
Number of input cells/bins.
*origp0*
Original value of p0.
*rates*
Event rate associated with each block.
*redges*
Times of right edges of output blocks.
*widths*
Width of each output block.
Bin start/stop times are best derived from a 1D Voronoi tesselation of the
event arrival times, with some kind of global observation start/stop time
setting the extreme edges. Or they can be set from "good time intervals"
if observations were toggled on or off as in an X-ray telescope.
If *intersect_with_bins* is True, the true Bayesian Blocks bins (BBBs) are
intersected with the "good time intervals" (GTIs) defined by the *tstarts*
and *tstops* variables. One GTI may contain multiple BBBs if the event
rate appears to change within the GTI, and one BBB may span multiple GTIs
    if the event rate does *not* appear to change between the GTIs. The
intersection will ensure that no BBB intervals cross the edge of a GTI. If
this would happen, the BBB is split into multiple, partially redundant
records. Each of these records will have the **same** value for the
*counts*, *rates*, and *widths* values. However, the *ledges*, *redges*,
and *midpoints* values will be recalculated. Note that in this mode, it is
    not necessarily true that ``widths = redges - ledges`` as is usually the
case. When this flag is true, keep in mind that multiple bins are
therefore *not* necessarily independent statistical samples.
"""
tstarts = np.asarray (tstarts)
tstops = np.asarray (tstops)
times = np.asarray (times)
if tstarts.size != tstops.size:
raise ValueError ('must have same number of starts and stops')
ngti = tstarts.size
if ngti < 1:
raise ValueError ('must have at least one goodtime interval')
if np.any ((tstarts[1:] - tstarts[:-1]) <= 0):
raise ValueError ('tstarts must be ordered and distinct')
if np.any ((tstops[1:] - tstops[:-1]) <= 0):
raise ValueError ('tstops must be ordered and distinct')
if np.any (tstarts >= tstops):
raise ValueError ('tstarts must come before tstops')
if np.any ((times[1:] - times[:-1]) < 0):
raise ValueError ('times must be ordered')
if times.min () < tstarts[0]:
raise ValueError ('no times may be smaller than first tstart')
if times.max () > tstops[-1]:
raise ValueError ('no times may be larger than last tstop')
for i in range (1, ngti):
if np.where ((times > tstops[i-1]) & (times < tstarts[i]))[0].size:
raise ValueError ('no times may fall in goodtime gap #%d' % i)
if p0 < 0 or p0 >= 1.:
raise ValueError ('p0 must lie within [0, 1)')
utimes, uidxs = np.unique (times, return_index=True)
nunique = utimes.size
counts = np.empty (nunique)
counts[:-1] = uidxs[1:] - uidxs[:-1]
counts[-1] = times.size - uidxs[-1]
assert counts.sum () == times.size
# we grow these arrays with concats, which will perform badly with lots of
# GTIs. Not expected to be a big deal.
widths = np.empty (0)
ledges = np.empty (0)
redges = np.empty (0)
for i in range (ngti):
tstart, tstop = tstarts[i], tstops[i]
w = np.where ((utimes >= tstart) & (utimes <= tstop))[0]
if not w.size:
# No events during this goodtime! We have to insert a zero-count
# event block. This may break assumptions within bin_bblock()?
# j = idx of first event after this GTI
wafter = np.where (utimes > tstop)[0]
if wafter.size:
j = wafter[0]
else:
j = utimes.size
assert j == 0 or np.where (utimes < tstart)[0][-1] == j - 1
counts = np.concatenate ((counts[:j], [0], counts[j:]))
widths = np.concatenate ((widths, [tstop - tstart]))
ledges = np.concatenate ((ledges, [tstart]))
redges = np.concatenate ((redges, [tstop]))
else:
gtutimes = utimes[w]
midpoints = 0.5 * (gtutimes[1:] + gtutimes[:-1]) # size: n - 1
gtedges = np.concatenate (([tstart], midpoints, [tstop])) # size: n + 1
gtwidths = gtedges[1:] - gtedges[:-1] # size: n
assert gtwidths.sum () == tstop - tstart
widths = np.concatenate ((widths, gtwidths))
ledges = np.concatenate ((ledges, gtedges[:-1]))
redges = np.concatenate ((redges, gtedges[1:]))
assert counts.size == widths.size
info = bin_bblock (widths, counts, p0=p0)
info.ledges = ledges[info.blockstarts]
# The right edge of the i'th block is the right edge of its rightmost
# bin, which is the bin before the leftmost bin of the (i+1)'th block:
info.redges = np.concatenate ((redges[info.blockstarts[1:] - 1], [redges[-1]]))
info.midpoints = 0.5 * (info.ledges + info.redges)
del info.blockstarts
if intersect_with_bins:
# OK, we now need to intersect the bblock bins with the input bins.
# This can fracture one bblock bin into multiple ones but shouldn't
# make any of them disappear, since they're definitionally constrained
# to contain events.
#
# First: sorted list of all timestamps at which *something* changes:
# either a bblock edge, or a input bin edge. We drop the last entry,
        # giving us a list of left edges of bins in which everything is the
# same.
all_times = set(tstarts)
all_times.update(tstops)
all_times.update(info.ledges)
all_times.update(info.redges)
all_times = np.array(sorted(all_times))[:-1]
# Now, construct a lookup table of which bblock number each of these
# bins corresponds to. More than one bin may have the same bblock
# number, if a GTI change slices a single bblock into more than one
# piece. We do this in a somewhat non-obvious way since we know that
# the bblocks completely cover the overall GTI span in order.
bblock_ids = np.zeros(all_times.size)
for i in range(1, info.nblocks):
bblock_ids[all_times >= info.ledges[i]] = i
# Now, a lookup table of which bins are within a good GTI span. Again,
# we know that all bins are either entirely in a good GTI or entirely
# outside, so the logic is simplified but not necessarily obvious.
good_timeslot = np.zeros(all_times.size, dtype=np.bool)
for t0, t1 in zip(tstarts, tstops):
ok = (all_times >= t0) & (all_times < t1)
good_timeslot[ok] = True
# Finally, look for contiguous spans that are in a good timeslot *and*
# have the same underlying bblock number. These are our intersected
# blocks.
old_bblock_ids = []
ledges = []
redges = []
cur_bblock_id = -1
for i in range(all_times.size):
if bblock_ids[i] != cur_bblock_id or not good_timeslot[i]:
if cur_bblock_id >= 0:
# Ending a previous span.
redges.append(all_times[i])
cur_bblock_id = -1
if good_timeslot[i]:
# Starting a new span.
ledges.append(all_times[i])
old_bblock_ids.append(bblock_ids[i])
cur_bblock_id = bblock_ids[i]
if cur_bblock_id >= 0:
# End the last span.
redges.append(tstops[-1])
# Finally, rewrite all of the data as planned.
old_bblock_ids = np.array(old_bblock_ids, dtype=np.int)
info.counts = info.counts[old_bblock_ids]
info.rates = info.rates[old_bblock_ids]
info.widths = info.widths[old_bblock_ids]
info.ledges = np.array(ledges)
info.redges = np.array(redges)
info.midpoints = 0.5 * (info.ledges + info.redges)
info.nblocks = info.ledges.size
return info
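# --- Illustrative usage sketch; not part of the original module. ---
# A single good-time interval [0, 100) with extra events in the second half;
# tt_bblock() should split it into two blocks. Purely synthetic data.
def _demo_tt_bblock ():
    times = np.sort (np.concatenate ((np.random.uniform (0., 100., 100),
                                      np.random.uniform (50., 100., 200))))
    info = tt_bblock ([0.], [100.], times, p0=0.05)
    # info.ledges/info.redges give the block edges, info.rates the rates.
    return info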
def bs_tt_bblock (times, tstarts, tstops, p0=0.05, nbootstrap=512):
"""Bayesian Blocks for time-tagged events with bootstrapping uncertainty
assessment. THE UNCERTAINTIES ARE NOT VERY GOOD! Arguments:
tstarts - Array of input bin start times.
tstops - Array of input bin stop times.
times - Array of event arrival times.
p0=0.05 - Probability of preferring solutions with additional bins.
nbootstrap=512 - Number of bootstrap runs to perform.
Returns a Holder with:
blockstarts - Start times of output blocks.
bsrates - Mean event rate in each bin from bootstrap analysis.
bsrstds - ~Uncertainty: stddev of event rate in each bin from bootstrap analysis.
counts - Number of events in each output block.
finalp0 - Final value of p0, after iteration to minimize `nblocks`.
ledges - Times of left edges of output blocks.
midpoints - Times of midpoints of output blocks.
nblocks - Number of output blocks.
ncells - Number of input cells/bins.
origp0 - Original value of p0.
rates - Event rate associated with each block.
redges - Times of right edges of output blocks.
widths - Width of each output block.
"""
times = np.asarray (times)
tstarts = np.asarray (tstarts)
tstops = np.asarray (tstops)
nevents = times.size
if nevents < 1:
raise ValueError ('must be given at least 1 event')
info = tt_bblock (tstarts, tstops, times, p0)
# Now bootstrap resample to assess uncertainties on the bin heights. This
# is the approach recommended by Scargle+.
bsrsums = np.zeros (info.nblocks)
bsrsumsqs = np.zeros (info.nblocks)
for _ in range (nbootstrap):
bstimes = times[np.random.randint (0, times.size, times.size)]
bstimes.sort ()
bsinfo = tt_bblock (tstarts, tstops, bstimes, p0)
blocknums = np.minimum (np.searchsorted (bsinfo.redges, info.midpoints),
bsinfo.nblocks - 1)
samprates = bsinfo.rates[blocknums]
bsrsums += samprates
bsrsumsqs += samprates**2
bsrmeans = bsrsums / nbootstrap
mask = bsrsumsqs / nbootstrap <= bsrmeans**2
bsrstds = np.sqrt (np.where (mask, 0, bsrsumsqs / nbootstrap - bsrmeans**2))
info.bsrates = bsrmeans
info.bsrstds = bsrstds
return info
|
mit
| -3,543,484,899,377,232,400
| 36.265766
| 89
| 0.620513
| false
| 3.614242
| false
| false
| false
|
mganeva/mantid
|
scripts/Muon/GUI/Common/home_tab/home_tab_view.py
|
1
|
1149
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
from PyQt4 import QtGui
class HomeTabView(QtGui.QWidget):
def __init__(self, parent=None,widget_list=None):
super(HomeTabView, self).__init__(parent)
self._widget_list = widget_list
self.splitter = None
self.vertical_layout = None
self.setup_interface()
def setup_interface(self):
self.setObjectName("HomeTab")
self.setWindowTitle("Home Tab")
self.resize(500, 100)
self.vertical_layout = QtGui.QVBoxLayout()
if self._widget_list:
for i, widget in enumerate(self._widget_list):
widget.setParent(self)
self.vertical_layout.addWidget(widget)
self.setLayout(self.vertical_layout)
# for docking
def getLayout(self):
return self.vertical_layout
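# --- Illustrative usage sketch; not part of the original module. ---
# Minimal standalone example assuming a PyQt4 environment; the QLabel widgets
# simply stand in for the real Muon analysis widgets.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    view = HomeTabView(widget_list=[QtGui.QLabel("widget one"),
                                    QtGui.QLabel("widget two")])
    view.show()
    sys.exit(app.exec_())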
|
gpl-3.0
| 5,541,721,634,874,385,000
| 27.725
| 68
| 0.652742
| false
| 3.804636
| false
| false
| false
|
snowflakedb/snowflake-connector-python
|
src/snowflake/connector/proxy.py
|
1
|
1473
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
import os
def set_proxies(proxy_host, proxy_port, proxy_user=None, proxy_password=None):
"""Sets proxy dict for requests."""
PREFIX_HTTP = "http://"
PREFIX_HTTPS = "https://"
proxies = None
if proxy_host and proxy_port:
if proxy_host.startswith(PREFIX_HTTP):
proxy_host = proxy_host[len(PREFIX_HTTP) :]
elif proxy_host.startswith(PREFIX_HTTPS):
proxy_host = proxy_host[len(PREFIX_HTTPS) :]
if proxy_user or proxy_password:
proxy_auth = "{proxy_user}:{proxy_password}@".format(
proxy_user=proxy_user if proxy_user is not None else "",
proxy_password=proxy_password if proxy_password is not None else "",
)
else:
proxy_auth = ""
proxies = {
"http": "http://{proxy_auth}{proxy_host}:{proxy_port}".format(
proxy_host=proxy_host,
proxy_port=str(proxy_port),
proxy_auth=proxy_auth,
),
"https": "http://{proxy_auth}{proxy_host}:{proxy_port}".format(
proxy_host=proxy_host,
proxy_port=str(proxy_port),
proxy_auth=proxy_auth,
),
}
os.environ["HTTP_PROXY"] = proxies["http"]
os.environ["HTTPS_PROXY"] = proxies["https"]
return proxies
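# --- Illustrative usage sketch; not part of the original module. ---
# The host/port/credentials are placeholders; set_proxies() also exports
# HTTP_PROXY/HTTPS_PROXY so that libraries such as requests pick them up.
if __name__ == "__main__":
    proxies = set_proxies("proxy.example.com", 8080, "user", "secret")
    print(proxies["http"])   # http://user:secret@proxy.example.com:8080
    print(proxies["https"])  # the same URL is used for the https entry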
|
apache-2.0
| 845,093,474,728,301,600
| 34.926829
| 84
| 0.552614
| false
| 3.876316
| false
| false
| false
|
smarterclayton/solum
|
solum/openstack/common/rpc/amqp.py
|
1
|
23625
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implementations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
from oslo.config import cfg
from solum.openstack.common import excutils
from solum.openstack.common.gettextutils import _ # noqa
from solum.openstack.common import local
from solum.openstack.common import log as logging
from solum.openstack.common.rpc import common as rpc_common
amqp_opts = [
cfg.BoolOpt('amqp_durable_queues',
default=False,
deprecated_name='rabbit_durable_queues',
deprecated_group='DEFAULT',
help='Use durable queues in amqp.'),
cfg.BoolOpt('amqp_auto_delete',
default=False,
help='Auto-delete queues in amqp.'),
]
cfg.CONF.register_opts(amqp_opts)
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug(_('Pool creating new connection'))
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
# Force a new connection pool to be created.
# Note that this was added due to failing unit test cases. The issue
# is the above "while loop" gets all the cached connections from the
# pool and closes them, but never returns them to the pool, a pool
# leak. The unit tests hang waiting for an item to be returned to the
# pool. The unit tests get here via the tearDown() method. In the run
# time code, it gets here via cleanup() and only appears in service.py
# just before doing a sys.exit(), so cleanup() only happens once and
# the leakage is not a problem.
self.connection_cls.pool = None
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the create_connection() caller.
This is essentially a wrapper around Connection that supports 'with'.
It can also return a new Connection, or one from a pool.
The function will also catch when an instance of this class is to be
deleted. With that we can return Connections to the pool on exceptions
and so forth without making the caller be responsible for catching them.
If possible the function makes sure to return a connection to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool."""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
if pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self."""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
ack_on_error=True):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
exchange_name,
ack_on_error)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance."""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
class ReplyProxy(ConnectionContext):
"""Connection class for RPC replies / callbacks."""
def __init__(self, conf, connection_pool):
self._call_waiters = {}
self._num_call_waiters = 0
self._num_call_waiters_wrn_threshhold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data)
self.consume_in_thread()
def _process_data(self, message_data):
msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id)
if not waiter:
LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
', message : %(data)s'), {'msg_id': msg_id,
'data': message_data})
LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
else:
waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1
if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
            LOG.warn(_('Number of call waiters is greater than warning '
                       'threshold: %d. There could be a MulticallProxyWaiter '
                       'leak.') % self._num_call_waiters_wrn_threshhold)
self._num_call_waiters_wrn_threshhold *= 2
self._call_waiters[msg_id] = waiter
def del_call_waiter(self, msg_id):
self._num_call_waiters -= 1
del self._call_waiters[msg_id]
def get_reply_q(self):
return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
failure=None, ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
msg = {'result': reply, 'failure': failure}
if ending:
msg['ending'] = True
_add_unique_id(msg)
# If a reply_q exists, add the msg_id to the reply and pass the
# reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibility.
if reply_q:
msg['_msg_id'] = msg_id
conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
else:
conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.reply_q = kwargs.pop('reply_q', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
values['reply_q'] = self.reply_q
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
reply, failure, ending, log_failure)
if ending:
self.msg_id = None
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
# NOTE(vish): Some versions of python don't like unicode keys
# in kwargs.
key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
return ctx
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
if isinstance(context, dict):
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.iteritems()])
else:
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.to_dict().iteritems()])
msg.update(context_d)
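# --- Illustrative usage sketch; not part of the original module. ---
# pack_context() flattens the context into '_context_*' keys so each value
# stays a small, separate message field. The dict below is only an example,
# and (like the rest of this module) it assumes Python 2.
def _demo_pack_context():
    msg = {'method': 'echo', 'args': {'value': 42}}
    pack_context(msg, {'user_id': 'fake-user', 'project_id': 'fake-project'})
    # msg now also contains '_context_user_id' and '_context_project_id'.
    return msg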
class _MsgIdCache(object):
"""This class checks any duplicate messages."""
    # NOTE: This value could be made a configuration item, but
    #       it is not necessary to change it in most cases,
    #       so leave it static for now.
DUP_MSG_CHECK_SIZE = 16
def __init__(self, **kwargs):
self.prev_msgids = collections.deque([],
maxlen=self.DUP_MSG_CHECK_SIZE)
def check_duplicate_message(self, message_data):
"""AMQP consumers may read same message twice when exceptions occur
before ack is returned. This method prevents doing it.
"""
if UNIQUE_ID in message_data:
msg_id = message_data[UNIQUE_ID]
if msg_id not in self.prev_msgids:
self.prev_msgids.append(msg_id)
else:
raise rpc_common.DuplicateMessageError(msg_id=msg_id)
def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
class _ThreadPoolWithWait(object):
"""Base class for a delayed invocation manager.
Used by the Connection class to start up green threads
to handle incoming messages.
"""
def __init__(self, conf, connection_pool):
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def wait(self):
"""Wait for all callback threads to exit."""
self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
"""Wraps a straight callback.
Allows it to be invoked in a green thread.
"""
def __init__(self, conf, callback, connection_pool,
wait_for_consumers=False):
"""Initiates CallbackWrapper object.
:param conf: cfg.CONF instance
:param callback: a callable (probably a function)
:param connection_pool: connection pool as returned by
get_connection_pool()
:param wait_for_consumers: wait for all green threads to
complete and raise the last
caught exception, if any.
"""
super(CallbackWrapper, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.callback = callback
self.wait_for_consumers = wait_for_consumers
self.exc_info = None
def _wrap(self, message_data, **kwargs):
"""Wrap the callback invocation to catch exceptions.
"""
try:
self.callback(message_data, **kwargs)
except Exception:
self.exc_info = sys.exc_info()
def __call__(self, message_data):
self.exc_info = None
self.pool.spawn_n(self._wrap, message_data)
if self.wait_for_consumers:
self.pool.waitall()
if self.exc_info:
raise self.exc_info[1], None, self.exc_info[2]
class ProxyCallback(_ThreadPoolWithWait):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
super(ProxyCallback, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.proxy = proxy
self.msg_id_cache = _MsgIdCache()
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
Parses the message for validity and fires off a thread to call the
proxy object method.
Message data should be a dictionary with two keys:
method: string representing the method to call
args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}}
"""
# It is important to clear the context here, because at this point
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
version = message_data.get('version')
namespace = message_data.get('namespace')
if not method:
LOG.warn(_('no method for message: %s') % message_data)
ctxt.reply(_('No method for message: %s') % message_data,
connection_pool=self.connection_pool)
return
self.pool.spawn_n(self._process_data, ctxt, version, method,
namespace, args)
def _process_data(self, ctxt, version, method, namespace, args):
"""Process a message in a new thread.
If the proxy object we have has a dispatch method
(see rpc.dispatcher.RpcDispatcher), pass it the version,
method, and args and let it dispatch as appropriate. If not, use
the old behavior of magically calling the specified method on the
proxy we have here.
"""
ctxt.update_store()
try:
rval = self.proxy.dispatch(ctxt, version, method, namespace,
**args)
# Check if the result was a generator
if inspect.isgenerator(rval):
for x in rval:
ctxt.reply(x, None, connection_pool=self.connection_pool)
else:
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except rpc_common.ClientException as e:
LOG.debug(_('Expected exception during message handling (%s)') %
e._exc_info[1])
ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool,
log_failure=False)
except Exception:
# sys.exc_info() is deleted by LOG.exception().
exc_info = sys.exc_info()
LOG.error(_('Exception during message handling'),
exc_info=exc_info)
ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
def __init__(self, conf, msg_id, timeout, connection_pool):
self._msg_id = msg_id
self._timeout = timeout or conf.rpc_response_timeout
self._reply_proxy = connection_pool.reply_proxy
self._done = False
self._got_ending = False
self._conf = conf
self._dataqueue = queue.LightQueue()
# Add this caller to the reply proxy's call_waiters
self._reply_proxy.add_call_waiter(self, self._msg_id)
self.msg_id_cache = _MsgIdCache()
def put(self, data):
self._dataqueue.put(data)
def done(self):
if self._done:
return
self._done = True
# Remove this caller from reply proxy's call_waiters
self._reply_proxy.del_call_waiter(self._msg_id)
def _process_data(self, data):
result = None
self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
result = data['result']
return result
def __iter__(self):
"""Return a result until we get a reply with an 'ending' flag."""
if self._done:
raise StopIteration
while True:
try:
data = self._dataqueue.get(timeout=self._timeout)
result = self._process_data(data)
except queue.Empty:
self.done()
raise rpc_common.Timeout()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
if isinstance(result, Exception):
self.done()
raise result
yield result
def create_connection(conf, new, connection_pool):
"""Create a connection."""
return ConnectionContext(conf, connection_pool, pooled=not new)
_reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
LOG.debug(_('Making synchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
_add_unique_id(msg)
pack_context(msg, context)
with _reply_proxy_create_sem:
if not connection_pool.reply_proxy:
connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
return wait_msg
def call(conf, context, topic, msg, timeout, connection_pool):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout, connection_pool)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
if envelope:
msg = rpc_common.serialize_msg(msg)
conn.notify_send(topic, msg)
def cleanup(connection_pool):
if connection_pool:
connection_pool.empty()
def get_control_exchange(conf):
return conf.control_exchange
|
apache-2.0
| -6,104,428,898,525,230,000
| 36.146226
| 79
| 0.603513
| false
| 4.065565
| false
| false
| false
|
plusreed/foxpy
|
main3.py
|
1
|
2910
|
from discord.ext import commands
import discord
import datetime, re
import asyncio
import copy
import logging
import traceback
import sys
from collections import Counter
import config
description = """
I'm Fox, a multi-purpose and modular Discord bot.
"""
init_cogs = [
'plugins.admin.eval',
'plugins.admin.shutdown',
'plugins.core.math',
'plugins.core.ping',
'plugins.core.lastfm',
'plugins.music.voice',
]
dc_log = logging.getLogger('discord')
dc_log.setLevel(logging.DEBUG)
log = logging.getLogger()
log.setLevel(logging.INFO)
handler = logging.FileHandler(filename='fox.log', encoding='utf-8', mode='w')
log.addHandler(handler)
hattr = dict(hidden=True)
prefix = ['$', '^']
fox = commands.Bot(command_prefix=prefix, description=description, pm_help=None, help_attrs=hattr)
@fox.event
async def on_command_error(error, ctx):
if isinstance(error, commands.NoPrivateMessage):
await fox.send_message(ctx.message.author, "Sorry, you can't use this command in private messages.")
elif isinstance(error, commands.DisabledCommand):
await fox.send_message(ctx.message.author, 'Sorry, it looks like that command is disabled.')
elif isinstance(error, commands.CommandInvokeError):
print('In {0.command.qualified_name}:'.format(ctx))
traceback.print_tb(error.original.__traceback__)
print('{0.__class__.__name__}: {0}'.format(error.original))
@fox.event
async def on_ready():
print('Fox is now ready!')
print('Username: ' + fox.user.name)
print('ID: ' + fox.user.id)
print('------')
if not hasattr(fox, 'uptime'):
fox.uptime = datetime.datetime.utcnow()
@fox.event
async def on_resumed():
print("Fox has resumed.")
@fox.event
async def on_command(command, ctx):
fox.commands_used[command.name] += 1
message = ctx.message
if message.channel.is_private:
destination = 'Private Message'
else:
destination = '#{0.channel.name} ({0.server.name})'.format(message)
log.info('{0.timestamp}: {0.author.name} in {1}: {0.content}'.format(message, destination))
@fox.event
async def on_message(message):
if message.author.bot:
return
await fox.process_commands(message)
# @bot.command()
# async def ping():
# await bot.say("pong")
if __name__ == '__main__':
if any('debug' in arg.lower() for arg in sys.argv):
print("Fox is running in debug mode. The command prefix is now '^^'.")
fox.command_prefix = '^^'
fox.client_id = config.BOT_ID
fox.commands_used = Counter()
for plugin in init_cogs:
try:
            fox.load_extension(plugin)
except Exception as e:
print('Error: failed to load plugin {}\n{}: {}'.format(plugin, type(e).__name__, e))
fox.run(config.BOT_TOKEN)
handlers = log.handlers[:]
for hdlr in handlers:
hdlr.close()
log.removeHandler(hdlr)
|
mit
| -8,494,187,333,163,795,000
| 25.944444
| 108
| 0.657388
| false
| 3.427562
| false
| false
| false
|
Sirs0ri/PersonalAssistant
|
samantha/plugins/schedule_plugin.py
|
1
|
8649
|
"""This plugin triggers schedules events.
The different commands are triggered:
* every 10 seconds
* at the start of every minute
* ..hour
* ..day
* ..month
* ..year
All these events are triggered as soon as possible, i.e. 'Day' will be
triggered at 0:00, month on the 1st at 0:00, etc.
"""
###############################################################################
#
# TODO: [ ]
#
###############################################################################
# standard library imports
import datetime
import logging
import random
import threading
import time
# related third party imports
# application specific imports
import samantha.context as context
from samantha.core import subscribe_to
from samantha.plugins.plugin import Plugin
from samantha.tools import eventbuilder
__version__ = "1.3.17"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
SUNRISE = datetime.datetime(1970, 1, 1)
SUNSET = datetime.datetime(1970, 1, 1)
PLUGIN = Plugin("Schedule", True, LOGGER, __file__)
def worker():
"""Check if events should be triggered, sleep 1sec, repeat."""
name = __name__ + ".Thread"
logger = logging.getLogger(name)
logger.debug("Started.")
def _check_daytime(_datetime_obj, _timelist):
if (SUNSET < SUNRISE < _datetime_obj or
SUNRISE < _datetime_obj < SUNSET or
_datetime_obj < SUNSET < SUNRISE):
# The sun has risen.
time_of_day = "day"
else:
# The sun hasn't risen yet.
time_of_day = "night"
if time_of_day == context.get_value("time.time_of_day", None):
logger.debug("It's still %stime.", time_of_day)
else:
logger.debug("It's now %stime.", time_of_day)
context.set_property("time.time_of_day", time_of_day)
keyword = "time.time_of_day.{}".format(time_of_day)
eventbuilder.eEvent(sender_id=name,
keyword=keyword,
data=_timelist).trigger()
# calculate time between now and sunrise
if SUNRISE < _datetime_obj:
# the sunrise is in the past
sunrise_pre_post = "post"
diff_sunrise = _datetime_obj - SUNRISE
else:
# the sunrise is in the future
sunrise_pre_post = "pre"
diff_sunrise = SUNRISE - _datetime_obj
if 0 < diff_sunrise.seconds % 300 < 59:
# the difference between now and the sunrise is a multiple of 5
# minutes (this check is executed every minute, thus I'm checking
            # this in a way that the condition becomes true every 5th minute).
keyword_sunrise = "time.sunrise.{}.{}".format(
sunrise_pre_post,
diff_sunrise.seconds / 60)
LOGGER.warning("Triggering event '%s'!", keyword_sunrise)
eventbuilder.eEvent(sender_id=name,
keyword=keyword_sunrise,
data=_timelist).trigger()
# calculate time between now and sunset
if SUNSET < _datetime_obj:
# the sunset is in the past
sunset_pre_post = "post"
diff_sunset = _datetime_obj - SUNSET
else:
# the sunset is in the future
sunset_pre_post = "pre"
diff_sunset = SUNSET - _datetime_obj
if 0 < diff_sunset.seconds % 300 < 59:
# the difference between now and the sunset is a multiple of 5
# minutes (this check is executed every minute, thus I'm checking
            # this in a way that the condition becomes true every 5th minute).
keyword_sunset = "time.sunset.{}.{}".format(
sunset_pre_post,
diff_sunset.seconds / 60)
LOGGER.warning("Triggering event '%s'!", keyword_sunset)
eventbuilder.eEvent(sender_id=name,
keyword=keyword_sunset,
data=_timelist).trigger()
logger.debug("SUNRISE: %s, SUNSET: %s, NOW: %s",
SUNRISE, SUNSET, _datetime_obj)
def _trigger(keyword, data):
if "10s" in keyword:
ttl = 8
elif "10s" in keyword:
ttl = 55
elif "10s" in keyword:
ttl = 3300
else:
ttl = 0
eventbuilder.eEvent(sender_id=name,
keyword=keyword,
data=data,
ttl=ttl).trigger()
# Initialize the two random events.
# They'll be triggered randomly once an hour/once a day. These two counters
# count down the seconds until the next event. They'll be reset to a random
# value every hour (day) between 0 and the number of seconds in an hour/day
# The default values are 120secs for the hourly event and 180 for the daily
# so that the two events are being triggered relatively soon after starting
rnd_hourly_counter = 120
rnd_daily_counter = 180
while True:
datetime_obj = datetime.datetime.now()
timetuple = datetime_obj.timetuple()
"""
# value: time.struct_time(tm_year=2016, tm_mon=1, tm_mday=22,
# tm_hour=11, tm_min=26, tm_sec=13,
# tm_wday=4, tm_yday=22, tm_isdst=-1)
# ..[0]: tm_year = 2016
# ..[1]: tm_mon = 1
# ..[2]: tm_mday = 22
# ..[3]: tm_hour = 11
# ..[4]: tm_min = 26
# ..[5]: tm_sec = 13
# ..[6]: tm_wday = 4
# ..[7]: tm_yday = 22
# ..[8]: tm_isdst = -1
"""
timelist = list(timetuple)
if rnd_hourly_counter == 0:
_trigger(keyword="time.schedule.hourly_rnd", data=timelist)
if rnd_daily_counter == 0:
_trigger(keyword="time.schedule.daily_rnd", data=timelist)
rnd_hourly_counter -= 1
rnd_daily_counter -= 1
if timelist[5] in [0, 10, 20, 30, 40, 50]:
_trigger(keyword="time.schedule.10s", data=timelist)
if timelist[5] == 0:
# Seconds = 0 -> New Minute
_trigger(keyword="time.schedule.min", data=timelist)
# Check for a change in the time of day
_check_daytime(datetime_obj, timelist)
if timelist[4] == 0:
# Minutes = 0 -> New Hour
_trigger(keyword="time.schedule.hour", data=timelist)
rnd_hourly_counter = random.randint(0, 3599)
if timelist[3] == 0:
# Hours = 0 -> New Day
_trigger(keyword="time.schedule.day", data=timelist)
rnd_daily_counter = random.randint(0, 86399)
if timelist[2] == 1:
# Day of Month = 1 -> New Month
_trigger(keyword="time.schedule.mon",
data=timelist)
if timelist[1] == 1:
# Month = 1 -> New Year
_trigger(keyword="time.schedule.year",
data=timelist)
# sleep to take work from the CPU
time.sleep(1)
@subscribe_to("system.onstart")
def start_thread(key, data):
"""Set up the plugin by starting the worker-thread."""
thread = threading.Thread(target=worker)
thread.daemon = True
thread.start()
return "Worker started successfully."
@subscribe_to("weather.sys.update")
def sun_times(key, data):
"""Update the times for sunset and -rise."""
global SUNRISE, SUNSET
result = ""
invalid = True
if "sunrise" in data:
invalid = False
sunrise = datetime.datetime.fromtimestamp(data["sunrise"])
        if SUNRISE != sunrise:  # value comparison, not object identity
SUNRISE = sunrise
LOGGER.debug("Updated Sunrise to %s",
SUNRISE.strftime('%Y-%m-%d %H:%M:%S'))
result += "Sunrise updated successfully."
if "sunset" in data:
invalid = False
sunset = datetime.datetime.fromtimestamp(data["sunset"])
        if SUNSET != sunset:  # value comparison, not object identity
SUNSET = sunset
LOGGER.debug("Updated Sunset to %s",
SUNSET.strftime('%Y-%m-%d %H:%M:%S'))
result += "Sunset updated successfully."
if invalid:
result = "Error: The data does not contain info about sunrise/-set."
if result == "":
result = "Sunrise/-set were already up to date."
return result
|
mit
| 360,726,340,552,907,840
| 36.441558
| 79
| 0.529194
| false
| 3.998613
| false
| false
| false
|
RaphaelNajera/Sunlight_Sensor
|
documentation/CENG355 Solar Capstone/firmware/Solar_Capstone_PV_v4.py
|
1
|
5201
|
#Retrieving from PV1, PV2, PV4
#Working on retrieving from PV3
#Solar Capstone
#Johnson, Raphael & Adrian
from bs4 import BeautifulSoup
from datetime import datetime
import urllib.request
import threading #Loop
import time
from ctypes import c_short
from ctypes import c_byte
from ctypes import c_ubyte
from time import sleep
#Module for push data to firebase
import pyrebase
#Config for connecting to the Firebase
config = {
"apiKey": "AIzaSyB_inMZruQbJUzueOSRqf0-zwbYoUnZqDA",
"authDomain": "solar-capstone.firebaseapp.com",
"databaseURL": "https://solar-capstone.firebaseio.com/",
"storageBucket": "solar-capstone.appspot.com",
#"serviceAccount": "path/to/serviceAccountKey.json"
}
#====================================================================================================
#Send the data to firebase database every 30 minutes.
#PV1
def SendtoFirebasePV1(db, Date, Power, Dailyyield, Totalyield):
PV1 = {"Date": Date, "Power": Power, "Daily_yield": Dailyyield, "Total_yield": Totalyield}
PV1_result = db.child("PV1").push(PV1)
return;
#PV2
def SendtoFirebasePV2(db, Date, Power, Dailyyield, Totalyield):
PV2 = {"Date": Date, "Power": Power, "Daily_yield": Dailyyield, "Total_yield": Totalyield}
PV2_result = db.child("PV2").push(PV2)
return;
#PV3
#PV4
def SendtoFirebasePV4(db, Date, Power, Dailyyield, Totalyield):
PV4 = {"Date": Date, "Power": Power, "Daily_yield": Dailyyield, "Total_yield": Totalyield}
PV4_result = db.child("PV4").push(PV4)
return;
#====================================================================================================
def GetAuthorized(firebase):
auth = firebase.auth()
return '';
#====================================================================================================
#This function executes every 30 minutes to retrieve data from all solar panels
def repeatEveryHourly():
firebase = pyrebase.initialize_app(config)
    #re-schedules this function every 30 mins (1800 s); adjust the timer value to change the interval
threading.Timer(1800.0, repeatEveryHourly).start()
#grabs the current date and time
currentTime = datetime.now()
print(currentTime.strftime("\n%Y/%m/%d %I:%M %p\n"))
date = currentTime.strftime("%Y/%m/%d %I:%M %p")
#requesting to open this html for reading
PV1 = urllib.request.urlopen("http://10.116.25.7/home.htm").read()
PV2 = urllib.request.urlopen("http://10.116.25.5/production?locale=en").read()
#PV3
PV4 = urllib.request.urlopen("http://10.116.25.6/home.htm").read()
#uses the BeautifulSoup function to process xml and html in Python.
PV1_data = BeautifulSoup(PV1,'lxml')
PV2_data = BeautifulSoup(PV2, 'lxml')
PV4_data = BeautifulSoup(PV4, 'lxml')
    #uses the find() function to locate the <table> elements with an id of "OvTbl"
#PV1
PV1_table = PV1_data.find('table', id="OvTbl")
PV1table_row = PV1_table.find_all('tr')
#PV2
PV2_table = PV2_data.find_all('table')
#PV4
PV4_table = PV4_data.find('table', id="OvTbl")
PV4table_row = PV4_table.find_all('tr')
#Global variables for string comparison
power = "Power:"
daily = "Daily yield:"
total = "Total yield:"
#PV2 global variables for string comparison
power_2 = "Currently"
daily_2 = "Today"
total_2 = "Since Installation"
#Variables for PV1
PV1_power = ""
PV1_daily = ""
PV1_total = ""
#Variables for PV2
PV2_daily = ""
PV2_power = ""
PV2_total = ""
#Variables for PV4
PV4_power = ""
PV4_daily = ""
PV4_total = ""
#Display the info
print("Solar Panel PV1")
for tr in PV1table_row:
td = tr.find_all('td')
row = [i.string for i in td]
print(row[0] + " " + row[1])
if power == row[0]:
PV1_power = row[1]
#print(PV1_power)
if daily == row[0]:
PV1_daily = row[1]
#print(PV1_daily)
if total == row[0]:
PV1_total = row[1]
#print(PV1_total)
print("\nSolar Panel PV2")
for tr in PV2_table:
td = tr.find_all('td')
row = [i.text for i in td]
#Testing
#print("\n Row0: "+row[0])
#print("\n Row1: "+row[1])
#print("\n Row2: "+row[2])
#print("\n Row3: "+row[3])
#print("\n Row4: "+row[4])
#print("\n Row5: "+row[5])
#print("\n Row6: "+row[6])
#print("\n Row7: "+row[7])
#print("\n Row8: "+row[8])
if power_2 == row[1]:
PV2_power = row[2]
print("Power:"+PV2_power)
if daily_2 == row[3]:
PV2_daily = row[4]
print("Daily yield: "+PV2_daily)
if total_2 == row[7]:
PV2_total = row[8]
print("Total yield:"+PV2_total)
print("\nSolar Panel PV4")
for tr in PV4table_row:
td = tr.find_all('td')
row = [i.text for i in td]
print(row[0] + " " + row[1])
if power == row[0]:
PV4_power = row[1]
#print(PV4_power)
if daily == row[0]:
PV4_daily = row[1]
#print(PV4_daily)
if total == row[0]:
PV4_total = row[1]
#print(PV4_total)
#Calls to push the data to the firebase
SendtoFirebasePV1( firebase.database(), date, PV1_power, PV1_daily, PV1_total)
SendtoFirebasePV2( firebase.database(), date, PV2_power, PV2_daily, PV2_total)
SendtoFirebasePV4( firebase.database(), date, PV4_power, PV4_daily, PV4_total)
#====================================================================================
#Main program
def main():
repeatEveryHourly()
return
if __name__ == "__main__":
main()
|
agpl-3.0
| -4,950,104,520,752,690,000
| 28.224719
| 101
| 0.614882
| false
| 2.733053
| false
| false
| false
|
seleniumbase/SeleniumBase
|
seleniumbase/translate/chinese.py
|
1
|
22162
|
# Chinese / 中文 - Translations - Python 3 Only!
from seleniumbase import BaseCase
from seleniumbase import MasterQA
class 硒测试用例(BaseCase): # noqa
def __init__(self, *args, **kwargs):
super(硒测试用例, self).__init__(*args, **kwargs)
self._language = "Chinese"
def 开启(self, *args, **kwargs):
# open(url)
return self.open(*args, **kwargs)
def 开启网址(self, *args, **kwargs):
# open_url(url)
return self.open_url(*args, **kwargs)
def 单击(self, *args, **kwargs):
# click(selector)
return self.click(*args, **kwargs)
def 双击(self, *args, **kwargs):
# double_click(selector)
return self.double_click(*args, **kwargs)
def 慢单击(self, *args, **kwargs):
# slow_click(selector)
return self.slow_click(*args, **kwargs)
def 如果可见请单击(self, *args, **kwargs):
# click_if_visible(selector, by=By.CSS_SELECTOR)
return self.click_if_visible(*args, **kwargs)
def 单击链接文本(self, *args, **kwargs):
# click_link_text(link_text)
return self.click_link_text(*args, **kwargs)
def 更新文本(self, *args, **kwargs):
# update_text(selector, text)
return self.update_text(*args, **kwargs)
def 输入文本(self, *args, **kwargs):
# type(selector, text) # Same as update_text()
return self.type(*args, **kwargs)
def 添加文本(self, *args, **kwargs):
# add_text(selector, text)
return self.add_text(*args, **kwargs)
def 获取文本(self, *args, **kwargs):
# get_text(selector, text)
return self.get_text(*args, **kwargs)
def 断言文本(self, *args, **kwargs):
# assert_text(text, selector)
return self.assert_text(*args, **kwargs)
def 确切断言文本(self, *args, **kwargs):
# assert_exact_text(text, selector)
return self.assert_exact_text(*args, **kwargs)
def 断言链接文本(self, *args, **kwargs):
# assert_link_text(link_text)
return self.assert_link_text(*args, **kwargs)
def 断言元素(self, *args, **kwargs):
# assert_element(selector)
return self.assert_element(*args, **kwargs)
def 断言元素可见(self, *args, **kwargs):
# assert_element_visible(selector) # Same as self.assert_element()
return self.assert_element_visible(*args, **kwargs)
def 断言元素不可见(self, *args, **kwargs):
# assert_element_not_visible(selector)
return self.assert_element_not_visible(*args, **kwargs)
def 断言元素存在(self, *args, **kwargs):
# assert_element_present(selector)
return self.assert_element_present(*args, **kwargs)
def 断言元素不存在(self, *args, **kwargs):
# assert_element_absent(selector)
return self.assert_element_absent(*args, **kwargs)
def 断言属性(self, *args, **kwargs):
# assert_attribute(selector, attribute, value)
return self.assert_attribute(*args, **kwargs)
def 断言标题(self, *args, **kwargs):
# assert_title(title)
return self.assert_title(*args, **kwargs)
def 获取标题(self, *args, **kwargs):
# get_title()
return self.get_title(*args, **kwargs)
def 断言为真(self, *args, **kwargs):
# assert_true(expr)
return self.assert_true(*args, **kwargs)
def 断言为假(self, *args, **kwargs):
# assert_false(expr)
return self.assert_false(*args, **kwargs)
def 断言等于(self, *args, **kwargs):
# assert_equal(first, second)
return self.assert_equal(*args, **kwargs)
def 断言不等于(self, *args, **kwargs):
# assert_not_equal(first, second)
return self.assert_not_equal(*args, **kwargs)
def 刷新页面(self, *args, **kwargs):
# refresh_page()
return self.refresh_page(*args, **kwargs)
def 获取当前网址(self, *args, **kwargs):
# get_current_url()
return self.get_current_url(*args, **kwargs)
def 获取页面源代码(self, *args, **kwargs):
# get_page_source()
return self.get_page_source(*args, **kwargs)
def 回去(self, *args, **kwargs):
# go_back()
return self.go_back(*args, **kwargs)
def 向前(self, *args, **kwargs):
# go_forward()
return self.go_forward(*args, **kwargs)
def 文本是否显示(self, *args, **kwargs):
# is_text_visible(text, selector="html")
return self.is_text_visible(*args, **kwargs)
def 元素是否可见(self, *args, **kwargs):
# is_element_visible(selector)
return self.is_element_visible(*args, **kwargs)
def 元素是否启用(self, *args, **kwargs):
# is_element_enabled(selector)
return self.is_element_enabled(*args, **kwargs)
def 元素是否存在(self, *args, **kwargs):
# is_element_present(selector)
return self.is_element_present(*args, **kwargs)
def 等待文本(self, *args, **kwargs):
# wait_for_text(text, selector="html")
return self.wait_for_text(*args, **kwargs)
def 等待元素(self, *args, **kwargs):
# wait_for_element(selector)
return self.wait_for_element(*args, **kwargs)
def 等待元素可见(self, *args, **kwargs):
# wait_for_element_visible(selector) # Same as wait_for_element()
return self.wait_for_element_visible(*args, **kwargs)
def 等待元素不可见(self, *args, **kwargs):
# wait_for_element_not_visible(selector)
return self.wait_for_element_not_visible(*args, **kwargs)
def 等待元素存在(self, *args, **kwargs):
# wait_for_element_present(selector)
return self.wait_for_element_present(*args, **kwargs)
def 等待元素不存在(self, *args, **kwargs):
# wait_for_element_absent(selector)
return self.wait_for_element_absent(*args, **kwargs)
def 等待属性(self, *args, **kwargs):
# wait_for_attribute(selector, attribute, value)
return self.wait_for_attribute(*args, **kwargs)
def 睡(self, *args, **kwargs):
# sleep(seconds)
return self.sleep(*args, **kwargs)
def 等待(self, *args, **kwargs):
# wait(seconds) # Same as sleep(seconds)
return self.wait(*args, **kwargs)
def 提交(self, *args, **kwargs):
# submit(selector)
return self.submit(*args, **kwargs)
def 清除(self, *args, **kwargs):
# clear(selector)
return self.clear(*args, **kwargs)
def 专注于(self, *args, **kwargs):
# focus(selector)
return self.focus(*args, **kwargs)
def JS单击(self, *args, **kwargs):
# js_click(selector)
return self.js_click(*args, **kwargs)
def JS更新文本(self, *args, **kwargs):
# js_update_text(selector, text)
return self.js_update_text(*args, **kwargs)
def JS输入文本(self, *args, **kwargs):
# js_type(selector, text)
return self.js_type(*args, **kwargs)
def 检查HTML(self, *args, **kwargs):
# inspect_html()
return self.inspect_html(*args, **kwargs)
def 保存截图(self, *args, **kwargs):
# save_screenshot(name)
return self.save_screenshot(*args, **kwargs)
def 保存截图到日志(self, *args, **kwargs):
# save_screenshot_to_logs(name)
return self.save_screenshot_to_logs(*args, **kwargs)
def 选择文件(self, *args, **kwargs):
# choose_file(selector, file_path)
return self.choose_file(*args, **kwargs)
def 执行脚本(self, *args, **kwargs):
# execute_script(script)
return self.execute_script(*args, **kwargs)
def 安全执行脚本(self, *args, **kwargs):
# safe_execute_script(script)
return self.safe_execute_script(*args, **kwargs)
def 加载JQUERY(self, *args, **kwargs):
# activate_jquery()
return self.activate_jquery(*args, **kwargs)
def 阻止广告(self, *args, **kwargs):
# ad_block()
return self.ad_block(*args, **kwargs)
def 跳过(self, *args, **kwargs):
# skip(reason="")
return self.skip(*args, **kwargs)
def 检查断开的链接(self, *args, **kwargs):
# assert_no_404_errors()
return self.assert_no_404_errors(*args, **kwargs)
def 检查JS错误(self, *args, **kwargs):
# assert_no_js_errors()
return self.assert_no_js_errors(*args, **kwargs)
def 切换到帧(self, *args, **kwargs):
# switch_to_frame(frame)
return self.switch_to_frame(*args, **kwargs)
def 切换到默认内容(self, *args, **kwargs):
# switch_to_default_content()
return self.switch_to_default_content(*args, **kwargs)
def 打开新窗口(self, *args, **kwargs):
# open_new_window()
return self.open_new_window(*args, **kwargs)
def 切换到窗口(self, *args, **kwargs):
# switch_to_window(window)
return self.switch_to_window(*args, **kwargs)
def 切换到默认窗口(self, *args, **kwargs):
# switch_to_default_window()
return self.switch_to_default_window(*args, **kwargs)
def 切换到最新的窗口(self, *args, **kwargs):
# switch_to_newest_window()
return self.switch_to_newest_window(*args, **kwargs)
def 最大化窗口(self, *args, **kwargs):
# maximize_window()
return self.maximize_window(*args, **kwargs)
def 亮点(self, *args, **kwargs):
# highlight(selector)
return self.highlight(*args, **kwargs)
def 亮点单击(self, *args, **kwargs):
# highlight_click(selector)
return self.highlight_click(*args, **kwargs)
def 滚动到(self, *args, **kwargs):
# scroll_to(selector)
return self.scroll_to(*args, **kwargs)
def 滚动到顶部(self, *args, **kwargs):
# scroll_to_top()
return self.scroll_to_top(*args, **kwargs)
def 滚动到底部(self, *args, **kwargs):
# scroll_to_bottom()
return self.scroll_to_bottom(*args, **kwargs)
def 悬停并单击(self, *args, **kwargs):
# hover_and_click(hover_selector, click_selector)
return self.hover_and_click(*args, **kwargs)
def 是否被选中(self, *args, **kwargs):
# is_selected(selector)
return self.is_selected(*args, **kwargs)
def 按向上箭头(self, *args, **kwargs):
# press_up_arrow(selector="html", times=1)
return self.press_up_arrow(*args, **kwargs)
def 按向下箭头(self, *args, **kwargs):
# press_down_arrow(selector="html", times=1)
return self.press_down_arrow(*args, **kwargs)
def 按向左箭头(self, *args, **kwargs):
# press_left_arrow(selector="html", times=1)
return self.press_left_arrow(*args, **kwargs)
def 按向右箭头(self, *args, **kwargs):
# press_right_arrow(selector="html", times=1)
return self.press_right_arrow(*args, **kwargs)
def 单击可见元素(self, *args, **kwargs):
# click_visible_elements(selector)
return self.click_visible_elements(*args, **kwargs)
def 按文本选择选项(self, *args, **kwargs):
# select_option_by_text(dropdown_selector, option)
return self.select_option_by_text(*args, **kwargs)
def 按索引选择选项(self, *args, **kwargs):
# select_option_by_index(dropdown_selector, option)
return self.select_option_by_index(*args, **kwargs)
def 按值选择选项(self, *args, **kwargs):
# select_option_by_value(dropdown_selector, option)
return self.select_option_by_value(*args, **kwargs)
def 创建演示文稿(self, *args, **kwargs):
# create_presentation(name=None, theme="default", transition="default")
return self.create_presentation(*args, **kwargs)
def 添加幻灯片(self, *args, **kwargs):
# add_slide(content=None, image=None, code=None, iframe=None,
# content2=None, notes=None, transition=None, name=None)
return self.add_slide(*args, **kwargs)
def 保存演示文稿(self, *args, **kwargs):
# save_presentation(name=None, filename=None,
# show_notes=False, interval=0)
return self.save_presentation(*args, **kwargs)
def 开始演示文稿(self, *args, **kwargs):
# begin_presentation(name=None, filename=None,
# show_notes=False, interval=0)
return self.begin_presentation(*args, **kwargs)
def 创建饼图(self, *args, **kwargs):
# create_pie_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, libs=True)
return self.create_pie_chart(*args, **kwargs)
def 创建条形图(self, *args, **kwargs):
# create_bar_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, libs=True)
return self.create_bar_chart(*args, **kwargs)
def 创建柱形图(self, *args, **kwargs):
# create_column_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, libs=True)
return self.create_column_chart(*args, **kwargs)
def 创建折线图(self, *args, **kwargs):
# create_line_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, zero=False, libs=True)
return self.create_line_chart(*args, **kwargs)
def 创建面积图(self, *args, **kwargs):
# create_area_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, zero=False, libs=True)
return self.create_area_chart(*args, **kwargs)
def 将系列添加到图表(self, *args, **kwargs):
# add_series_to_chart(data_name=None, chart_name=None)
return self.add_series_to_chart(*args, **kwargs)
def 添加数据点(self, *args, **kwargs):
# add_data_point(label, value, color=None, chart_name=None)
return self.add_data_point(*args, **kwargs)
def 保存图表(self, *args, **kwargs):
# save_chart(chart_name=None, filename=None)
return self.save_chart(*args, **kwargs)
def 显示图表(self, *args, **kwargs):
# display_chart(chart_name=None, filename=None, interval=0)
return self.display_chart(*args, **kwargs)
def 提取图表(self, *args, **kwargs):
# extract_chart(chart_name=None)
return self.extract_chart(*args, **kwargs)
def 创建游览(self, *args, **kwargs):
# create_tour(name=None, theme=None)
return self.create_tour(*args, **kwargs)
def 创建SHEPHERD游览(self, *args, **kwargs):
# create_shepherd_tour(name=None, theme=None)
return self.create_shepherd_tour(*args, **kwargs)
def 创建BOOTSTRAP游览(self, *args, **kwargs):
# create_bootstrap_tour(name=None, theme=None)
return self.create_bootstrap_tour(*args, **kwargs)
def 创建DRIVERJS游览(self, *args, **kwargs):
# create_driverjs_tour(name=None, theme=None)
return self.create_driverjs_tour(*args, **kwargs)
def 创建HOPSCOTCH游览(self, *args, **kwargs):
# create_hopscotch_tour(name=None, theme=None)
return self.create_hopscotch_tour(*args, **kwargs)
def 创建INTROJS游览(self, *args, **kwargs):
# create_introjs_tour(name=None, theme=None)
return self.create_introjs_tour(*args, **kwargs)
def 添加游览步骤(self, *args, **kwargs):
# add_tour_step(message, selector=None, name=None,
# title=None, theme=None, alignment=None)
return self.add_tour_step(*args, **kwargs)
def 播放游览(self, *args, **kwargs):
# play_tour(name=None)
return self.play_tour(*args, **kwargs)
def 导出游览(self, *args, **kwargs):
# export_tour(name=None, filename="my_tour.js", url=None)
return self.export_tour(*args, **kwargs)
def 获取PDF文本(self, *args, **kwargs):
# get_pdf_text(pdf, page=None, maxpages=None, password=None,
# codec='utf-8', wrap=False, nav=False, override=False)
return self.get_pdf_text(*args, **kwargs)
def 断言PDF文本(self, *args, **kwargs):
# assert_pdf_text(pdf, text, page=None, maxpages=None, password=None,
# codec='utf-8', wrap=True, nav=False, override=False)
return self.assert_pdf_text(*args, **kwargs)
def 下载文件(self, *args, **kwargs):
# download_file(file)
return self.download_file(*args, **kwargs)
def 下载的文件是否存在(self, *args, **kwargs):
# is_downloaded_file_present(file)
return self.is_downloaded_file_present(*args, **kwargs)
def 获取下载的文件路径(self, *args, **kwargs):
# get_path_of_downloaded_file(file)
return self.get_path_of_downloaded_file(*args, **kwargs)
def 检查下载的文件(self, *args, **kwargs):
# assert_downloaded_file(file)
return self.assert_downloaded_file(*args, **kwargs)
def 删除下载的文件(self, *args, **kwargs):
# delete_downloaded_file(file)
return self.delete_downloaded_file(*args, **kwargs)
def 失败(self, *args, **kwargs):
# fail(msg=None) # Inherited from "unittest"
return self.fail(*args, **kwargs)
def 获取(self, *args, **kwargs):
# get(url) # Same as open(url)
return self.get(*args, **kwargs)
def 访问(self, *args, **kwargs):
# visit(url) # Same as open(url)
return self.visit(*args, **kwargs)
def 访问网址(self, *args, **kwargs):
# visit_url(url) # Same as open(url)
return self.visit_url(*args, **kwargs)
def 获取元素(self, *args, **kwargs):
# get_element(selector) # Element can be hidden
return self.get_element(*args, **kwargs)
def 查找元素(self, *args, **kwargs):
# find_element(selector) # Element must be visible
return self.find_element(*args, **kwargs)
def 删除第一个元素(self, *args, **kwargs):
# remove_element(selector)
return self.remove_element(*args, **kwargs)
def 删除所有元素(self, *args, **kwargs):
# remove_elements(selector)
return self.remove_elements(*args, **kwargs)
def 查找文本(self, *args, **kwargs):
# find_text(text, selector="html") # Same as wait_for_text
return self.find_text(*args, **kwargs)
def 设置文本(self, *args, **kwargs):
# set_text(selector, text)
return self.set_text(*args, **kwargs)
def 获取属性(self, *args, **kwargs):
# get_attribute(selector, attribute)
return self.get_attribute(*args, **kwargs)
def 设置属性(self, *args, **kwargs):
# set_attribute(selector, attribute, value)
return self.set_attribute(*args, **kwargs)
def 设置所有属性(self, *args, **kwargs):
# set_attributes(selector, attribute, value)
return self.set_attributes(*args, **kwargs)
def 写文本(self, *args, **kwargs):
# write(selector, text) # Same as update_text()
return self.write(*args, **kwargs)
def 设置消息主题(self, *args, **kwargs):
# set_messenger_theme(theme="default", location="default")
return self.set_messenger_theme(*args, **kwargs)
def 显示讯息(self, *args, **kwargs):
# post_message(message, duration=None, pause=True, style="info")
return self.post_message(*args, **kwargs)
def 打印(self, *args, **kwargs):
# _print(msg) # Same as Python print()
return self._print(*args, **kwargs)
def 推迟断言元素(self, *args, **kwargs):
# deferred_assert_element(selector)
return self.deferred_assert_element(*args, **kwargs)
def 推迟断言文本(self, *args, **kwargs):
# deferred_assert_text(text, selector="html")
return self.deferred_assert_text(*args, **kwargs)
def 处理推迟断言(self, *args, **kwargs):
# process_deferred_asserts(print_only=False)
return self.process_deferred_asserts(*args, **kwargs)
def 接受警报(self, *args, **kwargs):
# accept_alert(timeout=None)
return self.accept_alert(*args, **kwargs)
def 解除警报(self, *args, **kwargs):
# dismiss_alert(timeout=None)
return self.dismiss_alert(*args, **kwargs)
def 切换到警报(self, *args, **kwargs):
# switch_to_alert(timeout=None)
return self.switch_to_alert(*args, **kwargs)
def 拖放(self, *args, **kwargs):
# drag_and_drop(drag_selector, drop_selector)
return self.drag_and_drop(*args, **kwargs)
def 设置HTML(self, *args, **kwargs):
# set_content(html_string, new_page=False)
return self.set_content(*args, **kwargs)
def 加载HTML文件(self, *args, **kwargs):
# load_html_file(html_file, new_page=True)
return self.load_html_file(*args, **kwargs)
def 打开HTML文件(self, *args, **kwargs):
# open_html_file(html_file)
return self.open_html_file(*args, **kwargs)
def 删除所有COOKIE(self, *args, **kwargs):
# delete_all_cookies()
return self.delete_all_cookies(*args, **kwargs)
def 获取用户代理(self, *args, **kwargs):
# get_user_agent()
return self.get_user_agent(*args, **kwargs)
def 获取语言代码(self, *args, **kwargs):
# get_locale_code()
return self.get_locale_code(*args, **kwargs)
class MasterQA_中文(MasterQA, 硒测试用例):
def 校验(self, *args, **kwargs):
# "Manual Check"
self.DEFAULT_VALIDATION_TITLE = "手动检查"
# "Does the page look good?"
self.DEFAULT_VALIDATION_MESSAGE = "页面是否看起来不错?"
# verify(QUESTION)
return self.verify(*args, **kwargs)
|
mit
| 8,168,400,214,733,496,000
| 33.495017
| 79
| 0.597419
| false
| 2.927675
| false
| false
| false
|
bolkedebruin/airflow
|
tests/providers/microsoft/azure/operators/test_azure_container_instances.py
|
1
|
9083
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from collections import namedtuple
import mock
from azure.mgmt.containerinstance.models import ContainerState, Event
from airflow.exceptions import AirflowException
from airflow.providers.microsoft.azure.operators.azure_container_instances import (
AzureContainerInstancesOperator,
)
def make_mock_cg(container_state, events=None):
"""
Make a mock Container Group as the underlying azure Models have read-only attributes
See https://docs.microsoft.com/en-us/rest/api/container-instances/containergroups
"""
events = events or []
instance_view_dict = {"current_state": container_state,
"events": events}
instance_view = namedtuple("InstanceView",
instance_view_dict.keys())(*instance_view_dict.values())
container_dict = {"instance_view": instance_view}
container = namedtuple("Container", container_dict.keys())(*container_dict.values())
container_g_dict = {"containers": [container]}
container_g = namedtuple("ContainerGroup",
container_g_dict.keys())(*container_g_dict.values())
return container_g
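# Example (sketch, mirroring the tests below): a mock group whose single
# container has terminated cleanly could be built as
#
#     cg = make_mock_cg(ContainerState(state='Terminated', exit_code=0,
#                                      detail_status='ok'))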
class TestACIOperator(unittest.TestCase):
@mock.patch("airflow.providers.microsoft.azure.operators."
"azure_container_instances.AzureContainerInstanceHook")
def test_execute(self, aci_mock):
expected_c_state = ContainerState(state='Terminated', exit_code=0, detail_status='test')
expected_cg = make_mock_cg(expected_c_state)
aci_mock.return_value.get_state.return_value = expected_cg
aci_mock.return_value.exists.return_value = False
aci = AzureContainerInstancesOperator(ci_conn_id=None,
registry_conn_id=None,
resource_group='resource-group',
name='container-name',
image='container-image',
region='region',
task_id='task')
aci.execute(None)
self.assertEqual(aci_mock.return_value.create_or_update.call_count, 1)
(called_rg, called_cn, called_cg), _ = \
aci_mock.return_value.create_or_update.call_args
self.assertEqual(called_rg, 'resource-group')
self.assertEqual(called_cn, 'container-name')
self.assertEqual(called_cg.location, 'region')
self.assertEqual(called_cg.image_registry_credentials, None)
self.assertEqual(called_cg.restart_policy, 'Never')
self.assertEqual(called_cg.os_type, 'Linux')
called_cg_container = called_cg.containers[0]
self.assertEqual(called_cg_container.name, 'container-name')
self.assertEqual(called_cg_container.image, 'container-image')
self.assertEqual(aci_mock.return_value.delete.call_count, 1)
@mock.patch("airflow.providers.microsoft.azure.operators."
"azure_container_instances.AzureContainerInstanceHook")
def test_execute_with_failures(self, aci_mock):
expected_c_state = ContainerState(state='Terminated', exit_code=1, detail_status='test')
expected_cg = make_mock_cg(expected_c_state)
aci_mock.return_value.get_state.return_value = expected_cg
aci_mock.return_value.exists.return_value = False
aci = AzureContainerInstancesOperator(ci_conn_id=None,
registry_conn_id=None,
resource_group='resource-group',
name='container-name',
image='container-image',
region='region',
task_id='task')
with self.assertRaises(AirflowException):
aci.execute(None)
self.assertEqual(aci_mock.return_value.delete.call_count, 1)
@mock.patch("airflow.providers.microsoft.azure.operators."
"azure_container_instances.AzureContainerInstanceHook")
def test_execute_with_tags(self, aci_mock):
expected_c_state = ContainerState(state='Terminated', exit_code=0, detail_status='test')
expected_cg = make_mock_cg(expected_c_state)
tags = {"testKey": "testValue"}
aci_mock.return_value.get_state.return_value = expected_cg
aci_mock.return_value.exists.return_value = False
aci = AzureContainerInstancesOperator(ci_conn_id=None,
registry_conn_id=None,
resource_group='resource-group',
name='container-name',
image='container-image',
region='region',
task_id='task',
tags=tags)
aci.execute(None)
self.assertEqual(aci_mock.return_value.create_or_update.call_count, 1)
(called_rg, called_cn, called_cg), _ = \
aci_mock.return_value.create_or_update.call_args
self.assertEqual(called_rg, 'resource-group')
self.assertEqual(called_cn, 'container-name')
self.assertEqual(called_cg.location, 'region')
self.assertEqual(called_cg.image_registry_credentials, None)
self.assertEqual(called_cg.restart_policy, 'Never')
self.assertEqual(called_cg.os_type, 'Linux')
self.assertEqual(called_cg.tags, tags)
called_cg_container = called_cg.containers[0]
self.assertEqual(called_cg_container.name, 'container-name')
self.assertEqual(called_cg_container.image, 'container-image')
self.assertEqual(aci_mock.return_value.delete.call_count, 1)
@mock.patch("airflow.providers.microsoft.azure.operators."
"azure_container_instances.AzureContainerInstanceHook")
def test_execute_with_messages_logs(self, aci_mock):
events = [Event(message="test"), Event(message="messages")]
expected_c_state1 = ContainerState(state='Running', exit_code=0, detail_status='test')
expected_cg1 = make_mock_cg(expected_c_state1, events)
expected_c_state2 = ContainerState(state='Terminated', exit_code=0, detail_status='test')
expected_cg2 = make_mock_cg(expected_c_state2, events)
aci_mock.return_value.get_state.side_effect = [expected_cg1,
expected_cg2]
aci_mock.return_value.get_logs.return_value = ["test", "logs"]
aci_mock.return_value.exists.return_value = False
aci = AzureContainerInstancesOperator(ci_conn_id=None,
registry_conn_id=None,
resource_group='resource-group',
name='container-name',
image='container-image',
region='region',
task_id='task')
aci.execute(None)
self.assertEqual(aci_mock.return_value.create_or_update.call_count, 1)
self.assertEqual(aci_mock.return_value.get_state.call_count, 2)
self.assertEqual(aci_mock.return_value.get_logs.call_count, 2)
self.assertEqual(aci_mock.return_value.delete.call_count, 1)
def test_name_checker(self):
valid_names = ['test-dash', 'name-with-length---63' * 3]
invalid_names = ['test_underscore',
'name-with-length---84' * 4,
'name-ending-with-dash-',
'-name-starting-with-dash']
for name in invalid_names:
with self.assertRaises(AirflowException):
AzureContainerInstancesOperator._check_name(name)
for name in valid_names:
checked_name = AzureContainerInstancesOperator._check_name(name)
self.assertEqual(checked_name, name)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| 2,417,109,348,444,527,600
| 45.106599
| 97
| 0.594187
| false
| 4.345933
| true
| false
| false
|
alfred82santa/telebot
|
aiotelebot/formatters.py
|
1
|
2590
|
from json import dumps
from aiohttp.hdrs import CONTENT_TYPE
from aiohttp.multipart import MultipartWriter
from aiohttp.payload import get_payload
from multidict import CIMultiDict
from dirty_models.fields import ArrayField, ModelField
from dirty_models.models import BaseModel
from dirty_models.utils import ModelFormatterIter, JSONEncoder, ListFormatterIter
from service_client.json import json_decoder
from .messages import FileModel, Response
class ContainsFileError(Exception):
pass
class TelegramModelFormatterIter(ModelFormatterIter):
def format_field(self, field, value):
if isinstance(field, ModelField):
if isinstance(value, FileModel):
return value
return dumps(value, cls=JSONEncoder)
elif isinstance(field, ArrayField):
return dumps(ListFormatterIter(obj=value,
field=value.get_field_type(),
parent_formatter=ModelFormatterIter(model=self.model)),
cls=JSONEncoder)
return super(TelegramModelFormatterIter, self).format_field(field, value)
class TelegramJsonEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, FileModel):
raise ContainsFileError()
elif isinstance(obj, BaseModel):
obj = TelegramModelFormatterIter(obj)
return super(TelegramJsonEncoder, self).default(obj)
def telegram_encoder(content, *args, **kwargs):
try:
return dumps(content, cls=TelegramJsonEncoder)
except ContainsFileError:
pass
formatter = TelegramModelFormatterIter(content)
kwargs['endpoint_desc']['stream_request'] = True
mp = MultipartWriter('form-data')
for field, value in formatter:
content_dispositon = {'name': field}
if isinstance(value, FileModel):
part = get_payload(value.stream, headers=CIMultiDict())
if value.name:
content_dispositon['filename'] = value.name
if value.mime_type:
part.headers[CONTENT_TYPE] = value.mime_type
else:
part = get_payload(str(value), headers=CIMultiDict())
part.set_content_disposition("form-data", **content_dispositon)
mp.append_payload(part)
try:
kwargs['request_params']['headers'].update(mp.headers)
except KeyError:
kwargs['request_params']['headers'] = mp.headers
return mp
def telegram_decoder(content, *args, **kwargs):
return Response(json_decoder(content, *args, **kwargs))
|
lgpl-3.0
| 2,767,400,065,749,186,000
| 30.975309
| 98
| 0.661004
| false
| 4.345638
| false
| false
| false
|
ActiveState/code
|
recipes/Python/578102_Nautilus_script_push_files_S3/recipe-578102.py
|
1
|
1426
|
#!/usr/bin/env python
import mimetypes
import os
import sys
import boto
from boto.s3.connection import S3Connection
from boto.s3.key import Key
def get_s3_conn():
return S3Connection()
def get_bucket(conn, name):
return conn.get_bucket(name)
og = os.environ.get
bucket_name = og('NAUTILUS_BUCKET_NAME', 'media.foo.com')
bucket_prefix = og('NAUTILUS_BUCKET_PREFIX', 'scrapspace/files')
conn = get_s3_conn()
bucket = get_bucket(conn, bucket_name)
def get_ctype(f):
return mimetypes.guess_type(f)[0] or "application/x-octet-stream"
def put_file(filename, keyname):
new_key = Key(bucket)
new_key.key = keyname
new_key.set_metadata('Content-Type', get_ctype(filename))
new_key.set_contents_from_filename(filename)
if __name__ == '__main__':
for name in sys.argv[1:]:
full = os.path.abspath(name)
if os.path.isdir(name):
parent_dir = os.path.dirname(full)
for base, directories, files in os.walk(full):
for filename in files:
full_path = os.path.join(base, filename)
rel_path = os.path.relpath(full_path, parent_dir)
keyname = os.path.join(bucket_prefix, rel_path)
put_file(full_path, keyname)
else:
filename = os.path.basename(name)
keyname = os.path.join(bucket_prefix, filename)
put_file(filename, keyname)
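# Usage sketch (assumptions, not part of the original recipe): boto picks up AWS
# credentials from the environment or ~/.boto, and the target bucket/prefix can
# be overridden through the two environment variables read above, e.g. when the
# script (here called push_to_s3.py for illustration) is invoked on selected
# files or folders:
#
#     NAUTILUS_BUCKET_NAME=my-bucket NAUTILUS_BUCKET_PREFIX=uploads \
#         python push_to_s3.py some_file.txt some_directory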
|
mit
| -6,771,188,464,086,638,000
| 28.708333
| 69
| 0.626928
| false
| 3.293303
| false
| false
| false
|
wmvanvliet/psychic
|
psychic/scalpplot.py
|
1
|
3391
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from matplotlib.path import Path
from matplotlib.patches import PathPatch, Circle
from . import positions
def plot_scalp(densities, sensors, sensor_locs=None,
plot_sensors=True, plot_contour=True, cmap=None, clim=None, smark='k.', linewidth=2, fontsize=8):
if sensor_locs is None:
sensor_locs = positions.POS_10_5
if cmap is None:
cmap = plt.get_cmap('RdBu_r')
# add densities
if clim is None:
cmax = np.max(np.abs(densities))
clim = [-cmax, cmax]
locs = [positions.project_scalp(*sensor_locs[lab]) for lab in sensors]
add_density(densities, locs, cmap=cmap, clim=clim, plot_contour=plot_contour)
# setup plot
MARGIN = 1.2
plt.xlim(-MARGIN, MARGIN)
plt.ylim(-MARGIN, MARGIN)
plt.box(False)
ax = plt.gca()
ax.set_aspect(1.2)
ax.yaxis.set_ticks([],[])
ax.xaxis.set_ticks([],[])
# add details
add_head(linewidth)
if plot_sensors:
add_sensors(sensors, locs, smark, fontsize)
def add_head(linewidth=2):
'''Draw head outline'''
nose = [(Path.MOVETO, (-.1, 1.)), (Path.LINETO, (0, 1.1)),
(Path.LINETO, (.1, 1.))]
lear = [(Path.MOVETO, (-1, .134)), (Path.LINETO, (-1.04, 0.08)),
(Path.LINETO, (-1.08, -0.11)), (Path.LINETO, (-1.06, -0.16)),
(Path.LINETO, (-1.02, -0.15)), (Path.LINETO, (-1, -0.12))]
rear = [(c, (-px, py)) for (c, (px, py)) in lear]
# plot outline
ax = plt.gca()
ax.add_artist(plt.Circle((0, 0), 1, fill=False, linewidth=linewidth))
# add nose and ears
for p in [nose, lear, rear]:
code, verts = list(zip(*p))
ax.add_patch(PathPatch(Path(verts, code), fill=False, linewidth=linewidth))
def add_sensors(labels, locs, smark='k.', fontsize=8):
'''Adds sensor names and markers'''
for (label, (x, y)) in zip(labels, locs):
if len(labels) <= 16:
plt.text(x, y + .03, label, fontsize=fontsize, ha='center')
plt.plot(x, y, smark, ms=2.)
def add_density(dens, locs, cmap=plt.cm.jet, clim=None, plot_contour=True):
'''
This function draws the densities using the locations provided in
sensor_dict. The two are connected throught the list labels. The densities
are inter/extrapolated on a grid slightly bigger than the head using
scipy.interpolate.rbf. The grid is drawn using the colors provided in cmap
and clim inside a circle. Contours are drawn on top of this grid.
'''
RESOLUTION = 50
RADIUS = 1.2
xs, ys = list(zip(*locs))
extent = [-1.2, 1.2, -1.2, 1.2]
vmin, vmax = clim
# interpolate
  # TODO: replace with Gaussian process interpolator. I don't trust SciPy's
  # interpolation functions (they wiggle and they segfault).
rbf = interpolate.Rbf(xs, ys, dens, function='linear')
xg = np.linspace(extent[0], extent[1], RESOLUTION)
yg = np.linspace(extent[2], extent[3], RESOLUTION)
xg, yg = np.meshgrid(xg, yg)
zg = rbf(xg, yg)
# draw contour
if plot_contour:
plt.contour(xg, yg, np.where(xg ** 2 + yg ** 2 <= RADIUS ** 2, zg, np.nan),
np.linspace(vmin, vmax, 13), colors='k', extent=extent, linewidths=.3)
# draw grid, needs to be last to enable plt.colormap() to work
im = plt.imshow(zg, origin='lower', extent=extent, vmin=vmin, vmax=vmax,
cmap=cmap)
# clip grid to circle
patch = Circle((0, 0), radius=RADIUS, facecolor='none', edgecolor='none')
plt.gca().add_patch(patch)
im.set_clip_path(patch)
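# Minimal usage sketch (not part of the original module; assumes the chosen
# labels exist in positions.POS_10_5):
#
#     labels = ['Fz', 'Cz', 'Pz', 'Oz']
#     plot_scalp(np.random.randn(len(labels)), labels)
#     plt.show()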
|
bsd-3-clause
| 607,890,273,072,048,300
| 32.245098
| 99
| 0.654969
| false
| 2.861603
| false
| false
| false
|
ShaguptaS/python
|
bigml/tests/create_evaluation_steps.py
|
1
|
3119
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012, 2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
from datetime import datetime, timedelta
from world import world
from bigml.api import HTTP_CREATED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from read_evaluation_steps import i_get_the_evaluation
#@step(r'I create an evaluation for the model with the dataset$')
def i_create_an_evaluation(step):
dataset = world.dataset.get('resource')
model = world.model.get('resource')
resource = world.api.create_evaluation(model, dataset)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.evaluation = resource['object']
world.evaluations.append(resource['resource'])
#@step(r'I create an evaluation for the ensemble with the dataset$')
def i_create_an_evaluation_ensemble(step):
dataset = world.dataset.get('resource')
ensemble = world.ensemble.get('resource')
resource = world.api.create_evaluation(ensemble, dataset)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.evaluation = resource['object']
world.evaluations.append(resource['resource'])
#@step(r'I wait until the evaluation status code is either (\d) or (-\d) less than (\d+)')
def wait_until_evaluation_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
i_get_the_evaluation(step, world.evaluation['resource'])
status = get_status(world.evaluation)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert datetime.utcnow() - start < timedelta(seconds=int(secs))
i_get_the_evaluation(step, world.evaluation['resource'])
status = get_status(world.evaluation)
assert status['code'] == int(code1)
#@step(r'I wait until the evaluation is ready less than (\d+)')
def the_evaluation_is_finished_in_less_than(step, secs):
wait_until_evaluation_status_code_is(step, FINISHED, FAULTY, secs)
#@step(r'the measured "(.*)" is (\d+\.*\d*)')
def the_measured_measure_is_value(step, measure, value):
ev = world.evaluation['result']['model'][measure] + 0.0
assert ev == float(value), "The %s is: %s and %s is expected" % (
measure, ev, float(value))
#@step(r'the measured "(.*)" is greater than (\d+\.*\d*)')
def the_measured_measure_is_greater_value(step, measure, value):
assert world.evaluation['result']['model'][measure] + 0.0 > float(value)
|
apache-2.0
| 8,521,414,331,171,372,000
| 38.987179
| 90
| 0.702469
| false
| 3.60578
| false
| false
| false
|
miquelo/exectask
|
packages/exectask/context.py
|
1
|
4804
|
#
# This file is part of EXECTASK.
#
# EXECTASK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EXECTASK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EXECTASK. If not, see <http://www.gnu.org/licenses/>.
#
from exectask.expression import *
from exectask.merge import *
import json
import sys
class ExecuteTaskContext:
class NonePrinterFactory:
def printer(self, out):
return NonePrinter()
class NonePrinter:
def print(self, text, level=0, color=None, style=None):
pass
def __init__(self, actions={}, printer_fact=NonePrinterFactory()):
self.__actions = actions
self.__printer_fact = printer_fact
self.__variables_stack = [
{}
]
self.__variables = ExpressionDict(self.__variables_stack[-1], self)
def __len__(self):
return self.__variables.__len__()
def __length_hint__(self):
return self.__variables.__length_hint__()
def __getitem__(self, key):
return self.__variables.__getitem__(key)
def __missing__(self):
self.__variables.__missing__()
def __setitem__(self, key, value):
self.__variables.__setitem__(key, value)
def __delitem__(self, key):
self.__variables.__delitem__(key)
def __iter__(self):
return self.__variables.__iter__()
def __reversed__(self):
return self.__variables.__reversed__()
def __contains__(self, item):
return self.__variables.__contains__(item)
def items(self):
return self.__variables.items()
def printer(self, out):
return self.__printer_fact.printer(out)
def execute_task(self, task, variables={}):
# Check parameter types
if not isinstance(task, dict):
raise TypeError('\'task\' must be a dictionary')
if not isinstance(variables, dict):
raise TypeError('\'variables\' must be a dictionary')
# Gather top variables
top_vars = self.__variables_stack[-1]
try:
task_vars = task['variables']
if not isinstance(task_vars, dict):
raise TypeError('Task \'variables\' must be a dictionary')
merge_dict(top_vars, task_vars)
except KeyError:
pass
merge_dict(top_vars, variables)
# Update variables stack
self.__variables_stack.append(top_vars)
self.__variables = ExpressionDict(self.__variables_stack[-1], self)
# Gather description and actions
task_desc = None
task_actions = []
for key, value in task.items():
if key == 'variables':
pass # Already gathered
elif key == 'description':
if not isinstance(value, str):
raise TypeError('Task \'description\' must be an string')
task_desc = expression_eval(value, self)
elif key == 'actions':
if not isinstance(value, list):
raise TypeError('Task \'actions\' must be a list')
task_actions = value
else:
raise TypeError('Unknown task field \'{}\''.format(key))
# Print task information
printer = self.__printer_fact.printer(sys.stdout)
if task_desc is not None:
printer.print('==> {}'.format(task_desc), 0, 'white', 'bright')
printer.print('Variables:', 1)
printer.print(json.dumps(top_vars, indent=4, sort_keys=True), 1)
printer.print('Actions:', 1)
printer.print(json.dumps(task_actions, indent=4, sort_keys=True), 1)
# Call task actions
for action in ExpressionList(task_actions, self):
self.call_action(action)
# Restore variables stack
self.__variables_stack.pop()
self.__variables = ExpressionDict(self.__variables_stack[-1], self)
def call_action(self, action):
# Check parameter types
if not isinstance(action, dict):
raise TypeError('\'action\' must be a dictionary')
# Gather name and parameters
name = None
parameters = {}
for key, value in action.items():
if key == 'name':
if not isinstance(value, str):
raise TypeError('Action \'name\' must be an string')
name = value
elif key == 'parameters':
if not isinstance(value, dict):
raise TypeError('Action \'parameters\' must be a '
'dictionary')
parameters = value
else:
raise TypeError('Unknown action field \'{}\''.format(key))
if name is None:
raise TypeError('Action \'name\' must be defined')
# Call action function
try:
fn = self.__actions[name]
except KeyError:
raise TypeError('Action \'{}\' was not found'.format(name))
action_locals = {
'fn': fn,
'context': self,
'parameters': parameters
}
eval('fn(context, parameters)', {}, action_locals)
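# Illustrative sketch (hypothetical action registry and task dictionary, not part
# of the module): execute_task() expects a dict with optional 'description',
# 'variables' and 'actions' keys, where every action names a callable registered
# in the 'actions' mapping passed to the constructor and taking (context,
# parameters):
#
#     context = ExecuteTaskContext(actions={
#         'echo': lambda context, parameters: print(parameters['msg']),
#     })
#     context.execute_task({
#         'description': 'Say hello',
#         'variables': {'who': 'world'},
#         'actions': [{'name': 'echo', 'parameters': {'msg': 'hello'}}],
#     })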
|
gpl-3.0
| -5,314,101,906,513,230,000
| 27.595238
| 70
| 0.667777
| false
| 3.542773
| false
| false
| false
|
VanceKingSaxbeA/MarketsEngine
|
src/bloombergquote.py
|
1
|
1536
|
# Owner & Copyrights: Vance King Saxbe. A.
""" Copyright (c) <2014> Author Vance King Saxbe. A, and contributors Power Dominion Enterprise, Precieux Consulting and other contributors. Modelled, Architected and designed by Vance King Saxbe. A. with the geeks from GoldSax Consulting and GoldSax Technologies email @vsaxbe@yahoo.com. Development teams from Power Dominion Enterprise, Precieux Consulting. Project sponsored by GoldSax Foundation, GoldSax Group and executed by GoldSax Manager."""
import urllib3
import string
from time import localtime, strftime
class bloombergquote:
def getquote(symbol):
url = "http://www.bloomberg.com/quote/"+symbol
http = urllib3.PoolManager()
r = http.request('GET', url)
r.release_conn()
f = r.data.decode("UTF-8")
a = f.split('span class="ticker_data">')
b = []
tstamp = strftime("%H:%M:%S", localtime())
contents = []
try:
b = a[1].split('</span>')
contents.extend(symbol.replace(':',''))
contents.extend(strftime("%Y-%m-%d"))
contents.extend(tstamp)
contents.extend(b[0])
except IndexError:
print("Index error")
return contents
# email to provide support at vancekingsaxbe@powerdominionenterprise.com, businessaffairs@powerdominionenterprise.com; for donations please write to fundraising@powerdominionenterprise.com
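# Usage sketch (hypothetical symbol, not part of the original module):
#
#     data = bloombergquote.getquote("AAPL:US")
#
# Note that list.extend() iterates over strings character by character, so as
# written the returned list holds the symbol, date, timestamp and price as
# individual characters; list.append() would keep them as whole fields.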
|
mit
| -6,879,921,841,213,445,000
| 52
| 508
| 0.628255
| false
| 3.859296
| false
| false
| false
|
alekseyig/fusion
|
lib/pylibcurl/multi.py
|
1
|
2883
|
#coding=utf8
import ctypes
import lib
import const
import prototype
from pylibcurl.base import Base
### classes
class Multi(Base):
_pointer_type = ctypes.POINTER(const.CURLM)
_lib_init_func = lib.curl_multi_init
_lib_cleanup_func = lib.curl_multi_cleanup
def __init__(self, **kwargs):
self._handles = set()
self._callbacks = {}
if kwargs:
self.setopt(**kwargs)
def __setattr__(self, name, value):
try:
self.setopt(**{name: value})
except ValueError:
object.__setattr__(self, name, value)
def _clear(self):
self._handles.clear()
self._callbacks.clear()
def add_handle(self, curl):
lib.curl_multi_add_handle(self._handle, curl._handle)
self._handles.add(curl)
def remove_handle(self, curl):
lib.curl_multi_remove_handle(self._handle, curl._handle)
self._handles.remove(curl)
def assign(self, socket, callback):
raise NotImplementedError
def fdset(self):
raise NotImplementedError
def perform(self):
running_handles = ctypes.c_int()
code = lib.curl_multi_perform(self._handle, ctypes.byref(running_handles))
return code, running_handles.value
def socket_action(self, socket, event):
running_handles = ctypes.c_int()
code = lib.curl_multi_socket_action(self._handle, socket, event, ctypes.byref(running_handles))
return code, running_handles.value
def info_read(self):
"""
return tuple(msg, number_in_queue)
or
return None
"""
return lib.curl_multi_info_read(self._handle)
def setopt(self, **kwargs):
"""
c.pipelning = 1
or
c.setopt(pipelining=1)
or
c.setopt(pipelining=1, maxconnects=10)
"""
def setopt(name, value):
option_name = 'CURLMOPT_%s' % name.upper()
if name.islower() and hasattr(const, option_name):
option_value = getattr(const, option_name)
if hasattr(prototype, name):
if callable(value):
value = getattr(prototype, name)(value)
self._callbacks[name] = value
else:
self._callbacks[name] = None
lib.curl_multi_setopt(self._handle, option_value, value)
else:
raise ValueError('invalid option name "%s"' % name)
for k, v in kwargs.items():
setopt(k, v)
def strerror(self, errornum):
return lib.curl_multi_strerror(errornum)
def timeout(self):
time_out = ctypes.c_long()
lib.curl_multi_timeout(self._handle, ctypes.byref(time_out))
return time_out.value
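# Usage sketch (assumes a companion easy-handle class from this package, e.g. a
# Curl object wrapping a CURL handle; not part of this module):
#
#     m = Multi(pipelining=1)
#     m.add_handle(easy)              # 'easy' is an already-configured handle
#     code, running = m.perform()
#     while running:
#         code, running = m.perform()
#     m.remove_handle(easy)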
|
mit
| -3,252,982,349,766,039,000
| 26.990291
| 103
| 0.556712
| false
| 4.072034
| false
| false
| false
|
michalpravda/Anki_helpers
|
add-ons/export.py
|
1
|
1192
|
# import the main window object (mw) from ankiqt
from aqt import mw
# import the "show info" tool from utils.py
from aqt.utils import showInfo
# import all of the Qt GUI library
from aqt.qt import *
import re
# We're going to add a menu item below. First we want to create a function to
# be called when the menu item is activated.
def testFunction():
ids = mw.col.findCards("deck:'slovicka nemecky'")
with open('d:\\exp.txt', 'w') as f:
output = set()
for id in ids:
card = mw.col.getCard(id)
note = card.note()
for (name, value) in note.items():
if (name == 'Word') or name == 'Text':
value = re.sub('{{c.::(.*?)}}', '\\1', value)
value = value.replace(' ', '').replace('<div>', '').replace('</div>', '')
output.add(value.encode('utf-8'))
lis = sorted(list(output))
for val in lis:
f.write(val + '\n')
        f.close()  # redundant: the with-statement already closes the file
# create a new menu item, "test"
action = QAction("test", mw)
# set it to call testFunction when it's clicked
mw.connect(action, SIGNAL("triggered()"), testFunction)
# and add it to the tools menu
mw.form.menuTools.addAction(action)
|
mit
| -2,603,656,548,504,197,600
| 33.057143
| 96
| 0.598154
| false
| 3.455072
| false
| false
| false
|
paulscherrerinstitute/pyscan
|
tests/helpers/scan_old.py
|
1
|
38634
|
from copy import deepcopy
from datetime import datetime
from time import sleep
import numpy as np
from tests.helpers.utils import TestPyScanDal
# This is just a dummy GUI class.
class DummyClass:
def __init__(self):
self.Progress = 1 # For Thomas!!
def showPanel(self, s):
pass
def emit(self, signal):
pass
class Scan(object):
def __init__(self, fromGUI=0):
self.epics_dal = None
self.fromGUI = fromGUI
self.outdict = None
self.n_validations = None
self.n_observables = None
self.n_readbacks = None
self.ProgDisp = DummyClass()
self.abortScan = 0
self.pauseScan = 0
def finalizeScan(self):
self.epics_dal.close_group('All')
if self.inlist[-1]['Monitor']:
self.epics_dal.close_group('Monitor')
self.outdict['ErrorMessage'] = 'Measurement finalized (finished/aborted) normally. ' \
'Need initialisation before next measurement.'
if self.fromGUI:
self.ProgDisp.showPanel(0)
def _add_group(self, dic, name, sources, result, close=True):
temp_handle = self.epics_dal.add_group(name, sources)
[output, summary, status] = self.epics_dal.get_group(temp_handle)
if summary != 1: # Something wrong. Try again.
[output, summary, status] = self.epics_dal.get_group(temp_handle)
if summary != 1:
for si in status:
if si != 1:
wch = sources[status.index(si)]
self.epics_dal.close_group(temp_handle)
raise ValueError('Something wrong in Epics channel: ' + wch)
if result:
dic[result] = output
if close:
self.epics_dal.close_group(temp_handle)
def initializeScan(self, inlist, dal):
self.epics_dal = dal or TestPyScanDal()
self.inlist = []
if not isinstance(inlist, list): # It is a simple SKS or MKS
inlist = [inlist]
try:
for index, dic in enumerate(inlist):
dic['ID'] = index # Just in case there are identical input dictionaries. (Normally, it may not happen.)
if index == len(inlist) - 1 and ('Waiting' not in dic.keys()):
raise ValueError('Waiting for the scan was not given.')
self._setup_knobs(index, dic)
self._setup_knob_scan_values(index, dic)
if index == len(inlist) - 1 and ('Observable' not in dic.keys()):
raise ValueError('The observable is not given.')
elif index == len(inlist) - 1:
if not isinstance(dic['Observable'], list):
dic['Observable'] = [dic['Observable']]
if index == len(inlist) - 1 and ('NumberOfMeasurements' not in dic.keys()):
dic['NumberOfMeasurements'] = 1
if 'PreAction' in dic.keys():
if not isinstance(dic['PreAction'], list):
                        raise ValueError('PreAction should be a list. Input dictionary ' + str(index) + '.')
for l in dic['PreAction']:
if not isinstance(l, list):
                            raise ValueError('Every PreAction should be a list. Input dictionary ' + str(index) + '.')
if len(l) != 5:
if not l[0] == 'SpecialAction':
raise ValueError('Every PreAction should be in a form of '
'[Ch-set, Ch-read, Value, Tolerance, Timeout]. '
                                                 'Input dictionary ' + str(index) + '.')
if 'PreActionWaiting' not in dic.keys():
dic['PreActionWaiting'] = 0.0
if not isinstance(dic['PreActionWaiting'], float) and not isinstance(dic['PreActionWaiting'], int):
                        raise ValueError('PreActionWaiting should be a float. Input dictionary ' + str(index) + '.')
if 'PreActionOrder' not in dic.keys():
dic['PreActionOrder'] = [0] * len(dic['PreAction'])
if not isinstance(dic['PreActionOrder'], list):
                        raise ValueError('PreActionOrder should be a list. Input dictionary ' + str(index) + '.')
else:
dic['PreAction'] = []
dic['PreActionWaiting'] = 0.0
dic['PreActionOrder'] = [0] * len(dic['PreAction'])
if 'In-loopPreAction' in dic.keys():
if not isinstance(dic['In-loopPreAction'], list):
                        raise ValueError('In-loopPreAction should be a list. Input dictionary ' + str(index) + '.')
for l in dic['In-loopPreAction']:
if not isinstance(l, list):
raise ValueError('Every In-loopPreAction should be a list. '
                                             'Input dictionary ' + str(index) + '.')
if len(l) != 5:
if not l[0] == 'SpecialAction':
raise ValueError('Every In-loopPreAction should be in a form of '
'[Ch-set, Ch-read, Value, Tolerance, Timeout]. '
                                                 'Input dictionary ' + str(index) + '.')
if 'In-loopPreActionWaiting' not in dic.keys():
dic['In-loopPreActionWaiting'] = 0.0
if not isinstance(dic['In-loopPreActionWaiting'], float) and not isinstance(
dic['In-loopPreActionWaiting'], int):
                        raise ValueError('In-loopPreActionWaiting should be a float. Input dictionary ' + str(index) + '.')
if 'In-loopPreActionOrder' not in dic.keys():
dic['In-loopPreActionOrder'] = [0] * len(dic['In-loopPreAction'])
if not isinstance(dic['In-loopPreActionOrder'], list):
                        raise ValueError('In-loopPreActionOrder should be a list. Input dictionary ' + str(index) + '.')
else:
dic['In-loopPreAction'] = []
dic['In-loopPreActionWaiting'] = 0.0
dic['In-loopPreActionOrder'] = [0] * len(dic['In-loopPreAction'])
if 'PostAction' in dic.keys():
if dic['PostAction'] == 'Restore':
PA = []
for i in range(0, len(dic['Knob'])):
k = dic['Knob'][i]
v = dic['KnobSaved'][i]
PA.append([k, k, v, 1.0, 10])
dic['PostAction'] = PA
elif not isinstance(dic['PostAction'], list):
                        raise ValueError('PostAction should be a list. Input dictionary ' + str(index) + '.')
Restore = 0
for i in range(0, len(dic['PostAction'])):
l = dic['PostAction'][i]
if l == 'Restore':
Restore = 1
PA = []
for j in range(0, len(dic['Knob'])):
k = dic['Knob'][j]
v = dic['KnobSaved'][j]
PA.append([k, k, v, 1.0, 10])
elif not isinstance(l, list):
                        raise ValueError('Every PostAction should be a list. Input dictionary ' + str(index) + '.')
elif len(l) != 5:
if not l[0] == 'SpecialAction':
                                raise ValueError('Every PostAction should be in the form '
                                                 '[Ch-set, Ch-read, Value, Tolerance, Timeout]. '
                                                 'Input dictionary ' + str(index) + '.')
if Restore:
dic['PostAction'].remove('Restore')
dic['PostAction'] = dic['PostAction'] + PA
else:
dic['PostAction'] = []
if 'In-loopPostAction' in dic.keys():
if dic['In-loopPostAction'] == 'Restore':
PA = []
for i in range(0, len(dic['Knob'])):
k = dic['Knob'][i]
v = dic['KnobSaved'][i]
PA.append([k, k, v, 1.0, 10])
dic['In-loopPostAction'] = PA
elif not isinstance(dic['In-loopPostAction'], list):
                        raise ValueError('In-loopPostAction should be a list. Input dictionary ' + str(index) + '.')
Restore = 0
for i in range(0, len(dic['In-loopPostAction'])):
l = dic['In-loopPostAction'][i]
if l == 'Restore':
Restore = 1
PA = []
for j in range(0, len(dic['Knob'])):
k = dic['Knob'][j]
v = dic['KnobSaved'][j]
PA.append([k, k, v, 1.0, 10])
elif not isinstance(l, list):
                            raise ValueError('Every In-loopPostAction should be a list. '
                                             'Input dictionary ' + str(index) + '.')
                        elif len(l) != 5:
                            if not l[0] == 'SpecialAction':
                                raise ValueError('Every In-loopPostAction should be in the form '
                                                 '[Ch-set, Ch-read, Value, Tolerance, Timeout]. '
                                                 'Input dictionary ' + str(index) + '.')
if Restore:
dic['In-loopPostAction'].remove('Restore')
dic['In-loopPostAction'] = dic['In-loopPostAction'] + PA
else:
dic['In-loopPostAction'] = []
if 'Validation' in dic.keys():
if not isinstance(dic['Validation'], list):
                        raise ValueError('Validation should be a list of channels. Input dictionary ' + str(index) + '.')
else:
dic['Validation'] = []
self._setup_monitors(dic, index, inlist)
if 'Additive' not in dic.keys():
dic['Additive'] = 0
if index == len(inlist) - 1 and ('StepbackOnPause' not in dic.keys()):
dic['StepbackOnPause'] = 1
self.allch = []
self.n_readbacks = 0
for d in inlist:
self.allch.append(d['KnobReadback'])
self.n_readbacks += len(d['KnobReadback'])
self.allch.append(inlist[-1]['Validation'])
self.n_validations = len(inlist[-1]['Validation'])
self.allch.append(inlist[-1]['Observable'])
self.n_observables = len(inlist[-1]['Observable'])
            self.allch = [item for sublist in self.allch for item in sublist]  # Flatten the nested channel lists into one list
self._add_group(dic, 'All', self.allch, None, close=False)
self.Ntot = 1 # Total number of measurements
for dic in inlist:
if not dic['Series']:
self.Ntot = self.Ntot * dic['Nstep']
else:
self.Ntot = self.Ntot * sum(dic['Nstep'])
self.inlist = inlist
self.ProgDisp.Progress = 0
            # Preallocating space for the output
self.outdict = {"ErrorMessage": None,
"KnobReadback": self.allocateOutput(),
"Validation": self.allocateOutput(),
"Observable": self.allocateOutput()}
except ValueError as e:
self.outdict = {"ErrorMessage": str(e)}
return self.outdict
def _setup_monitors(self, dic, index, inlist):
if index == len(inlist) - 1 and ('Monitor' in dic.keys()) and (dic['Monitor']):
if isinstance(dic['Monitor'], str):
dic['Monitor'] = [dic['Monitor']]
self._add_group(dic, 'Monitor', dic['Monitor'], None)
if 'MonitorValue' not in dic.keys():
[dic['MonitorValue'], summary, status] = self.epics_dal.get_group('Monitor')
elif not isinstance(dic['MonitorValue'], list):
dic['MonitorValue'] = [dic['MonitorValue']]
if len(dic['MonitorValue']) != len(dic['Monitor']):
                raise ValueError('The length of MonitorValue does not match the length of Monitor.')
if 'MonitorTolerance' not in dic.keys():
dic['MonitorTolerance'] = []
[Value, summary, status] = self.epics_dal.get_group('Monitor')
for v in Value:
if isinstance(v, str):
dic['MonitorTolerance'].append(None)
elif v == 0:
dic['MonitorTolerance'].append(0.1)
else:
dic['MonitorTolerance'].append(
                            abs(v * 0.1))  # 10% of the current value is used as the tolerance when not given
elif not isinstance(dic['MonitorTolerance'], list):
dic['MonitorTolerance'] = [dic['MonitorTolerance']]
if len(dic['MonitorTolerance']) != len(dic['Monitor']):
                raise ValueError('The length of MonitorTolerance does not match the length of Monitor.')
if 'MonitorAction' not in dic.keys():
                raise ValueError('MonitorAction is not given even though Monitor is given.')
if not isinstance(dic['MonitorAction'], list):
dic['MonitorAction'] = [dic['MonitorAction']]
for m in dic['MonitorAction']:
if m != 'Abort' and m != 'Wait' and m != 'WaitAndAbort':
                    raise ValueError('MonitorAction should be Wait, Abort, or WaitAndAbort.')
if 'MonitorTimeout' not in dic.keys():
dic['MonitorTimeout'] = [30.0] * len(dic['Monitor'])
elif not isinstance(dic['MonitorTimeout'], list):
                dic['MonitorTimeout'] = [dic['MonitorTimeout']]
            if len(dic['MonitorTimeout']) != len(dic['Monitor']):
                raise ValueError('The length of MonitorTimeout does not match the length of Monitor.')
for m in dic['MonitorTimeout']:
try:
float(m)
                except (TypeError, ValueError):
                    raise ValueError('MonitorTimeout should be a list of floats (or ints).')
elif index == len(inlist) - 1:
dic['Monitor'] = []
dic['MonitorValue'] = []
dic['MonitorTolerance'] = []
dic['MonitorAction'] = []
dic['MonitorTimeout'] = []
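        # Hedged example (not part of the original source) of the monitor keys
        # validated above, with a hypothetical channel name:
        #   dic['Monitor'] = ['HYPOTHETICAL:RF:READY-STATUS']
        #   dic['MonitorValue'] = [1]                # value(s) considered healthy
        #   dic['MonitorTolerance'] = [0.1]          # defaults to 10% of the current value
        #   dic['MonitorAction'] = ['WaitAndAbort']  # Wait, Abort or WaitAndAbort
        #   dic['MonitorTimeout'] = [30.0]           # seconds, defaults to 30.0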
def _setup_knob_scan_values(self, index, dic):
if 'Series' not in dic.keys():
dic['Series'] = 0
        if not dic['Series']:  # Setting up scan values for single-knob (SKS) and multi-knob (MKS) scans
if 'ScanValues' not in dic.keys():
if 'ScanRange' not in dic.keys():
raise ValueError('Neither ScanRange nor ScanValues is given '
'in the input dictionary ' + str(index) + '.')
elif not isinstance(dic['ScanRange'], list):
raise ValueError('ScanRange is not given in the right format. '
'Input dictionary ' + str(index) + '.')
elif not isinstance(dic['ScanRange'][0], list):
dic['ScanRange'] = [dic['ScanRange']]
if ('Nstep' not in dic.keys()) and ('StepSize' not in dic.keys()):
raise ValueError('Neither Nstep nor StepSize is given.')
if 'Nstep' in dic.keys(): # StepSize is ignored when Nstep is given
if not isinstance(dic['Nstep'], int):
raise ValueError('Nstep should be an integer. Input dictionary ' + str(index) + '.')
ran = []
for r in dic['ScanRange']:
s = (r[1] - r[0]) / (dic['Nstep'] - 1)
f = np.arange(r[0], r[1], s)
f = np.append(f, np.array(r[1]))
ran.append(f.tolist())
dic['KnobExpanded'] = ran
else: # StepSize given
if len(dic['Knob']) > 1:
raise ValueError('Give Nstep instead of StepSize for MKS. '
'Input dictionary ' + str(index) + '.')
# StepSize is only valid for SKS
r = dic['ScanRange'][0]
s = dic['StepSize'][0]
f = np.arange(r[0], r[1], s)
f = np.append(f, np.array(r[1]))
dic['Nstep'] = len(f)
dic['KnobExpanded'] = [f.tolist()]
else:
# Scan values explicitly defined.
if not isinstance(dic['ScanValues'], list):
                raise ValueError('ScanValues is not given in the right format. '
'Input dictionary ' + str(index) + '.')
if len(dic['ScanValues']) != len(dic['Knob']) and len(dic['Knob']) != 1:
                    raise ValueError('The length of ScanValues does not match the number of Knobs.')
if len(dic['Knob']) > 1:
minlen = 100000
for r in dic['ScanValues']:
if minlen > len(r):
minlen = len(r)
ran = []
for r in dic['ScanValues']:
ran.append(r[0:minlen]) # Cut at the length of the shortest list.
dic['KnobExpanded'] = ran
dic['Nstep'] = minlen
else:
dic['KnobExpanded'] = [dic['ScanValues']]
dic['Nstep'] = len(dic['ScanValues'])
else: # Setting up scan values for Series scan
if 'ScanValues' not in dic.keys():
raise ValueError('ScanValues should be given for Series '
'scan in the input dictionary ' + str(index) + '.')
if not isinstance(dic['ScanValues'], list):
raise ValueError('ScanValues should be given as a list (of lists) '
'for Series scan in the input dictionary ' + str(index) + '.')
if len(dic['Knob']) != len(dic['ScanValues']):
                raise ValueError('Scan values length does not match the '
'number of knobs in the input dictionary ' + str(index) + '.')
Nstep = []
for vl in dic['ScanValues']:
if not isinstance(vl, list):
raise ValueError('ScanValue element should be given as a list for '
'Series scan in the input dictionary ' + str(index) + '.')
Nstep.append(len(vl))
dic['Nstep'] = Nstep
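        # Hedged examples (not part of the original source) of the two accepted
        # scan-value forms, with purely illustrative numbers:
        #   non-Series: {'ScanRange': [[0.0, 10.0]], 'Nstep': 6}
        #               -> dic['KnobExpanded'] == [[0.0, 2.0, 4.0, 6.0, 8.0, 10.0]]
        #   Series:     {'Series': 1, 'ScanValues': [[1.0, 2.0], [5.0, 6.0, 7.0]]}
        #               -> dic['Nstep'] == [2, 3] (each knob is scanned in turn)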
def _setup_knobs(self, index, dic):
"""
Setup the values for moving knobs in the scan.
:param index: Index in the dictionary.
:param dic: The dictionary.
"""
if 'Knob' not in dic.keys():
            raise ValueError('Knob for the scan was not given for the input dictionary ' + str(index) + '.')
else:
if not isinstance(dic['Knob'], list):
dic['Knob'] = [dic['Knob']]
if 'KnobReadback' not in dic.keys():
dic['KnobReadback'] = dic['Knob']
if not isinstance(dic['KnobReadback'], list):
dic['KnobReadback'] = [dic['KnobReadback']]
if len(dic['KnobReadback']) != len(dic['Knob']):
            raise ValueError('The number of KnobReadback does not match the number of Knobs.')
if 'KnobTolerance' not in dic.keys():
dic['KnobTolerance'] = [1.0] * len(dic['Knob'])
if not isinstance(dic['KnobTolerance'], list):
dic['KnobTolerance'] = [dic['KnobTolerance']]
if len(dic['KnobTolerance']) != len(dic['Knob']):
            raise ValueError('The number of KnobTolerance does not match the number of Knobs.')
if 'KnobWaiting' not in dic.keys():
dic['KnobWaiting'] = [10.0] * len(dic['Knob'])
if not isinstance(dic['KnobWaiting'], list):
dic['KnobWaiting'] = [dic['KnobWaiting']]
if len(dic['KnobWaiting']) != len(dic['Knob']):
            raise ValueError('The number of KnobWaiting does not match the number of Knobs.')
if 'KnobWaitingExtra' not in dic.keys():
dic['KnobWaitingExtra'] = 0.0
else:
try:
dic['KnobWaitingExtra'] = float(dic['KnobWaitingExtra'])
            except (TypeError, ValueError):
raise ValueError('KnobWaitingExtra is not a number in the input dictionary ' + str(index) + '.')
self._add_group(dic, str(index), dic['Knob'], 'KnobSaved')
def startMonitor(self, dic):
self.epics_dal.add_group("Monitor", dic["Monitor"])
# def cbMonitor(h):
# def matchValue(h):
# en = self.MonitorInfo[h][1]
# c = self.epics_dal.getPVCache(h)
# v = c.value[0]
# if v == '':
# # To comply with RF-READY-STATUS channle, where ENUM is empty...
# c = self.epics_dal.getPVCache(h, dt='int')
# v = c.value[0]
# if isinstance(self.MonitorInfo[h][2], list): # Monitor value is in list, i.e. several cases are okay
# if v in self.MonitorInfo[h][2]:
# print('value OK')
# return 1
# else:
# print('kkkkkkk', en, self.MonitorInfo[h][2], v)
# print('value NG')
# return 0
# elif isinstance(v, str):
# if v == self.MonitorInfo[h][2]:
# print('value OK')
# return 1
# else:
# print('nnnnn', en, self.MonitorInfo[h][2], v)
# print('value NG')
# return 0
#
# elif isinstance(v, int) or isinstance(v, float):
# if abs(v - self.MonitorInfo[h][2]) <= self.MonitorInfo[h][3]:
# return 1
# else:
# print('value NG')
# print(v, self.MonitorInfo[h][2], self.MonitorInfo[h][3])
# return 0
# else:
# 'Return value from getPVCache', v
#
# if matchValue(h):
# self.stopScan[self.MonitorInfo[h][0]] = 0
# else:
# self.stopScan[self.MonitorInfo[h][0]] = 1
#
# dic = self.inlist[-1]
# self.stopScan = [0] * len(dic['Monitor'])
# self.MonitorInfo = {}
#
# HandleList = self.epics_dal.getHandlesFromWithinGroup(self.MonitorHandle)
# # self.cafe.openPrepare()
# for i in range(0, len(HandleList)):
# h = HandleList[i]
# self.MonitorInfo[h] = [i, dic['Monitor'][i], dic['MonitorValue'][i], dic['MonitorTolerance'][i],
# dic['MonitorAction'][i], dic['MonitorTimeout']]
#
# self.epics_dal.openMonitorPrepare()
# m0 = self.epics_dal.groupMonitorStartWithCBList(self.MonitorHandle, cb=[cbMonitor] * len(dic['Monitor']))
#
# self.epics_dal.openMonitorNowAndWait(2)
def PreAction(self, dic, key='PreAction'):
order = np.array(dic[key + 'Order'])
maxo = order.max()
mino = order.min()
stat = 0
for i in range(mino, maxo + 1):
for j in range(0, len(order)):
od = order[j]
if i == od:
if dic[key][j][0].lower() == 'specialaction':
self.ObjectSA.SpecialAction(dic[key][j][1])
else:
chset = dic[key][j][0]
chread = dic[key][j][1]
val = dic[key][j][2]
tol = dic[key][j][3]
timeout = dic[key][j][4]
if chset.lower() == 'match':
# print('****************************----')
try:
status = self.epics_dal.match(val, chread, tol, timeout, 1)
# print('--------------', status)
except Exception as inst:
print('Exception in preAction')
print(inst)
stat = 1
else:
try:
status = self.epics_dal.set_and_match(chset, val, chread, tol, timeout, 0)
# print('===========', status)
except Exception as inst:
print('Exception in preAction')
print(inst)
stat = 1
sleep(dic[key + 'Waiting'])
return stat # Future development: Give it to output dictionary
def PostAction(self, dic, key='PostAction'):
for act in dic[key]:
if act[0] == 'SpecialAction':
self.ObjectSA.SpecialAction(act[1])
else:
chset = act[0]
chread = act[1]
val = act[2]
tol = act[3]
timeout = act[4]
try:
self.epics_dal.set_and_match(chset, val, chread, tol, timeout, 0)
except Exception as inst:
print(inst)
def CrossReference(self, Object):
self.ObjectSA = Object
def allocateOutput(self):
root_list = []
for dimension in reversed(self.inlist):
n_steps = dimension['Nstep']
if dimension['Series']:
# For Series scan, each step of each knob represents another result.
current_dimension_list = []
for n_steps_in_knob in n_steps:
current_knob_list = []
for _ in range(n_steps_in_knob):
current_knob_list.append(deepcopy(root_list))
current_dimension_list.append(deepcopy(current_knob_list))
root_list = current_dimension_list
else:
# For line scan, each step represents another result.
current_dimension_list = []
for _ in range(n_steps):
current_dimension_list.append(deepcopy(root_list))
root_list = current_dimension_list
return root_list
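        # Hedged illustration (not part of the original source): for
        # inlist = [{'Nstep': 2, 'Series': 0}, {'Nstep': 3, 'Series': 0}]
        # the returned skeleton is [[[], [], []], [[], [], []]]; the outer
        # index runs over the 2 steps of the first dictionary, and each slot
        # holds 3 empty lists that measure_and_save later fills.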
def execute_scan(self):
self.Scan(self.outdict['KnobReadback'], self.outdict['Validation'], self.outdict['Observable'], 0)
def startScan(self):
if self.outdict['ErrorMessage']:
if 'After the last scan,' not in self.outdict['ErrorMessage']:
self.outdict[
'ErrorMessage'] = 'It seems that the initialization was not successful... No scan was performed.'
return self.outdict
self.outdict['TimeStampStart'] = datetime.now()
self.stopScan = []
self.abortScan = 0
self.pauseScan = 0
if self.inlist[-1]['Monitor']:
self.startMonitor(self.inlist[-1])
if self.fromGUI:
self.ProgDisp.showPanel(1)
self.ProgDisp.abortScan = 0
self.ProgDisp.emit("pb")
self.Ndone = 0
self.execute_scan()
if self.fromGUI:
self.ProgDisp.showPanel(0)
self.finalizeScan()
self.outdict['TimeStampEnd'] = datetime.now()
return self.outdict
def Scan(self, Rback, Valid, Obs, dic_index):
dic = self.inlist[dic_index]
# print('*****************', dic)
# Execute pre actions.
if len(dic['PreAction']):
self.PreAction(dic)
series_scan = True if dic['Series'] else False
last_pass = dic_index == len(self.inlist) - 1
# Knob, KnobReadback = Writer
# KnobExpanded = Vector scan.
if last_pass:
if series_scan:
self.last_series_scan(Obs, Rback, Valid, dic)
else:
self.last_line_scan(Obs, Rback, Valid, dic)
else:
if series_scan:
self.series_scan(Obs, Rback, Valid, dic_index)
else:
self.line_scan(Obs, Rback, Valid, dic_index)
# Execute post actions.
if len(dic['PostAction']):
self.PostAction(dic)
def post_measurment_actions(self, Obs, Rback, Valid, dic, step_index):
step_index = step_index + 1
self.Ndone = self.Ndone + 1
step_index = self.verify_and_stepback(step_index, Obs, Rback, Valid, dic)
self.update_progress()
return step_index
def update_progress(self):
self.ProgDisp.Progress = 100.0 * self.Ndone / self.Ntot
if self.fromGUI:
self.ProgDisp.emit("pb")
def verify_and_stepback(self, Iscan, Obs, Rback, Valid, dic):
Stepback = 0
count = [0] * len(self.stopScan)
k_stop = None
p_stop = None
while self.stopScan.count(1) + self.pauseScan: # Problem detected in the channel under monitoring
Stepback = 1
sleep(1.0)
for k in range(0, len(self.stopScan)):
if self.stopScan[k]:
k_stop = k
if dic['MonitorAction'][k] == 'Abort':
self.abortScan = 1
count[k] = count[k] + 1
else:
count[k] = 0
if dic['MonitorAction'][k] == 'WaitAndAbort' and count[k] > dic['MonitorTimeout'][k]:
self.abortScan = 1
if self.abortScan:
if len(dic['PostAction']):
self.PostAction(dic)
raise Exception("Scan aborted")
# print('Monitor??')
# print(self.stopScan)
if self.pauseScan:
p_stop = 1
        if k_stop is not None and dic['MonitorAction'][k_stop] == 'WaitAndNoStepBack':
Stepback = 0
if p_stop and not dic['StepbackOnPause']:
Stepback = 0
if Stepback:
# print('Stepping back')
Iscan = Iscan - 1
self.Ndone = self.Ndone - 1
Rback[Iscan].pop()
Valid[Iscan].pop()
Obs[Iscan].pop()
if self.fromGUI and self.ProgDisp.abortScan:
self.abortScan = 1
if self.abortScan:
if len(dic['PostAction']):
self.PostAction(dic)
raise Exception("Scan aborted")
if len(dic['In-loopPostAction']):
self.PostAction(dic, 'In-loopPostAction')
return Iscan
def pre_measurment_actions(self, dic):
if dic['KnobWaitingExtra']:
sleep(dic['KnobWaitingExtra'])
if len(dic['In-loopPreAction']):
self.PreAction(dic, 'In-loopPreAction')
def measure_and_save(self, Iscan, Obs, Rback, Valid, dic, Kscan=None):
for j in range(dic['NumberOfMeasurements']):
[v, s, sl] = self.epics_dal.get_group('All')
if self.n_readbacks == 1:
rback_result = v[0]
else:
rback_result = v[0:self.n_readbacks]
if self.n_validations == 1:
valid_result = v[self.n_readbacks]
else:
valid_result = v[self.n_readbacks:self.n_readbacks + self.n_validations]
if self.n_observables == 1:
obs_result = v[-1]
else:
obs_result = v[self.n_readbacks + self.n_validations:
self.n_readbacks + self.n_validations + self.n_observables]
if dic['NumberOfMeasurements'] > 1:
if Kscan is not None:
Rback[Kscan][Iscan].append(rback_result)
Valid[Kscan][Iscan].append(valid_result)
Obs[Kscan][Iscan].append(obs_result)
else:
Rback[Iscan].append(rback_result)
Valid[Iscan].append(valid_result)
Obs[Iscan].append(obs_result)
else:
if Kscan is not None:
Rback[Kscan][Iscan] = rback_result
Valid[Kscan][Iscan] = valid_result
Obs[Kscan][Iscan] = obs_result
else:
Rback[Iscan] = rback_result
Valid[Iscan] = valid_result
Obs[Iscan] = obs_result
sleep(dic['Waiting'])
def line_scan(self, Obs, Rback, Valid, dic_index):
dic = self.inlist[dic_index]
for step_index in range(dic['Nstep']):
# print('Dict' + str(dic_index) + ' Loop' + str(step_index))
for knob_index in range(len(dic['Knob'])):
if dic['Additive']:
KV = np.array(dic['KnobExpanded'][knob_index]) + dic['KnobSaved'][knob_index]
else:
KV = dic['KnobExpanded'][knob_index]
try:
self.set_knob_value(dic, knob_index, KV[step_index])
except Exception as inst:
                    print('Exception in line_scan')
print(inst)
# Delay between setting the position and reading the values.
if dic['KnobWaitingExtra']:
sleep(dic['KnobWaitingExtra'])
self.Scan(Rback[step_index], Valid[step_index], Obs[step_index], dic_index + 1)
if self.abortScan:
if len(dic['PostAction']):
self.PostAction(dic)
raise Exception("Scan aborted")
def set_knob_value(self, dic, knob_index, pv_value):
set_pv_name = dic['Knob'][knob_index]
readback_pv_name = dic['KnobReadback'][knob_index]
pv_tolerance = dic['KnobTolerance'][knob_index]
pv_wait_time = dic['KnobWaiting'][knob_index]
self.epics_dal.set_and_match(set_pv_name, pv_value, readback_pv_name, pv_tolerance, pv_wait_time, 0)
def last_line_scan(self, Obs, Rback, Valid, dic):
step_index = 0
while step_index < dic['Nstep']:
# print(step_index)
# set knob for this loop
for knob_index in range(len(dic['Knob'])): # Replace later with a group method, setAndMatchGroup?
if dic['Additive']:
KV = np.array(dic['KnobExpanded'][knob_index]) + dic['KnobSaved'][knob_index]
else:
KV = dic['KnobExpanded'][knob_index]
try:
self.set_knob_value(dic, knob_index, KV[step_index])
except Exception as inst:
print('Exception in Scan loop')
print(inst)
self.pre_measurment_actions(dic)
self.measure_and_save(step_index, Obs, Rback, Valid, dic)
step_index = self.post_measurment_actions(Obs, Rback, Valid, dic, step_index)
def series_scan(self, Obs, Rback, Valid, dic_index):
dic = self.inlist[dic_index]
# For every PV.
for Kscan in range(0, len(dic['Knob'])):
# For the number of steps for this PV.
for step_index in range(dic['Nstep'][Kscan]):
# For every PV.
for knob_index in range(len(dic['Knob'])):
#
if knob_index == Kscan:
if dic['Additive']:
KV = dic['KnobSaved'] + dic['ScanValues'][knob_index][step_index]
else:
KV = dic['ScanValues'][knob_index][step_index]
else:
KV = dic['KnobSaved'][knob_index]
try:
self.set_knob_value(dic, knob_index, KV)
except Exception as inst:
raise ValueError('Exception in series_scan', inst)
if dic['KnobWaitingExtra']:
sleep(dic['KnobWaitingExtra'])
self.Scan(Rback[Kscan][step_index], Valid[Kscan][step_index], Obs[Kscan][step_index], dic_index+1)
if self.abortScan:
if len(dic['PostAction']):
self.PostAction(dic)
raise Exception("Scan aborted")
def last_series_scan(self, Obs, Rback, Valid, dic):
Kscan = 0
while Kscan < len(dic['Knob']):
step_index = 0
while step_index < dic['Nstep'][Kscan]:
# print(Kscan, step_index)
# set knob for this loop
for knob_index in range(0, len(dic['Knob'])): # Replace later with a group method, setAndMatchGroup?
if knob_index == Kscan:
if dic['Additive']:
KV = dic['KnobSaved'][knob_index] + dic['ScanValues'][knob_index][step_index]
else:
KV = dic['ScanValues'][knob_index][step_index]
else:
KV = dic['KnobSaved'][knob_index]
try:
self.set_knob_value(dic, knob_index, KV)
except Exception as inst:
                        print('Exception in last_series_scan')
print(inst)
self.pre_measurment_actions(dic)
self.measure_and_save(step_index, Obs, Rback, Valid, dic, Kscan)
step_index = self.post_measurment_actions(Obs, Rback, Valid, dic, step_index)
Kscan = Kscan + 1
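# ---------------------------------------------------------------------------
# Hedged example (not part of the original module): a minimal scan
# configuration of the form validated by the initialization code above.
# The channel names are hypothetical placeholders; only the dictionary keys
# and their types are taken from the checks in this file.
EXAMPLE_INLIST = [
    {
        'Knob': ['HYPOTHETICAL:QUAD:I-SET'],           # channel(s) to move
        'KnobReadback': ['HYPOTHETICAL:QUAD:I-READ'],   # defaults to Knob if omitted
        'ScanRange': [[0.0, 10.0]],                     # [start, end] for each knob
        'Nstep': 11,                                    # number of points (StepSize is then ignored)
        'Observable': ['HYPOTHETICAL:BPM:X'],           # read out at every point
        'Waiting': 0.5,                                 # seconds to sleep after each measurement
        'NumberOfMeasurements': 3,                      # defaults to 1 if omitted
    },
]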
|
gpl-3.0
| 2,175,520,377,062,554,000
| 41.689503
| 120
| 0.476523
| false
| 4.1771
| false
| false
| false
|
ankonzoid/Deep-Reinforcement-Learning-Tutorials
|
advanced_ML/model_tree/src/ModelTree.py
|
1
|
13608
|
"""
ModelTree.py (author: Anson Wong / git: ankonzoid)
"""
import numpy as np
from copy import deepcopy
from graphviz import Digraph
class ModelTree(object):
def __init__(self, model, max_depth=5, min_samples_leaf=10,
search_type="greedy", n_search_grid=100):
self.model = model
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.search_type = search_type
self.n_search_grid = n_search_grid
self.tree = None
def get_params(self, deep=True):
return {
"model": self.model.get_params() if deep else self.model,
"max_depth": self.max_depth,
"min_samples_leaf": self.min_samples_leaf,
"search_type": self.search_type,
"n_search_grid": self.n_search_grid,
}
def set_params(self, **params):
for param, value in params.items():
setattr(self, param, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return "{}({})".format(class_name, ', '.join([ "{}={}".format(k,v) for k, v in self.get_params(deep=False).items() ]))
# ======================
# Fit
# ======================
def fit(self, X, y, verbose=False):
# Settings
model = self.model
min_samples_leaf = self.min_samples_leaf
max_depth = self.max_depth
search_type = self.search_type
n_search_grid = self.n_search_grid
if verbose:
print(" max_depth={}, min_samples_leaf={}, search_type={}...".format(max_depth, min_samples_leaf, search_type))
def _build_tree(X, y):
global index_node_global
def _create_node(X, y, depth, container):
loss_node, model_node = _fit_model(X, y, model)
node = {"name": "node",
"index": container["index_node_global"],
"loss": loss_node,
"model": model_node,
"data": (X, y),
"n_samples": len(X),
"j_feature": None,
"threshold": None,
"children": {"left": None, "right": None},
"depth": depth}
container["index_node_global"] += 1
return node
# Recursively split node + traverse node until a terminal node is reached
def _split_traverse_node(node, container):
# Perform split and collect result
result = _splitter(node, model,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
search_type=search_type,
n_search_grid=n_search_grid)
# Return terminal node if split is not advised
if not result["did_split"]:
if verbose:
depth_spacing_str = " ".join([" "] * node["depth"])
print(" {}*leaf {} @ depth {}: loss={:.6f}, N={}".format(depth_spacing_str, node["index"], node["depth"], node["loss"], result["N"]))
return
# Update node information based on splitting result
node["j_feature"] = result["j_feature"]
node["threshold"] = result["threshold"]
del node["data"] # delete node stored data
# Extract splitting results
(X_left, y_left), (X_right, y_right) = result["data"]
model_left, model_right = result["models"]
# Report created node to user
if verbose:
depth_spacing_str = " ".join([" "] * node["depth"])
print(" {}node {} @ depth {}: loss={:.6f}, j_feature={}, threshold={:.6f}, N=({},{})".format(depth_spacing_str, node["index"], node["depth"], node["loss"], node["j_feature"], node["threshold"], len(X_left), len(X_right)))
# Create children nodes
node["children"]["left"] = _create_node(X_left, y_left, node["depth"]+1, container)
node["children"]["right"] = _create_node(X_right, y_right, node["depth"]+1, container)
node["children"]["left"]["model"] = model_left
node["children"]["right"]["model"] = model_right
# Split nodes
_split_traverse_node(node["children"]["left"], container)
_split_traverse_node(node["children"]["right"], container)
container = {"index_node_global": 0} # mutatable container
root = _create_node(X, y, 0, container) # depth 0 root node
_split_traverse_node(root, container) # split and traverse root node
return root
# Construct tree
self.tree = _build_tree(X, y)
# ======================
# Predict
# ======================
def predict(self, X):
assert self.tree is not None
def _predict(node, x):
no_children = node["children"]["left"] is None and \
node["children"]["right"] is None
if no_children:
y_pred_x = node["model"].predict([x])[0]
return y_pred_x
else:
if x[node["j_feature"]] <= node["threshold"]: # x[j] < threshold
return _predict(node["children"]["left"], x)
else: # x[j] > threshold
return _predict(node["children"]["right"], x)
y_pred = np.array([_predict(self.tree, x) for x in X])
return y_pred
# ======================
# Explain
# ======================
def explain(self, X, header):
assert self.tree is not None
def _explain(node, x, explanation):
no_children = node["children"]["left"] is None and \
node["children"]["right"] is None
if no_children:
return explanation
else:
if x[node["j_feature"]] <= node["threshold"]: # x[j] < threshold
explanation.append("{} = {:.6f} <= {:.6f}".format(header[node["j_feature"]], x[node["j_feature"]], node["threshold"]))
return _explain(node["children"]["left"], x, explanation)
else: # x[j] > threshold
explanation.append("{} = {:.6f} > {:.6f}".format(header[node["j_feature"]], x[node["j_feature"]], node["threshold"]))
return _explain(node["children"]["right"], x, explanation)
explanations = [_explain(self.tree, x, []) for x in X]
return explanations
# ======================
# Loss
# ======================
def loss(self, X, y, y_pred):
loss = self.model.loss(X, y, y_pred)
return loss
# ======================
# Tree diagram
# ======================
def export_graphviz(self, output_filename, feature_names,
export_png=True, export_pdf=False):
"""
Assumes node structure of:
node["name"]
node["n_samples"]
node["children"]["left"]
node["children"]["right"]
node["j_feature"]
node["threshold"]
node["loss"]
"""
g = Digraph('g', node_attr={'shape': 'record', 'height': '.1'})
def build_graphviz_recurse(node, parent_node_index=0, parent_depth=0, edge_label=""):
# Empty node
if node is None:
return
# Create node
node_index = node["index"]
if node["children"]["left"] is None and node["children"]["right"] is None:
threshold_str = ""
else:
threshold_str = "{} <= {:.1f}\\n".format(feature_names[node['j_feature']], node["threshold"])
label_str = "{} n_samples = {}\\n loss = {:.6f}".format(threshold_str, node["n_samples"], node["loss"])
# Create node
nodeshape = "rectangle"
bordercolor = "black"
fillcolor = "white"
fontcolor = "black"
g.attr('node', label=label_str, shape=nodeshape)
g.node('node{}'.format(node_index),
color=bordercolor, style="filled",
fillcolor=fillcolor, fontcolor=fontcolor)
# Create edge
if parent_depth > 0:
g.edge('node{}'.format(parent_node_index),
'node{}'.format(node_index), label=edge_label)
# Traverse child or append leaf value
build_graphviz_recurse(node["children"]["left"],
parent_node_index=node_index,
parent_depth=parent_depth + 1,
edge_label="")
build_graphviz_recurse(node["children"]["right"],
parent_node_index=node_index,
parent_depth=parent_depth + 1,
edge_label="")
# Build graph
build_graphviz_recurse(self.tree,
parent_node_index=0,
parent_depth=0,
edge_label="")
# Export pdf
if export_pdf:
print("Saving model tree diagram to '{}.pdf'...".format(output_filename))
g.format = "pdf"
g.render(filename=output_filename, view=False, cleanup=True)
# Export png
if export_png:
print("Saving model tree diagram to '{}.png'...".format(output_filename))
g.format = "png"
g.render(filename=output_filename, view=False, cleanup=True)
# ***********************************
#
# Side functions
#
# ***********************************
def _splitter(node, model,
max_depth=5, min_samples_leaf=10,
search_type="greedy", n_search_grid=100):
# Extract data
X, y = node["data"]
depth = node["depth"]
N, d = X.shape
# Find feature splits that might improve loss
did_split = False
loss_best = node["loss"]
data_best = None
models_best = None
j_feature_best = None
threshold_best = None
# Perform threshold split search only if node has not hit max depth
if (depth >= 0) and (depth < max_depth):
for j_feature in range(d):
# If using adaptive search type, decide on one to use
search_type_use = search_type
if search_type == "adaptive":
if N > n_search_grid:
search_type_use = "grid"
else:
search_type_use = "greedy"
# Use decided search type and generate threshold search list (j_feature)
threshold_search = []
if search_type_use == "greedy":
for i in range(N):
threshold_search.append(X[i, j_feature])
elif search_type_use == "grid":
x_min, x_max = np.min(X[:, j_feature]), np.max(X[:, j_feature])
dx = (x_max - x_min) / n_search_grid
for i in range(n_search_grid+1):
threshold_search.append(x_min + i*dx)
else:
exit("err: invalid search_type = {} given!".format(search_type))
# Perform threshold split search on j_feature
for threshold in threshold_search:
# Split data based on threshold
(X_left, y_left), (X_right, y_right) = _split_data(j_feature, threshold, X, y)
N_left, N_right = len(X_left), len(X_right)
# Splitting conditions
split_conditions = [N_left >= min_samples_leaf,
N_right >= min_samples_leaf]
# Do not attempt to split if split conditions not satisfied
if not all(split_conditions):
continue
                # Compute the weighted loss of the candidate split
loss_left, model_left = _fit_model(X_left, y_left, model)
loss_right, model_right = _fit_model(X_right, y_right, model)
loss_split = (N_left*loss_left + N_right*loss_right) / N
# Update best parameters if loss is lower
if loss_split < loss_best:
did_split = True
loss_best = loss_split
models_best = [model_left, model_right]
data_best = [(X_left, y_left), (X_right, y_right)]
j_feature_best = j_feature
threshold_best = threshold
# Return the best result
result = {"did_split": did_split,
"loss": loss_best,
"models": models_best,
"data": data_best,
"j_feature": j_feature_best,
"threshold": threshold_best,
"N": N}
return result
def _fit_model(X, y, model):
model_copy = deepcopy(model) # must deepcopy the model!
model_copy.fit(X, y)
y_pred = model_copy.predict(X)
loss = model_copy.loss(X, y, y_pred)
assert loss >= 0.0
return loss, model_copy
def _split_data(j_feature, threshold, X, y):
idx_left = np.where(X[:, j_feature] <= threshold)[0]
idx_right = np.delete(np.arange(0, len(X)), idx_left)
assert len(idx_left) + len(idx_right) == len(X)
return (X[idx_left], y[idx_left]), (X[idx_right], y[idx_right])
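# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). ModelTree only needs a
# ``model`` object exposing fit/predict/loss/get_params, as used above; the
# toy mean-value regressor below is a hypothetical stand-in for a real leaf
# model such as a linear regression wrapper.
class _MeanRegressor(object):
    def __init__(self):
        self.mean_ = 0.0
    def get_params(self, deep=True):
        return {}
    def fit(self, X, y):
        self.mean_ = float(np.mean(y))
    def predict(self, X):
        return np.full(len(X), self.mean_)
    def loss(self, X, y, y_pred):
        return float(np.mean((y - y_pred) ** 2))  # mean squared error
if __name__ == "__main__":
    # Fit a depth-2 model tree on a toy piecewise-constant data set.
    X_demo = np.linspace(0.0, 10.0, 100).reshape(-1, 1)
    y_demo = np.where(X_demo[:, 0] < 5.0, 1.0, 3.0)
    tree = ModelTree(_MeanRegressor(), max_depth=2, min_samples_leaf=5)
    tree.fit(X_demo, y_demo, verbose=True)
    print(tree.predict(X_demo[:3]))  # leaf means near the left branch (~1.0)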
|
mit
| 7,199,203,135,097,981,000
| 37.769231
| 241
| 0.486846
| false
| 4.155115
| false
| false
| false
|
sevenian3/ChromaStarPy
|
KappasRaylGas.py
|
1
|
10384
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 17:18:39 2017
@author: ishort
"""
import math
import Useful
import PartitionFn
import ToolBox
#import numpy
#/* Rayleigh scattering opacity routines taken from Moog (moogjul2014/, MOOGJUL2014.tar)
#Chris Sneden (University of Texas at Austin) and collaborators
#http://www.as.utexas.edu/~chris/moog.html
#//From Moog source file Opacscat.f
#*/
"""
#JB#
#a function to create a cubic function fit extrapolation
def cubicFit(x,y):
coeffs = numpy.polyfit(x,y,3)
#returns an array of coefficents for the cubic fit of the form
#Ax^3 + Bx^2 + Cx + D as [A,B,C,D]
return coeffs
#this will work for any number of data points!
def valueFromFit(fit,x):
#return the value y for a given fit, at point x
return (fit[0]*(x**3)+fit[1]*(x**2)+fit[2]*x+fit[3])
"""
masterTemp=[130,500,3000,8000,10000]
#JB#
def masterRayl(numDeps, numLams, temp, lambdaScale, stagePops, gsName, gsFirstMol, molPops):
""" /*c******************************************************************************
c The subroutines needed to calculate the opacities from scattering by
c H I, H2, He I, are in this file. These are from ATLAS9.
c******************************************************************************
*/"""
#//System.out.println("masterRayl called...");
#//From Moog source file Opacitymetals.f
#// From how values such as aC1[] are used in Moog file Opacit.f to compute the total opacity
#// and then the optical depth scale, I infer that they are extinction coefficients
#// in cm^-1
#//
#// There does not seem to be any correction for stimulated emission
logE = math.log10(math.e)
masterRScat = [ [ 0.0 for i in range(numDeps) ] for j in range(numLams) ]
logUH1 = [0.0 for i in range(5)]
logUHe1 = [0.0 for i in range(5)]
logStatWH1 = 0.0
logStatWHe1 = 0.0
theta = 1.0
species = ""
logGroundPopsH1 = [0.0 for i in range(numDeps)]
logGroundPopsHe1 = [0.0 for i in range(numDeps)]
logH2Pops = [0.0 for i in range(numDeps)]
#//
#// H I: Z=1 --> iZ=0:
sigH1 = [0.0 for i in range(numDeps)]
#// He I: Z=2 --> iZ=1:
sigHe1 = [0.0 for i in range(numDeps)]
species = "HI"
logUH1 = PartitionFn.getPartFn2(species)
species = "HeI"
logUHe1 = PartitionFn.getPartFn2(species)
sigH2 = [0.0 for i in range(numDeps)]
#Find index of H2 in molPops array
for iH2 in range(len(gsName)):
if (gsName[iH2].strip() == "H2"):
break;
#print("iH2 ", iH2, " iH2-gsFirstMol ", (iH2-gsFirstMol))
#//System.out.println("iD PopsH1 PopsHe1");
for iD in range(numDeps):
#//neutral stage
#//Assumes ground state stat weight, g_1, is 1.0
#theta = 5040.0 / temp[0][iD]
#// U[0]: theta = 1.0, U[1]: theta = 0.5
"""
if (theta <= 0.5):
logStatWH1 = logUH1[1]
logStatWHe1 = logUHe1[1]
elif ( (theta < 1.0) and (theta > 0.5) ):
logStatWH1 = ( (theta-0.5) * logUH1[0] ) + ( (1.0-theta) * logUH1[1] )
logStatWHe1 = ( (theta-0.5) * logUHe1[0] ) + ( (1.0-theta) * logUHe1[1] )
#//divide by common factor of interpolation interval of 0.5 = (1.0 - 0.5):
logStatWH1 = 2.0 * logStatWH1
logStatWHe1 = 2.0 * logStatWHe1
else:
logStatWH1 = logUH1[0]
logStatWHe1 = logUHe1[0]
"""
thisTemp = temp[0][iD];
#JB#
logWH1Fit = ToolBox.cubicFit(masterTemp,logUH1)
logStatWH1 = ToolBox.valueFromFit(logWH1Fit,thisTemp)
logWHe1Fit = ToolBox.cubicFit(masterTemp,logUHe1)
logStatWHe1 = ToolBox.valueFromFit(logWHe1Fit,thisTemp)
#logStatWH1Fun = spline(masterTemp,logUH1)
#logStatWH1=logStatWH1Fun(thisTemp)
#logStatWHe1Fun = spline(masterTemp,logUHe1)
#logStatWHe1=logStatWHe1Fun(thisTemp)
#JB#
#// NEW Interpolation with temperature for new partition function: lburns
thisTemp = temp[0][iD];
if (thisTemp <= 130.0):
logStatWH1 = logUH1[0]
logStatWHe1 = logUHe1[0]
if (thisTemp >= 10000.0):
logStatWH1 = logUH1[4]
logStatWHe1 = logUHe1[4]
"""
elif (thisTemp > 130 and thisTemp <= 500):
logStatWH1 = logUH1[1] * (thisTemp - 130)/(500 - 130) \
+ logUH1[0] * (500 - thisTemp)/(500 - 130)
logStatWHe1 = logUHe1[1] * (thisTemp - 130)/(500 - 130) \
+ logUHe1[0] * (500 - thisTemp)/(500 - 130)
elif (thisTemp > 500 and thisTemp <= 3000):
logStatWH1 = logUH1[2] * (thisTemp - 500)/(3000 - 500) \
+ logUH1[1] * (3000 - thisTemp)/(3000 - 500)
logStatWHe1 = logUHe1[2] * (thisTemp - 500)/(3000 - 500) \
+ logUHe1[1] * (3000 - thisTemp)/(3000 - 500)
elif (thisTemp > 3000 and thisTemp <= 8000):
logStatWH1 = logUH1[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUH1[2] * (8000 - thisTemp)/(8000 - 3000)
logStatWHe1 = logUHe1[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUHe1[2] * (8000 - thisTemp)/(8000 - 3000)
elif (thisTemp > 8000 and thisTemp < 10000):
logStatWH1 = logUH1[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUH1[3] * (10000 - thisTemp)/(10000 - 8000)
logStatWHe1 = logUHe1[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUHe1[3] * (10000 - thisTemp)/(10000 - 8000)
else:
#// for temperatures of greater than or equal to 10000K lburns
logStatWH1 = logUH1[4]
logStatWHe1 = logUHe1[4]
"""
logGroundPopsH1[iD] = stagePops[0][0][iD] - logStatWH1
logGroundPopsHe1[iD] = stagePops[1][0][iD] - logStatWHe1
logH2Pops[iD] = molPops[iH2-gsFirstMol][iD]
#print("iD " , iD , " logH2 " , logH2Pops[iD])
#// if (iD%10 == 1){
#// System.out.format("%03d, %21.15f, %21.15f %n",
#// iD, logE*logGroundPopsH1[iD], logE*logGroundPopsHe1[iD]);
#// }
kapRScat = 0.0
#//System.out.println("iD iL lambda sigH1 sigHe1 ");
for iL in range(numLams):
#//
for i in range(numDeps):
sigH1[i] = 0.0
sigHe1[i] = 0.0
sigH2[i] = 0.0
#//System.out.println("Calling opacH1 from masterMetal...");
sigH1 = opacHscat(numDeps, temp, lambdaScale[iL], logGroundPopsH1)
sigHe1 = opacHescat(numDeps, temp, lambdaScale[iL], logGroundPopsHe1)
sigH2 = opacH2scat(numDeps, temp, lambdaScale[iL], logH2Pops)
for iD in range(numDeps):
kapRScat = sigH1[iD] + sigHe1[iD] + sigH2[iD]
masterRScat[iL][iD] = math.log(kapRScat)
#if ( (iD%10 == 0) and (iL%10 == 0) ):
# print("iD ", iD, " iL ", iL, " lambda ", lambdaScale[iL], math.log10(sigH1[iD]), math.log10(sigHe1[iD]), math.log10(sigH2[iD]) )
#} //iD
#} //iL
return masterRScat
#} //end method masterRayl
def opacHscat(numDeps, temp, lambda2, logGroundPops):
"""//c******************************************************************************
//c This routine computes H I Rayleigh scattering opacities.
//c******************************************************************************"""
#//System.out.println("opacHscat called");
sigH = [0.0 for i in range(numDeps)]
#//cross-section is zero below threshold, so initialize:
for i in range(numDeps):
sigH[i] = 0.0
freq = Useful.c() / lambda2
#// include 'Atmos.com'
#// include 'Kappa.com'
#// include 'Linex.com'
wavetemp = 2.997925e18 / min(freq, 2.463e15)
ww = math.pow(wavetemp, 2)
sig = ( 5.799e-13 + (1.422e-6/ww) + (2.784/(ww*ww)) ) / (ww*ww)
for i in range(numDeps):
sigH[i] = sig * 2.0 * math.exp(logGroundPops[i])
return sigH
#} //end method opacHscat
def opacHescat(numDeps, temp, lambda2, logGroundPops):
"""//c******************************************************************************
//c This routine computes He I Rayleigh scattering opacities.
//c******************************************************************************"""
#//System.out.println("opacHescat called");
sigHe = [0.0 for i in range(numDeps)]
#//cross-section is zero below threshold, so initialize:
for i in range(numDeps):
sigHe[i] = 0.0
freq = Useful.c() / lambda2
#// include 'Atmos.com'
#// include 'Kappa.com'
#// include 'Linex.com'
wavetemp = 2.997925e18 / min(freq, 5.15e15)
ww = math.pow(wavetemp, 2)
sig = (5.484e-14/ww/ww) * math.pow( ( 1.0 + ((2.44e5 + (5.94e10/(ww-2.90e5)))/ww) ), 2 )
for i in range(numDeps):
sigHe[i] = sig * math.exp(logGroundPops[i])
return sigHe
#} //end method opacHescat
def opacH2scat(numDeps, temp, lambda2, molPops):
sigH2 = [0.0e0 for i in range(numDeps)]
#//cross-section is zero below threshold, so initialize:
for i in range(numDeps):
sigH2[i] = 0.0
freq = Useful.c() / lambda2;
"""
//c******************************************************************************
//c This routine computes H2 I Rayleigh scattering opacities.
//c******************************************************************************
// include 'Atmos.com'
// include 'Kappa.com'
// include 'Linex.com'
"""
wavetemp = 2.997925e18 / min(freq, 2.463e15)
ww = wavetemp**2
sig = ( 8.14e-13 + (1.28e-6/ww) + (1.61/(ww*ww)) ) / (ww*ww)
#print("freq ", freq, " wavetemp ", wavetemp, " ww ", ww, " sig ", sig)
for i in range(numDeps):
sigH2[i] = sig * math.exp(molPops[i])
#print("i " , i , " molPops " , molPops[i] , " sigH2 " , sigH2[i])
return sigH2
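if __name__ == "__main__":
    #Hedged sanity check (not part of the original file): evaluate the H2
    #Rayleigh cross-section formula used in opacH2scat() at 500 nm, using the
    #speed of light directly instead of the project's Useful module.  The
    #wavelength units (cm) and the constant below are assumptions.
    c_cgs = 2.997925e10                 # speed of light in cm/s (assumed units)
    lambda_cm = 500.0e-7                # 500 nm expressed in cm
    freq = c_cgs / lambda_cm
    wavetemp = 2.997925e18 / min(freq, 2.463e15)  # wavelength in Angstroms
    ww = wavetemp**2
    sig = ( 8.14e-13 + (1.28e-6/ww) + (1.61/(ww*ww)) ) / (ww*ww)
    print("H2 Rayleigh cross-section per molecule at 500 nm ~", sig, "cm^2")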
|
mit
| 1,023,656,047,883,678,600
| 33.2
| 149
| 0.510401
| false
| 2.96347
| false
| false
| false
|
ac-seguridad/ac-seguridad
|
project/manejador/cliente_entrada.py
|
1
|
2574
|
# This file is in charge of receiving the plate that was read and deciding
# whether or not to let a vehicle through, depending on its configuration.
# It also checks whether the plate is registered in the system and, if so,
# looks up the user associated with the vehicle.
# This file basically handles the alerts generated in the system.
# from ac_seguridad.models import *
import requests
from mysocket import MySocket
import socket
import pdb
# Constants.
NUM_PUERTA = 5
RIF = "12345"
HOST = "localhost"
PORT = 8000
# 1234: restricted access
# 0000: unrestricted access
#pdb.set_trace()
# Functions
def leer_placa():
placa = input("Placa: ")
return placa
# The program starts here.
# ref: https://docs.python.org/3/howto/sockets.html
# Create a socket as a client.
print("Creando socket")
# socket_cliente = MySocket()
# socket_cliente.connect(host=HOST, port=PORT)
print("Socket conectado.")
# Send the first message:
# Structure of the first message:
# * RIF: filled in.
# * ticket: None.
# * placa: filled in.
# * tipo: filled in ('placa_leida')
# * puerta: filled in.
# * lectura_automatica: filled in; its possible values are:
#       True: the reading was done automatically
#       False: the reading was done manually
#       None: the information does not apply (e.g. server-to-client messages)
# * registrado: whether the ticket is registered; for an entry this is None
print("Preparando mensaje")
mensaje = dict()
mensaje['estacionamiento'] = RIF
mensaje['ticket'] = None
mensaje['placa'] = leer_placa()
mensaje['puerta'] = NUM_PUERTA
mensaje['tipo'] = 'placa_leida_entrada'
mensaje['lectura_automatica']= True
mensaje['registrado']=None
print("Enviando mensaje: {}".format(mensaje))
# socket_cliente.sendall_json(mensaje)
# socket_cliente.mysend("Hola, este es el mensaje\0".encode(encoding="utf-8", errors="strict"))
url = "http://{}:{}/manejador/manejar_mensaje/".format(HOST,PORT)
data_mensaje = mensaje
respuesta_request = requests.post(url, data=data_mensaje)
respuesta = respuesta_request.json()
print("Mensaje enviado")
print("Recibiendo respuesta")
# respuesta = socket_cliente.receive()
# pdb.set_trace()
print("Respuesta recibida: {}".format(respuesta))
if (respuesta['tipo'] == "OK_entrada_estacionamiento"):
print("Luz verde.")
elif (respuesta['tipo'] == "NO_entrada_estacionamiento"):
print("Luz roja.")
elif (respuesta['tipo'] == "NO_carro_dentro"):
print("Luz roja.")
else:
print("Respuesta no válida")
# socket_cliente.sock.shutdown(socket.SHUT_WR)
# socket_cliente.sock.close()
|
apache-2.0
| 5,597,889,476,890,702,000
| 28.45977
| 95
| 0.712446
| false
| 2.512745
| false
| false
| false
|
open-synergy/opnsynid-stock-logistics-warehouse
|
stock_other_receipt_operation/tests/test_warehouse.py
|
1
|
2104
|
# -*- coding: utf-8 -*-
# © 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from .base import BaseOtherReceiptOperation
import itertools
class TestWarehouse(BaseOtherReceiptOperation):
def test_warehouse_create(self):
reception_steps = [
"one_step",
"two_steps",
"three_steps",
"transit_one_step",
"transit_two_steps",
"transit_three_steps",
]
delivery_steps = [
"ship_only",
"pick_ship",
"pick_pack_ship",
"ship_transit",
"pick_ship_transit",
"pick_pack_ship_transit",
]
num = 1
for combination in itertools.product(reception_steps, delivery_steps):
self.create_wh({
"name": "X WH %s" % str(num),
"code": "X%s" % str(num),
"reception_steps": combination[0],
"delivery_steps": combination[1],
})
num += 1
def test_warehouse_edit(self):
reception_steps = [
"one_step",
"two_steps",
"three_steps",
"transit_one_step",
"transit_two_steps",
"transit_three_steps",
]
delivery_steps = [
"ship_only",
"pick_ship",
"pick_pack_ship",
"ship_transit",
"pick_ship_transit",
"pick_pack_ship_transit",
]
num = 1
for combination in itertools.product(reception_steps, delivery_steps):
if num == 1:
wh = self.create_wh({
"name": "X WH %s" % str(num),
"code": "X%s" % str(num),
"reception_steps": combination[0],
"delivery_steps": combination[1],
})
else:
self.edit_wh(wh, {
"reception_steps": combination[0],
"delivery_steps": combination[1],
})
num += 1
|
agpl-3.0
| -107,100,302,950,765,000
| 29.478261
| 78
| 0.456015
| false
| 4.005714
| false
| false
| false
|
amerlyq/piony
|
piony/gstate.py
|
1
|
3033
|
from PyQt5.QtCore import QObject, pyqtSignal # , QRect, QPoint
# from PyQt5.QtWidgets import qApp
from piony import logger
from piony.config import ymlparser as yml
from piony.config.argparser import ArgParser
from piony.config.budparser import BudParser, BudError
from piony.config.keyparser import KeymapParser
class GState(QObject):
invalidated = pyqtSignal(dict)
def __init__(self, argv):
super().__init__()
logger.info('%s init', self.__class__.__qualname__)
self.active_window = '%1'
self.cfg = None
self.bud = None
self.now = None # Instant states like current visibility, etc
self.kmp = None
yml.init()
self._psArg = ArgParser()
self.update(argv)
def update(self, argv):
kgs = self.parse(argv)
# chg_gs = self.compare(kgs)
# if chg_gs:
# self.invalidated.emit(self.get_gs(), chg_gs)
logger.info('GState updated')
self.invalidated.emit(kgs)
def _set_args_from_command_line(self, cfg, args):
ar = [(k, v) for k, v in vars(args).items() if v]
for section, opts in cfg.items():
for k, v in ar:
if k in opts:
cfg[section][k] = str(v)
def parse(self, argv): # NEED: RFC
args = self._psArg.parse(argv[1:])
self._psArg.apply(args) # Set gvars
cfg = yml.parse(yml.G_CONFIG_PATH)
self.sty = yml.parse(yml.G_STYLE_PATH)
kp = KeymapParser()
self.kmp = kp.convert()
if args.kill:
print("kill:")
self.quit.emit()
self._psArg.apply(args) # Set gvars
self._set_args_from_command_line(cfg, args)
entries = args.buds if args.buds else str(cfg['Bud']['default'])
Bud_Ps = BudParser()
try:
bud = Bud_Ps.parse(entries)
except BudError as e:
print("Error:", e)
if not self.bud: # NOTE: work must go on if client args are bad?
# qApp.quit() # don't work until main loop
raise e
# TODO: Make 'bReload' as tuple to distinguish necessary refreshes.
bReload = {}
bReload['toggle'] = bool(0 == len(argv))
bReload['Window'] = bool(self.cfg and cfg['Window'] != self.cfg['Window'])
self.cfg = cfg
self.bud = bud
# TODO: ret whole new current state?
return bReload
def compare(self, kgs): # WARNING: broken
""" Used as separate function because of embedded file paths in arg """
# Compose dict of current GState variables
# curr_gs = self.get_gs()
# Detected changes in global state
chg_gs = [('cfg', 'Window'), 'bud']
# TODO: recursive diff cgs/kgs and inserting 1 in changed keys/branches
return chg_gs
# TODO: replace with namedtuple (use it to emit)
def get_gs(self):
return {k: v for k, v in self.__dict__.items()
if not k.startswith('__') and not callable(k)}
|
gpl-3.0
| 6,719,100,791,870,633,000
| 33.465909
| 82
| 0.5727
| false
| 3.53085
| false
| false
| false
|
Quantia-Analytics/AzureML-Regression-Example
|
Python files/transform.py
|
1
|
2490
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 21 12:49:06 2015
@author: Steve Elston
"""
## The main function with a single argument, a Pandas data frame
## from the first input port of the Execute Python Script module.
def azureml_main(BikeShare):
import pandas as pd
from sklearn import preprocessing
import utilities as ut
import numpy as np
import os
## If not in the Azure environment, read the data from a csv
## file for testing purposes.
Azure = False
if(Azure == False):
pathName = "C:/Users/Steve/GIT/Quantia-Analytics/AzureML-Regression-Example/Python files"
fileName = "BikeSharing.csv"
filePath = os.path.join(pathName, fileName)
BikeShare = pd.read_csv(filePath)
## Drop the columns we do not need
BikeShare = BikeShare.drop(['instant',
'atemp',
'casual',
'registered'], 1)
## Normalize the numeric columns
scale_cols = ['temp', 'hum', 'windspeed']
arry = BikeShare[scale_cols].as_matrix()
BikeShare[scale_cols] = preprocessing.scale(arry)
## Create a new column to indicate if the day is a working day or not.
work_day = BikeShare['workingday'].as_matrix()
holiday = BikeShare['holiday'].as_matrix()
BikeShare['isWorking'] = np.where(np.logical_and(work_day == 1, holiday == 0), 1, 0)
## Compute a new column with the count of months from
## the start of the series which can be used to model
## trend
BikeShare['monthCount'] = ut.mnth_cnt(BikeShare)
    ## Shift the order of the hour variable so that it is smoothly
    ## "humped" over 24 hours.
hr = BikeShare.hr.as_matrix()
BikeShare['xformHr'] = np.where(hr > 4, hr - 5, hr + 19)
## Add a variable with unique values for time of day for working
## and non-working days.
isWorking = BikeShare['isWorking'].as_matrix()
BikeShare['xformWorkHr'] = np.where(isWorking,
BikeShare.xformHr,
BikeShare.xformHr + 24.0)
BikeShare['dayCount'] = pd.Series(range(BikeShare.shape[0]))/24
return BikeShare
|
gpl-2.0
| -3,416,187,222,252,025,300
| 37.921875
| 97
| 0.562651
| false
| 3.958665
| false
| false
| false
|
nbsdx/abac
|
examples/python_tests/acme_rockets_rt0/query.py
|
1
|
3122
|
#!/usr/bin/env python
"""
Run the queries described in README
cmd: env keystore=`pwd` ./query.py
"""
import os
import ABAC
ctxt = ABAC.Context()
# Keystore is the directory containing the principal credentials.
# Load existing principals and/or policy credentials
if (os.environ.has_key("keystore")) :
keystore=os.environ["keystore"]
ctxt.load_directory(keystore)
else:
print("keystore is not set...")
exit(1)
# retrieve principals' keyid value from local credential files
acmeID=ABAC.ID("Acme_ID.pem");
acmeID.load_privkey("Acme_private.pem");
acme=acmeID.keyid()
coyoteID=ABAC.ID("Coyote_ID.pem");
coyoteID.load_privkey("Coyote_private.pem");
coyote=coyoteID.keyid()
bigbirdID=ABAC.ID("Bigbird_ID.pem");
bigbirdID.load_privkey("Bigbird_private.pem");
bigbird=bigbirdID.keyid()
##########################################################################
# dump the loaded attribute policies
#
print "\n...policy attribute set..."
credentials = ctxt.credentials()
for credential in credentials:
print "context: %s <- %s" % (credential.head().string(), credential.tail().string())
##########################################################################
# is coyote a preferred_customer of Acme ?
# role=[keyid:Acme].role:preferred_customer
# p =[keyid:coyote]
print "\n===good============ Acme.preferred_customer <- Coyote"
(success, credentials) = ctxt.query("%s.preferred_customer" % acme, coyote)
if success:
print "success!"
else:
print "failure!"
for credential in credentials:
print "credential %s <- %s" % (credential.head().string(), credential.tail().string())
##########################################################################
# can coyote buy rockets from Acme ?
# role=[keyid:Acme].role:buy_rockets
# p =[keyid:coyote]
print "\n===good============ Acme.buy_rockets <- Coyote"
(success, credentials) = ctxt.query("%s.buy_rockets" % acme, coyote)
if success:
print "success!"
else:
print "failure!"
for credential in credentials:
print "credential %s <- %s" % (credential.head().string(), credential.tail().string())
##########################################################################
# is Acme a friend of coyote ?
# role=[keyid:Coyote].role:friend
# p=[keyid:Acme]
print "\n===bad=============== Coyote.friend <- Acme"
(success, credentials) = ctxt.query("%s.friend" % coyote, acme)
if success:
print "success!"
else:
print "failure!"
for credential in credentials:
print "credential %s <- %s" % (credential.head().string(), credential.tail().string())
##########################################################################
# using complex role to ask a question.. expecting to fail
# role=[keyid:Acme].role:buy_rockets
# p=[keyid:Acme].role:preferred_customer
print "\n===bad?=============== Acme.buy_rockets <- Acme.preferred_customer"
(success, credentials) = ctxt.query("%s.buy_rockets" % acme, "%s.preferred_customer" % acme)
if success:
print "success!"
else:
print "failure!"
for credential in credentials:
print "credential %s <- %s" % (credential.head().string(), credential.tail().string())
|
mit
| -4,170,137,190,318,609,000
| 30.535354
| 92
| 0.601858
| false
| 3.484375
| false
| false
| false
|
shantanu561993/FAndR
|
docs/conf.py
|
1
|
8415
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# FAR documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import FAR
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Find and Replace'
copyright = u'2015, Shantanu Khandelwal'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = FAR.__version__
# The full version, including alpha/beta/rc tags.
release = FAR.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FARdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'FAR.tex',
u'Find and Replace Documentation',
u'Shantanu Khandelwal', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'FAR',
u'Find and Replace Documentation',
[u'Shantanu Khandelwal'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FAR',
u'Find and Replace Documentation',
u'Shantanu Khandelwal',
'FAR',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
bsd-3-clause
| 2,237,559,652,515,695,600
| 29.6
| 76
| 0.7041
| false
| 3.716873
| true
| false
| false
|
volodymyrss/3ML
|
threeML/plugins/spectrum/binned_spectrum_set.py
|
1
|
3209
|
import numpy as np
from threeML.utils.time_interval import TimeIntervalSet
from threeML.plugins.spectrum.binned_spectrum import BinnedSpectrum
class BinnedSpectrumSet(object):
def __init__(self, binned_spectrum_list, reference_time=0.0, time_intervals=None):
"""
        a set of binned spectra with optional time intervals
        :param binned_spectrum_list: list of binned spectra
        :param reference_time: reference time for time intervals
        :param time_intervals: optional TimeIntervalSet
"""
self._binned_spectrum_list = binned_spectrum_list # type: list(BinnedSpectrum)
self._reference_time = reference_time
# normalize the time intervals if there are any
if time_intervals is not None:
self._time_intervals = time_intervals - reference_time # type: TimeIntervalSet
assert len(time_intervals) == len(
                binned_spectrum_list), 'time intervals must be the same length as binned spectra'
else:
self._time_intervals = None
@property
def reference_time(self):
return self._reference_time
def __getitem__(self, item):
return self._binned_spectrum_list[item]
def __len__(self):
return len(self._binned_spectrum_list)
def time_to_index(self, time):
"""
get the index of the input time
:param time: time to search for
:return: integer
"""
assert self._time_intervals is not None, 'This spectrum set has no time intervals'
return self._time_intervals.containing_bin(time)
def sort(self):
"""
        sort the binned spectra in place according to time
:return:
"""
assert self._time_intervals is not None, 'must have time intervals to do sorting'
# get the sorting index
idx = self._time_intervals.argsort()
        # reorder the spectra (a plain list cannot be indexed with an index array)
        self._binned_spectrum_list = [self._binned_spectrum_list[i] for i in idx]
# sort the time intervals in place
self._time_intervals.sort()
@property
def quality_per_bin(self):
return np.array([spectrum.quality for spectrum in self._binned_spectrum_list])
@property
def n_channels(self):
return self.counts_per_bin.shape[1]
@property
def counts_per_bin(self):
return np.array([spectrum.counts for spectrum in self._binned_spectrum_list])
@property
def count_errors_per_bin(self):
return np.array([spectrum.count_errors for spectrum in self._binned_spectrum_list])
@property
def rates_per_bin(self):
return np.array([spectrum.rates for spectrum in self._binned_spectrum_list])
@property
def rate_errors_per_bin(self):
return np.array([spectrum.rate_errors for spectrum in self._binned_spectrum_list])
@property
def sys_errors_per_bin(self):
return np.array([spectrum.sys_errors for spectrum in self._binned_spectrum_list])
@property
def exposure_per_bin(self):
return np.array([spectrum.exposure for spectrum in self._binned_spectrum_list])
@property
def time_intervals(self):
return self._time_intervals
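def _example_build_spectrum_set(spectra, intervals, reference_time=0.0):
    """
    Usage sketch added for illustration (not part of the original module):
    wraps an existing list of BinnedSpectrum objects and a matching
    TimeIntervalSet, then queries the set. Both arguments are assumed to be
    built elsewhere and to have the same length.
    """
    spectrum_set = BinnedSpectrumSet(spectra,
                                     reference_time=reference_time,
                                     time_intervals=intervals)
    # index of the bin containing t = 1.5 (assuming the intervals cover it)
    idx = spectrum_set.time_to_index(1.5)
    # counts_per_bin stacks the per-spectrum counts into a (bin, channel) array
    return idx, spectrum_set.counts_per_bin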
|
bsd-3-clause
| -6,567,661,062,073,182,000
| 25.303279
| 96
| 0.647554
| false
| 4.103581
| false
| false
| false
|
lowks/SDST
|
seqtools/demultiplexer.py
|
1
|
7927
|
import argparse
import subprocess
import os
import collections
import pylev
from seqtools.utils import revcomp,fileOpen
from seqtools.fastq import Fastq
def isIndexRevComp(indexfile,indexes,n=500000):
"""Determine if the indexes are reverse complemented or not
:param indexfile: filename of the Fastq index file
:param indexes: list or tuple of index strings
:param n: integer number of reads to sample
"""
print("HERE")
ifile = Fastq(indexfile)
ilength=len(indexes[0])
print(ilength)
indexreads = collections.defaultdict(int)
for i in range(n):
indexreads[ifile.next().sequence[:ilength]]+=1
counts = {'normal':0,
'revcomp':0}
for k,v in list(indexreads.items()):
print(k,v)
for i in indexes:
if(pylev.levenshtein(k,i)<=1):
counts['normal']+=v
continue
if(pylev.levenshtein(k,revcomp(i))<=1):
counts['revcomp']+=v
if(counts['revcomp']>counts['normal']):
print('using revcomp')
else:
print('NOT revcomp')
return(counts['revcomp']>counts['normal'])
def demultiplex(readfile,
indexfile,
indexes,
readfile2=None,
indexfile2=None):
"""Demultiplex from separate FASTQ files.
All FASTQ files can be gzipped (with suffix .gz).
:param readfile: The filename of the first fastq file
:param indexfile: The filename of the first index fastq file
:param indexes: An iterable of indexes. If dual-barcoding is used, the indexes should be comma-separated strings, one string for each barcode pair.
:param indexfile2: The filename of the second index fastq file. If this parameter is included, then the indexes parameter should be a set of comma-separated pairs of indexes.
:param readfile2: The filename of the second fastq file [optional]
"""
# single readfile, single indexfile
if(readfile2 is None) and (indexfile2 is None):
rfile1 = Fastq(readfile)
(rpath,rname) = os.path.split(readfile)
ifile = Fastq(indexfile)
indexRevComp = isIndexRevComp(indexfile,indexes)
        ofile1 = {}
        existingIndexes = []
for i in indexes:
ofname1 = os.path.join(rpath,i + "_" + rname)
if(not os.path.exists(ofname1)):
ofile1[i]=fileOpen(os.path.join(rpath,i + "_" + rname),'w')
else:
print(ofname1," already exists, skipping")
existingIndexes.append(i)
for i in existingIndexes:
indexes.remove(i)
        if(len(indexes)==0):
            exit(0)
        indexlen = len(indexes[0])
for (r1,i) in zip(rfile1,ifile):
try:
if indexRevComp:
i2 = revcomp(i.sequence[:indexlen])
ofile1[i2].write(str(r1))
else:
i2 = i.sequence[:indexlen]
ofile1[i2].write(str(r1))
except KeyError:
pass
rfile1.close()
ifile.close()
for ofile in list(ofile1.values()):
ofile.close()
## for i in indexes:
## os.rename(os.path.join(rpath,'tmp.' + i + "_" + rname),
## os.path.join(rpath,i + "_" + rname))
# two readfiles, single indexfile
if(readfile2 is not None) and (indexfile2 is None):
print("here1")
rfile1 = Fastq(readfile)
rfile2 = Fastq(readfile2)
(rpath,rname) = os.path.split(readfile)
(rpath2,rname2) = os.path.split(readfile2)
ifile = Fastq(indexfile)
indexRevComp = isIndexRevComp(indexfile,indexes)
ofile1 = {}
ofile2 = {}
existingIndexes = []
for i in indexes:
ofname1 = os.path.join(rpath,i + "_" + rname)
ofname2 = os.path.join(rpath2,i + "_" + rname2)
if(os.path.exists(ofname1) and os.path.exists(ofname2)):
print(ofname1,ofname2, " already exist, skipping")
existingIndexes.append(i)
else:
ofile1[i]=fileOpen(os.path.join(rpath,i + "_" + rname),'w')
ofile2[i]=fileOpen(os.path.join(rpath2,i + "_" + rname2),'w')
for i in existingIndexes:
indexes.remove(i)
if(len(indexes)==0):
exit(0)
indexlen = len(indexes[0])
for (r1,r2,i) in zip(rfile1,rfile2,ifile):
try:
if indexRevComp:
i2 = revcomp(i.sequence[:indexlen])
ofile1[i2].write(str(r1))
ofile2[i2].write(str(r2))
else:
i2 = i.sequence[:indexlen]
ofile1[i2].write(str(r1))
ofile2[i2].write(str(r2))
except KeyError:
pass
rfile1.close()
rfile2.close()
ifile.close()
for ofile in list(ofile1.values()):
ofile.close()
for ofile in list(ofile2.values()):
ofile.close()
## for i in indexes:
## print os.path.join(rpath,'tmp.' + i + "_" + rname),os.path.join(rpath,i + "_"+rname)
## os.rename(os.path.join(rpath,'tmp.' + i + "_" + rname),
## os.path.join(rpath,i + "_"+rname))
## os.rename(os.path.join(rpath2,'tmp.' + i +"_"+ rname2),
## os.path.join(rpath2,i +"_"+ rname2))
# two readfiles, two indexfiles
if(readfile2 is not None) and (indexfile2 is not None):
rfile1 = Fastq(readfile)
rfile2 = Fastq(readfile2)
(rpath,rname) = os.path.split(readfile)
(rpath2,rname2) = os.path.split(readfile2)
ifile = Fastq(indexfile)
ifile2 = Fastq(indexfile2)
indexes = [tuple(x.split(',')) for x in indexes]
indexRevComp = isIndexRevComp(indexfile,[i[0] for i in indexes])
ofile1 = {}
ofile2 = {}
existingIndexes = []
for j in indexes:
i = ''.join(j)
ofname1 = os.path.join(rpath,i + "_" + rname)
ofname2 = os.path.join(rpath2,i + "_" + rname2)
if(os.path.exists(ofname1) and os.path.exists(ofname2)):
print(ofname1,ofname2, " already exist, skipping")
                existingIndexes.append(j)
else:
ofile1[i]=fileOpen(ofname1,'w')
ofile2[i]=fileOpen(ofname2,'w')
for i in existingIndexes:
indexes.remove(i)
if(len(indexes)==0):
exit(0)
indexlen = len(indexes[0][0])
for (r1,r2,i,i2) in zip(rfile1,rfile2,ifile,ifile2):
try:
if indexRevComp:
ir = revcomp(i.sequence[:indexlen])
ir2 = revcomp(i2.sequence[:indexlen])
istr = ir+ir2
ofile1[istr].write(str(r1))
ofile2[istr].write(str(r2))
else:
ir = i.sequence[:indexlen]
ir2 = i2.sequence[:indexlen]
istr = ir+ir2
ofile1[istr].write(str(r1))
ofile2[istr].write(str(r2))
except KeyError:
pass
rfile1.close()
rfile2.close()
ifile.close()
ifile2.close()
for ofile in list(ofile1.values()):
ofile.close()
for ofile in list(ofile2.values()):
ofile.close()
## for i in indexes:
## ofname1 = os.path.join(rpath,''.join(i) + "_" + rname)
## ofname2 = os.path.join(rpath2,''.join(i) + "_" + rname2)
## os.rename(os.path.join(rpath,'tmp.' + ofname1),
## os.path.join(rpath,ofname1))
## os.rename(os.path.join(rpath2,'tmp.'+ofname2),
## os.path.join(rpath2,ofname2))
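def _example_demultiplex_paired_end():
    """
    Usage sketch added for illustration (not part of the original module):
    demultiplexes a paired-end run with a single index read. The file names
    and the two 6-base indexes below are hypothetical; as noted above, the
    FASTQ files may be gzipped.
    """
    demultiplex('sample_R1.fastq.gz',
                'sample_I1.fastq.gz',
                ['ACGTAC', 'TTGCAA'],
                readfile2='sample_R2.fastq.gz')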
|
mit
| 2,461,416,735,547,084,000
| 37.110577
| 181
| 0.526303
| false
| 3.529386
| false
| false
| false
|
ragavvenkatesan/Convolutional-Neural-Networks
|
yann/core/conv.py
|
1
|
10586
|
"""
``yann.core.conv.py`` is one file that contains all the convolution operators.
It contains two functions for performing either 2d convolution (``conv2d``) or 3d convolution
(``conv3d``).
These functions shall be called by every convolution layer from ``yann.layers.py``
TODO:
* Add 3D convolution support from theano.
* Add Masked convolution support.
"""
from theano.tensor.nnet import conv2d
from theano.tensor.nnet.abstract_conv import conv2d_grad_wrt_inputs as deconv2d
from theano.tensor.nnet.abstract_conv import get_conv_output_shape as conv_shape
class convolver_2d(object):
"""
Class that performs convolution
    This class basically performs convolution. These outputs can be probed using the
convolution layer if needed. This keeps things simple.
Args:
        input: This variable should either be a ``theano.tensor4`` (``theano.matrix``
            reshaped also works) variable or an output from a previous layer which is
a ``theano.tensor4`` convolved with a ``theano.shared``. The input should
be of shape ``(batchsize, channels, height, width)``. For those who have
tried ``pylearn2`` or such, this is called bc01 format.
        filters: This variable should be ``theano.shared`` variables of filter weights,
            could even be a filter bank. ``filters`` should be of shape ``(nchannels,
            nkerns, filter_height, filter_width)``. ``nchannels`` is the number of input \
channels and ``nkerns`` is the number of kernels or output channels.
subsample: Stride Tuple of ``(int, int)``.
filter_shape: This variable should be a tuple or an array:
``[nkerns, nchannles, filter_height, filter_width]``
        image_shape: This variable should be a tuple or an array:
``[batchsize, channels, height, width]``
``image_shape[1]`` must be equal to ``filter_shape[1]``
border_mode: The input to this can be either ``'same'`` or other theano defaults
Notes:
* ``conv2d.out`` output, Output that could be provided as
output to the next layer or to other convolutional layer options.
            The size of the output depends on border mode and subsample
operation performed.
* ``conv2d.out_shp``: (``int``, ``int``), A tuple (height, width) of all feature maps
The options for ``border_mode`` input which at the moment of writing this doc are
* ``'valid'`` - apply filter wherever it completely overlaps with the
input. Generates output of shape ``input shape - filter shape + 1``
* ``'full'``- apply filter wherever it partly overlaps with the input.
Generates output of shape ``input shape + filter shape - 1``
* ``'half'``: pad input with a symmetric border of ``filter rows // 2``
rows and ``filter columns // 2`` columns, then perform a valid
convolution. For filters with an odd number of rows and columns, this
leads to the output shape being equal to the input shape.
* ``<int>``: pad input with a symmetric border of zeros of the given
width, then perform a valid convolution.
* ``(<int1>, <int2>)``: pad input with a symmetric border of ``int1``
rows and ``int2`` columns, then perform a valid convolution.
Refer to `theano documentation's convolution page
<http://deeplearning.net/software/theano/library/tensor/nnet/conv.html>`_
for more details on this.
Basically cuDNN is used for ``same`` because at the moment of writing
        this function, ``theano.conv2d`` doesn't support ``same`` convolutions
on the GPU. For everything else, ``theano`` default will be used.
TODO:
Implement ``border_mode = 'same'`` for libgpuarray backend. As of now only supports
CUDA backend.
        Need to do something about this. With V0.10 of theano, I cannot use cuda.dnn for
same convolution.
"""
def __init__ ( self,
input,
filters,
subsample,
filter_shape,
image_shape,
border_mode = 'valid',
verbose = 1
):
if not image_shape[1] == filter_shape[1]:
raise Exception ("input_shape[1] and filter_shape[1] must match")
if verbose >=3 :
print "... creating convolution operator"
_,_,_out_height,_out_width = conv_shape (image_shape = image_shape,
kernel_shape = filter_shape,
border_mode = border_mode,
subsample = subsample)
self.out = conv2d (
input = input,
filters = filters,
input_shape = image_shape,
filter_shape = filter_shape,
subsample = subsample,
border_mode = border_mode,
)
self.out_shp = (_out_height, _out_width)
class deconvolver_2d(object):
"""
class that performs deconvolution
This class basically performs convolution.
Args:
        input: This variable should either be a ``theano.tensor4`` (``theano.matrix``
            reshaped also works) variable or an output from a previous layer which is
a ``theano.tensor4`` convolved with a ``theano.shared``. The input should
be of shape ``(batchsize, channels, height, width)``. For those who have
tried ``pylearn2`` or such, this is called bc01 format.
        filters: This variable should be ``theano.shared`` variables of filter weights,
            could even be a filter bank. ``filters`` should be of shape ``(nchannels,
            nkerns, filter_height, filter_width)``. ``nchannels`` is the number of input \
channels and ``nkerns`` is the number of kernels or output channels.
subsample: Stride Tuple of ``(int, int)``.
filter_shape: This variable should be a tuple or an array:
``[nkerns, nchannles, filter_height, filter_width]``
        image_shape: This variable should be a tuple or an array:
``[batchsize, channels, height, width]``
``image_shape[1]`` must be equal to ``filter_shape[1]``
output_shape: Request a size of output of image required. This variable should a tuple.
border_mode: The input to this can be either ``'same'`` or other theano defaults
Notes:
* ``conv2d.out`` output, Output that could be provided as
output to the next layer or to other convolutional layer options.
            The size of the output depends on border mode and subsample
operation performed.
* ``conv2d.out_shp``: (``int``, ``int``), A tuple (height, width) of all feature maps
The options for ``border_mode`` input which at the moment of writing this doc are
* ``'valid'`` - apply filter wherever it completely overlaps with the
input. Generates output of shape ``input shape - filter shape + 1``
* ``'full'``- apply filter wherever it partly overlaps with the input.
Generates output of shape ``input shape + filter shape - 1``
* ``'half'``: pad input with a symmetric border of ``filter rows // 2``
rows and ``filter columns // 2`` columns, then perform a valid
convolution. For filters with an odd number of rows and columns, this
leads to the output shape being equal to the input shape.
* ``<int>``: pad input with a symmetric border of zeros of the given
width, then perform a valid convolution.
* ``(<int1>, <int2>)``: pad input with a symmetric border of ``int1``
rows and ``int2`` columns, then perform a valid convolution.
Refer to `theano documentation's convolution page
<http://deeplearning.net/software/theano/library/tensor/nnet/conv.html>`_
for more details on this.
Basically cuDNN is used for ``same`` because at the moment of writing
this funciton, ``theano.conv2d`` doesn't support``same`` convolutions
on the GPU. For everything else, ``theano`` default will be used.
TODO:
Implement ``border_mode = 'same'`` and full for libgpuarray backend. As of now only supports
CUDA backend.
        Need to do something about this. With V0.10 of theano, I cannot use cuda.dnn for
same convolution.
Right now deconvolution works only with ``border_mode = 'valid'``
"""
def __init__ ( self,
input,
filters,
subsample,
filter_shape,
image_shape,
output_shape,
border_mode = 'valid',
verbose = 1
):
# if not image_shape[1] == filter_shape[1]:
# raise Exception ("input_shape[1] and filter_shape[1] must match")
if verbose >=3 :
print "... creating deconvolution operator"
# Transpose the convoltuion
# self.filters.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1]
_out_height = output_shape[2]
_out_width = output_shape[3]
self.out = deconv2d (
output_grad = input,
filters = filters,
input_shape = output_shape,
#filter_shape = filter_shape,
border_mode = border_mode,
subsample = subsample,
)
self.out_shp = (_out_height, _out_width)
# Check by using the reverse on a convolution shape, the actual size.
_,_,_in_height,_in_width = conv_shape (image_shape = output_shape,
kernel_shape = filter_shape,
border_mode = border_mode,
subsample = subsample)
        if not (_in_height == image_shape [2] and _in_width == image_shape [3]):
            raise Exception ("This dimensionality of the output image cannot be achieved.")
if __name__ == '__main__': #pragma: no cover
pass
|
mit
| 6,412,593,143,335,912,000
| 44.051064
| 100
| 0.573871
| false
| 4.588643
| false
| false
| false
|
moberweger/deep-prior-pp
|
src/trainer/optimizer.py
|
1
|
4689
|
"""Basis for different optimization algorithms.
Optimizer provides interface for creating the update rules for gradient based optimization.
It includes SGD, NAG, RMSProp, etc.
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <oberweger@icg.tugraz.at>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import theano
import theano.tensor as T
import numpy
__author__ = "Markus Oberweger <oberweger@icg.tugraz.at>"
__copyright__ = "Copyright 2015, ICG, Graz University of Technology, Austria"
__credits__ = ["Paul Wohlhart", "Markus Oberweger"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Markus Oberweger"
__email__ = "oberweger@icg.tugraz.at"
__status__ = "Development"
class Optimizer(object):
"""
Class with different optimizers of the loss function
"""
def __init__(self, grads, params):
"""
Initialize object
:param grads: gradients of the loss function
:param params: model parameters that should be updated
"""
self.grads = grads
self.params = params
self.updates = []
self.shared = []
if len(grads) != len(params):
print "Warning: Size of gradients ({}) does not fit size of parameters ({})!".format(len(grads), len(params))
def ADAM(self, learning_rate=0.0002, beta1=0.9, beta2=0.999, epsilon=1e-8, gamma=1-1e-8):
"""
Adam update rule by Kingma and Ba, ICLR 2015, version 2 (with momentum decay).
:param learning_rate: alpha in the paper, the step size
:param beta1: exponential decay rate of the 1st moment estimate
:param beta2: exponential decay rate of the 2nd moment estimate
:param epsilon: small epsilon to prevent divide-by-0 errors
:param gamma: exponential increase rate of beta1
:return: updates
"""
t = theano.shared(numpy.cast[theano.config.floatX](1.0)) # timestep, for bias correction
beta1_t = beta1*gamma**(t-1.) # decay the first moment running average coefficient
for param_i, grad_i in zip(self.params, self.grads):
mparam_i = theano.shared(numpy.zeros(param_i.get_value().shape, dtype=theano.config.floatX)) # 1st moment
self.shared.append(mparam_i)
vparam_i = theano.shared(numpy.zeros(param_i.get_value().shape, dtype=theano.config.floatX)) # 2nd moment
self.shared.append(vparam_i)
m = beta1_t * mparam_i + (1. - beta1_t) * grad_i # new value for 1st moment estimate
v = beta2 * vparam_i + (1. - beta2) * T.sqr(grad_i) # new value for 2nd moment estimate
m_unbiased = m / (1. - beta1**t) # bias corrected 1st moment estimate
v_unbiased = v / (1. - beta2**t) # bias corrected 2nd moment estimate
w = param_i - (learning_rate * m_unbiased) / (T.sqrt(v_unbiased) + epsilon) # new parameter values
self.updates.append((mparam_i, m))
self.updates.append((vparam_i, v))
self.updates.append((param_i, w))
self.updates.append((t, t + 1.))
return self.updates
def RMSProp(self, learning_rate=0.01, decay=0.9, epsilon=1.0 / 100.):
"""
RMSProp of Tieleman et al.
:param learning_rate: learning rate
:param decay: decay rate of gradient history
:param epsilon: gradient clip
:return: update
"""
for param_i, grad_i in zip(self.params, self.grads):
# Accumulate gradient
msg = theano.shared(numpy.zeros(param_i.get_value().shape, dtype=theano.config.floatX))
self.shared.append(msg)
new_mean_squared_grad = (decay * msg + (1 - decay) * T.sqr(grad_i))
# Compute update
rms_grad_t = T.sqrt(new_mean_squared_grad)
rms_grad_t = T.maximum(rms_grad_t, epsilon)
delta_x_t = -learning_rate * grad_i / rms_grad_t
# Apply update
self.updates.append((param_i, param_i + delta_x_t))
self.updates.append((msg, new_mean_squared_grad))
return self.updates
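def example_adam_updates(cost, params, learning_rate=0.0002):
    """
    Usage sketch added for illustration (not part of the original DeepPrior code):
    builds ADAM updates for a scalar theano cost and a list of shared parameters.
    The returned list can be passed to theano.function(..., updates=updates).
    :param cost: scalar theano expression to minimize
    :param params: list of theano shared variables to update
    :param learning_rate: ADAM step size
    :return: list of (shared variable, update expression) tuples
    """
    grads = T.grad(cost, params)
    optimizer = Optimizer(grads, params)
    return optimizer.ADAM(learning_rate=learning_rate)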
|
gpl-3.0
| -5,467,339,482,598,829,000
| 39.076923
| 121
| 0.641288
| false
| 3.546899
| false
| false
| false
|
synw/django-mqueue
|
mqueue/signals.py
|
1
|
2131
|
from __future__ import print_function
from mqueue.models import MEvent
from mqueue.utils import get_user, get_url, get_admin_url, get_object_name
def mmessage_create(sender, instance, created, **kwargs):
if created is True:
# try to get the user
user = get_user(instance)
# try to get the object name
obj_name = get_object_name(instance, user)
# try to get the admin url
admin_url = get_admin_url(instance)
event_class = instance.__class__.__name__ + ' created'
# create event
MEvent.objects.create(
model=instance.__class__,
name=obj_name,
obj_pk=instance.pk,
user=user,
url=get_url(instance),
admin_url=admin_url,
event_class=event_class,
)
return
def mmessage_delete(sender, instance, **kwargs):
# try to get the user
user = get_user(instance)
# try to get the object name
obj_name = get_object_name(instance, user)
event_class = instance.__class__.__name__ + ' deleted'
# create event
MEvent.objects.create(
model=instance.__class__,
name=obj_name,
obj_pk=instance.pk,
user=user,
event_class=event_class,
)
return
def mmessage_save(sender, instance, created, **kwargs):
if created is False:
# try to get the user
user = get_user(instance)
if 'name' not in kwargs.keys():
# try to get the object name
obj_name = get_object_name(instance, user)
else:
            obj_name = kwargs['name']
# try to get the admin url
admin_url = get_admin_url(instance)
event_str = ' edited'
if created:
event_str = ' created'
event_class = instance.__class__.__name__ + event_str
# create event
MEvent.objects.create(
model=instance.__class__,
name=obj_name,
obj_pk=instance.pk,
user=user,
url=get_url(instance),
admin_url=admin_url,
event_class=event_class,
)
return
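def register_mevent_signals(model_class):
    """
    Wiring sketch added for illustration (not part of the original module):
    connects the handlers above to a model's signals, typically from an app's
    ready() method. The model class to track is supplied by the caller.
    """
    from django.db.models.signals import post_save, post_delete
    post_save.connect(mmessage_create, sender=model_class)
    post_delete.connect(mmessage_delete, sender=model_class)
    return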
|
mit
| 5,622,654,903,332,074,000
| 29.442857
| 74
| 0.56077
| false
| 3.895795
| false
| false
| false
|
rodgzilla/fractal_GAN
|
src/mandel_test.py
|
1
|
5720
|
import sys
import pygame
from pygame import gfxdraw
from pygame import Color
import cmath
import random
pygame.init()
# The small size will help for future generation.
unit = 75
width = 3 * unit
height = 2 * unit
# nb_sections represents the number of slices along each axis we are going
# to consider for the zoom. The number of sections considered will be
# nb_sections * nb_sections
nb_sections = 10
section_width = int(width / nb_sections)
section_height = int(height / nb_sections)
# We select the region on which we zoom amongst the top_select brightest sections
top_select = 20
def convert_pixel_complex(x, y, re_min, re_max, im_min, im_max):
"""
Converts pixel coordinates to complex plane coordinates. The re and
im arguments indicates the part of the complex plane represented by the window.
"""
re = x * (re_max - re_min) / width + re_min
im = y * (im_max - im_min) / height + im_min
return complex(re, im)
def draw_mandel(window, sequence, max_iter, re_min = -2, re_max = 1, im_min = -1, im_max = 1):
"""
Computes the mandelbrot set on a given part of the complex plane.
"""
screen_array = [[0] * height for _ in range(width)]
# For every pixel of the screen
for x in range(width):
for y in range(height):
# Compute the associated complex number
c = convert_pixel_complex(x, y, re_min, re_max, im_min, im_max)
# Then, compute max_iter element of sequence function with
# c as initial value
z = c
for i in range(max_iter):
z = sequence(z, c)
# If we detect that the sequence diverges
if (z.real * z.real + z.imag * z.imag) > 4:
# We draw a pixel which intensity corresponds to
# the number of iterations we ran before detecting
# the divergence.
color_ratio = int((i * 255.) / max_iter)
gfxdraw.pixel(window, x, y, Color(color_ratio, color_ratio, color_ratio, 255))
screen_array[x][y] = color_ratio
break
else:
# If we did not detect a divergence in max_iter steps,
# we consider that the sequence does not diverge and
# draw a black pixel.
gfxdraw.pixel(window, x, y, Color(0,0,0,255))
pygame.display.flip()
return screen_array
def sec_number_to_indices(w_sec, h_sec):
"""
Converts sections indices into window coordinates.
"""
x_min = w_sec * section_width
x_max = (w_sec + 1) * section_width
y_min = h_sec * section_height
y_max = (h_sec + 1) * section_height
return x_min, x_max, y_min, y_max
def section_intensity(screen_array, weight, w_sec, h_sec):
"""
Computes the weighted average of the pixel intensity after
computing the Mandelbrot set.
"""
x_min, x_max, y_min, y_max = sec_number_to_indices(w_sec, h_sec)
s = sum((weight(screen_array[x][y]) for x in range(x_min, x_max) for y in range(y_min, y_max)))
norm = section_width * section_height
return s / norm
def sections_to_intensities(screen_array, weight = lambda x: x):
"""
Creates a dictionary which associates sections indices to their
weighted average pixel intensity.
"""
sec_to_int = {}
for w_sec in range(nb_sections):
for h_sec in range(nb_sections):
sec_to_int[(w_sec, h_sec)] = section_intensity(screen_array, weight, w_sec, h_sec)
return sec_to_int
def sort_section_intensities(sec_to_int):
"""
Sorts the sections indices according to their intensities in
decreasing order.
"""
return sorted(sec_to_int.keys(), key = sec_to_int.get, reverse = True)
def generate_fractal_sequence(window, sequence = lambda z, c: z**2 + c, seq_len = 8, top_select = 5):
"""
    Generates a sequence of successive zooms on the Mandelbrot set. seq_len
    pictures will be generated and the zoom will choose amongst the
top_select most intense sections.
"""
tl = complex(-2, 1) # top left complex number
br = complex(1, -1) # bottom right complex number
for i in range(seq_len):
min_re, max_re = tl.real, br.real
min_im, max_im = br.imag, tl.imag
# Experimental formula to have greater max_iter when we zoom
max_iter = 50 + i ** 3 * 16
print('iteration', i + 1)
print('min_re, max_re = ', min_re, ',', max_re)
print('min_im, max_im = ', min_im, ',', max_im)
print('max_iter', max_iter)
# Draw the fractal in the window, divide the result in
        # sections and compute their intensities. Choose one of the
        # most intense sections and update the top left and bottom
# right complex numbers to zoom on this section.
screen_array = draw_mandel(window, sequence, max_iter, min_re, max_re, min_im, max_im)
sec_to_int = sections_to_intensities(screen_array)
w_sec_max, h_sec_max = random.choice(sort_section_intensities(sec_to_int)[:top_select])
x_min, x_max, y_min, y_max = sec_number_to_indices(w_sec_max, h_sec_max)
tl = convert_pixel_complex(x_min, y_min, min_re, max_re, min_im, max_im)
br = convert_pixel_complex(x_max, y_max, min_re, max_re, min_im, max_im)
if __name__ == '__main__':
window = pygame.display.set_mode((width, height))
generate_fractal_sequence(window, seq_len = 6)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit(0)
|
gpl-3.0
| 5,040,484,455,637,613,000
| 37.648649
| 109
| 0.597203
| false
| 3.481436
| false
| false
| false
|
hsoft/pdfmasher
|
ebooks/compression/palmdoc.py
|
1
|
1892
|
# Copyright 2008, Kovid Goyal <kovid at kovidgoyal.net>
# Copyright 2013 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPL v3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/gplv3_license
from struct import pack
def compress_doc(data):
out = bytearray()
i = 0
ldata = len(data)
while i < ldata:
if i > 10 and (ldata - i) > 10:
chunk = b''
match = -1
for j in range(10, 2, -1):
chunk = data[i:i+j]
try:
match = data.rindex(chunk, 0, i)
except ValueError:
continue
if (i - match) <= 2047:
break
match = -1
if match >= 0:
n = len(chunk)
m = i - match
code = 0x8000 + ((m << 3) & 0x3ff8) + (n - 3)
out += pack(b'>H', code)
i += n
continue
och = data[i]
ch = bytes([och])
i += 1
if ch == b' ' and (i + 1) < ldata:
onch = data[i]
if onch >= 0x40 and onch < 0x80:
out += pack(b'>B', onch ^ 0x80)
i += 1
continue
if och == 0 or (och > 8 and och < 0x80):
out += ch
else:
j = i
binseq = [ch]
while j < ldata and len(binseq) < 8:
och = data[j]
ch = bytes([och])
if och == 0 or (och > 8 and och < 0x80):
break
binseq.append(ch)
j += 1
out += pack(b'>B', len(binseq))
out += b''.join(binseq)
i += len(binseq) - 1
return bytes(out)
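def _example_compress():
    """
    Usage sketch added for illustration (not part of the original module):
    compresses a small block of repetitive ASCII text. PalmDoc compression
    operates on bytes, and repeated phrases are stored as back-references,
    so the packed output is shorter than the input.
    """
    data = b'the quick brown fox jumps over the lazy dog. ' * 4
    packed = compress_doc(data)
    return len(data), len(packed)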
|
gpl-3.0
| -5,296,342,882,972,843,000
| 31.067797
| 91
| 0.426004
| false
| 3.673786
| false
| false
| false
|
markalansmith/draftmim
|
spider/nbadraft_net/nbadraft_net/spiders/nbadraft.py
|
1
|
2465
|
# -*- coding: utf-8 -*-
import scrapy
import urlparse
from urllib2 import quote
from nbadraft_net import PlayerItem
class NbadraftSpider(scrapy.Spider):
name = "nbadraft"
allowed_domains = ["nbadraft.net"]
start_urls = (
'http://www.nbadraft.net/2016mock_draft',
)
def parse(self, response):
selector = scrapy.Selector(response)
updated_on = selector.xpath('//p[@class="updated"]/text()').extract()[0]
mock_draft_one = selector.xpath('//div[@id="nba_consensus_mock1"]')
mock_draft_two = selector.xpath('//div[@id="nba_consensus_mock2"]')
for mock_draft in [mock_draft_one, mock_draft_two]:
player_rows = mock_draft.xpath('.//table/tbody/tr')
for player_row in player_rows:
player_item = PlayerItem()
player_info = player_row.xpath('./td/text()').extract()
player_name = player_row.xpath('./td/a/text()').extract()[0]
player_page = player_row.xpath('./td/a/@href').extract()[-1]
player_page_url = urlparse.urljoin(response.url, player_page.strip())
player_page_request = scrapy.Request(player_page_url, callback=self.parse_player_page_request)
player_mock_draft_pos = int(player_info[0])
player_height = player_info[2]
player_weight = int(player_info[3])
player_position = player_info[4]
player_school = player_info[5]
player_class = player_info[6]
player_item['name'] = player_name
self.logger.info("PlayerInfo: %s, Player Name: %s, Player Page: %s" % (str(player_info), player_name, str(player_page_request),))
yield player_page_request
def parse_player_page_request(self, response):
selector = scrapy.Selector(response)
player_stats = selector.xpath('//div[@id="nba_player_stats"]')
player_img_src = player_stats.xpath('./img/@src').extract()
player_attribute_scores = selector.xpath('//p[@class="nba_player_attrib_score"]/text()').extract()
player_overall_score = selector.xpath('//p[@class="whitebox"]/text()').extract()
player_notes = selector.xpath('//div[@id="nbap_content_bottom"]/p/text()').extract()
player_videos = selector.xpath('//div[@id="nbap_content_bottom"]/p/iframe/@src').extract()
return
|
apache-2.0
| -2,331,472,478,925,752,000
| 41.5
| 145
| 0.586207
| false
| 3.717949
| false
| false
| false
|
ngageoint/scale
|
scale/recipe/configuration/json/recipe_config_v6.py
|
1
|
8823
|
"""Manages the v6 recipe configuration schema"""
from __future__ import unicode_literals
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from job.configuration.mount import HostMountConfig, VolumeMountConfig
from job.execution.configuration.volume import HOST_TYPE, VOLUME_TYPE
from recipe.configuration.configuration import DEFAULT_PRIORITY, RecipeConfiguration
from recipe.configuration.exceptions import InvalidRecipeConfiguration
SCHEMA_VERSION = '7'
SCHEMA_VERSIONS = ['6', '7']
RECIPE_CONFIG_SCHEMA = {
'type': 'object',
'required': ['output_workspaces'],
'additionalProperties': False,
'properties': {
'version': {
'description': 'Version of the recipe configuration schema',
'type': 'string',
},
'mounts': {
'description': 'Defines volumes to use for the jobs\' mounts',
'type': 'object',
'additionalProperties': {
'$ref': '#/definitions/mount'
},
},
'output_workspaces': {
'description': 'Defines workspaces to use for the jobs\' output files',
'type': 'object',
'required': ['default', 'outputs'],
'additionalProperties': False,
'properties': {
'default': {
'description': 'Defines the jobs\' default output workspace',
'type': 'string',
},
'outputs': {
'description': 'Defines a workspace for each given output name',
'type': 'object',
'additionalProperties': {
'type': 'string',
},
},
},
},
'priority': {
'description': 'Defines the jobs\' priority',
'type': 'integer',
'minimum': 1,
},
'settings': {
'description': 'Defines values to use for the jobs\' settings',
'type': 'object',
'additionalProperties': {
'type': 'string',
},
},
},
'definitions': {
'mount': {
'oneOf': [{
'type': 'object',
'description': 'A configuration for a host mount',
'required': ['type', 'host_path'],
'additionalProperties': False,
'properties': {
'type': {
'enum': ['host'],
},
'host_path': {
'type': 'string',
},
},
}, {
'type': 'object',
'description': 'A configuration for a volume mount',
'required': ['type', 'driver', 'driver_opts'],
'additionalProperties': False,
'properties': {
'type': {
'enum': ['volume'],
},
'driver': {
'type': 'string',
},
'driver_opts': {
'type': 'object',
'additionalProperties': {
'type': 'string',
},
},
},
}],
},
},
}
def convert_config_to_v6_json(config):
"""Returns the v6 recipe configuration JSON for the given configuration
:param config: The recipe configuration
:type config: :class:`recipe.configuration.configuration.RecipeConfiguration`
:returns: The v6 recipe configuration JSON
:rtype: :class:`recipe.configuration.json.recipe_config_v6.RecipeConfigurationV6`
"""
mounts_dict = {}
for mount_config in config.mounts.values():
if mount_config.mount_type == HOST_TYPE:
mounts_dict[mount_config.name] = {'type': 'host', 'host_path': mount_config.host_path}
elif mount_config.mount_type == VOLUME_TYPE:
vol_dict = {'type': 'volume', 'driver_opts': mount_config.driver_opts}
if mount_config.driver:
vol_dict['driver'] = mount_config.driver
mounts_dict[mount_config.name] = vol_dict
workspace_dict = {'outputs': config.output_workspaces}
if config.default_output_workspace:
workspace_dict['default'] = config.default_output_workspace
config_dict = {'version': SCHEMA_VERSION, 'mounts': mounts_dict, 'output_workspaces': workspace_dict,
'priority': config.priority, 'settings': config.settings}
return RecipeConfigurationV6(config=config_dict, do_validate=False)
class RecipeConfigurationV6(object):
"""Represents a v6 recipe configuration JSON"""
def __init__(self, config=None, existing=None, do_validate=False):
"""Creates a v6 job configuration JSON object from the given dictionary
:param config: The recipe configuration JSON dict
:type config: dict
:param existing: Existing RecipeConfiguration to use for default values for unspecified fields
:type existing: RecipeConfigurationV6
:param do_validate: Whether to perform validation on the JSON schema
:type do_validate: bool
:raises :class:`recipe.configuration.exceptions.InvalidRecipeConfiguration`: If the given configuration is invalid
"""
if not config:
config = {}
self._config = config
self._existing_config = None
if existing:
self._existing_config = existing._config
if 'version' not in self._config:
self._config['version'] = SCHEMA_VERSION
if self._config['version'] not in SCHEMA_VERSIONS:
msg = '%s is an unsupported version number'
raise InvalidRecipeConfiguration('INVALID_VERSION', msg % self._config['version'])
self._populate_default_values()
try:
if do_validate:
validate(self._config, RECIPE_CONFIG_SCHEMA)
except ValidationError as ex:
raise InvalidRecipeConfiguration('INVALID_CONFIGURATION', 'Invalid configuration: %s' % unicode(ex))
def get_configuration(self):
"""Returns the recipe configuration represented by this JSON
:returns: The recipe configuration
:rtype: :class:`recipe.configuration.configuration.RecipeConfiguration`:
"""
config = RecipeConfiguration()
for name, mount_dict in self._config['mounts'].items():
if mount_dict['type'] == 'host':
config.add_mount(HostMountConfig(name, mount_dict['host_path']))
elif mount_dict['type'] == 'volume':
config.add_mount(VolumeMountConfig(name, mount_dict['driver'], mount_dict['driver_opts']))
default_workspace = self._config['output_workspaces']['default']
if default_workspace:
config.default_output_workspace = default_workspace
for output, workspace in self._config['output_workspaces']['outputs'].items():
config.add_output_workspace(output, workspace)
config.priority = self._config['priority']
for name, value in self._config['settings'].items():
config.add_setting(name, value)
return config
def get_dict(self):
"""Returns the internal dictionary
:returns: The internal dictionary
:rtype: dict
"""
return self._config
def _populate_default_values(self):
"""Populates any missing required values with defaults
"""
if 'mounts' not in self._config:
self._config['mounts'] = self._existing_config['mounts'] if self._existing_config else {}
for mount_dict in self._config['mounts'].values():
if mount_dict['type'] == 'volume':
if 'driver' not in mount_dict:
mount_dict['driver'] = ''
if 'driver_opts' not in mount_dict:
mount_dict['driver_opts'] = {}
if 'output_workspaces' not in self._config:
self._config['output_workspaces'] = self._existing_config['output_workspaces'] if self._existing_config else {}
if 'default' not in self._config['output_workspaces']:
self._config['output_workspaces']['default'] = ''
if 'outputs' not in self._config['output_workspaces']:
self._config['output_workspaces']['outputs'] = {}
if 'priority' not in self._config:
self._config['priority'] = self._existing_config['priority'] if self._existing_config else DEFAULT_PRIORITY
if 'settings' not in self._config:
self._config['settings'] = self._existing_config['settings'] if self._existing_config else {}
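def _example_minimal_configuration():
    """Usage sketch added for illustration (not part of the original module).
    Builds a small v6 configuration JSON (the workspace names are hypothetical),
    validates it against RECIPE_CONFIG_SCHEMA and converts it into a
    RecipeConfiguration object.
    :returns: The recipe configuration
    :rtype: :class:`recipe.configuration.configuration.RecipeConfiguration`
    """
    config_dict = {
        'version': SCHEMA_VERSION,
        'output_workspaces': {
            'default': 'raw-workspace',
            'outputs': {'thumbnail': 'products-workspace'},
        },
        'priority': 100,
    }
    config_json = RecipeConfigurationV6(config=config_dict, do_validate=True)
    return config_json.get_configuration()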
|
apache-2.0
| 5,617,838,189,864,306,000
| 37.030172
| 123
| 0.557067
| false
| 4.7056
| true
| false
| false
|
ekansa/open-context-py
|
opencontext_py/apps/ldata/arachne/api.py
|
1
|
5134
|
import json
import requests
from urllib.parse import urlparse, parse_qs
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.libs.generalapi import GeneralAPI
class ArachneAPI():
""" Interacts with Arachne """
ARACHNE_SEARCH = 'arachne.dainst.org/search'
DEFAULT_API_BASE_URL = 'https://arachne.dainst.org/data/search'
DEFAULT_HTML_BASE_URL = 'https://arachne.dainst.org/search'
DEFAULT_IMAGE_BASE_URL = 'https://arachne.dainst.org/data/image/height/'
DEFAULT_ENTITY_BASE_URL = 'https://arachne.dainst.org/entity/'
DEFAULT_IMAGE_HEIGHT = 120
def __init__(self):
self.arachne_json_r = False
self.arachne_json_url = False
self.arachne_html_url = False
self.filter_by_images = True
self.image_height = self.DEFAULT_IMAGE_HEIGHT
self.result_count = False
self.results = False
def get_keyword_results(self, keyword):
""" sends JSON request, makes list of oc_object entities if
search finds entities
"""
self.get_keyword_search_json(keyword)
self.get_result_metadata()
self.generate_results_list()
return self.results
def get_results_from_search_url(self, search_url):
""" parses a search URL, then makes a lost of
oc_object entities if search finds entities
"""
self.get_json_from_search_url(search_url)
self.get_result_metadata()
self.generate_results_list()
return self.results
def get_result_metadata(self):
""" gets metadata about the search result """
if self.arachne_json_r is not False:
if 'size' in self.arachne_json_r:
self.result_count = self.arachne_json_r['size']
def generate_results_list(self):
""" makes a list of results with full URLs """
if self.arachne_json_r is not False:
if 'entities' in self.arachne_json_r:
self.results = []
for entity in self.arachne_json_r['entities']:
oc_obj = LastUpdatedOrderedDict()
oc_obj['id'] = self.generate_entity_url(entity['entityId'])
oc_obj['slug'] = oc_obj['id']
if 'title' in entity:
oc_obj['label'] = entity['title']
elif 'subtitle' in entity:
oc_obj['label'] = entity['subtitle']
else:
oc_obj['label'] = '[No Arachne Label]'
oc_obj['oc-gen:thumbnail-uri'] = self.generate_thumbnail_image_src(entity['thumbnailId'])
oc_obj['type'] = 'oc-gen:image'
self.results.append(oc_obj)
def generate_entity_url(self, entity_id):
"""
makes a URL for the entity
"""
url = self.DEFAULT_ENTITY_BASE_URL + str(entity_id)
return url
def generate_thumbnail_image_src(self, thumb_id):
"""
makes a URL for the thumbnail image bitmap file
"""
url = self.DEFAULT_IMAGE_BASE_URL + str(thumb_id)
url += '?height=' + str(self.image_height)
return url
def get_json_from_search_url(self, search_url):
""" gets json data from Arachne by first parsing
a search url and then converting that into a
keyword search
"""
self.arachne_html_url = search_url
payload = parse_qs(urlparse(search_url).query)
print('payload: ' + str(payload))
json_r = self.get_arachne_json(payload)
return json_r
def get_keyword_search_json(self, keyword):
"""
gets json data from Arachne in response to a keyword search
"""
payload = {}
payload['q'] = keyword
json_r = self.get_arachne_json(payload)
return json_r
def get_arachne_json(self, payload):
"""
executes a search for json data from arachne
"""
if isinstance(payload, dict):
if self.filter_by_images:
payload['fq'] = 'facet_image:"ja"'
url = self.DEFAULT_API_BASE_URL
try:
gapi = GeneralAPI()
r = requests.get(url,
params=payload,
timeout=240,
headers=gapi.client_headers)
print('r url: ' + r.url)
self.set_arachne_search_urls(r.url)
r.raise_for_status()
json_r = r.json()
except:
json_r = False
else:
json_r = False
self.arachne_json_r = json_r
return json_r
def set_arachne_search_urls(self, arachne_json_url):
""" Sets URLs for Arachne searches, JSON + HTML """
self.arachne_json_url = arachne_json_url
if not isinstance(self.arachne_html_url, str):
self.arachne_html_url = arachne_json_url.replace(self.DEFAULT_API_BASE_URL,
self.DEFAULT_HTML_BASE_URL)
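def _example_keyword_search(keyword='athena'):
    """ Usage sketch added for illustration (not part of the original module):
        runs a keyword search against the Arachne API (requires network
        access) and returns the hit count plus the entity dicts built by
        generate_results_list. The keyword is arbitrary.
    """
    api = ArachneAPI()
    api.filter_by_images = True # only keep entities that have images
    results = api.get_keyword_results(keyword)
    # each result dict carries 'id', 'slug', 'label', 'oc-gen:thumbnail-uri' and 'type'
    return api.result_count, results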
|
gpl-3.0
| 7,220,749,312,685,747,000
| 37.313433
| 109
| 0.553954
| false
| 3.880574
| false
| false
| false
|
tensorflow/graphics
|
tensorflow_graphics/image/transformer.py
|
1
|
7541
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""This module implements image transformation functionalities."""
import enum
from typing import Optional
from six.moves import range
import tensorflow as tf
from tensorflow_addons import image as tfa_image
from tensorflow_graphics.geometry.representation import grid
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
from tensorflow_graphics.util import type_alias
class ResamplingType(enum.Enum):
NEAREST = 0
BILINEAR = 1
class BorderType(enum.Enum):
ZERO = 0
DUPLICATE = 1
class PixelType(enum.Enum):
INTEGER = 0
HALF_INTEGER = 1
def sample(image: type_alias.TensorLike,
warp: type_alias.TensorLike,
resampling_type: ResamplingType = ResamplingType.BILINEAR,
border_type: BorderType = BorderType.ZERO,
pixel_type: PixelType = PixelType.HALF_INTEGER,
name: Optional[str] = "sample") -> tf.Tensor:
"""Samples an image at user defined coordinates.
Note:
The warp maps target to source. In the following, A1 to An are optional
batch dimensions.
Args:
image: A tensor of shape `[B, H_i, W_i, C]`, where `B` is the batch size,
`H_i` the height of the image, `W_i` the width of the image, and `C` the
number of channels of the image.
warp: A tensor of shape `[B, A_1, ..., A_n, 2]` containing the x and y
coordinates at which sampling will be performed. The last dimension must
be 2, representing the (x, y) coordinate where x is the index for width
and y is the index for height.
resampling_type: Resampling mode. Supported values are
`ResamplingType.NEAREST` and `ResamplingType.BILINEAR`.
border_type: Border mode. Supported values are `BorderType.ZERO` and
`BorderType.DUPLICATE`.
pixel_type: Pixel mode. Supported values are `PixelType.INTEGER` and
`PixelType.HALF_INTEGER`.
name: A name for this op. Defaults to "sample".
Returns:
Tensor of sampled values from `image`. The output tensor shape
is `[B, A_1, ..., A_n, C]`.
Raises:
ValueError: If `image` has rank != 4. If `warp` has rank < 2 or its last
dimension is not 2. If `image` and `warp` batch dimension does not match.
"""
with tf.name_scope(name):
image = tf.convert_to_tensor(value=image, name="image")
warp = tf.convert_to_tensor(value=warp, name="warp")
shape.check_static(image, tensor_name="image", has_rank=4)
shape.check_static(
warp,
tensor_name="warp",
has_rank_greater_than=1,
has_dim_equals=(-1, 2))
shape.compare_batch_dimensions(
tensors=(image, warp), last_axes=0, broadcast_compatible=False)
if pixel_type == PixelType.HALF_INTEGER:
warp -= 0.5
if resampling_type == ResamplingType.NEAREST:
warp = tf.math.round(warp)
if border_type == BorderType.DUPLICATE:
image_size = tf.cast(tf.shape(input=image)[1:3], dtype=warp.dtype)
height, width = tf.unstack(image_size, axis=-1)
warp_x, warp_y = tf.unstack(warp, axis=-1)
warp_x = tf.clip_by_value(warp_x, 0.0, width - 1.0)
warp_y = tf.clip_by_value(warp_y, 0.0, height - 1.0)
warp = tf.stack((warp_x, warp_y), axis=-1)
return tfa_image.resampler(image, warp)
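def _example_identity_sample(image: type_alias.TensorLike) -> tf.Tensor:
  """Usage sketch added for illustration (not part of the original module).

  Resamples a `[1, H, W, C]` float image at its own pixel coordinates, which
  reproduces the input. The warp stacks x and y in the last dimension, with x
  indexing width and y indexing height, as `sample` expects.
  """
  image = tf.convert_to_tensor(value=image)
  height, width = tf.unstack(tf.shape(input=image)[1:3])
  x, y = tf.meshgrid(tf.range(width), tf.range(height))
  warp = tf.cast(tf.stack((x, y), axis=-1), image.dtype)[tf.newaxis, ...]
  return sample(image, warp, pixel_type=PixelType.INTEGER)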
def perspective_transform(
image: type_alias.TensorLike,
transform_matrix: type_alias.TensorLike,
output_shape: Optional[type_alias.TensorLike] = None,
resampling_type: ResamplingType = ResamplingType.BILINEAR,
border_type: BorderType = BorderType.ZERO,
pixel_type: PixelType = PixelType.HALF_INTEGER,
name: Optional[str] = "perspective_transform",
) -> tf.Tensor:
"""Applies a projective transformation to an image.
The projective transformation is represented by a 3 x 3 matrix
[[a0, a1, a2], [b0, b1, b2], [c0, c1, c2]], mapping a point `[x, y]` to a
transformed point
`[x', y'] = [(a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k]`, where
`k = c0 x + c1 y + c2`.
Note:
The transformation matrix maps target to source by transforming output
points to input points.
Args:
image: A tensor of shape `[B, H_i, W_i, C]`, where `B` is the batch size,
`H_i` the height of the image, `W_i` the width of the image, and `C` the
number of channels of the image.
transform_matrix: A tensor of shape `[B, 3, 3]` containing projective
transform matrices. The transformation maps target to source by
transforming output points to input points.
    output_shape: The height `H_o` and width `W_o` output dimensions after the
transform. If None, output is the same size as input image.
resampling_type: Resampling mode. Supported values are
`ResamplingType.NEAREST` and `ResamplingType.BILINEAR`.
border_type: Border mode. Supported values are `BorderType.ZERO` and
`BorderType.DUPLICATE`.
pixel_type: Pixel mode. Supported values are `PixelType.INTEGER` and
`PixelType.HALF_INTEGER`.
name: A name for this op. Defaults to "perspective_transform".
Returns:
A tensor of shape `[B, H_o, W_o, C]` containing transformed images.
Raises:
ValueError: If `image` has rank != 4. If `transform_matrix` has rank < 3 or
its last two dimensions are not 3. If `image` and `transform_matrix` batch
dimension does not match.
"""
with tf.name_scope(name):
image = tf.convert_to_tensor(value=image, name="image")
transform_matrix = tf.convert_to_tensor(
value=transform_matrix, name="transform_matrix")
output_shape = tf.shape(
input=image)[-3:-1] if output_shape is None else tf.convert_to_tensor(
value=output_shape, name="output_shape")
shape.check_static(image, tensor_name="image", has_rank=4)
shape.check_static(
transform_matrix,
tensor_name="transform_matrix",
has_rank=3,
has_dim_equals=((-1, 3), (-2, 3)))
shape.compare_batch_dimensions(
tensors=(image, transform_matrix),
last_axes=0,
broadcast_compatible=False)
dtype = image.dtype
zero = tf.cast(0.0, dtype)
height, width = tf.unstack(output_shape, axis=-1)
warp = grid.generate(
starts=(zero, zero),
stops=(tf.cast(width, dtype) - 1.0, tf.cast(height, dtype) - 1.0),
nums=(width, height))
warp = tf.transpose(a=warp, perm=[1, 0, 2])
if pixel_type == PixelType.HALF_INTEGER:
warp += 0.5
padding = [[0, 0] for _ in range(warp.shape.ndims)]
padding[-1][-1] = 1
warp = tf.pad(
tensor=warp, paddings=padding, mode="CONSTANT", constant_values=1.0)
warp = warp[..., tf.newaxis]
transform_matrix = transform_matrix[:, tf.newaxis, tf.newaxis, ...]
warp = tf.linalg.matmul(transform_matrix, warp)
warp = warp[..., 0:2, 0] / warp[..., 2, :]
return sample(image, warp, resampling_type, border_type, pixel_type)
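def _example_shift_image_right(image: type_alias.TensorLike) -> tf.Tensor:
  """Usage sketch added for illustration (not part of the original module).

  Shifts the content of a `[1, H, W, C]` float image 10 pixels to the right
  with `perspective_transform`. The matrix maps output points back to input
  points, so output pixel (x, y) samples the input at (x - 10, y); the
  duplicated border fills the uncovered strip on the left.
  """
  image = tf.convert_to_tensor(value=image)
  transform_matrix = tf.constant([[[1.0, 0.0, -10.0],
                                   [0.0, 1.0, 0.0],
                                   [0.0, 0.0, 1.0]]], dtype=image.dtype)
  return perspective_transform(image, transform_matrix,
                               border_type=BorderType.DUPLICATE)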
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
|
apache-2.0
| -7,227,354,629,905,629,000
| 36.331683
| 79
| 0.670335
| false
| 3.457588
| false
| false
| false
|
buscarini/meta
|
templates/model/platforms/sql/Platform.py
|
1
|
1689
|
import sys
import os
import json
from meta.MetaProcessor import MetaProcessor
class Platform(MetaProcessor):
"""docstring for Platform"""
def preprocess_property(self,property,hash,hashes):
"""docstring for preprocess_property"""
property['_camelcase_'] = self.stringUtils.camelcase(str(property['name']))
property['_capitalized_'] = self.stringUtils.capitalize(str(property['name']))
if 'default' in property:
property['default'] = self.globalPlatform.platformValueForValue(property['default'])
type = property['type']
property['type_' + type] = True
platformType = self.globalPlatform.platformTypeForType(type)
if platformType!=None:
property['type'] = platformType
else:
print("Error: unknown property type: " + type)
sys.exit()
def preprocess(self,hash,hashes):
if hash!=None and 'properties' in hash:
i=0
properties = hash['properties']
for property in properties:
self.preprocess_property(property,hash,hashes)
i=i+1
self.preprocessList(properties)
if hash!=None and 'primaryKeys' in hash:
self.preprocessList(hash['primaryKeys'])
def finalFileName(self,fileName,hash):
"""docstring for finalFileName"""
entityName = None
if hash!=None and 'entityName' in hash:
entityName = hash['entityName']
if (entityName):
fileName = fileName.replace("entity",entityName)
return fileName
|
mit
| -3,116,892,086,885,498,400
| 32.8
| 96
| 0.58733
| false
| 4.825714
| false
| false
| false
|
chrsbats/kvstore
|
kvstore/fs.py
|
1
|
2352
|
from __future__ import absolute_import
import traceback
import os, errno
import shutil
from .signal import interrupt_protect
class FileSystemAdapter(object):
def __init__(self, path, **kwargs):
# expand ~ or we'll end up creating a /~ directory
# abspath doesn't do this for us
self.path = os.path.abspath(os.path.expanduser(path))
self.make_sure_path_exists(self.path)
def make_sure_path_exists(self, key):
try:
os.makedirs(key)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def key_path(self, key):
key = key
if key[0] == '/':
key = key[1:]
return os.path.join(self.path, key)
def get(self, key):
full_path = self.key_path(key)
try:
with open(full_path,'r') as f:
return f.read()
except IOError as e:
if e.errno == errno.ENOENT:
raise KeyError('{}: {}'.format(key,str(e)))
raise
@interrupt_protect
def put(self, key, data, **kwargs):
full_path = self.key_path(key)
directory = os.path.dirname(full_path)
self.make_sure_path_exists(directory)
with open(full_path,'w') as f:
f.write(data)
def delete(self, key):
full_path = self.key_path(key)
try:
os.remove(full_path)
except OSError:
# doesn't exist
pass
def exists(self, key):
full_path = self.key_path(key)
if os.path.isfile(full_path):
try:
with open(full_path,'r') as f:
return True
except IOError:
return False
else:
return False
def list(self, key='/'):
full_path = self.key_path(key)
for directory, subdirs, files in os.walk(full_path):
for file in files:
if file[0] == '.':
continue
path = os.path.join(directory, file)
# remove our directory
path = path.split(self.path)[1]
yield path
def drop_all(self):
# delete the directory and then recreate it
shutil.rmtree(self.path, ignore_errors=True)
self.make_sure_path_exists(self.path)
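def _example_roundtrip(path='~/.kvstore-example'):
    """
    Usage sketch added for illustration (not part of the original module):
    writes a value under a key, reads it back, lists the stored keys and then
    wipes the store. The path is a hypothetical scratch directory.
    """
    store = FileSystemAdapter(path)
    store.put('/greetings/hello', 'world')
    value = store.get('/greetings/hello') # -> 'world'
    keys = list(store.list('/greetings')) # -> ['/greetings/hello']
    store.drop_all()
    return value, keys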
|
mit
| 6,117,030,195,128,649,000
| 28.772152
| 61
| 0.528912
| false
| 4.013652
| false
| false
| false
|
UncleBarney/ochothon
|
images/portal/resources/toolset/toolset/commands/grep.py
|
1
|
2581
|
#
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
from toolset.io import fire, run
from toolset.tool import Template
#: Our ochopod logger.
logger = logging.getLogger('ochopod')
def go():
class _Tool(Template):
help = \
'''
Displays high-level information for the specified cluster(s).
'''
tag = 'grep'
def customize(self, parser):
parser.add_argument('clusters', type=str, nargs='+', help='1+ clusters (can be a glob pattern, e.g foo*)')
parser.add_argument('-j', '--json', action='store_true', help='switch for json output')
def body(self, args, proxy):
outs = {}
for token in args.clusters:
def _query(zk):
replies = fire(zk, token, 'info')
return len(replies), [[key, '|', hints['ip'], '|', hints['node'], '|', hints['process'], '|', hints['state']]
for key, (_, hints, code) in sorted(replies.items()) if code == 200]
total, js = run(proxy, _query)
outs.update({item[0]: {'ip': item[2], 'node': item[4], 'process': item[6], 'state': item[8]} for item in js})
if js and not args.json:
#
# - justify & format the whole thing in a nice set of columns
#
pct = (len(js) * 100) / total
logger.info('<%s> -> %d%% replies (%d pods total) ->\n' % (token, pct, len(js)))
rows = [['pod', '|', 'pod IP', '|', 'node', '|', 'process', '|', 'state'], ['', '|', '', '|', '', '|', '', '|', '']] + js
widths = [max(map(len, col)) for col in zip(*rows)]
for row in rows:
logger.info(' '.join((val.ljust(width) for val, width in zip(row, widths))))
if args.json:
logger.info(json.dumps(outs))
return _Tool()
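# Illustrative output layout for a non-json run (all values are hypothetical):
#
#   pod             |  pod IP     |  node    |  process  |  state
#                   |             |          |           |
#   web.server #0   |  10.0.0.12  |  node-1  |  running  |  leader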
|
apache-2.0
| 3,284,121,562,353,902,000
| 34.356164
| 141
| 0.532352
| false
| 3.989181
| false
| false
| false
|
windworship/kmeansds
|
clustered_ds.py
|
1
|
17562
|
# -*- coding: utf-8 -*-
#
# Author: huang
#
'''
The implementation of the framework of combining kmeans with distant supervision
'''
import argparse
import logging
import time
import random
import collections
from sklearn.cluster import MiniBatchKMeans, Birch
from sklearn.feature_extraction import FeatureHasher
from sklearn.metrics.pairwise import euclidean_distances
NEG_RATIO = 0.05 # the ratio of subsample negatives
SUBSAMPLE = True # Subsample the cluster or not
class MentionDatum(object):
'''
The Class of Mention in the Datum
'''
ENTITY = {} # First entity of the entity pair
TYPE = {} # Type of first entity
NE = {} # Type of second entity
SLOT = {} # Second entity of the entity pair
RELATION = {} # Belonged Relation in DS
FEATURE = {}
FEATURE_APPEARENCE = []
# Class variable of the counts of values
entity_number = 0
type_number = 0
ne_number = 0
slot_number = 0
relation_number = 0
feature_number = 0
# Initialization for @property
_entity_id = None
_entity_type = None
_ne_type = None
_slot_value = None
_relation = []
_features = []
def __init__(self, args):
self.entity_id = args[0]
self.entity_type = args[1]
self.ne_type = args[2]
self.slot_value = args[3]
self.relation = args[4]
self.features = args[5:]
self.relabel_relation = []
@property
def entity_id(self):
return self._entity_id
@property
def entity_type(self):
return self._entity_type
@property
def ne_type(self):
return self._ne_type
@property
def slot_value(self):
return self._slot_value
@property
def relation(self):
return self._relation
@property
def features(self):
return self._features
@entity_id.setter
def entity_id(self, value):
if value not in MentionDatum.ENTITY:
MentionDatum.ENTITY[value] = self.entity_number
MentionDatum.entity_number += 1
self._entity_id = MentionDatum.ENTITY.get(value)
@entity_type.setter
def entity_type(self, value):
if value not in MentionDatum.TYPE:
MentionDatum.TYPE[value] = self.type_number
MentionDatum.type_number += 1
self._entity_type = MentionDatum.TYPE.get(value)
@ne_type.setter
def ne_type(self, value):
if value not in MentionDatum.NE:
MentionDatum.NE[value] = self.ne_number
MentionDatum.ne_number += 1
self._ne_type = MentionDatum.NE.get(value)
@slot_value.setter
def slot_value(self, value):
if value not in MentionDatum.SLOT:
MentionDatum.SLOT[value] = self.slot_number
MentionDatum.slot_number += 1
self._slot_value = MentionDatum.SLOT.get(value)
@relation.setter
def relation(self, value):
value = value.split('|')
reform_relation = []
for rel in value:
if rel not in MentionDatum.RELATION:
MentionDatum.RELATION[rel] = self.relation_number
MentionDatum.relation_number += 1
reform_relation.append(MentionDatum.RELATION.get(rel))
self._relation = reform_relation
@features.setter
def features(self, value):
reform_feature = []
for feature in value:
if feature not in MentionDatum.FEATURE:
MentionDatum.FEATURE[feature] = self.feature_number
MentionDatum.feature_number += 1
MentionDatum.FEATURE_APPEARENCE.append(0)
feature_index = MentionDatum.FEATURE.get(feature)
MentionDatum.FEATURE_APPEARENCE[feature_index] += 1
reform_feature.append(feature_index)
self._features = reform_feature
def __str__(self):
relation = self.relation if not self.relabel_relation else self.relabel_relation
mention_str =\
(
'{0} {1} {2} {3} {4} {5}'
).format(
MentionDatum.ENTITY.get(self.entity_id),
MentionDatum.TYPE.get(self.entity_type),
MentionDatum.NE.get(self.ne_type),
MentionDatum.SLOT.get(self.slot_value),
'|'.join([MentionDatum.RELATION.get(rel) for rel in relation]),
' '.join([MentionDatum.FEATURE.get(fea) for fea in self.features]),
)
return mention_str
@classmethod
def shrink_features(cls, threshold=5):
'''
        Shrink the features whose appearance count is below the given threshold.
'''
shrinked_index = 0
shrinked_feature = {}
cls.FEATURE_INDEX = {} # Regenerate index for shrinked feature space
for fea, index in cls.FEATURE.iteritems():
if cls.FEATURE_APPEARENCE[index] >= threshold:
shrinked_feature[fea] = index
cls.FEATURE_INDEX[index] = shrinked_index
shrinked_index += 1
shrinked_feature_number = cls.feature_number - shrinked_index
cls.feature_number = shrinked_index
cls.FEATURE_APPEARENCE = None
logging.info('[OK]...Feature Shrinking')
logging.info('---# of shrinked Features: {0}'.format(shrinked_feature_number))
def _feature_vector_generation(self):
'''
Generate the feature vector in the shrinked feature space.
'''
return dict(
[
(str(MentionDatum.FEATURE_INDEX[index]), 1)
for index in self.features
if index in MentionDatum.FEATURE_INDEX
]
)
@classmethod
def regenerate_feature(cls, mentions):
'''
Generate feature vectors for all relation mentions
'''
return [mention._feature_vector_generation() for mention in mentions]
@classmethod
def transpose_values(cls):
'''
Transpose all value dicts for the generation of datum files.
'''
cls.ENTITY = dict(
zip(cls.ENTITY.values(), cls.ENTITY.keys())
)
cls.TYPE = dict(zip(cls.TYPE.values(), cls.TYPE.keys()))
cls.NE = dict(zip(cls.NE.values(), cls.NE.keys()))
cls.SLOT = dict(zip(cls.SLOT.values(), cls.SLOT.keys()))
cls.RELATION = dict(
zip(cls.RELATION.values(), cls.RELATION.keys())
)
cls.FEATURE = dict(
zip(cls.FEATURE.values(), cls.FEATURE.keys())
)
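# Illustrative datum line as parsed by MentionDatum.__init__ (field values are made up;
# real lines are whitespace-separated, the relation field is '|'-delimited and '_NR'
# marks a negative example):
#   ENT0042 PERSON ORGANIZATION Acme_Corp per:employee_of|_NR dep_nsubj word_works ...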
def _subsample_negatives(mention):
'''
Subsample negatives from mention.
:type mention: MentionDatum
:rtype boolean
'''
nr = MentionDatum.RELATION.get('_NR', None)
if nr is not None\
and [nr] == mention.relation\
and random.uniform(0, 1) > NEG_RATIO:
return False
return True
def _read_datum_file(file_path):
'''
Load the datum from the datum file
:type file_path: basestring
:type neg_ratio: double in [0,1]
:rtype List[MentionDatum]
'''
mentions = []
with open(file_path) as f:
for line in f:
mention = MentionDatum(line.split())
if not _subsample_negatives(mention):
continue
mentions.append(mention)
logging.debug(
'---[OK]...Datum File {0} Loaded | {1} Mentions Loaded'.format(
file_path,
len(mentions),
)
)
return mentions
def datums_read(directory, number=88):
'''
Load datums from NUMBER of datum files in the DIRECTORY
:type directory: basestring
:type number: int in [0, # of datum file in the DIRECTORY]
:rtype List[MentionDatum]
'''
def _generate_file_path(index, generate_mode='{0}/kb_part-00{1:0>2d}.datums'):
'''
Generate the file path in the directory
'''
return generate_mode.format(directory, index)
start = time.clock()
loaded_mentions = []
for datum_number in xrange(number):
loaded_mentions += _read_datum_file(_generate_file_path(datum_number+1))
time_cost = time.clock() - start
logging.info(
(
'[OK]...All Datums Loaded\n'
'---Cost Time: {0} | Average Per File: {1}\n'
'---# of Loaded Mentions: {2}\n'
'---# of Loaded Entities: {3}\n'
'---# of Loaded Entity Types: {4}\n'
'---# of Loaded NE Types: {5}\n'
'---# of Loaded Slots: {6}\n'
'---# of Loaded Relations: {7}\n'
'---# of Loaded Features: {8}\n'
).format(
time_cost,
time_cost/number,
len(loaded_mentions),
MentionDatum.entity_number,
MentionDatum.type_number,
MentionDatum.ne_number,
MentionDatum.slot_number,
MentionDatum.relation_number,
MentionDatum.feature_number,
)
)
return loaded_mentions
def _generate_feature_space(mentions):
'''
Generate the features space.
---------------------------------
:type mentions: List[MentionDatum]
:rtype: numpy.ndarray
'''
start = time.clock()
# Shrink the features
MentionDatum.shrink_features(threshold=5)
# Regenerate feature vectors
feature_space = MentionDatum.regenerate_feature(mentions)
# Generate feature space
feature_space =\
FeatureHasher(
n_features=MentionDatum.feature_number
).transform(feature_space)
time_cost = time.clock() - start
logging.info('[OK]...Generate Feature Space in {0}s'.format(time_cost))
return feature_space
def _minibatchkmeans(feature_space, cluster_number):
'''
Use MinibatchKkmeans to divide the feature_space into cluster_number bags.
-------------------------------------------------------------------------
:type feature_space: numpy.ndarray
:type cluster_number: int
:rtype: numpy.ndarray[n_mentions,] labels of the mentions
'''
start = time.clock()
model =\
MiniBatchKMeans(
n_clusters=cluster_number,
n_init=22,
batch_size=5700
)
predicts = model.fit_predict(feature_space)
logging.info('[OK]...Kmeans Clustering | Cost {0}s'.format(time.clock()-start))
return predicts
def _predict_to_cluster(predicts, mentions):
'''
Transform predicts to clusters.
-------------------------------
:type predicts: numpy.ndarray[n_samples,]
:type mentions: List[MentionDatum]
:rtype: List[[int,]]
'''
cluster_number = len(set(predicts))
clusters = [[] for size in xrange(cluster_number)]
for index, predict in enumerate(predicts):
clusters[predict]+=mentions[index].relation
logging.info('------[OK]...Labels Transform To Clusters')
return clusters
def _assign_cluster_relation(predicts, mentions):
'''
Assign each cluster the most similar relation according to the assumption.
--------------------------------------------------------------------------
:type predicts: numpy.ndarray[n_samples,]
:type mentions: List[MentionDatum]
:rtype: List[(int, double)]
'''
start = time.clock()
relation_for_clusters = []
# Predicts -> clusters
clusters = _predict_to_cluster(predicts, mentions)
for cluster in clusters:
relation_counter = collections.Counter(cluster)
logging.info('---Cluster assign: {0}'.format(relation_counter))
assign_relation = relation_counter.most_common(1)[0]
relation_for_clusters.append(
(
assign_relation[0],
(assign_relation[1]+0.0)/len(cluster),
)
)
time_cost = time.clock() - start
logging.info('---[OK]...Assign cluster relations cost of {0}'.format(time_cost))
return relation_for_clusters
def _subsample_mention(predicts, clusters, mentions):
'''
Subsample mentions in a cluster based on the probability of the relation.
-------------------------------------------------------------------------
:type predicts: numpy.ndarray[n_samples,]
:type clusters: List[(int, double)]
:type mentions: List[MentionDatum]
:rtype: None
'''
start = time.clock()
subsample_number = 0
for index, predict in enumerate(predicts):
relation, probability = clusters[predict]
if not SUBSAMPLE or random.random() < probability:
mentions[index].relabel_relation.append(relation)
subsample_number += 1
time_cost = time.clock() - start
logging.info('---[OK]...Subsample mentions cost of {0}'.format(time_cost))
logging.info('------# of subsamples: {0}'.format(subsample_number))
def kmeans_predict(mentions, cluster_number=100):
'''
The framework predicts labels of mentions as following:
1. Generate the feature space
2. Kmeans divides the feature space into k clusters
3. Reassign each cluster a relation based on DS
4. Subsample mentions in the cluster to be labeled with corresponding relation
NOTE: Usually k is much higher than the # of known relations.
---------------------------------------------------
:type mentions:List[DatumMention]
:type cluster_number:int
:rtype None
'''
start = time.clock()
feature_space = _generate_feature_space(mentions)
predicts = _minibatchkmeans(feature_space, cluster_number)
relation_for_clusters = _assign_cluster_relation(predicts, mentions)
_generate_cluster(predicts, relation_for_clusters, mentions)
_subsample_mention(predicts, relation_for_clusters, mentions)
logging.info('[OK]...Framework | Cost {0}s'.format(time.clock()-start))
def regenerate_datums(mentions, filepath):
'''
Regenerate datums with the new relation
-------------------------------------------------
:type mentions: List[MentionDatum]
:type filepath: basestring
:rtype: None
'''
start = time.clock()
file_number = len(mentions) / 90000 + 1
negative_number = 0
nr = MentionDatum.RELATION.get('_NR')
#transpose values
MentionDatum.transpose_values()
for index in xrange(file_number):
with open(filepath + '/{0:0>2d}.datums'.format(index), 'w') as f:
for mention in mentions[index*90000:(index+1)*90000]:
if nr in mention.relabel_relation:
negative_number += 1
f.write(str(mention))
f.write('\n')
logging.debug('---[OK]...Generate {0:0>2d}.datums'.format(index))
spend = time.clock() - start
logging.info('[OK]...Generate {0} Datums File'.format(file_number))
logging.info('[OK]...Negative number: {0}'.format(negative_number))
logging.info('---Cost time: {0} | Average per file: {1}'.format(spend, spend/file_number))
def _generate_cluster(predicts, clusters, mentions):
'''
Generate clusters from predicts.
=======================================
:type predicts: numpy.ndarray[n_samples,]
:type clusters: List[(int, double)]
:type mentions: List[MentionDatum]
:rtype: None
'''
entity_index = dict(
zip(MentionDatum.ENTITY.values(), MentionDatum.ENTITY.keys())
)
slot_index = dict(
zip(MentionDatum.SLOT.values(), MentionDatum.SLOT.keys())
)
relation_index = dict(
zip(MentionDatum.RELATION.values(), MentionDatum.RELATION.keys())
)
    cluster_results = [[] for index in xrange(len(clusters))]  # one result bucket per predicted cluster
for index, predict in enumerate(predicts):
relation, probability = clusters[predict]
cluster_results[predict].append(
(
entity_index[mentions[index].entity_id],
slot_index[mentions[index].slot_value],
relation_index[mentions[index].relation[0]],
relation_index[relation],
)
)
for index, cluster_result in enumerate(cluster_results):
with open('result/'+str(index), 'w') as f:
f.write('\n'.join([str(result) for result in cluster_result]))
with open('result/index', 'w') as f:
f.write('\n'.join([str(index) for index in sorted(enumerate(clusters), key=lambda x:x[1][1])]))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.info('METHOD START')
parser = argparse.ArgumentParser()
parser.add_argument('-n', type=int)
parser.add_argument('-r', type=float)
parser.add_argument('-o', type=str)
parser.add_argument('-d', type=str)
parser.add_argument('-s', type=bool)
parser.add_argument('-f', type=int)
args = parser.parse_args()
start = time.clock()
cluster_number = args.n
NEG_RATIO = (args.r + 0.0) / 100
SUBSAMPLE = True if args.s else False
logging.info('CLUSTER NUMBER:{0}'.format(cluster_number))
logging.info('NEG_RATIO:{0}'.format(NEG_RATIO))
logging.info('OUTPUT_DIR:{0}'.format(args.o))
logging.info('DATA_DIR:{0}'.format(args.d))
logging.info('SUBSAMPLE:{0}'.format(SUBSAMPLE))
mentions = datums_read(args.d, number=args.f)
kmeans_predict(mentions, cluster_number)
regenerate_datums(
mentions,
args.o,
)
logging.info('Method End With {0}s'.format(time.clock()-start))
|
mit
| -1,580,487,062,954,818,800
| 31.643123
| 103
| 0.581597
| false
| 3.894013
| false
| false
| false
|
SwordYork/sequencing
|
sequencing_np/nn/rnn_cells/rnn.py
|
1
|
2729
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Sword York
# GitHub: https://github.com/SwordYork/sequencing
# No rights reserved.
#
from abc import ABCMeta, abstractmethod
from ..base import Layer
from ... import np, TIME_MAJOR
class RNN(Layer, metaclass=ABCMeta):
def __init__(self, init_state, param_keys, activation=None,
base_name=None, name=None, *args, **kwargs):
"""
        Numpy RNN cell.
        It is only used for inference, not training, so we don't need parameter
        initialization in this implementation.
        The weights and other parameters are passed in via params.
:param init_state: initial states of RNN, [B, H] or tuple([B, H], ...)
:param param_keys: name of params, such as kernel and bias
:param activation: activation function
:param base_name: name of parent Layer
:param name: name of this Layer
"""
super(RNN, self).__init__(param_keys, base_name, name, **kwargs)
# get state size
if type(init_state) != type(np.empty([])):
self.init_state = tuple(init_state)
self.hidden_units = tuple(init_state)[0].shape[1]
else:
self.init_state = init_state
self.hidden_units = init_state.shape[1]
self.time_major = TIME_MAJOR
self.activation = activation or np.tanh
def encode(self, inputs, sequence_length=None, reverse=False):
"""
Encode multi-step inputs.
:param inputs: if time_major [T, B, ...] else [B, T, ...]
:param sequence_length: length of the sequence [B]
:param reverse: used in bidirectional RNN
        :return: rnn outputs and the final state
"""
if not self.time_major:
inputs = np.transpose(inputs, (1, 0, 2))
steps = inputs.shape[0]
outputs = np.zeros(inputs.shape[:-1] + (self.hidden_units,),
inputs.dtype)
state = self.init_state
iter_range = reversed(range(steps)) if reverse else range(steps)
for idx in iter_range:
# rnn step
curr_input = inputs[idx, :, :]
mask = idx < sequence_length if sequence_length is not None else None
outputs[idx, :, :], state = self.step(state, curr_input, mask)
if not self.time_major:
outputs = np.transpose(outputs, (1, 0, 2))
return outputs, state
@abstractmethod
def step(self, prev_states, input_, mask=None):
"""
run rnn for one step
:param prev_states: [B, ...]
:param input_: [B, ...]
:param mask: mask the terminated sequence in the batch
:return: output, state
"""
raise NotImplementedError
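# A minimal sketch of what a concrete step() could look like for a vanilla RNN cell,
# written in plain numpy. Illustrative only: the weight names (kernel, bias) and the
# mask semantics (True = sequence still active) are assumptions, not part of this module.
import numpy


def _vanilla_rnn_step(prev_state, input_, kernel, bias, mask=None, activation=numpy.tanh):
    # project the concatenated [input, previous state], shape [B, I + H] -> [B, H]
    output = activation(numpy.concatenate([input_, prev_state], axis=1).dot(kernel) + bias)
    if mask is not None:
        # terminated sequences keep their previous state unchanged
        output = numpy.where(mask[:, None], output, prev_state)
    return output, output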
|
mit
| -3,731,958,042,420,923,400
| 33.544304
| 83
| 0.5797
| false
| 3.955072
| false
| false
| false
|
olemb/mido
|
tests/test_syx.py
|
1
|
1299
|
from pytest import raises
from mido.messages import Message
from mido.syx import read_syx_file, write_syx_file
def test_read(tmpdir):
path = tmpdir.join("test.syx").strpath
msg = Message('sysex', data=(1, 2, 3))
with open(path, 'wb') as outfile:
outfile.write(msg.bin())
assert read_syx_file(path) == [msg]
with open(path, 'wt') as outfile:
outfile.write(msg.hex())
assert read_syx_file(path) == [msg]
with open(path, 'wt') as outfile:
outfile.write('NOT HEX')
with raises(ValueError):
read_syx_file(path)
def test_handle_any_whitespace(tmpdir):
path = tmpdir.join("test.syx").strpath
with open(path, 'wt') as outfile:
outfile.write('F0 01 02 \t F7\n F0 03 04 F7\n')
assert read_syx_file(path) == [Message('sysex', data=[1, 2]),
Message('sysex', data=[3, 4])]
def test_write(tmpdir):
# p = tmpdir.mkdir("sub").join("hello.txt")
path = tmpdir.join("test.syx").strpath
msg = Message('sysex', data=(1, 2, 3))
write_syx_file(path, [msg])
with open(path, 'rb') as infile:
assert infile.read() == msg.bin()
write_syx_file(path, [msg], plaintext=True)
with open(path, 'rt') as infile:
assert infile.read().strip() == msg.hex()
|
mit
| 8,884,796,596,589,155,000
| 27.23913
| 65
| 0.595843
| false
| 3.137681
| true
| false
| false
|
leanix/leanix-sdk-python
|
src/leanix/models/ProjectHasResource.py
|
1
|
1781
|
#!/usr/bin/env python
"""
The MIT License (MIT)
Copyright (c) 2017 LeanIX GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
class ProjectHasResource:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
self.swaggerTypes = {
'ID': 'str',
'projectID': 'str',
'resourceID': 'str',
'comment': 'str',
'projectImpactID': 'str'
}
self.ID = None # str
self.projectID = None # str
self.resourceID = None # str
self.comment = None # str
self.projectImpactID = None # str
|
mit
| 5,855,163,189,732,606,000
| 37.717391
| 105
| 0.705783
| false
| 4.441397
| false
| false
| false
|
stormi/tsunami
|
src/primaires/vehicule/vecteur.py
|
1
|
8948
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 DAVY Guillaume
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe Vecteur, détaillée plus bas."""
from math import sqrt, cos, sin, radians, degrees, atan, pi
from abstraits.obase import *
from primaires.salle.coordonnees import Coordonnees
# Constantes
NPRECISION = 5
class Vecteur(BaseObj):
"""Classe représentant un vecteur en trois dimensions.
Elle gère les opérations usuelles dessus, ainsi que leur rotation
autour d'un axe du repère.
"""
def __init__(self, x=0, y=0, z=0, parent=None):
"""Constructeur du vecteur"""
BaseObj.__init__(self)
self.parent = parent
self._x = x
self._y = y
self._z = z
self._construire()
def __getnewargs__(self):
return ()
def __str__(self):
"""Affiche le vecteur plus proprement"""
return "({}, {}, {})".format(self.x, self.y, self.z)
def __repr__(self):
"""Affichage des coordonnées dans un cas de debug"""
return "Vecteur(x={}, y={}, z={})".format(self.x, self.y, self.z)
@property
def coordonnees(self):
return Coordonnees(self.x, self.y, self.z)
@property
def tuple(self):
"""Retourne le tuple (x, y, z)"""
return (self.x, self.y, self.z)
@property
def norme(self):
return sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)
@property
def direction(self):
"""Retourne un angle en degré représentant la direction.
0 => est
45 => sud-est
90 => sud
135 => sud-ouest
180 => ouest
225 => nord-ouest
270 => nord
315 => nord-est
"""
return -self.argument() % 360
@property
def inclinaison(self):
"""Retourne l'angle d'inclinaison en degré."""
x, y, z = self.x, self.y, self.z
n = sqrt(x ** 2 + y ** 2)
if n == 0:
if z == 0:
return 0
else:
return 90
return degrees(atan(z/n))
@property
def nom_direction(self):
"""Retourne le nom de la direction.
0 => "est"
45 => "sud-est"
...
"""
direction = self.direction
if direction < 22.5:
return "est"
elif direction < 67.5:
return "sud-est"
elif direction < 112.5:
return "sud"
elif direction < 157.5:
return "sud-ouest"
elif direction < 202.5:
return "ouest"
elif direction < 247.5:
return "nord-ouest"
elif direction < 292.5:
return "nord"
elif direction < 337.5:
return "nord-est"
else:
return "est"
def _get_x(self):
return self._x
def _set_x(self, x):
self._x = round(x, NPRECISION)
x = property(_get_x, _set_x)
def _get_y(self):
return self._y
def _set_y(self, y):
self._y = round(y, NPRECISION)
y = property(_get_y, _set_y)
def _get_z(self):
return self._z
def _set_z(self, z):
self._z = round(z, NPRECISION)
z = property(_get_z, _set_z)
def copier(self):
"""Retourne une copie de self"""
return Vecteur(self.x, self.y, self.z, self.parent)
def est_nul(self, arrondi=3):
"""Retourne True si le vcteur est considéré comme nul."""
x = round(self._x, arrondi)
y = round(self._y, arrondi)
z = round(self._z, arrondi)
return x == 0 and y == 0 and z == 0
def tourner_autour_x(self, angle):
"""Tourne autour de l'âxe X.
L'angle doit être en degré.
"""
r = radians(angle)
x, y, z = self.x, self.y, self.z
self.x = x * 1 + y * 0 + z * 0
self.y = x * 0 + y * cos(r) - z * sin(r)
self.z = x * 0 + y * sin(r) + z * cos(r)
return self
def tourner_autour_y(self, angle):
"""Tourne autour de l'âxe Y.
L'angle doit être en degré.
"""
r = radians(angle)
x, y, z = self.x, self.y, self.z
self.x = x * cos(r) - y * 0 + z * sin(r)
self.y = x * 0 + y * 1 + z * 0
self.z = x * sin(r) + y * 0 + z * cos(r)
return self
def tourner_autour_z(self, angle):
"""Tourne autour de l'âxe Z.
L'angle doit être en degré.
"""
r = radians(angle)
x, y, z = self.x, self.y, self.z
self.x = x * cos(r) - -1 * y * sin(r) + z * 0
self.y = -1 * x * sin(r) + y * cos(r) + z * 0
self.z = x * 0 + y * 0 + z * 1
return self
def incliner(self, angle):
"""Incline le véhicule.
L'angle doit être en degré.
"""
r = radians(angle)
x, y, z = self.x, self.y, self.z
n = sqrt(x * x + y * y)
if n == 0:
if z == 0 or sin(r) == 0 or (x == 0 and y == 0):
self.x = 0
self.y = 0
self.z = z * cos(r)
else:
raise ValueError("impossible d'incliner un vecteur vertical")
else:
self.x = x * cos(r) - z * x * sin(r) / n
self.y = y * cos(r) - z * y * sin(r) / n
self.z = z * cos(r) + sin(r) * n
return self
def argument(self):
x, y = self.x, self.y
if x > 0:
return degrees(atan(y / x)) % 360
elif x < 0:
return (180 + degrees(atan(y / x))) % 360
elif y > 0:
return 90
elif y < 0:
return -90
else:
return 0
def normalise(self):
norme = self.norme
if norme == 0:
raise ValueError("impossible de normaliser nul")
return Vecteur(self.x / norme, self.y / norme, self.z / norme)
def orienter(self, angle):
"""Oriente le vecteur horizontalement.
L'angle doit être indiqué en degré.
A la différence de tourner_autour_z, l'angle précisé est absolu.
Après l'appelle à la méthode vecteur.orienter(180) par exemple,
vecteur.direction doit être 180.
"""
direction = self.direction
angle -= direction
self.tourner_autour_z(angle)
# Méthodes spéciales mathématiques
def __neg__(self):
"""Retourne le vecteur négatif."""
return Vecteur(-self.x, -self.y, -self.z)
def __add__(self, autre):
"""Additionne deux vecteurs."""
return Vecteur(self.x + autre.x, self.y + autre.y, self.z + autre.z)
def __sub__(self, autre):
"""Soustrait deux vecteurs."""
return Vecteur(self.x - autre.x, self.y - autre.y, self.z - autre.z)
def __mul__(self, valeur):
"""Multiplie le vecteur par un nombre."""
return Vecteur(self.x * valeur, self.y * valeur, self.z * valeur)
def __rmul__(self, valeur):
"""Multiplie le vecteur par un nombre."""
return Vecteur(self.x * valeur, self.y * valeur, self.z * valeur)
def __eq__(self, autre):
return self.x == autre.x and self.y == autre.y and self.z == autre.z
def __hash__(self):
return hash(self.tuple)
# Fonctions du module (à utiliser pour l'optimisation)
def get_direction(vecteur):
    """Retourne la direction en degrés du vecteur."""
    # argument() already returns degrees; mirror the 'direction' property
    return -vecteur.argument() % 360
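# Illustrative usage (consistent with the formulas above):
#   v = Vecteur(1, 0, 0)
#   v.direction          # 0  -> nom_direction 'est'
#   v.tourner_autour_z(90)
#   v.direction          # 90 -> nom_direction 'sud'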
|
bsd-3-clause
| 3,360,314,228,164,489,000
| 28.892617
| 79
| 0.554558
| false
| 3.272594
| false
| false
| false
|
musically-ut/python-glob2
|
setup.py
|
2
|
1227
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
# Figure out the version
import re
here = os.path.dirname(os.path.abspath(__file__))
version_re = re.compile(
r'__version__ = (\(.*?\))')
fp = open(os.path.join(here, 'glob2', '__init__.py'))
version = None
for line in fp:
match = version_re.search(line)
if match:
version = eval(match.group(1))
break
else:
raise Exception("Cannot find version in __init__.py")
fp.close()
setup(
name = 'glob2',
version = ".".join(map(str, version)),
description = 'Version of the glob module that can capture patterns '+
'and supports recursive wildcards',
author = 'Michael Elsdoerfer',
author_email = 'michael@elsdoerfer.com',
license='BSD',
url = 'http://github.com/miracle2k/python-glob2/',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries',
],
packages = find_packages()
)
|
bsd-2-clause
| -5,408,287,253,185,717,000
| 28.214286
| 74
| 0.611247
| false
| 3.858491
| false
| false
| false
|
sammyshj/nyx
|
nyx/panel/header.py
|
1
|
16295
|
# Copyright 2009-2016, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Top panel for every page, containing basic system and tor related information.
This expands the information it presents to two columns if there's room
available.
"""
import os
import time
import stem
import stem.control
import stem.util.proc
import stem.util.str_tools
import stem.util.system
import nyx.controller
import nyx.curses
import nyx.panel
import nyx.popups
import nyx.tracker
from stem.util import conf, log
from nyx import msg, tor_controller
from nyx.curses import RED, GREEN, YELLOW, CYAN, WHITE, BOLD, HIGHLIGHT
MIN_DUAL_COL_WIDTH = 141 # minimum width where we'll show two columns
SHOW_FD_THRESHOLD = 60 # show file descriptor usage if usage is over this percentage
UPDATE_RATE = 5 # rate in seconds at which we refresh
CONFIG = conf.config_dict('nyx', {
'attr.flag_colors': {},
'attr.version_status_colors': {},
'tor.chroot': '',
})
class HeaderPanel(nyx.panel.DaemonPanel):
"""
Top area containing tor settings and system information.
"""
def __init__(self):
nyx.panel.DaemonPanel.__init__(self, UPDATE_RATE)
self._vals = Sampling.create()
self._last_width = nyx.curses.screen_size().width
self._reported_inactive = False
self._message = None
self._message_attr = []
tor_controller().add_status_listener(self.reset_listener)
def show_message(self, message = None, *attr, **kwargs):
"""
Sets the message displayed at the bottom of the header. If not called with
anything it clears the override.
:param str message: message to be displayed
:param list attr: text attributes to apply
:param int max_wait: seconds to wait for user input, no limit if **None**
:returns: :class:`~nyx.curses.KeyInput` user pressed if provided a
**max_wait**, **None** otherwise or if prompt was canceled
"""
self._message = message
self._message_attr = attr
self.redraw()
if 'max_wait' in kwargs:
user_input = nyx.curses.key_input(kwargs['max_wait'])
self.show_message() # clear override
return user_input
def is_wide(self):
"""
True if we should show two columns of information, False otherwise.
"""
return self._last_width >= MIN_DUAL_COL_WIDTH
def get_height(self):
"""
Provides the height of the content, which is dynamically determined by the
panel's maximum width.
"""
max_height = nyx.panel.DaemonPanel.get_height(self)
if self._vals.is_relay:
return min(max_height, 5 if self.is_wide() else 7)
else:
return min(max_height, 4 if self.is_wide() else 5)
def send_newnym(self):
"""
    Requests a new identity and provides a visual cue.
"""
controller = tor_controller()
if not controller.is_newnym_available():
return
controller.signal(stem.Signal.NEWNYM)
# If we're wide then the newnym label in this panel will give an
# indication that the signal was sent. Otherwise use a msg.
if not self.is_wide():
self.show_message('Requesting a new identity', HIGHLIGHT, max_wait = 1)
def key_handlers(self):
def _reconnect():
if self._vals.is_connected:
return
controller = tor_controller()
self.show_message('Reconnecting...', HIGHLIGHT)
try:
try:
controller.reconnect(chroot_path = CONFIG['tor.chroot'])
except stem.connection.MissingPassword:
password = nyx.controller.input_prompt('Controller Password: ')
if password:
controller.authenticate(password)
log.notice("Reconnected to Tor's control port")
self.show_message('Tor reconnected', HIGHLIGHT, max_wait = 1)
except Exception as exc:
self.show_message('Unable to reconnect (%s)' % exc, HIGHLIGHT, max_wait = 3)
controller.close()
return (
nyx.panel.KeyHandler('n', action = self.send_newnym),
nyx.panel.KeyHandler('r', action = _reconnect),
)
def _draw(self, subwindow):
vals = self._vals # local reference to avoid concurrency concerns
self._last_width = subwindow.width
is_wide = self.is_wide()
# space available for content
nyx_controller = nyx.controller.get_controller()
left_width = max(subwindow.width / 2, 77) if is_wide else subwindow.width
right_width = subwindow.width - left_width
pause_time = nyx_controller.get_pause_time() if nyx_controller.is_paused() else None
_draw_platform_section(subwindow, 0, 0, left_width, vals)
if vals.is_connected:
_draw_ports_section(subwindow, 0, 1, left_width, vals)
else:
_draw_disconnected(subwindow, 0, 1, vals.last_heartbeat)
if is_wide:
_draw_resource_usage(subwindow, left_width, 0, right_width, vals, pause_time)
if vals.is_relay:
_draw_fingerprint_and_fd_usage(subwindow, left_width, 1, right_width, vals)
_draw_flags(subwindow, 0, 2, vals.flags)
_draw_exit_policy(subwindow, left_width, 2, vals.exit_policy)
elif vals.is_connected:
_draw_newnym_option(subwindow, left_width, 1, vals.newnym_wait)
else:
_draw_resource_usage(subwindow, 0, 2, left_width, vals, pause_time)
if vals.is_relay:
_draw_fingerprint_and_fd_usage(subwindow, 0, 3, left_width, vals)
_draw_flags(subwindow, 0, 4, vals.flags)
_draw_status(subwindow, 0, self.get_height() - 1, nyx_controller.is_paused(), self._message, *self._message_attr)
def reset_listener(self, controller, event_type, _):
self._update()
if event_type == stem.control.State.CLOSED:
log.notice('Tor control port closed')
def _update(self):
self._vals = Sampling.create(self._vals)
if self._vals.fd_used and self._vals.fd_limit != -1:
fd_percent = 100 * self._vals.fd_used / self._vals.fd_limit
if fd_percent >= 90:
log_msg = msg('panel.header.fd_used_at_ninety_percent', percentage = fd_percent)
log.log_once('fd_used_at_ninety_percent', log.WARN, log_msg)
log.DEDUPLICATION_MESSAGE_IDS.add('fd_used_at_sixty_percent')
elif fd_percent >= 60:
log_msg = msg('panel.header.fd_used_at_sixty_percent', percentage = fd_percent)
log.log_once('fd_used_at_sixty_percent', log.NOTICE, log_msg)
if self._vals.is_connected:
if not self._reported_inactive and (time.time() - self._vals.last_heartbeat) >= 10:
self._reported_inactive = True
log.notice('Relay unresponsive (last heartbeat: %s)' % time.ctime(self._vals.last_heartbeat))
elif self._reported_inactive and (time.time() - self._vals.last_heartbeat) < 10:
self._reported_inactive = False
log.notice('Relay resumed')
self.redraw()
class Sampling(object):
def __init__(self, **attr):
self._attr = attr
for key, value in attr.items():
setattr(self, key, value)
@staticmethod
def create(last_sampling = None):
controller = tor_controller()
retrieved = time.time()
pid = controller.get_pid('')
tor_resources = nyx.tracker.get_resource_tracker().get_value()
nyx_total_cpu_time = sum(os.times()[:3], stem.util.system.SYSTEM_CALL_TIME)
or_listeners = controller.get_listeners(stem.control.Listener.OR, [])
control_listeners = controller.get_listeners(stem.control.Listener.CONTROL, [])
if controller.get_conf('HashedControlPassword', None):
auth_type = 'password'
elif controller.get_conf('CookieAuthentication', None) == '1':
auth_type = 'cookie'
else:
auth_type = 'open'
try:
fd_used = stem.util.proc.file_descriptors_used(pid)
except IOError:
fd_used = None
if last_sampling:
nyx_cpu_delta = nyx_total_cpu_time - last_sampling.nyx_total_cpu_time
nyx_time_delta = retrieved - last_sampling.retrieved
nyx_cpu = nyx_cpu_delta / nyx_time_delta
else:
nyx_cpu = 0.0
attr = {
'retrieved': retrieved,
'is_connected': controller.is_alive(),
'connection_time': controller.connection_time(),
'last_heartbeat': controller.get_latest_heartbeat(),
'fingerprint': controller.get_info('fingerprint', 'Unknown'),
'nickname': controller.get_conf('Nickname', ''),
'newnym_wait': controller.get_newnym_wait(),
'exit_policy': controller.get_exit_policy(None),
'flags': getattr(controller.get_network_status(default = None), 'flags', []),
'version': str(controller.get_version('Unknown')).split()[0],
'version_status': controller.get_info('status/version/current', 'Unknown'),
'address': or_listeners[0][0] if (or_listeners and or_listeners[0][0] != '0.0.0.0') else controller.get_info('address', 'Unknown'),
'or_port': or_listeners[0][1] if or_listeners else '',
'dir_port': controller.get_conf('DirPort', '0'),
'control_port': str(control_listeners[0][1]) if control_listeners else None,
'socket_path': controller.get_conf('ControlSocket', None),
'is_relay': bool(or_listeners),
'auth_type': auth_type,
'pid': pid,
'start_time': stem.util.system.start_time(pid),
'fd_limit': int(controller.get_info('process/descriptor-limit', '-1')),
'fd_used': fd_used,
'nyx_total_cpu_time': nyx_total_cpu_time,
'tor_cpu': '%0.1f' % (100 * tor_resources.cpu_sample),
'nyx_cpu': '%0.1f' % (nyx_cpu),
'memory': stem.util.str_tools.size_label(tor_resources.memory_bytes) if tor_resources.memory_bytes > 0 else 0,
'memory_percent': '%0.1f' % (100 * tor_resources.memory_percent),
'hostname': os.uname()[1],
'platform': '%s %s' % (os.uname()[0], os.uname()[2]), # [platform name] [version]
}
return Sampling(**attr)
def format(self, message, crop_width = None):
formatted_msg = message.format(**self._attr)
if crop_width is not None:
formatted_msg = stem.util.str_tools.crop(formatted_msg, crop_width)
return formatted_msg
def _draw_platform_section(subwindow, x, y, width, vals):
"""
Section providing the user's hostname, platform, and version information...
nyx - odin (Linux 3.5.0-52-generic) Tor 0.2.5.1-alpha-dev (unrecommended)
|------ platform (40 characters) ------| |----------- tor version -----------|
"""
initial_x, space_left = x, min(width, 40)
x = subwindow.addstr(x, y, vals.format('nyx - {hostname}', space_left))
space_left -= x - initial_x
if space_left >= 10:
subwindow.addstr(x, y, ' (%s)' % vals.format('{platform}', space_left - 3))
x, space_left = initial_x + 43, width - 43
if vals.version != 'Unknown' and space_left >= 10:
x = subwindow.addstr(x, y, vals.format('Tor {version}', space_left))
space_left -= x - 43 - initial_x
if space_left >= 7 + len(vals.version_status):
version_color = CONFIG['attr.version_status_colors'].get(vals.version_status, WHITE)
x = subwindow.addstr(x, y, ' (')
x = subwindow.addstr(x, y, vals.version_status, version_color)
subwindow.addstr(x, y, ')')
def _draw_ports_section(subwindow, x, y, width, vals):
"""
Section providing our nickname, address, and port information...
Unnamed - 0.0.0.0:7000, Control Port (cookie): 9051
"""
if not vals.is_relay:
x = subwindow.addstr(x, y, 'Relaying Disabled', CYAN)
else:
x = subwindow.addstr(x, y, vals.format('{nickname} - {address}:{or_port}'))
if vals.dir_port != '0':
x = subwindow.addstr(x, y, vals.format(', Dir Port: {dir_port}'))
if vals.control_port:
if width >= x + 19 + len(vals.control_port) + len(vals.auth_type):
auth_color = RED if vals.auth_type == 'open' else GREEN
x = subwindow.addstr(x, y, ', Control Port (')
x = subwindow.addstr(x, y, vals.auth_type, auth_color)
subwindow.addstr(x, y, vals.format('): {control_port}'))
else:
subwindow.addstr(x, y, vals.format(', Control Port: {control_port}'))
elif vals.socket_path:
subwindow.addstr(x, y, vals.format(', Control Socket: {socket_path}'))
def _draw_disconnected(subwindow, x, y, last_heartbeat):
"""
Message indicating that tor is disconnected...
Tor Disconnected (15:21 07/13/2014, press r to reconnect)
"""
x = subwindow.addstr(x, y, 'Tor Disconnected', RED, BOLD)
last_heartbeat_str = time.strftime('%H:%M %m/%d/%Y', time.localtime(last_heartbeat))
subwindow.addstr(x, y, ' (%s, press r to reconnect)' % last_heartbeat_str)
def _draw_resource_usage(subwindow, x, y, width, vals, pause_time):
"""
System resource usage of the tor process...
cpu: 0.0% tor, 1.0% nyx mem: 0 (0.0%) pid: 16329 uptime: 12-20:42:07
"""
if vals.start_time:
if not vals.is_connected:
now = vals.connection_time
elif pause_time:
now = pause_time
else:
now = time.time()
uptime = stem.util.str_tools.short_time_label(now - vals.start_time)
else:
uptime = ''
sys_fields = (
(0, vals.format('cpu: {tor_cpu}% tor, {nyx_cpu}% nyx')),
(27, vals.format('mem: {memory} ({memory_percent}%)')),
(47, vals.format('pid: {pid}')),
(59, 'uptime: %s' % uptime),
)
for (start, label) in sys_fields:
if width >= start + len(label):
subwindow.addstr(x + start, y, label)
else:
break
def _draw_fingerprint_and_fd_usage(subwindow, x, y, width, vals):
"""
Presents our fingerprint, and our file descriptor usage if we're running
out...
fingerprint: 1A94D1A794FCB2F8B6CBC179EF8FDD4008A98D3B, file desc: 900 / 1000 (90%)
"""
initial_x, space_left = x, width
x = subwindow.addstr(x, y, vals.format('fingerprint: {fingerprint}', width))
space_left -= x - initial_x
if space_left >= 30 and vals.fd_used and vals.fd_limit != -1:
fd_percent = 100 * vals.fd_used / vals.fd_limit
if fd_percent >= SHOW_FD_THRESHOLD:
if fd_percent >= 95:
percentage_format = (RED, BOLD)
elif fd_percent >= 90:
percentage_format = (RED,)
elif fd_percent >= 60:
percentage_format = (YELLOW,)
else:
percentage_format = ()
x = subwindow.addstr(x, y, ', file descriptors' if space_left >= 37 else ', file desc')
x = subwindow.addstr(x, y, vals.format(': {fd_used} / {fd_limit} ('))
x = subwindow.addstr(x, y, '%i%%' % fd_percent, *percentage_format)
subwindow.addstr(x, y, ')')
def _draw_flags(subwindow, x, y, flags):
"""
Presents flags held by our relay...
flags: Running, Valid
"""
x = subwindow.addstr(x, y, 'flags: ')
if flags:
for i, flag in enumerate(flags):
flag_color = CONFIG['attr.flag_colors'].get(flag, WHITE)
x = subwindow.addstr(x, y, flag, flag_color, BOLD)
if i < len(flags) - 1:
x = subwindow.addstr(x, y, ', ')
else:
subwindow.addstr(x, y, 'none', CYAN, BOLD)
def _draw_exit_policy(subwindow, x, y, exit_policy):
"""
Presents our exit policy...
exit policy: reject *:*
"""
x = subwindow.addstr(x, y, 'exit policy: ')
if not exit_policy:
return
rules = list(exit_policy.strip_private().strip_default())
for i, rule in enumerate(rules):
policy_color = GREEN if rule.is_accept else RED
x = subwindow.addstr(x, y, str(rule), policy_color, BOLD)
if i < len(rules) - 1:
x = subwindow.addstr(x, y, ', ')
if exit_policy.has_default():
if rules:
x = subwindow.addstr(x, y, ', ')
subwindow.addstr(x, y, '<default>', CYAN, BOLD)
def _draw_newnym_option(subwindow, x, y, newnym_wait):
"""
  Provides a notice for requesting a new identity, and the time until it's next
  available if we're in the process of building circuits.
"""
if newnym_wait == 0:
subwindow.addstr(x, y, "press 'n' for a new identity")
else:
plural = 's' if newnym_wait > 1 else ''
subwindow.addstr(x, y, 'building circuits, available again in %i second%s' % (newnym_wait, plural))
def _draw_status(subwindow, x, y, is_paused, message, *attr):
"""
Provides general usage information or a custom message.
"""
if message:
subwindow.addstr(x, y, message, *attr)
elif not is_paused:
controller = nyx.controller.get_controller()
subwindow.addstr(x, y, 'page %i / %i - m: menu, p: pause, h: page help, q: quit' % (controller.get_page() + 1, controller.get_page_count()))
else:
subwindow.addstr(x, y, 'Paused', HIGHLIGHT)
|
gpl-3.0
| -6,505,644,122,368,500,000
| 31.076772
| 144
| 0.639828
| false
| 3.259
| false
| false
| false
|
nens/dpnetcdf
|
dpnetcdf/migrations/0050_auto__del_field_datasource_imported.py
|
1
|
5366
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Datasource.imported'
db.delete_column('dpnetcdf_datasource', 'imported')
def backwards(self, orm):
# Adding field 'Datasource.imported'
db.add_column('dpnetcdf_datasource', 'imported',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
models = {
'dpnetcdf.datasource': {
'Meta': {'object_name': 'Datasource'},
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.OpendapDataset']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shape_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.ShapeFile']", 'null': 'True'}),
'variable': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.Variable']", 'null': 'True'})
},
'dpnetcdf.maplayer': {
'Meta': {'object_name': 'MapLayer'},
'datasources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dpnetcdf.Datasource']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'styles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dpnetcdf.Style']", 'symmetrical': 'False', 'blank': 'True'})
},
'dpnetcdf.opendapcatalog': {
'Meta': {'object_name': 'OpendapCatalog'},
'base_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'catalog_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'http_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'opendap_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'service_prefix': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dpnetcdf.opendapdataset': {
'Meta': {'object_name': 'OpendapDataset'},
'calculation_facility': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'catalog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.OpendapSubcatalog']"}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'program': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'scenario': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'strategy': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'time_zero': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'variables': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dpnetcdf.Variable']", 'symmetrical': 'False'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
},
'dpnetcdf.opendapsubcatalog': {
'Meta': {'object_name': 'OpendapSubcatalog'},
'catalog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.OpendapCatalog']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'dpnetcdf.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'dpnetcdf.style': {
'Meta': {'object_name': 'Style'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'xml': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'dpnetcdf.variable': {
'Meta': {'object_name': 'Variable'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['dpnetcdf']
|
gpl-3.0
| -3,347,639,049,884,668,400
| 59.988636
| 162
| 0.547521
| false
| 3.623228
| false
| false
| false
|
antonxy/audiosync
|
gui.py
|
1
|
5008
|
import tkinter as tk
from tkinter.ttk import Progressbar
from tkinter import filedialog, messagebox
import os
import errno
from threading import Thread
import program_logic
def get_paths_in_dir(directory):
filenames = os.listdir(directory)
return [os.path.abspath(os.path.join(directory, name)) for name in filenames]
class MainFrame(tk.Frame):
def __init__(self, parent):
super(MainFrame, self).__init__(parent)
self.parent = parent
self.dir_var = tk.StringVar()
self.audio_progress_var = tk.IntVar()
self.video_progress_var = tk.IntVar()
self.fps_var = tk.StringVar()
self.audio_shift_var = tk.StringVar()
self.start_button = None
self.generate_ui()
self.center_window()
def generate_ui(self):
self.parent.title('audiosync')
dir_frame = tk.Frame(self)
dir_frame.pack(fill=tk.X)
tk.Label(dir_frame, text='Directory:').pack(side=tk.LEFT)
tk.Entry(dir_frame, textvariable=self.dir_var).pack(fill=tk.X, expand=1, side=tk.LEFT)
tk.Button(dir_frame, text='Select', command=self.select_dir).pack(side=tk.LEFT)
tk.Button(dir_frame, text='Create Structure', command=self.create_directory_structure).pack(side=tk.RIGHT)
fps_frame = tk.Frame(self)
fps_frame.pack()
tk.Label(fps_frame, text='FPS:').pack(side=tk.LEFT)
tk.Entry(fps_frame, textvariable=self.fps_var).pack(side=tk.LEFT)
audio_shift_frame = tk.Frame(self)
audio_shift_frame.pack()
tk.Label(audio_shift_frame, text='Shift Audio forward').pack(side=tk.LEFT)
tk.Entry(audio_shift_frame, textvariable=self.audio_shift_var).pack(side=tk.LEFT)
tk.Label(audio_shift_frame, text='frames').pack(side=tk.LEFT)
cmd_frame = tk.Frame(self)
cmd_frame.pack(fill=tk.X)
self.start_button = tk.Button(cmd_frame, text='Start', command=self.execute)
self.start_button.pack()
Progressbar(self, variable=self.video_progress_var).pack(fill=tk.X)
Progressbar(self, variable=self.audio_progress_var).pack(fill=tk.X)
self.pack(fill=tk.BOTH, expand=1)
def center_window(self):
w = 500
h = 140
sw = self.parent.winfo_screenwidth()
sh = self.parent.winfo_screenheight()
x = (sw - w) / 2
y = (sh - h) / 2
self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))
def select_dir(self):
dir_path = filedialog.askdirectory()
if dir_path != '':
self.dir_var.set(dir_path)
def create_directory_structure(self):
dir_path = self.dir_var.get()
if dir_path != '':
dir_names = ['video', 'audio', 'edl']
for dir_name in dir_names:
new_dir_path = os.path.join(dir_path, dir_name)
try:
os.makedirs(new_dir_path)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
def execute(self):
directory = self.dir_var.get()
        if directory == '':
messagebox.showerror(title='audiosync', message='No directory selected')
return
try:
fps = float(self.fps_var.get())
except ValueError:
messagebox.showerror(title='audiosync', message='FPS has to be decimal number')
return
try:
audio_shift = int(self.audio_shift_var.get())
except ValueError:
messagebox.showerror(title='audiosync', message='Audio shift has to be integer')
return
thread = Thread(target=self.thread_target,
args=(self.audio_progress_var, self.video_progress_var, self.start_button, fps, directory, audio_shift))
thread.start()
self.start_button.config(state='disabled')
@staticmethod
def thread_target(audio_progress_var, video_progress_var, start_button, fps, directory, audio_shift):
video_ret = analyse_directory(os.path.join(directory, 'video'), video_progress_var)
audio_ret = analyse_directory(os.path.join(directory, 'audio'), audio_progress_var)
program_logic.rename_files(audio_ret, 'a')
program_logic.rename_files(video_ret, 'v')
program_logic.generate_edls(video_ret, audio_ret, fps, os.path.join(directory, 'edl'), audio_shift)
audio_progress_var.set(0)
video_progress_var.set(0)
start_button.config(state='normal')
def analyse_directory(directory, progress_var):
ret_list = []
files = os.listdir(directory)
for n, filename in enumerate(files):
path = os.path.abspath(os.path.join(directory, filename))
result = program_logic.analyse_file(path)
if result is not None:
ret_list.append(result)
progress_var.set(int((n + 1) / len(files) * 100))
return ret_list
if __name__ == '__main__':
root = tk.Tk()
app = MainFrame(root)
root.mainloop()
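# Expected working-directory layout (created via the 'Create Structure' button; the
# descriptions are assumptions based on how thread_target uses each folder):
#   <directory>/video/   clips analysed by program_logic.analyse_file
#   <directory>/audio/   separately recorded audio files analysed the same way
#   <directory>/edl/     destination for the generated EDL files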
|
mit
| 6,802,534,413,358,462,000
| 33.784722
| 128
| 0.609425
| false
| 3.516854
| false
| false
| false
|
felipenaselva/felipe.repository
|
plugin.program.Build.Tools/resources/libs/skinSwitch.py
|
1
|
9353
|
################################################################################
# Copyright (C) 2015 OpenELEQ #
# #
# This Program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2, or (at your option) #
# any later version. #
# #
# This Program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with XBMC; see the file COPYING. If not, write to #
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #
# http://www.gnu.org/copyleft/gpl.html #
################################################################################
import os, re,glob, shutil, time, xbmc, xbmcaddon, thread, wizard as wiz, uservar
try:
import json as simplejson
except:
import simplejson
KODIV = float(xbmc.getInfoLabel("System.BuildVersion")[:4])
COLOR1 = uservar.COLOR1
COLOR2 = uservar.COLOR2
ADDONTITLE = uservar.ADDONTITLE  # referenced by swapUS(); assumes uservar exposes ADDONTITLE
HOME = xbmc.translatePath('special://home/')
ADDONS = os.path.join(HOME, 'addons')
#DIALOG = xbmcgui.Dialog()
def getOld(old):
try:
old = '"%s"' % old
query = '{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":%s}, "id":1}' % (old)
response = xbmc.executeJSONRPC(query)
response = simplejson.loads(response)
if response.has_key('result'):
if response['result'].has_key('value'):
return response ['result']['value']
except:
pass
return None
def setNew(new, value):
try:
new = '"%s"' % new
value = '"%s"' % value
query = '{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":%s,"value":%s}, "id":1}' % (new, value)
response = xbmc.executeJSONRPC(query)
except:
pass
return None
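# Illustrative round-trip through the JSON-RPC helpers above (the returned skin id is hypothetical):
#   current = getOld('lookandfeel.skin')          # e.g. 'skin.estuary'
#   setNew('lookandfeel.skin', 'skin.confluence')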
def swapSkins(skin):
if skin == 'skin.confluence':
HOME = xbmc.translatePath('special://home/')
skinfold = os.path.join(HOME, 'userdata', 'addon_data', 'skin.confluence')
settings = os.path.join(skinfold, 'settings.xml')
if not os.path.exists(settings):
string = '<settings>\n <setting id="FirstTimeRun" type="bool">true</setting>\n</settings>'
os.makedirs(skinfold)
f = open(settings, 'w'); f.write(string); f.close()
else: xbmcaddon.Addon(id='skin.confluence').setSetting('FirstTimeRun', 'true')
old = 'lookandfeel.skin'
value = skin
current = getOld(old)
new = old
setNew(new, value)
# if not xbmc.getCondVisibility(Skin.HasSetting(FirstTimeRun)):
# while xbmc.getCondVisibility('Window.IsVisible(1112)'):
# xbmc.executebuiltin('SendClick(100)')
def swapUS():
new = '"addons.unknownsources"'
value = 'true'
query = '{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":%s}, "id":1}' % (new)
response = xbmc.executeJSONRPC(query)
wiz.log("Unknown Sources Get Settings: %s" % str(response), xbmc.LOGDEBUG)
if 'false' in response:
thread.start_new_thread(dialogWatch, ())
xbmc.sleep(200)
query = '{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":%s,"value":%s}, "id":1}' % (new, value)
response = xbmc.executeJSONRPC(query)
wiz.LogNotify("[COLOR %s]%s[/COLOR]" % (COLOR1, ADDONTITLE), '[COLOR %s]Unknown Sources:[/COLOR] [COLOR %s]Enabled[/COLOR]' % (COLOR1, COLOR2))
wiz.log("Unknown Sources Set Settings: %s" % str(response), xbmc.LOGDEBUG)
def dialogWatch():
x = 0
while not xbmc.getCondVisibility("Window.isVisible(yesnodialog)") and x < 100:
x += 1
xbmc.sleep(100)
if xbmc.getCondVisibility("Window.isVisible(yesnodialog)"):
xbmc.executebuiltin('SendClick(11)')
########################################################################################
#######################################Still Needs Work#########################################
########################################################################################
#def popUPmenu():
# fold = glob.glob(os.path.join(ADDONS, 'skin*'))
# addonnames = []; addonids = []; addonfolds = []
# for folder in sorted(fold, key = lambda x: x):
# xml = os.path.join(folder, 'addon.xml')
# if os.path.exists(xml):
# foldername = os.path.split(folder[:-1])[1]
# f = open(xml)
# a = f.read()
# f.close()
# getid = parseDOM(a, 'addon', ret='id')
# getname = parseDOM(a, 'addon', ret='name')
# addid = foldername if len(getid) == 0 else getid[0]
# title = foldername if len(getname) == 0 else getname[0]
# temp = title.replace('[', '<').replace(']', '>')
# temp = re.sub('<[^<]+?>', '', temp)
# addonnames.append(temp)
# addonids.append(addid)
# addonfolds.append(foldername)
# #currskin = ["Current Skin -- %s" % currSkin()] + addonids
# select = DIALOG.select("Select the Skin you want to swap with.", addonids#currskin )
# if select == -1: return
# elif select == 1: addonids[select]
# swapSkins(addonids)
def parseDOM(html, name=u"", attrs={}, ret=False):
# Copyright (C) 2010-2011 Tobias Ussing And Henrik Mosgaard Jensen
if isinstance(html, str):
try:
html = [html.decode("utf-8")]
except:
html = [html]
elif isinstance(html, unicode):
html = [html]
elif not isinstance(html, list):
return u""
if not name.strip():
return u""
ret_lst = []
for item in html:
temp_item = re.compile('(<[^>]*?\n[^>]*?>)').findall(item)
for match in temp_item:
item = item.replace(match, match.replace("\n", " "))
lst = []
for key in attrs:
lst2 = re.compile('(<' + name + '[^>]*?(?:' + key + '=[\'"]' + attrs[key] + '[\'"].*?>))', re.M | re.S).findall(item)
if len(lst2) == 0 and attrs[key].find(" ") == -1:
lst2 = re.compile('(<' + name + '[^>]*?(?:' + key + '=' + attrs[key] + '.*?>))', re.M | re.S).findall(item)
if len(lst) == 0:
lst = lst2
lst2 = []
else:
test = range(len(lst))
test.reverse()
for i in test:
if not lst[i] in lst2:
del(lst[i])
if len(lst) == 0 and attrs == {}:
lst = re.compile('(<' + name + '>)', re.M | re.S).findall(item)
if len(lst) == 0:
lst = re.compile('(<' + name + ' .*?>)', re.M | re.S).findall(item)
if isinstance(ret, str):
lst2 = []
for match in lst:
attr_lst = re.compile('<' + name + '.*?' + ret + '=([\'"].[^>]*?[\'"])>', re.M | re.S).findall(match)
if len(attr_lst) == 0:
attr_lst = re.compile('<' + name + '.*?' + ret + '=(.[^>]*?)>', re.M | re.S).findall(match)
for tmp in attr_lst:
cont_char = tmp[0]
if cont_char in "'\"":
if tmp.find('=' + cont_char, tmp.find(cont_char, 1)) > -1:
tmp = tmp[:tmp.find('=' + cont_char, tmp.find(cont_char, 1))]
if tmp.rfind(cont_char, 1) > -1:
tmp = tmp[1:tmp.rfind(cont_char)]
else:
if tmp.find(" ") > 0:
tmp = tmp[:tmp.find(" ")]
elif tmp.find("/") > 0:
tmp = tmp[:tmp.find("/")]
elif tmp.find(">") > 0:
tmp = tmp[:tmp.find(">")]
lst2.append(tmp.strip())
lst = lst2
else:
lst2 = []
for match in lst:
endstr = u"</" + name
start = item.find(match)
end = item.find(endstr, start)
pos = item.find("<" + name, start + 1 )
while pos < end and pos != -1:
tend = item.find(endstr, end + len(endstr))
if tend != -1:
end = tend
pos = item.find("<" + name, pos + 1)
if start == -1 and end == -1:
temp = u""
elif start > -1 and end > -1:
temp = item[start + len(match):end]
elif end > -1:
temp = item[:end]
elif start > -1:
temp = item[start + len(match):]
if ret:
endstr = item[end:item.find(">", item.find(endstr)) + 1]
temp = match + temp + endstr
item = item[item.find(temp, item.find(match)) + len(temp):]
lst2.append(temp)
lst = lst2
ret_lst += lst
return ret_lst
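# Usage sketch (illustrative only): parseDOM extracts tag attributes or bodies
# from raw markup without a full parser, which is how the commented-out
# popUPmenu() above reads ids and names out of each addon.xml:
#   xml = '<addon id="skin.confluence" name="Confluence" version="1.0"></addon>'
#   parseDOM(xml, 'addon', ret='id')    # -> [u'skin.confluence']
#   parseDOM(xml, 'addon', ret='name')  # -> [u'Confluence']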
|
gpl-2.0
| -3,929,239,323,546,178,600
| 40.207048
| 145
| 0.481664
| false
| 3.576673
| false
| false
| false
|
DLR-SC/prov-db-connector
|
provdbconnector/utils/converter.py
|
1
|
3299
|
from functools import reduce
from io import BufferedReader
from provdbconnector.exceptions.utils import ParseException, NoDocumentException
import six
from prov.model import ProvDocument
import logging
log = logging.getLogger(__name__)
def form_string(content):
"""
    Take a ProvDocument, string, bytes or BufferedReader and transform the content into a ProvDocument
    :param content: A ProvDocument, string, bytes or BufferedReader
:return: ProvDocument
"""
if isinstance(content, ProvDocument):
return content
elif isinstance(content, BufferedReader):
content = reduce(lambda total, a: total + a, content.readlines())
if type(content) is six.binary_type:
content_str = content[0:15].decode()
if content_str.find("{") > -1:
return ProvDocument.deserialize(content=content, format='json')
if content_str.find('<?xml') > -1:
return ProvDocument.deserialize(content=content, format='xml')
elif content_str.find('document') > -1:
return ProvDocument.deserialize(content=content, format='provn')
raise ParseException("Unsupported input type {}".format(type(content)))
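# Minimal usage sketch (illustrative, not part of the original module):
# form_string passes a ProvDocument straight through and sniffs the first bytes
# of anything else to choose between JSON, XML and PROV-N deserialization.
def _example_form_string():
    """Illustrative only: round-trip the module's own JSON output through form_string."""
    doc = ProvDocument()
    doc.add_namespace('ex', 'http://example.org/')
    assert form_string(doc) is doc                                     # a ProvDocument is returned unchanged
    return form_string(doc.serialize(format='json').encode('utf-8'))  # bytes starting with '{' take the JSON path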
def to_json(document=None):
"""
Try to convert a ProvDocument into the json representation
    :param document: The ProvDocument to convert
:type document: prov.model.ProvDocument
:return: Json string of the document
:rtype: str
"""
if document is None:
raise NoDocumentException()
return document.serialize(format='json')
def from_json(json=None):
"""
Try to convert a json string into a document
:param json: The json str
:type json: str
:return: Prov Document
:rtype: prov.model.ProvDocument
:raise: NoDocumentException
"""
if json is None:
raise NoDocumentException()
return ProvDocument.deserialize(source=json, format='json')
def to_provn(document=None):
"""
Try to convert a document into a provn representation
:param document: Prov document to convert
:type document: prov.model.ProvDocument
:return: The prov-n str
:rtype: str
:raise: NoDocumentException
"""
if document is None:
raise NoDocumentException()
return document.serialize(format='provn')
def from_provn(provn_str=None):
"""
Try to convert a provn string into a ProvDocument
:param provn_str: The string to convert
:type provn_str: str
:return: The Prov document
:rtype: ProvDocument
:raises: NoDocumentException
"""
if provn_str is None:
raise NoDocumentException()
return ProvDocument.deserialize(source=provn_str, format='provn')
def to_xml(document=None):
"""
Try to convert a document into an xml string
:param document: The ProvDocument to convert
    :type document: ProvDocument
:return: The xml string
:rtype: str
"""
if document is None:
raise NoDocumentException()
return document.serialize(format='xml')
def from_xml(xml_str=None):
"""
    Try to convert an XML string into a ProvDocument
:param xml_str: The xml string
:type xml_str: str
:return: The Prov document
:rtype: ProvDocument
"""
if xml_str is None:
raise NoDocumentException()
return ProvDocument.deserialize(source=xml_str, format='xml')
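# Round-trip sketch (illustrative, not part of the original module): each
# to_*/from_* pair above should reproduce an equivalent document, which gives a
# cheap sanity check for the converters.
def _example_round_trip():
    """Illustrative only."""
    doc = ProvDocument()
    doc.add_namespace('ex', 'http://example.org/')
    doc.entity('ex:dataset')
    assert from_json(to_json(doc)) == doc
    assert from_xml(to_xml(doc)) == doc
    return to_provn(doc)  # PROV-N output; note that parsing PROV-N back may not be supported by the prov package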
|
apache-2.0
| -8,106,690,724,174,002,000
| 26.491667
| 92
| 0.678387
| false
| 4.023171
| false
| false
| false
|
fangxingli/hue
|
desktop/libs/indexer/src/indexer/smart_indexer_tests.py
|
1
|
9419
|
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
import logging
from nose.tools import assert_equal, assert_true
from nose.plugins.skip import SkipTest
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access, add_to_group
from hadoop.pseudo_hdfs4 import is_live_cluster, shared_cluster
from indexer.file_format import ApacheCombinedFormat, RubyLogFormat, HueLogFormat
from indexer.fields import Field
from indexer.controller import CollectionManagerController
from indexer.operations import get_operator
from indexer.smart_indexer import Indexer
LOG = logging.getLogger(__name__)
def _test_fixed_type_format_generate_morphline(format_):
indexer = Indexer("test")
format_instance = format_()
morphline = indexer.generate_morphline_config("test_collection", {
"columns": [field.to_dict() for field in format_instance.fields],
"format": format_instance.get_format()
})
assert_true(isinstance(morphline, basestring))
def _test_generate_field_operation_morphline(operation_format):
fields = TestIndexer.simpleCSVFields[:]
fields[0]['operations'].append(operation_format)
indexer = Indexer("test")
    morphline = indexer.generate_morphline_config("test_collection", {
"columns": fields,
"format": TestIndexer.simpleCSVFormat
})
assert_true(isinstance(morphline, basestring))
class TestIndexer():
simpleCSVString = """id,Rating,Location,Name,Time
1,5,San Francisco,Good Restaurant,8:30pm
2,4,San Mateo,Cafe,11:30am
3,3,Berkeley,Sauls,2:30pm
"""
simpleCSVFields = [
{
"name": "id",
"type": "long",
"operations": [],
"keep": True,
"required": False
},
{
"name": "Rating",
"type": "long",
"operations": [],
"keep": True,
"required": False
},
{
"name": "Location",
"type": "string",
"operations": [],
"keep": True,
"required": False
},
{
"name": "Name",
"type": "string",
"operations": [],
"keep": True,
"required": False
},
{
"name": "Time",
"type": "string",
"operations": [],
"keep": True,
"required": False
}
]
simpleCSVFormat = {
'type': 'csv',
'fieldSeparator': ',',
'recordSeparator': '\n',
'hasHeader': True,
'quoteChar': '"'
}
def setUp(self):
self.c = make_logged_in_client(is_superuser=False)
grant_access("test", "test", "indexer")
add_to_group("test")
def test_guess_csv_format(self):
stream = StringIO.StringIO(TestIndexer.simpleCSVString)
indexer = Indexer("test")
guessed_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
fields = indexer.guess_field_types({"file":{"stream": stream, "name": "test.csv"}, "format": guessed_format})['columns']
# test format
expected_format = self.simpleCSVFormat
assert_equal(expected_format, guessed_format)
# test fields
expected_fields = self.simpleCSVFields
for expected, actual in zip(expected_fields, fields):
for key in ("name", "type"):
assert_equal(expected[key], actual[key])
def test_guess_format_invalid_csv_format(self):
indexer = Indexer("test")
stream = StringIO.StringIO(TestIndexer.simpleCSVString)
guessed_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
guessed_format["fieldSeparator"] = "invalid separator"
fields = indexer.guess_field_types({"file": {"stream": stream, "name": "test.csv"}, "format": guessed_format})['columns']
assert_equal(fields, [])
stream.seek(0)
guessed_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
guessed_format["recordSeparator"] = "invalid separator"
fields = indexer.guess_field_types({"file": {"stream": stream, "name": "test.csv"}, "format": guessed_format})['columns']
assert_equal(fields, [])
stream.seek(0)
guessed_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
guessed_format["quoteChar"] = "invalid quoteChar"
fields = indexer.guess_field_types({"file": {"stream": stream, "name": "test.csv"}, "format": guessed_format})['columns']
assert_equal(fields, [])
def test_generate_csv_morphline(self):
indexer = Indexer("test")
        morphline = indexer.generate_morphline_config("test_collection", {
"columns": self.simpleCSVFields,
"format": self.simpleCSVFormat
})
assert_true(isinstance(morphline, basestring))
def test_generate_apache_combined_morphline(self):
_test_fixed_type_format_generate_morphline(ApacheCombinedFormat)
def test_generate_ruby_logs_morphline(self):
_test_fixed_type_format_generate_morphline(RubyLogFormat)
def test_generate_hue_log_morphline(self):
_test_fixed_type_format_generate_morphline(HueLogFormat)
def test_generate_split_operation_morphline(self):
split_dict = get_operator('split').get_default_operation()
split_dict['fields'] = [
Field("test_field_1", "string").to_dict(),
Field("test_field_2", "string").to_dict()
]
_test_generate_field_operation_morphline(split_dict)
def test_generate_extract_uri_components_operation_morphline(self):
extract_uri_dict = get_operator('extract_uri_components').get_default_operation()
extract_uri_dict['fields'] = [
Field("test_field_1", "string").to_dict(),
Field("test_field_2", "string").to_dict()
]
_test_generate_field_operation_morphline(extract_uri_dict)
def test_generate_grok_operation_morphline(self):
grok_dict = get_operator('grok').get_default_operation()
grok_dict['fields'] = [
Field("test_field_1", "string").to_dict(),
Field("test_field_2", "string").to_dict()
]
_test_generate_field_operation_morphline(grok_dict)
def test_generate_convert_date_morphline(self):
convert_date_dict = get_operator('convert_date').get_default_operation()
_test_generate_field_operation_morphline(convert_date_dict)
def test_generate_geo_ip_morphline(self):
geo_ip_dict = get_operator('geo_ip').get_default_operation()
geo_ip_dict['fields'] = [
Field("test_field_1", "string").to_dict(),
Field("test_field_2", "string").to_dict()
]
_test_generate_field_operation_morphline(geo_ip_dict)
def test_generate_translate_morphline(self):
translate_dict = get_operator('translate').get_default_operation()
translate_dict['fields'] = [
Field("test_field_1", "string").to_dict(),
Field("test_field_2", "string").to_dict()
]
translate_dict['settings']['mapping'].append({"key":"key","value":"value"})
_test_generate_field_operation_morphline(translate_dict)
def test_generate_find_replace_morphline(self):
find_replace_dict = get_operator('find_replace').get_default_operation()
_test_generate_field_operation_morphline(find_replace_dict)
def test_end_to_end(self):
if not is_live_cluster():
raise SkipTest()
cluster = shared_cluster()
fs = cluster.fs
collection_name = "test_collection"
indexer = Indexer("test", fs=fs, jt=cluster.jt)
input_loc = "/tmp/test.csv"
# upload the test file to hdfs
fs.create(input_loc, data=TestIndexer.simpleCSVString, overwrite=True)
# open a filestream for the file on hdfs
stream = fs.open(input_loc)
# guess the format of the file
file_type_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
field_types = indexer.guess_field_types({"file":{"stream": stream, "name": "test.csv"}, "format": file_type_format})
format_ = field_types.copy()
format_['format'] = file_type_format
# find a field name available to use for the record's uuid
unique_field = indexer.get_unique_field(format_)
is_unique_generated = indexer.is_unique_generated(format_)
# generate morphline
morphline = indexer.generate_morphline_config(collection_name, format_, unique_field)
schema_fields = indexer.get_kept_field_list(format_['columns'])
if is_unique_generated:
schema_fields += [{"name": unique_field, "type": "string"}]
# create the collection from the specified fields
collection_manager = CollectionManagerController("test")
if collection_manager.collection_exists(collection_name):
collection_manager.delete_collection(collection_name, None)
collection_manager.create_collection(collection_name, schema_fields, unique_key_field=unique_field)
# index the file
indexer.run_morphline(collection_name, morphline, input_loc)
|
apache-2.0
| -8,800,027,289,137,888,000
| 31.818815
| 125
| 0.675656
| false
| 3.622692
| true
| false
| false
|
oVirt/ovirt-setup-lib
|
tests/commons.py
|
1
|
1744
|
#
# ovirt-setup-lib -- ovirt setup library
# Copyright (C) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
import unittest2
class BaseTestCase(unittest2.TestCase):
_patchers = {}
def mock_base(self):
if 'Base' not in self._patchers:
self._patchers['Base'] = 'otopi.base.Base'
def mock_plugin(self):
import otopi.plugin # imported here to make mock happy
assert otopi.plugin # assertion here to make pyflakes happy
if 'Plugin' not in self._patchers:
self.mock_base()
self._patchers['Plugin'] = 'otopi.plugin.PluginBase'
def mock_context(self):
import otopi.context # imported here to make mock happy
assert otopi.context # assertion here to make pyflakes happy
if 'Context' not in self._patchers:
self.mock_base()
self._patchers['Context'] = 'otopi.context.Context'
def mock_otopi(self):
self.mock_plugin()
self.mock_context()
def apply_patch(self):
for cls_name in self._patchers:
patcher = mock.patch(self._patchers[cls_name])
setattr(self, cls_name, patcher.start())
self.addCleanup(patcher.stop)
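# Usage sketch (illustrative, not part of the original helpers): a concrete test
# case first registers the otopi pieces it needs, then applies the patches so
# self.Plugin and self.Context are mock classes for the duration of each test:
#
#   class PluginTest(BaseTestCase):
#       def setUp(self):
#           self.mock_otopi()   # register otopi.plugin.PluginBase and otopi.context.Context
#           self.apply_patch()  # start the patchers; cleanup is registered automatically
#       def test_plugin_is_mocked(self):
#           self.assertTrue(hasattr(self, 'Plugin'))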
|
apache-2.0
| -5,747,025,881,998,846,000
| 31.90566
| 74
| 0.661697
| false
| 3.799564
| false
| false
| false
|
XiaodunServerGroup/medicalmooc
|
cms/djangoapps/contentstore/views/course.py
|
1
|
55485
|
# -*- coding: utf-8 -*-
#coding=utf-8
import Queue
import sys,os
from envs.common import PROJECT_ROOT
import xlrd
from student.views import do_institution_import_teacher_create_account,do_institution_import_student_create_account
import random
reload(sys)
sys.setdefaultencoding('utf8')
"""
Views related to operations on course objects
"""
import json
import random
import string # pylint: disable=W0402
import re
import bson
import socket
import urllib2
from Crypto.Cipher import DES
import base64
import hashlib
import analytics.basic
from datetime import *
from django.utils import timezone
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User, Group
from django_future.csrf import ensure_csrf_cookie
from django.conf import settings
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseRedirect, HttpResponse, Http404
from util.json_request import JsonResponse
from edxmako.shortcuts import render_to_response
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore.django import modulestore, loc_mapper
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore.exceptions import (
ItemNotFoundError, InvalidLocationError)
from xmodule.modulestore import Location
from xmodule.fields import Date
from contentstore.course_info_model import get_course_updates, update_course_updates, delete_course_update, get_course_update_items
from contentstore.utils import (
get_lms_link_for_item, add_extra_panel_tab, remove_extra_panel_tab,
get_modulestore)
from contentstore.utils import send_mail_update
from models.settings.course_details import CourseDetails, CourseSettingsEncoder
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from util.json_request import expect_json
from .access import has_course_access
from .tabs import initialize_course_tabs
from .component import (
OPEN_ENDED_COMPONENT_TYPES, NOTE_COMPONENT_TYPES,
ADVANCED_COMPONENT_POLICY_KEY)
from django_comment_common.models import assign_default_role
from django_comment_common.utils import seed_permissions_roles
from student.models import CourseEnrollment,UserProfile,UploadFileForm
from xmodule.html_module import AboutDescriptor
from xmodule.modulestore.locator import BlockUsageLocator, CourseLocator
from course_creators.views import get_course_creator_status, add_user_with_status_unrequested
from contentstore import utils
from student.roles import CourseInstructorRole, CourseStaffRole, CourseCreatorRole, GlobalStaff
from student import auth
from microsite_configuration import microsite
__all__ = ['course_info_handler', 'course_handler', 'course_info_update_handler',
'settings_handler',
'grading_handler',
'advanced_settings_handler',
'calendar_settings_handler',
'calendar_common',
'calendar_common_addevent',
'calendar_common_deleteevent',
'calendar_common_updateevent',
'calendar_settings_getevents',
'textbooks_list_handler',
'textbooks_detail_handler', 'course_audit_api', 'institution_upload_teacher', 'remove_institute_teacher', 'teacher_intro_edit', 'import_student']
WENJUAN_STATUS = {
    "0": "未发布",    # not published
    "1": "收集中",    # collecting responses
    "2": "已结束",    # closed
    "3": "暂停中",    # paused
    "4": "状态未明",  # status unknown
    "-1": "已删除",   # deleted
}
def _get_locator_and_course(package_id, branch, version_guid, block_id, user, depth=0):
"""
Internal method used to calculate and return the locator and course module
for the view functions in this file.
"""
locator = BlockUsageLocator(package_id=package_id, branch=branch, version_guid=version_guid, block_id=block_id)
if not has_course_access(user, locator):
raise PermissionDenied()
course_location = loc_mapper().translate_locator_to_location(locator)
course_module = modulestore().get_item(course_location, depth=depth)
return locator, course_module
def _get_institute(curr_user):
course_org = ""
u = UserProfile.objects.get(user_id=curr_user.id)
if u.profile_role == 'in':
course_org = u.name
elif u.profile_role == 'th' and u.institute:
course_org = UserProfile.objects.get(user_id=u.institute).name
print course_org.encode('utf-8')
return course_org
# pylint: disable=unused-argument
@login_required
def course_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
The restful handler for course specific requests.
It provides the course tree with the necessary information for identifying and labeling the parts. The root
    will typically be a 'course' object but may not be, especially as we support modules.
GET
html: return course listing page if not given a course id
html: return html page overview for the given course if given a course id
json: return json representing the course branch's index entry as well as dag w/ all of the children
replaced w/json docs where each doc has {'_id': , 'display_name': , 'children': }
POST
json: create a course, return resulting json
descriptor (same as in GET course/...). Leaving off /branch/draft would imply create the course w/ default
branches. Cannot change the structure contents ('_id', 'display_name', 'children') but can change the
index entry.
PUT
json: update this course (index entry not xblock) such as repointing head, changing display name, org,
package_id, prettyid. Return same json as above.
DELETE
json: delete this branch from this course (leaving off /branch/draft would imply delete the course)
"""
response_format = request.REQUEST.get('format', 'html')
if response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if request.method == 'GET':
return JsonResponse(_course_json(request, package_id, branch, version_guid, block))
elif request.method == 'POST': # not sure if this is only post. If one will have ids, it goes after access
return create_new_course(request)
elif not has_course_access(
request.user,
BlockUsageLocator(package_id=package_id, branch=branch, version_guid=version_guid, block_id=block)
):
raise PermissionDenied()
elif request.method == 'PUT':
raise NotImplementedError()
elif request.method == 'DELETE':
raise NotImplementedError()
else:
return HttpResponseBadRequest()
elif request.method == 'GET': # assume html
if package_id is None:
return course_listing(request)
else:
return course_index(request, package_id, branch, version_guid, block)
else:
return HttpResponseNotFound()
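# Request sketch (illustrative, not part of the original views): course_handler
# dispatches on the HTTP method and the Accept header, so the JSON overview of a
# single course can be fetched with Django's test client. The URL and course id
# below are hypothetical and depend on the project's URLconf.
#   from django.test import Client
#   client = Client()
#   client.login(username='staff_user', password='secret')  # hypothetical credentials
#   response = client.get('/course/MyOrg.CS101.2014', HTTP_ACCEPT='application/json')
#   tree = json.loads(response.content)  # {'display_name': ..., 'id': ..., 'category': ..., 'children': [...]}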
@login_required
def _course_json(request, package_id, branch, version_guid, block):
"""
Returns a JSON overview of a course
"""
__, course = _get_locator_and_course(
package_id, branch, version_guid, block, request.user, depth=None
)
return _xmodule_json(course, course.location.course_id)
def _xmodule_json(xmodule, course_id):
"""
Returns a JSON overview of an XModule
"""
locator = loc_mapper().translate_location(
course_id, xmodule.location, published=False, add_entry_if_missing=True
)
is_container = xmodule.has_children
result = {
'display_name': xmodule.display_name,
'id': unicode(locator),
'category': xmodule.category,
'is_draft': getattr(xmodule, 'is_draft', False),
'is_container': is_container,
}
if is_container:
result['children'] = [_xmodule_json(child, course_id) for child in xmodule.get_children()]
return result
def _accessible_courses_list(request):
"""
List all courses available to the logged in user by iterating through all the courses
"""
courses = modulestore('direct').get_courses()
# filter out courses that we don't have access too
def course_filter(course):
"""
Get courses to which this user has access
"""
if GlobalStaff().has_user(request.user):
return course.location.course != 'templates'
return (has_course_access(request.user, course.location)
# pylint: disable=fixme
# TODO remove this condition when templates purged from db
and course.location.course != 'templates'
)
courses = filter(course_filter, courses)
return courses
# pylint: disable=invalid-name
def _accessible_courses_list_from_groups(request):
"""
List all courses available to the logged in user by reversing access group names
"""
courses_list = []
course_ids = set()
user_staff_group_names = request.user.groups.filter(
Q(name__startswith='instructor_') | Q(name__startswith='staff_')
).values_list('name', flat=True)
# we can only get course_ids from role names with the new format (instructor_org/number/run or
# instructor_org.number.run but not instructor_number).
for user_staff_group_name in user_staff_group_names:
# to avoid duplication try to convert all course_id's to format with dots e.g. "edx.course.run"
if user_staff_group_name.startswith("instructor_"):
# strip starting text "instructor_"
course_id = user_staff_group_name[11:]
else:
# strip starting text "staff_"
course_id = user_staff_group_name[6:]
course_ids.add(course_id.replace('/', '.').lower())
for course_id in course_ids:
# get course_location with lowercase id
course_location = loc_mapper().translate_locator_to_location(
CourseLocator(package_id=course_id), get_course=True, lower_only=True
)
if course_location is None:
raise ItemNotFoundError(course_id)
course = modulestore('direct').get_course(course_location.course_id)
courses_list.append(course)
return courses_list
@login_required
@ensure_csrf_cookie
def course_listing(request):
"""
List all courses available to the logged in user
Try to get all courses by first reversing django groups and fallback to old method if it fails
Note: overhead of pymongo reads will increase if getting courses from django groups fails
"""
if GlobalStaff().has_user(request.user):
# user has global access so no need to get courses from django groups
courses = _accessible_courses_list(request)
else:
try:
courses = _accessible_courses_list_from_groups(request)
except ItemNotFoundError:
# user have some old groups or there was some error getting courses from django groups
# so fallback to iterating through all courses
courses = _accessible_courses_list(request)
# update location entry in "loc_mapper" for user courses (add keys 'lower_id' and 'lower_course_id')
for course in courses:
loc_mapper().create_map_entry(course.location)
def format_course_for_view(course):
"""
return tuple of the data which the view requires for each course
"""
# published = false b/c studio manipulates draft versions not b/c the course isn't pub'd
course_loc = loc_mapper().translate_location(
course.location.course_id, course.location, published=False, add_entry_if_missing=True
)
return (
course.display_name,
# note, couldn't get django reverse to work; so, wrote workaround
course_loc.url_reverse('course/', ''),
get_lms_link_for_item(course.location),
course.display_org_with_default,
course.display_number_with_default,
course.location.name
)
# questionnaire
# KEY: site(required) user(required) ctime(required format yyyy-mm-dd HH:MM) email(required) mobile
qparams = {
"site": '99999', # TODO: settings.WENJUANSITEID
"user": request.user.username,
"ctime": datetime.now().strftime("%Y-%m-%d %H:%M"),
"email": request.user.email
}
def sorted_url(params_hash={}):
pkeys = params_hash.keys()
pkeys.sort()
demd5_str = hashlib.md5("".join([params_hash[k] for k in pkeys]) + settings.WENJUAN_SECKEY).hexdigest()
return ("&".join(["".join([k, '=', v]) for k, v in params_hash.iteritems()]), demd5_str)
# demd5_qparams_str = hashlib.md5("".join([qparams[k] for k in qparams_keys]) + "9d15a674a6e621058f1ea9171413b7c0").hexdigest()
wenjuan_domain = settings.WENJUAN_DOMAIN
wenjuan_loginapi = "{}/openapi/login?{}&md5={}".format(wenjuan_domain, *sorted_url(qparams))
# get questionnaire list
qlist = []
try:
list_url = "{}/openapi/proj_list?{}&md5={}".format(wenjuan_domain, *sorted_url(qparams))
timeout = 10
socket.setdefaulttimeout(timeout)
req = urllib2.Request(list_url.replace(' ', '%20'))
# {"status": 1, "respondent_count": 0, "proj_id": "AzaYja", "ctime": "2014-08-08 15:23", "title": "测试问卷", "type": "survey"}
for wj in json.load(urllib2.urlopen(req)):
"""
list structure
[
title,
status,
reponse,
create time,
q url
result url
]
"""
qlist.append([
            wj.get('title', "未知"),  # "未知" = "unknown" (default title)
WENJUAN_STATUS[str(wj.get('status', 4))],
wj.get('respondent_count', 0),
wj.get('ctime', ''),
"{}/s/{}".format(wenjuan_domain, wj.get('proj_id', '')),
"{}/openapi/basic_chart/?{}&md5={}".format(wenjuan_domain, *sorted_url({"site": '99999', "user": request.user.username,"proj_id": wj.get("proj_id", "")}))
])
except:
print "=====error===== " * 5
curr_user = User.objects.get(username=request.user)
course_org = _get_institute(curr_user)
profile = UserProfile.objects.get(user_id=curr_user.id)
# get institute teacher user
userprofile_list = UserProfile.objects.all()
user_institute_teacher_list = []
for ul in userprofile_list:
if ul.institute == str(profile.user_id) and ul.profile_role == 'th':
u = User.objects.get(id=ul.user_id)
content = {
'id': int(u.id),
'username': u.username.encode('utf8'),
'email': u.email.encode('utf8'),
'name': ul.name.encode('utf8')
}
user_institute_teacher_list.append(content)
# import student
user_student_list = []
for sl in userprofile_list:
if sl.institute == str(profile.user_id) and sl.profile_role == 'st':
s = User.objects.get(id=sl.user_id)
student_context = {
'id': int(s.id),
'username': s.username.encode('utf8'),
'email': s.email.encode('utf8'),
'name': sl.name.encode('utf8')
}
user_student_list.append(student_context)
return render_to_response('index.html', {
'courses': [format_course_for_view(c) for c in courses if not isinstance(c, ErrorDescriptor)],
'user': request.user,
'request_course_creator_url': reverse('contentstore.views.request_course_creator'),
'course_creator_status': _get_course_creator_status(request.user),
'course_org': course_org,
'wenjuan_link': wenjuan_loginapi,
'qlist': qlist,
'profile': profile,
'user_institute_teacher_list': user_institute_teacher_list,
'user_student_list': user_student_list
})
@login_required
@ensure_csrf_cookie
def course_index(request, package_id, branch, version_guid, block):
"""
Display an editable course overview.
org, course, name: Attributes of the Location for the item to edit
"""
locator, course = _get_locator_and_course(
package_id, branch, version_guid, block, request.user, depth=3
)
lms_link = get_lms_link_for_item(course.location)
sections = course.get_children()
return render_to_response('overview.html', {
'context_course': course,
'lms_link': lms_link,
'sections': sections,
'course_graders': json.dumps(
CourseGradingModel.fetch(locator).graders
),
'parent_locator': locator,
'new_section_category': 'chapter',
'new_subsection_category': 'sequential',
'new_unit_category': 'vertical',
'category': 'vertical'
})
@expect_json
def create_new_course(request):
"""
Create a new course.
Returns the URL for the course overview page.
"""
if not auth.has_access(request.user, CourseCreatorRole()):
raise PermissionDenied()
org = request.json.get('org')
number = request.json.get('number')
display_name = request.json.get('display_name')
course_category = request.json.get('course_category')
course_level = request.json.get('course_level')
course_price = request.json.get('course_price')
run = request.json.get('run')
try:
dest_location = Location(u'i4x', org, number, u'course', run)
except InvalidLocationError as error:
return JsonResponse({
"ErrMsg": _("Unable to create course '{name}'.\n\n{err}").format(
name=display_name, err=error.message)})
# see if the course already exists
existing_course = None
try:
existing_course = modulestore('direct').get_item(dest_location)
except ItemNotFoundError:
pass
if existing_course is not None:
return JsonResponse({
'ErrMsg': _(
'There is already a course defined with the same '
'organization, course number, and course run. Please '
'change either organization or course number to be '
'unique.'
),
'OrgErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'
),
'CourseErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'
),
})
# dhm: this query breaks the abstraction, but I'll fix it when I do my suspended refactoring of this
# file for new locators. get_items should accept a query rather than requiring it be a legal location
course_search_location = bson.son.SON({
'_id.tag': 'i4x',
# cannot pass regex to Location constructor; thus this hack
# pylint: disable=E1101
'_id.org': re.compile(u'^{}$'.format(dest_location.org), re.IGNORECASE | re.UNICODE),
# pylint: disable=E1101
'_id.course': re.compile(u'^{}$'.format(dest_location.course), re.IGNORECASE | re.UNICODE),
'_id.category': 'course',
})
courses = modulestore().collection.find(course_search_location, fields=('_id'))
if courses.count() > 0:
return JsonResponse({
'ErrMsg': _(
'There is already a course defined with the same '
'organization and course number. Please '
'change at least one field to be unique.'),
'OrgErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'),
'CourseErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'),
})
# instantiate the CourseDescriptor and then persist it
# note: no system to pass
if display_name is None and course_category is None and course_level is None:
metadata = {}
else:
metadata = {'display_name': display_name, 'course_category': course_category, 'course_level': course_level, 'course_price': course_price}
modulestore('direct').create_and_save_xmodule(
dest_location,
metadata=metadata
)
new_course = modulestore('direct').get_item(dest_location)
# clone a default 'about' overview module as well
dest_about_location = dest_location.replace(
category='about',
name='overview'
)
overview_template = AboutDescriptor.get_template('overview.yaml')
modulestore('direct').create_and_save_xmodule(
dest_about_location,
system=new_course.system,
definition_data=overview_template.get('data')
)
initialize_course_tabs(new_course, request.user)
new_location = loc_mapper().translate_location(new_course.location.course_id, new_course.location, False, True)
# can't use auth.add_users here b/c it requires request.user to already have Instructor perms in this course
# however, we can assume that b/c this user had authority to create the course, the user can add themselves
CourseInstructorRole(new_location).add_users(request.user)
auth.add_users(request.user, CourseStaffRole(new_location), request.user)
# seed the forums
seed_permissions_roles(new_course.location.course_id)
# auto-enroll the course creator in the course so that "View Live" will
# work.
CourseEnrollment.enroll(request.user, new_course.location.course_id)
_users_assign_default_role(new_course.location)
# begin change showanswer to attempted
# it can also add other parameter on Advanced settings
course_location = loc_mapper().translate_locator_to_location(new_location)
course_module = get_modulestore(course_location).get_item(course_location)
data_json = {
"showanswer": "always",
"course_audit": "1"
}
CourseMetadata.update_from_json(course_module, data_json, True, request.user)
# end
return JsonResponse({'url': new_location.url_reverse("course/", "")})
def _users_assign_default_role(course_location):
"""
Assign 'Student' role to all previous users (if any) for this course
"""
enrollments = CourseEnrollment.objects.filter(course_id=course_location.course_id)
for enrollment in enrollments:
assign_default_role(course_location.course_id, enrollment.user)
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(["GET"])
def course_info_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
GET
html: return html for editing the course info handouts and updates.
"""
__, course_module = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
handouts_old_location = course_module.location.replace(category='course_info', name='handouts')
handouts_locator = loc_mapper().translate_location(
course_module.location.course_id, handouts_old_location, False, True
)
update_location = course_module.location.replace(category='course_info', name='updates')
update_locator = loc_mapper().translate_location(
course_module.location.course_id, update_location, False, True
)
return render_to_response(
'course_info.html',
{
'context_course': course_module,
'updates_url': update_locator.url_reverse('course_info_update/'),
'handouts_locator': handouts_locator,
'base_asset_url': StaticContent.get_base_url_path_for_course_assets(course_module.location) + '/'
}
)
else:
return HttpResponseBadRequest("Only supports html requests")
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def course_info_update_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None,
provided_id=None):
"""
restful CRUD operations on course_info updates.
provided_id should be none if it's new (create) and index otherwise.
GET
json: return the course info update models
POST
json: create an update
PUT or DELETE
json: change an existing update
"""
if 'application/json' not in request.META.get('HTTP_ACCEPT', 'application/json'):
return HttpResponseBadRequest("Only supports json requests")
course_location = loc_mapper().translate_locator_to_location(
CourseLocator(package_id=package_id), get_course=True
)
updates_location = course_location.replace(category='course_info', name=block)
print request.path
print 'course_location: ' , course_location
print 'updates_location:', updates_location
if provided_id == '':
provided_id = None
# check that logged in user has permissions to this item (GET shouldn't require this level?)
if not has_course_access(request.user, updates_location):
raise PermissionDenied()
if request.method == 'GET':
course_updates = get_course_updates(updates_location, provided_id)
if isinstance(course_updates, dict) and course_updates.get('error'):
return JsonResponse(get_course_updates(updates_location, provided_id), course_updates.get('status', 400))
else:
return JsonResponse(get_course_updates(updates_location, provided_id))
elif request.method == 'DELETE':
try:
return JsonResponse(delete_course_update(updates_location, request.json, provided_id, request.user))
except:
return HttpResponseBadRequest(
"Failed to delete",
content_type="text/plain"
)
# can be either and sometimes django is rewriting one to the other:
elif request.method in ('POST', 'PUT'):
if request.json.get('is_send_mail','false')=='true':
notice_course_update_to_student(request.json,course_location, package_id)
try:
return JsonResponse(update_course_updates(updates_location, request.json, provided_id, request.user))
except:
return HttpResponseBadRequest(
"Failed to save",
content_type="text/plain"
)
def notice_course_update_to_student(json,course_location,package_id):
    # Send the course-update notification e-mail to every enrolled student
queue = Queue.Queue()
course_module = modulestore().get_item(course_location, depth=0)
sub = "课程 [" + course_module.display_name_with_default + '] 更新提醒'
try:
        update_content = json['content']
        # The HTML body below is assembled in Chinese: it thanks the learner, inserts the update text and signs off from the course team.
update_content = "<p>感谢您参加人卫慕课"+course_module.display_name_with_default.encode("utf-8")+"课程,目前该门课程有新内容更新,具体如下:</p><p>"+"\n\n"+update_content+"\n\n"+"</p><p>为了保证您的学习进度,请尽快开始学习,"+course_module.display_name_with_default.encode("utf-8")+"课程团队竭诚为您服务。<br/>祝您学习愉快!<br/>"+course_module.display_name_with_default.encode("utf-8")+"课程团队</p>"
student_email_list = analytics.basic.enrolled_students_features(package_id.replace(".", "/"), ['email'])
print student_email_list
student_data_email_list = []
for i in student_email_list:
queue.put(i.values()[0])
for k in range(2):
threadname = 'Thread' + str(k)
send_mail_update(threadname, queue, update_content, sub)
print 'success'
# queue.join()
except:
raise
print 'failure'
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "PUT", "POST"))
@expect_json
def settings_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
Course settings for dates and about pages
GET
html: get the page
json: get the CourseDetails model
PUT
json: update the Course and About xblocks through the CourseDetails model
"""
locator, course_module = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
upload_asset_url = locator.url_reverse('assets/')
# see if the ORG of this course can be attributed to a 'Microsite'. In that case, the
# course about page should be editable in Studio
about_page_editable = not microsite.get_value_for_org(
course_module.location.org,
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
short_description_editable = settings.FEATURES.get('EDITABLE_SHORT_DESCRIPTION', True)
return render_to_response('settings.html', {
'context_course': course_module,
'course_locator': locator,
'lms_link_for_about_page': utils.get_lms_link_for_about_page(course_module.location),
'course_image_url': utils.course_image_url(course_module),
'details_url': locator.url_reverse('/settings/details/'),
'about_page_editable': about_page_editable,
'short_description_editable': short_description_editable,
'upload_asset_url': upload_asset_url
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
return JsonResponse(
CourseDetails.fetch(locator),
# encoder serializes dates, old locations, and instances
encoder=CourseSettingsEncoder
)
else: # post or put, doesn't matter.
return JsonResponse(
CourseDetails.update_from_json(locator, request.json, request.user),
encoder=CourseSettingsEncoder
)
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def grading_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None, grader_index=None):
"""
Course Grading policy configuration
GET
html: get the page
json no grader_index: get the CourseGrading model (graceperiod, cutoffs, and graders)
json w/ grader_index: get the specific grader
PUT
json no grader_index: update the Course through the CourseGrading model
json w/ grader_index: create or update the specific grader (create if index out of range)
"""
locator, course_module = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
course_details = CourseGradingModel.fetch(locator)
return render_to_response('settings_graders.html', {
'context_course': course_module,
'course_locator': locator,
'course_details': json.dumps(course_details, cls=CourseSettingsEncoder),
'grading_url': locator.url_reverse('/settings/grading/'),
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
if grader_index is None:
return JsonResponse(
CourseGradingModel.fetch(locator),
# encoder serializes dates, old locations, and instances
encoder=CourseSettingsEncoder
)
else:
return JsonResponse(CourseGradingModel.fetch_grader(locator, grader_index))
elif request.method in ('POST', 'PUT'): # post or put, doesn't matter.
# None implies update the whole model (cutoffs, graceperiod, and graders) not a specific grader
if grader_index is None:
return JsonResponse(
CourseGradingModel.update_from_json(locator, request.json, request.user),
encoder=CourseSettingsEncoder
)
else:
return JsonResponse(
CourseGradingModel.update_grader_from_json(locator, request.json, request.user)
)
elif request.method == "DELETE" and grader_index is not None:
CourseGradingModel.delete_grader(locator, grader_index, request.user)
return JsonResponse()
# pylint: disable=invalid-name
def _config_course_advanced_components(request, course_module):
"""
Check to see if the user instantiated any advanced components. This
is a hack that does the following :
1) adds/removes the open ended panel tab to a course automatically
if the user has indicated that they want to edit the
combinedopendended or peergrading module
2) adds/removes the notes panel tab to a course automatically if
the user has indicated that they want the notes module enabled in
their course
"""
# TODO refactor the above into distinct advanced policy settings
filter_tabs = True # Exceptional conditions will pull this to False
if ADVANCED_COMPONENT_POLICY_KEY in request.json: # Maps tab types to components
tab_component_map = {
'open_ended': OPEN_ENDED_COMPONENT_TYPES,
'notes': NOTE_COMPONENT_TYPES,
}
# Check to see if the user instantiated any notes or open ended
# components
for tab_type in tab_component_map.keys():
component_types = tab_component_map.get(tab_type)
found_ac_type = False
for ac_type in component_types:
if ac_type in request.json[ADVANCED_COMPONENT_POLICY_KEY]:
# Add tab to the course if needed
changed, new_tabs = add_extra_panel_tab(tab_type, course_module)
# If a tab has been added to the course, then send the
# metadata along to CourseMetadata.update_from_json
if changed:
course_module.tabs = new_tabs
request.json.update({'tabs': new_tabs})
# Indicate that tabs should not be filtered out of
# the metadata
filter_tabs = False # Set this flag to avoid the tab removal code below.
found_ac_type = True #break
# If we did not find a module type in the advanced settings,
# we may need to remove the tab from the course.
if not found_ac_type: # Remove tab from the course if needed
changed, new_tabs = remove_extra_panel_tab(tab_type, course_module)
if changed:
course_module.tabs = new_tabs
request.json.update({'tabs':new_tabs})
# Indicate that tabs should *not* be filtered out of
# the metadata
filter_tabs = False
return filter_tabs
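# Payload sketch (illustrative, not part of the original code): the helper above
# inspects the advanced-components list in the incoming settings JSON and keeps
# the course tabs in sync with it. A request body along the lines of
#   {"advanced_modules": ["combinedopenended", "peergrading"], ...}
# makes the open-ended panel tab appear, and dropping those entries on a later
# save removes it again ("advanced_modules" is the usual value of
# ADVANCED_COMPONENT_POLICY_KEY, which is imported from .component above).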
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_common(request, course_id):
"""
Course settings configuration
GET
html: get the page
json: get the model
PUT, POST
json: update the Course's settings. The payload is a json rep of the
metadata dicts. The dict can include a "unsetKeys" entry which is a list
of keys whose values to unset: i.e., revert to default
"""
return render_to_response('calendar_common.html', { })
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_settings_handler(request, package_id=None, branch=None, version_guid=None, block=None, tag=None):
locator, course_module = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
return render_to_response('settings_calendar.html', {
'package_id': package_id,
'context_course': course_module,
'advanced_dict': json.dumps(CourseMetadata.fetch(course_module)),
'advanced_settings_url': locator.url_reverse('settings/calendar')
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
return JsonResponse(CourseMetadata.fetch(course_module))
else:
# Whether or not to filter the tabs key out of the settings metadata
filter_tabs = _config_course_advanced_components(request, course_module)
try:
return JsonResponse(CourseMetadata.update_from_json(
course_module,
request.json,
filter_tabs=filter_tabs,
user=request.user,
))
except (TypeError, ValueError) as err:
return HttpResponseBadRequest(
"Incorrect setting format. {}".format(err),
content_type="text/plain"
)
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_common_addevent(request,course_id):
return JsonResponse(modulestore("course_calendar").save_event(course_id,{"title":request.GET.get("title"),"start":request.GET.get("start"),"end":request.GET.get("end")}))
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_common_delevent(request,course_id):
print request.GET.get("title")
print request.GET.get("start")
print request.GET.get("end")
return modulestore("course_calendar")._get_cals()
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_common_updateevent(request,course_id):
event_id = request.GET.get("id")
start = request.GET.get("start")
end = request.GET.get("end")
title = request.GET.get("title")
modulestore("course_calendar").update_event(course_id,event_id,{"title":title, "start": start, "end": end})
return JsonResponse({"success":1})
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_settings_getevents(request, course_id):
events_json = []
for event in modulestore("course_calendar").range_events(course_id,request.GET.get("start"),request.GET.get("end")):
events_json.append({"id":event["id"],"title":event["calendar"]["title"],"start":event["calendar"]["start"],"end":event["calendar"]["end"]})
return JsonResponse(events_json)
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_common_deleteevent(request, course_id):
events_json = []
modulestore("course_calendar").delete_event(request.GET.get("delete_id"))
return JsonResponse({"success":1})
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def advanced_settings_handler(request, package_id=None, branch=None, version_guid=None, block=None, tag=None):
"""
Course settings configuration
GET
html: get the page
json: get the model
PUT, POST
json: update the Course's settings. The payload is a json rep of the
metadata dicts. The dict can include a "unsetKeys" entry which is a list
of keys whose values to unset: i.e., revert to default
"""
locator, course_module = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
return render_to_response('settings_advanced.html', {
'context_course': course_module,
'advanced_dict': json.dumps(CourseMetadata.fetch(course_module)),
'advanced_settings_url': locator.url_reverse('settings/advanced')
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
return JsonResponse(CourseMetadata.fetch(course_module))
else:
# Whether or not to filter the tabs key out of the settings metadata
filter_tabs = _config_course_advanced_components(request, course_module)
try:
return JsonResponse(CourseMetadata.update_from_json(
course_module,
request.json,
filter_tabs=filter_tabs,
user=request.user,
))
except (TypeError, ValueError) as err:
return HttpResponseBadRequest(
"Incorrect setting format. {}".format(err),
content_type="text/plain"
)
class TextbookValidationError(Exception):
"An error thrown when a textbook input is invalid"
pass
def validate_textbooks_json(text):
"""
    Validate the given text as representing a list of PDF textbooks
"""
try:
textbooks = json.loads(text)
except ValueError:
raise TextbookValidationError("invalid JSON")
if not isinstance(textbooks, (list, tuple)):
raise TextbookValidationError("must be JSON list")
for textbook in textbooks:
validate_textbook_json(textbook)
# check specified IDs for uniqueness
all_ids = [textbook["id"] for textbook in textbooks if "id" in textbook]
unique_ids = set(all_ids)
if len(all_ids) > len(unique_ids):
raise TextbookValidationError("IDs must be unique")
return textbooks
def validate_textbook_json(textbook):
"""
    Validate the given JSON as representing a single PDF textbook
"""
if isinstance(textbook, basestring):
try:
textbook = json.loads(textbook)
except ValueError:
raise TextbookValidationError("invalid JSON")
if not isinstance(textbook, dict):
raise TextbookValidationError("must be JSON object")
if not textbook.get("tab_title"):
raise TextbookValidationError("must have tab_title")
tid = unicode(textbook.get("id", ""))
if tid and not tid[0].isdigit():
raise TextbookValidationError("textbook ID must start with a digit")
return textbook
def assign_textbook_id(textbook, used_ids=()):
"""
Return an ID that can be assigned to a textbook
and doesn't match the used_ids
"""
tid = Location.clean(textbook["tab_title"])
if not tid[0].isdigit():
# stick a random digit in front
tid = random.choice(string.digits) + tid
while tid in used_ids:
# add a random ASCII character to the end
tid = tid + random.choice(string.ascii_lowercase)
return tid
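# Validation sketch (illustrative, not part of the original code): a PUT body for
# textbooks_list_handler is a JSON list of textbook dicts; validation enforces the
# list shape, a tab_title per book, digit-leading ids and id uniqueness.
def _example_validate_textbooks():
    """Illustrative only."""
    body = json.dumps([
        {"id": "1_physics", "tab_title": "Physics"},
        {"tab_title": "Chemistry"},  # no id yet: one is assigned before saving
    ])
    textbooks = validate_textbooks_json(body)  # raises TextbookValidationError if malformed
    used = set(t["id"] for t in textbooks if "id" in t)
    return assign_textbook_id(textbooks[1], used)  # e.g. '7Chemistry'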
@require_http_methods(("GET", "POST", "PUT"))
@login_required
@ensure_csrf_cookie
def textbooks_list_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
A RESTful handler for textbook collections.
GET
html: return textbook list page (Backbone application)
json: return JSON representation of all textbooks in this course
POST
json: create a new textbook for this course
PUT
json: overwrite all textbooks in the course with the given list
"""
locator, course = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
store = get_modulestore(course.location)
if not "application/json" in request.META.get('HTTP_ACCEPT', 'text/html'):
# return HTML page
upload_asset_url = locator.url_reverse('assets/', '')
textbook_url = locator.url_reverse('/textbooks')
return render_to_response('textbooks.html', {
'context_course': course,
'textbooks': course.pdf_textbooks,
'upload_asset_url': upload_asset_url,
'textbook_url': textbook_url,
})
# from here on down, we know the client has requested JSON
if request.method == 'GET':
return JsonResponse(course.pdf_textbooks)
elif request.method == 'PUT':
try:
textbooks = validate_textbooks_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
tids = set(t["id"] for t in textbooks if "id" in t)
for textbook in textbooks:
if not "id" in textbook:
tid = assign_textbook_id(textbook, tids)
textbook["id"] = tid
tids.add(tid)
if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
course.tabs.append({"type": "pdf_textbooks"})
course.pdf_textbooks = textbooks
store.update_item(course, request.user.id)
return JsonResponse(course.pdf_textbooks)
elif request.method == 'POST':
# create a new textbook for the course
try:
textbook = validate_textbook_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
if not textbook.get("id"):
tids = set(t["id"] for t in course.pdf_textbooks if "id" in t)
textbook["id"] = assign_textbook_id(textbook, tids)
existing = course.pdf_textbooks
existing.append(textbook)
course.pdf_textbooks = existing
if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
tabs = course.tabs
tabs.append({"type": "pdf_textbooks"})
course.tabs = tabs
store.update_item(course, request.user.id)
resp = JsonResponse(textbook, status=201)
resp["Location"] = locator.url_reverse('textbooks', textbook["id"]).encode("utf-8")
return resp
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
def textbooks_detail_handler(request, tid, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
JSON API endpoint for manipulating a textbook via its internal ID.
Used by the Backbone application.
GET
json: return JSON representation of textbook
POST or PUT
json: update textbook based on provided information
DELETE
json: remove textbook
"""
__, course = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
store = get_modulestore(course.location)
matching_id = [tb for tb in course.pdf_textbooks
if unicode(tb.get("id")) == unicode(tid)]
if matching_id:
textbook = matching_id[0]
else:
textbook = None
if request.method == 'GET':
if not textbook:
return JsonResponse(status=404)
return JsonResponse(textbook)
    elif request.method in ('POST', 'PUT'):  # can be either; Django sometimes
        # rewrites one method to the other
try:
new_textbook = validate_textbook_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
new_textbook["id"] = tid
if textbook:
i = course.pdf_textbooks.index(textbook)
new_textbooks = course.pdf_textbooks[0:i]
new_textbooks.append(new_textbook)
new_textbooks.extend(course.pdf_textbooks[i + 1:])
course.pdf_textbooks = new_textbooks
else:
course.pdf_textbooks.append(new_textbook)
store.update_item(course, request.user.id)
return JsonResponse(new_textbook, status=201)
elif request.method == 'DELETE':
if not textbook:
return JsonResponse(status=404)
i = course.pdf_textbooks.index(textbook)
new_textbooks = course.pdf_textbooks[0:i]
new_textbooks.extend(course.pdf_textbooks[i + 1:])
course.pdf_textbooks = new_textbooks
store.update_item(course, request.user.id)
return JsonResponse()
def _get_course_creator_status(user):
"""
Helper method for returning the course creator status for a particular user,
taking into account the values of DISABLE_COURSE_CREATION and ENABLE_CREATOR_GROUP.
If the user passed in has not previously visited the index page, it will be
added with status 'unrequested' if the course creator group is in use.
"""
if user.is_staff:
course_creator_status = 'granted'
elif settings.FEATURES.get('DISABLE_COURSE_CREATION', False):
course_creator_status = 'disallowed_for_this_site'
elif settings.FEATURES.get('ENABLE_CREATOR_GROUP', False):
course_creator_status = get_course_creator_status(user)
if course_creator_status is None:
# User not grandfathered in as an existing user, has not previously visited the dashboard page.
# Add the user to the course creator admin table with status 'unrequested'.
add_user_with_status_unrequested(user)
course_creator_status = get_course_creator_status(user)
else:
course_creator_status = 'granted'
return course_creator_status
@csrf_exempt
def course_audit_api(request, course_id, operation):
re_json = {"success": False}
request_method = request.method
if request_method != "POST":
return JsonResponse(re_json)
    # get course location and module information
try:
course_location_info = course_id.split('.')
locator = BlockUsageLocator(package_id=course_id, branch='draft', version_guid=None, block_id=course_location_info[-1])
course_location = loc_mapper().translate_locator_to_location(locator)
course_module = get_modulestore(course_location).get_item(course_location)
instructors = CourseInstructorRole(locator).users_with_role()
if len(instructors) <= 0:
return JsonResponse(re_json)
user = instructors[0]
meta_json = {}
if operation == "pass":
meta_json["course_audit"] = 1
elif operation == "offline":
meta_json["course_audit"] = 0
else:
return JsonResponse(re_json)
re_json["success"] = True
CourseMetadata.update_from_json(course_module, meta_json, True, user)
return JsonResponse(re_json)
except:
return JsonResponse(re_json)
@csrf_exempt
def institution_upload_teacher(request):
    messg = ''
if request.method == 'POST':
use_id = request.GET['id']
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
filename = form.cleaned_data['file']
filename_suffix = filename.name.split('.')[-1]
if filename_suffix == 'xls' or filename_suffix == 'xlsx':
f = handle_uploaded_file(filename)
os.chmod(f, 0o777)
xls_insert_into_db(request, f, use_id)
                messg = 'Teachers imported successfully'
else:
                messg = 'The uploaded file must be an Excel file'
else:
form = UploadFileForm()
return JsonResponse({'messg': messg})
def handle_uploaded_file(f):
f_path = PROJECT_ROOT + '/static/upload/'+f.name.encode('utf8')
with open(f_path.encode('utf8'), 'wb+') as info:
for chunk in f.chunks():
info.write(chunk)
return f_path.encode('utf8')
def xls_insert_into_db(request, xlsfile, instutition_id):
wb = xlrd.open_workbook(xlsfile)
sh = wb.sheet_by_index(0)
rows = sh.nrows
def as_display_string(cell):
if cell.ctype in (2,3):
cell_value = int(cell.value)
else:
cell_value = cell.value
return str(cell_value).strip()
for i in range(1, rows):
username = sh.cell(i, 2).value
email = sh.cell(i, 0).value
password = as_display_string(sh.cell(i, 1))
name = sh.cell(i, 3).value
post_vars = {
'username': username,
'email': email,
'password': password,
'name': name
}
do_institution_import_teacher_create_account(post_vars, instutition_id)
return HttpResponseRedirect('/course')
def remove_institute_teacher(request):
institute_id = request.GET['id']
profile_user = UserProfile.objects.get(user_id=institute_id)
profile_user.institute = None
profile_user.save()
return JsonResponse('/course')
@login_required
@ensure_csrf_cookie
def teacher_intro_edit(request, id):
    if request.user.id != int(id):
raise Http404
if request.method == 'POST':
picurl = request.POST.get('picurl', '').strip()
shortbio = request.POST.get('shortbio', '')
profile = UserProfile.objects.get(user_id=id)
if picurl:
if not picurl.startswith('http://'):
picurl = 'http://' + picurl
profile.picurl = picurl
profile.shortbio = shortbio
profile.save()
else:
profile = UserProfile.objects.get(user_id=id)
if not profile.shortbio:
profile.shortbio = ""
return render_to_response('teacher_intro_edit.html', {'profile':profile})
# import_student
@csrf_exempt
def import_student(request):
    messg = ''
if request.method == 'POST':
use_id = request.GET['id']
print use_id
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
filename = form.cleaned_data['file']
filename_suffix = filename.name.split('.')[-1]
if filename_suffix == 'xls' or filename_suffix == 'xlsx':
f = handle_uploaded_file(filename)
os.chmod(f, 0o777)
xls_student_insert_into_db(request, f, use_id)
                messg = 'Students imported successfully'
else:
                messg = 'The imported file must be an Excel file'
else:
form = UploadFileForm()
return JsonResponse({'messg': messg})
def xls_student_insert_into_db(request, xlsfile, instutition_id):
wb = xlrd.open_workbook(xlsfile)
sh = wb.sheet_by_index(0)
rows = sh.nrows
def as_display_string(cell):
if cell.ctype in (2,3):
cell_value = int(cell.value)
else:
cell_value = cell.value
return str(cell_value).strip()
for i in range(1, rows):
username = sh.cell(i, 2).value
email = sh.cell(i, 0).value
password = as_display_string(sh.cell(i, 1))
name = sh.cell(i, 3).value
post_vars = {
'username': username,
'email': email,
'password': password,
'name': name
}
if len(User.objects.filter(username=post_vars['username'])) > 0:
post_vars['username'] = post_vars['username'] + str(random.randint(0,10000))
do_institution_import_student_create_account(post_vars, instutition_id)
return HttpResponseRedirect('/course')
|
agpl-3.0
| 6,436,884,706,447,725,000
| 38.430714
| 338
| 0.633716
| false
| 3.935762
| false
| false
| false
|
cuoretech/dowork
|
dowork/Model/Comment.py
|
1
|
5452
|
from database_config import *
from py2neo import neo4j, node
import json
# Class : Comment
# Methods:
# 1) db_init(self) - Private
# 2) getNode(self) - Returns the Comment Node
# 3) getName(self) - Returns name of Comment
# 4) setDescription(self, description) - Takes description as a string
# 5) getDescription(self) - Returns description
# 6) setContent(self, content) - Takes content in as a string
# 7) getContent(self) - Returns content as a string
# 8) setTime(self, time) - Set the time of when the post was created (in millis)
# 9) getTime(self) - Gets the time in millis
# 10) setOwner(self, owner) - owner is a User node, Owner.getNode()
# 11) getOwner(self) - Returns a User Node
# 12) setParent(self, parent) - parent is a Task, Post or Comment node
# 13) getParent(self) - Returns the parent node or None
# (a usage sketch follows the class definition below)
# Constants:
class Comment:
graph_db = None
commentInstance = None
def db_init(self):
if self.graph_db is None:
self.graph_db = neo4j.GraphDatabaseService(db_config['uri'])
#
# Function : getNode
# Arguments :
# Returns : instance Node
#
def getNode(self):
return self.commentInstance
#
# Function : Constructor
    # Arguments : URI of an existing Comment node OR name of the Comment
#
def __init__(self, URI=None, Name=None, Content=None, Owner=None, Parent=None):
global LBL_COMMENT
self.db_init()
temp = None
if URI is not None:
temp = neo4j.Node(URI)
elif Name is not None:
temp, = self.graph_db.create({"name": Name})
temp.add_labels(LBL_COMMENT)
else:
raise Exception("Name or URI not specified")
self.commentInstance = temp
if Content is not None:
self.commentInstance["content"] = Content
if Owner is not None:
global REL_CREATEDBY, LBL_USER
if LBL_USER in Owner.get_labels():
self.commentInstance.get_or_create_path(REL_CREATEDBY, Owner)
else:
raise Exception("The Node Provided is not a User")
if Parent is not None:
global REL_HASCOMMENT, LBL_TASK, LBL_POST, LBL_EVENT
if (LBL_TASK in Parent.get_labels()) or (LBL_POST in Parent.get_labels()):
Parent.get_or_create_path(REL_HASCOMMENT, self.commentInstance)
#
# Function : getName
# Arguments :
    # Returns : name of the comment
#
def getName(self):
if self.commentInstance is not None:
return self.commentInstance["name"]
else:
return None
#
# Function : setDescription
# Arguments : (String) description
#
def setDescription(self, description):
self.commentInstance["description"] = description
#
# Function : getDescription
# Arguments :
# Returns : (String) description
#
def getDescription(self):
return self.commentInstance["description"]
#
# Function : setContent
# Arguments : String content
# Returns :
#
def setContent(self, content):
self.commentInstance["content"] = content
#
# Function : getContent
# Arguments :
# Returns : (String) content
#
def getContent(self):
return self.commentInstance["content"]
#
# Function : setTime
# Arguments : String time (in milliseconds)
# Returns :
#
def setTime(self, time):
self.commentInstance["time"] = time
#
# Function : getTime
# Arguments :
# Returns : (String) time
#
def getTime(self):
return self.commentInstance["time"]
#
# Function : setOwner
# Arguments : (User Node) owner
# Returns : a 'Path' object containing nodes and relationships used
#
def setOwner(self, owner):
        global REL_HASOWNER, LBL_USER
if LBL_USER in owner.get_labels():
return self.commentInstance.get_or_create_path(REL_HASOWNER, owner)
else:
raise Exception("The Node Provided is not a User")
#
# Function : getOwner
# Arguments :
# Returns : a Owner Node or None (if there is no node)
#
def getOwner(self):
global REL_HASOWNER
relationships = list(self.commentInstance.match_outgoing(REL_HASOWNER))
if len(relationships) != 0:
return relationships[0].end_node
else:
return None
#
# Function : setParent
# Arguments : (Task or Post or Comment Node) parent
# Returns : a 'Path' object containing nodes and relationships used
#
def setParent(self, parent):
global REL_HASCOMMENT, LBL_POST, LBL_TASK, LBL_COMMENT
if (LBL_POST in parent.get_labels()) \
or (LBL_TASK in parent.get_labels()) \
or (LBL_COMMENT in parent.get_labels()):
return parent.get_or_create_path(REL_HASCOMMENT, self.commentInstance)
else:
raise Exception("The Node Provided is not a Post or Task")
#
# Function : getParent
# Arguments :
# Returns : a Parent Node or None (if there is no node)
#
def getParent(self):
global REL_HASCOMMENT
relationships = list(self.commentInstance.match_incoming(REL_HASCOMMENT))
if len(relationships) != 0:
return relationships[0].start_node
else:
return None
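# Usage sketch, illustrative only and not part of the original module. It
# assumes a Neo4j instance reachable through db_config['uri'] and an already
# created User node passed in as owner_node; the helper name is made up.
def _example_comment_usage(owner_node):
    comment = Comment(Name="First comment", Content="Looks good to me", Owner=owner_node)
    comment.setTime("1495000000000")  # creation time in milliseconds
    comment.setDescription("Review feedback")
    return comment.getOwner(), comment.getContent()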
|
apache-2.0
| 5,440,216,274,128,202,000
| 29.458101
| 111
| 0.586574
| false
| 3.850282
| false
| false
| false
|
stormrose-va/xobox
|
xobox/utils/loader.py
|
1
|
3215
|
# -*- coding: utf-8 -*-
"""
xobox.utils.loader
~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2017 by the Stormrose Project team, see AUTHORS.
:license: MIT License, see LICENSE for details.
"""
import importlib
import os
from xobox.utils import filters
def detect_class_modules(mod, parent=object):
"""
Detect available class modules or packages and return a dictionary of valid
class names, referring to the module they are contained within.
:param str mod: the module or package to be scanned for classes
:param parent: the class potential candidates must be derived off
:returns: dictionary of detected classes, mapping the class name to the module name in
which the class has been detected
"""
# initialise result dictionary
result = {}
candidates = []
# get a list of all files and directories inside the module
try:
package_instance = importlib.import_module(mod)
except ImportError:
return result
pkg_file = os.path.splitext(package_instance.__file__)
if pkg_file[0][-8:] == '__init__' and pkg_file[1][1:3] == 'py':
# it's a package, so we have to look for modules
gen_dir = os.listdir(os.path.dirname(os.path.realpath(package_instance.__file__)))
# only consider modules and packages, and exclude the base module
for file_candidate in filter(filters.modules, gen_dir):
# Python files are modules; the name needs to be without file ending
if file_candidate[-3:] == '.py':
file_candidate = file_candidate[:-3]
# try if the detected package or module can be imported
try:
class_module_candidate = importlib.import_module('.'.join([mod, file_candidate]))
except ImportError:
class_module_candidate = None
            # if the package or module could be imported, append it to the list of candidate modules.
if class_module_candidate:
candidates.append(class_module_candidate)
else:
candidates.append(package_instance)
# test if any of the candidates contain
# classes derived from the parent class
for candidate in candidates:
for member_candidate in filter(filters.members, dir(candidate)):
try:
if issubclass(getattr(candidate, member_candidate), parent) \
and getattr(candidate, member_candidate).__name__ != parent.__name__:
result[member_candidate] = candidate.__name__
except TypeError:
pass
# return the dictionary
return result
def load_member(mod, member):
"""
Load a member (function, class, ...) from a module and return it
:param str mod: the module or package name where the class should be loaded from
:param str member: the name of the member to be loaded
:returns: reference to the loaded member (i. e. class or function pointer)
"""
try:
mod = importlib.import_module(mod)
except ImportError:
return None
try:
result = getattr(mod, member)
except AttributeError:
return None
return result
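# Usage sketch, illustrative only and not part of the original module: discover
# every class derived from parent_cls inside a hypothetical package
# 'xobox.cli.commands' and return one instance of each. The package name is an
# assumption made purely for this example.
def _example_loader_usage(parent_cls):
    instances = []
    detected = detect_class_modules('xobox.cli.commands', parent=parent_cls)
    for class_name, module_name in detected.items():
        member = load_member(module_name, class_name)
        if member is not None:
            instances.append(member())
    return instances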
|
mit
| -4,367,746,705,474,805,000
| 33.945652
| 100
| 0.63297
| false
| 4.606017
| false
| false
| false
|
hideaki-t/sqlite-fts-python
|
tests/test_many.py
|
1
|
3058
|
from __future__ import print_function, unicode_literals
import sqlite3
import os
import tempfile
from faker import Factory
import pytest
import sqlitefts as fts
from sqlitefts import fts5
igo = pytest.importorskip('igo')
fake = Factory.create('ja_JP')
class IgoTokenizer(fts.Tokenizer):
def __init__(self, path=None):
self.tagger = igo.tagger.Tagger(path)
def tokenize(self, text):
for m in self.tagger.parse(text):
start = len(text[:m.start].encode('utf-8'))
yield m.surface, start, start + len(m.surface.encode('utf-8'))
class IgoTokenizer5(fts5.FTS5Tokenizer):
def __init__(self, path=None):
self.tagger = igo.tagger.Tagger(path)
def tokenize(self, text, flags=None):
for m in self.tagger.parse(text):
start = len(text[:m.start].encode('utf-8'))
yield m.surface, start, start + len(m.surface.encode('utf-8'))
@pytest.fixture
def conn():
f, db = tempfile.mkstemp()
try:
os.close(f)
c = sqlite3.connect(db)
create_table(c)
yield c
c.close()
finally:
os.remove(db)
@pytest.fixture
def nr():
return 10000
def create_table(c):
fts.register_tokenizer(c, 'igo', fts.make_tokenizer_module(IgoTokenizer()))
fts5.register_tokenizer(c, 'igo',
fts5.make_fts5_tokenizer(IgoTokenizer5()))
c.execute("CREATE VIRTUAL TABLE fts USING FTS4(tokenize=igo)")
c.execute("CREATE VIRTUAL TABLE fts5 USING FTS5(w, tokenize=igo)")
def test_insert_many_each(conn, nr):
with conn:
for i in range(nr):
conn.execute('INSERT INTO fts VALUES(?)', [fake.address()])
conn.execute('INSERT INTO fts5 VALUES(?)', [fake.address()])
assert conn.execute("SELECT COUNT(*) FROM fts").fetchall()[0][0] == nr
assert conn.execute("SELECT COUNT(*) FROM fts5").fetchall()[0][0] == nr
def test_insert_many_many(conn, nr):
with conn:
conn.executemany('INSERT INTO fts VALUES(?)', ([fake.address()]
for _ in range(nr)))
conn.executemany('INSERT INTO fts5 VALUES(?)', ([fake.address()]
for _ in range(nr)))
assert conn.execute("SELECT COUNT(*) FROM fts").fetchall()[0][0] == nr
assert conn.execute("SELECT COUNT(*) FROM fts5").fetchall()[0][0] == nr
def test_insert_many_use_select(conn, nr):
with conn:
conn.executemany('INSERT INTO fts VALUES(?)', ([fake.address()]
for _ in range(nr)))
conn.executemany('INSERT INTO fts5 VALUES(?)', ([fake.address()]
for _ in range(nr)))
with conn:
conn.execute('INSERT INTO fts SELECT * FROM fts')
conn.execute('INSERT INTO fts5 SELECT * FROM fts5')
assert conn.execute("SELECT COUNT(*) FROM fts").fetchall()[0][0] == nr * 2
assert conn.execute("SELECT COUNT(*) FROM fts5").fetchall()[0][0] == nr * 2
|
mit
| 4,999,511,117,820,486,000
| 32.604396
| 79
| 0.581099
| false
| 3.589202
| true
| false
| false
|
kooksee/TIOT
|
test/project/src/app/proto/protocol/LightProtocol.py
|
1
|
3560
|
# encoding=utf-8
import binascii
import json
from twisted.internet.protocol import Protocol
class LightProtocol(Protocol):
def __init__(self):
self.ip = ''
self.port = ''
def connectionMade(self):
# import socket
#self.transport.socket._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # as soon as a client connects, the server is triggered and starts sending data in a loop
self.ip = str(self.transport.client[0])
self.port = str(self.transport.client[1])
self.factory.numProtocols += 1
print 'conn build From ip:' + self.ip + ' port:' + self.port
print 'current conn num is ' + str(self.factory.numProtocols) + "\n"
self.divName = self.ip + ":" + self.port + "##" + self.__class__.__name__
self.factory.controller.add_client(self.divName, self.transport)
# import threading
# timer = threading.Timer(0, self.dataReceived, [""])
# timer.start()
return
def connectionLost(self, reason):
print 'conn lost reason --> ' + str(reason)
self.factory.numProtocols -= 1
print 'conn lost. ip:' + self.ip + ' port:' + self.port
print 'current conn num is ' + str(self.factory.numProtocols) + "\n"
self.factory.controller.del_client(self.divName)
return
def dataReceived(self, data):
# print 'recv data from ip:' + self.ip + ' port:' + self.port + ' data:' + "\n" + data
kdiv = self.factory.controller.online_session
# data = str(data)
data_hex = ''
data_hex1 = ''
if data == '1':
data_hex = ' 7e 00 16 10 00 00 7d 33 a2 00 40 71 54 0a ff fe 00 00 01 00 00 01 00 00 00 00 2c'
            data_hex = str(bytearray.fromhex(data_hex))  # none (all off)
data_hex1 = '7e 00 16 10 00 00 7d 33 a2 00 40 71 53 bc ff fe 00 00 01 00 00 01 00 00 00 00 7b'
            data_hex1 = str(bytearray.fromhex(data_hex1))  # fan
print data_hex
elif data == '2':
data_hex = ' 7e 00 16 10 00 00 7d 33 a2 00 40 71 54 0a ff fe 00 00 01 00 00 02 00 00 00 00 2b'
            data_hex = str(bytearray.fromhex(data_hex))  # light
print data_hex
data_hex1 = '7e 00 16 10 00 00 7d 33 a2 00 40 71 53 bc ff fe 00 00 01 00 00 02 00 00 00 00 7a'
            data_hex1 = str(bytearray.fromhex(data_hex1))  # light
elif data == '3':
data_hex = ' 7e 00 16 10 00 00 7d 33 a2 00 40 71 54 0a ff fe 00 00 01 00 00 03 00 00 00 00 2a'
data_hex = str(bytearray.fromhex(data_hex))
print data_hex
data_hex1 = '7e 00 16 10 00 00 7d 33 a2 00 40 71 53 bc ff fe 00 00 01 00 00 03 00 00 00 00 79'
data_hex1 = str(bytearray.fromhex(data_hex1))
elif data == '0':
data_hex = ' 7e 00 16 10 00 00 7d 33 a2 00 40 71 54 0a ff fe 00 00 01 00 00 00 00 00 00 00 2d'
data_hex = str(bytearray.fromhex(data_hex))
print data_hex
data_hex1 = '7e 00 16 10 00 00 7d 33 a2 00 40 71 53 bc ff fe 00 00 01 00 00 00 00 00 00 00 7c'
data_hex1 = str(bytearray.fromhex(data_hex1))
for div in kdiv:
if div == self.divName:
                print "Device " + div + " is forwarding data -->"
for div in kdiv:
# print div.split("##")[-1]," ",self.__class__.__name__
if div.split("##")[-1] == self.__class__.__name__:
kdiv[div].write(data_hex)
kdiv[div].write(data_hex1)
print div
                print "Forwarded to: " + div
print "\n"
return
|
gpl-2.0
| -6,347,997,613,682,692,000
| 35.125
| 106
| 0.563437
| false
| 3.118705
| false
| false
| false
|
manuvarkey/cmbautomiser
|
cmbautomiser/openpyxl/descriptors/excel.py
|
1
|
2252
|
from __future__ import absolute_import
#copyright openpyxl 2010-2015
"""
Excel specific descriptors
"""
from openpyxl.xml.constants import REL_NS
from openpyxl.compat import safe_string
from openpyxl.xml.functions import Element
from . import (
MatchPattern,
MinMax,
Integer,
String,
Typed,
Sequence,
)
from .serialisable import Serialisable
from openpyxl.utils.cell import RANGE_EXPR
class HexBinary(MatchPattern):
pattern = "[0-9a-fA-F]+$"
class UniversalMeasure(MatchPattern):
pattern = r"[0-9]+(\.[0-9]+)?(mm|cm|in|pt|pc|pi)"
class TextPoint(MinMax):
"""
Size in hundredths of points.
In theory other units of measurement can be used but these are unbounded
"""
expected_type = int
min = -400000
max = 400000
Coordinate = Integer
class Percentage(MatchPattern):
pattern = r"((100)|([0-9][0-9]?))(\.[0-9][0-9]?)?%"
class Extension(Serialisable):
uri = String()
def __init__(self,
uri=None,
):
self.uri = uri
class ExtensionList(Serialisable):
ext = Sequence(expected_type=Extension)
def __init__(self,
ext=(),
):
self.ext = ext
class Relation(String):
namespace = REL_NS
allow_none = True
class Base64Binary(MatchPattern):
# http://www.w3.org/TR/xmlschema11-2/#nt-Base64Binary
pattern = "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{4})$"
class Guid(MatchPattern):
# https://msdn.microsoft.com/en-us/library/dd946381(v=office.12).aspx
pattern = r"{[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}\}"
class CellRange(MatchPattern):
pattern = r"^[$]?([A-Za-z]{1,3})[$]?(\d+)(:[$]?([A-Za-z]{1,3})[$]?(\d+)?)?$|^[A-Za-z]{1,3}:[A-Za-z]{1,3}$"
allow_none = True
def __set__(self, instance, value):
if value is not None:
value = value.upper()
super(CellRange, self).__set__(instance, value)
def _explicit_none(tagname, value, namespace=None):
"""
Override serialisation because explicit none required
"""
if namespace is not None:
tagname = "{%s}%s" % (namespace, tagname)
return Element(tagname, val=safe_string(value))
|
gpl-3.0
| -4,408,916,376,172,166,000
| 20.245283
| 110
| 0.596803
| false
| 3.010695
| false
| false
| false
|
abingham/yapga
|
setup.py
|
1
|
1127
|
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
setup(
name='yapga',
version='1',
packages=find_packages(),
# metadata for upload to PyPI
author='Austin Bingham',
author_email='austin.bingham@gmail.com',
description="Yet Another Python Gerrit API",
license='MIT',
keywords='gerrit',
url='http://github.com/abingham/yapga',
# download_url = '',
long_description='An API for working with Gerrit '
'from Python via the REST API.',
zip_safe=True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
platforms='any',
setup_requires=[],
install_requires=[
'baker',
'matplotlib',
# 'nltk', <-- This doesn't work right now for python3.
'numpy',
],
entry_points={
'console_scripts': [
'yapga = yapga.app.main:main',
],
},
)
|
mit
| 5,641,950,641,626,381,000
| 25.209302
| 63
| 0.583851
| false
| 3.954386
| false
| false
| false
|
conradoplg/navi
|
libnavi/model/note.py
|
1
|
1196
|
from pubsub import pub
class Note(object):
def __init__(self, name, path):
self.name = name
self.path = path
def open(self, create=False):
self.text = u''
if self.path:
try:
with self.path.open('r') as f:
#TODO: detect encoding
self.text = f.read().decode('utf-8')
except EnvironmentError:
#TODO: add nicer message
if not create:
raise
except UnicodeDecodeError:
#TODO: add nicer message
raise
pub.sendMessage('note.opened', note=self)
def save(self, text):
self.text = text
if self.path:
try:
with self.path.open('w') as f:
f.write(text.encode('utf-8'))
except EnvironmentError:
#TODO: add nicer message
raise
except UnicodeEncodeError:
#TODO: add nicer message
raise
pub.sendMessage('note.saved', note=self)
def close(self):
pub.sendMessage('note.closed', note=self)
|
mit
| 5,750,002,080,750,410,000
| 29.692308
| 56
| 0.474916
| false
| 4.803213
| false
| false
| false
|
Fokko/incubator-airflow
|
airflow/sensors/external_task_sensor.py
|
1
|
7096
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from sqlalchemy import func
from airflow.exceptions import AirflowException
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.db import provide_session
from airflow.utils.decorators import apply_defaults
from airflow.utils.state import State
class ExternalTaskSensor(BaseSensorOperator):
"""
Waits for a different DAG or a task in a different DAG to complete for a
specific execution_date
:param external_dag_id: The dag_id that contains the task you want to
wait for
:type external_dag_id: str
:param external_task_id: The task_id that contains the task you want to
wait for. If ``None`` (default value) the sensor waits for the DAG
:type external_task_id: str or None
:param allowed_states: list of allowed states, default is ``['success']``
:type allowed_states: list
:param execution_delta: time difference with the previous execution to
look at, the default is the same execution_date as the current task or DAG.
For yesterday, use [positive!] datetime.timedelta(days=1). Either
execution_delta or execution_date_fn can be passed to
ExternalTaskSensor, but not both.
:type execution_delta: datetime.timedelta
:param execution_date_fn: function that receives the current execution date
and returns the desired execution dates to query. Either execution_delta
or execution_date_fn can be passed to ExternalTaskSensor, but not both.
:type execution_date_fn: callable
:param check_existence: Set to `True` to check if the external task exists (when
external_task_id is not None) or check if the DAG to wait for exists (when
external_task_id is None), and immediately cease waiting if the external task
or DAG does not exist (default value: False).
:type check_existence: bool
"""
template_fields = ['external_dag_id', 'external_task_id']
ui_color = '#19647e'
@apply_defaults
def __init__(self,
external_dag_id,
external_task_id=None,
allowed_states=None,
execution_delta=None,
execution_date_fn=None,
check_existence=False,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.allowed_states = allowed_states or [State.SUCCESS]
if external_task_id:
if not set(self.allowed_states) <= set(State.task_states):
raise ValueError(
'Valid values for `allowed_states` '
'when `external_task_id` is not `None`: {}'.format(State.task_states)
)
else:
if not set(self.allowed_states) <= set(State.dag_states):
raise ValueError(
'Valid values for `allowed_states` '
'when `external_task_id` is `None`: {}'.format(State.dag_states)
)
if execution_delta is not None and execution_date_fn is not None:
raise ValueError(
'Only one of `execution_delta` or `execution_date_fn` may '
'be provided to ExternalTaskSensor; not both.')
self.execution_delta = execution_delta
self.execution_date_fn = execution_date_fn
self.external_dag_id = external_dag_id
self.external_task_id = external_task_id
self.check_existence = check_existence
# we only check the existence for the first time.
self.has_checked_existence = False
@provide_session
def poke(self, context, session=None):
if self.execution_delta:
dttm = context['execution_date'] - self.execution_delta
elif self.execution_date_fn:
dttm = self.execution_date_fn(context['execution_date'])
else:
dttm = context['execution_date']
dttm_filter = dttm if isinstance(dttm, list) else [dttm]
serialized_dttm_filter = ','.join(
[datetime.isoformat() for datetime in dttm_filter])
self.log.info(
'Poking for %s.%s on %s ... ',
self.external_dag_id, self.external_task_id, serialized_dttm_filter
)
DM = DagModel
TI = TaskInstance
DR = DagRun
# we only do the check for 1st time, no need for subsequent poke
if self.check_existence and not self.has_checked_existence:
dag_to_wait = session.query(DM).filter(
DM.dag_id == self.external_dag_id
).first()
if not dag_to_wait:
raise AirflowException('The external DAG '
'{} does not exist.'.format(self.external_dag_id))
else:
if not os.path.exists(dag_to_wait.fileloc):
raise AirflowException('The external DAG '
'{} was deleted.'.format(self.external_dag_id))
if self.external_task_id:
refreshed_dag_info = DagBag(dag_to_wait.fileloc).get_dag(self.external_dag_id)
if not refreshed_dag_info.has_task(self.external_task_id):
raise AirflowException('The external task'
'{} in DAG {} does not exist.'.format(self.external_task_id,
self.external_dag_id))
self.has_checked_existence = True
if self.external_task_id:
# .count() is inefficient
count = session.query(func.count()).filter(
TI.dag_id == self.external_dag_id,
TI.task_id == self.external_task_id,
TI.state.in_(self.allowed_states),
TI.execution_date.in_(dttm_filter),
).scalar()
else:
# .count() is inefficient
count = session.query(func.count()).filter(
DR.dag_id == self.external_dag_id,
DR.state.in_(self.allowed_states),
DR.execution_date.in_(dttm_filter),
).scalar()
session.commit()
return count == len(dttm_filter)
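# Usage sketch, illustrative only and not part of Airflow itself: wait for the
# task 'load' of an upstream DAG scheduled one day earlier before the given
# DAG proceeds. The dag id and task id are assumptions made for the example.
def _example_external_task_sensor(dag):
    from datetime import timedelta
    return ExternalTaskSensor(
        task_id='wait_for_upstream_load',
        external_dag_id='upstream_etl',
        external_task_id='load',
        execution_delta=timedelta(days=1),
        check_existence=True,
        dag=dag,
    )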
|
apache-2.0
| 4,054,005,438,354,515,500
| 42.533742
| 103
| 0.609076
| false
| 4.211276
| false
| false
| false
|
stefanwebb/tensorflow-models
|
tensorflow_models/models/emvb_debug2.py
|
1
|
9483
|
# MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import math
import tensorflow_models as tf_models
def create_placeholders(settings):
x = tf.placeholder(tf.float32, shape=tf_models.batchshape(settings), name='samples')
z = tf.placeholder(tf.float32, shape=tf_models.latentshape(settings), name='codes')
return x, z
def create_prior(settings):
temperature = 0.5
prior_prob = settings['prior_prob']
dist_prior = tf.contrib.distributions.RelaxedBernoulli(temperature, probs=prior_prob)
return tf.identity(tf.cast(dist_prior.sample(sample_shape=tf_models.latentshape(settings)), dtype=tf.float32) * 2. - 1., name='p_z/sample')
def create_encoder(settings, reuse=True):
encoder_network = settings['architecture']['encoder']['fn']
temperature = 2./3.
x_placeholder = tf_models.samples_placeholder()
assert(not x_placeholder is None)
noise = tf.random_normal(tf_models.noiseshape(settings), 0, 1, dtype=tf.float32)
with tf.variable_scope('encoder', reuse=reuse):
logits_z = encoder_network(settings, x_placeholder, noise, is_training=False)
dist_z_given_x = tf.contrib.distributions.RelaxedBernoulli(temperature, logits=logits_z)
encoder = tf.identity(tf.cast(dist_z_given_x.sample(), dtype=tf.float32) * 2. - 1., name='q_z_given_x_eps/sample')
return encoder
def create_decoder(settings, reuse=True):
decoder_network = settings['architecture']['decoder']['fn']
z_placeholder = tf_models.codes_placeholder()
assert(not z_placeholder is None)
with tf.variable_scope('decoder', reuse=reuse):
logits_x = decoder_network(settings, z_placeholder, is_training=False)
#dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=logits_x, dtype=tf.float32)
#decoder = tf.identity(dist_x_given_z.sample(), name='p_x_given_z/sample')
decoder = tf.identity(tf.nn.sigmoid(logits_x), name='p_x_given_z/sample')
return decoder
def create_probs(settings, inputs, is_training, reuse=False):
temperature = 2./3.
encoder_network = settings['architecture']['encoder']['fn']
decoder_network = settings['architecture']['decoder']['fn']
critic_network = settings['architecture']['critic']['fn']
discriminator_network = settings['architecture']['discriminator']['fn']
# The noise is distributed i.i.d. N(0, 1)
noise = tf.random_normal(tf_models.noiseshape(settings), 0, 1, dtype=tf.float32)
# Create a tiled version of the inputs for adaptive contrast
inputs_ac = tf.tile(tf.expand_dims(tf_models.flatten(inputs), axis=0), multiples=[settings['ac_size'],1,1])
noise_ac = tf.random_normal((settings['ac_size'], settings['batch_size'], settings['noise_dimension']), 0, 1, dtype=tf.float32)
ac_batchshape = tf_models.batchshape(settings)
ac_batchshape[0] *= settings['ac_size']
#print(ac_batchshape)
#raise Exception()
# Use black-box inference network to sample z, given inputs and noise
with tf.variable_scope('encoder', reuse=reuse):
logits_z = encoder_network(settings, inputs, noise, is_training=is_training)
tf.get_variable_scope().reuse_variables()
logits_z_ac = encoder_network(settings, tf.reshape(inputs_ac, ac_batchshape), tf.reshape(noise_ac, (settings['ac_size']*settings['batch_size'], -1)), is_training=is_training)
#logits_z_ac = tf.reduce_mean(tf.reshape(logits_z_ac, (settings['ac_size'], settings['batch_size'], -1)), 0)
logits_z_ac = tf.reduce_logsumexp(tf.reshape(logits_z_ac, (settings['ac_size'], settings['batch_size'], -1)), 0) - tf.log(tf.constant(settings['ac_size'], dtype=tf.float32))
dist_z_given_x_ac = tf.contrib.distributions.Logistic(loc=logits_z_ac/temperature, scale=tf.constant(1./temperature, shape=logits_z_ac.shape))
logits_sample_ac = tf.identity(tf.cast(dist_z_given_x_ac.sample(), dtype=tf.float32))
z_sample_ac = tf.identity(tf.sigmoid(logits_sample_ac) * 2. - 1.)
dist_z_given_x = tf.contrib.distributions.Logistic(loc=logits_z/temperature, scale=tf.constant(1./temperature, shape=logits_z.shape))
logits_sample = tf.cast(dist_z_given_x.sample(), dtype=tf.float32)
z_sample = tf.sigmoid(logits_sample) * 2. - 1.
dist_prior_ac = tf.contrib.distributions.Logistic(loc=0., scale=1./temperature)
sample_prior_ac = tf.sigmoid(tf.cast(dist_prior_ac.sample(sample_shape=(settings['batch_size'], settings['latent_dimension'])), dtype=tf.float32))*2. - 1.
sample_for_discr = tf.identity(tf.sigmoid(logits_sample - logits_z_ac/temperature)*2. - 1., name='z/sample')
# Prior
temperature_prior = 0.5
prior_prob = settings['prior_prob']
logits_prior_prob = math.log(prior_prob / (1. - prior_prob))
dist_prior = tf.contrib.distributions.Logistic(loc=logits_prior_prob/temperature_prior, scale=1./temperature_prior)
logits_prior = tf.cast(dist_prior.sample(sample_shape=tf_models.latentshape(settings)), dtype=tf.float32)
z_prior = tf.identity(tf.sigmoid(logits_prior)*2. - 1., name='z/prior')
# Use generator to determine distribution of reconstructed input
with tf.variable_scope('decoder', reuse=reuse):
logits_x = decoder_network(settings, z_sample, is_training=is_training)
dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=tf_models.flatten(logits_x), dtype=tf.float32)
# Log likelihood of reconstructed inputs
lg_p_x_given_z = tf.identity(tf.reduce_sum(dist_x_given_z.log_prob(tf_models.flatten(inputs)), 1), name='p_x_given_z/log_prob')
lg_r_alpha = tf.identity(tf.reduce_sum(dist_z_given_x_ac.log_prob(logits_sample), 1), name='r_alpha/log_prob')
# Form interpolated variable
eps = tf.random_uniform([settings['batch_size'], 1], minval=0., maxval=1.)
#z_inter = eps*z_prior + (1. - eps)*z_sample
z_inter = tf.identity(eps*sample_for_discr + (1. - eps)*sample_prior_ac, name='z/interpolated')
#logits_inter = tf.identity(tf_models.safe_log(z_inter) - tf_models.safe_log(1. - z_inter), name='z/interpolated')
#print(logits_prior.shape, logits_sample.shape, logits_inter.shape)
#raise Exception()
# Critic D(x, z) for EMVB learning
with tf.variable_scope('critic', reuse=reuse):
critic = tf.identity(critic_network(settings, inputs, sample_for_discr, is_training=is_training), name='generator')
tf.get_variable_scope().reuse_variables()
prior_critic = tf.identity(critic_network(settings, inputs, sample_prior_ac, is_training=is_training), name='prior')
inter_critic = tf.identity(critic_network(settings, inputs, z_inter, is_training=is_training), name='inter')
# Discriminator T(x, z) for AVB learning
with tf.variable_scope('discriminator', reuse=reuse):
discriminator = tf.identity(discriminator_network(settings, inputs, sample_for_discr, is_training=is_training), name='generator')
tf.get_variable_scope().reuse_variables()
prior_discriminator = tf.identity(discriminator_network(settings, inputs, sample_prior_ac, is_training=is_training), name='prior')
x = tf.identity(inputs, name='x')
#print('inputs.name', inputs.name)
lg_p_z = tf.identity(tf.reduce_sum(dist_prior.log_prob(logits_sample), 1), name='p_z/log_prob')
return lg_p_x_given_z, critic, prior_critic, inter_critic, z_inter, discriminator, prior_discriminator, lg_p_z, lg_r_alpha
def lg_likelihood(x, z, settings, reuse=True, is_training=False):
decoder_network = settings['architecture']['decoder']['fn']
real_z = tf.sigmoid(z)*2. - 1.
with tf.variable_scope('model'):
with tf.variable_scope('decoder', reuse=reuse):
logits_x = decoder_network(settings, real_z, is_training=is_training)
dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=tf_models.flatten(logits_x), dtype=tf.float32)
return tf.reduce_sum(dist_x_given_z.log_prob(tf_models.flatten(x)), 1)
def lg_prior(z, settings, reuse=True, is_training=False):
temperature = 0.5
prior_prob = settings['prior_prob']
logits_prior_prob = math.log(prior_prob / (1. - prior_prob))
dist_prior = tf.contrib.distributions.Logistic(loc=logits_prior_prob/temperature, scale=1./temperature)
return tf.reduce_sum(tf_models.flatten(dist_prior.log_prob(z)), 1)
def sample_prior(settings):
temperature = 0.5
prior_prob = settings['prior_prob']
logits_prior_prob = math.log(prior_prob / (1. - prior_prob))
dist_prior = tf.contrib.distributions.Logistic(loc=logits_prior_prob/temperature, scale=1./temperature)
return tf.identity(tf.cast(dist_prior.sample(sample_shape=tf_models.latentshape(settings)), dtype=tf.float32), name='p_z/sample')
|
mit
| 9,025,626,535,856,672,000
| 49.71123
| 176
| 0.735949
| false
| 3.076898
| false
| false
| false
|
ingadhoc/stock
|
stock_ux/models/stock_warehouse_orderpoint.py
|
1
|
2683
|
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models, fields, api
class StockWarehouseOrderpoint(models.Model):
_name = 'stock.warehouse.orderpoint'
_inherit = ['stock.warehouse.orderpoint', 'mail.thread']
rotation_stdev = fields.Float(
compute='_compute_rotation',
        help="Standard deviation of the quantities delivered to customers "
        "in the last 120 days.",
digits='Product Unit of Measure',
)
warehouse_rotation_stdev = fields.Float(
compute='_compute_rotation',
        help="Standard deviation of the quantities delivered from this "
        "warehouse to customers in the last 120 days.",
digits='Product Unit of Measure',
)
rotation = fields.Float(
        help='Quantities delivered to customers in the last 120 days, '
        'divided by 4 to give a monthly figure '
        '(returns subtracted).',
compute='_compute_rotation',
digits='Product Unit of Measure',
)
warehouse_rotation = fields.Float(
        help='Quantities delivered from this warehouse to customers in the '
        'last 120 days, divided by 4 to give a monthly figure '
        '(returns subtracted).',
compute='_compute_rotation',
digits='Product Unit of Measure',
)
product_min_qty = fields.Float(tracking=True)
product_max_qty = fields.Float(tracking=True)
qty_multiple = fields.Float(tracking=True)
location_id = fields.Many2one(tracking=True)
product_id = fields.Many2one(tracking=True)
@api.depends('product_id', 'location_id')
def _compute_rotation(self):
warehouse_with_products = self.filtered('product_id')
(self - warehouse_with_products).update({
'rotation': 0.0,
'rotation_stdev': 0.0,
'warehouse_rotation_stdev': 0.0,
'warehouse_rotation': 0.0,
})
for rec in warehouse_with_products:
rotation, rotation_stdev = rec.product_id.get_product_rotation(
compute_stdev=True)
warehouse_rotation, warehouse_rotation_stdev = \
rec.product_id.get_product_rotation(
rec.warehouse_id.view_location_id, compute_stdev=True)
rec.update({
'rotation': rotation,
'rotation_stdev': rotation_stdev,
'warehouse_rotation_stdev': warehouse_rotation_stdev,
'warehouse_rotation': warehouse_rotation,
})
|
agpl-3.0
| 7,736,426,344,456,536,000
| 40.123077
| 78
| 0.5896
| false
| 3.948301
| false
| false
| false
|
EmilioK97/pydeepl
|
pydeepl/pydeepl.py
|
1
|
3012
|
import requests
BASE_URL = 'https://www2.deepl.com/jsonrpc'
LANGUAGES = {
'auto': 'Auto',
'DE': 'German',
'EN': 'English',
'FR': 'French',
'ES': 'Spanish',
'IT': 'Italian',
'NL': 'Dutch',
'PL': 'Polish'
}
JSONRPC_VERSION = '2.0'
class SplittingError(Exception):
def __init__(self, message):
super(SplittingError, self).__init__(message)
def split_sentences(text, lang='auto', json=False):
if text is None:
        raise SplittingError('Text can\'t be None.')
if lang not in LANGUAGES.keys():
raise SplittingError('Language {} not available.'.format(lang))
parameters = {
'jsonrpc': JSONRPC_VERSION,
'method': 'LMT_split_into_sentences',
'params': {
'texts': [
text
],
'lang': {
'lang_user_selected': lang
},
},
}
response = requests.post(BASE_URL, json=parameters).json()
if 'result' not in response:
raise SplittingError('DeepL call resulted in a unknown result.')
splitted_texts = response['result']['splitted_texts']
if len(splitted_texts) == 0:
raise SplittingError('Text could not be splitted.')
if json:
return response
return splitted_texts[0]
class TranslationError(Exception):
def __init__(self, message):
super(TranslationError, self).__init__(message)
def translate(text, to_lang, from_lang='auto', json=False):
if text is None:
raise TranslationError('Text can\'t be None.')
if len(text) > 5000:
raise TranslationError('Text too long (limited to 5000 characters).')
if to_lang not in LANGUAGES.keys():
raise TranslationError('Language {} not available.'.format(to_lang))
if from_lang is not None and from_lang not in LANGUAGES.keys():
raise TranslationError('Language {} not available.'.format(from_lang))
parameters = {
'jsonrpc': JSONRPC_VERSION,
'method': 'LMT_handle_jobs',
'params': {
'jobs': [
{
'kind':'default',
'raw_en_sentence': text
}
],
'lang': {
'user_preferred_langs': [
from_lang,
to_lang
],
'source_lang_user_selected': from_lang,
'target_lang': to_lang
},
},
}
response = requests.post(BASE_URL, json=parameters).json()
if 'result' not in response:
raise TranslationError('DeepL call resulted in a unknown result.')
translations = response['result']['translations']
if len(translations) == 0 \
or translations[0]['beams'] is None \
or translations[0]['beams'][0]['postprocessed_sentence'] is None:
raise TranslationError('No translations found.')
if json:
return response
return translations[0]['beams'][0]['postprocessed_sentence']
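# Usage sketch, illustrative only and not part of the original module. It
# issues real HTTP requests against the endpoint above, so it only documents
# the intended call pattern; the sample sentences are made up.
def _example_pydeepl_usage():
    sentences = split_sentences('Hallo Welt. Wie geht es dir?', lang='DE')
    translation = translate('Hallo Welt.', to_lang='EN', from_lang='DE')
    return sentences, translation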
|
mit
| -3,052,785,514,397,001,000
| 26.888889
| 78
| 0.557437
| false
| 4.075778
| false
| false
| false
|
ehouarn-perret/EhouarnPerret.Python.HackerRank
|
0 - Tutorials/30 Days of Code/Day 4 - Class vs. Instance.py
|
1
|
1605
|
"""
In this challenge, we're going to learn about the difference between a class and an instance;
because this is an Object Oriented concept, it's only enabled in certain languages.
Task
Write a Person class with an instance variable, age, and a constructor that takes an integer, initial_age, as a parameter.
The constructor must assign initial_age to _age after confirming the argument passed as _initial_age is not negative.
If a negative argument is passed as initial_age, the constructor should set age to 0 and print "Age is not valid, setting age to 0."
In addition, you must write the following instance methods:
age_1_year() should increase the instance variable _age by 1.
is_old() should perform the following conditional actions:
If age < 13, print "You are young.".
If age >= 13 and age < 18, print "You are a teenager.".
Otherwise, print "You are old.".
"""
class Person:
# Add some more code to run some checks on initial_age
def __init__(self, initial_age):
if initial_age < 0:
print("Age is not valid, setting age to 0.")
self._age = 0
else:
self._age = initial_age
# Do some computations in here and print out the correct statement to the console
def is_old(self):
if self._age < 13:
print("You are young.")
elif (13 <= self._age) and (self._age < 18):
print("You are a teenager.")
else:
print("You are old.")
# Increment the age of the person in here
def age_1_year(self):
self._age += 1
T = int(input())
for i in range(0, T):
age = int(input())
p = Person(age)
p.is_old()
for j in range(0, 3):
p.age_1_year()
p.is_old()
print("")
|
mit
| -2,068,696,048,610,836,000
| 30.470588
| 126
| 0.694704
| false
| 3.282209
| false
| false
| false
|
RCMRD/geonode
|
geonode/documents/views.py
|
1
|
16713
|
import json
from guardian.shortcuts import get_perms
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext, loader
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django_downloadview.response import DownloadResponse
from django.views.generic.edit import UpdateView, CreateView
from django.db.models import F
from django.forms.util import ErrorList
from geonode.utils import resolve_object
from geonode.security.views import _perms_info_json
from geonode.people.forms import ProfileForm
from geonode.base.forms import CategoryForm
from geonode.base.models import TopicCategory, ResourceBase
from geonode.documents.models import Document
from geonode.documents.forms import DocumentForm, DocumentCreateForm, DocumentReplaceForm
from geonode.documents.models import IMGTYPES
from geonode.utils import build_social_links
ALLOWED_DOC_TYPES = settings.ALLOWED_DOCUMENT_TYPES
_PERMISSION_MSG_DELETE = _("You are not permitted to delete this document")
_PERMISSION_MSG_GENERIC = _("You do not have permissions for this document.")
_PERMISSION_MSG_MODIFY = _("You are not permitted to modify this document")
_PERMISSION_MSG_METADATA = _(
"You are not permitted to modify this document's metadata")
_PERMISSION_MSG_VIEW = _("You are not permitted to view this document")
def _resolve_document(request, docid, permission='base.change_resourcebase',
msg=_PERMISSION_MSG_GENERIC, **kwargs):
'''
Resolve the document by the provided primary key and check the optional permission.
'''
return resolve_object(request, Document, {'pk': docid},
permission=permission, permission_msg=msg, **kwargs)
def document_detail(request, docid):
"""
    The view that shows the details of each document
"""
document = None
try:
document = _resolve_document(
request,
docid,
'base.view_resourcebase',
_PERMISSION_MSG_VIEW)
except Http404:
return HttpResponse(
loader.render_to_string(
'404.html', RequestContext(
request, {
})), status=404)
except PermissionDenied:
return HttpResponse(
loader.render_to_string(
'401.html', RequestContext(
request, {
'error_message': _("You are not allowed to view this document.")})), status=403)
if document is None:
return HttpResponse(
            'An unknown error has occurred.',
content_type="text/plain",
status=401
)
else:
try:
related = document.content_type.get_object_for_this_type(
id=document.object_id)
except:
related = ''
# Update count for popularity ranking,
        # but does not include admins or resource owners
if request.user != document.owner and not request.user.is_superuser:
Document.objects.filter(id=document.id).update(popular_count=F('popular_count') + 1)
metadata = document.link_set.metadata().filter(
name__in=settings.DOWNLOAD_FORMATS_METADATA)
context_dict = {
'perms_list': get_perms(request.user, document.get_self_resource()),
'permissions_json': _perms_info_json(document),
'resource': document,
'metadata': metadata,
'imgtypes': IMGTYPES,
'related': related}
if settings.SOCIAL_ORIGINS:
context_dict["social_links"] = build_social_links(request, document)
if getattr(settings, 'EXIF_ENABLED', False):
try:
from geonode.contrib.exif.utils import exif_extract_dict
exif = exif_extract_dict(document)
if exif:
context_dict['exif_data'] = exif
except:
print "Exif extraction failed."
return render_to_response(
"documents/document_detail.html",
RequestContext(request, context_dict))
def document_download(request, docid):
document = get_object_or_404(Document, pk=docid)
if not request.user.has_perm(
'base.download_resourcebase',
obj=document.get_self_resource()):
return HttpResponse(
loader.render_to_string(
'401.html', RequestContext(
request, {
'error_message': _("You are not allowed to view this document.")})), status=401)
return DownloadResponse(document.doc_file)
class DocumentUploadView(CreateView):
template_name = 'documents/document_upload.html'
form_class = DocumentCreateForm
def get_context_data(self, **kwargs):
context = super(DocumentUploadView, self).get_context_data(**kwargs)
context['ALLOWED_DOC_TYPES'] = ALLOWED_DOC_TYPES
return context
def form_valid(self, form):
"""
If the form is valid, save the associated model.
"""
self.object = form.save(commit=False)
self.object.owner = self.request.user
resource_id = self.request.POST.get('resource', None)
if resource_id:
self.object.content_type = ResourceBase.objects.get(id=resource_id).polymorphic_ctype
self.object.object_id = resource_id
# by default, if RESOURCE_PUBLISHING=True then document.is_published
# must be set to False
is_published = True
if settings.RESOURCE_PUBLISHING:
is_published = False
self.object.is_published = is_published
self.object.save()
self.object.set_permissions(form.cleaned_data['permissions'])
abstract = None
date = None
regions = []
keywords = []
bbox = None
if getattr(settings, 'EXIF_ENABLED', False):
try:
from geonode.contrib.exif.utils import exif_extract_metadata_doc
exif_metadata = exif_extract_metadata_doc(self.object)
if exif_metadata:
date = exif_metadata.get('date', None)
keywords.extend(exif_metadata.get('keywords', []))
bbox = exif_metadata.get('bbox', None)
abstract = exif_metadata.get('abstract', None)
except:
print "Exif extraction failed."
if getattr(settings, 'NLP_ENABLED', False):
try:
from geonode.contrib.nlp.utils import nlp_extract_metadata_doc
nlp_metadata = nlp_extract_metadata_doc(self.object)
if nlp_metadata:
regions.extend(nlp_metadata.get('regions', []))
keywords.extend(nlp_metadata.get('keywords', []))
except:
print "NLP extraction failed."
if abstract:
self.object.abstract = abstract
self.object.save()
if date:
self.object.date = date
self.object.date_type = "Creation"
self.object.save()
if len(regions) > 0:
self.object.regions.add(*regions)
if len(keywords) > 0:
self.object.keywords.add(*keywords)
if bbox:
bbox_x0, bbox_x1, bbox_y0, bbox_y1 = bbox
Document.objects.filter(id=self.object.pk).update(
bbox_x0=bbox_x0,
bbox_x1=bbox_x1,
bbox_y0=bbox_y0,
bbox_y1=bbox_y1)
if getattr(settings, 'SLACK_ENABLED', False):
try:
from geonode.contrib.slack.utils import build_slack_message_document, send_slack_message
send_slack_message(build_slack_message_document("document_new", self.object))
except:
print "Could not send slack message for new document."
return HttpResponseRedirect(
reverse(
'document_metadata',
args=(
self.object.id,
)))
class DocumentUpdateView(UpdateView):
template_name = 'documents/document_replace.html'
pk_url_kwarg = 'docid'
form_class = DocumentReplaceForm
queryset = Document.objects.all()
context_object_name = 'document'
def get_context_data(self, **kwargs):
context = super(DocumentUpdateView, self).get_context_data(**kwargs)
context['ALLOWED_DOC_TYPES'] = ALLOWED_DOC_TYPES
return context
def form_valid(self, form):
"""
If the form is valid, save the associated model.
"""
self.object = form.save()
return HttpResponseRedirect(
reverse(
'document_metadata',
args=(
self.object.id,
)))
@login_required
def document_metadata(
request,
docid,
template='documents/document_metadata.html'):
document = None
try:
document = _resolve_document(
request,
docid,
'base.change_resourcebase_metadata',
_PERMISSION_MSG_METADATA)
except Http404:
return HttpResponse(
loader.render_to_string(
'404.html', RequestContext(
request, {
})), status=404)
except PermissionDenied:
return HttpResponse(
loader.render_to_string(
'401.html', RequestContext(
request, {
'error_message': _("You are not allowed to edit this document.")})), status=403)
if document is None:
return HttpResponse(
            'An unknown error has occurred.',
content_type="text/plain",
status=401
)
else:
poc = document.poc
metadata_author = document.metadata_author
topic_category = document.category
if request.method == "POST":
document_form = DocumentForm(
request.POST,
instance=document,
prefix="resource")
category_form = CategoryForm(
request.POST,
prefix="category_choice_field",
initial=int(
request.POST["category_choice_field"]) if "category_choice_field" in request.POST else None)
else:
document_form = DocumentForm(instance=document, prefix="resource")
category_form = CategoryForm(
prefix="category_choice_field",
initial=topic_category.id if topic_category else None)
if request.method == "POST" and document_form.is_valid(
) and category_form.is_valid():
new_poc = document_form.cleaned_data['poc']
new_author = document_form.cleaned_data['metadata_author']
new_keywords = document_form.cleaned_data['keywords']
new_category = TopicCategory.objects.get(
id=category_form.cleaned_data['category_choice_field'])
if new_poc is None:
if poc is None:
poc_form = ProfileForm(
request.POST,
prefix="poc",
instance=poc)
else:
poc_form = ProfileForm(request.POST, prefix="poc")
if poc_form.is_valid():
if len(poc_form.cleaned_data['profile']) == 0:
# FIXME use form.add_error in django > 1.7
errors = poc_form._errors.setdefault('profile', ErrorList())
errors.append(_('You must set a point of contact for this resource'))
poc = None
                if poc_form.has_changed() and poc_form.is_valid():
new_poc = poc_form.save()
if new_author is None:
if metadata_author is None:
author_form = ProfileForm(request.POST, prefix="author",
instance=metadata_author)
else:
author_form = ProfileForm(request.POST, prefix="author")
if author_form.is_valid():
if len(author_form.cleaned_data['profile']) == 0:
# FIXME use form.add_error in django > 1.7
errors = author_form._errors.setdefault('profile', ErrorList())
errors.append(_('You must set an author for this resource'))
metadata_author = None
                if author_form.has_changed() and author_form.is_valid():
new_author = author_form.save()
if new_poc is not None and new_author is not None:
the_document = document_form.save()
the_document.poc = new_poc
the_document.metadata_author = new_author
the_document.keywords.add(*new_keywords)
Document.objects.filter(id=the_document.id).update(category=new_category)
if getattr(settings, 'SLACK_ENABLED', False):
try:
from geonode.contrib.slack.utils import build_slack_message_document, send_slack_messages
send_slack_messages(build_slack_message_document("document_edit", the_document))
except:
print "Could not send slack message for modified document."
return HttpResponseRedirect(
reverse(
'document_detail',
args=(
document.id,
)))
if poc is not None:
document_form.fields['poc'].initial = poc.id
poc_form = ProfileForm(prefix="poc")
poc_form.hidden = True
if metadata_author is not None:
document_form.fields['metadata_author'].initial = metadata_author.id
author_form = ProfileForm(prefix="author")
author_form.hidden = True
return render_to_response(template, RequestContext(request, {
"document": document,
"document_form": document_form,
"poc_form": poc_form,
"author_form": author_form,
"category_form": category_form,
}))
def document_search_page(request):
# for non-ajax requests, render a generic search page
if request.method == 'GET':
params = request.GET
elif request.method == 'POST':
params = request.POST
else:
return HttpResponse(status=405)
return render_to_response(
'documents/document_search.html',
RequestContext(
request,
{
'init_search': json.dumps(
params or {}),
"site": settings.SITEURL}))
@login_required
def document_remove(request, docid, template='documents/document_remove.html'):
try:
document = _resolve_document(
request,
docid,
'base.delete_resourcebase',
_PERMISSION_MSG_DELETE)
if request.method == 'GET':
return render_to_response(template, RequestContext(request, {
"document": document
}))
if request.method == 'POST':
if getattr(settings, 'SLACK_ENABLED', False):
slack_message = None
try:
from geonode.contrib.slack.utils import build_slack_message_document
slack_message = build_slack_message_document("document_delete", document)
except:
print "Could not build slack message for delete document."
document.delete()
try:
from geonode.contrib.slack.utils import send_slack_messages
send_slack_messages(slack_message)
except:
print "Could not send slack message for delete document."
else:
document.delete()
return HttpResponseRedirect(reverse("document_browse"))
else:
return HttpResponse("Not allowed", status=403)
except PermissionDenied:
return HttpResponse(
'You are not allowed to delete this document',
content_type="text/plain",
status=401
)
|
gpl-3.0
| -4,495,816,925,612,968,000
| 36.05765
| 113
| 0.569557
| false
| 4.566393
| false
| false
| false
|
intelxed/xed
|
tests/split-tests.py
|
1
|
2317
|
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
import os,sys,re,glob
def work():
files = glob.glob("*.txt")
for fn in files:
lines = file(fn).readlines()
lines = map(lambda x: x.strip(), lines)
ofn = fn + ".new"
of = open(ofn,'w')
for line in lines:
if line:
incodes, cmd = line.split(';') # incodes are tossed
cmd = cmd.strip()
codes = []
if ' -de ' in cmd:
codes.append('DEC')
codes.append('ENC')
elif ' -e ' in cmd:
codes.append('ENC')
elif ' -d ' in cmd:
codes.append('DEC')
elif 'ild' in cmd:
codes.append('DEC')
elif 'ex1' in cmd:
codes.append('DEC')
elif 'ex3' in cmd:
codes.append('ENC')
elif 'ex4' in cmd:
codes.append('DEC')
elif 'ex6' in cmd:
codes.append('DEC')
codes.append('ENC')
else:
codes.append('OTHER')
if 'C4' in cmd or 'C5' in cmd or 'c4' in cmd or 'c5' in cmd:
codes.append('AVX')
if ' 8f' in cmd: # total hack: FIXME, miss some xop stuff in c4 space
codes.append('XOP')
if ' v' in cmd or ' V' in cmd:
codes.append('AVX')
cs = " ".join(codes)
of.write("{0:20s} ; {1}\n".format(cs, cmd))
of.close()
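# Illustrative rewrite performed by work() on a made-up input line: a line such
# as "XYZ ; xed -d 48 89 c8" is written back to <file>.txt.new as "DEC" padded
# to 20 columns, followed by " ; xed -d 48 89 c8".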
if __name__ == "__main__":
work()
|
apache-2.0
| 1,102,383,340,792,462,800
| 32.1
| 86
| 0.482952
| false
| 3.9811
| false
| false
| false
|
ewdurbin/sentry-datadog-helpers
|
sentry_datadog_helpers/test/utils.py
|
1
|
5855
|
import sys
from functools import wraps
__all__ = ('surrogate', )
class surrogate(object):
"""
Originally Created by Kostia Balitsky
Contains Modifications by Griffin Smith
See:
https://github.com/ikostia/surrogate
and
https://github.com/glittershark/surrogate
Licensed as:
This code can be used, distributed and modified in any ways
one wants. If one gets any use of it author is already rewarded.
On the other hand, do not expect any guaranteed support
from author. Use it as is.
Add empty module stub that can be imported
for every subpath in path.
Those stubs can later be patched by mock's
patch decorator.
Example:
@surrogate('sys.my.cool.module1')
@surrogate('sys.my.cool.module2')
@mock.patch('sys.my.cool.module1', mock1)
@mock.patch('sys.my.cool.module2', mock2)
def function():
from sys.my import cool
from sys.my.cool import module1
from sys.my.cool import module2
"""
def __init__(self, path):
self.path = path
self.elements = self.path.split('.')
def __enter__(self):
self.prepared = self.prepare()
def __exit__(self, *args):
if self.prepared:
self.restore()
def __call__(self, func):
@wraps(func)
def _wrapper(*args, **kwargs):
prepared = self.prepare()
result = func(*args, **kwargs)
if prepared:
self.restore()
return result
return _wrapper
@property
def nothing_to_stub(self):
"""Check if there are no modules to stub"""
return len(self.elements) == 0
def prepare(self):
"""Preparations before actual function call"""
self._determine_existing_modules()
if self.nothing_to_stub:
return False
self._create_module_stubs()
self._save_base_module()
self._add_module_stubs()
return True
def restore(self):
"""Post-actions to restore initial state of the system"""
self._remove_module_stubs()
self._restore_base_module()
def _get_importing_path(self, elements):
"""Return importing path for a module that is last in elements list"""
ip = '.'.join(elements)
if self.known_path:
ip = self.known_path + '.' + ip
return ip
def _create_module_stubs(self):
"""Create stubs for all not-existing modules"""
# last module in our sequence
# it should be loaded
last_module = type(self.elements[-1], (object, ), {
'__all__': [],
'_importing_path': self._get_importing_path(self.elements)})
modules = [last_module]
# now we create a module stub for each
# element in a path.
# each module stub contains `__all__`
# list and a member that
# points to the next module stub in
# sequence
for element in reversed(self.elements[:-1]):
next_module = modules[-1]
module = type(element, (object, ), {
next_module.__name__: next_module,
'__all__': [next_module.__name__]})
modules.append(module)
self.modules = list(reversed(modules))
self.modules[0].__path__ = []
def _determine_existing_modules(self):
"""
Find out which of the modules
from specified path are already
imported (e.g. present in sys.modules)
those modules should not be replaced
by stubs.
"""
known = 0
while known < len(self.elements) and\
'.'.join(self.elements[:known + 1]) in sys.modules:
known += 1
self.known_path = '.'.join(self.elements[:known])
self.elements = self.elements[known:]
def _save_base_module(self):
"""
Remember state of the last of existing modules
The last of the sequence of existing modules
is the only one we will change. So we must
remember it's state in order to restore it
afterwards.
"""
try:
# save last of the existing modules
self.base_module = sys.modules[self.known_path]
except KeyError:
self.base_module = None
# save `__all__` attribute of the base_module
self.base_all = []
if hasattr(self.base_module, '__all__'):
self.base_all = list(self.base_module.__all__)
if self.base_module:
# change base_module's `__all__` attribute
# to include the first module of the sequence
self.base_module.__all__ = self.base_all + [self.elements[0]]
setattr(self.base_module, self.elements[0], self.modules[0])
def _add_module_stubs(self):
"""Push created module stubs into sys.modules"""
for i, module in enumerate(self.modules):
module._importing_path =\
self._get_importing_path(self.elements[:i + 1])
sys.modules[module._importing_path] = module
def _remove_module_stubs(self):
"""Remove fake modules from sys.modules"""
for module in reversed(self.modules):
if module._importing_path in sys.modules:
del sys.modules[module._importing_path]
def _restore_base_module(self):
"""Restore the state of the last existing module"""
if self.base_module:
self.base_module.__all__ = self.base_all
if not self.base_all:
del self.base_module.__all__
if hasattr(self.base_module, self.elements[0]):
delattr(self.base_module, self.elements[0])
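# Illustrative use as a context manager (the dotted module path below is made up):
#     with surrogate('my.fake.module'):
#         import my.fake.module   # resolves against the generated stub modules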
|
bsd-3-clause
| 1,469,070,899,879,551,500
| 33.040698
| 78
| 0.561913
| false
| 4.292522
| false
| false
| false
|
dagss/numpy_svn
|
numpy/polynomial/chebyshev.py
|
1
|
38012
|
"""
Objects for dealing with Chebyshev series.
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `chebdomain` -- Chebyshev series default domain, [-1,1].
- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
identically to 0.
- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
identically to 1.
- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
``f(x) = x``.
Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
- `chebval` -- evaluate a Chebyshev series at given points.
Calculus
--------
- `chebder` -- differentiate a Chebyshev series.
- `chebint` -- integrate a Chebyshev series.
Misc Functions
--------------
- `chebfromroots` -- create a Chebyshev series with specified roots.
- `chebroots` -- find the roots of a Chebyshev series.
- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
- `chebfit` -- least-squares fit returning a Chebyshev series.
- `chebtrim` -- trim leading coefficients from a Chebyshev series.
- `chebline` -- Chebyshev series of given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
Classes
-------
- `Chebyshev` -- A Chebyshev series class.
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
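As a concrete illustration of this correspondence, :math:`T_2(x) = (z^2 + z^{-2})/2`,
so the Chebyshev series ``[0, 0, 1]`` (i.e. ``T_2`` alone) corresponds to the
symmetric z-series ``[.5, 0, 0, 0, .5]``.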
References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
from __future__ import division
__all__ = ['chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline',
'chebadd', 'chebsub', 'chebmul', 'chebdiv', 'chebval', 'chebder',
'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', 'chebvander',
'chebfit', 'chebtrim', 'chebroots', 'Chebyshev']
import numpy as np
import numpy.linalg as la
import polyutils as pu
import warnings
from polytemplate import polytemplate
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
def _cseries_to_zseries(cs) :
"""Covert Chebyshev series to z-series.
Covert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
cs : 1-d ndarray
Chebyshev coefficients, ordered from low to high
Returns
-------
zs : 1-d ndarray
Odd length symmetric z-series, ordered from low to high.
"""
n = cs.size
zs = np.zeros(2*n-1, dtype=cs.dtype)
zs[n-1:] = cs/2
return zs + zs[::-1]
def _zseries_to_cseries(zs) :
"""Covert z-series to a Chebyshev series.
Covert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
zs : 1-d ndarray
Odd length symmetric z-series, ordered from low to high.
Returns
-------
cs : 1-d ndarray
Chebyshev coefficients, ordered from low to high.
"""
n = (zs.size + 1)//2
cs = zs[n-1:].copy()
cs[1:n] *= 2
return cs
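# Illustrative round trip for the two private helpers above (values worked out
# by hand; this comment is not part of the original module):
#     _cseries_to_zseries(np.array([1., 2., 3.]))           -> [1.5, 1., 1., 1., 1.5]
#     _zseries_to_cseries(np.array([1.5, 1., 1., 1., 1.5])) -> [1., 2., 3.]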
def _zseries_mul(z1, z2) :
"""Multiply two z-series.
Multiply two z-series to produce a z-series.
Parameters
----------
z1, z2 : 1-d ndarray
The arrays must be 1-d but this is not checked.
Returns
-------
product : 1-d ndarray
The product z-series.
Notes
-----
    This is simply convolution. If symmetric/anti-symmetric z-series are
denoted by S/A then the following rules apply:
S*S, A*A -> S
S*A, A*S -> A
"""
return np.convolve(z1, z2)
def _zseries_div(z1, z2) :
"""Divide the first z-series by the second.
Divide `z1` by `z2` and return the quotient and remainder as z-series.
Warning: this implementation only applies when both z1 and z2 have the
same symmetry, which is sufficient for present purposes.
Parameters
----------
z1, z2 : 1-d ndarray
The arrays must be 1-d and have the same symmetry, but this is not
checked.
Returns
-------
(quotient, remainder) : 1-d ndarrays
Quotient and remainder as z-series.
Notes
-----
This is not the same as polynomial division on account of the desired form
    of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
then the following rules apply:
S/S -> S,S
A/A -> S,A
The restriction to types of the same symmetry could be fixed but seems like
    unneeded generality. There is no natural form for the remainder in the case
where there is no symmetry.
"""
z1 = z1.copy()
z2 = z2.copy()
len1 = len(z1)
len2 = len(z2)
if len2 == 1 :
z1 /= z2
return z1, z1[:1]*0
elif len1 < len2 :
return z1[:1]*0, z1
else :
dlen = len1 - len2
scl = z2[0]
z2 /= scl
quo = np.empty(dlen + 1, dtype=z1.dtype)
i = 0
j = dlen
while i < j :
r = z1[i]
quo[i] = z1[i]
quo[dlen - i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
z1[j:j+len2] -= tmp
i += 1
j -= 1
r = z1[i]
quo[i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
quo /= scl
rem = z1[i+1:i-1+len2].copy()
return quo, rem
def _zseries_der(zs) :
"""Differentiate a z-series.
The derivative is with respect to x, not z. This is achieved using the
chain rule and the value of dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to differentiate.
Returns
-------
derivative : z-series
The derivative
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
multiplying the value of zs by two also so that the two cancels in the
division.
"""
n = len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs *= np.arange(-n, n+1)*2
d, r = _zseries_div(zs, ns)
return d
def _zseries_int(zs) :
"""Integrate a z-series.
The integral is with respect to x, not z. This is achieved by a change
of variable using dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to integrate
Returns
-------
integral : z-series
The indefinite integral
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
dividing the resulting zs by two.
"""
n = 1 + len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs = _zseries_mul(zs, ns)
div = np.arange(-n, n+1)*2
zs[:n] /= div[:n]
zs[n+1:] /= div[n+1:]
zs[n] = 0
return zs
#
# Chebyshev series functions
#
def poly2cheb(pol) :
"""
poly2cheb(pol)
Convert a polynomial to a Chebyshev series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Chebyshev series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-d array containing the polynomial coefficients
Returns
-------
cs : ndarray
1-d array containing the coefficients of the equivalent Chebyshev
series.
See Also
--------
cheb2poly
Notes
-----
Note that a consequence of the input needing to be array_like and that
the output is an ndarray, is that if one is going to use this function
to convert a Polynomial instance, P, to a Chebyshev instance, T, the
usage is ``T = Chebyshev(poly2cheb(P.coef))``; see Examples below.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = P.Chebyshev(P.poly2cheb(p.coef))
>>> c
Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])
"""
[pol] = pu.as_series([pol])
pol = pol[::-1]
zs = pol[:1].copy()
x = np.array([.5, 0, .5], dtype=pol.dtype)
for i in range(1, len(pol)) :
zs = _zseries_mul(zs, x)
zs[i] += pol[i]
return _zseries_to_cseries(zs)
def cheb2poly(cs) :
"""
cheb2poly(cs)
Convert a Chebyshev series to a polynomial.
Convert an array representing the coefficients of a Chebyshev series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
cs : array_like
1-d array containing the Chebyshev series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-d array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2cheb
Notes
-----
Note that a consequence of the input needing to be array_like and that
the output is an ndarray, is that if one is going to use this function
to convert a Chebyshev instance, T, to a Polynomial instance, P, the
usage is ``P = Polynomial(cheb2poly(T.coef))``; see Examples below.
Examples
--------
>>> from numpy import polynomial as P
>>> c = P.Chebyshev(np.arange(4))
>>> c
Chebyshev([ 0., 1., 2., 3.], [-1., 1.])
>>> p = P.Polynomial(P.cheb2poly(c.coef))
>>> p
Polynomial([ -2., -8., 4., 12.], [-1., 1.])
"""
[cs] = pu.as_series([cs])
pol = np.zeros(len(cs), dtype=cs.dtype)
quo = _cseries_to_zseries(cs)
x = np.array([.5, 0, .5], dtype=pol.dtype)
for i in range(0, len(cs) - 1) :
quo, rem = _zseries_div(quo, x)
pol[i] = rem[0]
pol[-1] = quo[0]
return pol
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Chebyshev default domain.
chebdomain = np.array([-1,1])
# Chebyshev coefficients representing zero.
chebzero = np.array([0])
# Chebyshev coefficients representing one.
chebone = np.array([1])
# Chebyshev coefficients representing the identity x.
chebx = np.array([0,1])
def chebline(off, scl) :
"""
Chebyshev series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Chebyshev series for
``off + scl*x``.
See Also
--------
polyline
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebline(3,2)
array([3, 2])
>>> C.chebval(-3, C.chebline(3,2)) # should be -3
-3.0
"""
if scl != 0 :
return np.array([off,scl])
else :
return np.array([off])
def chebfromroots(roots) :
"""
Generate a Chebyshev series with the given roots.
Return the array of coefficients for the C-series whose roots (a.k.a.
"zeros") are given by *roots*. The returned array of coefficients is
ordered from lowest order "term" to highest, and zeros of multiplicity
greater than one must be included in *roots* a number of times equal
to their multiplicity (e.g., if `2` is a root of multiplicity three,
then [2,2,2] must be in *roots*).
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-d array of the C-series' coefficients, ordered from low to
high. If all roots are real, ``out.dtype`` is a float type;
otherwise, ``out.dtype`` is a complex type, even if all the
coefficients in the result are real (see Examples below).
See Also
--------
polyfromroots
Notes
-----
What is returned are the :math:`c_i` such that:
.. math::
\\sum_{i=0}^{n} c_i*T_i(x) = \\prod_{i=0}^{n} (x - roots[i])
where ``n == len(roots)`` and :math:`T_i(x)` is the `i`-th Chebyshev
(basis) polynomial over the domain `[-1,1]`. Note that, unlike
`polyfromroots`, due to the nature of the C-series basis set, the
above identity *does not* imply :math:`c_n = 1` identically (see
Examples).
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.5+0.j, 0.0+0.j, 0.5+0.j])
"""
if len(roots) == 0 :
return np.ones(1)
else :
[roots] = pu.as_series([roots], trim=False)
prd = np.array([1], dtype=roots.dtype)
for r in roots :
fac = np.array([.5, -r, .5], dtype=roots.dtype)
prd = _zseries_mul(fac, prd)
return _zseries_to_cseries(prd)
def chebadd(c1, c2):
"""
Add one Chebyshev series to another.
Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Chebyshev series of their sum.
See Also
--------
chebsub, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Chebyshev series
is a Chebyshev series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] += c2
ret = c1
else :
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebsub(c1, c2):
"""
Subtract one Chebyshev series from another.
Returns the difference of two Chebyshev series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their difference.
See Also
--------
chebadd, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Chebyshev
series is a Chebyshev series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebsub(c1,c2)
array([-2., 0., 2.])
>>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] -= c2
ret = c1
else :
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebmul(c1, c2):
"""
Multiply one Chebyshev series by another.
Returns the product of two Chebyshev series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their product.
See Also
--------
chebadd, chebsub, chebdiv, chebpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Chebyshev polynomial basis set. Thus, to express
the product as a C-series, it is typically necessary to "re-project"
the product onto said basis set, which typically produces
"un-intuitive" (but correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebmul(c1,c2) # multiplication requires "reprojection"
array([ 6.5, 12. , 12. , 4. , 1.5])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
prd = _zseries_mul(z1, z2)
ret = _zseries_to_cseries(prd)
return pu.trimseq(ret)
def chebdiv(c1, c2):
"""
Divide one Chebyshev series by another.
Returns the quotient-with-remainder of two Chebyshev series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Chebyshev series coefficients representing the quotient and
remainder.
See Also
--------
chebadd, chebsub, chebmul, chebpow
Notes
-----
In general, the (polynomial) division of one C-series by another
results in quotient and remainder terms that are not in the Chebyshev
polynomial basis set. Thus, to express these results as C-series, it
is typically necessary to "re-project" the results onto said basis
set, which typically produces "un-intuitive" (but correct) results;
see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> C.chebdiv(c2,c1) # neither "intuitive"
(array([ 0., 2.]), array([-2., -4.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0 :
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2 :
return c1[:1]*0, c1
elif lc2 == 1 :
return c1/c2[-1], c1[:1]*0
else :
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
quo, rem = _zseries_div(z1, z2)
quo = pu.trimseq(_zseries_to_cseries(quo))
rem = pu.trimseq(_zseries_to_cseries(rem))
return quo, rem
def chebpow(cs, pow, maxpower=16) :
"""Raise a Chebyshev series to a power.
Returns the Chebyshev series `cs` raised to the power `pow`. The
    argument `cs` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``
Parameters
----------
cs : array_like
1d array of chebyshev series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
        to unmanageable size. Default is 16.
Returns
-------
coef : ndarray
Chebyshev series of power.
See Also
--------
chebadd, chebsub, chebmul, chebdiv
Examples
--------
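    A minimal, hand-checked example: squaring the series ``T_0 + 2*T_1 + 3*T_2``
    and re-projecting the product onto the Chebyshev basis gives
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebpow((1, 2, 3), 2).tolist()
    [7.5, 10.0, 8.0, 6.0, 4.5]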
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
power = int(pow)
if power != pow or power < 0 :
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower :
raise ValueError("Power is too large")
elif power == 0 :
return np.array([1], dtype=cs.dtype)
elif power == 1 :
return cs
else :
# This can be made more efficient by using powers of two
# in the usual way.
zs = _cseries_to_zseries(cs)
prd = zs
for i in range(2, power + 1) :
prd = np.convolve(prd, zs)
return _zseries_to_cseries(prd)
def chebder(cs, m=1, scl=1) :
"""
Differentiate a Chebyshev series.
Returns the series `cs` differentiated `m` times. At each iteration the
result is multiplied by `scl` (the scaling factor is for use in a linear
change of variable). The argument `cs` is the sequence of coefficients
from lowest order "term" to highest, e.g., [1,2,3] represents the series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
cs: array_like
1-d array of Chebyshev series coefficients ordered from low to high.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
Returns
-------
der : ndarray
Chebyshev series of the derivative.
See Also
--------
chebint
Notes
-----
In general, the result of differentiating a C-series needs to be
"re-projected" onto the C-series basis set. Thus, typically, the
result of this function is "un-intuitive," albeit correct; see Examples
section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> cs = (1,2,3,4)
>>> C.chebder(cs)
array([ 14., 12., 24.])
>>> C.chebder(cs,3)
array([ 96.])
>>> C.chebder(cs,scl=-1)
array([-14., -12., -24.])
>>> C.chebder(cs,2,-1)
array([ 12., 96.])
"""
cnt = int(m)
if cnt != m:
raise ValueError, "The order of derivation must be integer"
if cnt < 0 :
raise ValueError, "The order of derivation must be non-negative"
if not np.isscalar(scl) :
raise ValueError, "The scl parameter must be a scalar"
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if cnt == 0:
return cs
elif cnt >= len(cs):
return cs[:1]*0
else :
zs = _cseries_to_zseries(cs)
for i in range(cnt):
zs = _zseries_der(zs)*scl
return _zseries_to_cseries(zs)
def chebint(cs, m=1, k=[], lbnd=0, scl=1):
"""
Integrate a Chebyshev series.
Returns, as a C-series, the input C-series `cs`, integrated `m` times
from `lbnd` to `x`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `cs` is a sequence of
coefficients, from lowest order C-series "term" to highest, e.g.,
[1,2,3] represents the series :math:`T_0(x) + 2T_1(x) + 3T_2(x)`.
Parameters
----------
cs : array_like
1-d array of C-series coefficients, ordered from low to high.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
Returns
-------
S : ndarray
C-series coefficients of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
chebder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a`
- perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "re-projected" onto the C-series basis set. Thus, typically,
the result of this function is "un-intuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> cs = (1,2,3)
>>> C.chebint(cs)
array([ 0.5, -0.5, 0.5, 0.5])
>>> C.chebint(cs,3)
array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667,
0.00625 ])
>>> C.chebint(cs, k=3)
array([ 3.5, -0.5, 0.5, 0.5])
>>> C.chebint(cs,lbnd=-2)
array([ 8.5, -0.5, 0.5, 0.5])
>>> C.chebint(cs,scl=-2)
array([-1., 1., -1., -1.])
"""
cnt = int(m)
if np.isscalar(k) :
k = [k]
if cnt != m:
raise ValueError, "The order of integration must be integer"
if cnt < 0 :
raise ValueError, "The order of integration must be non-negative"
if len(k) > cnt :
raise ValueError, "Too many integration constants"
if not np.isscalar(lbnd) :
raise ValueError, "The lbnd parameter must be a scalar"
if not np.isscalar(scl) :
raise ValueError, "The scl parameter must be a scalar"
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if cnt == 0:
return cs
else:
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt) :
zs = _cseries_to_zseries(cs)*scl
zs = _zseries_int(zs)
cs = _zseries_to_cseries(zs)
cs[0] += k[i] - chebval(lbnd, cs)
return cs
def chebval(x, cs):
"""Evaluate a Chebyshev series.
If `cs` is of length `n`, this function returns :
``p(x) = cs[0]*T_0(x) + cs[1]*T_1(x) + ... + cs[n-1]*T_{n-1}(x)``
If x is a sequence or array then p(x) will have the same shape as x.
    If `x` is a ring_like object that supports multiplication and addition
by the values in `cs`, then an object of the same type is returned.
Parameters
----------
x : array_like, ring_like
Array of numbers or objects that support multiplication and
addition with themselves and with the elements of `cs`.
cs : array_like
1-d array of Chebyshev coefficients ordered from low to high.
Returns
-------
values : ndarray, ring_like
If the return is an ndarray then it has the same shape as `x`.
See Also
--------
chebfit
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
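    A small, hand-checked example, evaluating ``T_0(x) + 2*T_1(x) + 3*T_2(x)``
    at a scalar and at a list of points:
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebval(1, (1, 2, 3))
    6.0
    >>> C.chebval([-1, 0, 1], (1, 2, 3)).tolist()
    [2.0, -2.0, 6.0]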
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if isinstance(x, tuple) or isinstance(x, list) :
x = np.asarray(x)
if len(cs) == 1 :
c0 = cs[0]
c1 = 0
elif len(cs) == 2 :
c0 = cs[0]
c1 = cs[1]
else :
x2 = 2*x
c0 = cs[-2]
c1 = cs[-1]
for i in range(3, len(cs) + 1) :
tmp = c0
c0 = cs[-i] - c1
c1 = tmp + c1*x2
return c0 + c1*x
def chebvander(x, deg) :
"""Vandermonde matrix of given degree.
Returns the Vandermonde matrix of degree `deg` and sample points `x`.
This isn't a true Vandermonde matrix because `x` can be an arbitrary
ndarray and the Chebyshev polynomials aren't powers. If ``V`` is the
returned matrix and `x` is a 2d array, then the elements of ``V`` are
``V[i,j,k] = T_k(x[i,j])``, where ``T_k`` is the Chebyshev polynomial
of degree ``k``.
Parameters
----------
x : array_like
Array of points. The values are converted to double or complex
doubles.
deg : integer
Degree of the resulting matrix.
Returns
-------
vander : Vandermonde matrix.
The shape of the returned matrix is ``x.shape + (deg+1,)``. The last
index is the degree.
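    Examples
    --------
    A small sanity check (each row holds ``[T_0(x), T_1(x), T_2(x)]`` for one
    sample point; values worked out by hand):
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebvander([-1, 0, 1], 2).tolist()
    [[1.0, -1.0, 1.0], [1.0, 0.0, -1.0], [1.0, 1.0, 1.0]]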
"""
x = np.asarray(x) + 0.0
order = int(deg) + 1
v = np.ones((order,) + x.shape, dtype=x.dtype)
if order > 1 :
x2 = 2*x
v[1] = x
for i in range(2, order) :
v[i] = v[i-1]*x2 - v[i-2]
return np.rollaxis(v, 0, v.ndim)
def chebfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Chebyshev series to data.
Fit a Chebyshev series ``p(x) = p[0] * T_{0}(x) + ... + p[deg] *
T_{deg}(x)`` of degree `deg` to points `(x, y)`. Returns a vector of
coefficients `p` that minimises the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (M,) or (M, K)
Chebyshev coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : present when `full` = True
Residuals of the least-squares fit, the effective rank of the
scaled Vandermonde matrix and its singular values, and the
specified value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebval : Evaluates a Chebyshev series.
chebvander : Vandermonde matrix of Chebyshev series.
polyfit : least squares fit using polynomials.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
    The solution is the coefficients ``c[i]`` of the Chebyshev series
``T(x)`` that minimizes the squared error
``E = \\sum_j |y_j - T(x_j)|^2``.
This problem is solved by setting up as the overdetermined matrix
equation
``V(x)*c = y``,
where ``V`` is the Vandermonde matrix of `x`, the elements of ``c`` are
the coefficients to be solved for, and the elements of `y` are the
observed values. This equation is then solved using the singular value
decomposition of ``V``.
If some of the singular values of ``V`` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Chebyshev series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
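    A minimal, hand-checked illustration: data sampled exactly from the line
    ``1 + x`` (which is ``T_0 + T_1``) is recovered up to rounding:
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> x = np.linspace(-1, 1, 9)
    >>> np.round(C.chebfit(x, 1 + x, 1), 6).tolist()
    [1.0, 1.0]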
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError, "expected deg >= 0"
if x.ndim != 1:
raise TypeError, "expected 1D vector for x"
if x.size == 0:
raise TypeError, "expected non-empty vector for x"
if y.ndim < 1 or y.ndim > 2 :
raise TypeError, "expected 1D or 2D array for y"
if len(x) != len(y):
raise TypeError, "expected x and y to have same length"
# set up the least squares matrices
lhs = chebvander(x, deg)
rhs = y
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError, "expected 1D vector for w"
if len(x) != len(w):
raise TypeError, "expected x and w to have same length"
# apply weights
if rhs.ndim == 2:
lhs *= w[:, np.newaxis]
rhs *= w[:, np.newaxis]
else:
lhs *= w[:, np.newaxis]
rhs *= w
# set rcond
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
# scale the design matrix and solve the least squares equation
scl = np.sqrt((lhs*lhs).sum(0))
c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full :
return c, [resids, rank, s, rcond]
else :
return c
def chebroots(cs):
"""
Compute the roots of a Chebyshev series.
Return the roots (a.k.a "zeros") of the C-series represented by `cs`,
which is the sequence of the C-series' coefficients from lowest order
"term" to highest, e.g., [1,2,3] represents the C-series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
cs : array_like
1-d array of C-series coefficients ordered from low to high.
Returns
-------
out : ndarray
Array of the roots. If all the roots are real, then so is the
dtype of ``out``; otherwise, ``out``'s dtype is complex.
See Also
--------
polyroots
Notes
-----
Algorithm(s) used:
Remember: because the C-series basis set is different from the
"standard" basis set, the results of this function *may* not be what
one is expecting.
Examples
--------
>>> import numpy.polynomial as P
>>> import numpy.polynomial.chebyshev as C
>>> P.polyroots((-1,1,-1,1)) # x^3 - x^2 + x - 1 has two complex roots
array([ -4.99600361e-16-1.j, -4.99600361e-16+1.j, 1.00000e+00+0.j])
>>> C.chebroots((-1,1,-1,1)) # T3 - T2 + T1 - T0 has only real roots
array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00])
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if len(cs) <= 1 :
return np.array([], dtype=cs.dtype)
if len(cs) == 2 :
return np.array([-cs[0]/cs[1]])
n = len(cs) - 1
cmat = np.zeros((n,n), dtype=cs.dtype)
cmat.flat[1::n+1] = .5
cmat.flat[n::n+1] = .5
cmat[1, 0] = 1
cmat[:,-1] -= cs[:-1]*(.5/cs[-1])
roots = la.eigvals(cmat)
roots.sort()
return roots
#
# Chebyshev series class
#
exec polytemplate.substitute(name='Chebyshev', nick='cheb', domain='[-1,1]')
|
bsd-3-clause
| 4,376,316,724,196,033,500
| 28.466667
| 79
| 0.597154
| false
| 3.458151
| false
| false
| false
|
francielsilvestrini/soupport
|
controllers/attachments.py
|
1
|
1465
|
# -*- coding: utf-8 -*-
def attachments():
owner_table = getlist(request.args, 0)
owner_key = getlist(request.args, 1)
if not (owner_table and owner_key):
response.view = 'others/gadget_error.html'
        return dict(msg="attachments don't work!")
delete_id = request.vars.get('delete', 0)
if delete_id:
db(db.attachments.id == delete_id).delete()
db.attachments.owner_table.default = owner_table
db.attachments.owner_key.default = owner_key
query = ((db.attachments.owner_table == owner_table) & (db.attachments.owner_key == owner_key))
form = SQLFORM(db.attachments, upload=UPLOAD_URLS['attachments'])
if request.vars.attachment != None:
form.vars.name = request.vars.attachment.filename
form.post_vars = form.vars.name
form.process()
content = db(query).select()
return dict(form=form, content=content)
def attachment_download():
if not request.args(0) or not request.args[0].isdigit():
raise HTTP(404)
id = int(request.args[0])
import cStringIO
import contenttype as c
s=cStringIO.StringIO()
(filename,file) = db.attachments.attachment.retrieve(db.attachments[id].attachment)
s.write(file.read())
response.headers['Content-Type'] = c.contenttype(filename)
response.headers['Content-Disposition'] = "attachment; filename=%s" % filename
return s.getvalue()
|
lgpl-3.0
| 645,892,210,916,495,400
| 33.731707
| 99
| 0.63959
| false
| 3.737245
| false
| false
| false
|
FDelporte/PiGameConsole
|
SlideShow.py
|
1
|
2521
|
import Tkinter as tk
from itertools import cycle
from Tkinter import *
from PIL import Image, ImageTk # pip install pillow + sudo apt-get install python-imaging-tk
# based on example found on
# https://raspberrypi.stackexchange.com/questions/18261/how-do-i-display-an-image-file-png-in-a-simple-window
class SlideShow(tk.Frame):
canvas = None
current_image = 0
stopShowing = False
SLIDE_DURATION = 7500
NUMBER_OF_SLIDES = 1
def __init__(self, parent, w, h):
tk.Frame.__init__(self, parent)
# Set up the GUI window via Tk
self.canvas = Canvas(self, background="black", width=w, height=h)
self.canvas.pack(side="bottom", fill="x", padx=4)
# pick an image file you have .bmp .jpg .gif. .png
# load the file and covert it to a Tkinter image object
self.image1 = ImageTk.PhotoImage(Image.open('pictures/speelpong.jpg'))
if self.NUMBER_OF_SLIDES >= 2:
self.image2 = ImageTk.PhotoImage(Image.open('pictures/ouderraad2.jpg'))
if self.NUMBER_OF_SLIDES >= 3:
self.image3 = ImageTk.PhotoImage(Image.open('pictures/ouderraad3.jpg'))
# make the root window the size of the image
#self.canvas.geometry("%dx%d+%d+%d" % (w, h, 0, 0))
# root has no image argument, so use a label as a panel
self.panel1 = tk.Label(self.canvas, image=self.image1)
self.display = self.image1
self.panel1.pack(side=tk.TOP, fill=tk.BOTH, expand=tk.YES)
print "Display image1"
if self.NUMBER_OF_SLIDES > 1:
self.after(self.SLIDE_DURATION, self.update_image)
#self.root.mainloop()
def stop(self):
self.stopShowing = True
def update_image(self):
if self.display == self.image1 and self.NUMBER_OF_SLIDES >= 2:
self.panel1.configure(image=self.image2)
print "Display image2"
self.display = self.image2
elif self.display == self.image2 and self.NUMBER_OF_SLIDES >= 3:
self.panel1.configure(image=self.image3)
print "Display image3"
self.display = self.image3
else:
self.panel1.configure(image=self.image1)
print "Display image1"
self.display = self.image1
if self.stopShowing == False:
            self.after(self.SLIDE_DURATION, self.update_image) # Schedule the next slide after SLIDE_DURATION ms (7.5 s)
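# Illustrative usage (assumes a Tk root window and that the files under
# pictures/ exist):
#     root = tk.Tk()
#     show = SlideShow(root, 800, 480)
#     show.pack()
#     root.mainloop()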
|
apache-2.0
| 5,571,100,893,828,448,000
| 35.028571
| 109
| 0.603332
| false
| 3.535764
| false
| false
| false
|
bas-stringer/scry
|
log.py
|
1
|
2549
|
from __init__ import LOG_DIRECTORY
from utility import assert_dir
from os.path import join
from datetime import datetime
from shutil import copyfile
REQUEST_DIR = join(LOG_DIRECTORY,'requests')
RESPONSE_DIR = join(LOG_DIRECTORY,'responses')
assert_dir(REQUEST_DIR)
assert_dir(RESPONSE_DIR)
def log_request(request):
now = datetime.now()
date = now.date().isoformat()
time = now.time().isoformat()
last_path = join(LOG_DIRECTORY,'last_request.log')
spacer = '\n\n----------\n\n'
vals = request.values
print 'Logging HTTP request ('+time+')'
with open(last_path,'w') as f:
f.write('Method :\t'+request.method+'\n')
f.write('Time :\t'+time+'\n')
f.write('Base URL :\t'+request.base_url+'\n')
f.write('Full Path:\t'+request.full_path+spacer)
f.write('Values (Len '+str(len(vals))+'):'+'\t'+str(vals) + '\n')
for k in vals:
f.write('\n'+k+':\t'+vals[k])
f.write(spacer)
f.write('Content Length :\t'+str(request.content_length)+'\n')
f.write('Content Type :\t'+str(request.content_type)+'\n')
f.write('Parsed Content Type:\t'+str(request._parsed_content_type)+spacer)
f.write('Accepted Response Types:\t'+str(request.accept_mimetypes)+spacer)
f.write(str(dir(request)) + spacer)
for prop in dir(request):
if prop.find('__') != -1: continue
elif prop == 'access_route': continue # Not sure why, but not skipping this causes issues
f.write('=== ' + prop + ' ===\n\n')
val = getattr(request,prop)
fnc = hasattr(val,'__call__')
if fnc:
f.write(str(type(val)) + spacer)
else:
f.write(str(val) + spacer)
# Copy the new last_request.log file to the appropriate location
dir_path = join(REQUEST_DIR,date)
file_path = join(dir_path,'%s.log' % (time))
assert_dir(dir_path)
copyfile(last_path,file_path)
return date, time
def log_response(response, date, time):
print 'Logging HTTP response ('+time+')'
last_path = join(LOG_DIRECTORY,'last_response.log')
with open(last_path,'w') as f:
f.write(response)
# Copy the new last_response.log file to the appropriate location
dir_path = join(RESPONSE_DIR,date)
file_path = join(dir_path,'%s.log' % (time))
assert_dir(dir_path)
copyfile(last_path,file_path)
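# Illustrative usage from a request handler (the response-body helper name is
# hypothetical):
#     date, time = log_request(request)
#     body = build_response_body()
#     log_response(body, date, time)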
|
mit
| 8,292,879,116,061,946,000
| 33.931507
| 101
| 0.572381
| false
| 3.421477
| false
| false
| false
|
Krakn/learning
|
src/python/mit_opencourseware/6001x/wk02pset02/problem03.py
|
1
|
1204
|
def exactPayment(balance, annualInterestRate):
"""
The following variables contain values as described below:
balance - the outstanding balance on the credit card
annualInterestRate - annual interest rate as a decimal
    boundLow - lower bound on the payment: the balance, with no interest, split over 12 months
    boundHigh - upper bound on the payment: the balance with a full year of interest, split over 12 months
reallyGoodGuess - Average of the previous two variables
"""
monthlyInterestRate = annualInterestRate / 12.0
boundLow = balance / 12.0
boundHigh = (balance * (1 + annualInterestRate)) / 12.0
reallyGoodGuess = round((boundLow + boundHigh) / 2, 2)
remainingBalance = balance
while round(boundLow, 1) != round(boundHigh, 1):
remainingBalance = balance
for _ in range(1, 13):
remainingBalance -= reallyGoodGuess
remainingBalance += remainingBalance * monthlyInterestRate
if round(remainingBalance, 1) > 0:
boundLow = reallyGoodGuess
reallyGoodGuess = round((boundLow + boundHigh) / 2, 2)
if round(remainingBalance, 1) < 0:
boundHigh = reallyGoodGuess
reallyGoodGuess = round((boundLow + boundHigh) / 2, 2)
print(reallyGoodGuess)
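# Illustrative call (the balance and rate are made-up values):
#     exactPayment(320000, 0.2)
# prints the fixed monthly payment, found by bisection on the payment amount,
# that pays the balance off within 12 months.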
|
isc
| 5,623,401,946,856,234,000
| 40.517241
| 70
| 0.658638
| false
| 4.284698
| false
| false
| false
|
massivezh/qmc
|
gui.py
|
1
|
3955
|
#!/usr/bin/env python
# Simple GUI for qmc.py
# FIXME Experimental - doesn't do any check on what is passed as input ;)
#
# Copyright (C) 2011 Marcello Pogliani
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import qmc
import sys
import signal
from PyQt4.Qt import *
signal.signal(signal.SIGINT, signal.SIG_DFL)
# TODO refactor the library to allow GUI to output some intermediate steps!
if __name__ == "__main__":
app = QApplication(sys.argv)
widget = QWidget()
widget.resize(450, 350)
widget.setWindowTitle('Quine McCluskey Algorithm')
layout = QGridLayout(widget)
widget.setLayout(layout)
# widgets
go = QPushButton('GO!', widget)
reset = QPushButton('Reset', widget)
add_function = QPushButton('Add function', widget)
costfun_selector = QButtonGroup(widget)
costfun_selector_literals = QRadioButton('# of literals', widget)
costfun_selector_implicants = QRadioButton('# of implicants', widget)
costfun_selector.addButton(costfun_selector_literals)
costfun_selector.addButton(costfun_selector_implicants)
costfun_selector_literals.setChecked(True) # default cost function
cost = QLCDNumber(widget)
result = QTextEdit(widget)
insert_pane = QTableWidget(1, 2, widget);
insert_pane.setHorizontalHeaderLabels(['ONset', 'DCset'])
# bind widgets to layout
layout.addWidget (insert_pane, 1, 1, 1, 4)
layout.addWidget(add_function, 2, 1, 1, 1)
layout.addWidget(go, 2, 2, 1, 2)
layout.addWidget(reset, 2, 4, 1, 1)
layout.addWidget(QLabel('Cost function:', widget), 3, 1, 1, 2)
layout.addWidget(costfun_selector_implicants, 4, 1, 1, 2)
layout.addWidget(costfun_selector_literals, 5, 1, 1, 2)
layout.addWidget(QLabel('Computed cost:', widget), 6, 1, 2, 1)
layout.addWidget(cost, 6, 2, 2, 1)
layout.addWidget(result, 3, 3, 5, 2)
def addFunction():
insert_pane.setRowCount(insert_pane.rowCount()+1)
def toList(obj):
        if obj is None:
l = []
else:
s = obj.text().toAscii()
l = s.split(',')
l = [i.toInt()[0] for i in l]
return l
def startMinimization():
lof = []
for row in range(insert_pane.rowCount()):
curf_onset = toList(insert_pane.item(row, 0))
curf_dcset = toList(insert_pane.item(row, 1))
if curf_onset != []:
lof.append(qmc.QmcFunction(curf_onset, curf_dcset))
if costfun_selector_literals.isChecked():
costf = qmc.LITERALS_COST_FUNCTION
elif costfun_selector_implicants.isChecked():
costf = qmc.IMPLICANTS_COST_FUNCTION
if lof != []:
qmc.VERBOSE = False # no debug printfs when running from the GUI!
q = qmc.QuineMcCluskey(lof, costf)
q.findPrimeImplicants()
q.simplify()
result.setText(str(q.sol))
cost.display(q.sol.getCost())
else:
result.setText("Input is empty!")
def clearAll():
insert_pane.setRowCount(1)
insert_pane.clearContents()
result.clear()
cost.display(0)
pass
widget.connect(add_function, SIGNAL('clicked()'), addFunction)
widget.connect(go, SIGNAL('clicked()'), startMinimization)
widget.connect(reset, SIGNAL('clicked()'), clearAll)
widget.show()
sys.exit(app.exec_())
|
apache-2.0
| -570,660,592,206,888,800
| 33.692982
| 77
| 0.636157
| false
| 3.585675
| false
| false
| false
|
jldbc/pybaseball
|
pybaseball/datahelpers/postprocessing.py
|
1
|
7149
|
import re
from datetime import datetime
from typing import Any, List, Union, Optional
import attr
import numpy as np
import pandas as pd
null_regexes = [
re.compile(r'^\s*$'),
re.compile(r'^null$', re.RegexFlag.IGNORECASE)
]
date_formats = [
# Standard statcast format
(re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$'), '%Y-%m-%d'),
# Just in case (https://github.com/jldbc/pybaseball/issues/104)
(re.compile(r'^\d{4}-\d{1,2}-\d{1,2}T\d{2}:\d{2}:\d{2}.\d{1,6}Z$'), '%Y-%m-%dT%H:%M:%S.%fZ'),
]
def try_parse_dataframe(
data: pd.DataFrame,
parse_numerics: bool = True,
null_replacement: Union[str, int, float, datetime] = np.nan,
known_percentages: Optional[List[str]] = None
) -> pd.DataFrame:
data_copy = data.copy()
if parse_numerics:
data_copy = coalesce_nulls(data_copy, null_replacement)
data_copy = data_copy.apply(
pd.to_numeric,
errors='ignore',
downcast='signed'
).convert_dtypes(convert_string=False)
string_columns = [
dtype_tuple[0] for dtype_tuple in data_copy.dtypes.items() if str(dtype_tuple[1]) in ["object", "string"]
]
for column in string_columns:
# Only check the first value of the column and test that;
# this is faster than blindly trying to convert entire columns
first_value_index = data_copy[column].first_valid_index()
if first_value_index is None:
# All nulls
continue
first_value = data_copy[column].loc[first_value_index]
if str(first_value).endswith('%') or column.endswith('%') or \
(known_percentages is not None and column in known_percentages):
data_copy[column] = data_copy[column].astype(str).str.replace("%", "").astype(float) / 100.0
else:
# Doing it this way as just applying pd.to_datetime on
# the whole dataframe just tries to gobble up ints/floats as timestamps
for date_regex, date_format in date_formats:
if isinstance(first_value, str) and date_regex.match(first_value):
data_copy[column] = data_copy[column].apply(pd.to_datetime, errors='ignore', format=date_format)
data_copy[column] = data_copy[column].convert_dtypes(convert_string=False)
break
return data_copy
# pylint: disable=too-many-return-statements
def try_parse(
value: Union[None, str, int, datetime, float],
column_name: str,
null_replacement: Union[str, int, float, datetime] = np.nan,
known_percentages: Optional[List[str]] = None
) -> Union[str, int, float, datetime]:
if value is None:
return null_replacement
if not isinstance(value, str):
return value
for regex in null_regexes:
if regex.match(value):
return null_replacement
# Is it a date?
for date_regex, date_format in date_formats:
if date_regex.match(value):
try:
return datetime.strptime(value, date_format)
except: # pylint: disable=bare-except
pass
    # Is it a float or an int (including percentages)?
try:
percentage = (
value.endswith('%') or column_name.endswith('%') or \
(known_percentages is not None and column_name in known_percentages)
)
if percentage:
return try_parse_percentage(value)
if '.' in value:
return float(value)
return int(value)
except: # pylint: disable=bare-except
pass
return value
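# A few hand-checked illustrations of try_parse (the column names are made up
# for demonstration and are not part of the original module):
#     try_parse("2019-05-01", "game_date")  -> datetime(2019, 5, 1, 0, 0)
#     try_parse("12.5%", "BB%")             -> 0.125
#     try_parse("  ", "launch_speed")       -> nan (the default null_replacement)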
def try_parse_percentage(value: str) -> float:
return float(value.strip(' %')) / 100.0
def coalesce_nulls(data: pd.DataFrame, value: Union[str, int, float, datetime] = np.nan) -> pd.DataFrame:
# Fill missing values with NaN
for regex in null_regexes:
data.replace(regex.pattern, value, regex=True, inplace=True)
return data
def columns_except(data: pd.DataFrame, columns: List[str]) -> List[str]:
return list(np.setdiff1d(data.columns, columns))
def convert_numeric(data: pd.DataFrame, numeric_columns: List[str]) -> pd.DataFrame:
# data.loc[data[numeric_cols] == ''] = None
# data[numeric_cols] = data[numeric_cols].astype(float)
# Ideally we'd do it the pandas way ^, but it's barfing when some columns have no data
for col in numeric_columns:
data[col] = data[col].astype(float)
return data
def convert_percentages(data: pd.DataFrame, columns: List[str]) -> pd.DataFrame:
# convert percent strings to float values
for col in columns:
# Skip if column is all NA (happens for some of the more obscure stats + in older seasons)
if col in data.columns and data[col].count() > 0:
data[col] = data[col].str.strip(' %')
data[col] = data[col].astype(float) / 100.0
else:
# print(col)
pass
return data
def compute_pa(bat_df: pd.DataFrame) -> pd.Series:
"""
    Computes PA, using AB, BB, HBP, SH, and SF. If any of those columns are null,
they're filled with 0
:param bat_df:
:return:
"""
plate_appearances = bat_df.loc[:, "AB"].fillna(0)
for stat in ["BB", "HBP", "SH", "SF"]:
plate_appearances += bat_df.loc[:, stat].fillna(0)
return plate_appearances.astype(int)
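# A worked sketch of the PA formula above (hypothetical data, not from the
# original module): AB=500, BB=60, HBP=5, SH=2, SF=4 gives PA = 571.
#   >>> bat = pd.DataFrame({"AB": [500], "BB": [60], "HBP": [5], "SH": [2], "SF": [4]})
#   >>> compute_pa(bat).tolist()
#   [571]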
def augment_lahman_batting(bat_df: pd.DataFrame) -> pd.DataFrame:
"""
augments the Lahman batting data frame, with PA, X1B (singles), and TB.
:param bat_df:
:return:
"""
plate_appearances = compute_pa(bat_df)
singles = (
bat_df.loc[:, "H"]
- bat_df.loc[:, "2B"]
- bat_df.loc[:, "3B"]
- bat_df.loc[:, "HR"]
)
total_bases = (
bat_df.loc[:, "HR"] * 4
+ bat_df.loc[:, "3B"] * 3
+ bat_df.loc[:, "2B"] * 2
+ singles
)
return bat_df.assign(
PA=plate_appearances.astype(int),
X1B=singles.astype(int),
TB=total_bases.astype(int)
).rename({"X1B": "1B"}, axis=1)
def augment_lahman_pitching(stats_df: pd.DataFrame) -> pd.DataFrame:
"""
augments the Lahman pitching data frame. currently a noop.
:param stats_df:
:return:
"""
return stats_df
def aggregate_by_season(stats_df: pd.DataFrame) -> pd.DataFrame:
return stats_df.groupby(["playerID", "yearID"]).sum().reset_index()
# pylint: disable=unused-argument
def check_is_zero_one(instance: Any, attribute: attr.Attribute, value: Union[int, float]) -> None:
if value not in [0, 1]:
raise ValueError(f"{attribute} must be either 0 or 1, not {value}")
# pylint: disable=unused-argument
def check_greater_zero(instance: Any, attribute: attr.Attribute, value: Union[int, float]) -> None:
if value <= 0:
raise ValueError(
f"{attribute} must be greater than zero, not {value}"
)
# pylint: disable=unused-argument
def check_between_zero_one(instance: Any, attribute: attr.Attribute, value: Union[int, float]) -> None:
if not 0 <= value <= 1:
raise ValueError(
f"{attribute} must be between zero and one, not {value}"
)
|
mit
| 1,013,509,468,966,282,900
| 31.202703
| 116
| 0.608896
| false
| 3.428777
| false
| false
| false
|
ifsmirnov/jngen
|
build.py
|
1
|
2593
|
#!/usr/bin/python3
import re, os
HEADER_REGEX = re.compile('#include "(.*)"')
# This list may not contain every header directly, but each jngen header
# must be among the dependencies of some file listed here.
LIBRARY_HEADERS = [
"array.h",
"random.h",
"common.h",
"tree.h",
"graph.h",
"geometry.h",
"math_jngen.h",
"rnda.h",
"rnds.h",
"testcases.h",
"options.h",
"printers.h",
"repr.h",
"query_builder.h",
"drawer/drawer.h",
"suites/suites.h",
]
def posix_path_to_native(posix_path):
return os.path.join(*posix_path.split('/'))
def extract_header(line):
res = HEADER_REGEX.match(line)
if res:
return res.groups()[0]
def extract_direct_deps(posix_filename):
dir = os.path.dirname(posix_filename) # check explicitly on win
res = set()
with open(posix_path_to_native(posix_filename)) as fin:
for line in fin.readlines():
t = extract_header(line)
if t and not t.endswith("_inl.h"):
res.add(dir + '/' + t if dir else t)
return res
deps = {}
def extract_deps(posix_filename):
posix_filename = os.path.normpath(posix_filename)
if posix_filename in deps:
return deps[posix_filename]
deps[posix_filename] = set((posix_filename,))
for dep in extract_direct_deps(posix_filename):
deps[posix_filename].update(extract_deps(dep))
return deps[posix_filename]
def write_file(filename, stream):
dir = os.path.dirname(filename) # check explicitly on win
with open(posix_path_to_native(filename)) as fin:
for line in fin.readlines():
include_or_not = HEADER_REGEX.match(line)
if include_or_not:
if include_or_not.groups()[0].endswith("_inl.h"):
t = include_or_not.groups()[0]
write_file(dir + '/' + t if dir else t, stream)
elif '#pragma once' not in line:
stream.write(line)
headers = set()
for h in LIBRARY_HEADERS:
headers.update(extract_deps(h))
headers = ['header.h'] + sorted(headers)
deps['footer.h'] = set(headers + ['footer.h'])
headers += ['footer.h']
deps['header.h'] = set(('header.h',))
headers_in_order = []
while headers:
for h in headers:
if len(deps[h]) == 1:
headers_in_order.append(h)
for other in deps:
deps[other].discard(h)
del deps[h]
headers.remove(h)
break
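# Illustrative ordering sketch (hypothetical dependency graph): if common.h has
# no includes and random.h includes common.h, the loop above emits headers in
# dependency order, e.g. ['header.h', 'common.h', 'random.h', ..., 'footer.h'],
# so each file is written only after everything it includes.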
with open("jngen.h", "w") as fout:
for filename in headers_in_order:
write_file(filename, fout)
|
mit
| 101,050,475,562,257,710
| 24.421569
| 71
| 0.587736
| false
| 3.371912
| false
| false
| false
|
d0ugal/discode-server
|
discode_server/db.py
|
1
|
4273
|
import collections
import datetime
import hashlib
import logging
from sanic import exceptions
import aiopg.sa
import sqlalchemy as sa
from discode_server.utils import baseconv
from discode_server.utils import highlight
log = logging.getLogger(__file__)
meta = sa.MetaData()
paste = sa.Table(
'pastes', meta,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('contents', sa.Text(), nullable=False),
sa.Column('created_on', sa.DateTime, default=datetime.datetime.utcnow),
sa.Column('sha', sa.String(64), nullable=False),
sa.Column('lexer', sa.String(60), nullable=True),
sa.Column('lexer_guessed', sa.Boolean, default=False),
)
comment = sa.Table(
'comments', meta,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('paste_id', sa.Integer,
sa.ForeignKey("pastes.id", ondelete="CASCADE"), nullable=False),
sa.Column('line', sa.Integer, nullable=False),
sa.Column('contents', sa.Text(), nullable=False),
sa.Column('created_on', sa.DateTime, default=datetime.datetime.utcnow),
)
class Paste:
def __init__(self, record, comments=None):
self._record = record
self.comments = collections.defaultdict(list)
if not comments:
return
for comment in comments:
self.comments[comment.line].append(comment.contents)
@property
def id(self):
return baseconv.base36.from_decimal(self._record.id)
@property
def decimal_id(self):
return self._record.id
@property
def contents(self):
return self._record.contents
@property
def lexer(self):
return self._record.lexer
@property
def created_on(self):
return self._record.created_on
class Comment:
def __init__(self, record):
self._record = record
@property
def id(self):
return self._record.id
@property
def contents(self):
return self._record.contents
@property
def line(self):
return self._record.line
class PasteNotFound(exceptions.NotFound):
pass
async def create_engine(db_config, loop):
return await aiopg.sa.create_engine(
**db_config,
loop=loop
)
async def get_paste(conn, paste_id):
query = sa.select([paste]).where(paste.c.id == paste_id)
result = await conn.execute(query)
p = await result.first()
comments = await get_comments(conn, paste_id)
if not p:
raise PasteNotFound("Paste Not Found")
return Paste(p, comments)
async def get_pastes(conn):
query = sa.select([paste.c.id, paste.c.created_on])
result = await conn.execute(query)
pastes = await result.fetchall()
if not pastes:
raise PasteNotFound("Paste Not Found")
return [Paste(r) for r in pastes]
async def delete_expired(conn):
try:
log.info("Deleteing expired pastes")
days = 30
utcnow = datetime.datetime.utcnow()
delete_after = utcnow - datetime.timedelta(days=days)
await conn.execute(paste.delete().where(
paste.c.created_on < delete_after))
    except Exception:
log.exception("Failed to delete expired pastes")
async def create_comment(conn, paste_id, line, contents):
result = await conn.execute(comment.insert().values(
paste_id=paste_id, line=line, contents=contents))
record = await result.fetchone()
await conn.execute(f"NOTIFY channel, %s", f"{paste_id},{line},{record.id}")
return record
async def get_comments(conn, paste_id):
query = sa.select([comment]).where(comment.c.paste_id == paste_id)
result = await conn.execute(query)
comments = await result.fetchall()
return [Comment(c) for c in comments]
async def create(conn, contents, lexer, created_on=None):
sha = hashlib.sha256(contents.encode('utf-8')).hexdigest()
lexer, detected = highlight.guess(contents, lexer)
values = {
'contents': contents,
'sha': sha,
'lexer': lexer,
'lexer_guessed': detected,
}
if created_on is not None:
values['created_on'] = created_on
result = await conn.execute(paste.insert().values(**values))
record = await result.fetchone()
if not record:
raise Exception("whelp")
return Paste(record)
|
bsd-2-clause
| 5,807,472,579,117,242,000
| 25.054878
| 79
| 0.646852
| false
| 3.696367
| false
| false
| false
|
shoyer/xarray
|
xarray/core/indexing.py
|
1
|
51766
|
import enum
import functools
import operator
from collections import defaultdict
from contextlib import suppress
from datetime import timedelta
from typing import Any, Callable, Iterable, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from . import duck_array_ops, nputils, utils
from .npcompat import DTypeLike
from .pycompat import dask_array_type, integer_types, sparse_array_type
from .utils import is_dict_like, maybe_cast_to_coords_dtype
def expanded_indexer(key, ndim):
"""Given a key for indexing an ndarray, return an equivalent key which is a
tuple with length equal to the number of dimensions.
The expansion is done by replacing all `Ellipsis` items with the right
number of full slices and then padding the key with full slices so that it
reaches the appropriate dimensionality.
"""
if not isinstance(key, tuple):
# numpy treats non-tuple keys equivalent to tuples of length 1
key = (key,)
new_key = []
# handling Ellipsis right is a little tricky, see:
# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
found_ellipsis = False
for k in key:
if k is Ellipsis:
if not found_ellipsis:
new_key.extend((ndim + 1 - len(key)) * [slice(None)])
found_ellipsis = True
else:
new_key.append(slice(None))
else:
new_key.append(k)
if len(new_key) > ndim:
raise IndexError("too many indices")
new_key.extend((ndim - len(new_key)) * [slice(None)])
return tuple(new_key)
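# Hypothetical examples of the expansion described above:
#   >>> expanded_indexer(0, 3)
#   (0, slice(None, None, None), slice(None, None, None))
#   >>> expanded_indexer((0, Ellipsis), 3)
#   (0, slice(None, None, None), slice(None, None, None))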
def _expand_slice(slice_, size):
return np.arange(*slice_.indices(size))
def _sanitize_slice_element(x):
from .variable import Variable
from .dataarray import DataArray
if isinstance(x, (Variable, DataArray)):
x = x.values
if isinstance(x, np.ndarray):
if x.ndim != 0:
raise ValueError(
f"cannot use non-scalar arrays in a slice for xarray indexing: {x}"
)
x = x[()]
if isinstance(x, np.timedelta64):
# pandas does not support indexing with np.timedelta64 yet:
# https://github.com/pandas-dev/pandas/issues/20393
x = pd.Timedelta(x)
return x
def _asarray_tuplesafe(values):
"""
Convert values into a numpy array of at most 1-dimension, while preserving
tuples.
Adapted from pandas.core.common._asarray_tuplesafe
"""
if isinstance(values, tuple):
result = utils.to_0d_object_array(values)
else:
result = np.asarray(values)
if result.ndim == 2:
result = np.empty(len(values), dtype=object)
result[:] = values
return result
def _is_nested_tuple(possible_tuple):
return isinstance(possible_tuple, tuple) and any(
isinstance(value, (tuple, list, slice)) for value in possible_tuple
)
def get_indexer_nd(index, labels, method=None, tolerance=None):
"""Wrapper around :meth:`pandas.Index.get_indexer` supporting n-dimensional
labels
"""
flat_labels = np.ravel(labels)
flat_indexer = index.get_indexer(flat_labels, method=method, tolerance=tolerance)
indexer = flat_indexer.reshape(labels.shape)
return indexer
def convert_label_indexer(index, label, index_name="", method=None, tolerance=None):
"""Given a pandas.Index and labels (e.g., from __getitem__) for one
dimension, return an indexer suitable for indexing an ndarray along that
dimension. If `index` is a pandas.MultiIndex and depending on `label`,
return a new pandas.Index or pandas.MultiIndex (otherwise return None).
"""
new_index = None
if isinstance(label, slice):
if method is not None or tolerance is not None:
raise NotImplementedError(
"cannot use ``method`` argument if any indexers are " "slice objects"
)
indexer = index.slice_indexer(
_sanitize_slice_element(label.start),
_sanitize_slice_element(label.stop),
_sanitize_slice_element(label.step),
)
if not isinstance(indexer, slice):
# unlike pandas, in xarray we never want to silently convert a
# slice indexer into an array indexer
raise KeyError(
"cannot represent labeled-based slice indexer for dimension "
f"{index_name!r} with a slice over integer positions; the index is "
"unsorted or non-unique"
)
elif is_dict_like(label):
is_nested_vals = _is_nested_tuple(tuple(label.values()))
if not isinstance(index, pd.MultiIndex):
raise ValueError(
"cannot use a dict-like object for selection on "
"a dimension that does not have a MultiIndex"
)
elif len(label) == index.nlevels and not is_nested_vals:
indexer = index.get_loc(tuple(label[k] for k in index.names))
else:
for k, v in label.items():
# index should be an item (i.e. Hashable) not an array-like
if isinstance(v, Sequence) and not isinstance(v, str):
raise ValueError(
"Vectorized selection is not "
"available along level variable: " + k
)
indexer, new_index = index.get_loc_level(
tuple(label.values()), level=tuple(label.keys())
)
# GH2619. Raise a KeyError if nothing is chosen
if indexer.dtype.kind == "b" and indexer.sum() == 0:
raise KeyError(f"{label} not found")
elif isinstance(label, tuple) and isinstance(index, pd.MultiIndex):
if _is_nested_tuple(label):
indexer = index.get_locs(label)
elif len(label) == index.nlevels:
indexer = index.get_loc(label)
else:
indexer, new_index = index.get_loc_level(
label, level=list(range(len(label)))
)
else:
label = (
label
if getattr(label, "ndim", 1) > 1 # vectorized-indexing
else _asarray_tuplesafe(label)
)
if label.ndim == 0:
if isinstance(index, pd.MultiIndex):
indexer, new_index = index.get_loc_level(label.item(), level=0)
elif isinstance(index, pd.CategoricalIndex):
if method is not None:
raise ValueError(
"'method' is not a valid kwarg when indexing using a CategoricalIndex."
)
if tolerance is not None:
raise ValueError(
"'tolerance' is not a valid kwarg when indexing using a CategoricalIndex."
)
indexer = index.get_loc(label.item())
else:
indexer = index.get_loc(
label.item(), method=method, tolerance=tolerance
)
elif label.dtype.kind == "b":
indexer = label
else:
if isinstance(index, pd.MultiIndex) and label.ndim > 1:
raise ValueError(
"Vectorized selection is not available along "
"MultiIndex variable: " + index_name
)
indexer = get_indexer_nd(index, label, method, tolerance)
if np.any(indexer < 0):
raise KeyError(f"not all values found in index {index_name!r}")
return indexer, new_index
def get_dim_indexers(data_obj, indexers):
"""Given a xarray data object and label based indexers, return a mapping
of label indexers with only dimension names as keys.
It groups multiple level indexers given on a multi-index dimension
into a single, dictionary indexer for that dimension (Raise a ValueError
if it is not possible).
"""
invalid = [
k
for k in indexers
if k not in data_obj.dims and k not in data_obj._level_coords
]
if invalid:
raise ValueError(f"dimensions or multi-index levels {invalid!r} do not exist")
level_indexers = defaultdict(dict)
dim_indexers = {}
for key, label in indexers.items():
(dim,) = data_obj[key].dims
if key != dim:
# assume here multi-index level indexer
level_indexers[dim][key] = label
else:
dim_indexers[key] = label
for dim, level_labels in level_indexers.items():
if dim_indexers.get(dim, False):
raise ValueError(
"cannot combine multi-index level indexers with an indexer for "
f"dimension {dim}"
)
dim_indexers[dim] = level_labels
return dim_indexers
def remap_label_indexers(data_obj, indexers, method=None, tolerance=None):
"""Given an xarray data object and label based indexers, return a mapping
of equivalent location based indexers. Also return a mapping of updated
pandas index objects (in case of multi-index level drop).
"""
if method is not None and not isinstance(method, str):
raise TypeError("``method`` must be a string")
pos_indexers = {}
new_indexes = {}
dim_indexers = get_dim_indexers(data_obj, indexers)
for dim, label in dim_indexers.items():
try:
index = data_obj.indexes[dim]
except KeyError:
# no index for this dimension: reuse the provided labels
if method is not None or tolerance is not None:
raise ValueError(
"cannot supply ``method`` or ``tolerance`` "
"when the indexed dimension does not have "
"an associated coordinate."
)
pos_indexers[dim] = label
else:
coords_dtype = data_obj.coords[dim].dtype
label = maybe_cast_to_coords_dtype(label, coords_dtype)
idxr, new_idx = convert_label_indexer(index, label, dim, method, tolerance)
pos_indexers[dim] = idxr
if new_idx is not None:
new_indexes[dim] = new_idx
return pos_indexers, new_indexes
def slice_slice(old_slice, applied_slice, size):
"""Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
"""
step = (old_slice.step or 1) * (applied_slice.step or 1)
# For now, use the hack of turning old_slice into an ndarray to reconstruct
# the slice start and stop. This is not entirely ideal, but it is still
# definitely better than leaving the indexer as an array.
items = _expand_slice(old_slice, size)[applied_slice]
if len(items) > 0:
start = items[0]
stop = items[-1] + int(np.sign(step))
if stop < 0:
stop = None
else:
start = 0
stop = 0
return slice(start, stop, step)
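# Illustrative sketch (hypothetical values): applying [1:5] on top of an axis
# already sliced with [2:20:2] over a dimension of size 30 gives the same
# elements as indexing sequentially:
#   >>> slice_slice(slice(2, 20, 2), slice(1, 5), 30)
#   slice(4, 11, 2)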
def _index_indexer_1d(old_indexer, applied_indexer, size):
assert isinstance(applied_indexer, integer_types + (slice, np.ndarray))
if isinstance(applied_indexer, slice) and applied_indexer == slice(None):
# shortcut for the usual case
return old_indexer
if isinstance(old_indexer, slice):
if isinstance(applied_indexer, slice):
indexer = slice_slice(old_indexer, applied_indexer, size)
else:
indexer = _expand_slice(old_indexer, size)[applied_indexer]
else:
indexer = old_indexer[applied_indexer]
return indexer
class ExplicitIndexer:
"""Base class for explicit indexer objects.
ExplicitIndexer objects wrap a tuple of values given by their ``tuple``
property. These tuples should always have length equal to the number of
dimensions on the indexed array.
Do not instantiate BaseIndexer objects directly: instead, use one of the
sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer.
"""
__slots__ = ("_key",)
def __init__(self, key):
if type(self) is ExplicitIndexer:
raise TypeError("cannot instantiate base ExplicitIndexer objects")
self._key = tuple(key)
@property
def tuple(self):
return self._key
def __repr__(self):
return f"{type(self).__name__}({self.tuple})"
def as_integer_or_none(value):
return None if value is None else operator.index(value)
def as_integer_slice(value):
start = as_integer_or_none(value.start)
stop = as_integer_or_none(value.stop)
step = as_integer_or_none(value.step)
return slice(start, stop, step)
class BasicIndexer(ExplicitIndexer):
"""Tuple for basic indexing.
All elements should be int or slice objects. Indexing follows NumPy's
rules for basic indexing: each axis is independently sliced and axes
indexed with an integer are dropped from the result.
"""
__slots__ = ()
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError(f"key must be a tuple: {key!r}")
new_key = []
for k in key:
if isinstance(k, integer_types):
k = int(k)
elif isinstance(k, slice):
k = as_integer_slice(k)
else:
raise TypeError(
f"unexpected indexer type for {type(self).__name__}: {k!r}"
)
new_key.append(k)
super().__init__(new_key)
class OuterIndexer(ExplicitIndexer):
"""Tuple for outer/orthogonal indexing.
All elements should be int, slice or 1-dimensional np.ndarray objects with
an integer dtype. Indexing is applied independently along each axis, and
axes indexed with an integer are dropped from the result. This type of
indexing works like MATLAB/Fortran.
"""
__slots__ = ()
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError(f"key must be a tuple: {key!r}")
new_key = []
for k in key:
if isinstance(k, integer_types):
k = int(k)
elif isinstance(k, slice):
k = as_integer_slice(k)
elif isinstance(k, np.ndarray):
if not np.issubdtype(k.dtype, np.integer):
raise TypeError(
f"invalid indexer array, does not have integer dtype: {k!r}"
)
if k.ndim != 1:
raise TypeError(
f"invalid indexer array for {type(self).__name__}; must have "
f"exactly 1 dimension: {k!r}"
)
k = np.asarray(k, dtype=np.int64)
else:
raise TypeError(
f"unexpected indexer type for {type(self).__name__}: {k!r}"
)
new_key.append(k)
super().__init__(new_key)
class VectorizedIndexer(ExplicitIndexer):
"""Tuple for vectorized indexing.
All elements should be slice or N-dimensional np.ndarray objects with an
integer dtype and the same number of dimensions. Indexing follows proposed
rules for np.ndarray.vindex, which matches NumPy's advanced indexing rules
(including broadcasting) except sliced axes are always moved to the end:
https://github.com/numpy/numpy/pull/6256
"""
__slots__ = ()
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError(f"key must be a tuple: {key!r}")
new_key = []
ndim = None
for k in key:
if isinstance(k, slice):
k = as_integer_slice(k)
elif isinstance(k, np.ndarray):
if not np.issubdtype(k.dtype, np.integer):
raise TypeError(
f"invalid indexer array, does not have integer dtype: {k!r}"
)
if ndim is None:
ndim = k.ndim
elif ndim != k.ndim:
ndims = [k.ndim for k in key if isinstance(k, np.ndarray)]
raise ValueError(
"invalid indexer key: ndarray arguments "
f"have different numbers of dimensions: {ndims}"
)
k = np.asarray(k, dtype=np.int64)
else:
raise TypeError(
f"unexpected indexer type for {type(self).__name__}: {k!r}"
)
new_key.append(k)
super().__init__(new_key)
class ExplicitlyIndexed:
"""Mixin to mark support for Indexer subclasses in indexing.
"""
__slots__ = ()
class ExplicitlyIndexedNDArrayMixin(utils.NDArrayMixin, ExplicitlyIndexed):
__slots__ = ()
def __array__(self, dtype=None):
key = BasicIndexer((slice(None),) * self.ndim)
return np.asarray(self[key], dtype=dtype)
class ImplicitToExplicitIndexingAdapter(utils.NDArrayMixin):
"""Wrap an array, converting tuples into the indicated explicit indexer."""
__slots__ = ("array", "indexer_cls")
def __init__(self, array, indexer_cls=BasicIndexer):
self.array = as_indexable(array)
self.indexer_cls = indexer_cls
def __array__(self, dtype=None):
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
key = expanded_indexer(key, self.ndim)
result = self.array[self.indexer_cls(key)]
if isinstance(result, ExplicitlyIndexed):
return type(self)(result, self.indexer_cls)
else:
# Sometimes explicitly indexed arrays return NumPy arrays or
# scalars.
return result
class LazilyOuterIndexedArray(ExplicitlyIndexedNDArrayMixin):
"""Wrap an array to make basic and outer indexing lazy.
"""
__slots__ = ("array", "key")
def __init__(self, array, key=None):
"""
Parameters
----------
array : array_like
Array like object to index.
key : ExplicitIndexer, optional
Array indexer. If provided, it is assumed to already be in
canonical expanded form.
"""
if isinstance(array, type(self)) and key is None:
# unwrap
key = array.key
array = array.array
if key is None:
key = BasicIndexer((slice(None),) * array.ndim)
self.array = as_indexable(array)
self.key = key
def _updated_key(self, new_key):
iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim))
full_key = []
for size, k in zip(self.array.shape, self.key.tuple):
if isinstance(k, integer_types):
full_key.append(k)
else:
full_key.append(_index_indexer_1d(k, next(iter_new_key), size))
full_key = tuple(full_key)
if all(isinstance(k, integer_types + (slice,)) for k in full_key):
return BasicIndexer(full_key)
return OuterIndexer(full_key)
@property
def shape(self):
shape = []
for size, k in zip(self.array.shape, self.key.tuple):
if isinstance(k, slice):
shape.append(len(range(*k.indices(size))))
elif isinstance(k, np.ndarray):
shape.append(k.size)
return tuple(shape)
def __array__(self, dtype=None):
array = as_indexable(self.array)
        return np.asarray(array[self.key], dtype=dtype)
def transpose(self, order):
return LazilyVectorizedIndexedArray(self.array, self.key).transpose(order)
def __getitem__(self, indexer):
if isinstance(indexer, VectorizedIndexer):
array = LazilyVectorizedIndexedArray(self.array, self.key)
return array[indexer]
return type(self)(self.array, self._updated_key(indexer))
def __setitem__(self, key, value):
if isinstance(key, VectorizedIndexer):
raise NotImplementedError(
"Lazy item assignment with the vectorized indexer is not yet "
"implemented. Load your data first by .load() or compute()."
)
full_key = self._updated_key(key)
self.array[full_key] = value
def __repr__(self):
return f"{type(self).__name__}(array={self.array!r}, key={self.key!r})"
class LazilyVectorizedIndexedArray(ExplicitlyIndexedNDArrayMixin):
"""Wrap an array to make vectorized indexing lazy.
"""
__slots__ = ("array", "key")
def __init__(self, array, key):
"""
Parameters
----------
array : array_like
Array like object to index.
key : VectorizedIndexer
"""
if isinstance(key, (BasicIndexer, OuterIndexer)):
self.key = _outer_to_vectorized_indexer(key, array.shape)
else:
self.key = _arrayize_vectorized_indexer(key, array.shape)
self.array = as_indexable(array)
@property
def shape(self):
return np.broadcast(*self.key.tuple).shape
def __array__(self, dtype=None):
        return np.asarray(self.array[self.key], dtype=dtype)
def _updated_key(self, new_key):
return _combine_indexers(self.key, self.shape, new_key)
def __getitem__(self, indexer):
# If the indexed array becomes a scalar, return LazilyOuterIndexedArray
if all(isinstance(ind, integer_types) for ind in indexer.tuple):
key = BasicIndexer(tuple(k[indexer.tuple] for k in self.key.tuple))
return LazilyOuterIndexedArray(self.array, key)
return type(self)(self.array, self._updated_key(indexer))
def transpose(self, order):
key = VectorizedIndexer(tuple(k.transpose(order) for k in self.key.tuple))
return type(self)(self.array, key)
def __setitem__(self, key, value):
raise NotImplementedError(
"Lazy item assignment with the vectorized indexer is not yet "
"implemented. Load your data first by .load() or compute()."
)
def __repr__(self):
return f"{type(self).__name__}(array={self.array!r}, key={self.key!r})"
def _wrap_numpy_scalars(array):
"""Wrap NumPy scalars in 0d arrays."""
if np.isscalar(array):
return np.array(array)
else:
return array
class CopyOnWriteArray(ExplicitlyIndexedNDArrayMixin):
__slots__ = ("array", "_copied")
def __init__(self, array):
self.array = as_indexable(array)
self._copied = False
def _ensure_copied(self):
if not self._copied:
self.array = as_indexable(np.array(self.array))
self._copied = True
def __array__(self, dtype=None):
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
return type(self)(_wrap_numpy_scalars(self.array[key]))
def transpose(self, order):
return self.array.transpose(order)
def __setitem__(self, key, value):
self._ensure_copied()
self.array[key] = value
class MemoryCachedArray(ExplicitlyIndexedNDArrayMixin):
__slots__ = ("array",)
def __init__(self, array):
self.array = _wrap_numpy_scalars(as_indexable(array))
def _ensure_cached(self):
if not isinstance(self.array, NumpyIndexingAdapter):
self.array = NumpyIndexingAdapter(np.asarray(self.array))
def __array__(self, dtype=None):
self._ensure_cached()
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
return type(self)(_wrap_numpy_scalars(self.array[key]))
def transpose(self, order):
return self.array.transpose(order)
def __setitem__(self, key, value):
self.array[key] = value
def as_indexable(array):
"""
This function always returns a ExplicitlyIndexed subclass,
so that the vectorized indexing is always possible with the returned
object.
"""
if isinstance(array, ExplicitlyIndexed):
return array
if isinstance(array, np.ndarray):
return NumpyIndexingAdapter(array)
if isinstance(array, pd.Index):
return PandasIndexAdapter(array)
if isinstance(array, dask_array_type):
return DaskIndexingAdapter(array)
if hasattr(array, "__array_function__"):
return NdArrayLikeIndexingAdapter(array)
raise TypeError("Invalid array type: {}".format(type(array)))
def _outer_to_vectorized_indexer(key, shape):
"""Convert an OuterIndexer into an vectorized indexer.
Parameters
----------
key : Outer/Basic Indexer
An indexer to convert.
shape : tuple
Shape of the array subject to the indexing.
Returns
-------
VectorizedIndexer
Tuple suitable for use to index a NumPy array with vectorized indexing.
Each element is an array: broadcasting them together gives the shape
of the result.
"""
key = key.tuple
n_dim = len([k for k in key if not isinstance(k, integer_types)])
i_dim = 0
new_key = []
for k, size in zip(key, shape):
if isinstance(k, integer_types):
new_key.append(np.array(k).reshape((1,) * n_dim))
else: # np.ndarray or slice
if isinstance(k, slice):
k = np.arange(*k.indices(size))
assert k.dtype.kind in {"i", "u"}
shape = [(1,) * i_dim + (k.size,) + (1,) * (n_dim - i_dim - 1)]
new_key.append(k.reshape(*shape))
i_dim += 1
return VectorizedIndexer(tuple(new_key))
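# Hypothetical example of the conversion above: for an array of shape (4, 4),
# OuterIndexer((np.array([0, 2]), np.array([1, 3]))) becomes a VectorizedIndexer
# whose arrays have shapes (2, 1) and (1, 2); broadcasting them selects the
# 2x2 block at rows [0, 2] and columns [1, 3].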
def _outer_to_numpy_indexer(key, shape):
"""Convert an OuterIndexer into an indexer for NumPy.
Parameters
----------
key : Basic/OuterIndexer
An indexer to convert.
shape : tuple
Shape of the array subject to the indexing.
Returns
-------
tuple
Tuple suitable for use to index a NumPy array.
"""
if len([k for k in key.tuple if not isinstance(k, slice)]) <= 1:
# If there is only one vector and all others are slice,
# it can be safely used in mixed basic/advanced indexing.
# Boolean index should already be converted to integer array.
return key.tuple
else:
return _outer_to_vectorized_indexer(key, shape).tuple
def _combine_indexers(old_key, shape, new_key):
""" Combine two indexers.
Parameters
----------
old_key: ExplicitIndexer
The first indexer for the original array
shape: tuple of ints
Shape of the original array to be indexed by old_key
new_key:
The second indexer for indexing original[old_key]
"""
if not isinstance(old_key, VectorizedIndexer):
old_key = _outer_to_vectorized_indexer(old_key, shape)
if len(old_key.tuple) == 0:
return new_key
new_shape = np.broadcast(*old_key.tuple).shape
if isinstance(new_key, VectorizedIndexer):
new_key = _arrayize_vectorized_indexer(new_key, new_shape)
else:
new_key = _outer_to_vectorized_indexer(new_key, new_shape)
return VectorizedIndexer(
tuple(o[new_key.tuple] for o in np.broadcast_arrays(*old_key.tuple))
)
@enum.unique
class IndexingSupport(enum.Enum):
# for backends that support only basic indexer
BASIC = 0
# for backends that support basic / outer indexer
OUTER = 1
# for backends that support outer indexer including at most 1 vector.
OUTER_1VECTOR = 2
# for backends that support full vectorized indexer.
VECTORIZED = 3
def explicit_indexing_adapter(
key: ExplicitIndexer,
shape: Tuple[int, ...],
indexing_support: IndexingSupport,
raw_indexing_method: Callable,
) -> Any:
"""Support explicit indexing by delegating to a raw indexing method.
Outer and/or vectorized indexers are supported by indexing a second time
with a NumPy array.
Parameters
----------
key : ExplicitIndexer
Explicit indexing object.
shape : Tuple[int, ...]
Shape of the indexed array.
indexing_support : IndexingSupport enum
Form of indexing supported by raw_indexing_method.
raw_indexing_method: callable
Function (like ndarray.__getitem__) that when called with indexing key
in the form of a tuple returns an indexed array.
Returns
-------
Indexing result, in the form of a duck numpy-array.
"""
raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support)
result = raw_indexing_method(raw_key.tuple)
if numpy_indices.tuple:
# index the loaded np.ndarray
result = NumpyIndexingAdapter(np.asarray(result))[numpy_indices]
return result
def decompose_indexer(
indexer: ExplicitIndexer, shape: Tuple[int, ...], indexing_support: IndexingSupport
) -> Tuple[ExplicitIndexer, ExplicitIndexer]:
if isinstance(indexer, VectorizedIndexer):
return _decompose_vectorized_indexer(indexer, shape, indexing_support)
if isinstance(indexer, (BasicIndexer, OuterIndexer)):
return _decompose_outer_indexer(indexer, shape, indexing_support)
raise TypeError(f"unexpected key type: {indexer}")
def _decompose_slice(key, size):
""" convert a slice to successive two slices. The first slice always has
a positive step.
"""
start, stop, step = key.indices(size)
if step > 0:
# If key already has a positive step, use it as is in the backend
return key, slice(None)
else:
# determine stop precisely for step > 1 case
# e.g. [98:2:-2] -> [98:3:-2]
stop = start + int((stop - start - 1) / step) * step + 1
start, stop = stop + 1, start + 1
return slice(start, stop, -step), slice(None, None, -1)
def _decompose_vectorized_indexer(
indexer: VectorizedIndexer,
shape: Tuple[int, ...],
indexing_support: IndexingSupport,
) -> Tuple[ExplicitIndexer, ExplicitIndexer]:
"""
    Decompose a vectorized indexer into two successive indexers, where the
first indexer will be used to index backend arrays, while the second one
is used to index loaded on-memory np.ndarray.
Parameters
----------
indexer: VectorizedIndexer
indexing_support: one of IndexerSupport entries
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
    As an example, let us consider indexing a few elements from a backend array
with a vectorized indexer ([0, 3, 1], [2, 3, 2]).
Even if the backend array only supports outer indexing, it is more
efficient to load a subslice of the array than loading the entire array,
>>> backend_indexer = OuterIndexer([0, 1, 3], [2, 3])
>>> array = array[backend_indexer] # load subslice of the array
>>> np_indexer = VectorizedIndexer([0, 2, 1], [0, 1, 0])
>>> array[np_indexer] # vectorized indexing for on-memory np.ndarray.
"""
assert isinstance(indexer, VectorizedIndexer)
if indexing_support is IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
backend_indexer_elems = []
np_indexer_elems = []
# convert negative indices
indexer_elems = [
np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k
for k, s in zip(indexer.tuple, shape)
]
for k, s in zip(indexer_elems, shape):
if isinstance(k, slice):
# If it is a slice, then we will slice it as-is
# (but make its step positive) in the backend,
# and then use all of it (slice(None)) for the in-memory portion.
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer_elems.append(bk_slice)
np_indexer_elems.append(np_slice)
else:
# If it is a (multidimensional) np.ndarray, just pickup the used
# keys without duplication and store them as a 1d-np.ndarray.
oind, vind = np.unique(k, return_inverse=True)
backend_indexer_elems.append(oind)
np_indexer_elems.append(vind.reshape(*k.shape))
backend_indexer = OuterIndexer(tuple(backend_indexer_elems))
np_indexer = VectorizedIndexer(tuple(np_indexer_elems))
if indexing_support is IndexingSupport.OUTER:
return backend_indexer, np_indexer
# If the backend does not support outer indexing,
# backend_indexer (OuterIndexer) is also decomposed.
backend_indexer1, np_indexer1 = _decompose_outer_indexer(
backend_indexer, shape, indexing_support
)
np_indexer = _combine_indexers(np_indexer1, shape, np_indexer)
return backend_indexer1, np_indexer
def _decompose_outer_indexer(
indexer: Union[BasicIndexer, OuterIndexer],
shape: Tuple[int, ...],
indexing_support: IndexingSupport,
) -> Tuple[ExplicitIndexer, ExplicitIndexer]:
"""
    Decompose an outer indexer into two successive indexers, where the
first indexer will be used to index backend arrays, while the second one
is used to index the loaded on-memory np.ndarray.
Parameters
----------
indexer: OuterIndexer or BasicIndexer
indexing_support: One of the entries of IndexingSupport
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
    As an example, let us consider indexing a few elements from a backend array
    with an orthogonal indexer ([0, 3, 1], [2, 3, 2]).
Even if the backend array only supports basic indexing, it is more
efficient to load a subslice of the array than loading the entire array,
>>> backend_indexer = BasicIndexer(slice(0, 3), slice(2, 3))
>>> array = array[backend_indexer] # load subslice of the array
>>> np_indexer = OuterIndexer([0, 2, 1], [0, 1, 0])
>>> array[np_indexer] # outer indexing for on-memory np.ndarray.
"""
if indexing_support == IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
assert isinstance(indexer, (OuterIndexer, BasicIndexer))
backend_indexer = []
np_indexer = []
# make indexer positive
pos_indexer = []
for k, s in zip(indexer.tuple, shape):
if isinstance(k, np.ndarray):
pos_indexer.append(np.where(k < 0, k + s, k))
elif isinstance(k, integer_types) and k < 0:
pos_indexer.append(k + s)
else:
pos_indexer.append(k)
indexer_elems = pos_indexer
if indexing_support is IndexingSupport.OUTER_1VECTOR:
# some backends such as h5py supports only 1 vector in indexers
# We choose the most efficient axis
gains = [
(np.max(k) - np.min(k) + 1.0) / len(np.unique(k))
if isinstance(k, np.ndarray)
else 0
for k in indexer_elems
]
array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None
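        # Hypothetical example of the heuristic above: for indexer arrays
        # [1, 3, 5] and [0, 100], the gains are (5-1+1)/3 ~ 1.67 and
        # (100-0+1)/2 = 50.5, so the second axis keeps its vector index while
        # the first is widened to slice(1, 6) and re-adjusted in memory below.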
for i, (k, s) in enumerate(zip(indexer_elems, shape)):
if isinstance(k, np.ndarray) and i != array_index:
# np.ndarray key is converted to slice that covers the entire
# entries of this key.
backend_indexer.append(slice(np.min(k), np.max(k) + 1))
np_indexer.append(k - np.min(k))
elif isinstance(k, np.ndarray):
# Remove duplicates and sort them in the increasing order
pkey, ekey = np.unique(k, return_inverse=True)
backend_indexer.append(pkey)
np_indexer.append(ekey)
elif isinstance(k, integer_types):
backend_indexer.append(k)
else: # slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
if indexing_support == IndexingSupport.OUTER:
for k, s in zip(indexer_elems, shape):
if isinstance(k, slice):
# slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
elif isinstance(k, integer_types):
backend_indexer.append(k)
elif isinstance(k, np.ndarray) and (np.diff(k) >= 0).all():
backend_indexer.append(k)
np_indexer.append(slice(None))
else:
# Remove duplicates and sort them in the increasing order
oind, vind = np.unique(k, return_inverse=True)
backend_indexer.append(oind)
np_indexer.append(vind.reshape(*k.shape))
return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
# basic indexer
assert indexing_support == IndexingSupport.BASIC
for k, s in zip(indexer_elems, shape):
if isinstance(k, np.ndarray):
# np.ndarray key is converted to slice that covers the entire
# entries of this key.
backend_indexer.append(slice(np.min(k), np.max(k) + 1))
np_indexer.append(k - np.min(k))
elif isinstance(k, integer_types):
backend_indexer.append(k)
else: # slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
return (BasicIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
def _arrayize_vectorized_indexer(indexer, shape):
""" Return an identical vindex but slices are replaced by arrays """
slices = [v for v in indexer.tuple if isinstance(v, slice)]
if len(slices) == 0:
return indexer
arrays = [v for v in indexer.tuple if isinstance(v, np.ndarray)]
n_dim = arrays[0].ndim if len(arrays) > 0 else 0
i_dim = 0
new_key = []
for v, size in zip(indexer.tuple, shape):
if isinstance(v, np.ndarray):
new_key.append(np.reshape(v, v.shape + (1,) * len(slices)))
else: # slice
shape = (1,) * (n_dim + i_dim) + (-1,) + (1,) * (len(slices) - i_dim - 1)
new_key.append(np.arange(*v.indices(size)).reshape(shape))
i_dim += 1
return VectorizedIndexer(tuple(new_key))
def _dask_array_with_chunks_hint(array, chunks):
"""Create a dask array using the chunks hint for dimensions of size > 1."""
import dask.array as da
if len(chunks) < array.ndim:
raise ValueError("not enough chunks in hint")
new_chunks = []
for chunk, size in zip(chunks, array.shape):
new_chunks.append(chunk if size > 1 else (1,))
return da.from_array(array, new_chunks)
def _logical_any(args):
return functools.reduce(operator.or_, args)
def _masked_result_drop_slice(key, data=None):
key = (k for k in key if not isinstance(k, slice))
chunks_hint = getattr(data, "chunks", None)
new_keys = []
for k in key:
if isinstance(k, np.ndarray):
if isinstance(data, dask_array_type):
new_keys.append(_dask_array_with_chunks_hint(k, chunks_hint))
elif isinstance(data, sparse_array_type):
import sparse
new_keys.append(sparse.COO.from_numpy(k))
else:
new_keys.append(k)
else:
new_keys.append(k)
mask = _logical_any(k == -1 for k in new_keys)
return mask
def create_mask(indexer, shape, data=None):
"""Create a mask for indexing with a fill-value.
Parameters
----------
indexer : ExplicitIndexer
Indexer with -1 in integer or ndarray value to indicate locations in
the result that should be masked.
shape : tuple
Shape of the array being indexed.
data : optional
Data for which mask is being created. If data is a dask arrays, its chunks
are used as a hint for chunks on the resulting mask. If data is a sparse
array, the returned mask is also a sparse array.
Returns
-------
mask : bool, np.ndarray, SparseArray or dask.array.Array with dtype=bool
Same type as data. Has the same shape as the indexing result.
"""
if isinstance(indexer, OuterIndexer):
key = _outer_to_vectorized_indexer(indexer, shape).tuple
assert not any(isinstance(k, slice) for k in key)
mask = _masked_result_drop_slice(key, data)
elif isinstance(indexer, VectorizedIndexer):
key = indexer.tuple
base_mask = _masked_result_drop_slice(key, data)
slice_shape = tuple(
np.arange(*k.indices(size)).size
for k, size in zip(key, shape)
if isinstance(k, slice)
)
expanded_mask = base_mask[(Ellipsis,) + (np.newaxis,) * len(slice_shape)]
mask = duck_array_ops.broadcast_to(expanded_mask, base_mask.shape + slice_shape)
elif isinstance(indexer, BasicIndexer):
mask = any(k == -1 for k in indexer.tuple)
else:
raise TypeError("unexpected key type: {}".format(type(indexer)))
return mask
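# Hypothetical example: create_mask(OuterIndexer((np.array([0, -1, 2]),)), (5,))
# returns array([False, True, False]) -- True exactly where the indexer used
# the -1 fill marker.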
def _posify_mask_subindexer(index):
"""Convert masked indices in a flat array to the nearest unmasked index.
Parameters
----------
index : np.ndarray
One dimensional ndarray with dtype=int.
Returns
-------
np.ndarray
One dimensional ndarray with all values equal to -1 replaced by an
adjacent non-masked element.
"""
masked = index == -1
unmasked_locs = np.flatnonzero(~masked)
if not unmasked_locs.size:
# indexing unmasked_locs is invalid
return np.zeros_like(index)
masked_locs = np.flatnonzero(masked)
prev_value = np.maximum(0, np.searchsorted(unmasked_locs, masked_locs) - 1)
new_index = index.copy()
new_index[masked_locs] = index[unmasked_locs[prev_value]]
return new_index
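# Illustrative sketch (hypothetical input): an index of [0, -1, 3, -1] becomes
# [0, 0, 3, 3]; each masked (-1) position is replaced by an adjacent unmasked
# value.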
def posify_mask_indexer(indexer):
"""Convert masked values (-1) in an indexer to nearest unmasked values.
This routine is useful for dask, where it can be much faster to index
adjacent points than arbitrary points from the end of an array.
Parameters
----------
indexer : ExplicitIndexer
Input indexer.
Returns
-------
ExplicitIndexer
Same type of input, with all values in ndarray keys equal to -1
replaced by an adjacent non-masked element.
"""
key = tuple(
_posify_mask_subindexer(k.ravel()).reshape(k.shape)
if isinstance(k, np.ndarray)
else k
for k in indexer.tuple
)
return type(indexer)(key)
def is_fancy_indexer(indexer: Any) -> bool:
"""Return False if indexer is a int, slice, a 1-dimensional list, or a 0 or
1-dimensional ndarray; in all other cases return True
"""
if isinstance(indexer, (int, slice)):
return False
if isinstance(indexer, np.ndarray):
return indexer.ndim > 1
if isinstance(indexer, list):
return bool(indexer) and not isinstance(indexer[0], int)
return True
class NumpyIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a NumPy array to use explicit indexing."""
__slots__ = ("array",)
def __init__(self, array):
# In NumpyIndexingAdapter we only allow to store bare np.ndarray
if not isinstance(array, np.ndarray):
raise TypeError(
"NumpyIndexingAdapter only wraps np.ndarray. "
"Trying to wrap {}".format(type(array))
)
self.array = array
def _indexing_array_and_key(self, key):
if isinstance(key, OuterIndexer):
array = self.array
key = _outer_to_numpy_indexer(key, self.array.shape)
elif isinstance(key, VectorizedIndexer):
array = nputils.NumpyVIndexAdapter(self.array)
key = key.tuple
elif isinstance(key, BasicIndexer):
array = self.array
# We want 0d slices rather than scalars. This is achieved by
# appending an ellipsis (see
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes).
key = key.tuple + (Ellipsis,)
else:
raise TypeError("unexpected key type: {}".format(type(key)))
return array, key
def transpose(self, order):
return self.array.transpose(order)
def __getitem__(self, key):
array, key = self._indexing_array_and_key(key)
return array[key]
def __setitem__(self, key, value):
array, key = self._indexing_array_and_key(key)
try:
array[key] = value
except ValueError:
# More informative exception if read-only view
if not array.flags.writeable and not array.flags.owndata:
raise ValueError(
"Assignment destination is a view. "
"Do you want to .copy() array first?"
)
else:
raise
class NdArrayLikeIndexingAdapter(NumpyIndexingAdapter):
__slots__ = ("array",)
def __init__(self, array):
if not hasattr(array, "__array_function__"):
raise TypeError(
"NdArrayLikeIndexingAdapter must wrap an object that "
"implements the __array_function__ protocol"
)
self.array = array
class DaskIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a dask array to support explicit indexing."""
__slots__ = ("array",)
def __init__(self, array):
""" This adapter is created in Variable.__getitem__ in
Variable._broadcast_indexes.
"""
self.array = array
def __getitem__(self, key):
if not isinstance(key, VectorizedIndexer):
# if possible, short-circuit when keys are effectively slice(None)
# This preserves dask name and passes lazy array equivalence checks
# (see duck_array_ops.lazy_array_equiv)
rewritten_indexer = False
new_indexer = []
for idim, k in enumerate(key.tuple):
if isinstance(k, Iterable) and duck_array_ops.array_equiv(
k, np.arange(self.array.shape[idim])
):
new_indexer.append(slice(None))
rewritten_indexer = True
else:
new_indexer.append(k)
if rewritten_indexer:
key = type(key)(tuple(new_indexer))
if isinstance(key, BasicIndexer):
return self.array[key.tuple]
elif isinstance(key, VectorizedIndexer):
return self.array.vindex[key.tuple]
else:
assert isinstance(key, OuterIndexer)
key = key.tuple
try:
return self.array[key]
except NotImplementedError:
# manual orthogonal indexing.
# TODO: port this upstream into dask in a saner way.
value = self.array
for axis, subkey in reversed(list(enumerate(key))):
value = value[(slice(None),) * axis + (subkey,)]
return value
def __setitem__(self, key, value):
raise TypeError(
"this variable's data is stored in a dask array, "
"which does not support item assignment. To "
"assign to this variable, you must first load it "
"into memory explicitly using the .load() "
"method or accessing its .values attribute."
)
def transpose(self, order):
return self.array.transpose(order)
class PandasIndexAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a pandas.Index to preserve dtypes and handle explicit indexing.
"""
__slots__ = ("array", "_dtype")
def __init__(self, array: Any, dtype: DTypeLike = None):
self.array = utils.safe_cast_to_index(array)
if dtype is None:
if isinstance(array, pd.PeriodIndex):
dtype = np.dtype("O")
elif hasattr(array, "categories"):
# category isn't a real numpy dtype
dtype = array.categories.dtype
elif not utils.is_valid_numpy_dtype(array.dtype):
dtype = np.dtype("O")
else:
dtype = array.dtype
else:
dtype = np.dtype(dtype)
self._dtype = dtype
@property
def dtype(self) -> np.dtype:
return self._dtype
def __array__(self, dtype: DTypeLike = None) -> np.ndarray:
if dtype is None:
dtype = self.dtype
array = self.array
if isinstance(array, pd.PeriodIndex):
with suppress(AttributeError):
# this might not be public API
array = array.astype("object")
return np.asarray(array.values, dtype=dtype)
@property
def shape(self) -> Tuple[int]:
return (len(self.array),)
def __getitem__(
self, indexer
) -> Union[NumpyIndexingAdapter, np.ndarray, np.datetime64, np.timedelta64]:
key = indexer.tuple
if isinstance(key, tuple) and len(key) == 1:
# unpack key so it can index a pandas.Index object (pandas.Index
# objects don't like tuples)
(key,) = key
if getattr(key, "ndim", 0) > 1: # Return np-array if multidimensional
return NumpyIndexingAdapter(self.array.values)[indexer]
result = self.array[key]
if isinstance(result, pd.Index):
result = PandasIndexAdapter(result, dtype=self.dtype)
else:
# result is a scalar
if result is pd.NaT:
# work around the impossibility of casting NaT with asarray
# note: it probably would be better in general to return
                # pd.Timestamp rather than np.datetime64 but this is easier
# (for now)
result = np.datetime64("NaT", "ns")
elif isinstance(result, timedelta):
result = np.timedelta64(getattr(result, "value", result), "ns")
elif isinstance(result, pd.Timestamp):
# Work around for GH: pydata/xarray#1932 and numpy/numpy#10668
# numpy fails to convert pd.Timestamp to np.datetime64[ns]
result = np.asarray(result.to_datetime64())
elif self.dtype != object:
result = np.asarray(result, dtype=self.dtype)
# as for numpy.ndarray indexing, we always want the result to be
# a NumPy array.
result = utils.to_0d_array(result)
return result
def transpose(self, order) -> pd.Index:
        return self.array  # self.array should always be one-dimensional
def __repr__(self) -> str:
return "{}(array={!r}, dtype={!r})".format(
type(self).__name__, self.array, self.dtype
)
def copy(self, deep: bool = True) -> "PandasIndexAdapter":
# Not the same as just writing `self.array.copy(deep=deep)`, as
# shallow copies of the underlying numpy.ndarrays become deep ones
# upon pickling
# >>> len(pickle.dumps((self.array, self.array)))
# 4000281
# >>> len(pickle.dumps((self.array, self.array.copy(deep=False))))
# 8000341
array = self.array.copy(deep=True) if deep else self.array
return PandasIndexAdapter(array, self._dtype)
|
apache-2.0
| -2,998,736,085,517,629,000
| 34.431896
| 98
| 0.60333
| false
| 4.087327
| false
| false
| false
|
liugangabc/ccs_web
|
tcpclient.py
|
1
|
2137
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from tornado import ioloop, httpclient, gen
from tornado.gen import Task
import pdb, time, logging
import tornado.ioloop
import tornado.iostream
import socket
#Init logging
def init_logging():
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s -%(module)s:%(filename)s-L%(lineno)d-%(levelname)s: %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
logging.info("Current log level is : %s", logging.getLevelName(logger.getEffectiveLevel()))
class TCPClient(object):
def __init__(self, host, port, io_loop=None):
self.host = host
self.port = port
self.io_loop = io_loop
self.shutdown = False
self.stream = None
self.sock_fd = None
self.EOF = b' END'
def get_stream(self):
self.sock_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.stream = tornado.iostream.IOStream(self.sock_fd)
self.stream.set_close_callback(self.on_close)
def connect(self):
self.get_stream()
self.stream.connect((self.host, self.port), self.send_message)
def on_receive(self, data):
logging.info("Received: %s", data)
self.stream.close()
def on_close(self):
if self.shutdown:
self.io_loop.stop()
def send_message(self):
logging.info("Send message....")
self.stream.write(b"Hello Server!" + self.EOF)
self.stream.read_until(self.EOF, self.on_receive)
logging.info("After send....")
def set_shutdown(self):
self.shutdown = True
def main():
init_logging()
io_loop = tornado.ioloop.IOLoop.instance()
c1 = TCPClient("127.0.0.1", 8001, io_loop)
c2 = TCPClient("127.0.0.1", 8001, io_loop)
c1.connect()
c2.connect()
c2.set_shutdown()
logging.info("**********************start ioloop******************")
io_loop.start()
if __name__ == "__main__":
try:
main()
except Exception, ex:
print "Ocurred Exception: %s" % str(ex)
quit()
|
apache-2.0
| -9,188,828,094,435,973,000
| 27.506667
| 110
| 0.613009
| false
| 3.435691
| false
| false
| false
|
atodorov/pykickstart
|
tests/commands/timezone.py
|
1
|
6651
|
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
from pykickstart.errors import KickstartParseError
from pykickstart.commands.timezone import FC3_Timezone, F18_Timezone
class Timezone_TestCase(unittest.TestCase):
def runTest(self):
cmd = F18_Timezone()
self.assertEqual(cmd.__str__(), '')
class FC3_TestCase(CommandTest):
command = "timezone"
def runTest(self):
# assert defaults
self.assertFalse(FC3_Timezone().isUtc)
self.assertFalse(F18_Timezone().nontp)
# pass
self.assert_parse("timezone Eastern", "timezone Eastern\n")
# On FC6 and later, we write out --isUtc regardless of what the input was.
if self.__class__.__name__ == "FC3_TestCase":
self.assert_parse("timezone --utc Eastern", "timezone --utc Eastern\n")
else:
self.assert_parse("timezone --utc Eastern", "timezone --isUtc Eastern\n")
# fail
self.assert_parse_error("timezone")
self.assert_parse_error("timezone Eastern Central")
self.assert_parse_error("timezone --blah Eastern")
self.assert_parse_error("timezone --utc")
self.assert_parse_error("timezone --bogus-option")
# extra test coverage
cmd = self.handler().commands[self.command]
cmd.timezone = None
self.assertEqual(cmd.__str__(), "")
class FC6_TestCase(FC3_TestCase):
def runTest(self):
FC3_TestCase.runTest(self)
# pass
self.assert_parse("timezone --isUtc Eastern", "timezone --isUtc Eastern\n")
# fail
self.assert_parse_error("timezone --isUtc")
class F18_TestCase(FC6_TestCase):
def runTest(self):
# pass
self.assert_parse("timezone --utc Europe/Prague")
self.assert_parse("timezone --isUtc Europe/Prague\n")
self.assert_parse("timezone --isUtc Eastern", "timezone Eastern --isUtc\n")
self.assert_parse("timezone Europe/Prague")
self.assert_parse("timezone Europe/Prague --nontp",
"timezone Europe/Prague --nontp\n")
self.assert_parse("timezone Europe/Prague "
"--ntpservers=ntp.cesnet.cz,tik.nic.cz")
self.assert_parse("timezone Europe/Prague --ntpservers=ntp.cesnet.cz",
"timezone Europe/Prague --ntpservers=ntp.cesnet.cz\n")
# fail
self.assert_parse_error("timezone")
self.assert_parse_error("timezone Eastern Central")
self.assert_parse_error("timezone --blah Eastern")
self.assert_parse_error("timezone --utc")
self.assert_parse_error("timezone --isUtc")
self.assert_parse_error("timezone Europe/Prague --nontp "
"--ntpservers=ntp.cesnet.cz")
self.assert_parse_error("timezone Europe/Prague --ntpservers="
"ntp.cesnet.cz, tik.nic.cz")
class F23_TestCase(F18_TestCase):
def runTest(self):
# should keep multiple instances of the same URL
self.assert_parse("timezone --utc Europe/Prague --ntpservers=ntp.cesnet.cz,0.fedora.pool.ntp.org," +
"0.fedora.pool.ntp.org,0.fedora.pool.ntp.org,0.fedora.pool.ntp.org",
"timezone Europe/Prague --isUtc --ntpservers=ntp.cesnet.cz,0.fedora.pool.ntp.org," +
"0.fedora.pool.ntp.org,0.fedora.pool.ntp.org,0.fedora.pool.ntp.org\n")
self.assert_parse("timezone --utc Europe/Sofia --ntpservers=,0.fedora.pool.ntp.org,")
# fail
self.assert_parse_error("timezone Europe/Sofia --nontp --ntpservers=0.fedora.pool.ntp.org,1.fedora.pool.ntp.org")
class RHEL7_TestCase(F18_TestCase):
def runTest(self):
# since RHEL7 command version the timezone command can be used
# without a timezone specification
self.assert_parse("timezone --utc")
self.assert_parse("timezone Europe/Sofia")
self.assert_parse("timezone --isUtc")
self.assert_parse("timezone --ntpservers=ntp.cesnet.cz")
self.assert_parse("timezone --ntpservers=ntp.cesnet.cz,tik.nic.cz")
# unknown argument
self.assert_parse_error("timezone --blah")
# more than two timezone specs
self.assert_parse_error("timezone foo bar", exception=KickstartParseError)
self.assert_parse_error("timezone --utc foo bar", exception=KickstartParseError)
# just "timezone" without any arguments is also wrong as it really dosn't make sense
self.assert_parse_error("timezone")
# fail
self.assert_parse_error("timezone Europe/Sofia --nontp --ntpservers=0.fedora.pool.ntp.org,1.fedora.pool.ntp.org")
class F25_TestCase(F23_TestCase):
def runTest(self):
        # since the F25 version of the command, the timezone command can be used
# without a timezone specification
self.assert_parse("timezone --utc")
self.assert_parse("timezone --isUtc")
self.assert_parse("timezone --ntpservers=ntp.cesnet.cz")
self.assert_parse("timezone --ntpservers=ntp.cesnet.cz,tik.nic.cz")
# unknown argument
self.assert_parse_error("timezone --blah")
# more than two timezone specs
self.assert_parse_error("timezone foo bar", exception=KickstartParseError)
self.assert_parse_error("timezone --utc foo bar", exception=KickstartParseError)
# just "timezone" without any arguments is also wrong as it really dosn't make sense
self.assert_parse_error("timezone")
# fail
self.assert_parse_error("timezone Europe/Sofia --nontp --ntpservers=0.fedora.pool.ntp.org,1.fedora.pool.ntp.org")
if __name__ == "__main__":
unittest.main()
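# For reference, the command forms exercised above correspond to kickstart
# lines such as the following (illustrative snippet, not used by the tests):
#
#     timezone Europe/Prague --isUtc --ntpservers=ntp.cesnet.cz,tik.nic.cz
#     timezone America/New_York --nontp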
|
gpl-2.0
| -2,127,721,929,382,395,400
| 43.046358
| 121
| 0.658097
| false
| 3.778977
| true
| false
| false
|
scorpilix/Golemtest
|
apps/rendering/resources/imgcompare.py
|
1
|
3797
|
import logging
import math
from apps.rendering.resources.imgrepr import (EXRImgRepr, ImgRepr, load_img,
PILImgRepr)
logger = logging.getLogger("apps.rendering")
PSNR_ACCEPTABLE_MIN = 30
def check_size(file_, res_x, res_y):
img = load_img(file_)
if img is None:
return False
return img.get_size() == (res_x, res_y)
def calculate_psnr(mse, max_=255):
if mse <= 0 or max_ <= 0:
raise ValueError("MSE & MAX_ must be higher than 0")
return 20 * math.log10(max_) - 10 * math.log10(mse)
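# Worked example of the formula above (illustrative only): for mse = 25 and
# max_ = 255, PSNR = 20*log10(255) - 10*log10(25) ~= 48.13 - 13.98 ~= 34.15 dB,
# which is above PSNR_ACCEPTABLE_MIN (30), so compare_imgs() would treat the
# two images as a match.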
def calculate_mse(img1, img2, start1=(0, 0), start2=(0, 0), box=None):
mse = 0
if not isinstance(img1, ImgRepr) or not isinstance(img2, ImgRepr):
raise TypeError("img1 and img2 must be ImgRepr")
if box is None:
(res_x, res_y) = img1.get_size()
else:
(res_x, res_y) = box
    if res_x <= 0 or res_y <= 0:
        raise ValueError("Image or box resolution must be greater than 0")
    for i in range(0, res_x):
        for j in range(0, res_y):
            [r1, g1, b1] = img1.get_pixel((start1[0] + i, start1[1] + j))
            [r2, g2, b2] = img2.get_pixel((start2[0] + i, start2[1] + j))
            mse += (r1 - r2) * (r1 - r2) + \
                   (g1 - g2) * (g1 - g2) + \
                   (b1 - b2) * (b1 - b2)
    mse /= res_x * res_y * 3
return mse
def compare_imgs(img1, img2, max_col=255, start1=(0, 0),
start2=(0, 0), box=None):
mse = calculate_mse(img1, img2, start1, start2, box)
logger.debug("MSE = {}".format(mse))
if mse == 0:
return True
psnr = calculate_psnr(mse, max_col)
logger.debug("PSNR = {}".format(psnr))
return psnr >= PSNR_ACCEPTABLE_MIN
def compare_pil_imgs(file1, file2):
try:
img1 = PILImgRepr()
img1.load_from_file(file1)
img2 = PILImgRepr()
img2.load_from_file(file2)
return compare_imgs(img1, img2)
except Exception as err:
logger.info("Can't compare images {}, {}: {}".format(file1, file2,
err))
return False
def compare_exr_imgs(file1, file2):
try:
img1 = EXRImgRepr()
img1.load_from_file(file1)
img2 = EXRImgRepr()
img2.load_from_file(file2)
return compare_imgs(img1, img2, 1)
except Exception as err:
logger.info("Can't compare images {}, {}: {}".format(file1, file2,
err))
return False
def advance_verify_img(file_, res_x, res_y, start_box, box_size, compare_file,
cmp_start_box):
try:
img = load_img(file_)
cmp_img = load_img(compare_file)
if img is None or cmp_img is None:
return False
if img.get_size() != (res_x, res_y):
return False
def _box_too_small(box):
return box[0] <= 0 or box[1] <= 0
def _box_too_big(box):
return box[0] > res_x or box[1] > res_y
        if _box_too_small(box_size) or _box_too_big(box_size):
            logger.error("Wrong box size for advanced verification "
                         "{}".format(box_size))
            return False
if isinstance(img, PILImgRepr) and isinstance(cmp_img, PILImgRepr):
return compare_imgs(img, cmp_img, start1=start_box,
start2=cmp_start_box, box=box_size)
else:
return compare_imgs(img, cmp_img, max_col=1, start1=start_box,
start2=cmp_start_box, box=box_size)
except Exception:
logger.exception("Cannot verify images {} and {}".format(file_,
compare_file))
return False
|
gpl-3.0
| -3,704,521,870,128,538,000
| 32.307018
| 79
| 0.523045
| false
| 3.2761
| false
| false
| false
|
cloudedbats/cloudedbats_wurb
|
cloudedbats_wurb/wurb_raspberry_pi/control_by_gpio.py
|
1
|
4508
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://cloudedbats.org
# Copyright (c) 2016-2018 Arnold Andreasson
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import time
import logging
import threading
# Check if GPIO is available.
gpio_available = True
try: import RPi.GPIO as GPIO
except: gpio_available = False
class ControlByGpio(object):
""" Use GPIO for control when running without a graphical user interface. """
def __init__(self, callback_function=None):
""" """
self._callback_function = callback_function
self._logger = logging.getLogger('CloudedBatsWURB')
# Recording control.
self.rec_on_state = False
self.rec_off_state = False
self.rec_on_count = 0
self.rec_off_count = 0
# GPIO
if not gpio_available:
self._logger.error('GPIO control: RaspberryPi-GPIO not available.')
return
#
self._gpio_pin_rec_on = 37 # '#37 (GPIO 26)'
self._gpio_pin_rec_off = 38 # '#38 (GPIO 20)'
self._setup_gpio()
#
self._active = True
self._start_gpio_check()
def stop(self):
""" """
self._active = False
def is_gpio_rec_on(self):
""" """
return self.rec_on_state
def is_gpio_rec_off(self):
""" """
return self.rec_off_state
def is_gpio_rec_auto(self):
""" """
return (self.rec_on_state == False) and (self.rec_off_state == False)
def _fire_event(self, event):
""" Event for the state machine. """
if self._callback_function:
self._callback_function(event)
def _setup_gpio(self):
""" """
GPIO.setmode(GPIO.BOARD) # Use pin numbers (1-40).
# Use the built in pull-up resistors.
GPIO.setup(self._gpio_pin_rec_on, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self._gpio_pin_rec_off, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def _start_gpio_check(self):
""" """
# Check GPIO activity in a separate thread.
self._check_gpio_thread = threading.Thread(target = self._check_gpio, args = [])
self._check_gpio_thread.start()
def _check_gpio(self):
""" """
old_rec_on_state = self.rec_on_state
old_rec_off_state = self.rec_off_state
while self._active:
time.sleep(0.1)
try:
# Check if recording on is active.
if GPIO.input(self._gpio_pin_rec_on):
# High = inactive.
self.rec_on_count = 0
self.rec_on_state = False
else:
# Low = active.
if self.rec_on_count >= 5: # After 0.5 sec.
self.rec_on_state = True
else:
self.rec_on_count += 1
# Check if recording off is active.
if GPIO.input(self._gpio_pin_rec_off):
# High = inactive.
self.rec_off_count = 0
self.rec_off_state = False
else:
# Low = active.
if self.rec_off_count >= 5: # After 0.5 sec.
self.rec_off_state = True
else:
self.rec_off_count += 1
# Fire event.
if (old_rec_on_state != self.rec_on_state) or \
(old_rec_off_state != self.rec_off_state):
if self.rec_on_state:
# Rec on active.
self._fire_event('gpio_rec_on')
self._logger.debug('GPIO control: Fire event: gpio_rec_on.')
elif self.rec_off_state:
# Rec off active.
self._fire_event('gpio_rec_off')
self._logger.debug('GPIO control: Fire event: gpio_rec_off.')
else:
# Both inactive = Auto.
self._fire_event('gpio_rec_auto')
self._logger.debug('GPIO control: Fire event: gpio_rec_auto.')
#
old_rec_on_state = self.rec_on_state
old_rec_off_state = self.rec_off_state
except:
pass
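# Minimal usage sketch (illustrative only; it assumes the code runs on a
# Raspberry Pi with switches wired to the two pins configured above):
#
#     def on_gpio_event(event):
#         # event is one of 'gpio_rec_on', 'gpio_rec_off' or 'gpio_rec_auto'
#         print(event)
#
#     control = ControlByGpio(callback_function=on_gpio_event)
#     # ... run the recorder ...
#     control.stop()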
|
mit
| -5,799,279,537,577,791,000
| 34.496063
| 88
| 0.482919
| false
| 4.017825
| false
| false
| false
|
jabez007/Archons_Oracle
|
Oracle/Oracle.py
|
1
|
5883
|
#!/usr/bin/env python3
# https://theneuralperspective.com/2016/10/04/05-recurrent-neural-networks-rnn-part-1-basic-rnn-char-rnn/
# https://machinelearningmastery.com/text-generation-lstm-recurrent-neural-networks-python-keras/
# Larger LSTM Network to Generate Text for Last Hope LARP
import sys
import os
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
SEQ_LENGTH = 29
STEP = 1
HIDDEN_LAYER = 512
BATCH_SIZE = 128
_dataX_ = list()
_int_to_char_ = dict()
_char_to_int_ = dict()
def train():
"""
    Below are 10 ideas that may further improve the model and that you could experiment with:
    -- Predict fewer than 1,000 characters as output for a given seed.
    -- Remove all punctuation from the source text, and therefore from the model's vocabulary.
    -- Try a one-hot encoding for the input sequences.
* Train the model on padded sentences rather than random sequences of characters.
* Increase the number of training epochs to 100 or many hundreds.
-- Add dropout to the visible input layer and consider tuning the dropout percentage.
-- Tune the batch size, try a batch size of 1 as a (very slow) baseline and larger sizes from there.
-- Add more memory units to the layers and/or more layers.
-- Experiment with scale factors (temperature) when interpreting the prediction probabilities.
* Change the LSTM layers to be “stateful” to maintain state across batches.
"""
raw_text = load_data()
X, y = format_data(raw_text)
model = build_model(X, y)
# define the checkpoint
filepath="weights-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=2, save_best_only=True, mode='min') # verbose = 2. This gives you one output per epoch.
callbacks_list = [checkpoint]
# fit the model
model.fit(X, y, epochs=50, batch_size=BATCH_SIZE, callbacks=callbacks_list) # Tune the batch size
def sample(seed = "", length = 280): # on November 7, 2017, the limit was doubled to 280
raw_text = load_data()
X, y = format_data(raw_text)
model = build_model(X, y)
# pick a random seed
if seed == "":
start = numpy.random.randint(0, len(_dataX_)-1)
pattern = _dataX_[start]
else:
pattern = [ _char_to_int_[char] if char in _char_to_int_.keys() else _char_to_int_[" "]
for char in (seed.lower().strip() + " ").rjust(SEQ_LENGTH)[-SEQ_LENGTH:] ]
print("Seed:")
print("\"" + ''.join([_int_to_char_[value] for value in pattern]) + "\"")
# generate characters
generated_text = ""
n_vocab = len(_int_to_char_)
    for i in range(length):  # length defaults to 280, the Twitter character limit
x = numpy.zeros((1, SEQ_LENGTH, n_vocab))
for tt, char in enumerate(pattern):
x[0, tt, char] = 1.
prediction = model.predict(x, verbose=0)[0]
#index = numpy.argmax(prediction)
index = numpy.random.choice(range(n_vocab), 1, p=prediction[SEQ_LENGTH-1])[0]
result = _int_to_char_[index]
sys.stdout.write(result)
sys.stdout.flush()
generated_text += result
pattern.append(index)
pattern = pattern[1:len(pattern)]
print("\nDone.")
return generated_text
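# Illustrative helper for the "temperature" idea listed in train()'s docstring.
# It is not called anywhere in this module; it only sketches how the predicted
# distribution could be rescaled before sampling (lower temperature -> more
# conservative output, higher temperature -> more surprising output).
def _sample_with_temperature(probabilities, temperature=1.0):
    scaled = numpy.log(numpy.asarray(probabilities) + 1e-8) / temperature
    scaled = numpy.exp(scaled)
    scaled = scaled / numpy.sum(scaled)
    return numpy.random.choice(len(scaled), p=scaled)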
def load_data(filename = "lasthope.txt"):
# load text and covert to lowercase
raw_text = open(filename, encoding="utf8").read()
raw_text = raw_text.lower()
return raw_text
def format_data(raw_text):
global _int_to_char_, _char_to_int_
# create mapping of unique chars to integers
chars = sorted(list(set(raw_text)))
_char_to_int_ = dict((c, i) for i, c in enumerate(chars))
_int_to_char_ = dict((i, c) for i, c in enumerate(chars))
# summarize the loaded data
n_chars = len(raw_text)
n_vocab = len(chars)
print("Total Characters: " + str(n_chars))
print("Total Vocab: " + str(n_vocab))
# prepare the dataset of input to output pairs encoded as integers
dataY = []
for i in range(0, n_chars - SEQ_LENGTH, STEP):
seq_in = raw_text[i: i+SEQ_LENGTH]
seq_out = raw_text[i+1: i+1+SEQ_LENGTH]
_dataX_.append([_char_to_int_[char] for char in seq_in])
dataY.append([_char_to_int_[char] for char in seq_out])
n_patterns = len(_dataX_)
print("Total Patterns: " + str(n_patterns))
# One-hot encode X and y
X = numpy.zeros((n_patterns, SEQ_LENGTH, n_vocab), dtype=numpy.bool)
for i, seq in enumerate(_dataX_):
for t, char in enumerate(seq):
X[i, t, char] = 1
y = numpy.zeros((n_patterns, SEQ_LENGTH, n_vocab), dtype=numpy.bool)
for i, seq in enumerate(dataY):
for t, char in enumerate(seq):
y[i, t, char] = 1
return X, y
def build_model(X, y):
# define the LSTM model
model = Sequential()
model.add(LSTM(HIDDEN_LAYER, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(HIDDEN_LAYER, return_sequences=True))
model.add(LSTM(HIDDEN_LAYER, return_sequences=True))
model.add(Dense(y.shape[2], activation='softmax'))
# load previous network weights
loss = 10
filename = ""
for f in os.listdir():
if f.endswith('.hdf5'):
if float(f.split('.')[0].split('-')[2]) < loss:
filename = f
if filename != "":
print("checkpoint file: " + filename)
model.load_weights(filename)
model.compile(loss='categorical_crossentropy', optimizer='adam')
return model
# # # #
if __name__ == "__main__":
train()
|
mit
| -5,439,092,132,026,910,000
| 38.442953
| 159
| 0.622767
| false
| 3.506563
| false
| false
| false
|
hydroffice/hyo_soundspeed
|
hyo2/soundspeed/formats/writers/abstract.py
|
1
|
2150
|
from abc import ABCMeta, abstractmethod # , abstractproperty
import os
import logging
logger = logging.getLogger(__name__)
from hyo2.soundspeed.base.files import FileManager
from hyo2.soundspeed.formats.abstract import AbstractFormat
class AbstractWriter(AbstractFormat, metaclass=ABCMeta):
""" Abstract data writer """
def __repr__(self):
return "<%s:writer:%s:%s>" % (self.name, self.version, ",".join(self._ext))
def __init__(self):
super(AbstractWriter, self).__init__()
self.fod = None
@abstractmethod
def write(self, ssp, data_path, data_file=None, project=''):
pass
@abstractmethod
def _write_header(self):
pass
@abstractmethod
def _write_body(self):
pass
def finalize(self):
if self.fod:
if not self.fod.io.closed:
self.fod.io.close()
class AbstractTextWriter(AbstractWriter, metaclass=ABCMeta):
""" Abstract text data writer """
def __init__(self):
super(AbstractTextWriter, self).__init__()
def _write(self, data_path, data_file, encoding='utf8', append=False, binary=False):
"""Helper function to write the raw file"""
# data_path = os.path.join(data_path, self.name.lower()) # commented to avoid the creation of sub-folders
if not os.path.exists(data_path):
os.makedirs(data_path)
if data_file:
if len(data_file.split('.')) == 1:
data_file += (".%s" % (list(self.ext)[0],))
file_path = os.path.join(data_path, data_file)
else:
if self.ssp.cur.meta.original_path:
data_file = "%s.%s" % (os.path.basename(self.ssp.cur.meta.original_path), list(self.ext)[0])
else:
data_file = 'output.%s' % (list(self.ext)[0],)
file_path = os.path.join(data_path, data_file)
logger.info("output file: %s" % file_path)
if append:
mode = 'a'
else:
mode = 'w'
if binary:
mode = '%sb' % mode
self.fod = FileManager(file_path, mode=mode, encoding=encoding)
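# Hypothetical subclass sketch (illustrative only, assuming AbstractFormat adds
# no further abstract members): a concrete text writer implements write(),
# _write_header() and _write_body(), reusing _write() to open the output file
# and finalize() to close it.
#
#     class DummyWriter(AbstractTextWriter):
#
#         def write(self, ssp, data_path, data_file=None, project=''):
#             self.ssp = ssp
#             self._write(data_path=data_path, data_file=data_file)
#             self._write_header()
#             self._write_body()
#             self.finalize()
#
#         def _write_header(self):
#             self.fod.io.write("# header\n")
#
#         def _write_body(self):
#             self.fod.io.write("data\n")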
|
lgpl-2.1
| -3,848,063,316,767,106,600
| 27.289474
| 114
| 0.58
| false
| 3.668942
| false
| false
| false
|
Fluent-networks/floranet
|
floranet/web/rest/gateway.py
|
1
|
7710
|
import ipaddress
from flask_restful import Resource, reqparse, abort, inputs, fields, marshal
from flask_login import login_required
from twisted.internet.defer import inlineCallbacks, returnValue
from crochet import wait_for, TimeoutError
from floranet.models.gateway import Gateway
from floranet.log import log
# Crochet timeout. If the code block does not complete within this time,
# a TimeoutError exception is raised.
from __init__ import TIMEOUT
class GatewayResource(Resource):
"""Gateway resource base class.
Attributes:
restapi (RestApi): Flask Restful API object
server (NetServer): FloraNet network server object
fields (dict): Dictionary of attributes to be returned to a REST request
parser (RequestParser): Flask RESTful request parser
        args (dict): Parsed request arguments
"""
def __init__(self, **kwargs):
self.restapi = kwargs['restapi']
self.server = kwargs['server']
self.fields = {
'host': fields.String,
'eui': fields.Integer,
'name': fields.String,
'enabled': fields.Boolean,
'power': fields.Integer,
'created': fields.DateTime(dt_format='iso8601'),
'updated': fields.DateTime(dt_format='iso8601')
}
self.parser = reqparse.RequestParser(bundle_errors=True)
self.parser.add_argument('host', type=str)
self.parser.add_argument('eui', type=int)
self.parser.add_argument('name', type=str)
self.parser.add_argument('enabled', type=inputs.boolean)
self.parser.add_argument('power', type=int)
self.args = self.parser.parse_args()
class RestGateway(GatewayResource):
"""RestGateway Resource class.
Manages RESTAPI GET and PUT transactions for gateways.
"""
def __init__(self, **kwargs):
super(RestGateway, self).__init__(**kwargs)
@login_required
@wait_for(timeout=TIMEOUT)
@inlineCallbacks
def get(self, host):
"""Method to handle gateway GET requests"""
try:
g = yield Gateway.find(where=['host = ?', host], limit=1)
# Return a 404 if not found.
if g is None:
abort(404, message={'error': "Gateway {} doesn't exist.".format(host)})
returnValue(marshal(g, self.fields))
except TimeoutError:
log.error("REST API timeout retrieving gateway {host}",
host=host)
@login_required
@wait_for(timeout=TIMEOUT)
@inlineCallbacks
def put(self, host):
"""Method to handle gateway PUT requests
Args:
host (str): Gateway host address
"""
try:
gateway = yield Gateway.find(where=['host = ?', host], limit=1)
# Return a 404 if not found.
if gateway is None:
abort(404, message={'error': "Gateway {} doesn't exist".format(host)})
kwargs = {}
for a,v in self.args.items():
if v is not None and v != getattr(gateway, a):
kwargs[a] = v
setattr(gateway, a, v)
(valid, message) = yield gateway.valid()
if not valid:
abort(400, message=message)
# Update the gateway and server with the new attributes
if kwargs:
gateway.update(**kwargs)
self.server.lora.updateGateway(host, gateway)
returnValue(({}, 200))
except TimeoutError:
log.error("REST API timeout retrieving gateway {host}",
host=host)
@login_required
@wait_for(timeout=TIMEOUT)
@inlineCallbacks
def delete(self, host):
"""Method to handle gateway DELETE requests
Args:
host (str): Gateway host
"""
try:
g = yield Gateway.find(where=['host = ?', host], limit=1)
# Return a 404 if not found.
if g is None:
abort(404, message={'error': "Gateway {} doesn't exist.".format(host)})
deleted = yield g.delete()
self.server.lora.deleteGateway(g)
returnValue(({}, 200))
except TimeoutError:
log.error("REST API timeout retrieving gateway {host}",
host=host)
class RestGateways(GatewayResource):
""" RestGateways Resource class.
Manages REST API GET and POST transactions for reading multiple gateways,
and creating gateways.
"""
def __init__(self, **kwargs):
super(RestGateways, self).__init__(**kwargs)
@login_required
@wait_for(timeout=TIMEOUT)
@inlineCallbacks
def get(self):
"""Method to get all gateways"""
try:
gateways = yield Gateway.all()
if gateways is None:
returnValue({})
data = {}
for i,g in enumerate(gateways):
data[i] = marshal(g, self.fields)
returnValue(data)
except TimeoutError:
# Exception returns 500 to client
log.error("REST API timeout retrieving all gateways")
@login_required
@wait_for(timeout=TIMEOUT)
@inlineCallbacks
def post(self):
"""Method to create a gateway"""
host = self.args['host']
name = self.args['name']
eui = self.args['eui']
enabled = self.args['enabled']
power = self.args['power']
message = {}
# Check for required args
required = {'host', 'name', 'eui', 'enabled', 'power'}
for r in required:
if self.args[r] is None:
message[r] = "Missing the {} parameter.".format(r)
if message:
abort(400, message=message)
# Ensure we have a valid address
try:
ipaddress.ip_address(host)
except (ipaddress.AddressValueError, ValueError):
message = {'error': "Invalid IP address {} ".format(host)}
abort(400, message=message)
# Ensure we have a valid EUI
if not isinstance(eui, (int, long)):
message = {'error': "Invalid gateway EUI {} ".format(eui)}
abort(400, message=message)
# Check this gateway does not currently exist
exists = yield Gateway.exists(where=['host = ?', host])
if exists:
message = {'error': "Gateway address {} ".format(host) + \
"currently exists."}
abort(400, message=message)
# Check the EUI does not currently exist
exists = yield Gateway.exists(where=['eui = ?', eui])
if exists:
message = {'error': "Gateway EUI {} ".format(eui) + \
"currently exists."}
abort(400, message=message)
# Create and validate
gateway = Gateway(host=host, eui=eui, name=name, enabled=enabled, power=power)
(valid, message) = gateway.valid()
if not valid:
abort(400, message=message)
try:
g = yield gateway.save()
if g is None:
abort(500, message={'error': "Error saving the gateway."})
# Add the new gateway to the server.
self.server.lora.addGateway(g)
location = self.restapi.api.prefix + '/gateway/' + str(host)
returnValue(({}, 201, {'Location': location}))
except TimeoutError:
# Exception returns 500 to client
log.error("REST API timeout for gateway POST request")
|
mit
| 563,634,068,682,700,800
| 34.045455
| 87
| 0.557717
| false
| 4.482558
| false
| false
| false
|
firelab/viirs_ba
|
misc_utils/run_fom.py
|
1
|
1752
|
#
# Script to run all of the figure of merit code for a single run back to back.
#
import viirs_fom as vf
import sys
def all_fom(database_name, workers=12) :
# I over U FOM
vf.calc_all_ioveru_fom('{}_schema_info.csv'.format(database_name),
'gt', 'burnmask13', workers=workers)
# Zones
vf.do_all_zonetbl_runs('.','gt','burnmask13',
zone_tbl='fixed_zone_counts',
workers=workers,
mask_tbl='bobafet13')
# 2013 events
vf.do_all_zonetbl_runs('.','gt','burnmask13',
zonedef_tbl='calevents_2013',
zone_tbl='fixed_events_2013_counts',
zone_col='fireid',
year=2013,
workers=workers,
spatial_filter=True,
mask_tbl='bobafet13')
# 2014 events
vf.do_all_zonetbl_runs('.','gt','burnmask14',
zonedef_tbl='calevents_2014',
zone_tbl='fixed_events_2014_counts',
zone_col='fireid',
year=2014,
workers=workers,
spatial_filter=True,
mask_tbl='bobafet14')
#
if __name__ == "__main__" :
if len(sys.argv) != 2 :
print "Usage: {0} database_name".format(sys.argv[0])
print "Run this from the base directory of a batch-of-runs, and"
print "provide the database name associated with the entire batch."
sys.exit()
all_fom(sys.argv[1])
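# Example invocation (illustrative; "viirs_2013" is a made-up database name):
#
#     cd /path/to/batch-of-runs
#     python run_fom.py viirs_2013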
|
cc0-1.0
| -7,760,519,851,290,668,000
| 36.933333
| 78
| 0.450913
| false
| 4
| false
| false
| false
|