| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
import numpy as np
from skimage import io
def read_image(fn, normalize=True):
"""Read a CCD/CMOS image in .da format (Redshirt). [1_]
Parameters
----------
fn : string
The input filename.
normalize : bool, optional
If True (default), subtract the dark frame from the image data.
Returns
-------
images : array, shape (nrow, ncol, nframes)
The images (normalized by the dark frame if desired).
frame_interval : float
The time elapsed between frames, in milliseconds.
bnc : array, shape (8, nframes)
The bnc data.
dark_frame : array, shape (nrow, ncol)
The dark frame by which the image data should be normalized.
Notes
-----
Interlaced images, as produced by the option "write directly to disk",
are not currently supported.
References
----------
.. [1] http://www.redshirtimaging.com/support/dfo.html
"""
data = np.fromfile(fn, dtype=np.int16)
header_size = 2560
header = data[:header_size]
ncols, nrows = map(int, header[384:386]) # prevent int16 overflow
nframes = int(header[4])
frame_interval = header[388] / 1000
acquisition_ratio = header[391]
if frame_interval >= 10:
frame_interval *= header[390] # dividing factor
image_size = nrows * ncols * nframes
bnc_start = header_size + image_size
images = np.reshape(np.array(data[header_size:bnc_start]),
(nrows, ncols, nframes))
bnc_end = bnc_start + 8 * acquisition_ratio * nframes
bnc = np.reshape(np.array(data[bnc_start:bnc_end]), (8, nframes * acquisition_ratio))
dark_frame = np.reshape(np.array(data[bnc_end:-8]), (nrows, ncols))
if normalize:
images -= dark_frame[..., np.newaxis]
return images, frame_interval, bnc, dark_frame
def convert_images(fns, normalize=True):
for fn in fns:
image, frame_interval, bnc, dark_frame = read_image(fn, normalize)
out_fn = fn[:-3] + '.tif'
out_fn_dark = fn[:-3] + '.dark_frame.tif'
io.imsave(out_fn, np.transpose(image, (2, 0, 1)),
plugin='tifffile', compress=1)
io.imsave(out_fn_dark, dark_frame, plugin='tifffile', compress=1)
| jni/python-redshirt | redshirt/read.py | Python | mit | 2,123 |
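A minimal usage sketch for the module above. The `.da` filename is a placeholder, and `normalize=True` (the default) subtracts the dark frame as described in the docstring:

```python
# Hypothetical usage; 'experiment01.da' is a placeholder filename.
from redshirt.read import read_image, convert_images

images, frame_interval, bnc, dark = read_image('experiment01.da')
print(images.shape, frame_interval)  # (nrow, ncol, nframes), ms per frame

# Batch conversion writes experiment01.tif and experiment01.dark_frame.tif
convert_images(['experiment01.da'])
```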
#!/usr/bin/env python
from os.path import dirname
import os
import sys
if __name__ == "__main__":
sample_dir = dirname(os.path.abspath(__file__))
root = dirname(dirname(sample_dir))
sys.path.append(root)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sampleproj.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| tipsi/tipsi_tools | django_tests/manage.py | Python | mit | 957 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10a1 on 2016-07-19 15:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0041_auto_20160707_1856'),
]
operations = [
migrations.AddField(
model_name='studytableevent',
name='notes',
field=models.TextField(blank=True, null=True),
),
]
| DLance96/ox-dashboard | dashboard/migrations/0042_studytableevent_notes.py | Python | mit | 467 |
import os
from distutils.core import setup
ROOT = os.path.dirname(os.path.realpath(__file__))
setup(
name='wmsigner',
version='0.1.1',
url='https://github.com/egorsmkv/wmsigner',
description='WebMoney Signer',
long_description=open(os.path.join(ROOT, 'README.rst')).read(),
author='Egor Smolyakov',
author_email='egorsmkv@gmail.com',
license='MIT',
keywords='webmoney signer security wmsigner WMXI',
packages=['wmsigner'],
data_files=[('', ['README.rst'])],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| egorsmkv/wmsigner | setup.py | Python | mit | 848 |
# -*- coding:utf-8 -*-
import test_core
import sys
import demjson
test_core.title("Search user")
f = open("testconfig.json", 'r')
lines = f.read()
f.close()
jsonfiledata = demjson.decode(lines)
if jsonfiledata["url"] == "":
test_core.terr("Error: 'testconfig.json' configuration is incomplete.")
exit()
uurl = jsonfiledata["url"]+"search.php"
udataarr = {
'type': "username",
'word': sys.argv[1]
}
test_core.postarray(uurl,udataarr,True)
| cxchope/YashiLogin | tests/test_searchuser.py | Python | mit | 448 |
"""Models for the ``feedback_form`` app."""
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class Feedback(models.Model):
"""
Holds information about one user feedback.
:user: User account of the poster, if logged in.
:email: Email field, if user isn't logged in and wants to send her email.
:current_url: URL of the current page.
:message: Feedback text.
:creation_date: Datetime of the feedback creation.
:content_object: Optional related object the feedback is referring to.
"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name=_('User'),
related_name='feedback_form_submissions',
blank=True, null=True,
)
email = models.EmailField(
verbose_name=_('Email'),
blank=True,
)
current_url = models.URLField(
verbose_name=_('Current URL'),
max_length=4000,
blank=True,
)
message = models.TextField(
verbose_name=_('Message'),
max_length=4000,
)
creation_date = models.DateTimeField(
auto_now_add=True,
verbose_name=_('Creation Date'),
)
# Generic FK to the object this feedback is about
content_type = models.ForeignKey(
ContentType,
related_name='feedback_content_objects',
null=True, blank=True,
)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = GenericForeignKey('content_type', 'object_id')
class Meta:
ordering = ['-creation_date']
def __str__(self):
if self.user:
return '{0} - {1}'.format(self.creation_date, self.user)
elif self.email:
return '{0} - {1}'.format(self.creation_date, self.email)
return '{0}'.format(self.creation_date)
| bitmazk/django-feedback-form | feedback_form/models.py | Python | mit | 2,071 |
#!/usr/bin/python
"""
This tests that all the PLE games launch, except for doom; we
explicitly check that it isn't defined.
"""
import nose
import numpy as np
import unittest
NUM_STEPS=150
class NaiveAgent():
def __init__(self, actions):
self.actions = actions
def pickAction(self, reward, obs):
return self.actions[np.random.randint(0, len(self.actions))]
class MyTestCase(unittest.TestCase):
def run_a_game(self,game):
from ple import PLE
p = PLE(game,display_screen=True)
agent = NaiveAgent(p.getActionSet())
p.init()
reward = p.act(p.NOOP)
for i in range(NUM_STEPS):
obs = p.getScreenRGB()
reward = p.act(agent.pickAction(reward,obs))
def test_catcher(self):
from ple.games.catcher import Catcher
game = Catcher()
self.run_a_game(game)
def test_monsterkong(self):
from ple.games.monsterkong import MonsterKong
game = MonsterKong()
self.run_a_game(game)
def test_flappybird(self):
from ple.games.flappybird import FlappyBird
game = FlappyBird()
self.run_a_game(game)
def test_pixelcopter(self):
from ple.games.pixelcopter import Pixelcopter
game = Pixelcopter()
self.run_a_game(game)
def test_puckworld(self):
from ple.games.puckworld import PuckWorld
game = PuckWorld()
self.run_a_game(game)
def test_raycastmaze(self):
from ple.games.raycastmaze import RaycastMaze
game = RaycastMaze()
self.run_a_game(game)
def test_snake(self):
from ple.games.snake import Snake
game = Snake()
self.run_a_game(game)
def test_waterworld(self):
from ple.games.waterworld import WaterWorld
game = WaterWorld()
self.run_a_game(game)
def test_pong(self):
from ple.games.pong import Pong
game = Pong()
self.run_a_game(game)
def test_doom_not_defined(self):
from nose.tools import assert_raises
def invoke_doom():
DoomWrapper
assert_raises(NameError,invoke_doom)
if __name__ == "__main__":
nose.runmodule()
| ntasfi/PyGame-Learning-Environment | tests/test_ple.py | Python | mit | 2,211 |
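The test harness above can also drive a single game directly; a minimal sketch, assuming `ple` and its game modules are installed:

```python
# Mirrors run_a_game() above; display disabled for headless runs.
from ple import PLE
from ple.games.catcher import Catcher

p = PLE(Catcher(), display_screen=False)
agent = NaiveAgent(p.getActionSet())
p.init()
reward = p.act(p.NOOP)
for _ in range(NUM_STEPS):
    reward = p.act(agent.pickAction(reward, p.getScreenRGB()))
```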
import socket
import random
from PIL import Image
import json
import sys, getopt
import math
import pika
# Screen VARS
offset_x = 80
offset_y = 24
screen_width = 240
screen_height = 240
# Internal options
queueAddress = ''
fileName = ''
workers = 36
Matrix = []
def main(argv):
global fileName, workers
inputFile = ''
try:
opts, args = getopt.getopt(argv, "hi:w:", ["file=", "workers="])
except getopt.GetoptError:
print('img_to_queue.py -i <inputfile> -w workers')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('img_to_queue.py -i <inputfile> -w workers')
sys.exit()
elif opt in ("-i", "--file"):
fileName = arg
print("File to process: " + fileName)
elif opt in ("-w", "--workers"):
workers = int(arg)
if (math.sqrt(float(workers)) - int(math.sqrt(float(workers))) > 0):
print('The square root of the number of workers is not a whole number.')
sys.exit()
print("Amount of available workers: " + str(workers))
pompImage()
def addPixelToWorkFile(x, y, r, g, b, index_x, index_y, Matrix):
#print("Current index x:" + str(index_x) + " y: " + str(index_y))
Matrix[index_x][index_y].append({'x': x, 'y': y, 'rgb': "%0.2X" % r + '' + "%0.2X" % g + '' + "%0.2X" % b})
def pompImage():
print("Processiong image to JSON")
im = Image.open(fileName).convert('RGB')
im.thumbnail((240, 240), Image.ANTIALIAS)
_, _, width, height = im.getbbox()
# start with x and y index 1
slice_size = int(screen_width / int(math.sqrt(workers)))
amount_of_keys = int(screen_width / slice_size)
print(amount_of_keys)
w, h = amount_of_keys, amount_of_keys
Matrix = [[[] for x in range(w)] for y in range(h)]
# workFile = [[0 for x in range(amount_of_keys)] for y in range(amount_of_keys)]
for x in range(width):
index_x = int((x / slice_size))
for y in range(height):
r, g, b = im.getpixel((x, y))
index_y = int((y / slice_size))
addPixelToWorkFile(x + offset_x, y + offset_y, r, g, b, index_x, index_y, Matrix)
# print("Current index x:"+str(index_x)+" y: "+str(index_y)+" WORKER:"+str(index_y*index_x))
sendToQueue(Matrix)
def sendToQueue(arrayOfWorkers):
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost',
credentials=pika.PlainCredentials(username='pomper',
password='pomper')))
channel = connection.channel()
channel.queue_declare(queue='pomper', durable=False,)
channel.queue_purge(queue='pomper')
for worker in arrayOfWorkers:
for pixels in worker:
channel.basic_publish(exchange='',
routing_key='pomper',
body=json.dumps(pixels))
if __name__ == "__main__":
main(sys.argv[1:])
| jargij/led-pomper-sha2017 | img_to_queue.py | Python | mit | 3,094 |
import re
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.time import Time
from panoptes.mount.mount import AbstractMount
from ..utils.logger import has_logger
from ..utils.config import load_config
from ..utils import error as error
@has_logger
class Mount(AbstractMount):
"""
Mount class for iOptron mounts. Overrides the base `initialize` method
and providers some helper methods to convert coordinates.
"""
def __init__(self, *args, **kwargs):
self.logger.info('Creating iOptron mount')
super().__init__(*args, **kwargs)
self.config = load_config()
# Regexp to match the iOptron RA/Dec format
self._ra_format = r'(?P<ra_millisecond>\d{8})'
self._dec_format = r'(?P<dec_sign>[\+\-])(?P<dec_arcsec>\d{8})'
self._coords_format = re.compile(self._dec_format + self._ra_format)
self._raw_status = None
self._status_format = re.compile(
'(?P<gps>[0-2]{1})' +
'(?P<system>[0-7]{1})' +
'(?P<tracking>[0-4]{1})' +
'(?P<movement_speed>[1-9]{1})' +
'(?P<time_source>[1-3]{1})' +
'(?P<hemisphere>[01]{1})'
)
self._status_lookup = {
'gps': {
'0': 'Off',
'1': 'On',
'2': 'Data Extracted'
},
'system': {
'0': 'Stopped - Not at Zero Position',
'1': 'Tracking (PEC disabled)',
'2': 'Slewing',
'3': 'Guiding',
'4': 'Meridian Flipping',
'5': 'Tracking (PEC enabled)',
'6': 'Parked',
'7': 'Stopped - Zero Position'
},
'tracking': {
'0': 'Sidereal',
'1': 'Lunar',
'2': 'Solar',
'3': 'King',
'4': 'Custom'
},
'movement_speed': {
'1': '1x sidereal',
'2': '2x sidereal',
'3': '8x sidereal',
'4': '16x sidereal',
'5': '64x sidereal',
'6': '128x sidereal',
'7': '256x sidereal',
'8': '512x sidereal',
'9': 'Max sidereal',
},
'time_source': {
'1': 'RS-232',
'2': 'Hand Controller',
'3': 'GPS'
},
'hemisphere': {
'0': 'Southern',
'1': 'Northern'
}
}
self.logger.info('Mount created')
##################################################################################################
# Properties
##################################################################################################
@property
def is_parked(self):
""" bool: Mount parked status. """
self._is_parked = 'Parked' in self.status().get('system', '')
return self._is_parked
@property
def is_home(self):
""" bool: Mount home status. """
self._is_home = 'Stopped - Zero Position' in self.status().get('system', '')
return self._is_home
@property
def is_tracking(self):
""" bool: Mount tracking status. """
self._is_tracking = 'Tracking' in self.status().get('system', '')
return self._is_tracking
@property
def is_slewing(self):
""" bool: Mount slewing status. """
self._is_slewing = 'Slewing' in self.status().get('system', '')
return self._is_slewing
##################################################################################################
# Public Methods
##################################################################################################
def initialize(self):
""" Initialize the connection with the mount and setup for location.
iOptron mounts are initialized by sending the following two commands
to the mount:
* Version
* MountInfo
If the mount is successfully initialized, the `_setup_location_for_mount` method
is also called.
Returns:
bool: Returns the value from `self.is_initialized`.
"""
self.logger.info('Initializing {} mount'.format(__name__))
if not self.is_connected:
self.connect()
if self.is_connected and not self.is_initialized:
# We trick the mount into thinking it's initialized while we
# initialize otherwise the `serial_query` method will test
# to see if initialized and be put into loop.
self.is_initialized = True
actual_version = self.serial_query('version')
actual_mount_info = self.serial_query('mount_info')
expected_version = self.commands.get('version').get('response')
expected_mount_info = self.commands.get('mount_info').get('response')
self.is_initialized = False
# Test our init procedure for iOptron
if actual_version != expected_version or actual_mount_info != expected_mount_info:
self.logger.debug('{} != {}'.format(actual_version, expected_version))
self.logger.debug('{} != {}'.format(actual_mount_info, expected_mount_info))
raise error.MountNotFound('Problem initializing mount')
else:
self.is_initialized = True
self._setup_location_for_mount()
self.logger.info('Mount initialized: {}'.format(self.is_initialized))
return self.is_initialized
##################################################################################################
# Private Methods
##################################################################################################
def _setup_location_for_mount(self):
"""
Sets the mount up to the current location. Mount must be initialized first.
This uses mount.location (an astropy.coords.EarthLocation) to set most of the params and the rest is
read from a config file. Users should not call this directly.
Includes:
* Latitude set_lat
* Longitude set_long
* Daylight Savings disable_daylight_savings
* Universal Time Offset set_gmt_offset
* Current Date set_local_date
* Current Time set_local_time
"""
assert self.is_initialized, self.logger.warning('Mount has not been initialized')
assert self.location is not None, self.logger.warning( 'Please set a location before attempting setup')
self.logger.info('Setting up mount for location')
# Location
# Adjust the lat/long for format expected by iOptron
lat = '{:+07.0f}'.format(self.location.latitude.to(u.arcsecond).value)
lon = '{:+07.0f}'.format(self.location.longitude.to(u.arcsecond).value)
self.serial_query('set_long', lon)
self.serial_query('set_lat', lat)
# Time
self.serial_query('disable_daylight_savings')
gmt_offset = self.config.get('location').get('gmt_offset', 0)
self.serial_query('set_gmt_offset', gmt_offset)
now = Time.now() + gmt_offset * u.minute
self.serial_query('set_local_time', now.datetime.strftime("%H%M%S"))
self.serial_query('set_local_date', now.datetime.strftime("%y%m%d"))
def _mount_coord_to_skycoord(self, mount_coords):
"""
Converts between iOptron RA/Dec format and a SkyCoord
Args:
mount_coords (str): Coordinates as returned by mount
Returns:
astropy.SkyCoord: Mount coordinates as astropy SkyCoord with
EarthLocation included.
"""
coords_match = self._coords_format.fullmatch(mount_coords)
coords = None
self.logger.info("Mount coordinates: {}".format(coords_match))
if coords_match is not None:
ra = (int(coords_match.group('ra_millisecond')) * u.millisecond).to(u.hour)
dec = (int(coords_match.group('dec_arcsec')) * u.centiarcsecond).to(u.arcsec)
dec_sign = coords_match.group('dec_sign')
if dec_sign == '-':
dec = dec * -1
coords = SkyCoord(ra=ra, dec=dec, frame='icrs', unit=(u.hour, u.arcsecond))
else:
self.logger.warning(
"Cannot create SkyCoord from mount coordinates")
return coords
def _skycoord_to_mount_coord(self, coords):
"""
Converts between SkyCoord and a iOptron RA/Dec format.
`
TTTTTTTT(T) 0.01 arc-seconds
XXXXX(XXX) milliseconds
Command: “:SrXXXXXXXX#”
Defines the commanded right ascension, RA. Slew, calibrate and park commands operate on the
most recently defined right ascension.
Command: “:SdsTTTTTTTT#”
Defines the commanded declination, Dec. Slew, calibrate and park commands operate on the most
recently defined declination.
`
@param coords astropy.coordinates.SkyCoord
@retval A tuple of RA/Dec coordinates
"""
# RA in milliseconds
ra_ms = (coords.ra.hour * u.hour).to(u.millisecond)
mount_ra = "{:08.0f}".format(ra_ms.value)
self.logger.debug("RA (ms): {}".format(ra_ms))
dec_dms = (coords.dec.degree * u.degree).to(u.centiarcsecond)
self.logger.debug("Dec (centiarcsec): {}".format(dec_dms))
mount_dec = "{:=+08.0f}".format(dec_dms.value)
mount_coords = (mount_ra, mount_dec)
return mount_coords
def _set_zero_position(self):
""" Sets the current position as the zero position.
The iOptron allows you to set the current position directly, so
we simply call the iOptron command.
"""
self.logger.info("Setting zero position")
return self.serial_query('set_zero_position')
| fmin2958/POCS | panoptes/mount/ioptron.py | Python | mit | 10,044 |
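The wire format produced by `_skycoord_to_mount_coord` can be verified with plain astropy unit arithmetic; the sample coordinates below are made up for illustration:

```python
# RA is sent as milliseconds of time, Dec as 0.01-arcsecond units.
from astropy import units as u

ra_ms = (10.5 * u.hour).to(u.millisecond)         # 10.5 h -> 37,800,000 ms
dec_cas = (-22.5 * u.degree).to(u.arcsec) * 100   # -22.5 deg -> -8,100,000
print("{:08.0f}".format(ra_ms.value))             # '37800000'
print("{:=+08.0f}".format(dec_cas.value))         # '-8100000'
```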
"""
WSGI config for sms_relay project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import djcelery
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "sms_relay.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sms_relay.settings")
djcelery.setup_loader()
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| yeleman/sms_relay | sms_relay/wsgi.py | Python | mit | 1,468 |
import matplotlib.pyplot as plt
import numpy as np
import sys
import time
import scipy.interpolate as ip
infile = sys.argv[1]
indata = np.load(infile)
spec = indata[0]
samp_rate = indata[1]
fftsize = indata[2]
center_freq = 1419.4 # MHz
halffft = int(0.5*fftsize)
freqs = 0.5*samp_rate*np.array(range(-halffft,halffft))/(halffft)
#plt.plot(spec)
delta_nu = samp_rate/fftsize
plt.plot(freqs,spec)
plt.xlabel('relative to center [MHz]')
RFI = [[1419.4-0.210, 0.02],
#[1419.4-1.937, 0.015],
#[1419.4-4.4, 0.015],
#[1419.4+3.0, 0.01],
#[center_freq, 8*delta_nu] # remove dip in the center of band, always about 4 fft points wide. Use 8, else errors
]
#plt.figure()
#plt.plot(spec)
# DEFINE FLAGS in HZ
for item in RFI:
print item
RFI_freq = item[0]
RFI_width = item[1]
ch0_freq = center_freq - 0.5*samp_rate
ind_low = int(np.floor((RFI_freq-0.5*RFI_width - ch0_freq)/delta_nu))
ind_high = int(np.ceil((RFI_freq+0.5*RFI_width - ch0_freq)/delta_nu))
margin = min((ind_high-ind_low), ind_low, len(spec)-ind_high)
RFI_org = np.array([spec[ind_low-margin:ind_low], spec[ind_high:ind_high+margin]])
RFI_part = RFI_org.flatten()
xdata = range(ind_low-margin, ind_low) + range(ind_high, ind_high+margin)
print np.size(xdata), np.size(RFI_part)
spl = ip.UnivariateSpline(xdata,RFI_part, k=1, s=0)
interpdata = spl(range(ind_low, ind_high))
print interpdata
spec[ind_low:ind_high] = interpdata[:]
plt.figure()
plt.plot(RFI_part)
plt.plot(interpdata)
#plt.figure()
#plt.plot(freqs, spec)
#for flag in flags:
#
# Calculate flag indices
# For each flag, interpolate flagged values (splines)
# when all flaggs are applied and interpolated, proceed with convolve!
#plt.figure()
#convspec = np.convolve(spec, [1,1,1,1], mode='same')
#w = sig.boxcar(4)
#convspec=np.convolve(w/w.sum(),spec,mode='valid')
##convspec = sig.decimate(spec, 2)
#fftsize = fftsize/2
#halffft = int(0.5*fftsize)
#convfreqs = 0.5*samp_rate*np.array(range(-halffft,halffft))/(halffft)
#print np.shape(convspec)
#print np.shape(convfreqs)
#plt.plot(convfreqs,convspec)
#plt.xlabel('relative to center [Mhz]')
plt.show()
| varenius/salsa | USRP/usrp_gnuradio_dev/testold.py | Python | mit | 2,198 |
"""
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model('sites', 'Site')
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
'domain': 'example.com',
'name': 'BAC'
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model('sites', 'Site')
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
'domain': 'example.com',
'name': 'example.com'
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0002_alter_domain_unique'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
| pavlovicr/bac | bac/contrib/sites/migrations/0003_set_site_domain_and_name.py | Python | mit | 1,071 |
import os
import re
from subprocess import Popen, PIPE
from kivy.uix.popup import Popup
def run_syscall(cmd):
"""
run_syscall; handles shell calls; this function is used as a shortcut.
::cmd: String, shell command is expected.
"""
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
return out.rstrip()
def striptags(text):
"""
striptags; strips markup tags from text before it is displayed.
Most of the time this function is used as a shortcut.
::text: String; markuped text is expected
"""
return re.sub(r'\[[^>]*?\]', '', text)
def create_popup(title, content):
"""
create_popup; couple of actions' result displayed as popup,
this function used as shortcut.
::title: String.
::content: Label, kivy Label class expected.
"""
popup = Popup(title=title, content=content,
size_hint=(None, None), size=(300, 100))
return popup
def diff_formatter(text):
"""
diff_formatter; formats diff text: lines starting with '+' are
colored green, lines starting with '-' are colored red, and all
other lines stay black. Diff metadata such as the committer,
commit date, commit message and short commit id are collected
and returned together with the formatted diff.
::text: String
"""
def replacer(text, search, color):
return text
# conversion is disabled for now; the code below is kept for later.
result_text = ""
location = 0
while location != -1:
tmp_location = text.find(search)
if tmp_location != -1:
result_text += text[:tmp_location]
line_end = text[tmp_location + 2:].find("\n")
if line_end > 0:
result_text += "\n[color=%s]%s[/color]" % \
(color,
text[tmp_location + 1:tmp_location + 2 + line_end])
else:
result_text += "\n[color=%s]%s[/color]" % \
(color, text[tmp_location + 1:])
text = ""
location = tmp_location + 2 + line_end
text = text[location:]
else:
result_text += text
location = -1
return result_text
green = "\n+"
red = "\n-"
tmp_text = text
result_text = replacer(replacer(tmp_text, green, "00ff00"), red, "ff0000")
commit, merge, author, date = "", "", "", ""
data = '<<\n'.join(result_text.split("<<\n")[:1]).strip()
if data.startswith('sha'):
diff = '<<\n'.join(result_text.split("<<\n")[1:]).strip()
message = data.split('>>')[1].strip()
commit = data.split('author:')[0].split(
'sha:(')[1].replace(')', '').strip()
author = data.split('date:')[0].split(
'author:(')[1].replace(')', '').strip()
date = data.split('message:')[0].split(
'date:(')[1].replace(')', '').strip()
else:
diff = data
message, commit, author, date = "", "", "", ""
return diff, message, commit, author, date
def findparent(curclass, targetclass):
"""
findparent; each classes has a parent, in an action
parent classes methods in generally are used to
reach needed class this function is used as shortcut.
until target class and current class names are equal
recursion continues.
::curclass: class, current class
::targetclass: class, target class
"""
reqclass = curclass
if type(targetclass) in [unicode, str]:
targetclass_name = targetclass
else:
targetclass_name = str(targetclass().__class__).\
split('.')[1].replace("'>", "")
while True:
cls = str(reqclass.__class__).split('.')[1].replace("'>", "")
if cls == targetclass_name:
break
elif cls == 'core':
reqclass = None
break
reqclass = reqclass.parent
return reqclass
| RedXBeard/gitwatcher-ui | shortcuts.py | Python | mit | 4,040 |
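The two pure string helpers can be exercised on their own; a quick sketch (importing the module still pulls in kivy for `Popup`, so kivy is assumed installed, and `run_syscall` returns bytes under Python 3 since the module targets Python 2):

```python
from shortcuts import run_syscall, striptags

print(striptags('[color=ff0000]error[/color] line'))  # -> 'error line'
print(run_syscall('echo hello'))                      # -> 'hello'
```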
"""
@author: Daniel Butum, Group 911
"""
from domain.number import Number, NumberException
from utils.number import convert_to_int
class Console:
def __init__(self):
pass
def run(self):
"""
Run the console gui
"""
# print initial help menu
print(self._get_help_menu())
while True:
option = self._get_command()
try:
if option == "q": # quit the loop
break
elif option == "h": # display help
print(self._get_help_menu())
elif option == "1": # add two numbers
print("Addition")
base = self._get_int_command("Base of the numbers: ")
number_a = Number(self._get_command("Number a: "), base)
number_b = Number(self._get_command("Number b: "), base)
print("Result: {0} (base {1})".format(number_a + number_b, base))
elif option == "2":
print("Subtraction")
base = self._get_int_command("Base of the numbers: ")
number_a = Number(self._get_command("Number a(must be larger than b): "), base)
number_b = Number(self._get_command("Number b: "), base)
print("Result: {0} (base {1})".format(number_a - number_b, base))
elif option == "3":
print("Multiplication")
base = self._get_int_command("Base of the number: ")
number = Number(self._get_command("Number: "), base)
scalar = self._get_int_command("Scalar: ")
print("Result: {0} (base {1})".format(number * scalar, base))
elif option == "4":
print("Division")
base = self._get_int_command("Base of the number: ")
number = Number(self._get_command("Number: "), base)
scalar = self._get_int_command("Scalar: ")
quotient, remainder = divmod(number, scalar)
print("Quotient = {0} (base {1}), \n Remainder = {2}".format(quotient, base, remainder))
elif option == "5" or option == "6" or option == "7":
if option == "5":
print("Conversion using substitution")
if option == "6":
print("Conversion using multiplication and division method")
if option == "7":
print("Using rapid conversions")
source_base = self._get_int_command("Base of the number: ")
number = Number(self._get_command("Number: "), source_base)
destination_base = self._get_int_command("Destination base: ")
if option == "5":
number.convert_substitution(destination_base)
if option == "6":
number.convert_division(destination_base)
if option == "7":
number.convert_rapid(destination_base)
print("Result: {0} (base {1})".format(number, destination_base))
else:
print("Option does not exist. Please try again")
except NumberException as ex:
print(ex)
@staticmethod
def _get_help_menu():
"""
Returns the menu
"""
return """Basic operations and conversions by Daniel Butum
1. Add two numbers
2. Subtract two numbers
3. Multiplication by one digit
4. Division by one digit
Base conversion using:
5. substitution method
6. successive divisions method
7. rapid conversions
h. Display this help menu
q. Quit
"""
@staticmethod
def _input(message):
"""
Improved input
Return:
string - user command
"""
try:
return input(message).strip()
except KeyboardInterrupt: # handle CTRL + C interrupt
return exit('CTRL + C')
except EOFError: # handle CTRL + D
return exit('CTRL + D')
@staticmethod
def _get_command(message=">>> "):
"""
Gets the command inputted by the user
"""
return Console._input(message).lower()
@staticmethod
def _get_int_command(message, not_empty=True):
"""
Gets the command inputted by the user if is an int
"""
command = Console._input(message)
while convert_to_int(command) is None:
# if empty command and option is set => return
if not not_empty and command == "":
return command
print("Please retry again with an integer")
command = Console._input(message)
return int(command)
@staticmethod
def display_help():
"""
Display the message to the user
"""
print(Console._get_help_menu())
| leyyin/university | computational-logic/src-number-converter/ui/console.py | Python | mit | 5,104 |
"""
Coverers of the filtrated space
"""
from __future__ import print_function
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
class HyperRectangleCoverer(BaseEstimator, TransformerMixin):
""" Covers the space using overlapping hyperectangles
Parameters
----------
intervals: integer or list of integers
number of intervals in each filtered space dimension, if an integer
is specified the same number is used in all dimensions.
overlap: float or list of floats
fraction of overlap between hyperrectangles in each space dimension,
if a single float is specified the same overlap is used in all
dimensions.
Attributes
----------
"""
def __init__(self, intervals=10, overlap=0.5):
self.intervals = intervals
self.overlap = overlap
def fit(self, X, y=None):
""" Creates the space covering for the input data
It creates a hyperrectangle covering of the multidimensional space of X.
Parameters
----------
X: array-like, shape=(n_samples, n_features)
Data which will be covered.
"""
if y is not None:
raise ValueError("y value will not be used")
if np.iterable(self.intervals):
if len(self.intervals) != X.shape[1]:
raise ValueError("length of intervals not matches X dimension")
else:
intervals = np.array(self.intervals, dtype=int)
else:
intervals = np.full((X.shape[1]), self.intervals, dtype=int)
if np.iterable(self.overlap):
if len(self.overlap) != X.shape[1]:
raise ValueError("length of overlap not matches X dimension")
else:
overlap = np.array(self.overlap, dtype=float)
else:
overlap = np.full((X.shape[1]), self.overlap, dtype=float)
# partition each dimension, including the last point
bbs, ws = zip(*[np.linspace(*min_max_num, endpoint=True, retstep=True)
for min_max_num in
zip(np.min(X, axis=0),
np.max(X, axis=0), intervals + 1)])
# get cover lower and upper bounds
self.lowerbounds = np.array(np.meshgrid(*[bb[:-1] - shift for
bb, shift in
zip(bbs, ws * overlap)])) \
.T.reshape(-1, X.shape[1])
self.upperbounds = np.array(np.meshgrid(*[bb[1:] + shift for
bb, shift in
zip(bbs, ws * overlap)])) \
.T.reshape(-1, X.shape[1])
return self
def transform(self, X, y=None):
""" Returns boolean array of space partition membership
Returns a (n_samples, n_partitions) boolean array whose elements
are true when the sample (row) is a member of each space partition
(column). This will be used to filter in the clustering space.
Parameters
----------
X: array-like, shape=(n_samples, n_features)
Data which will be partitioned into hyperrectangles.
Returns
-------
m_matrix: boolean array, shape=(n_samples, n_partitions)
Boolean matrix of sample membership to each partition
"""
if y is not None:
raise ValueError("y value will not be used")
return np.logical_and(
np.all(X[:, :, np.newaxis] > self.lowerbounds.T, axis=1),
np.all(X[:, :, np.newaxis] < self.upperbounds.T, axis=1))
def overlap_matrix(self):
""" Returns a boolean array with the overlaps between space partitions
Returns a (n_partitions, n_partitions) boolean array whose elements
are true when there is overlap between the i and j partitions, only
upper triangle is filled (rest is False).
Returns
-------
overlap_matrix: boolean array, shape=(n_partitions, n_partitions)
Boolean matrix of overlaping between partitions, only the upper
triangle is filled and the rest is False.
"""
overlap_matrix = None
i_min_leq_j_min = self.lowerbounds[
:, :, np.newaxis] <= self.lowerbounds.T
i_max_geq_j_min = self.upperbounds[
:, :, np.newaxis] >= self.lowerbounds.T
overlap_matrix = np.all((i_min_leq_j_min, i_max_geq_j_min), axis=0)
overlap_matrix = np.any((overlap_matrix, overlap_matrix.T), axis=0)
overlap_matrix = np.all(overlap_matrix, axis=1)
# only upper triagular filled
np.fill_diagonal(overlap_matrix, False)
return np.triu(overlap_matrix)
| pablodecm/cartographer | cartographer/coverers.py | Python | mit | 4,786 |
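A small fit/transform sketch on random data; with 4 intervals in each of 2 dimensions the cover has 16 hyperrectangles:

```python
import numpy as np
from cartographer.coverers import HyperRectangleCoverer

X = np.random.rand(100, 2)
coverer = HyperRectangleCoverer(intervals=4, overlap=0.25).fit(X)
membership = coverer.transform(X)
print(membership.shape)               # (100, 16)
print(membership.any(axis=1).all())   # True: every sample is covered
```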
from command import Command, is_command
from event import Event
class Say(Command):
shortname = 'say'
name = 'Say something to someone, or in the public chat'
@is_command
def say(self, player, *args):
if args[0] in self.world.players:
prefix = "(private) <%s> " % player.name
# a message to a user
msg_base = ' '.join(args[1:])
msg = prefix + ' '.join(args[1:])
target_player = self.find_player(args[0])
self.tell_player(args[0], msg)
self.world.emit_scripting_event('say', {
'source': player.to_dict(),
'target': target_player.to_dict(),
'msg': msg_base
}, scope=[target_player])
else:
prefix = "<%s> " % player.name
msg_base = ' '.join(args)
msg = prefix + ' '.join(args)
for p in self.world.players:
self.tell_player(p, msg)
self.world.emit_scripting_event('say', {
'source': player.to_dict(),
'target': player.location.to_dict(),
'msg': msg_base
}, scope=[player.location, player])
| lysol/lvlss | src/commands/say.py | Python | mit | 1,205 |
from abc import ABCMeta
from up.utils.up_logger import UpLogger
class BaseModule(metaclass=ABCMeta):
LOAD_ORDER = 0
def __init__(self, silent=False):
self.__silent = silent
self.__logger = UpLogger.get_logger()
self.__up = None
def initialize(self, up):
self.__up = up
self._log_debug("Initializing {}".format(self.class_name))
self._execute_initialization()
def _execute_initialization(self):
"""
Subclasses must override this method. Initialize the provider here.
:return: None
"""
pass
def _log_debug(self, message):
if not self.__silent:
self.logger.debug(message)
def _log_info(self, message):
if not self.__silent:
self.logger.info(message)
def _log_warning(self, message):
if not self.__silent:
self.logger.warning(message)
def _log_error(self, message):
if not self.__silent:
self.logger.error(message)
def _log_critical(self, message):
if not self.__silent:
self.logger.critical(message)
def load(self):
return False
def is_a(self, module_name):
return self.__class__.__name__ == module_name
@property
def logger(self):
return self.__logger
@property
def class_name(self):
"""
:rtype: str
"""
return self.__class__.__name__
@property
def up(self):
return self.__up
@property
def telemetry_content(self):
return None
@classmethod
def instance(cls, up):
return up.get_module(cls)
| Rashitko/up | up/base_module.py | Python | mit | 1,652 |
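A minimal subclass sketch; the module name, log message and load order are placeholders:

```python
from up.base_module import BaseModule

class HeartbeatModule(BaseModule):
    LOAD_ORDER = 1  # hypothetical ordering value

    def _execute_initialization(self):
        self._log_info('Heartbeat module initialized')

    def load(self):
        return True  # opt in to loading
```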
import urllib2
import base64
import json
from link import *
from GitFetcher import GitHubFetcher
username = "debuggerman"
password = "megadeth"
orgUrl = "https://api.github.com/orgs"
orgName = "coeus-solutions"
gitFetcher = GitHubFetcher(username = username, password = password, orgUrl = orgUrl, orgName = orgName)
gitFetcher.getOrgInfo()
| debuggerman/gitstats.py | gitstats.py | Python | mit | 344 |
#!/usr/bin/env python
def print_banner(s):
print('##------------------------------------------------------------------------------')
print(f'## {s}')
print('##------------------------------------------------------------------------------')
print_banner('First implementation')
class Base:
def foo(self):
raise NotImplementedError()
def bar(self):
raise NotImplementedError()
class Concrete(Base):
def foo(self):
return 'foo() called'
b = Base()
try:
b.foo()
except NotImplementedError as err:
print(f'Got this NotImplementedError error: {err!r}')
c = Concrete()
print(c.foo())
try:
c.bar()
except NotImplementedError as err:
print(f'Got this NotImplementedError error: {err!r}')
print_banner('Implementation with abc module')
from abc import ABCMeta, abstractmethod
class Base(metaclass=ABCMeta):
@abstractmethod
def foo(self):
pass
@abstractmethod
def bar(self):
pass
class Concrete(Base):
def foo(self):
pass
assert issubclass(Concrete, Base)
try:
print('Instantiate b = Base()')
b = Base()
except TypeError as err:
print(f'Got this TypeError error: {err!r}')
try:
print('Instantiate c = Concrete()')
c = Concrete()
except TypeError as err:
print(f'Got this TypeError error: {err!r}')
| eroicaleo/LearningPython | PythonTricks/ch04_05.py | Python | mit | 1,316 |
import functools
import operator
class Env(dict):
"""
Computational environment for some expression.
Implemented with recursive composition -
each environment has the outer environment.
Every lookup for a name N in the environment E
with outer environment O goes like this:
1) E.lookup(N)
2) O.lookup(N)
3) O.O.lookup(N)
...
Until we find N in some environment or fail with exception.
"""
lookup_error_msg = '{} not found in Env<{}>'
def __init__(self, names=(), values=(), outer=None):
self.update(zip(names, values))
self.outer = outer
def set(self, name, new_value):
self.lookup(name) # Will fail if no name in Env
self[name] = new_value
def lookup(self, name):
if name in self:
return self[name]
elif self.outer:
return self.outer.lookup(name)
else:
raise LookupError(self.lookup_error_msg.format(name, self))
def builtins():
"""
Define default environment full of builtin procedures.
Basic primitives which all Lisps should have:
eq?
quote
cons
car
cdr
atom?
In addition, this Lisp also have:
- a set of numeric operations (+, -, =, /, etc)
- reflection functions (list?, number?, symbol?, etc)
- list processing functions (map, filter, foldl, etc) # TODO
"""
env = Env()
env.update({
'+': lambda *args: sum(args),
'*': lambda *args: functools.reduce(operator.mul, args),
'-': operator.sub,
'/': operator.truediv,
'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
'=': operator.eq,
'abs': abs,
'and': operator.and_,
'or': operator.or_,
'car': lambda alist: alist[0],
'cdr': lambda alist: alist[1:],
'cons': lambda head, tail: [head] + tail,
'list': lambda *terms: list(terms),
'sum': sum,
'list?': lambda term: isinstance(term, list),
'atom?': lambda term: isinstance(term, (int, float, str)),
'number?': lambda term: isinstance(term, (int, float)),
'symbol?': lambda term: isinstance(term, str),
'function?': callable,
'map': lambda fn, xs: [fn(x) for x in xs],
'filter': lambda fn, xs: [x for x in xs if fn(x)],
'reverse': lambda xs: xs[::-1],
'fold': functools.reduce,
'mul': functools.partial(functools.reduce, operator.mul),
'eq?': operator.eq,
})
return env
default = builtins()
| begor/lisp | lisp/environment.py | Python | mit | 2,626 |
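A quick sketch of nested lookup through `outer` frames, using only names defined in the module:

```python
from lisp.environment import Env, builtins

global_env = builtins()
local_env = Env(names=('x',), values=(42,), outer=global_env)

print(local_env.lookup('x'))           # 42, found in the local frame
print(local_env.lookup('+')(1, 2, 3))  # 6, resolved via the outer frame
local_env.set('x', 43)                 # ok; set() requires the name to exist
```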
import cv2
from collections import namedtuple
import logging
import re
import os
RecorderConfig = namedtuple('RecorderConfig',
['file_limit',
'time_limit',
'directory',
'file_base'],
verbose=False)
class Recorder:
def __init__(self, config, fps, resolution):
self._logger = logging.getLogger(__name__)
self._file_limit = config.file_limit
self._directory = config.directory
self._file_base = config.file_base
self._frame_limit = config.time_limit * fps
self._resolution = resolution
self._fps = fps
self._ext = '.avi'
self._scan_video_files()
self._open_new_video_file()
def _scan_video_files(self):
directory = self._directory
base = self._file_base
ext = self._ext
regex = re.compile(base + r'(\d+)')
self._logger.info("Video files dir: %s. File base: %s",
directory, base)
lowest_idx = 0x7fffffff
highest_idx = 0
nbr_of_files = 0
for anyfile in os.listdir(directory):
(anyfile_base, anyfile_ext) = os.path.splitext(anyfile)
if not anyfile_ext == ext:
continue
m = regex.match(anyfile_base)
if m is None:
continue
idx = int(m.group(1))
if idx < lowest_idx:
lowest_idx = idx
if idx > highest_idx:
highest_idx = idx
nbr_of_files += 1
self._nbr_of_outfiles = nbr_of_files
if nbr_of_files == 0:
# There are no logfiles stored in the log file directory
self._logger.info("Videofile dir empty.")
self._cur_outfile_index = 0
self._cur_outfile_lowest_index = 0
else:
self._cur_outfile_index = highest_idx + 1
self._cur_outfile_lowest_index = lowest_idx
self._logger.info("Cur indices: highest = %d, lowest = %d",
self._cur_outfile_index, self._cur_outfile_lowest_index)
def _open_new_video_file(self):
directory = self._directory
base = self._file_base
ext = self._ext
new_file_name = directory + '/' + base + str(self._cur_outfile_index) + ext
self._logger.info("Opening new output file: %s", new_file_name)
fourcc = cv2.VideoWriter_fourcc(*'mjpa')
self._logger.info("recording resoluton: {}".format(self._resolution))
self._outfile = cv2.VideoWriter(new_file_name, fourcc,
self._fps,
self._resolution)
self._nbr_of_outfiles += 1
self._cur_nbr_of_recorded_frames = 0
def _remove_old_video_file(self):
directory = self._directory
base = self._file_base
ext = self._ext
oldest_filename = directory + '/' + base + str(self._cur_outfile_lowest_index) + ext
self._logger.info("Removing old output file: %s", oldest_filename)
os.remove(oldest_filename)
# Update oldest and current index by rescanning all outfiles
self._scan_video_files()
def record_frame(self, frame):
if self._cur_nbr_of_recorded_frames > self._frame_limit:
self._logger.info("Switching output file")
self._outfile.release()
self._cur_outfile_index += 1
self._open_new_video_file()
self._outfile.write(frame)
self._cur_nbr_of_recorded_frames += 1
if self._nbr_of_outfiles > self._file_limit:
self._remove_old_video_file()
def close(self):
if self._outfile is not None:
self._logger.info("Closing video output file")
self._outfile.release()
| erstrom/opencv-home-cam | opencv_home_cam/recorder.py | Python | mit | 3,907 |
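A wiring sketch for the recorder. The directory, base name and limits are assumptions, and the directory must already exist because `_scan_video_files` lists it during construction:

```python
import cv2
from opencv_home_cam.recorder import Recorder, RecorderConfig

config = RecorderConfig(file_limit=10, time_limit=60,
                        directory='/tmp/recordings', file_base='cam')
recorder = Recorder(config, fps=20, resolution=(640, 480))

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
if ok:
    recorder.record_frame(frame)
recorder.close()
cap.release()
```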
# coding=utf-8
from ..models.checksum import Checksum
from ..models.address import Address
from ..models.shopping_cart_item import ShoppingCartItem
from ..models.shopping_cart import ShoppingCart
from .paymill_service import PaymillService
import json
__author__ = 'yalnazov'
class ChecksumService(PaymillService):
def endpoint_path(self):
return '/checksums'
def paymill_object(self):
return Checksum
def create(self, checksum_type, amount, currency, return_url, cancel_url, description=None, checksum_action='transaction',
fee_amount=None, fee_payment=None, fee_currency=None, checkout_options=None, require_reusable_payment=None,
reusable_payment_description=None, items=None, shipping_address=None, billing_address=None, app_id=None,
client_id=None):
"""Creates new transaction/payment Checksum
:param str checksum_type: Type of request verified by this checksum
:param int amount: Amount (in cents) which will be charged
:param str currency: ISO 4217 formatted currency code
:param str return_url: URL to redirect customers to after checkout has completed.
:param str cancel_url: URL to redirect customers to after they have canceled the checkout.
:param str description: A short description for the transaction
:param str checksum_action: enum(transaction, payment) or null. Requested action verified by this checksum (default: transaction)
:param int fee_amount: Fee included in the transaction amount (set by a connected app). Mandatory if fee_payment is set.
:param str fee_payment: The identifier of the payment from which the fee will be charged (Payment object).
:param str fee_currency: The currency of the fee (e.g. EUR, USD). If it's not set, the currency of the transaction is used.
We suggest always setting it explicitly, as it might cause problems if your account does not support the same currencies as your merchants' accounts.
:param list checkout_options: Various options that determine behavior before/during/after checkout such as editability of address fields.
:param boolean require_reusable_payment: Set this to true if you want to ask the buyer for a billing agreement during checkout.
If the buyer accepts, the resulting payment can be reused for transactions and subscriptions without additional interaction.
:param str reusable_payment_description: Description appears at the acquirers checkout page (e.g. PayPal) when you request permission for a reusable payment, max. 127 characters.
:param list of ShoppingCartItem items: Shopping cart items purchased with this transaction.
:param Address shipping_address: Shipping address for this transaction.
:param Address billing_address: Billing address for this transaction.
:param str app_id: App (ID) that created this payment or null if created by yourself.
:param str client_id or None: The identifier of a client
:return Checksum: the created Checksum object
"""
params = dict(checksum_type=checksum_type, amount=amount, currency=currency, return_url=return_url, cancel_url=cancel_url)
if description is not None:
params.update(description=description)
if checksum_action is not None:
params.update(checksum_action=checksum_action)
if shipping_address is not None and isinstance(shipping_address, Address):
params.update(shipping_address=str(shipping_address.to_json()))
if billing_address is not None and isinstance(billing_address, Address):
params.update(billing_address=str(billing_address.to_json()))
if items is not None and isinstance(items, list) and len(items) > 0 and isinstance(items[0], ShoppingCartItem):
params.update(items=str(ShoppingCart(items=items).to_json()))
if fee_amount is not None:
params.update(fee_amount=fee_amount)
if fee_payment is not None:
params.update(fee_payment=fee_payment)
if fee_currency is not None:
params.update(fee_currency=fee_currency)
if checkout_options is not None and isinstance(checkout_options, dict):
params.update(checkout_options=json.dumps(checkout_options))
if app_id is not None:
params.update(app_id=app_id)
if reusable_payment_description is not None:
params.update(reusable_payment_description=reusable_payment_description)
if require_reusable_payment is not None:
params.update(require_reusable_payment=require_reusable_payment)
if client_id is not None:
params.update(client=client_id)
return self._create(params)
def detail(self, obj):
"""Returns/refreshes the remote Subscription representation with that obj.id
:param Subscription obj: the Subscription object with an id set
:return Subscription: the fresh Subscription object
"""
return self._detail(obj)
| paymill/paymill-python | paymill/services/checksum_service.py | Python | mit | 5,102 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-25 15:51
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='GithubAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('access_token', models.CharField(max_length=100)),
('username', models.CharField(max_length=256)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'github account',
'verbose_name_plural': 'github accounts',
},
),
]
| daisychainme/daisychain | daisychain/channel_github/migrations/0001_initial.py | Python | mit | 1,029 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Provides:
- List; like list but returns None instead of IndexOutOfBounds
- Storage; like dictionary allowing also for `obj.foo` for `obj['foo']`
"""
import cPickle
import portalocker
__all__ = ['List', 'Storage', 'Settings', 'Messages',
'StorageList', 'load_storage', 'save_storage']
class List(list):
"""
Like a regular python list but a[i] if i is out of bounds return None
instead of IndexOutOfBounds
"""
def __call__(self, i, default=None):
if 0<=i<len(self):
return self[i]
else:
return default
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`.
>>> o = Storage(a=1)
>>> print o.a
1
>>> o['a']
1
>>> o.a = 2
>>> print o['a']
2
>>> del o.a
>>> print o.a
None
"""
def __getattr__(self, key):
if key in self:
return self[key]
else:
return None
def __setattr__(self, key, value):
if value is None:
if key in self:
del self[key]
else:
self[key] = value
def __delattr__(self, key):
if key in self:
del self[key]
else:
raise AttributeError, "missing key=%s" % key
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
def __getstate__(self):
return dict(self)
def __setstate__(self, value):
for (k, v) in value.items():
self[k] = v
def getlist(self, key):
"""Return a Storage value as a list.
If the value is a list it will be returned as-is.
If object is None, an empty list will be returned.
Otherwise, [value] will be returned.
Example output for a query string of ?x=abc&y=abc&y=def
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlist('x')
['abc']
>>> request.vars.getlist('y')
['abc', 'def']
>>> request.vars.getlist('z')
[]
"""
value = self.get(key, None)
if isinstance(value, (list, tuple)):
return value
elif value is None:
return []
return [value]
def getfirst(self, key):
"""Return the first or only value when given a request.vars-style key.
If the value is a list, its first item will be returned;
otherwise, the value will be returned as-is.
Example output for a query string of ?x=abc&y=abc&y=def
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getfirst('x')
'abc'
>>> request.vars.getfirst('y')
'abc'
>>> request.vars.getfirst('z')
"""
value = self.getlist(key)
if len(value):
return value[0]
return None
def getlast(self, key):
"""Returns the last or only single value when given a request.vars-style key.
If the value is a list, the last item will be returned;
otherwise, the value will be returned as-is.
Simulated output with a query string of ?x=abc&y=abc&y=def
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlast('x')
'abc'
>>> request.vars.getlast('y')
'def'
>>> request.vars.getlast('z')
"""
value = self.getlist(key)
if len(value):
return value[-1]
return None
class StorageList(Storage):
"""
like Storage but missing elements default to [] instead of None
"""
def __getattr__(self, key):
if key in self:
return self[key]
else:
self[key] = []
return self[key]
def load_storage(filename):
fp = open(filename, 'rb')
portalocker.lock(fp, portalocker.LOCK_EX)
storage = cPickle.load(fp)
portalocker.unlock(fp)
fp.close()
return Storage(storage)
def save_storage(storage, filename):
fp = open(filename, 'wb')
portalocker.lock(fp, portalocker.LOCK_EX)
cPickle.dump(dict(storage), fp)
portalocker.unlock(fp)
fp.close()
class Settings(Storage):
def __setattr__(self, key, value):
if key != 'lock_keys' and self.get('lock_keys', None)\
and not key in self:
raise SyntaxError, 'setting key \'%s\' does not exist' % key
if key != 'lock_values' and self.get('lock_values', None):
raise SyntaxError, 'setting value cannot be changed: %s' % key
self[key] = value
class Messages(Storage):
def __init__(self, T):
self['T'] = T
def __setattr__(self, key, value):
if key != 'lock_keys' and self.get('lock_keys', None)\
and not key in self:
raise SyntaxError, 'setting key \'%s\' does not exist' % key
if key != 'lock_values' and self.get('lock_values', None):
raise SyntaxError, 'setting value cannot be changed: %s' % key
self[key] = value
def __getattr__(self, key):
value = self[key]
if isinstance(value, str):
return str(self['T'](value))
return value
if __name__ == '__main__':
import doctest
doctest.testmod()
| stryder199/RyarkAssignments | Assignment2/web2py/gluon/storage.py | Python | mit | 5,788 |
# pandas and numpy for data manipulation
import pandas as pd
import numpy as np
import sqlite3
from bokeh.plotting import Figure
from bokeh.models import (
CategoricalColorMapper,
HoverTool,
ColumnDataSource,
Panel,
FuncTickFormatter,
SingleIntervalTicker,
LinearAxis,
)
from bokeh.models import Legend
from bokeh.models.widgets import (
CheckboxGroup,
Slider,
RangeSlider,
Tabs,
CheckboxButtonGroup,
TableColumn,
DataTable,
Select,
)
from bokeh.layouts import column, row, WidgetBox
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.colors as colors
def perfmon_tab(db):
def make_dataset(perfmon_list):
newdf = perfmon[perfmon_list]
# Convert dataframe to column data source
return ColumnDataSource(newdf)
def make_plot(src):
# Blank plot with correct labels
p = Figure(
plot_width=1024,
plot_height=768,
x_axis_type="datetime",
title="perfmon",
output_backend="webgl",
)
cm = plt.get_cmap("gist_rainbow")
numlines = len(perfmon.columns)
mypal = [cm(1.0 * i / numlines) for i in range(numlines)]
mypal = list(map(lambda x: colors.rgb2hex(x), mypal))
col = 0
legenditems = []
for key in src.data.keys():
if key == "datetime":
continue
l = key + " "
col = col + 1
cline = p.line(
perfmon.index.values,
perfmon[key],
line_width=1,
alpha=0.8,
color=mypal[col],
)
legenditems += [(key, [cline])]
p.legend.click_policy = "hide"
legend = Legend(items=legenditems, location=(0, 0))
p.add_layout(legend, "below")
return p
def update(attr, old, new):
perfmons_to_plot = [
perfmon_selection.labels[i] for i in perfmon_selection.active
]
new_src = make_dataset(perfmons_to_plot)
plot = make_plot(new_src)
# TODO: crude hack for lack of a better solution so far
layout.children[1] = plot
# get data from DB, setup index
cur = db.cursor()
cur.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name=?", ["perfmon"]
)
if len(cur.fetchall()) == 0:
return None
perfmon = pd.read_sql_query("select * from perfmon", db)
perfmon.index = pd.to_datetime(perfmon["datetime"])
perfmon = perfmon.drop(["datetime"], axis=1)
perfmon.index.name = "datetime"
perfmon_selection = CheckboxGroup(
labels=list(perfmon.columns),
active=[0, 5],
width=300,
height=800,
sizing_mode="fixed",
)
perfmon_list = [perfmon_selection.labels[i] for i in perfmon_selection.active]
src = make_dataset(perfmon_list)
plot = make_plot(src)
perfmon_selection.on_change("active", update)
controls = WidgetBox(perfmon_selection, width=300, height=800, sizing_mode="fixed")
layout = row(controls, plot)
tab = Panel(child=layout, title="perfmon")
return tab
| murrayo/yape | yapesrv/scripts/perfmon_tab.py | Python | mit | 3,197 |
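A sketch of embedding the tab in a Bokeh server document; the database filename is an assumption:

```python
import sqlite3
from bokeh.io import curdoc
from bokeh.models.widgets import Tabs

db = sqlite3.connect('yape.db')   # placeholder path
tab = perfmon_tab(db)
if tab is not None:  # perfmon_tab returns None when the table is absent
    curdoc().add_root(Tabs(tabs=[tab]))
```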
import sys, subprocess, time
"""
This script is made as a wrapper for sc2 bots to set a timeout to the bots (in case they cant find the last enemy structure or the game is ending in a draw)
Usage:
cd into python-sc2/ directory
docker build -t test_image -f test/Dockerfile .
docker run test_image -c "python test/travis_test_script.py test/autotest_bot.py"
"""
retries = 2
timeout_time = 3*60 # My maxout bot took 110 - 140 real seconds for 7 minutes in game time
if len(sys.argv) > 1:
# Attempt to run process with retries and timeouts
t0 = time.time()
process, result = None, None
for i in range(retries):
t0 = time.time()
process = subprocess.Popen(["python", sys.argv[1]], stdout=subprocess.PIPE)
try:
# Stop the current bot if the timeout was reached - the bot needs to finish a game within 3 minutes real time
result = process.communicate(timeout=timeout_time)
        except subprocess.TimeoutExpired:
            # Kill the timed-out bot process before retrying so it cannot linger
            process.kill()
            continue
out, err = result
result = out.decode("utf-8")
# Break as the bot run was successful
break
if process.returncode is not None:
# Reformat the output into a list
print_output: str = result
linebreaks = [
["\r\n", print_output.count("\r\n")],
["\r", print_output.count("\r")],
["\n", print_output.count("\n")],
]
most_linebreaks_type = max(linebreaks, key=lambda x: x[1])
linebreak_type, linebreak_count = most_linebreaks_type
output_as_list = print_output.split(linebreak_type)
print("Travis test script, bot output:\r\n{}".format("\r\n".join(output_as_list)))
# Bot was not successfully run in time, returncode will be None
if process.returncode is None or process.returncode != 0:
print("Exiting with exit code 5, error: Attempted to launch script {} timed out after {} seconds. Retries completed: {}".format(sys.argv[1], timeout_time, retries))
exit(5)
# process.returncode will always return 0 if the game was run successfully or if there was a python error (in this case it returns as defeat)
print("Returncode: {}".format(process.returncode))
print("Game took {} real time seconds".format(round(time.time() - t0, 1)))
if process is not None and process.returncode == 0:
for line in output_as_list:
# This will throw an error if a bot is called Traceback
if "Traceback " in line:
print("Exiting with exit code 3")
exit(3)
print("Exiting with exit code 0")
exit(0)
# Exit code 1: game crashed I think
print("Exiting with exit code 1")
exit(1)
# Exit code 2: bot was not launched
print("Exiting with exit code 2")
exit(2)
|
Dentosal/python-sc2
|
test/travis_test_script.py
|
Python
|
mit
| 2,800
|
# -*- coding: utf-8 -*-
""" S3 Query Construction
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("FS",
"S3FieldSelector",
"S3Joins",
"S3ResourceField",
"S3ResourceQuery",
"S3URLQuery",
"S3URLQueryParser",
)
import datetime
import re
import sys
from gluon import current
from gluon.storage import Storage
from s3dal import Field, Row
from s3fields import S3RepresentLazy
from s3utils import s3_get_foreign_key, s3_unicode, S3TypeConverter
ogetattr = object.__getattribute__
TEXTTYPES = ("string", "text")
# =============================================================================
class S3FieldSelector(object):
""" Helper class to construct a resource query """
LOWER = "lower"
UPPER = "upper"
OPERATORS = [LOWER, UPPER]
def __init__(self, name, type=None):
""" Constructor """
if not isinstance(name, basestring) or not name:
raise SyntaxError("name required")
self.name = str(name)
self.type = type
self.op = None
# -------------------------------------------------------------------------
def __lt__(self, value):
return S3ResourceQuery(S3ResourceQuery.LT, self, value)
# -------------------------------------------------------------------------
def __le__(self, value):
return S3ResourceQuery(S3ResourceQuery.LE, self, value)
# -------------------------------------------------------------------------
def __eq__(self, value):
return S3ResourceQuery(S3ResourceQuery.EQ, self, value)
# -------------------------------------------------------------------------
def __ne__(self, value):
return S3ResourceQuery(S3ResourceQuery.NE, self, value)
# -------------------------------------------------------------------------
def __ge__(self, value):
return S3ResourceQuery(S3ResourceQuery.GE, self, value)
# -------------------------------------------------------------------------
def __gt__(self, value):
return S3ResourceQuery(S3ResourceQuery.GT, self, value)
# -------------------------------------------------------------------------
def like(self, value):
return S3ResourceQuery(S3ResourceQuery.LIKE, self, value)
# -------------------------------------------------------------------------
def belongs(self, value):
return S3ResourceQuery(S3ResourceQuery.BELONGS, self, value)
# -------------------------------------------------------------------------
def contains(self, value):
return S3ResourceQuery(S3ResourceQuery.CONTAINS, self, value)
# -------------------------------------------------------------------------
def anyof(self, value):
return S3ResourceQuery(S3ResourceQuery.ANYOF, self, value)
# -------------------------------------------------------------------------
def typeof(self, value):
return S3ResourceQuery(S3ResourceQuery.TYPEOF, self, value)
# -------------------------------------------------------------------------
def lower(self):
self.op = self.LOWER
return self
# -------------------------------------------------------------------------
def upper(self):
self.op = self.UPPER
return self
# -------------------------------------------------------------------------
def expr(self, val):
if self.op and val is not None:
if self.op == self.LOWER and \
hasattr(val, "lower") and callable(val.lower) and \
(not isinstance(val, Field) or val.type in TEXTTYPES):
return val.lower()
elif self.op == self.UPPER and \
hasattr(val, "upper") and callable(val.upper) and \
(not isinstance(val, Field) or val.type in TEXTTYPES):
return val.upper()
return val
# -------------------------------------------------------------------------
def represent(self, resource):
try:
rfield = S3ResourceField(resource, self.name)
except:
colname = None
else:
colname = rfield.colname
if colname:
if self.op is not None:
return "%s.%s()" % (colname, self.op)
else:
return colname
else:
return "(%s?)" % self.name
# -------------------------------------------------------------------------
@classmethod
def extract(cls, resource, row, field):
"""
Extract a value from a Row
@param resource: the resource
@param row: the Row
@param field: the field
@return: field if field is not a Field/S3FieldSelector instance,
the value from the row otherwise
"""
error = lambda fn: KeyError("Field not found: %s" % fn)
t = type(field)
if isinstance(field, Field):
colname = str(field)
tname, fname = colname.split(".", 1)
elif t is S3FieldSelector:
rfield = S3ResourceField(resource, field.name)
colname = rfield.colname
if not colname:
# unresolvable selector
raise error(field.name)
fname = rfield.fname
tname = rfield.tname
elif t is S3ResourceField:
colname = field.colname
if not colname:
# unresolved selector
return None
fname = field.fname
tname = field.tname
else:
return field
if type(row) is Row:
try:
if tname in row.__dict__:
value = ogetattr(ogetattr(row, tname), fname)
else:
value = ogetattr(row, fname)
except:
try:
value = row[colname]
except (KeyError, AttributeError):
raise error(colname)
elif fname in row:
value = row[fname]
elif colname in row:
value = row[colname]
elif tname is not None and \
tname in row and fname in row[tname]:
value = row[tname][fname]
else:
raise error(colname)
if callable(value):
# Lazy virtual field
try:
value = value()
except:
current.log.error(sys.exc_info()[1])
value = None
if hasattr(field, "expr"):
return field.expr(value)
return value
# -------------------------------------------------------------------------
def resolve(self, resource):
"""
Resolve this field against a resource
@param resource: the resource
"""
return S3ResourceField(resource, self.name)
# =============================================================================
# Short name for the S3FieldSelector class
#
FS = S3FieldSelector
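# A hedged usage sketch of the FS shorthand (the selectors used here are
# assumptions for illustration, not fields guaranteed to exist in any
# particular resource):
#
#   query = (FS("name").lower().like("water%")) & \
#           (FS("organisation_id$name") == "Example Org")
#   dal_query = query.query(resource)   # translate to a DAL query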
# =============================================================================
class S3FieldPath(object):
""" Helper class to parse field selectors """
# -------------------------------------------------------------------------
@classmethod
def resolve(cls, resource, selector, tail=None):
"""
Resolve a selector (=field path) against a resource
@param resource: the S3Resource to resolve against
@param selector: the field selector string
@param tail: tokens to append to the selector
The general syntax for a selector is:
selector = {[alias].}{[key]$}[field|selector]
(Parts in {} are optional, | indicates alternatives)
* Alias can be:
~ refers to the resource addressed by the
preceding parts of the selector (=last
resource)
component alias of a component of the last resource
linktable alias of a link table of the last resource
table name of a table that has a foreign key for
the last resource (auto-detect the key)
key:table same as above, but specifying the foreign key
* Key can be:
key the name of a foreign key in the last resource
context a context expression
* Field can be:
fieldname the name of a field or virtual field of the
last resource
context a context expression
A "context expression" is a name enclosed in parentheses:
(context)
During parsing, context expressions get replaced by the
string which has been configured for this name for the
last resource with:
s3db.configure(tablename, context = dict(name = "string"))
With context expressions, the same selector can be used
for different resources, each time resolving into the
specific field path. However, the field addressed must
be of the same type in all resources to form valid
queries.
If a context name can not be resolved, resolve() will
still succeed - but the S3FieldPath returned will have
colname=None and ftype="context" (=unresolvable context).
"""
if not selector:
raise SyntaxError("Invalid selector: %s" % selector)
        tokens = re.split(r"(\.|\$)", selector)
if tail:
tokens.extend(tail)
parser = cls(resource, None, tokens)
parser.original = selector
return parser
# -------------------------------------------------------------------------
def __init__(self, resource, table, tokens):
"""
Constructor - not to be called directly, use resolve() instead
@param resource: the S3Resource
@param table: the table
@param tokens: the tokens as list
"""
s3db = current.s3db
if table is None:
table = resource.table
# Initialize
self.original = None
self.tname = table._tablename
self.fname = None
self.field = None
self.ftype = None
self.virtual = False
self.colname = None
self.joins = {}
self.distinct = False
self.multiple = True
head = tokens.pop(0)
tail = None
if head and head[0] == "(" and head[-1] == ")":
# Context expression
head = head.strip("()")
self.fname = head
self.ftype = "context"
if not resource:
resource = s3db.resource(table, components=[])
context = resource.get_config("context")
if context and head in context:
tail = self.resolve(resource, context[head], tail=tokens)
else:
# unresolvable
pass
elif tokens:
# Resolve the tail
op = tokens.pop(0)
if tokens:
if op == ".":
# head is a component or linktable alias, and tokens is
# a field expression in the component/linked table
if not resource:
resource = s3db.resource(table, components=[])
ktable, join, m, d = self._resolve_alias(resource, head)
self.multiple = m
self.distinct = d
else:
# head is a foreign key in the current table and tokens is
# a field expression in the referenced table
ktable, join = self._resolve_key(table, head)
self.distinct = True
if join is not None:
self.joins[ktable._tablename] = join
tail = S3FieldPath(None, ktable, tokens)
else:
raise SyntaxError("trailing operator")
if tail is None:
# End of the expression
if self.ftype != "context":
# Expression is resolved, head is a field name:
self.field = self._resolve_field(table, head)
if not self.field:
self.virtual = True
self.ftype = "virtual"
else:
self.virtual = False
self.ftype = str(self.field.type)
self.fname = head
self.colname = "%s.%s" % (self.tname, self.fname)
else:
# Read field data from tail
self.tname = tail.tname
self.fname = tail.fname
self.field = tail.field
self.ftype = tail.ftype
self.virtual = tail.virtual
self.colname = tail.colname
self.distinct |= tail.distinct
self.multiple |= tail.multiple
self.joins.update(tail.joins)
# -------------------------------------------------------------------------
@staticmethod
def _resolve_field(table, fieldname):
"""
Resolve a field name against the table, recognizes "id" as
table._id.name, and "uid" as current.xml.UID.
@param table: the Table
@param fieldname: the field name
@return: the Field
"""
if fieldname == "uid":
fieldname = current.xml.UID
if fieldname == "id":
field = table._id
elif fieldname in table.fields:
field = ogetattr(table, fieldname)
else:
field = None
return field
# -------------------------------------------------------------------------
@staticmethod
def _resolve_key(table, fieldname):
"""
Resolve a foreign key into the referenced table and the
join and left join between the current table and the
referenced table
@param table: the current Table
@param fieldname: the fieldname of the foreign key
            @return: tuple of (referenced table, join)
            @raise: AttributeError if either the field or
                    the referenced table is not found
@raise: SyntaxError if the field is not a foreign key
"""
if fieldname in table.fields:
f = table[fieldname]
else:
raise AttributeError("key not found: %s" % fieldname)
ktablename, pkey, multiple = s3_get_foreign_key(f, m2m=False)
if not ktablename:
raise SyntaxError("%s is not a foreign key" % f)
ktable = current.s3db.table(ktablename,
AttributeError("undefined table %s" % ktablename),
db_only=True)
pkey = ktable[pkey] if pkey else ktable._id
join = [ktable.on(f == pkey)]
return ktable, join
# -------------------------------------------------------------------------
@staticmethod
def _resolve_alias(resource, alias):
"""
Resolve a table alias into the linked table (component, linktable
or free join), and the joins and left joins between the current
resource and the linked table.
@param resource: the current S3Resource
@param alias: the alias
            @return: tuple of (linked table, join, multiple, distinct),
                     the two latter being flags to indicate possible
                     ambiguous query results (needed by the query
                     builder)
@raise: AttributeError if one of the key fields or tables
can not be found
            @raise: SyntaxError if the alias can not be resolved (e.g.
                    because one of the keys isn't a foreign key, points
                    to the wrong table or is ambiguous)
"""
# Alias for this resource?
if alias in ("~", resource.alias):
return resource.table, None, False, False
multiple = True
linked = resource.linked
if linked and linked.alias == alias:
# It's the linked table
linktable = resource.table
ktable = linked.table
join = [ktable.on(ktable[linked.fkey] == linktable[linked.rkey])]
return ktable, join, multiple, True
s3db = current.s3db
tablename = resource.tablename
# Try to attach the component
if alias not in resource.components and \
alias not in resource.links:
_alias = alias
hook = s3db.get_component(tablename, alias)
if not hook:
_alias = s3db.get_alias(tablename, alias)
if _alias:
hook = s3db.get_component(tablename, _alias)
if hook:
resource._attach(_alias, hook)
components = resource.components
links = resource.links
if alias in components:
# Is a component
component = components[alias]
ktable = component.table
join = component._join()
multiple = component.multiple
elif alias in links:
# Is a linktable
link = links[alias]
ktable = link.table
join = link._join()
elif "_" in alias:
# Is a free join
DELETED = current.xml.DELETED
table = resource.table
tablename = resource.tablename
pkey = fkey = None
# Find the table
fkey, kname = (alias.split(":") + [None])[:2]
if not kname:
fkey, kname = kname, fkey
ktable = s3db.table(kname,
AttributeError("table not found: %s" % kname),
db_only=True)
if fkey is None:
# Autodetect left key
for fname in ktable.fields:
tn, key, m = s3_get_foreign_key(ktable[fname], m2m=False)
if not tn:
continue
if tn == tablename:
if fkey is not None:
raise SyntaxError("ambiguous foreign key in %s" %
alias)
else:
fkey = fname
if key:
pkey = key
if fkey is None:
raise SyntaxError("no foreign key for %s in %s" %
(tablename, kname))
else:
# Check left key
if fkey not in ktable.fields:
raise AttributeError("no field %s in %s" % (fkey, kname))
tn, pkey, m = s3_get_foreign_key(ktable[fkey], m2m=False)
if tn and tn != tablename:
raise SyntaxError("%s.%s is not a foreign key for %s" %
(kname, fkey, tablename))
elif not tn:
raise SyntaxError("%s.%s is not a foreign key" %
(kname, fkey))
# Default primary key
if pkey is None:
pkey = table._id.name
# Build join
query = (table[pkey] == ktable[fkey])
if DELETED in ktable.fields:
query &= ktable[DELETED] != True
join = [ktable.on(query)]
else:
raise SyntaxError("Invalid tablename: %s" % alias)
return ktable, join, multiple, True
# =============================================================================
class S3ResourceField(object):
""" Helper class to resolve a field selector against a resource """
# -------------------------------------------------------------------------
def __init__(self, resource, selector, label=None):
"""
Constructor
@param resource: the resource
@param selector: the field selector (string)
"""
self.resource = resource
self.selector = selector
lf = S3FieldPath.resolve(resource, selector)
self.tname = lf.tname
self.fname = lf.fname
self.colname = lf.colname
self._joins = lf.joins
self.distinct = lf.distinct
self.multiple = lf.multiple
self._join = None
self.field = lf.field
self.virtual = False
self.represent = s3_unicode
self.requires = None
if self.field is not None:
field = self.field
self.ftype = str(field.type)
if resource.linked is not None and self.ftype == "id":
# Always represent the link-table's ID as the
# linked record's ID => needed for data tables
self.represent = lambda i, resource=resource: \
resource.component_id(None, i)
else:
self.represent = field.represent
self.requires = field.requires
elif self.colname:
self.virtual = True
self.ftype = "virtual"
else:
self.ftype = "context"
# Fall back to the field label
if label is None:
fname = self.fname
if fname in ["L1", "L2", "L3", "L3", "L4", "L5"]:
try:
label = current.gis.get_location_hierarchy(fname)
except:
label = None
elif fname == "L0":
label = current.messages.COUNTRY
if label is None:
f = self.field
if f:
label = f.label
elif fname:
label = " ".join([s.strip().capitalize()
for s in fname.split("_") if s])
else:
label = None
self.label = label
self.show = True
# -------------------------------------------------------------------------
def __repr__(self):
""" String representation of this instance """
return "<S3ResourceField " \
"selector='%s' " \
"label='%s' " \
"table='%s' " \
"field='%s' " \
"type='%s'>" % \
(self.selector, self.label, self.tname, self.fname, self.ftype)
# -------------------------------------------------------------------------
@property
def join(self):
"""
Implicit join (Query) for this field, for backwards-compatibility
"""
if self._join is not None:
return self._join
join = self._join = {}
for tablename, joins in self._joins.items():
query = None
for expression in joins:
if query is None:
query = expression.second
else:
query &= expression.second
if query:
join[tablename] = query
return join
# -------------------------------------------------------------------------
@property
def left(self):
"""
            The left joins for this field, for backwards-compatibility
"""
return self._joins
# -------------------------------------------------------------------------
def extract(self, row, represent=False, lazy=False):
"""
Extract the value for this field from a row
@param row: the Row
@param represent: render a text representation for the value
@param lazy: return a lazy representation handle if available
"""
tname = self.tname
fname = self.fname
colname = self.colname
error = "Field not found in Row: %s" % colname
if type(row) is Row:
try:
if tname in row.__dict__:
value = ogetattr(ogetattr(row, tname), fname)
else:
value = ogetattr(row, fname)
except:
try:
value = row[colname]
except (KeyError, AttributeError):
raise KeyError(error)
elif fname in row:
value = row[fname]
elif colname in row:
value = row[colname]
elif tname is not None and \
tname in row and fname in row[tname]:
value = row[tname][fname]
else:
raise KeyError(error)
if callable(value):
# Lazy virtual field
try:
value = value()
except:
current.log.error(sys.exc_info()[1])
value = None
if represent:
renderer = self.represent
if callable(renderer):
if lazy and hasattr(renderer, "bulk"):
return S3RepresentLazy(value, renderer)
else:
return renderer(value)
else:
return s3_unicode(value)
else:
return value
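    # A short usage sketch (resource, row and the selector are assumptions
    # for illustration):
    #
    #   rfield = S3ResourceField(resource, "organisation_id$name")
    #   raw = rfield.extract(row)                   # raw value
    #   text = rfield.extract(row, represent=True)  # rendered representation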
# =============================================================================
class S3Joins(object):
""" A collection of joins """
def __init__(self, tablename, joins=None):
"""
Constructor
@param tablename: the name of the master table
@param joins: list of joins
"""
self.tablename = tablename
self.joins = {}
self.tables = set()
self.add(joins)
# -------------------------------------------------------------------------
def __iter__(self):
"""
Iterate over the names of all joined tables in the collection
"""
return self.joins.__iter__()
# -------------------------------------------------------------------------
def __getitem__(self, tablename):
"""
Get the list of joins for a table
@param tablename: the tablename
"""
return self.joins.__getitem__(tablename)
# -------------------------------------------------------------------------
def __setitem__(self, tablename, joins):
"""
Update the joins for a table
@param tablename: the tablename
@param joins: the list of joins for this table
"""
master = self.tablename
joins_dict = self.joins
tables = current.db._adapter.tables
joins_dict[tablename] = joins
if len(joins) > 1:
for join in joins:
try:
tname = join.first._tablename
except AttributeError:
tname = str(join.first)
if tname not in joins_dict and \
master in tables(join.second):
joins_dict[tname] = [join]
self.tables.add(tablename)
return
# -------------------------------------------------------------------------
def keys(self):
"""
Get a list of names of all joined tables
"""
return self.joins.keys()
# -------------------------------------------------------------------------
def items(self):
"""
Get a list of tuples (tablename, [joins]) for all joined tables
"""
return self.joins.items()
# -------------------------------------------------------------------------
def values(self):
"""
Get a list of joins for all joined tables
@return: a nested list like [[join, join, ...], ...]
"""
return self.joins.values()
# -------------------------------------------------------------------------
def add(self, joins):
"""
Add joins to this collection
@param joins: a join or a list/tuple of joins
@return: the list of names of all tables for which joins have
been added to the collection
"""
tablenames = set()
if joins:
if not isinstance(joins, (list, tuple)):
joins = [joins]
for join in joins:
tablename = join.first._tablename
self[tablename] = [join]
tablenames.add(tablename)
return list(tablenames)
# -------------------------------------------------------------------------
def extend(self, other):
"""
Extend this collection with the joins from another collection
@param other: the other collection (S3Joins), or a dict like
{tablename: [join, join]}
@return: the list of names of all tables for which joins have
been added to the collection
"""
if type(other) is S3Joins:
add = self.tables.add
else:
add = None
joins = self.joins if type(other) is S3Joins else self
for tablename in other:
if tablename not in self.joins:
joins[tablename] = other[tablename]
if add:
add(tablename)
return other.keys()
# -------------------------------------------------------------------------
def __repr__(self):
"""
String representation of this collection
"""
return "<S3Joins %s>" % str([str(j) for j in self.as_list()])
# -------------------------------------------------------------------------
def as_list(self, tablenames=None, aqueries=None, prefer=None):
"""
Return joins from this collection as list
@param tablenames: the names of the tables for which joins
shall be returned, defaults to all tables
in the collection. Dependencies will be
included automatically (if available)
@param aqueries: dict of accessible-queries {tablename: query}
to include in the joins; if there is no entry
for a particular table, then it will be looked
up from current.auth and added to the dict.
To prevent differential authorization of a
particular joined table, set {<tablename>: None}
in the dict
@param prefer: If any table or any of its dependencies would be
joined by this S3Joins collection, then skip this
table here (and enforce it to be joined by the
preferred collection), to prevent duplication of
left joins as inner joins:
join = inner_joins.as_list(prefer=left_joins)
left = left_joins.as_list()
@return: a list of joins, ordered by their interdependency, which
can be used as join/left parameter of Set.select()
"""
accessible_query = current.auth.s3_accessible_query
if tablenames is None:
tablenames = self.tables
else:
tablenames = set(tablenames)
skip = set()
if prefer:
preferred_joins = prefer.as_list(tablenames=tablenames)
for join in preferred_joins:
try:
tname = join.first._tablename
except AttributeError:
tname = str(join.first)
skip.add(tname)
tablenames -= skip
joins = self.joins
# Resolve dependencies
required_tables = set()
get_tables = current.db._adapter.tables
for tablename in tablenames:
if tablename not in joins or \
tablename == self.tablename or \
tablename in skip:
continue
join_list = joins[tablename]
preferred = False
dependencies = set()
for join in join_list:
join_tables = set(get_tables(join.second))
if join_tables:
if any((tname in skip for tname in join_tables)):
preferred = True
dependencies |= join_tables
if preferred:
skip.add(tablename)
skip |= dependencies
prefer.extend({tablename: join_list})
else:
required_tables.add(tablename)
required_tables |= dependencies
# Collect joins
joins_dict = {}
for tablename in required_tables:
if tablename not in joins or tablename == self.tablename:
continue
for join in joins[tablename]:
j = join
table = j.first
tname = table._tablename
if aqueries is not None and tname in tablenames:
if tname not in aqueries:
aquery = accessible_query("read", table)
aqueries[tname] = aquery
else:
aquery = aqueries[tname]
if aquery is not None:
j = join.first.on(join.second & aquery)
joins_dict[tname] = j
# Sort joins (if possible)
try:
return self.sort(joins_dict.values())
except RuntimeError:
return joins_dict.values()
# -------------------------------------------------------------------------
@classmethod
def sort(cls, joins):
"""
Sort a list of left-joins by their interdependency
@param joins: the list of joins
"""
if len(joins) <= 1:
return joins
r = list(joins)
tables = current.db._adapter.tables
append = r.append
head = None
for i in xrange(len(joins)):
join = r.pop(0)
head = join
tablenames = tables(join.second)
for j in r:
try:
tn = j.first._tablename
except AttributeError:
tn = str(j.first)
if tn in tablenames:
head = None
break
if head is not None:
break
else:
append(join)
if head is not None:
return [head] + cls.sort(r)
else:
raise RuntimeError("circular join dependency")
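    # A hedged sketch of typical S3Joins usage (the tables and keys are
    # placeholders, not a real schema):
    #
    #   joins = S3Joins("org_organisation")
    #   joins.add(db.org_office.on(
    #       db.org_office.organisation_id == db.org_organisation.id))
    #   ordered = joins.as_list()   # joins ordered by their interdependency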
# =============================================================================
class S3ResourceQuery(object):
"""
Helper class representing a resource query
- unlike DAL Query objects, these can be converted to/from URL filters
"""
# Supported operators
NOT = "not"
AND = "and"
OR = "or"
LT = "lt"
LE = "le"
EQ = "eq"
NE = "ne"
GE = "ge"
GT = "gt"
LIKE = "like"
BELONGS = "belongs"
CONTAINS = "contains"
ANYOF = "anyof"
TYPEOF = "typeof"
COMPARISON = [LT, LE, EQ, NE, GE, GT,
LIKE, BELONGS, CONTAINS, ANYOF, TYPEOF]
OPERATORS = [NOT, AND, OR] + COMPARISON
# -------------------------------------------------------------------------
def __init__(self, op, left=None, right=None):
""" Constructor """
if op not in self.OPERATORS:
raise SyntaxError("Invalid operator: %s" % op)
self.op = op
self.left = left
self.right = right
# -------------------------------------------------------------------------
def __and__(self, other):
""" AND """
return S3ResourceQuery(self.AND, self, other)
# -------------------------------------------------------------------------
def __or__(self, other):
""" OR """
return S3ResourceQuery(self.OR, self, other)
# -------------------------------------------------------------------------
def __invert__(self):
""" NOT """
if self.op == self.NOT:
return self.left
else:
return S3ResourceQuery(self.NOT, self)
# -------------------------------------------------------------------------
def _joins(self, resource, left=False):
op = self.op
l = self.left
r = self.right
if op in (self.AND, self.OR):
if isinstance(l, S3ResourceQuery):
ljoins, ld = l._joins(resource, left=left)
else:
ljoins, ld = {}, False
if isinstance(r, S3ResourceQuery):
rjoins, rd = r._joins(resource, left=left)
else:
rjoins, rd = {}, False
ljoins = dict(ljoins)
ljoins.update(rjoins)
return (ljoins, ld or rd)
elif op == self.NOT:
if isinstance(l, S3ResourceQuery):
return l._joins(resource, left=left)
else:
return {}, False
joins, distinct = {}, False
if isinstance(l, S3FieldSelector):
try:
rfield = l.resolve(resource)
except (SyntaxError, AttributeError):
pass
else:
distinct = rfield.distinct
if distinct and left or not distinct and not left:
joins = rfield._joins
return (joins, distinct)
# -------------------------------------------------------------------------
def fields(self):
""" Get all field selectors involved with this query """
op = self.op
l = self.left
r = self.right
if op in (self.AND, self.OR):
lf = l.fields()
rf = r.fields()
return lf + rf
elif op == self.NOT:
return l.fields()
elif isinstance(l, S3FieldSelector):
return [l.name]
else:
return []
# -------------------------------------------------------------------------
def split(self, resource):
"""
Split this query into a real query and a virtual one (AND)
@param resource: the S3Resource
@return: tuple (DAL-translatable sub-query, virtual filter),
both S3ResourceQuery instances
"""
op = self.op
l = self.left
r = self.right
if op == self.AND:
lq, lf = l.split(resource) \
if isinstance(l, S3ResourceQuery) else (l, None)
rq, rf = r.split(resource) \
if isinstance(r, S3ResourceQuery) else (r, None)
q = lq
if rq is not None:
if q is not None:
q &= rq
else:
q = rq
f = lf
if rf is not None:
if f is not None:
f &= rf
else:
f = rf
return q, f
elif op == self.OR:
lq, lf = l.split(resource) \
if isinstance(l, S3ResourceQuery) else (l, None)
rq, rf = r.split(resource) \
if isinstance(r, S3ResourceQuery) else (r, None)
if lf is not None or rf is not None:
return None, self
else:
q = lq
if rq is not None:
if q is not None:
q |= rq
else:
q = rq
return q, None
elif op == self.NOT:
if isinstance(l, S3ResourceQuery):
if l.op == self.OR:
i = (~(l.left)) & (~(l.right))
return i.split(resource)
else:
q, f = l.split(resource)
if q is not None and f is not None:
return None, self
elif q is not None:
return ~q, None
elif f is not None:
return None, ~f
else:
return ~l, None
l = self.left
try:
if isinstance(l, S3FieldSelector):
lfield = l.resolve(resource)
else:
lfield = S3ResourceField(resource, l)
except:
lfield = None
if not lfield or lfield.field is None:
return None, self
else:
return self, None
# -------------------------------------------------------------------------
def transform(self, resource):
"""
Placeholder for transformation method
@param resource: the S3Resource
"""
# @todo: implement
return self
# -------------------------------------------------------------------------
def query(self, resource):
"""
Convert this S3ResourceQuery into a DAL query, ignoring virtual
fields (the necessary joins for this query can be constructed
with the joins() method)
@param resource: the resource to resolve the query against
"""
op = self.op
l = self.left
r = self.right
# Resolve query components
if op == self.AND:
l = l.query(resource) if isinstance(l, S3ResourceQuery) else l
r = r.query(resource) if isinstance(r, S3ResourceQuery) else r
if l is None or r is None:
return None
elif l is False or r is False:
return l if r is False else r if l is False else False
else:
return l & r
elif op == self.OR:
l = l.query(resource) if isinstance(l, S3ResourceQuery) else l
r = r.query(resource) if isinstance(r, S3ResourceQuery) else r
if l is None or r is None:
return None
elif l is False or r is False:
return l if r is False else r if l is False else False
else:
return l | r
elif op == self.NOT:
l = l.query(resource) if isinstance(l, S3ResourceQuery) else l
if l is None:
return None
elif l is False:
return False
else:
return ~l
# Resolve the fields
if isinstance(l, S3FieldSelector):
try:
rfield = S3ResourceField(resource, l.name)
except:
return None
if rfield.virtual:
return None
elif not rfield.field:
return False
lfield = l.expr(rfield.field)
elif isinstance(l, Field):
lfield = l
else:
return None # not a field at all
        if isinstance(r, S3FieldSelector):
            try:
                rfield = S3ResourceField(resource, r.name)
            except:
                return None
            # mirror the left-operand checks before extracting the Field
            if rfield.virtual:
                return None
            elif not rfield.field:
                return False
            rfield = r.expr(rfield.field)
else:
rfield = r
# Resolve the operator
invert = False
query_bare = self._query_bare
ftype = str(lfield.type)
if isinstance(rfield, (list, tuple)) and ftype[:4] != "list":
if op == self.EQ:
op = self.BELONGS
elif op == self.NE:
op = self.BELONGS
invert = True
elif op not in (self.BELONGS, self.TYPEOF):
query = None
for v in rfield:
q = query_bare(op, lfield, v)
if q is not None:
if query is None:
query = q
else:
query |= q
return query
# Convert date(time) strings
if ftype == "datetime" and \
isinstance(rfield, basestring):
rfield = S3TypeConverter.convert(datetime.datetime, rfield)
elif ftype == "date" and \
isinstance(rfield, basestring):
rfield = S3TypeConverter.convert(datetime.date, rfield)
query = query_bare(op, lfield, rfield)
if invert:
query = ~(query)
return query
# -------------------------------------------------------------------------
def _query_bare(self, op, l, r):
"""
Translate a filter expression into a DAL query
@param op: the operator
@param l: the left operand
@param r: the right operand
"""
if op == self.CONTAINS:
q = l.contains(r, all=True)
elif op == self.ANYOF:
# NB str/int doesn't matter here
q = l.contains(r, all=False)
elif op == self.BELONGS:
q = self._query_belongs(l, r)
elif op == self.TYPEOF:
q = self._query_typeof(l, r)
elif op == self.LIKE:
q = l.like(s3_unicode(r))
elif op == self.LT:
q = l < r
elif op == self.LE:
q = l <= r
elif op == self.EQ:
q = l == r
elif op == self.NE:
q = l != r
elif op == self.GE:
q = l >= r
elif op == self.GT:
q = l > r
else:
q = None
return q
# -------------------------------------------------------------------------
def _query_typeof(self, l, r):
"""
Translate TYPEOF into DAL expression
@param l: the left operator
@param r: the right operator
"""
hierarchy, field, nodeset, none = self._resolve_hierarchy(l, r)
if not hierarchy:
# Not a hierarchical query => use simple belongs
return self._query_belongs(l, r)
if not field:
# Field does not exist (=>skip subquery)
return None
# Construct the subquery
list_type = str(field.type)[:5] == "list:"
if nodeset:
if list_type:
q = (field.contains(list(nodeset)))
elif len(nodeset) > 1:
q = (field.belongs(nodeset))
else:
q = (field == tuple(nodeset)[0])
else:
q = None
if none:
# None needs special handling with older DAL versions
if not list_type:
if q is None:
q = (field == None)
else:
q |= (field == None)
if q is None:
# Values not resolvable (=subquery always fails)
q = field.belongs(set())
return q
# -------------------------------------------------------------------------
@classmethod
def _resolve_hierarchy(cls, l, r):
"""
Resolve the hierarchical lookup in a typeof-query
@param l: the left operator
@param r: the right operator
"""
from s3hierarchy import S3Hierarchy
tablename = l.tablename
# Connect to the hierarchy
hierarchy = S3Hierarchy(tablename)
if hierarchy.config is None:
# Reference to a hierarchical table?
ktablename, key = s3_get_foreign_key(l)[:2]
if ktablename:
hierarchy = S3Hierarchy(ktablename)
else:
key = None
list_type = str(l.type)[:5] == "list:"
if hierarchy.config is None and not list_type:
# No hierarchy configured and no list:reference
return False, None, None, None
field, keys = l, r
if not key:
s3db = current.s3db
table = s3db[tablename]
if l.name != table._id.name:
# Lookup-field rather than primary key => resolve it
# Build a filter expression for the lookup table
fs = S3FieldSelector(l.name)
if list_type:
expr = fs.contains(r)
else:
expr = cls._query_belongs(l, r, field = fs)
# Resolve filter expression into subquery
resource = s3db.resource(tablename)
if expr is not None:
subquery = expr.query(resource)
else:
subquery = None
if not subquery:
# Field doesn't exist
return True, None, None, None
# Execute query and retrieve the lookup table IDs
DELETED = current.xml.DELETED
if DELETED in table.fields:
subquery &= table[DELETED] != True
rows = current.db(subquery).select(table._id)
# Override field/keys
field = table[hierarchy.pkey.name]
keys = set([row[table._id.name] for row in rows])
nodeset, none = None, False
if keys:
# Lookup all descendant types from the hierarchy
none = False
if not isinstance(keys, (list, tuple, set)):
keys = set([keys])
nodes = set()
for node in keys:
if node is None:
none = True
else:
try:
node_id = long(node)
except ValueError:
continue
nodes.add(node_id)
if hierarchy.config is not None:
nodeset = hierarchy.findall(nodes, inclusive=True)
else:
nodeset = nodes
elif keys is None:
none = True
return True, field, nodeset, none
# -------------------------------------------------------------------------
@staticmethod
def _query_belongs(l, r, field=None):
"""
Resolve BELONGS into a DAL expression (or S3ResourceQuery if
field is an S3FieldSelector)
@param l: the left operator
@param r: the right operator
@param field: alternative left operator
"""
if field is None:
field = l
expr = None
none = False
if not isinstance(r, (list, tuple, set)):
items = [r]
else:
items = r
if None in items:
none = True
items = [item for item in items if item is not None]
wildcard = False
if str(l.type) in ("string", "text"):
for item in items:
if isinstance(item, basestring):
if "*" in item and "%" not in item:
s = item.replace("*", "%")
else:
s = item
else:
try:
s = str(item)
except:
continue
if "%" in s:
wildcard = True
_expr = (field.like(s))
else:
_expr = (field == s)
if expr is None:
expr = _expr
else:
expr |= _expr
if not wildcard:
if len(items) == 1:
# Don't use belongs() for single value
expr = (field == tuple(items)[0])
elif items:
expr = (field.belongs(items))
if none:
# None needs special handling with older DAL versions
if expr is None:
expr = (field == None)
else:
expr |= (field == None)
elif expr is None:
expr = field.belongs(set())
return expr
# -------------------------------------------------------------------------
def __call__(self, resource, row, virtual=True):
"""
Probe whether the row matches the query
@param resource: the resource to resolve the query against
@param row: the DB row
@param virtual: execute only virtual queries
"""
if self.op == self.AND:
l = self.left(resource, row, virtual=False)
r = self.right(resource, row, virtual=False)
if l is None:
return r
if r is None:
return l
return l and r
elif self.op == self.OR:
l = self.left(resource, row, virtual=False)
r = self.right(resource, row, virtual=False)
if l is None:
return r
if r is None:
return l
return l or r
elif self.op == self.NOT:
l = self.left(resource, row)
if l is None:
return None
else:
return not l
real = False
left = self.left
if isinstance(left, S3FieldSelector):
try:
lfield = left.resolve(resource)
except (AttributeError, KeyError, SyntaxError):
return None
if lfield.field is not None:
real = True
elif not lfield.virtual:
# Unresolvable expression => skip
return None
else:
lfield = left
if isinstance(left, Field):
real = True
right = self.right
if isinstance(right, S3FieldSelector):
try:
rfield = right.resolve(resource)
except (AttributeError, KeyError, SyntaxError):
return None
if rfield.virtual:
real = False
elif rfield.field is None:
# Unresolvable expression => skip
return None
else:
rfield = right
if virtual and real:
return None
extract = lambda f: S3FieldSelector.extract(resource, row, f)
try:
l = extract(lfield)
r = extract(rfield)
except (KeyError, SyntaxError):
current.log.error(sys.exc_info()[1])
return None
if isinstance(left, S3FieldSelector):
l = left.expr(l)
if isinstance(right, S3FieldSelector):
r = right.expr(r)
op = self.op
invert = False
probe = self._probe
if isinstance(rfield, (list, tuple)) and \
not isinstance(lfield, (list, tuple)):
if op == self.EQ:
op = self.BELONGS
elif op == self.NE:
op = self.BELONGS
invert = True
elif op != self.BELONGS:
for v in r:
try:
r = probe(op, l, v)
except (TypeError, ValueError):
r = False
if r:
return True
return False
try:
r = probe(op, l, r)
except (TypeError, ValueError):
return False
if invert and r is not None:
return not r
else:
return r
# -------------------------------------------------------------------------
def _probe(self, op, l, r):
"""
Probe whether the value pair matches the query
@param l: the left value
@param r: the right value
"""
result = False
convert = S3TypeConverter.convert
# Fallbacks for TYPEOF
if op == self.TYPEOF:
if isinstance(l, (list, tuple, set)):
op = self.ANYOF
elif isinstance(r, (list, tuple, set)):
op = self.BELONGS
else:
op = self.EQ
if op == self.CONTAINS:
r = convert(l, r)
result = self._probe_contains(l, r)
elif op == self.ANYOF:
if not isinstance(r, (list, tuple, set)):
r = [r]
for v in r:
if isinstance(l, (list, tuple, set, basestring)):
if self._probe_contains(l, v):
return True
elif l == v:
return True
return False
elif op == self.BELONGS:
if not isinstance(r, (list, tuple, set)):
r = [r]
r = convert(l, r)
result = self._probe_contains(r, l)
elif op == self.LIKE:
pattern = re.escape(str(r)).replace("\\%", ".*").replace(".*.*", "\\%")
return re.match(pattern, str(l)) is not None
else:
r = convert(l, r)
if op == self.LT:
result = l < r
elif op == self.LE:
result = l <= r
elif op == self.EQ:
result = l == r
elif op == self.NE:
result = l != r
elif op == self.GE:
result = l >= r
elif op == self.GT:
result = l > r
return result
# -------------------------------------------------------------------------
@staticmethod
def _probe_contains(a, b):
"""
Probe whether a contains b
"""
if a is None:
return False
try:
if isinstance(a, basestring):
return str(b) in a
elif isinstance(a, (list, tuple, set)):
if isinstance(b, (list, tuple, set)):
convert = S3TypeConverter.convert
found = True
for _b in b:
if _b not in a:
found = False
for _a in a:
try:
if convert(_a, _b) == _a:
found = True
break
except (TypeError, ValueError):
continue
if not found:
break
return found
else:
return b in a
else:
return str(b) in str(a)
except:
return False
# -------------------------------------------------------------------------
def represent(self, resource):
"""
Represent this query as a human-readable string.
@param resource: the resource to resolve the query against
"""
op = self.op
l = self.left
r = self.right
if op == self.AND:
l = l.represent(resource) \
if isinstance(l, S3ResourceQuery) else str(l)
r = r.represent(resource) \
if isinstance(r, S3ResourceQuery) else str(r)
return "(%s and %s)" % (l, r)
elif op == self.OR:
l = l.represent(resource) \
if isinstance(l, S3ResourceQuery) else str(l)
r = r.represent(resource) \
if isinstance(r, S3ResourceQuery) else str(r)
return "(%s or %s)" % (l, r)
elif op == self.NOT:
l = l.represent(resource) \
if isinstance(l, S3ResourceQuery) else str(l)
return "(not %s)" % l
else:
if isinstance(l, S3FieldSelector):
l = l.represent(resource)
elif isinstance(l, basestring):
l = '"%s"' % l
if isinstance(r, S3FieldSelector):
r = r.represent(resource)
elif isinstance(r, basestring):
r = '"%s"' % r
if op == self.CONTAINS:
return "(%s in %s)" % (r, l)
elif op == self.BELONGS:
return "(%s in %s)" % (l, r)
elif op == self.ANYOF:
return "(%s contains any of %s)" % (l, r)
elif op == self.TYPEOF:
return "(%s is a type of %s)" % (l, r)
elif op == self.LIKE:
return "(%s like %s)" % (l, r)
elif op == self.LT:
return "(%s < %s)" % (l, r)
elif op == self.LE:
return "(%s <= %s)" % (l, r)
elif op == self.EQ:
return "(%s == %s)" % (l, r)
elif op == self.NE:
return "(%s != %s)" % (l, r)
elif op == self.GE:
return "(%s >= %s)" % (l, r)
elif op == self.GT:
return "(%s > %s)" % (l, r)
else:
return "(%s ?%s? %s)" % (l, op, r)
# -------------------------------------------------------------------------
def serialize_url(self, resource=None):
"""
Serialize this query as URL query
@return: a Storage of URL variables
"""
op = self.op
l = self.left
r = self.right
url_query = Storage()
def _serialize(n, o, v, invert):
try:
quote = lambda s: s if "," not in s else '"%s"' % s
if isinstance(v, list):
v = ",".join([quote(S3TypeConverter.convert(str, val))
for val in v])
else:
v = quote(S3TypeConverter.convert(str, v))
except:
return
if "." not in n:
if resource is not None:
n = "~.%s" % n
else:
return url_query
if o == self.LIKE:
v = v.replace("%", "*")
if o == self.EQ:
operator = ""
else:
operator = "__%s" % o
if invert:
operator = "%s!" % operator
key = "%s%s" % (n, operator)
if key in url_query:
url_query[key] = "%s,%s" % (url_query[key], v)
else:
url_query[key] = v
return url_query
if op == self.AND:
lu = l.serialize_url(resource=resource)
url_query.update(lu)
ru = r.serialize_url(resource=resource)
url_query.update(ru)
elif op == self.OR:
sub = self._or()
if sub is None:
# This OR-subtree is not serializable
return url_query
n, o, v, invert = sub
_serialize(n, o, v, invert)
elif op == self.NOT:
lu = l.serialize_url(resource=resource)
for k in lu:
url_query["%s!" % k] = lu[k]
elif isinstance(l, S3FieldSelector):
_serialize(l.name, op, r, False)
return url_query
# -------------------------------------------------------------------------
def _or(self):
"""
            Helper method to URL-serialize an OR-subtree of a query in
            alternative field selector syntax, provided all its branches
            use the same operator and value (this is needed to URL-serialize
            an S3SearchSimpleWidget query).
"""
op = self.op
l = self.left
r = self.right
if op == self.AND:
return None
elif op == self.NOT:
lname, lop, lval, linv = l._or()
return (lname, lop, lval, not linv)
elif op == self.OR:
lvars = l._or()
rvars = r._or()
if lvars is None or rvars is None:
return None
lname, lop, lval, linv = lvars
rname, rop, rval, rinv = rvars
if lop != rop or linv != rinv:
return None
if lname == rname:
return (lname, lop, [lval, rval], linv)
elif lval == rval:
return ("%s|%s" % (lname, rname), lop, lval, linv)
else:
return None
else:
return (l.name, op, r, False)
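    # Illustrative OR-serialization (the field names are assumptions): the
    # query (FS("first_name").like("Mary%")) | (FS("last_name").like("Mary%"))
    # collapses into the alternative-selector form
    #   first_name|last_name__like=Mary*
    # because both branches share the same operator and value.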
# =============================================================================
class S3URLQuery(object):
""" URL Query Parser """
# -------------------------------------------------------------------------
@classmethod
def parse(cls, resource, vars):
"""
Construct a Storage of S3ResourceQuery from a Storage of get_vars
@param resource: the S3Resource
@param vars: the get_vars
@return: Storage of S3ResourceQuery like {alias: query}, where
alias is the alias of the component the query concerns
"""
query = Storage()
if resource is None:
return query
if not vars:
return query
subquery = cls._subquery
allof = lambda l, r: l if r is None else r if l is None else r & l
for key, value in vars.iteritems():
if key == "$filter":
# Instantiate the advanced filter parser
parser = S3URLQueryParser()
if parser.parser is None:
# not available
continue
# Multiple $filter expressions?
expressions = value if type(value) is list else [value]
# Default alias (=master)
default_alias = resource.alias
# Parse all expressions
for expression in expressions:
parsed = parser.parse(expression)
for alias in parsed:
q = parsed[alias]
qalias = alias if alias is not None else default_alias
if qalias not in query:
query[qalias] = [q]
else:
query[qalias].append(q)
# Stop here
continue
elif not("." in key or key[0] == "(" and ")" in key):
# Not a filter expression
continue
# Process old-style filters
selectors, op, invert = cls.parse_expression(key)
if type(value) is list:
# Multiple queries with the same selector (AND)
q = reduce(allof,
[subquery(selectors, op, invert, v) for v in value],
None)
else:
q = subquery(selectors, op, invert, value)
if q is None:
continue
# Append to query
if len(selectors) > 1:
aliases = [s.split(".", 1)[0] for s in selectors]
if len(set(aliases)) == 1:
alias = aliases[0]
else:
alias = resource.alias
#alias = resource.alias
else:
alias = selectors[0].split(".", 1)[0]
if alias == "~":
alias = resource.alias
if alias not in query:
query[alias] = [q]
else:
query[alias].append(q)
return query
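    # Example (hedged; the component and field are assumptions): get_vars of
    #   {"project.name__like": "water*"}
    # would yield {"project": [<like-query on project.name>]}, i.e. the
    # sub-queries keyed by the component alias "project".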
# -------------------------------------------------------------------------
@staticmethod
def parse_url(url):
"""
Parse a URL query into get_vars
            @param url: the URL or query string
@return: the get_vars (Storage)
"""
if not url:
return Storage()
elif "?" in url:
query = url.split("?", 1)[1]
elif "=" in url:
query = url
else:
return Storage()
import cgi
dget = cgi.parse_qsl(query, keep_blank_values=1)
get_vars = Storage()
for (key, value) in dget:
if key in get_vars:
if type(get_vars[key]) is list:
get_vars[key].append(value)
else:
get_vars[key] = [get_vars[key], value]
else:
get_vars[key] = value
return get_vars
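    # Example: parse_url("index?name=Test&name=Demo&active=True") returns
    # Storage({"name": ["Test", "Demo"], "active": "True"}) - repeated keys
    # become lists while single keys stay scalar.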
# -------------------------------------------------------------------------
@staticmethod
def parse_expression(key):
"""
Parse a URL expression
@param key: the key for the URL variable
@return: tuple (selectors, operator, invert)
"""
if key[-1] == "!":
invert = True
else:
invert = False
fs = key.rstrip("!")
op = None
if "__" in fs:
fs, op = fs.split("__", 1)
op = op.strip("_")
if not op:
op = "eq"
if "|" in fs:
selectors = [s for s in fs.split("|") if s]
else:
selectors = [fs]
return selectors, op, invert
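    # Examples (following the rules above):
    #   parse_expression("name")                  -> (["name"], "eq", False)
    #   parse_expression("name|comments__like!")  -> (["name", "comments"],
    #                                                 "like", True)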
# -------------------------------------------------------------------------
@staticmethod
def parse_value(value):
"""
Parse a URL query value
@param value: the value
@return: the parsed value
"""
uquote = lambda w: w.replace('\\"', '\\"\\') \
.strip('"') \
.replace('\\"\\', '"')
NONE = ("NONE", "None")
if type(value) is not list:
value = [value]
vlist = []
for item in value:
w = ""
quote = False
ignore_quote = False
for c in s3_unicode(item):
if c == '"' and not ignore_quote:
w += c
quote = not quote
elif c == "," and not quote:
if w in NONE:
w = None
else:
w = uquote(w).encode("utf-8")
vlist.append(w)
w = ""
else:
w += c
if c == "\\":
ignore_quote = True
else:
ignore_quote = False
if w in NONE:
w = None
else:
w = uquote(w).encode("utf-8")
vlist.append(w)
if len(vlist) == 1:
return vlist[0]
return vlist
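    # Examples (following the rules above):
    #   parse_value("water")      -> "water"
    #   parse_value('"a,b",c')    -> ["a,b", "c"]
    #   parse_value("NONE")       -> None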
# -------------------------------------------------------------------------
@classmethod
def _subquery(cls, selectors, op, invert, value):
"""
Construct a sub-query from URL selectors, operator and value
@param selectors: the selector(s)
@param op: the operator
@param invert: invert the query
@param value: the value
"""
v = cls.parse_value(value)
q = None
for fs in selectors:
if op == S3ResourceQuery.LIKE:
# Auto-lowercase and replace wildcard
f = S3FieldSelector(fs).lower()
if isinstance(v, basestring):
v = v.replace("*", "%").lower()
elif isinstance(v, list):
v = [x.replace("*", "%").lower() for x in v if x is not None]
else:
f = S3FieldSelector(fs)
rquery = None
try:
rquery = S3ResourceQuery(op, f, v)
except SyntaxError:
current.log.error("Invalid URL query operator: %s (sub-query ignored)" % op)
q = None
break
# Invert operation
if invert:
rquery = ~rquery
# Add to subquery
if q is None:
q = rquery
elif invert:
q &= rquery
else:
q |= rquery
return q
# =============================================================================
# Helper to combine multiple queries using AND
#
combine = lambda x, y: x & y if x is not None else y
# =============================================================================
class S3URLQueryParser(object):
""" New-style URL Filter Parser """
def __init__(self):
""" Constructor """
self.parser = None
self.ParseResults = None
self.ParseException = None
self._parser()
# -------------------------------------------------------------------------
def _parser(self):
""" Import PyParsing and define the syntax for filter expressions """
# PyParsing available?
try:
import pyparsing as pp
except ImportError:
current.log.error("Advanced filter syntax requires pyparsing, $filter ignored")
return False
# Selector Syntax
context = lambda s, l, t: t[0].replace("[", "(").replace("]", ")")
selector = pp.Word(pp.alphas + "[]~", pp.alphanums + "_.$:[]")
selector.setParseAction(context)
keyword = lambda x, y: x | pp.Keyword(y) if x else pp.Keyword(y)
# Expression Syntax
function = reduce(keyword, S3FieldSelector.OPERATORS)
expression = function + \
pp.Literal("(").suppress() + \
selector + \
pp.Literal(")").suppress()
# Comparison Syntax
comparison = reduce(keyword, S3ResourceQuery.COMPARISON)
# Value Syntax
        number = pp.Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")
value = number | \
pp.Keyword("NONE") | \
pp.quotedString | \
pp.Word(pp.alphanums + pp.printables)
qe = pp.Group(pp.Group(expression | selector) +
comparison +
pp.originalTextFor(pp.delimitedList(value, combine=True)))
parser = pp.operatorPrecedence(qe, [("not", 1, pp.opAssoc.RIGHT, ),
("and", 2, pp.opAssoc.LEFT, ),
("or", 2, pp.opAssoc.LEFT, ),
])
self.parser = parser
self.ParseResults = pp.ParseResults
self.ParseException = pp.ParseException
return True
# -------------------------------------------------------------------------
def parse(self, expression):
"""
Parse a string expression and convert it into a dict
of filters (S3ResourceQueries).
            @param expression: the filter expression as string
@return: a dict of {component_alias: filter_query}
"""
query = {}
parser = self.parser
if not expression or parser is None:
return query
try:
parsed = parser.parseString(expression)
except self.ParseException:
current.log.error("Invalid URL Filter Expression: '%s'" %
expression)
else:
if parsed:
query = self.convert_expression(parsed[0])
return query
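    # A hedged example of the $filter syntax accepted here (the selector and
    # values are assumptions for illustration):
    #
    #   parser = S3URLQueryParser()
    #   q = parser.parse('lower(~.name) like "water*" and ~.status eq 1')
    #   # -> {component_alias: S3ResourceQuery, ...}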
# -------------------------------------------------------------------------
def convert_expression(self, expression):
"""
Convert a parsed filter expression into a dict of
filters (S3ResourceQueries)
@param expression: the parsed filter expression (ParseResults)
            @return: a dict of {component_alias: filter_query}
"""
ParseResults = self.ParseResults
convert = self.convert_expression
if isinstance(expression, ParseResults):
first, op, second = ([None, None, None] + list(expression))[-3:]
if isinstance(first, ParseResults):
first = convert(first)
if isinstance(second, ParseResults):
second = convert(second)
if op == "not":
return self._not(second)
elif op == "and":
return self._and(first, second)
elif op == "or":
return self._or(first, second)
elif op in S3ResourceQuery.COMPARISON:
return self._query(op, first, second)
elif op in S3FieldSelector.OPERATORS and second:
selector = S3FieldSelector(second)
selector.op = op
return selector
elif op is None and second:
return S3FieldSelector(second)
else:
return None
# -------------------------------------------------------------------------
def _and(self, first, second):
"""
            Conjunction of two query dicts {component_alias: filter_query} (AND)
@param first: the first dict
@param second: the second dict
@return: the combined dict
"""
if not first:
return second
if not second:
return first
result = dict(first)
for alias, subquery in second.items():
if alias not in result:
result[alias] = subquery
else:
result[alias] &= subquery
return result
# -------------------------------------------------------------------------
def _or(self, first, second):
"""
Disjunction of two query dicts {component_alias: filter_query} (OR)
@param first: the first query dict
@param second: the second query dict
@return: the combined dict
"""
if not first:
return second
if not second:
return first
if len(first) > 1:
first = {None: reduce(combine, first.values())}
if len(second) > 1:
second = {None: reduce(combine, second.values())}
falias = first.keys()[0]
salias = second.keys()[0]
alias = falias if falias == salias else None
return {alias: first[falias] | second[salias]}
# -------------------------------------------------------------------------
def _not(self, query):
"""
Negation of a query dict
@param query: the query dict {component_alias: filter_query}
"""
if query is None:
return None
if len(query) == 1:
alias, sub = query.items()[0]
if sub.op == S3ResourceQuery.OR and alias is None:
l = sub.left
r = sub.right
lalias = self._alias(l.left)
ralias = self._alias(r.left)
if lalias == ralias:
return {alias: ~sub}
else:
# not(A or B) => not(A) and not(B)
return {lalias: ~l, ralias: ~r}
else:
if sub.op == S3ResourceQuery.NOT:
return {alias: sub.left}
else:
return {alias: ~sub}
else:
return {None: ~reduce(combine, query.values())}
# -------------------------------------------------------------------------
def _query(self, op, first, second):
"""
Create an S3ResourceQuery
@param op: the operator
@param first: the first operand (=S3FieldSelector)
@param second: the second operand (=value)
"""
if not isinstance(first, S3FieldSelector):
return {}
selector = first
alias = self._alias(selector)
value = S3URLQuery.parse_value(second.strip())
if op == S3ResourceQuery.LIKE:
if isinstance(value, basestring):
value = value.replace("*", "%").lower()
elif isinstance(value, list):
value = [x.replace("*", "%").lower() for x in value if x is not None]
return {alias: S3ResourceQuery(op, selector, value)}
# -------------------------------------------------------------------------
@staticmethod
def _alias(selector):
"""
Get the component alias from an S3FieldSelector (DRY Helper)
@param selector: the S3FieldSelector
@return: the alias as string or None for the master resource
"""
alias = None
if selector and isinstance(selector, S3FieldSelector):
prefix = selector.name.split("$", 1)[0]
if "." in prefix:
alias = prefix.split(".", 1)[0]
if alias in ("~", ""):
alias = None
return alias
# END =========================================================================
|
ScottBuchanan/eden
|
modules/s3/s3query.py
|
Python
|
mit
| 82,174
|
# -*- coding: utf-8 -*-
"""
Main __init__.py for the vsgen package
"""
import pkg_resources
try:
pkg = pkg_resources.get_distribution("vsgen")
__version__ = pkg.version
except pkg_resources.DistributionNotFound:
__version__ = "0.0.0.0"
from vsgen.solution import VSGSolution
from vsgen.project import VSGProject
from vsgen.register import VSGRegisterable, VSGRegisterCommand
from vsgen.writer import VSGWriter, VSGWritable, VSGWriteCommand
from vsgen.suite import VSGSuite
from vsgen.util.logger import VSGLogger
from vsgen.util.timer import VSGTimer
from vsgen.util.config import VSGConfigParser
__all__ = [
'VSGSolution',
'VSGProject',
'VSGRegisterable',
'VSGRegisterCommand',
'VSGWriter',
'VSGWritable',
'VSGWriteCommand',
'VSGSuite',
'VSGLogger',
'VSGTimer',
'VSGConfigParser'
]
|
dbarsam/python-vsgen
|
vsgen/__init__.py
|
Python
|
mit
| 841
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2015 Steffen Deusch
# Licensed under the MIT license
# Supplement to MonitorNjus, 14.09.2015 (Version 0.9.3)
import os
workingdir = os.path.dirname(os.path.realpath(__file__))
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import cgi
import imp
modulesdir = workingdir+"/../modules"
common = imp.load_source("common", modulesdir+"/common.py")
colors = imp.load_source("colors", modulesdir+"/colors.py")
try:
if common.authentication:
auth = imp.load_source("auth", modulesdir+"/auth.py")
auth.me()
rows = int(common.getrows())
rowsone = rows + 1
def displaysets():
x = 1
out = ""
while x <= rows:
if unicode(x) in common.getallrows():
out += u"""\
<div class="col s12">
<h5 class="header center """+colors.color+"""-text">Displayset """+unicode(x)+"""</h5>
<div class="row">
<div class="col s6">
<div class="card white darken-1">
<div class="card-content white-text">
<span class="card-title """+colors.color+"""-text text-darken-2">Linke Seite</span><br>
<div class="row">
<div class="input-field col s6">
<input value=\""""+cgi.escape(unicode(common.testexist("URL", "Links", x)))+"""\" name="URL-Links-"""+unicode(x)+"""\" id="URL-Links-"""+unicode(x)+"""\" type="text">
<label for="URL-Links-"""+unicode(x)+"""\">URL Links</label>
</div>
<div class="input-field col s6">
<input value=\""""+cgi.escape(unicode(common.testexist("REFRESH", "Links", x)))+"""\" name="REFRESH-Links-"""+unicode(x)+"""\" id="REFRESH-Links-"""+unicode(x)+"""\" type="number">
<label for="REFRESH-Links-"""+unicode(x)+"""\">Refresh Links</label>
</div>
</div>
<div>
<input type="checkbox" name="AKTIV-Links-"""+unicode(x)+"""\" id="AKTIV-Links-"""+unicode(x)+"""\" """+common.aktiv("AKTIV", "Links", x)+"""/>
<label for="AKTIV-Links-"""+unicode(x)+"""\">Links aktiviert</label>
<input type="hidden" value="0" name="HIDDEN.AKTIV-Links-"""+unicode(x)+"""\">
<input type="checkbox" name="REFRESHAKTIV-Links-"""+unicode(x)+"""\" id="REFRESHAKTIV-Links-"""+unicode(x)+"""\" """+common.aktiv("REFRESHAKTIV", "Links", x)+"""/>
<label for="REFRESHAKTIV-Links-"""+unicode(x)+"""\">Links neu laden</label>
<input type="hidden" value="0" name="HIDDEN.REFRESHAKTIV-Links-"""+unicode(x)+"""\">
</div>
<div class="row">
<div class="input-field col s4">
<input value=\""""+cgi.escape(unicode(common.getdate("uhrzeit", "Links", x)))+"""\" name="uhrzeit-Links-"""+unicode(x)+"""\" id="uhrzeit-Links-"""+unicode(x)+"""\" type="text">
<label for="uhrzeit-Links-"""+unicode(x)+"""\">Uhrzeit</label>
</div>
<div class="input-field col s4">
<input value=\""""+cgi.escape(unicode(common.getdate("wochentag", "Links", x)))+"""\" name="wochentag-Links-"""+unicode(x)+"""\" id="wochentag-Links-"""+unicode(x)+"""\" type="text">
<label for="wochentag-Links-"""+unicode(x)+"""\">Wochentag</label>
</div>
<div class="input-field col s2">
<input value=\""""+cgi.escape(unicode(common.getdate("tag", "Links", x)))+"""\" name="tag-Links-"""+unicode(x)+"""\" id="tag-Links-"""+unicode(x)+"""\" type="text">
<label for="tag-Links-"""+unicode(x)+"""\">Tag</label>
</div>
<div class="input-field col s2">
<input value=\""""+cgi.escape(unicode(common.getdate("monat", "Links", x)))+"""\" name="monat-Links-"""+unicode(x)+"""\" id="monat-Links-"""+unicode(x)+"""\" type="text">
<label for="monat-Links-"""+unicode(x)+"""\">Monat</label>
</div>
</div>
<div class="row">
<div class="input-field col s3">
<input value=\""""+cgi.escape(unicode(common.getinfo("MARGINLEFT","Links",x)))+"""\" name="MARGINLEFT-Links-"""+unicode(x)+"""\" id="MARGINLEFT-Links-"""+unicode(x)+"""\" type="text">
<label for="MARGINLEFT-Links-"""+unicode(x)+"""\">Rand-Links</label>
</div>
<div class="input-field col s3">
<input value=\""""+cgi.escape(unicode(common.getinfo("MARGINRIGHT","Links",x)))+"""\" name="MARGINRIGHT-Links-"""+unicode(x)+"""\" id="MARGINRIGHT-Links-"""+unicode(x)+"""\" type="text">
<label for="MARGINRIGHT-Links-"""+unicode(x)+"""\">Rand-Rechts</label>
</div>
<div class="input-field col s3">
<input value=\""""+cgi.escape(unicode(common.getinfo("MARGINTOP","Links",x)))+"""\" name="MARGINTOP-Links-"""+unicode(x)+"""\" id="MARGINTOP-Links-"""+unicode(x)+"""\" type="text">
<label for="MARGINTOP-Links-"""+unicode(x)+"""\">Rand-Oben</label>
</div>
<div class="input-field col s3">
<input value=\""""+cgi.escape(unicode(common.getinfo("MARGINBOTTOM","Links",x)))+"""\" name="MARGINBOTTOM-Links-"""+unicode(x)+"""\" id="MARGINBOTTOM-Links-"""+unicode(x)+"""\" type="text">
<label for="MARGINBOTTOM-Links-"""+unicode(x)+"""\">Rand-Unten</label>
</div>
</div>
</div>
</div>
</div>
<div class="col s6">
<div class="card white darken-1">
<div class="card-content white-text">
<span class="card-title """+colors.color+"""-text text-darken-2">Rechte Seite</span><br>
<div class="row">
<div class="input-field col s6">
<input value=\""""+cgi.escape(unicode(common.testexist("URL", "Rechts", x)))+"""\" name="URL-Rechts-"""+unicode(x)+"""\" id="URL-Rechts-"""+unicode(x)+"""\" type="text">
<label for="URL-Rechts-"""+unicode(x)+"""\">URL Rechts</label>
</div>
<div class="input-field col s6">
<input value=\""""+cgi.escape(unicode(common.testexist("REFRESH", "Rechts", x)))+"""\" name="REFRESH-Rechts-"""+unicode(x)+"""\" id="REFRESH-Rechts-"""+unicode(x)+"""\" type="number">
<label for="REFRESH-Rechts-"""+unicode(x)+"""\">Refresh Rechts</label>
</div>
</div>
<div>
<input type="checkbox" name="AKTIV-Rechts-"""+unicode(x)+"""\" id="AKTIV-Rechts-"""+unicode(x)+"""\" """+common.aktiv("AKTIV", "Rechts", x)+"""/>
<label for="AKTIV-Rechts-"""+unicode(x)+"""\">Rechts aktiviert</label>
<input type="hidden" value="0" name="HIDDEN.AKTIV-Rechts-"""+unicode(x)+"""\">
<input type="checkbox" name="REFRESHAKTIV-Rechts-"""+unicode(x)+"""\" id="REFRESHAKTIV-Rechts-"""+unicode(x)+"""\" """+common.aktiv("REFRESHAKTIV", "Rechts", x)+"""/>
<label for="REFRESHAKTIV-Rechts-"""+unicode(x)+"""\">Rechts neu laden</label>
<input type="hidden" value="0" name="HIDDEN.REFRESHAKTIV-Rechts-"""+unicode(x)+"""\">
</div>
<div class="row">
<div class="input-field col s4">
<input value=\""""+cgi.escape(unicode(common.getdate("uhrzeit", "Rechts", x)))+"""\" name="uhrzeit-Rechts-"""+unicode(x)+"""\" id="uhrzeit-Rechts-"""+unicode(x)+"""\" type="text">
<label for="uhrzeit-Rechts-"""+unicode(x)+"""\">Uhrzeit</label>
</div>
<div class="input-field col s4">
<input value=\""""+cgi.escape(unicode(common.getdate("wochentag", "Rechts", x)))+"""\" name="wochentag-Rechts-"""+unicode(x)+"""\" id="wochentag-Rechts-"""+unicode(x)+"""\" type="text">
<label for="wochentag-Rechts-"""+unicode(x)+"""\">Wochentag</label>
</div>
<div class="input-field col s2">
<input value=\""""+cgi.escape(unicode(common.getdate("tag", "Rechts", x)))+"""\" name="tag-Rechts-"""+unicode(x)+"""\" id="tag-Rechts-"""+unicode(x)+"""\" type="text">
<label for="tag-Rechts-"""+unicode(x)+"""\">Tag</label>
</div>
<div class="input-field col s2">
<input value=\""""+cgi.escape(unicode(common.getdate("monat", "Rechts", x)))+"""\" name="monat-Rechts-"""+unicode(x)+"""\" id="monat-Rechts-"""+unicode(x)+"""\" type="text">
<label for="monat-Rechts-"""+unicode(x)+"""\">Monat</label>
</div>
</div>
<div class="row">
<div class="input-field col s3">
<input value=\""""+cgi.escape(unicode(common.getinfo("MARGINLEFT","Rechts",x)))+"""\" name="MARGINLEFT-Rechts-"""+unicode(x)+"""\" id="MARGINLEFT-Rechts-"""+unicode(x)+"""\" type="text">
<label for="MARGINLEFT-Rechts-"""+unicode(x)+"""\">Rand-Links</label>
</div>
<div class="input-field col s3">
<input value=\""""+cgi.escape(unicode(common.getinfo("MARGINRIGHT","Rechts",x)))+"""\" name="MARGINRIGHT-Rechts-"""+unicode(x)+"""\" id="MARGINRIGHT-Rechts-"""+unicode(x)+"""\" type="text">
<label for="MARGINRIGHT-Rechts-"""+unicode(x)+"""\">Rand-Rechts</label>
</div>
<div class="input-field col s3">
<input value=\""""+cgi.escape(unicode(common.getinfo("MARGINTOP","Rechts",x)))+"""\" name="MARGINTOP-Rechts-"""+unicode(x)+"""\" id="MARGINTOP-Rechts-"""+unicode(x)+"""\" type="text">
<label for="MARGINTOP-Rechts-"""+unicode(x)+"""\">Rand-Oben</label>
</div>
<div class="input-field col s3">
<input value=\""""+cgi.escape(unicode(common.getinfo("MARGINBOTTOM","Rechts",x)))+"""\" name="MARGINBOTTOM-Rechts-"""+unicode(x)+"""\" id="MARGINBOTTOM-Rechts-"""+unicode(x)+"""\" type="text">
<label for="MARGINBOTTOM-Rechts-"""+unicode(x)+"""\">Rand-Unten</label>
</div>
</div>
</div>
</div>
</div>\n"""
if rows != 1:
out += u"""<center><a class="waves-effect waves-light btn" href="setn.py?referer=row&delnum="""+unicode(x)+"""\">Displayset löschen</a></center>\n"""
out += u"""\
</div>
</div>\n"""
x = x + 1
return out
out = u"""\
Content-Type: text/html;charset=utf-8
<!DOCTYPE html>
<html lang="de">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1.0, user-scalable=no"/>
<link href="../bin/css/materialize.css" type="text/css" rel="stylesheet" media="screen,projection"/>
<title>MonitorNjus Admin-Panel</title>
<!-- MonitorNjus -->
<!-- Copyright (c) """+unicode(common.datum.year)+""" Steffen Deusch -->
<!-- https://github.com/SteffenDE/MonitorNjus -->\n"""
out += unicode(colors.adminstyles)
out += u"""
</head>
<body>
<script type="text/javascript" src="../bin/js/jquery-2.1.4.min.js"></script>
<script type="text/javascript" src="../bin/js/materialize.min.js"></script>
<nav class=\""""+colors.color+"""\" role="navigation">
<div class="container">
<div class="nav-wrapper">
<a id="logo-container" href="#" class="brand-logo">MonitorNjus Admin Panel</a>
<a href="#" data-activates="mobile" class="button-collapse"><i class="mdi-navigation-menu"></i></a>
<ul id="nav-mobile" class="right hide-on-med-and-down">
<li><a href="widgets.py">Widgets</a></li>
<li><a href="../bin/">Zum Frontend</a></li>
</ul>
<ul class="side-nav" id="mobile">
<li><a href="widgets.py">Widgets</a></li>
<li><a href="../bin/">Zum Frontend</a></li>
</ul>
</div>
</div>
</nav>
<h3 class="header center """+colors.color+"""-text">Haupteinstellungen</h3>
<div class="container">
<div class="row">
<form class="col s12" action="setn.py" method="post">
<input type="hidden" name="referer" value="index" />
<div class="row">\n"""
out += unicode(displaysets())
out += u"""\
<div class="col s12">
<center><a class="btn waves-effect waves-light """+colors.color+"""\" href=setn.py?referer=row&createnum="""+unicode(rowsone)+"""><i class="mdi-content-add"></i></a></center>
<p class="range-field"><input type="range" id="teilung" name="teilung" min="1" max="99" value=\""""+unicode(common.readsettings("TEILUNG"))+"""\" /></p>
<div class="row">
<div class="col s6">
<div class="card white darken-1">
<div class="card-content white-text">
<span class="card-title """+colors.color+"""-text text-darken-2">Alle Seiten</span><br>
<div class="row">
<div class="input-field col s12">
<input value=\""""+cgi.escape(unicode(common.testexist("REFRESH", "global", 0)))+"""\" name="REFRESH-global-0" id="REFRESH-global-0" type="text">
<label for="REFRESH-global-0">Alle Seiten neu laden</label>
</div>
</div>
<div>
<input type="checkbox" name="REFRESHAKTIV-global-0" id="REFRESHAKTIV-global-0" """+common.aktiv("REFRESHAKTIV", "global", 0)+"""/>
<label for="REFRESHAKTIV-global-0">Globales neu laden aktiviert</label>
<input type="hidden" value="0" name="HIDDEN.REFRESHAKTIV-global-0">
</div>
</div>
</div>
</div>
<div class="col s6">
<div class="card white darken-1">
<div class="card-content white-text">
<span class="card-title """+colors.color+"""-text text-darken-2">Monitornjus Frontend</span><br>
<div class="row">
<div class="input-field col s12">
<input value=\""""+cgi.escape(unicode(common.testexist("REFRESH", "globalmon", 0)))+"""\" name="REFRESH-globalmon-0" id="REFRESH-globalmon-0" type="text">
<label for="REFRESH-globalmon-0">Monitornjus Frontend neu laden</label>
</div>
</div>
<div>
<input type="checkbox" name="REFRESHAKTIV-globalmon-0" id="REFRESHAKTIV-globalmon-0" """+common.aktiv("REFRESHAKTIV", "globalmon", 0)+"""/>
<label for="REFRESHAKTIV-globalmon-0">Monitornjus neu laden</label>
<input type="hidden" value="0" name="HIDDEN.REFRESHAKTIV-globalmon-0">
</div>
</div>
</div>
</div>
</div>
<button class="btn waves-effect waves-light" type="submit">Abschicken<i class="mdi-content-send right"></i></button>\n"""
if common.triggerrefresh:
out += """\
<a class="waves-effect waves-light btn right" href="setn.py?referer=triggerrefresh">Neuladen auslösen</a>\n"""
out += """\
</div>
</div>
</form>
</div>
</div>
<footer class="page-footer """+colors.color+"""\">
<div class="container">
<div class="row">
<div class="col l6 s12">
<h5 class="white-text">MonitorNjus für das JVG-Ehingen</h5>
</div>
</div>
</div>
<div class="footer-copyright">
<div class="container">
© Steffen Deusch """+unicode(common.datum.year)+"""
<a class="grey-text text-lighten-4 right" href="https://github.com/SteffenDE/monitornjus">"""+common.version+"""</a>
</div>
</div>
</footer>
<!-- Scripts -->
<script src="../bin/js/init.js"></script>
</body>
</html>"""
########### Output ###########
print(unicode(out))
except Exception as e:
common.debug(e)
|
SteffenDE/monitornjus-classic
|
admin/index.py
|
Python
|
mit
| 15,077
|
#!/usr/bin/env python
# Reports a summary of Kraken's results
# and optionally creates a newick Tree
# Copyright (c) 2016 Daniel Blankenberg
# Licensed under the Academic Free License version 3.0
# https://github.com/blankenberg/Kraken-Taxonomy-Report
from __future__ import print_function
import optparse
import os
import re
import sys
__VERSION__ = '0.0.2'
__URL__ = "https://github.com/blankenberg/Kraken-Taxonomy-Report"
# Rank names were pulled from ncbi nodes.dmp on 02/02/2016
# cat nodes.dmp | cut -f 5 | sort | uniq
# "root" is added manually
NO_RANK_NAME = "no rank"
RANK_NAMES = [ NO_RANK_NAME,
"root",
"superkingdom",
"kingdom",
"subkingdom",
"superphylum",
"phylum",
"subphylum",
"superclass",
"class",
"subclass",
"infraclass",
"superorder",
"order",
"suborder",
"infraorder",
"parvorder",
"superfamily",
"family",
"subfamily",
"tribe",
"subtribe",
"genus",
"subgenus",
"species group",
"species subgroup",
"species",
"subspecies",
"varietas",
"forma" ]
# NB: We put 'no rank' at top of list for generating trees, due to e.g.
# root (root) -> cellular organisms (no rank) -> bacteria (superkingdom)
RANK_NAME_TO_INTS = dict( [ (y, x) for (x, y) in enumerate( RANK_NAMES ) ] )
RANK_NAMES_INTS = range( len( RANK_NAMES ) )
NO_RANK_INT = RANK_NAMES.index( NO_RANK_NAME )
NO_RANK_CODE = 'n'
PRIMARY_RANK_NAMES = [ 'species', 'genus', 'family', 'order', 'class', 'phylum', 'kingdom' ]
RANK_INT_TO_CODE = {}
for name in PRIMARY_RANK_NAMES:
RANK_INT_TO_CODE[ RANK_NAMES.index( name ) ] = name[0]
RANK_INT_TO_CODE[ RANK_NAMES.index( 'superkingdom' ) ] = 'd'
PRIMARY_RANK_NAMES.append( 'superkingdom' )
NAME_STUB = "%s__%s"
NAME_RE = re.compile( "(\t| |\||\.;)" )
NAME_REPL = "_"
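# For example (illustrative): NAME_RE.sub( NAME_REPL, "Escherichia coli K-12" )
# gives "Escherichia_coli_K-12"; tabs, spaces, pipes and the sequence ".;"
# all become underscores.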
def get_kraken_db_path( db ):
assert db, ValueError( "You must provide a kraken database" )
k_db_path = os.getenv('KRAKEN_DB_PATH', None )
if k_db_path:
db = os.path.join( k_db_path, db )
return db
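# Illustrative: with KRAKEN_DB_PATH set to e.g. /opt/kraken_dbs (hypothetical
# path), get_kraken_db_path( "minikraken" ) returns "/opt/kraken_dbs/minikraken";
# with the variable unset, the name is returned unchanged.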
def load_taxonomy( db_path, sanitize_names=False ):
child_lists = {}
name_map = {}
rank_map = {}
names = {} # Store names here to look for duplicates (id, True/False name fixed)
with open( os.path.join( db_path, "taxonomy/names.dmp" ) ) as fh:
for line in fh:
line = line.rstrip( "\n\r" )
if line.endswith( "\t|" ):
line = line[:-2]
fields = line.split( "\t|\t" )
node_id = fields[0]
name = fields[1]
if sanitize_names:
name = NAME_RE.sub( NAME_REPL, name )
name_type = fields[3]
if name_type == "scientific name":
if name in names:
print( 'Warning: name "%s" found at node "%s" but already exists originally for node "%s".' % ( name, node_id, names[name][0] ), file=sys.stderr )
new_name = "%s_%s" % ( name, node_id )
print( 'Transforming node "%s" named "%s" to "%s".' % ( node_id, name, new_name ), file=sys.stderr )
assert new_name not in names, 'Transformed Name "%s" already exists. Cannot recover at this time.' % new_name
if not names[name][1]:
orig_new_name = "%s_%s" % ( name, names[name][0] )
print( 'Transforming node "%s" named "%s" to "%s".' % ( names[name][0], name, orig_new_name ), file=sys.stderr )
assert orig_new_name not in names, 'Transformed Name "%s" already exists. Cannot recover at this time.' % orig_new_name
name_map[names[name][0]] = orig_new_name
names[name] = ( names[name][0], True )
name = new_name
else:
names[name] = ( node_id, False )
name_map[ node_id ] = name
with open( os.path.join( db_path, "taxonomy/nodes.dmp" ) ) as fh:
for line in fh:
line = line.rstrip( "\n\r" )
fields = line.split( "\t|\t" )
node_id = fields[0]
parent_id = fields[1]
rank = RANK_NAME_TO_INTS.get( fields[2].lower(), None )
if rank is None:
# This should never happen, unless new taxonomy ranks are created
print( 'Unrecognized rank: Node "%s" is "%s", setting to "%s"' % ( node_id, fields[2], NO_RANK_NAME ), file=sys.stderr )
rank = NO_RANK_INT
if node_id == '1':
parent_id = '0'
if parent_id not in child_lists:
child_lists[ parent_id ] = []
child_lists[ parent_id ].append( node_id )
rank_map[node_id] = rank
return ( child_lists, name_map, rank_map )
def dfs_summation( node, counts, child_lists ):
children = child_lists.get( node, None )
if children:
for child in children:
dfs_summation( child, counts, child_lists )
counts[ node ] = counts.get( node, 0 ) + counts.get( child, 0 )
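# Illustrative sketch: with child_lists = {'1': ['2'], '2': ['3']} and
# counts = {'3': 5, '2': 1}, dfs_summation( '1', counts, child_lists )
# leaves counts == {'3': 5, '2': 6, '1': 6}; every node accumulates the
# reads of its descendants.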
def dfs_report( node, file_data, hit_taxa, rank_map, name_map, child_lists, output_lines, options, name=None, tax=None ):
rank_int = rank_map[node]
code = RANK_INT_TO_CODE.get( rank_int, NO_RANK_CODE )
if ( code != NO_RANK_CODE or options.intermediate ) and ( options.show_zeros or node in hit_taxa):
if name is None:
name = ""
else:
name = "%s|" % name
if tax is None:
tax = ''
else:
tax = "%s;" % tax
sanitized_name = name_map[ node ]
name_stub = NAME_STUB % ( code, sanitized_name )
name = name + name_stub
tax = tax + name_stub
if options.name_id:
output = node
elif options.name_long:
output = name
else:
output = sanitized_name
for val in file_data:
output = "%s\t%i" % ( output, val.get( node, 0 ) )
if options.show_rank:
output = "%s\t%s" % ( output, RANK_NAMES[ rank_int ] )
if options.taxonomy:
output = "%s\t%s" % ( output, tax )
output_lines[ rank_int ].append( output )
children = child_lists.get( node )
if children:
for child in children:
dfs_report( child, file_data, hit_taxa, rank_map, name_map, child_lists, output_lines, options, name=name, tax=tax )
def write_tree( child_lists, name_map, rank_map, options, branch_length=1 ):
# Uses Biopython, only load if making tree
import Bio.Phylo
from Bio.Phylo import BaseTree
def _get_name( node_id ):
if options.name_id:
return node_id
return name_map[node_id]
nodes = {}
root_node_id = child_lists["0"][0]
nodes[root_node_id] = BaseTree.Clade( name=_get_name( root_node_id), branch_length=branch_length )
def recurse_children( parent_id ):
if options.cluster is not None and rank_map[parent_id] == options.cluster:
# Short circuit if we found our rank, prevents 'hanging' no ranks from being output
# e.g. clustering by "species" (Escherichia coli), but have "no rank" below (Escherichia coli K-12) in test_db
return
if parent_id not in nodes:
nodes[parent_id] = BaseTree.Clade( name=_get_name( parent_id ), branch_length=branch_length )
for child_id in child_lists.get( parent_id, [] ):
if options.cluster is None or ( rank_map[child_id] <= options.cluster ):
if child_id not in nodes:
nodes[child_id] = BaseTree.Clade(name=_get_name( child_id ), branch_length=branch_length)
nodes[parent_id].clades.append(nodes[child_id])
recurse_children( child_id )
recurse_children( root_node_id )
tree = BaseTree.Tree(root=nodes[root_node_id])
Bio.Phylo.write( [tree], options.output_tree, 'newick' )
def __main__():
parser = optparse.OptionParser( usage="%prog [options] file1 file...fileN" )
parser.add_option( '-v', '--version', dest='version', action='store_true', default=False, help='print version and exit' )
parser.add_option( '', '--show-zeros', dest='show_zeros', action='store_true', default=False, help='Show empty nodes' )
parser.add_option( '', '--header-line', dest='header_line', action='store_true', default=False, help='Provide a header on output' )
parser.add_option( '', '--intermediate', dest='intermediate', action='store_true', default=False, help='Intermediate Ranks' )
parser.add_option( '', '--name-id', dest='name_id', action='store_true', default=False, help='Use Taxa ID instead of Name' )
parser.add_option( '', '--name-long', dest='name_long', action='store_true', default=False, help='Use Long taxa ID instead of base name' )
parser.add_option( '', '--taxonomy', dest='taxonomy', action='store_true', default=False, help='Output taxonomy in last column' )
parser.add_option( '', '--cluster', dest='cluster', action='store', type="string", default=None, help='Cluster counts to specified rank' )
parser.add_option( '', '--summation', dest='summation', action='store_true', default=False, help='Add summation of child counts to each taxa' )
parser.add_option( '', '--sanitize-names', dest='sanitize_names', action='store_true', default=False, help='Replace special chars (\t| |\||\.;) with underscore (_)' )
parser.add_option( '', '--show-rank', dest='show_rank', action='store_true', default=False, help='Output column with Rank name' )
parser.add_option( '', '--db', dest='db', action='store', type="string", default=None, help='Name of Kraken database' )
parser.add_option( '', '--output', dest='output', action='store', type="string", default=None, help='Name of output file' )
parser.add_option( '', '--output-tree', dest='output_tree', action='store', type="string", default=None, help='Name of output file to place newick tree' )
(options, args) = parser.parse_args()
if options.version:
print( "Kraken Taxonomy Report (%s) version %s" % ( __URL__, __VERSION__ ), file=sys.stderr )
sys.exit()
if not args:
print( parser.get_usage(), file=sys.stderr )
sys.exit()
if options.cluster:
cluster_name = options.cluster.lower()
cluster = RANK_NAME_TO_INTS.get( cluster_name, None )
assert cluster is not None, ValueError( '"%s" is not a valid rank for clustering.' % options.cluster )
if cluster_name not in PRIMARY_RANK_NAMES:
assert options.intermediate, ValueError( 'You cannot cluster by "%s", unless you enable intermediate ranks.' % options.cluster )
ranks_to_report = [ cluster ]
options.cluster = cluster
# When clustering we need to do summation
options.summation = True
else:
options.cluster = None # make empty string into None
ranks_to_report = RANK_NAMES_INTS
if options.output:
output_fh = open(options.output, 'w')
else:
output_fh = sys.stdout
db_path = get_kraken_db_path( options.db )
( child_lists, name_map, rank_map ) = load_taxonomy( db_path, sanitize_names=options.sanitize_names )
file_data = []
hit_taxa = []
for input_filename in args:
taxo_counts = {}
with open( input_filename ) as fh:
for line in fh:
fields = line.split( "\t" )
taxo_counts[ fields[2] ] = taxo_counts.get( fields[2], 0 ) + 1
clade_counts = taxo_counts.copy() # fixme remove copying?
if options.summation:
dfs_summation( '1', clade_counts, child_lists )
for key, value in clade_counts.items():
if value and key not in hit_taxa:
hit_taxa.append( key )
file_data.append( clade_counts )
if options.header_line:
output_fh.write( "#ID\t" )
output_fh.write( "\t".join( args ) )
if options.show_rank:
output_fh.write( "\trank" )
if options.taxonomy:
output_fh.write( "\ttaxonomy" )
output_fh.write( '\n' )
output_lines = dict( [ ( x, [] ) for x in RANK_NAMES_INTS ] )
dfs_report( '1', file_data, hit_taxa, rank_map, name_map, child_lists, output_lines, options, name=None, tax=None )
for rank_int in ranks_to_report:
for line in output_lines.get( rank_int, [] ):
output_fh.write( line )
output_fh.write( '\n' )
output_fh.close()
if options.output_tree:
write_tree( child_lists, name_map, rank_map, options )
if __name__ == "__main__":
__main__()
|
SANBI-SA/tools-iuc
|
tools/kraken_taxonomy_report/kraken_taxonomy_report.py
|
Python
|
mit
| 12,936
|
from django.contrib.auth.models import User
from django.test import TestCase, Client
from django.urls import reverse
# Declaration of Username and Password
username = 'admin'
password = 'Test1234$'
"""
Method to replicate
~~~~~~~~~~~~~~~~~~~
1. Bring up a new instance of NearBeach (grab from fixtures)
2. Try and log in as the admin user
Expected Results
~~~~~~~~~~~~~~~~
User will log in with no issues; the system will create all of the user's permission sets and groups
"""
def login_user(c: object, self: object) -> object:
response = c.post(
reverse('login'),
self.credentials,
follow=True,
)
self.assertTrue(response.context['user'].is_active)
class NewInstanceLoginTest(TestCase):
fixtures = ['NearBeach_no_setup.json']
def setUp(self):
self.credentials = {
'username': username,
'password': password
}
def test_admin_login(self):
c = Client()
# User will be logged in
login_user(c, self)
# Make sure the admin user can open up the project
response = c.get(reverse('dashboard'))
self.assertEqual(response.status_code, 200)
|
robotichead/NearBeach
|
NearBeach/tests/tests_specific_bugs/test_new_instance.py
|
Python
|
mit
| 1,174
|
# correctness: 100%, performance: 0%
def solution(a):
l = len(a)
if l < 3: return reduce(lambda x, y: x * y, a)
products = []
for i in xrange(0, l):
for j in xrange(i+1, l):
for k in xrange (j+1, l):
products.append(a[i] * a[j] * a[k])
return max(products)
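# A faster alternative (illustrative sketch, not part of the original
# solution): after sorting, the maximal triple product is either the three
# largest values or the two smallest (possibly both negative) times the
# largest, giving O(n log n) instead of O(n^3).
def solution_fast(a):
    s = sorted(a)
    return max(s[-1] * s[-2] * s[-3], s[0] * s[1] * s[-1])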
if __name__ == '__main__':
array = [ -3 , 1 , 2 , -2 , 5 , 6]
print "result: ", solution(array)
|
nomuna/codility
|
Lesson_06/max_prod_three.py
|
Python
|
mit
| 390
|
# coding=utf8
"""
asm.py - (dis)assembly features.
(c) 2014 Samuel Groß
"""
from willie import web
from willie.module import commands, nickname_commands, example
from random import choice
from binascii import hexlify, unhexlify
import string
import re
import os
from subprocess import Popen, PIPE
@commands('disas', 'disas64', 'disassemble', 'disassemble64')
@example('.disas 66556689e590c9c3')
def disassemble(bot, trigger):
"""Disassemble x86 machine code."""
if not trigger.group(2):
return bot.reply('Nothing to disassemble')
try:
arg = trigger.group(2)
# remove all 0x
while "0x" in arg:
arg = arg.replace("0x","")
# remove everything except hex
arg = re.sub(r"[^a-fA-F0-9]", r"", arg)
code = unhexlify(arg)
except Exception:
return bot.say('Invalid hex sequence')
bits = 64 if '64' in trigger.group(1) else 32
filename = '/tmp/' + ''.join( choice(string.ascii_lowercase) for i in range(10)) + '.bin'
with open(filename, 'wb') as f:
f.write(code)
result = Popen(['ndisasm', '-b', str(bits), '-o', '0x1000', filename], stdout=PIPE).stdout.read()
os.remove(filename)
for line in result.split('\n'):
bot.say(line)
@commands('as', 'as64', 'assemble', 'assemble64')
@example('.as push ebp; mov ebp, esp; jmp 0x14')
def assemble(bot, trigger):
"""Assemble x86 instructions."""
code = trigger.group(2)
if not code:
return bot.reply('Nothing to assemble')
bits = 64 if '64' in trigger.group(1) else 32
filename = '/tmp/' + ''.join(choice(string.ascii_lowercase) for i in range(10)) + '.asm'
with open(filename, 'w') as f:
f.write('BITS %i\n' % bits + re.sub(r';\s*', ';\n', code))
p = Popen(['nasm', '-f', 'bin', '-o', filename[:-4], filename], stderr=PIPE)
p.wait()
os.remove(filename)
for line in p.stderr.read().split('\n'):
bot.say(line)
if p.returncode == 0:
with open(filename[:-4], 'rb') as f:
raw = f.read()
hexstr = hexlify(raw)
if hexstr:
bot.say(hexstr)
os.remove(filename[:-4])
def x86jmp(bot, instr):
"""Display information about a x86 conditional jump."""
if instr not in jxx:
return bot.say('I can\'t find anything about that instruction, sorry')
bot.say('%s : %s' % (instr, jxx[instr]))
def x86instr(bot, instr):
"""Display information about any x86 instruction thats no a conditional jump."""
raw = web.get('http://www.felixcloutier.com/x86/')
match = re.search('<tr><td><a href="./(?P<page>[A-Z:]*).html">%s</a></td><td>(?P<desc>[^<]*)</td></tr>' % instr, raw)
if not match:
return bot.say('I can\'t find anything about that instruction, sorry')
bot.say('%s : %s -- %s' % (instr, match.group('desc'), 'http://www.felixcloutier.com/x86/%s' % match.group('page')))
@commands('x86', 'instr', 'instruction')
def instruction(bot, trigger):
"""Display information about an x86 instruction."""
instr = trigger.group(2)
if not instr:
return bot.reply('Give me an instruction')
instr = instr.strip().upper()
if 'J' == instr[0] and not instr == 'JMP':
return x86jmp(bot, instr)
x86instr(bot, instr)
jxx = {
'JA' : 'Jump if above (CF=0 and ZF=0)',
'JAE' : 'Jump if above or equal (CF=0)',
'JB' : 'Jump if below (CF=1)',
'JBE' : 'Jump if below or equal (CF=1 or ZF=1)',
'JC' : 'Jump if carry (CF=1)',
'JCXZ' : 'Jump if CX register is 0',
'JECXZ': 'Jump if ECX register is 0',
'JRCXZ': 'Jump if RCX register is 0',
'JE' : 'Jump if equal (ZF=1)',
'JG' : 'Jump if greater (ZF=0 and SF=OF)',
'JGE' : 'Jump if greater or equal (SF=OF)',
'JL' : 'Jump if less (SF!=OF)',
'JLE' : 'Jump if less or equal (ZF=1 or SF!=OF)',
'JNA' : 'Jump if not above (CF=1 or ZF=1)',
'JNAE' : 'Jump if not above or equal (CF=1)',
'JNB' : 'Jump if not below (CF=0)',
'JNBE' : 'Jump if not below or equal (CF=0 and ZF=0)',
'JNC' : 'Jump if not carry (CF=0)',
'JNE' : 'Jump if not equal (ZF=0)',
'JNG' : 'Jump if not greater (ZF=1 or SF!=OF)',
'JNGE' : 'Jump if not greater or equal (SF!=OF)',
'JNL' : 'Jump if not less (SF=OF)',
'JNLE' : 'Jump if not less or equal (ZF=0 and SF=OF)',
'JNO' : 'Jump if not overflow (OF=0)',
'JNP' : 'Jump if not parity (PF=0)',
'JNS' : 'Jump if not sign (SF=0)',
'JNZ' : 'Jump if not zero (ZF=0)',
'JO' : 'Jump if overflow (OF=1)',
'JP' : 'Jump if parity (PF=1)',
'JPE' : 'Jump if parity even (PF=1)',
'JPO' : 'Jump if parity odd (PF=0)',
'JS' : 'Jump if sign (SF=1)'
}
|
saelo/willie-modules
|
asm.py
|
Python
|
mit
| 4,715
|
from setuptools import setup
setup(
name="sgf",
version="0.5",
description="Python library for reading and writing Smart Game Format",
license="MIT",
url="http://github.com/jtauber/sgf",
author="James Tauber",
author_email="jtauber@jtauber.com",
py_modules=["sgf"],
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Topic :: Games/Entertainment :: Board Games",
"Topic :: Utilities",
],
)
|
jtauber/sgf
|
setup.py
|
Python
|
mit
| 552
|
#!/usr/bin/env python
from math import *
def i321():
n = 0L
mn = 0L
tn = 0L
sn = 0L
n3 = 0L
n9 = 0L
n = 372889431
mn = 139046528497282623
tn = 1L + 8L * mn
print "M(%i) = %i\n" % (n, mn)
print "sqrt(%d) = %i\n" % (n, sqrt(n))
print "sqrt(%d) = %i\n" % (tn, sqrt(tn))
print "sqrt(%i) mod 1 = %i\n" % (tn, sqrt(tn) % 1)
print "%i mod 3 = %i\n %i mod 9 = %i\n" % (mn, mn % 3L, mn, mn % 9L)
i321()
|
gotclout/PythonJunk
|
3.py
|
Python
|
mit
| 429
|
# Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT, included in this distribution as LICENSE
""" """
from rowgenerators.appurl.file.file import FileUrl
from rowgenerators.exceptions import AppUrlError
class ZipUrlError(AppUrlError):
pass
class ZipUrl(FileUrl):
"""Zip URLS represent a zip file, as a local resource. """
match_priority = FileUrl.match_priority - 10
def __init__(self, url=None, downloader=None, **kwargs):
kwargs['resource_format'] = 'zip'
super().__init__(url, downloader=downloader, **kwargs)
@property
def target_file(self):
"""
Returns the target file, which is usually stored in the first slot in the ``fragment``,
but may have been overridden with a ``fragment_query``.
:return:
"""
if self._target_file:
return self._target_file
if self.fragment[0]:
return self.fragment[0]
for ext in ('csv', 'xls', 'xlsx'):
if self.resource_file.endswith('.' + ext + '.zip'):
return self.resource_file.replace('.zip', '')
# Want to return None, so get_file_from_zip can fall back to the first file in the archive.
return None
def join_target(self, tf):
"""
Joins the target ``tf`` by setting the value of the first slot of the fragment.
:param tf:
:return: a clone of this url with a new fragment.
"""
u = self.clone()
try:
tf = str(tf.path)
except:
pass
u.fragment = [tf, u.fragment[1]] # In case its a tuple, don't edit in place
return u
def get_resource(self):
return self
@property
def zip_dir(self):
"""Directory that files will be extracted to"""
from os.path import abspath
cache_dir = self.downloader.cache.getsyspath('/')
target_path = abspath(self.fspath)
if target_path.startswith(cache_dir): # Case when file is already in cache
return str(self.fspath) + '_d'
else: # file is not in cache; it may exist elsewhere.
return self.downloader.cache.getsyspath(target_path.lstrip('/'))+'_d'
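# Illustrative: for an archive cached at /cache/example.zip (hypothetical
# path), zip_dir is /cache/example.zip_d.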
def get_target(self):
"""
Extract the target file from the archive, store it in the cache, and return a file Url to the
cached file.
"""
from rowgenerators.appurl.url import parse_app_url
from zipfile import ZipFile
import io
from os.path import join, dirname
from rowgenerators.appurl.util import copy_file_or_flo, ensure_dir
assert self.zip_dir
zf = ZipFile(str(self.fspath))
self._target_file = ZipUrl.get_file_from_zip(self)
target_path = join(self.zip_dir, self.target_file)
ensure_dir(dirname(target_path))
with io.open(target_path, 'wb') as f, zf.open(self.target_file) as flo:
copy_file_or_flo(flo, f)
fq = self.fragment_query
if 'resource_format' in fq:
del fq['resource_format']
if 'resource_file' in fq:
del fq['resource_file']
tu = parse_app_url(target_path,
fragment_query=fq,
fragment=[self.target_segment, None],
scheme_extension=self.scheme_extension,
# Clear out the resource info so we don't get a ZipUrl
downloader=self.downloader
)
if self.target_format != tu.target_format:
try:
tu.target_format = self.target_format
except AttributeError:
pass # Some URLS don't allow resetting target type.
return tu
def list(self):
"""List the files in the referenced Zip file"""
from zipfile import ZipFile
if self.target_file:
return list(self.set_target_segment(tl.target_segment) for tl in self.get_target().list())
else:
real_files = ZipUrl.real_files_in_zf(ZipFile(str(self.fspath)))
return list(self.set_target_file(rf) for rf in real_files)
@staticmethod
def get_file_from_zip(url):
"""Given a file name that may be a regular expression, return the full name for the file
from a zip archive"""
from zipfile import ZipFile
import re
names = []
zf = ZipFile(str(url.fspath))
nl = list(ZipUrl.real_files_in_zf(zf)) # Old way, but maybe gets links? : list(zf.namelist())
tf = url.target_file
ts = url.target_segment
if not nl:
# sometimes real_files_in_zf doesn't work at all. I don't know why it
# works when it does, so I certainly don't know why it does not.
nl = list(zf.namelist())
# the target_file may be a string, or a regular expression
if tf:
names = list([e for e in nl if re.search(tf, e)
and not (e.startswith('__') or e.startswith('.'))
])
if len(names) > 0:
return names[0]
# The segment, if it exists, can only be an integer, and should probably be
# '0' to indicate the first file. This clause is probably a bad idea, since
# any other integer is probably meaningless.
if ts:
try:
return nl[int(ts)]
except (IndexError, ValueError):
pass
# Just return the first file in the archive.
if not tf and not ts:
return nl[0]
else:
raise ZipUrlError("Could not find file in Zip {} for target='{}' nor segment='{}'"
.format(url.fspath, url.target_file, url.target_segment))
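# Illustrative sketch (hypothetical archive): for a ZipUrl built from
# 'data.zip#table.csv', the target file 'table.csv' is treated as a regular
# expression and matched against the archive listing; with no fragment and
# no segment, the first real file in the archive is returned instead.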
@staticmethod
def real_files_in_zf(zf):
"""Return a list of internal paths of real files in a zip file, based on the 'external_attr' values"""
from os.path import basename
for e in zf.infolist():
# Get rid of __MACOS and .DS_whatever
if basename(e.filename).startswith('__') or basename(e.filename).startswith('.'):
continue
# I really don't understand external_attr, but no one else seems to either,
# so we're just hacking here.
# e.external_attr>>31&1 works when the archive has external attrs set, and a dir hierarchy
# e.external_attr==0 works in cases where there are no external attrs set
# e.external_attr==32 is true for some single-file archives.
if bool(e.external_attr >> 31 & 1 or e.external_attr == 0 or e.external_attr == 32):
yield e.filename
@classmethod
def _match(cls, url, **kwargs):
return url.resource_format == 'zip' or kwargs.get('force_archive')
|
CivicKnowledge/rowgenerators
|
rowgenerators/appurl/archive/zip.py
|
Python
|
mit
| 6,923
|
# -*- coding: utf-8 -*-
from Scraping4blog import Scraping4blog
import sys,os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../util')
from settings import SettingManager
def main():
conf = SettingManager()
instance = Scraping4blog(conf)
instance.run()
if __name__ == "__main__":
main()
|
yamanakahirofumi/mokobot
|
Scraping4blog/run.py
|
Python
|
mit
| 323
|
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
low=1<<31
profit=0
for p in prices:
if p<low:
low=p
if p-low>profit:
profit=p-low
return profit
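# Illustrative trace (hypothetical input): for prices = [7, 1, 5, 3, 6, 4],
# low ends up at 1 and profit at 5 (buy at 1, sell at 6).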
|
Tanych/CodeTracking
|
121-Best-Time-to-Buy-and-Sell-Stock/solution.py
|
Python
|
mit
| 316
|
import collections
import functools
import dpath
import six
import pprint
import stringcase
from pyutrack.logger import get_logger
def print_friendly(value, sep=', '):
if isinstance(value, six.string_types):
return value
if isinstance(value, collections.Iterable):
return sep.join(str(k) for k in value)
return str(value)
def remove_empty_querystring(url):
parsed = six.moves.urllib.parse.urlparse(url)
new_query = six.moves.urllib.parse.urlencode(
six.moves.urllib.parse.parse_qsl(parsed.query)
)
return six.moves.urllib.parse.ParseResult(
scheme=parsed.scheme, netloc=parsed.netloc, path=parsed.path,
params=parsed.params, query=new_query, fragment=None
).geturl()
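# Illustrative behaviour (hypothetical URL): parse_qsl drops blank values,
# so remove_empty_querystring('http://h/p?a=&b=1') returns 'http://h/p?b=1'.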
class Response(dict):
def __init__(self, data={}, aliases={}):
"""
:param data:
:param aliases:
"""
super(Response, self).__init__()
self.__aliases = aliases
self.update(data)
def update(self, other):
"""
:param other:
:return:
"""
if not other:
return
super(Response, self).update(
{
self.__resolve_alias(k): v
for k, v in other.items() if k != 'field'
}
)
for f in other.get('field', []):
super(Response, self).update({f['name']: f['value']})
def __resolve_alias(self, key):
while key in self.__aliases:
key = self.__aliases[key]
return key
def __getitem__(self, item):
return super(Response, self).__getitem__(
self.__resolve_alias(item)
)
def __setitem__(self, item, value):
return super(Response, self).__setitem__(
self.__resolve_alias(item), value
)
class Type(type):
class Association(collections.Iterable):
def __init__(self, type, parent, get_config, add_config, del_config):
self.type = type
self.parent = parent
self.get_config = get_config
self.add_config = add_config
self.del_config = del_config
self._cache = None
def __iadd__(self, other):
if not self.add_config:
raise NotImplementedError
url = self.add_config.get('url')
fields = self.parent.fields.copy()
method = getattr(
self.parent.connection, self.add_config.get('method')
)
for item in other:
fields.update({self.add_config.get('key'): item})
method(remove_empty_querystring(url % fields), {}, parse=False)
return self
def __len__(self):
return len(list(self.__iter__()))
def __isub__(self, other):
if not self.del_config:
raise NotImplementedError
url = self.del_config.get('url')
fields = self.parent.fields.copy()
for item in other:
fields.update({self.del_config.get('key'): item})
self.parent.connection.delete(
remove_empty_querystring(url % fields)
)
return self
def __iter__(self):
if not self._cache:
self._cache = self()
hydrate = self.get_config.get('hydrate', False)
if not isinstance(self._cache, collections.Iterable):
raise TypeError(
'%s->%s is not iterable' %
(self.parent.__class__.__name__, self.type.__name__)
)
else:
for v in self._cache:
yield self.type(
self.parent.connection, hydrate=hydrate, **v
)
def __call__(self):
fields = self.parent.fields.copy()
fields.update(self.get_config.get('kwargs', {}))
url = remove_empty_querystring(self.get_config.get('url') % fields)
return self.get_config.get(
'callback', lambda r: r
)(self.parent.connection.get(url))
class AssociationProperty(object):
def __init__(self, binding):
self.binding = binding
def __get__(self, instance, obj_type):
return self.binding(instance)
class Base(object):
__aliases__ = {}
def __init__(self, connection, hydrate=False, **fields):
"""
:param connection:
:param hydrate:
:param fields:
"""
aliases = super(Type.Base, self).__getattribute__('__aliases__')
super(Type.Base, self).__setattr__('connection', connection)
super(Type.Base,
self).__setattr__('fields', Response(fields, aliases))
if hydrate:
self._get(
self.__get__.get('callback', lambda response: response)
)
def _get_attribute(self, lookup):
try:
return self.fields[lookup]
except KeyError:
return dpath.util.get(self.fields, lookup)
def _update(self, callback, **kwargs):
resource_data = self.__update_data(kwargs)
self.connection.post(
self.__update_endpoint(), resource_data, parse=False
)
self.fields.update(resource_data)
def _delete(self, callback):
return callback(
self.connection.delete(self.__delete_endpoint(), parse=False)
)
@classmethod
def _create(cls, connection, callback, **kwargs):
return cls(
connection,
**callback(
connection.put(cls.__create_endpoint(**kwargs), kwargs)
)
)
@classmethod
def _list(cls, connection, callback, **kwargs):
hydrate = cls.__list__.get('hydrate', False)
data = [
cls(connection, hydrate=hydrate, **obj)
for obj in
callback(connection.get(cls.__list_endpoint(**kwargs)))
]
return data
def _get(self, callback):
self.fields.update(
Response(
callback(self.connection.get(self.__get_endpoint())),
self.__aliases__
)
)
def _get_association(self, params, **kwargs):
if 'kwargs' in params['get']:
params['get']['kwargs'].update(kwargs)
return Type.Association(
params['type'], self, params['get'],
params.get('add', {}), params.get('remove', {})
)
def __get_endpoint(self):
return remove_empty_querystring(
self.__get__.get('url') % self.fields
)
@classmethod
def __create_endpoint(cls, **kwargs):
return remove_empty_querystring(
cls.__create__.get('url') % kwargs
)
def __update_endpoint(self):
return remove_empty_querystring(
self.__update__.get('url') % self.fields
)
def __update_data(self, kwargs):
data = kwargs.copy()
fields = kwargs.keys()
data.update(
{
k: self.fields[k]
for k in fields if not kwargs[k] and self.fields.get(k)
}
)
return data
def __delete_endpoint(self):
return self.__delete__.get('url') % self.fields
@classmethod
def __list_endpoint(cls, **kwargs):
return cls.__list__.get('url') % kwargs
def format(self, template=None, oneline=False):
"""
:param template:
:param oneline:
:return:
"""
data_source = self.fields
if oneline:
fields = getattr(self, '__render_min__', self.__render__)
return '\t'.join(
str(data_source.get(k, getattr(self, k))) for k in fields
)
else:
fields = template or self.__render__
resp = ''
for k in fields:
try:
label = stringcase.sentencecase(k).ljust(20)
value = data_source.get(k, getattr(self, k, None))
resp += "%s : %s\n" % (label, print_friendly(value))
except KeyError:
get_logger().debug(
"No %s attribute found for %s",
k, self.__class__.__name__
)
return resp
def __repr__(self):
return pprint.pformat(self.fields)
def __str__(self):
return (
getattr(self, '__label__', None)
or ' '.join('%%(%s)s' % k for k in self.__render__)
) % self.fields
def __new__(mcs, name, bases, dct):
for verb in ['get', 'delete', 'update']:
if '__%s__' % verb in dct:
info = dct['__%s__' % verb]
fn = Type.__build_func(
verb,
info.get('args', (())),
info.get('kwargs', {}), {
'callback':
info.get('callback', lambda response: response)
}
)
fn.__doc__ = '%s %s' % (verb, name.lower())
dct[verb] = fn
for verb in ['create', 'list']:
if '__%s__' % verb in dct:
info = dct['__%s__' % verb]
fn = Type.__build_func(
verb, ('connection', ) + info.get('args', ()),
info.get('kwargs', {}), {
'callback':
info.get('callback', lambda response: response)
}
)
fn.__doc__ = '%s %s' % (verb, name.lower())
dct[verb] = classmethod(fn)
for association, params in dct.get('__associations__', {}).items():
fn = Type.__build_func(
'get_association', (),
params.get('get', {}).get('kwargs', {}), {'params': params}
)
fn.__doc__ = 'get %s %s' % (name.lower(), association)
dct[association] = Type.AssociationProperty(fn)
dct[association].__doc__ = '%s %s' % (name.lower(), association)
dct["get_%s" % association] = fn
for attribute, lookup in dct.get('__attributes__', {}).items():
getter = functools.partial(Type.Base._get_attribute, lookup=lookup)
getter.__doc__ = attribute
dct[attribute] = property(getter)
return super(Type, mcs).__new__(mcs, name, (mcs.Base, ) + bases, dct)
@staticmethod
def __build_func(verb, args, kwargs, _locals):
params = ['self']
params += ['%s' % stringcase.snakecase(k) for k in args]
params += [
'%s=%s' % (
stringcase.snakecase(k), "'%s'" % v
if isinstance(v, six.string_types) else v
) for k, v in kwargs.items()
]
largs = list(_locals.keys()) + list(args) + list(kwargs.keys())
fn = eval(
'lambda %s: self._%s(%s)' % (
','.join(params), verb, ','.join(
['%s=%s' % (k, stringcase.snakecase(k)) for k in largs]
)
), _locals
)
return fn
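# Illustrative sketch of the generated wrapper: for verb='get',
# args=('id',) and no kwargs, __build_func evaluates roughly
#   lambda self, id: self._get(callback=..., id=id)
# i.e. a thin forwarder from the public verb onto the underscore-prefixed
# implementation method.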
|
alisaifee/pyutrack
|
pyutrack/util.py
|
Python
|
mit
| 11,714
|
# this program requires the 32 bit version of Python!!
import os
import glob
import math
import subprocess
import re
import sys
import string
from decimal import Decimal
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
from scipy.ndimage import median_filter
#from pyds9 import DS9
import argparse
import pandas as pd
import ch # custom callHorizons library
import dateutil
from datetime import datetime
from datetime import timedelta
from astropy.coordinates import SkyCoord
from astropy.time import Time
import shutil
#
# START SETTINGS
# MODIFY THESE FIELDS AS NEEDED!
#
# input path *with* ending forward slash
input_path = './'
# output path *with* ending forward slash
sex_output_path = './firstlook/'
# bad path
bad_path = './bad/'
# suffix for output files, if any...
sex_output_suffix = '.sex'
# log file name
log_fname = './log.firstlook.txt'
# path to sextractor executable and config files (incl. the filenames!)
sextractor_bin_fname = os.path.dirname(
os.path.realpath(__file__)) + '\\' + 'sextractor.exe'
sextractor_cfg_fname = os.path.dirname(
os.path.realpath(__file__)) + '\\' + 'sexcurve.sex'
sextractor_param_fname = os.path.dirname(
os.path.realpath(__file__)) + '\\' + 'sexcurve.param'
sextractor_filter_fname = os.path.dirname(
os.path.realpath(__file__)) + '\\' + 'sexcurve.conv'
# tolerance for object matching
dRa = 0.00062
dDec = 0.00062
# target/comp list
comps_fname = './comps.in.txt'
targets_out_fname = './targets.out.csv'
counts_out_fname = './counts.out.csv'
# mask file that identifies bad pixels
bad_pixels_fname = './bad_pixels.txt'
cleaned_output_path = './cor/'
# observatory code
obs_code = 'G52'
# panstarrs
# panstarrs ref magnitude
pso_ref_mag = 'rPSFMag'
# panstarrs max magnitude
pso_max_mag = 16
# panstarrs min magnitude
pso_min_mag = 0
#
# END SETTINGS
#
# logger
def logme(msg):
log.write(msg + "\n")
print msg
return
def exit():
logme('Program execution halted.')
log.close()
os.sys.exit(1)
# run external process
def runSubprocess(command_array):
# command array is array with command and all required parameters
try:
with open(os.devnull, 'w') as fp:
sp = subprocess.Popen(command_array, stderr=fp, stdout=fp)
# logme('Running subprocess ("%s" %s)...'%(' '.join(command_array), sp.pid))
sp.wait()
output, error = sp.communicate()
return (output, error, sp.pid)
except:
logme('Error. Subprocess ("%s" %d) failed.' %
(' '.join(command_array), sp.pid))
return ('', '', 0)
# get current ra/dec of target asteroid
def getAsteroidRaDec(name, dt):
ra = ''
dec = ''
start = dt
end = dt + timedelta(minutes=1)
# get ephemerides for target in JPL Horizons from start to end times
result = ch.query(name.upper(), smallbody=True)
result.set_epochrange(start.isoformat(), end.isoformat(), '1m')
result.get_ephemerides(obs_code)
if result and len(result['EL']):
ra = result['RA'][0]
dec = result['DEC'][0]
else:
logme('Error. Asteroid (%s) not found for %s.' %
(name, start.isoformat()))
exit()
return (ra, dec)
def jdToYYMMDD_HHMMSS(jd):
t = Time(jd, format='mjd', scale='utc')
return t.iso
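# Note: the input is interpreted as a Modified Julian Date (format='mjd').
# Illustrative: jdToYYMMDD_HHMMSS(58849.0) should give '2020-01-01 00:00:00.000'.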
# open log file
log = open(log_fname, 'a+')
# set up the command line argument parser
parser = argparse.ArgumentParser(
description='Perform lightcurve photometry using sextractor.')
# parser.add_argument('asteroid', metavar='asteroid#', type=int,
# help='Target asteroid number')
args = parser.parse_args()
# make sure input files and folder exist
inputs = [input_path, sextractor_bin_fname, sextractor_cfg_fname,
sextractor_param_fname, sextractor_filter_fname, comps_fname]
for input in inputs:
if not os.path.exists(input_path):
logme('Error. The file or path (%s) does not exist.' % input)
exit()
# does output directory exist? If not, create it...
outputs = [sex_output_path, cleaned_output_path, bad_path]
for output in outputs:
try:
os.mkdir(output)
except:
pass
image_data = []
# get a list of all FITS files in the input directory
fits_files = glob.glob(input_path+'*.fits')+glob.glob(input_path+'*.fit')
# loop through all qualifying files and perform sextraction
for fits_file in sorted(fits_files):
fits_data = fits.open(fits_file)
header = fits_data[0].header
wcs = WCS(header)
airmass = header['AIRMASS']
try:
dt_obs = dateutil.parser.parse(header['DATE-OBS'])
except:
logme('Error. Invalid observation date found in %s.' % fits_file)
exit()
try:
naxis1 = header['NAXIS1']
naxis2 = header['NAXIS2']
except:
logme('Error. Invalid CCD pixel size found in %s.' % fits_file)
exit()
try:
ra = header['CRVAL1']
dec = header['CRVAL2']
except:
logme('Error. Invalid RA/DEC found in %s.' % fits_file)
exit()
try:
JD = header['MJD-OBS']
except KeyError:
JD = header['JD']
# calculate image corners in ra/dec
ra1, dec1 = wcs.all_pix2world(0, 0, 0)
ra2, dec2 = wcs.all_pix2world(naxis1, naxis2, 0)
# calculate search radius in degrees from the center!
c1 = SkyCoord(ra1, dec1, unit="deg")
c2 = SkyCoord(ra2, dec2, unit="deg")
# estimate radius of FOV in arcmin
r_arcmin = '%f' % (c1.separation(c2).deg*60/2)
logme("Sextracting %s" % (fits_file))
output_file = sex_output_path + \
fits_file.replace('\\', '/').rsplit('/', 1)[1]
output_file = '%s%s.txt' % (output_file, sex_output_suffix)
# add input filename, output filename, airmass, and jd to sex_file list
image_data.append(
{'image': fits_file, 'sex': output_file, 'jd': JD, 'airmass': airmass, 'ra': ra, 'dec': dec, 'dt_obs': dt_obs, 'r_arcmin': r_arcmin})
# sextract this file
(output, error, id) = runSubprocess([sextractor_bin_fname, fits_file, '-c', sextractor_cfg_fname, '-catalog_name',
output_file, '-parameters_name', sextractor_param_fname, '-filter_name', sextractor_filter_fname])
if error:
logme('Error. Sextractor failed: %s' % output)
exit()
logme('Sextracted %d files.' % len(image_data))
# build list of comparison stars in comps_fname using
# PanSTARRS Stack Object Catalog Search
logme('Searching for comparison stars in the PANSTARRS catalog (ra=%s deg, dec=%s deg, radius=%s min)...' %
(image_data[0]['ra'], image_data[0]['dec'], image_data[0]['r_arcmin']))
pso_url_base = 'http://archive.stsci.edu/panstarrs/stackobject/search.php'
pso_url_parms = '?resolver=Resolve&radius=%s&ra=%s&dec=%s&equinox=J2000&nDetections=&selectedColumnsCsv=objname%%2Cobjid%%2Cramean%%2Cdecmean%%2Cgpsfmag%%2Crpsfmag%%2Cipsfmag' + \
'&coordformat=dec&outputformat=CSV_file&skipformat=on' + \
'&max_records=50001&action=Search'
url = pso_url_base + \
pso_url_parms % (image_data[0]['r_arcmin'], image_data[0]['ra'], image_data[0]
['dec'])
# get the results of the REST query
comps = pd.read_csv(url)
if len(comps) <= 0:
logme('Error. No comparison stars found!')
exit()
# remove dupes, keep first
comps.drop_duplicates(subset=['objName'], keep='first', inplace=True)
# make sure magnitudes are treated as floats
comps[pso_ref_mag] = pd.to_numeric(comps[pso_ref_mag], errors='coerce')
# remove spaces from obj names
comps['objName'] = comps['objName'].str.replace('PSO ', '')
# filter based on ref (r?) magnitude!
comps = comps.query("%s > %f & %s < %f" %
(pso_ref_mag, pso_min_mag, pso_ref_mag, pso_max_mag))
if len(comps) <= 0:
logme('Error. No comparison stars meet the criteria (%s > %f & %s < %f)!' %
(pso_ref_mag, pso_min_mag, pso_ref_mag, pso_max_mag))
exit()
logme('A total of %d comparison star(s) met the criteria (%s > %f & %s < %f)!' %
(len(comps), pso_ref_mag, pso_min_mag, pso_ref_mag, pso_max_mag))
# output objects to comps_fname in sextract input format
comps_for_sex = comps[['raMean', 'decMean', 'objName']]
comps_for_sex.to_csv(comps_fname, sep=' ', index=False, header=False)
# read ra/dec from target/comp stars list
# this is legacy and duplicative, but we will go with it
object_data = []
sfile = file('%s' % comps_fname, 'rt')
lines = [s for s in sfile if len(s) > 2 and s[0] != '#']
sfile.close()
count = 0
target_index = -1
for index, l in enumerate(lines):
spl = l.split()
ra = float(spl[0])
dec = float(spl[1])
name = spl[2]
object_data.append(
{'index': index, 'ra': ra, 'dec': dec, 'object_name': name, 'found': True})
# add the asteroid to the object list
# we don't know the ra/dec yet until we get the date/time from the FITS file
#target_index = index + 1
# object_data.append({'index': target_index, 'ra': '',
# 'dec': '', 'object_name': '%d' % args.asteroid, 'found': True})
logme('Searching for %d objects in sextracted data.' % len(object_data))
ofile = file(counts_out_fname, 'wt')
# look for target/comp matches in sextracted files
counts = []
images = []
for image in image_data:
num_found = 0
lines = [s for s in file(image['sex'], 'rt') if len(s) > 2]
# unless object is target, stop looking for it if it was not found in one of the images
for s in (x for x in object_data):
found = False
# assign the asteroid ra/dec
# if s['object_name'] == '%d' % args.asteroid:
# # get ra/dec of asteroid at the time image was taken
# (s['ra'], s['dec']) = getAsteroidRaDec(
# s['object_name'], image['dt_obs'])
for l in lines:
spl = l.split()
ra = float(spl[0])
dec = float(spl[1])
if abs(ra-s['ra']) < dRa and abs(dec-s['dec']) < dDec:
num_found += 1
break
images.append(image['image'])
counts.append(num_found)
ofile.write('%s,%d\n' % (image['sex'], num_found))
ofile.close()
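# flag outlier images: take the modal star count as the expected value and
# treat any image whose count falls more than one standard deviation below
# the mode as bad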
mode = np.bincount(counts).argmax()
std = np.array(counts).std()
mask = np.array(counts) >= mode - std
logme('A total of %d stars were found in %d (of %d) images.' %
      (mode, len(np.array(images)[mask]), len(images)))
mask = np.array(counts) < mode - std
bad_images = np.array(images)[mask]
for image in bad_images:
head, tail = os.path.split(image)
shutil.copy(image, '%s%s' % (bad_path, tail))
logme('A total of %d images were copied to %s.' %
(len(bad_images), bad_path))
| mcnowinski/various-and-sundry | lightcurve/windows/firstlook.py | Python | mit | 10,661 |
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="familysrc",
parent_name="funnelarea.hoverlabel.font",
**kwargs
):
super(FamilysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/funnelarea/hoverlabel/font/_familysrc.py | Python | mit | 456 |
# being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.common import AbstractMethodError
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if len(colors) != num_colors:
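        # recycle the palette: tile whole copies, then append a partial cycle,
        # so that exactly num_colors entries are returned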
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
    format that makes it easy to break down into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
        boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
            if j != 0:
ax.yaxis.set_visible(False)
if i != n-1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
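        # the first diagonal subplot is a histogram whose y-scale differs from
        # the shared scatter axes; remap its tick locations so the tick labels
        # still read in data coordinates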
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _gca():
import matplotlib.pyplot as plt
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
-----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
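    # one anchor point per feature, equally spaced around the unit circle;
    # each observation is plotted at the weighted mean of these anchors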
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=com.pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Parameters:
-----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns:
--------
ax: Matplotlib axis object
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
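    # each row of the data is mapped to a finite Fourier series:
    # f(t) = x1/sqrt(2) + x2*sin(t) + x3*cos(t) + x4*sin(2t) + x5*cos(2t) + ...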
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
harmonic = 1.0
for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):
result += (x_even * sin(harmonic * x) +
x_odd * cos(harmonic * x))
harmonic += 1.0
if len(amplitudes) % 2 != 0:
result += amplitudes[-1] * sin(harmonic * x)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = [f(t) for t in x]
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, **kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, linewidth=1, color='black')
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters:
-----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + %s)" % lag)
ax.scatter(y1, y2, **kwds)
return ax
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
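    # r(h): sample autocorrelation at lag h, normalised by the lag-0
    # autocovariance c0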
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
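    # two-sided standard normal quantiles; under the white-noise hypothesis
    # the autocorrelations should stay within +/- z / sqrt(n) at the 95% and
    # 99% confidence levels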
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the users should do the visibility setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
if isinstance(self._default_rot, dict):
self.rot = self._default_rot[self.kind]
else:
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else self.plt.rcParams['axes.grid']
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if com.is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError("Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
if self.sort_columns:
columns = com._try_sort(data.columns)
else:
columns = data.columns
for col in columns:
if keep_index is True:
yield col, data[col]
else:
yield col, data[col].values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._post_plot_logic()
self._adorn_subplots()
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
            # if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, 'left_ax'):
            # if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not com.is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (com.is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, Series):
label = self.label
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
numeric_data = data.convert_objects()._get_numeric_data()
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic(self):
pass
def _adorn_subplots(self):
to_adorn = self.axes
if len(self.axes) > 0:
all_axes = self._get_axes()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in to_adorn:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.axes[0].set_title(self.title)
labels = [com.pprint_thing(key) for key in self.data.index]
labels = dict(zip(range(len(self.data.index)), labels))
for ax in self.axes:
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = com.pprint_thing(name)
return name
else:
stringified = map(com.pprint_thing,
self.data.columns.names)
return ','.join(stringified)
def _add_legend_handle(self, handle, label, index=None):
        if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
            if leg is not None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
                if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'left_ax', None) or
getattr(ax, 'right_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.order())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
def _is_datetype(self):
index = self.data.index
return (isinstance(index, (PeriodIndex, DatetimeIndex)) or
index.inferred_type in ('datetime', 'date', 'datetime64',
'time'))
def _get_plot_function(self):
'''
Returns the matplotlib plotting function (plot or errorbar) based on
the presence of errorbar keywords.
'''
errorbar = any(e is not None for e in self.errors.values())
def plotf(ax, x, y, style=None, **kwds):
mask = com.isnull(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if errorbar:
return self.plt.Axes.errorbar(ax, x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is unsupported
if style is not None:
args = (ax, x, y, style)
else:
args = (ax, x, y)
return self.plt.Axes.plot(*args, **kwds)
return plotf
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if any(x is not None for x in name):
name = ','.join([com.pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = com.pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
return self.data.columns[i] in self.secondary_y
def _get_style(self, i, col_name):
style = ''
if self.subplots:
style = 'k'
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[i]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(col_name, style)
else:
style = self.style
return style or None
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _maybe_add_color(self, colors, kwds, style, i):
has_color = 'color' in kwds or self.colormap is not None
if has_color and (style is None or re.match('[a-z]+', style) is None):
kwds['color'] = colors[i % len(colors)]
def _parse_errorbars(self, label, err):
'''
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
'''
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex_axis(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif com.is_list_like(err):
if com.is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif com.is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
return err
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_axes(self):
return self.axes[0].get_figure().get_axes()
def _get_axes_layout(self):
axes = self._get_axes()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class ScatterPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, c=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
            raise ValueError('scatter requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.x = x
self.y = y
self.c = c
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib as mpl
        mpl_ge_1_3_1 = LooseVersion(str(mpl.__version__)) >= LooseVersion('1.3.1')
import matplotlib.pyplot as plt
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = com.is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = plt.cm.get_cmap(cmap)
if c is None:
c_values = self.plt.rcParams['patch.facecolor']
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if mpl_ge_1_3_1:
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class HexBinPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, C=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
            raise ValueError('hexbin requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.x = x
self.y = y
self.C = C
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib.pyplot as plt
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _make_legend(self):
pass
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class LinePlot(MPLPlot):
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _index_freq(self):
freq = getattr(self.data.index, 'freq', None)
if freq is None:
freq = getattr(self.data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(self.data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _is_dynamic_freq(self, freq):
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq is not None and self._no_base(freq)
def _no_base(self, freq):
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(self.data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = self.data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _use_dynamic_x(self):
freq = self._index_freq()
ax = self._get_ax(0)
ax_freq = getattr(ax, 'freq', None)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
return (freq is not None) and self._is_dynamic_freq(freq)
    def _is_ts_plot(self):
        # slightly deceptive: the name suggests a property of the data, but
        # this actually decides whether the tsplot machinery will be used
        return not self.x_compat and self.use_index and self._use_dynamic_x()
def _make_plot(self):
self._initialize_prior(len(self.data))
if self._is_ts_plot():
data = self._maybe_convert_index(self.data)
x = data.index # dummy, not used
plotf = self._get_ts_plot_function()
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._get_plot_function()
it = self._iter_data()
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
style = self._get_style(i, label)
kwds = self.kwds.copy()
self._maybe_add_color(colors, kwds, style, i)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i, **kwds)
self._add_legend_handle(newlines[0], label, index=i)
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
def _get_stacked_values(self, y, label):
if self.stacked:
if (y >= 0).all():
return self._pos_prior + y
elif (y <= 0).all():
return self._neg_prior + y
else:
                raise ValueError('When stacked is True, each column must be either '
                                 'all positive or all negative. '
                                 '{0} contains both positive and negative values'.format(label))
else:
return y
def _get_plot_function(self):
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
            # column_num is used to get the target column from plotf in line and area plots
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
self._update_prior(y)
return lines
return plotf
def _get_ts_plot_function(self):
from pandas.tseries.plotting import tsplot
plotf = self._get_plot_function()
def _plot(ax, x, data, style=None, **kwds):
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
lines = tsplot(data, plotf, ax=ax, style=style, **kwds)
return lines
return _plot
def _initialize_prior(self, n):
self._pos_prior = np.zeros(n)
self._neg_prior = np.zeros(n)
def _update_prior(self, y):
if self.stacked and not self.subplots:
            # tsplot resample may change data length
if len(self._pos_prior) != len(y):
self._initialize_prior(len(y))
if (y >= 0).all():
self._pos_prior += y
elif (y <= 0).all():
self._neg_prior += y
def _maybe_convert_index(self, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
ax = self._get_ax(0)
freq = getattr(ax, 'freq', None)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data.index = data.index.to_period(freq=freq)
return data
def _post_plot_logic(self):
df = self.data
condition = (not self._use_dynamic_x()
and df.index.is_all_dates
and not self.subplots
or (self.subplots and self.sharex))
index_name = self._get_index_name()
for ax in self.axes:
if condition:
# irregular TS rotated 30 deg. by default
# probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
def _get_plot_function(self):
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
else:
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
if (y >= 0).all():
start = self._pos_prior
elif (y <= 0).all():
start = self._neg_prior
else:
start = np.zeros(len(y))
                if 'color' not in kwds:
kwds['color'] = lines[0].get_color()
self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds)
self._update_prior(y)
return lines
return plotf
def _add_legend_handle(self, handle, label, index=None):
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
def _post_plot_logic(self):
LinePlot._post_plot_logic(self)
if self.ylim is None:
if (self.data >= 0).all().all():
for ax in self.axes:
ax.set_ylim(0, None)
elif (self.data <= 0).all().all():
for ax in self.axes:
ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
_default_rot = {'bar': 90, 'barh': 0}
def __init__(self, data, **kwargs):
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
        self.log = kwargs.pop('log', False)
MPLPlot.__init__(self, data, **kwargs)
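        # tickoffset shifts the bars relative to the integer tick positions;
        # lim_offset widens the axis limits when bars are edge-aligned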
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs['align'] == 'edge':
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs['align'] == 'edge':
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
def _args_adjust(self):
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if com.is_list_like(self.left):
self.left = np.array(self.left)
def _get_plot_function(self):
if self.kind == 'bar':
def f(ax, x, y, w, start=None, **kwds):
start = start + self.bottom
return ax.bar(x, y, w, bottom=start, log=self.log, **kwds)
elif self.kind == 'barh':
            def f(ax, x, y, w, start=None, **kwds):
                start = start + self.left
                return ax.barh(x, y, w, left=start, log=self.log, **kwds)
else:
raise ValueError("BarPlot kind must be either 'bar' or 'barh'")
return f
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
bar_f = self._get_plot_function()
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label)
if (('yerr' in kwds) or ('xerr' in kwds)) \
and (kwds.get('ecolor') is None):
kwds['ecolor'] = mpl.rcParams['xtick.color']
start = 0
if self.log and (y >= 1).all():
start = 1
if self.subplots:
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
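            # positive values stack upward from pos_prior, negative values
            # stack downward from neg_prior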
start = np.where(mask, pos_prior, neg_prior)
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = bar_f(ax, self.ax_pos + (i + 0.5) * w, y, w,
start=start, label=label, **kwds)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self):
for ax in self.axes:
if self.use_index:
str_index = [com.pprint_thing(key) for key in self.data.index]
else:
str_index = [com.pprint_thing(key) for key in
range(self.data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
if self.kind == 'bar':
ax.set_xlim((s_edge, e_edge))
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(str_index)
if name is not None and self.use_index:
ax.set_xlabel(name)
elif self.kind == 'barh':
# horizontal bars
ax.set_ylim((s_edge, e_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(str_index)
if name is not None and self.use_index:
ax.set_ylabel(name)
else:
raise NotImplementedError(self.kind)
@property
def orientation(self):
if self.kind == 'bar':
return 'vertical'
elif self.kind == 'barh':
return 'horizontal'
else:
raise NotImplementedError(self.kind)
class HistPlot(LinePlot):
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if com.is_integer(self.bins):
# create common bin edge
values = self.data.convert_objects()._get_numeric_data()
values = np.ravel(values)
values = values[~com.isnull(values)]
hist, self.bins = np.histogram(values, bins=self.bins,
range=self.kwds.get('range', None),
weights=self.kwds.get('weights', None))
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _get_plot_function(self):
def plotf(ax, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.bins) - 1)
y = y[~com.isnull(y)]
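            # stack this column's bars on top of the counts accumulated from
            # the columns drawn so far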
bottom = self._pos_prior + self.bottom
# ignore style
n, bins, patches = self.plt.Axes.hist(ax, y, bins=self.bins,
bottom=bottom, **kwds)
self._update_prior(n)
return patches
return plotf
def _make_plot(self):
plotf = self._get_plot_function()
colors = self._get_colors()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
style = self._get_style(i, label)
label = com.pprint_thing(label)
kwds = self.kwds.copy()
kwds['label'] = label
self._maybe_add_color(colors, kwds, style, i)
if style is not None:
kwds['style'] = style
artists = plotf(ax, y, column_num=i, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _post_plot_logic(self):
if self.orientation == 'horizontal':
for ax in self.axes:
                ax.set_xlabel('Frequency')
else:
for ax in self.axes:
                ax.set_ylabel('Frequency')
@property
def orientation(self):
if self.kwds.get('orientation', None) == 'horizontal':
return 'horizontal'
else:
return 'vertical'
class KdePlot(HistPlot):
orientation = 'vertical'
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
def _args_adjust(self):
pass
def _get_ind(self, y):
if self.ind is None:
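            # default evaluation grid: 1000 points covering the data range
            # extended by half its span on each side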
sample_range = max(y) - min(y)
ind = np.linspace(min(y) - 0.5 * sample_range,
max(y) + 0.5 * sample_range, 1000)
else:
ind = self.ind
return ind
def _get_plot_function(self):
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
f = MPLPlot._get_plot_function(self)
def plotf(ax, y, style=None, column_num=None, **kwds):
y = remove_na(y)
if LooseVersion(spv) >= '0.11.0':
gkde = gaussian_kde(y, bw_method=self.bw_method)
else:
gkde = gaussian_kde(y)
if self.bw_method is not None:
msg = ('bw_method was added in Scipy 0.11.0.' +
' Scipy version in use is %s.' % spv)
warnings.warn(msg)
ind = self._get_ind(y)
y = gkde.evaluate(ind)
lines = f(ax, ind, y, style=style, **kwds)
return lines
return plotf
def _post_plot_logic(self):
for ax in self.axes:
ax.set_ylabel('Density')
class PiePlot(MPLPlot):
_layout_type = 'horizontal'
def __init__(self, data, kind=None, **kwargs):
data = data.fillna(value=0)
if (data < 0).any().any():
raise ValueError("{0} doesn't allow negative values".format(kind))
MPLPlot.__init__(self, data, kind=kind, **kwargs)
def _args_adjust(self):
self.grid = False
self.logy = False
self.logx = False
self.loglog = False
def _validate_color_args(self):
pass
def _make_plot(self):
self.kwds.setdefault('colors', self._get_colors(num_colors=len(self.data),
color_kwds='colors'))
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
if label is not None:
label = com.pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ''
else:
return label
idx = [com.pprint_thing(v) for v in self.data.index]
labels = kwds.pop('labels', idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [blank_labeler(label, value) for
label, value in zip(labels, y)]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get('autopct', None) is not None:
patches, texts, autotexts = results
else:
patches, texts = results
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for p, l in zip(patches, leglabels):
self._add_legend_handle(p, l)
class BoxPlot(LinePlot):
_layout_type = 'horizontal'
_valid_return_types = (None, 'axes', 'dict', 'both')
# namedtuple to hold results
BP = namedtuple("Boxplot", ['ax', 'lines'])
def __init__(self, data, return_type=None, **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
            # Disable label ax sharing. Otherwise, all subplots show the last column label
if self.orientation == 'vertical':
self.sharex = False
else:
self.sharey = False
def _get_plot_function(self):
def plotf(ax, y, column_num=None, **kwds):
if y.ndim == 2:
y = [remove_na(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na(y)
bp = ax.boxplot(y, **kwds)
if self.return_type == 'dict':
return bp, bp
elif self.return_type == 'both':
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
return plotf
def _validate_color_args(self):
if 'color' in self.kwds:
if self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
self.color = self.kwds.pop('color')
if isinstance(self.color, dict):
valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
for key, values in compat.iteritems(self.color):
if key not in valid_keys:
raise ValueError("color dict contains invalid key '{0}' "
"The key must be either {1}".format(key, valid_keys))
else:
self.color = None
# get standard colors for default
colors = _get_standard_colors(num_colors=3,
colormap=self.colormap,
color=None)
# use 2 colors by default, for box/whisker and median
# flier colors isn't needed here
# because it can be specified by ``sym`` kw
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = 'k' # mpl default
def _get_colors(self, num_colors=None, color_kwds='color'):
pass
def maybe_color_bp(self, bp):
if isinstance(self.color, dict):
boxes = self.color.get('boxes', self._boxes_c)
whiskers = self.color.get('whiskers', self._whiskers_c)
medians = self.color.get('medians', self._medians_c)
caps = self.color.get('caps', self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
from matplotlib.artist import setp
setp(bp['boxes'], color=boxes, alpha=1)
setp(bp['whiskers'], color=whiskers, alpha=1)
setp(bp['medians'], color=medians, alpha=1)
setp(bp['caps'], color=caps, alpha=1)
def _make_plot(self):
plotf = self._get_plot_function()
if self.subplots:
self._return_obj = compat.OrderedDict()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
ret, bp = plotf(ax, y, column_num=i, **kwds)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [com.pprint_thing(label)]
self._set_ticklabels(ax, label)
else:
y = self.data.values.T
ax = self._get_ax(0)
kwds = self.kwds.copy()
ret, bp = plotf(ax, y, column_num=0, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, y in self._iter_data()]
labels = [com.pprint_thing(l) for l in labels]
if not self.use_index:
labels = [com.pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
def _set_ticklabels(self, ax, labels):
if self.orientation == 'vertical':
ax.set_xticklabels(labels)
else:
ax.set_yticklabels(labels)
def _make_legend(self):
pass
def _post_plot_logic(self):
pass
@property
def orientation(self):
if self.kwds.get('vert', True):
return 'vertical'
else:
return 'horizontal'
@property
def result(self):
if self.return_type is None:
return super(BoxPlot, self).result
else:
return self._return_obj
# kinds supported by both dataframe and series
_common_kinds = ['line', 'bar', 'barh', 'kde', 'density', 'area', 'hist', 'box']
# kinds supported by dataframe
_dataframe_kinds = ['scatter', 'hexbin']
# kinds supported only by series or dataframe single column
_series_kinds = ['pie']
_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds
_plot_klass = {'line': LinePlot, 'bar': BarPlot, 'barh': BarPlot,
'kde': KdePlot, 'hist': HistPlot, 'box': BoxPlot,
'scatter': ScatterPlot, 'hexbin': HexBinPlot,
'area': AreaPlot, 'pie': PiePlot}
def _plot(data, x=None, y=None, subplots=False,
ax=None, kind='line', **kwds):
kind = _get_standard_kind(kind.lower().strip())
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
from pandas import DataFrame
if kind in _dataframe_kinds:
if isinstance(data, DataFrame):
plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,
kind=kind, **kwds)
else:
raise ValueError("plot kind %r can only be used for data frames"
% kind)
elif kind in _series_kinds:
if isinstance(data, DataFrame):
if y is None and subplots is False:
msg = "{0} requires either y column or 'subplots=True'"
raise ValueError(msg.format(kind))
elif y is not None:
if com.is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
# converted to series actually. copy to not modify
data = data[y].copy()
data.index.name = y
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
if isinstance(data, DataFrame):
if x is not None:
if com.is_integer(x) and not data.columns.holds_integer():
x = data.columns[x]
data = data.set_index(x)
if y is not None:
if com.is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
label = kwds['label'] if 'label' in kwds else y
series = data[y].copy() # Don't modify
series.name = label
for kw in ['xerr', 'yerr']:
if (kw in kwds) and \
(isinstance(kwds[kw], string_types) or
com.is_integer(kwds[kw])):
try:
kwds[kw] = data[kwds[kw]]
except (IndexError, KeyError, TypeError):
pass
data = series
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
df_kind = """- 'scatter' : scatter plot
- 'hexbin' : hexbin plot"""
series_kind = ""
df_coord = """x : label or position, default None
y : label or position, default None
Allows plotting of one column versus another"""
series_coord = ""
df_unique = """stacked : boolean, default False in line and
bar plots, and True in area plot. If True, create stacked plot.
sort_columns : boolean, default False
Sort column names to determine plot ordering
secondary_y : boolean or sequence, default False
Whether to plot on the secondary y-axis
If a list/tuple, which columns to plot on secondary y-axis"""
series_unique = """label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right"""
df_ax = """ax : matplotlib axes object, default None
subplots : boolean, default False
Make separate subplots for each column
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
        is passed in. Be aware that passing in both an ax and sharex=True
        will alter all x axis labels for all axes in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
layout : tuple (optional)
(rows, columns) for the layout of subplots"""
series_ax = """ax : matplotlib axes object
If not passed, uses gca()"""
df_note = """- If `kind` = 'scatter' and the argument `c` is the name of a dataframe
column, the values of that column are used to color each point.
- If `kind` = 'hexbin', you can control the size of the bins with the
`gridsize` argument. By default, a histogram of the counts around each
`(x, y)` point is computed. You can specify alternative aggregations
by passing values to the `C` and `reduce_C_function` arguments.
`C` specifies the value at each `(x, y)` point and `reduce_C_function`
is a function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`)."""
series_note = ""
_shared_doc_df_kwargs = dict(klass='DataFrame', klass_kind=df_kind,
klass_coord=df_coord, klass_ax=df_ax,
klass_unique=df_unique, klass_note=df_note)
_shared_doc_series_kwargs = dict(klass='Series', klass_kind=series_kind,
klass_coord=series_coord, klass_ax=series_ax,
klass_unique=series_unique,
klass_note=series_note)
_shared_docs['plot'] = """
Make plots of %(klass)s using matplotlib / pylab.
Parameters
----------
data : %(klass)s
%(klass_coord)s
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
%(klass_kind)s
%(klass_ax)s
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string
Title to use for the plot
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
layout : tuple (optional)
(rows, columns) for the layout of the plot
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for detail.
xerr : same types as yerr.
%(klass_unique)s
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
%(klass_note)s
"""
@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)
def plot_frame(data, x=None, y=None, kind='line', ax=None, # Dataframe unique
subplots=False, sharex=None, sharey=False, layout=None, # Dataframe unique
figsize=None, use_index=True, title=None, grid=None,
legend=True, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
secondary_y=False, sort_columns=False, # Dataframe unique
**kwds):
return _plot(data, kind=kind, x=x, y=y, ax=ax,
subplots=subplots, sharex=sharex, sharey=sharey,
layout=layout, figsize=figsize, use_index=use_index,
title=title, grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
secondary_y=secondary_y, sort_columns=sort_columns,
**kwds)
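# Hedged usage sketch (``plot_frame`` backs ``DataFrame.plot``); the frame
# below is illustrative, not part of the original module:
# >>> import numpy as np
# >>> from pandas import DataFrame
# >>> df = DataFrame(np.random.randn(10, 2), columns=['a', 'b'])
# >>> ax = df.plot(kind='bar', stacked=True)        # single matplotlib axes
# >>> axes = df.plot(subplots=True, layout=(1, 2))  # array of axes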
@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs)
def plot_series(data, kind='line', ax=None, # Series unique
figsize=None, use_index=True, title=None, grid=None,
legend=False, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
label=None, secondary_y=False, # Series unique
**kwds):
import matplotlib.pyplot as plt
"""
If no axes is specified, check whether there are existing figures
If there is no existing figures, _gca() will
create a figure with the default figsize, causing the figsize=parameter to
be ignored.
"""
if ax is None and len(plt.get_fignums()) > 0:
ax = _gca()
ax = MPLPlot._get_ax_layer(ax)
return _plot(data, kind=kind, ax=ax,
figsize=figsize, use_index=use_index, title=title,
grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
label=label, secondary_y=secondary_y,
**kwds)
_shared_docs['boxplot'] = """
Make a box plot from DataFrame column optionally grouped by some columns or
other inputs
Parameters
----------
data : the pandas object holding the data
column : column name or list of names, or vector
Can be any valid input to groupby
by : string or sequence
Column in the DataFrame to group by
ax : Matplotlib axes object, optional
fontsize : int or string
rot : label rotation angle
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
layout : tuple (optional)
(rows, columns) for the layout of the plot
return_type : {None, 'axes', 'dict', 'both'}, default None
    The kind of object to return. When ``by`` is None, ``None`` currently
    falls back to 'dict' with a FutureWarning; the default will change to
    'axes' in a future release.
    'dict' returns a dictionary whose values are the matplotlib Lines of
    the boxplot;
    'axes' returns the matplotlib axes the boxplot is drawn on;
    'both' returns a namedtuple with the axes and dict.
    When grouping with ``by``, a dict mapping columns to ``return_type``
    is returned; if ``return_type`` is None, a NumPy array of axes is
    returned instead.
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
lines : dict
ax : matplotlib Axes
(ax, lines): namedtuple
Notes
-----
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
"""
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
rot=0, grid=True, figsize=None, layout=None, return_type=None,
**kwds):
# validate return_type:
if return_type not in BoxPlot._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
from pandas import Series, DataFrame
if isinstance(data, Series):
data = DataFrame({'x': data})
column = 'x'
def _get_colors():
return _get_standard_colors(color=kwds.get('color'), num_colors=1)
def maybe_color_bp(bp):
if 'color' not in kwds:
from matplotlib.artist import setp
setp(bp['boxes'], color=colors[0], alpha=1)
setp(bp['whiskers'], color=colors[0], alpha=1)
setp(bp['medians'], color=colors[2], alpha=1)
def plot_group(keys, values, ax):
keys = [com.pprint_thing(x) for x in keys]
values = [remove_na(v) for v in values]
bp = ax.boxplot(values, **kwds)
if kwds.get('vert', 1):
ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
else:
ax.set_yticklabels(keys, rotation=rot, fontsize=fontsize)
maybe_color_bp(bp)
# Return axes in multiplot case, maybe revisit later # 985
if return_type == 'dict':
return bp
elif return_type == 'both':
return BoxPlot.BP(ax=ax, lines=bp)
else:
return ax
colors = _get_colors()
if column is None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
result = _grouped_plot_by_column(plot_group, data, columns=columns,
by=by, grid=grid, figsize=figsize,
ax=ax, layout=layout,
return_type=return_type)
else:
if layout is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
if return_type is None:
msg = ("\nThe default value for 'return_type' will change to "
"'axes' in a future release.\n To use the future behavior "
"now, set return_type='axes'.\n To keep the previous "
"behavior and silence this warning, set "
"return_type='dict'.")
warnings.warn(msg, FutureWarning)
return_type = 'dict'
if ax is None:
ax = _gca()
data = data._get_numeric_data()
if columns is None:
columns = data.columns
else:
data = data[columns]
result = plot_group(columns, data.values.T, ax)
ax.grid(grid)
return result
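# Hedged usage sketch; data and group labels are illustrative. With ``by``,
# the result is a dict mapping column name -> ``return_type`` object:
# >>> import numpy as np
# >>> from pandas import DataFrame
# >>> df = DataFrame({'vals': np.random.randn(20), 'grp': ['a', 'b'] * 10})
# >>> res = boxplot(df, column='vals', by='grp', return_type='axes')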
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
try:
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(rot)
fig = ax.get_figure()
fig.subplots_adjust(bottom=0.2)
except Exception: # pragma: no cover
pass
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
**kwargs):
"""
Make a scatter plot from two DataFrame columns
Parameters
----------
data : DataFrame
x : Column name for the x-axis values
y : Column name for the y-axis values
ax : Matplotlib axis object
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
Returns
-------
fig : matplotlib.Figure
"""
import matplotlib.pyplot as plt
    # workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwargs.setdefault('c', plt.rcParams['patch.facecolor'])
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
ax.grid(grid)
return fig
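# Hedged usage sketch; the frame and column names are illustrative:
# >>> import numpy as np
# >>> from pandas import DataFrame
# >>> df = DataFrame(np.random.randn(30, 2), columns=['x', 'y'])
# >>> fig = scatter_plot(df, 'x', 'y', grid=True)  # returns the Figure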
def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
"""
Draw histogram of the DataFrame's series using matplotlib / pylab.
Parameters
----------
data : DataFrame
column : string or sequence
If passed, will be used to limit data to a subset of columns
by : object, optional
If passed, then used to form histograms for separate groups
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
ax : matplotlib axes object, default None
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
        is passed in. Be aware that passing in both an ax and sharex=True
        will alter all x axis labels for all subplots in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
figsize : tuple
The size of the figure to create in inches by default
layout: (optional) a tuple (rows, columns) for the layout of the histograms
bins: integer, default 10
Number of histogram bins to be used
kwds : other plotting keyword arguments
To be passed to hist function
"""
if by is not None:
axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize,
sharex=sharex, sharey=sharey, layout=layout, bins=bins,
xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot,
**kwds)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, Index)):
column = [column]
data = data[column]
data = data._get_numeric_data()
naxes = len(data.columns)
fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
sharex=sharex, sharey=sharey, figsize=figsize,
layout=layout)
_axes = _flatten(axes)
for i, col in enumerate(com._try_sort(data.columns)):
ax = _axes[i]
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
ax.grid(grid)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(wspace=0.3, hspace=0.3)
return axes
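# Hedged usage sketch (``hist_frame`` is attached as ``DataFrame.hist``);
# the frame is illustrative:
# >>> import numpy as np
# >>> from pandas import DataFrame
# >>> df = DataFrame(np.random.randn(50, 2), columns=['x', 'y'])
# >>> axes = df.hist(bins=20, layout=(1, 2))  # 2-d array of axes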
def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, figsize=None, bins=10, **kwds):
"""
Draw histogram of the input series using matplotlib
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups
ax : matplotlib axis object
If not passed, uses gca()
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
figsize : tuple, default None
figure size in inches by default
bins: integer, default 10
Number of histogram bins to be used
kwds : keywords
To be passed to the actual plotting function
Notes
-----
See matplotlib documentation online for more on this
"""
import matplotlib.pyplot as plt
if by is None:
if kwds.get('layout', None) is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else
plt.figure(figsize=figsize))
if (figsize is not None and tuple(figsize) !=
tuple(fig.get_size_inches())):
fig.set_size_inches(*figsize, forward=True)
if ax is None:
ax = fig.gca()
elif ax.get_figure() != fig:
raise AssertionError('passed axis not bound to passed figure')
values = self.dropna().values
ax.hist(values, bins=bins, **kwds)
ax.grid(grid)
axes = np.array([ax])
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
else:
if 'figure' in kwds:
raise ValueError("Cannot pass 'figure' when using the "
"'by' argument, since a new 'Figure' instance "
"will be created")
axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize, bins=bins,
xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot,
**kwds)
if hasattr(axes, 'ndim'):
if axes.ndim == 1 and len(axes) == 1:
return axes[0]
return axes
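# Hedged usage sketch (``hist_series`` is attached as ``Series.hist``):
# >>> import numpy as np
# >>> from pandas import Series
# >>> s = Series(np.random.randn(100))
# >>> ax = s.hist(bins=15, grid=False)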
def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
layout=None, sharex=False, sharey=False, rot=90, grid=True,
xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,
**kwargs):
"""
Grouped histogram
Parameters
----------
data: Series/DataFrame
column: object, optional
by: object, optional
ax: axes, optional
bins: int, default 50
figsize: tuple, optional
layout: optional
sharex: boolean, default False
sharey: boolean, default False
rot: int, default 90
grid: bool, default True
kwargs: dict, keyword arguments passed to matplotlib.Axes.hist
Returns
-------
axes: collection of Matplotlib Axes
"""
def plot_group(group, ax):
ax.hist(group.dropna().values, bins=bins, **kwargs)
xrot = xrot or rot
fig, axes = _grouped_plot(plot_group, data, column=column,
by=by, sharex=sharex, sharey=sharey, ax=ax,
figsize=figsize, layout=layout, rot=rot)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
hspace=0.5, wspace=0.3)
return axes
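# Hedged usage sketch: one histogram per group key. ``df`` is assumed to
# have a numeric column 'x' and a grouping column 'grp':
#   axes = grouped_hist(df, column='x', by='grp', bins=10)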
def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, ax=None, figsize=None,
layout=None, **kwds):
"""
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots :
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group
column : column name or list of names, or vector
Can be any valid input to groupby
fontsize : int or string
rot : label rotation angle
grid : Setting this to True will show the grid
ax : Matplotlib axis object, default None
figsize : A tuple (width, height) in inches
layout : tuple (optional)
(rows, columns) for the layout of the plot
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
dict of key/value = group key/DataFrame.boxplot return value
    or DataFrame.boxplot return value in case ``subplots=False``
Examples
--------
>>> import pandas
>>> import numpy as np
>>> import itertools
>>>
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pandas.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pandas.DataFrame(data, columns=list('ABCD'), index=index)
>>>
>>> grouped = df.groupby(level='lvl1')
>>> boxplot_frame_groupby(grouped)
>>>
>>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
>>> boxplot_frame_groupby(grouped, subplots=False)
"""
if subplots is True:
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, squeeze=False,
ax=ax, sharex=False, sharey=True, figsize=figsize,
layout=layout)
axes = _flatten(axes)
ret = compat.OrderedDict()
for (key, group), ax in zip(grouped, axes):
d = group.boxplot(ax=ax, column=column, fontsize=fontsize,
rot=rot, grid=grid, **kwds)
ax.set_title(com.pprint_thing(key))
ret[key] = d
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
else:
from pandas.tools.merge import concat
keys, frames = zip(*grouped)
if grouped.axis == 0:
df = concat(frames, keys=keys, axis=1)
else:
if len(frames) > 1:
df = frames[0].join(frames[1::])
else:
df = frames[0]
ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,
grid=grid, ax=ax, figsize=figsize, layout=layout, **kwds)
return ret
def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
figsize=None, sharex=True, sharey=True, layout=None,
rot=0, ax=None, **kwargs):
from pandas import DataFrame
if figsize == 'default':
# allowed to specify mpl default with 'default'
warnings.warn("figsize='default' is deprecated. Specify figure"
"size by tuple instead", FutureWarning)
figsize = None
grouped = data.groupby(by)
if column is not None:
grouped = grouped[column]
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, figsize=figsize,
sharex=sharex, sharey=sharey, ax=ax,
layout=layout)
_axes = _flatten(axes)
for i, (key, group) in enumerate(grouped):
ax = _axes[i]
if numeric_only and isinstance(group, DataFrame):
group = group._get_numeric_data()
plotf(group, ax, **kwargs)
ax.set_title(com.pprint_thing(key))
return fig, axes
def _grouped_plot_by_column(plotf, data, columns=None, by=None,
numeric_only=True, grid=False,
figsize=None, ax=None, layout=None, return_type=None,
**kwargs):
grouped = data.groupby(by)
if columns is None:
if not isinstance(by, (list, tuple)):
by = [by]
columns = data._get_numeric_data().columns.difference(by)
naxes = len(columns)
fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True,
figsize=figsize, ax=ax, layout=layout)
_axes = _flatten(axes)
result = compat.OrderedDict()
for i, col in enumerate(columns):
ax = _axes[i]
gp_col = grouped[col]
keys, values = zip(*gp_col)
re_plotf = plotf(keys, values, ax, **kwargs)
ax.set_title(col)
ax.set_xlabel(com.pprint_thing(by))
result[col] = re_plotf
ax.grid(grid)
# Return axes in multiplot case, maybe revisit later # 985
if return_type is None:
result = axes
byline = by[0] if len(by) == 1 else by
fig.suptitle('Boxplot grouped by %s' % byline)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
return result
def table(ax, data, rowLabels=None, colLabels=None,
**kwargs):
"""
Helper function to convert DataFrame and Series to matplotlib.table
Parameters
----------
`ax`: Matplotlib axes object
`data`: DataFrame or Series
data for table contents
    `kwargs`: keywords, optional
        keyword arguments which are passed to matplotlib.table.table.
        If `rowLabels` or `colLabels` is not specified, the data index or
        column names will be used.
Returns
-------
matplotlib table object
"""
from pandas import DataFrame
if isinstance(data, Series):
data = DataFrame(data, columns=[data.name])
elif isinstance(data, DataFrame):
pass
else:
raise ValueError('Input data must be DataFrame or Series')
if rowLabels is None:
rowLabels = data.index
if colLabels is None:
colLabels = data.columns
cellText = data.values
import matplotlib.table
table = matplotlib.table.table(ax, cellText=cellText,
rowLabels=rowLabels, colLabels=colLabels, **kwargs)
return table
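# Hedged usage sketch for ``table``; the frame and ``loc`` value are
# illustrative:
# >>> import matplotlib.pyplot as plt
# >>> from pandas import DataFrame
# >>> fig, ax = plt.subplots()
# >>> tbl = table(ax, DataFrame({'a': [1, 2]}), loc='upper right')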
def _get_layout(nplots, layout=None, layout_type='box'):
if layout is not None:
if not isinstance(layout, (tuple, list)) or len(layout) != 2:
raise ValueError('Layout must be a tuple of (rows, columns)')
nrows, ncols = layout
# Python 2 compat
ceil_ = lambda x: int(ceil(x))
        if nrows == -1 and ncols > 0:
layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols)
elif ncols == -1 and nrows > 0:
layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows))
elif ncols <= 0 and nrows <= 0:
msg = "At least one dimension of layout must be positive"
raise ValueError(msg)
if nrows * ncols < nplots:
raise ValueError('Layout of %sx%s must be larger than required size %s' %
(nrows, ncols, nplots))
return layout
if layout_type == 'single':
return (1, 1)
elif layout_type == 'horizontal':
return (1, nplots)
elif layout_type == 'vertical':
return (nplots, 1)
layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}
try:
return layouts[nplots]
except KeyError:
k = 1
while k ** 2 < nplots:
k += 1
if (k - 1) * k >= nplots:
return k, (k - 1)
else:
return k, k
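# Worked examples of the rules above (derived by hand from the code):
#   _get_layout(5)                 -> (3, 2)  # smallest k with k*k >= 5, trimmed
#   _get_layout(5, layout=(-1, 2)) -> (3, 2)  # rows inferred as ceil(5 / 2)
#   _get_layout(3, layout_type='horizontal') -> (1, 3)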
# copied from matplotlib/pyplot.py and modified for pandas.plotting
def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, ax=None, layout=None, layout_type='box', **fig_kw):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
    naxes : int
      Number of required axes. Axes in excess of this count are set
      invisible. Default is nrows * ncols.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharey : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
      - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
        array of Axis objects.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
If False, no squeezing at all is done: the returned axis object is always
a 2-d array containing Axis instances, even if it ends up being 1x1.
    subplot_kw : dict
      Dict with keywords passed to the add_subplot() call used to create
      each subplot.
ax : Matplotlib axis object, optional
layout : tuple
Number of rows and columns of the subplot grid.
If not specified, calculated from naxes and layout_type
    layout_type : {'box', 'horizontal', 'vertical'}, default 'box'
Specify how to layout the subplot grid.
fig_kw : Other keyword arguments to be passed to the figure() call.
Note that all keywords not recognized above will be
automatically included here.
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one subplot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
if com.is_list_like(ax):
ax = _flatten(ax)
if layout is not None:
warnings.warn("When passing multiple axes, layout keyword is ignored", UserWarning)
if sharex or sharey:
warnings.warn("When passing multiple axes, sharex and sharey are ignored."
"These settings must be specified when creating axes", UserWarning)
if len(ax) == naxes:
fig = ax[0].get_figure()
return fig, ax
else:
raise ValueError("The number of passed axes must be {0}, the same as "
"the output plot".format(naxes))
fig = ax.get_figure()
# if ax is passed and a number of subplots is 1, return ax as it is
if naxes == 1:
if squeeze:
return fig, ax
else:
return fig, _flatten(ax)
else:
warnings.warn("To output multiple subplots, the figure containing the passed axes "
"is being cleared", UserWarning)
fig.clear()
nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)
nplots = nrows * ncols
    # Create empty object array to hold all axes. It's easiest to make it 1-d
    # so we can just append subplots upon creation, and then reshape at the end.
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw['sharex'] = ax0
if sharey:
subplot_kw['sharey'] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
kwds = subplot_kw.copy()
# Set sharex and sharey to None for blank/dummy axes, these can
# interfere with proper axis limits on the visible axes if
# they share axes e.g. issue #7528
if i >= naxes:
kwds['sharex'] = None
kwds['sharey'] = None
ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)
axarr[i] = ax
if naxes != nplots:
for ax in axarr[naxes:]:
ax.set_visible(False)
_handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots == 1:
axes = axarr[0]
else:
axes = axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
axes = axarr.reshape(nrows, ncols)
return fig, axes
def _remove_xlabels_from_axis(ax):
for label in ax.get_xticklabels():
label.set_visible(False)
try:
# set_visible will not be effective if
        # minor axis has NullLocator and NullFormatter (default)
import matplotlib.ticker as ticker
if isinstance(ax.xaxis.get_minor_locator(), ticker.NullLocator):
ax.xaxis.set_minor_locator(ticker.AutoLocator())
if isinstance(ax.xaxis.get_minor_formatter(), ticker.NullFormatter):
ax.xaxis.set_minor_formatter(ticker.FormatStrFormatter(''))
for label in ax.get_xticklabels(minor=True):
label.set_visible(False)
    except Exception:  # pragma: no cover
pass
ax.xaxis.get_label().set_visible(False)
def _remove_ylabels_from_axis(ax):
for label in ax.get_yticklabels():
label.set_visible(False)
try:
import matplotlib.ticker as ticker
if isinstance(ax.yaxis.get_minor_locator(), ticker.NullLocator):
ax.yaxis.set_minor_locator(ticker.AutoLocator())
if isinstance(ax.yaxis.get_minor_formatter(), ticker.NullFormatter):
ax.yaxis.set_minor_formatter(ticker.FormatStrFormatter(''))
for label in ax.get_yticklabels(minor=True):
label.set_visible(False)
    except Exception:  # pragma: no cover
pass
ax.yaxis.get_label().set_visible(False)
def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
if nplots > 1:
        # first find out the ax layout, so that we can correctly handle 'gaps'
        layout = np.zeros((nrows + 1, ncols + 1), dtype=bool)
for ax in axarr:
layout[ax.rowNum, ax.colNum] = ax.get_visible()
if sharex and nrows > 1:
for ax in axarr:
# only the last row of subplots should get x labels -> all other off
# layout handles the case that the subplot is the last in the column,
                # because there is no subplot/gap below it.
if not layout[ax.rowNum+1, ax.colNum]:
continue
_remove_xlabels_from_axis(ax)
        if sharey and ncols > 1:
            for ax in axarr:
                # only the first column should get y labels -> set all other to off
                # as we only have labels in the first column and we always have
                # a subplot there, we can skip the layout test
                if ax.is_first_col():
                    continue
                _remove_ylabels_from_axis(ax)
def _flatten(axes):
if not com.is_list_like(axes):
return np.array([axes])
elif isinstance(axes, (np.ndarray, Index)):
return axes.ravel()
return np.array(axes)
def _get_all_lines(ax):
lines = ax.get_lines()
if hasattr(ax, 'right_ax'):
lines += ax.right_ax.get_lines()
if hasattr(ax, 'left_ax'):
lines += ax.left_ax.get_lines()
return lines
def _get_xlim(lines):
left, right = np.inf, -np.inf
for l in lines:
x = l.get_xdata(orig=False)
left = min(x[0], left)
right = max(x[-1], right)
return left, right
def _set_ticks_props(axes, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None):
import matplotlib.pyplot as plt
for ax in _flatten(axes):
if xlabelsize is not None:
plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
if xrot is not None:
plt.setp(ax.get_xticklabels(), rotation=xrot)
if ylabelsize is not None:
plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
if yrot is not None:
plt.setp(ax.get_yticklabels(), rotation=yrot)
return axes
if __name__ == '__main__':
# import pandas.rpy.common as com
# sales = com.load_data('sanfrancisco.home.sales', package='nutshell')
# top10 = sales['zip'].value_counts()[:10].index
# sales2 = sales[sales.zip.isin(top10)]
# _ = scatter_plot(sales2, 'squarefeet', 'price', by='zip')
# plt.show()
import matplotlib.pyplot as plt
import pandas.tools.plotting as plots
import pandas.core.frame as fr
reload(plots)
reload(fr)
from pandas.core.frame import DataFrame
data = DataFrame([[3, 6, -5], [4, 8, 2], [4, 9, -6],
[4, 9, -3], [2, 5, -1]],
columns=['A', 'B', 'C'])
data.plot(kind='barh', stacked=True)
plt.show()
|
bdh1011/wau
|
venv/lib/python2.7/site-packages/pandas/tools/plotting.py
|
Python
|
mit
| 117,602
|
from __future__ import absolute_import
import logging
from uuid import uuid1
import dockets.queue
import dockets.error_queue
from .base import Queue
class DocketsQueue(Queue):
def __init__(self, redis_client, queue_name, wait_time, timeout):
self.queue = dockets.queue.Queue(redis_client,
queue_name,
use_error_queue=True,
wait_time=wait_time,
timeout=timeout)
def make_error_queue(self):
return DocketsErrorQueue(self.queue)
def _push(self, item):
push_kwargs = {}
if 'delay' in item:
push_kwargs['delay'] = item['delay'] or None
return self.queue.push(item, **push_kwargs)
def _push_batch(self, items):
result = []
for item in items:
try:
self._push(item)
result.append((item, True))
except Exception:
logging.exception("Error pushing item {}".format(item))
result.append((item, False))
return result
def _pop(self):
envelope = self.queue.pop()
if envelope:
return envelope, envelope.get('item')
return None, None
def _pop_batch(self, batch_size):
batch = []
for _ in range(batch_size):
envelope, item = self._pop()
if envelope:
batch.append((envelope, item))
else:
break
return batch
def _touch(self, envelope, seconds):
"""Dockets heartbeat is consumer-level and does not
utilize the envelope or seconds arguments."""
return self.queue._heartbeat()
def _complete(self, envelope):
return self.queue.complete(envelope)
def _complete_batch(self, envelopes):
# Dockets doesn't return any information from complete, so here we go...
for envelope in envelopes:
self._complete(envelope)
return [(envelope, True) for envelope in envelopes]
def _flush(self):
while True:
envelope, item = self._pop()
if envelope is None:
break
self._complete(envelope)
def _stats(self):
return {'available': self.queue.queued(),
'in_flight': self.queue.working(),
'delayed': self.queue.delayed()}
class DocketsErrorQueue(Queue):
FIFO = False
SUPPORTS_DELAY = False
RECLAIMS_TO_BACK_OF_QUEUE = False
def __init__(self, parent_dockets_queue):
self.queue = dockets.error_queue.ErrorQueue(parent_dockets_queue)
def _push(self, item):
"""This error ID dance is Dockets-specific, since we need the ID
to interface with the hash error queue. Other backends shouldn't
need to do this and should use the envelope properly instead."""
try:
error_id = item['error']['id']
except KeyError:
            logging.warning('No error ID found for item, will generate and add one: {}'.format(item))
error_id = str(uuid1())
item.setdefault('error', {})['id'] = error_id
return self.queue.queue_error_item(error_id, item)
def _push_batch(self, items):
result = []
for item in items:
try:
self._push(item)
result.append((item, True))
except Exception:
logging.exception("Error pushing item {}".format(item))
result.append((item, False))
return result
def _pop(self):
"""Dockets Error Queues are not actually queues, they're hashes. There's no way
for us to implement a pure pop that doesn't expose us to the risk of dropping
data. As such, we're going to return the first error in that hash but not actually
remove it until we call `_complete` later on. This keeps our data safe but may
deliver errors multiple times. That should be okay."""
error_ids = self.queue.error_ids()
if error_ids:
error_id = error_ids[0]
error = self.queue.error(error_id)
return error, error
return None, None
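    # Hedged sketch of the pop/complete pairing described in _pop's
    # docstring; ``q`` and ``handle`` are illustrative names:
    #   envelope, item = q._pop()
    #   if envelope is not None:
    #       handle(item)             # process the error item
    #       q._complete(envelope)    # only now is it removed from the hash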
def _pop_batch(self, batch_size):
"""Similar to _pop, but returns a list of tuples containing batch_size pops
from our queue.
Again, this does not actually pop from the queue until we call _complete on
each queued item"""
error_ids = self.queue.error_ids()
batch = []
if error_ids:
for error_id in error_ids[:batch_size]:
error = self.queue.error(error_id)
batch.append((error, error))
return batch
def _touch(self, envelope, seconds):
return None
def _complete(self, envelope):
error_id = envelope['error']['id']
if not error_id:
raise AttributeError('Error item has no id field: {}'.format(envelope))
return self.queue.delete_error(error_id)
def _complete_batch(self, envelopes):
return [(envelope, bool(self._complete(envelope))) for envelope in envelopes]
def _flush(self):
for error_id in self.queue.error_ids():
self.queue.delete_error(error_id)
def _stats(self):
return {'available': self.queue.length()}
|
gamechanger/deferrable
|
deferrable/queue/dockets.py
|
Python
|
mit
| 5,390
|
import six
import warnings
from .. import errors
from ..utils.utils import (
convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
format_environment, normalize_links, parse_bytes, parse_devices,
split_command, version_gte, version_lt,
)
from .base import DictType
from .healthcheck import Healthcheck
class LogConfigTypesEnum(object):
_values = (
'json-file',
'syslog',
'journald',
'gelf',
'fluentd',
'none'
)
JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
class LogConfig(DictType):
types = LogConfigTypesEnum
def __init__(self, **kwargs):
log_driver_type = kwargs.get('type', kwargs.get('Type'))
config = kwargs.get('config', kwargs.get('Config')) or {}
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
super(LogConfig, self).__init__({
'Type': log_driver_type,
'Config': config
})
@property
def type(self):
return self['Type']
@type.setter
def type(self, value):
self['Type'] = value
@property
def config(self):
return self['Config']
def set_config_value(self, key, value):
self.config[key] = value
def unset_config(self, key):
if key in self.config:
del self.config[key]
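# Hedged usage sketch (illustrative values, not part of the original file):
#   lc = LogConfig(type=LogConfig.types.JSON, config={'max-size': '1g'})
#   lc.set_config_value('max-file', '3')
#   lc.unset_config('max-file')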
class Ulimit(DictType):
def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
if not isinstance(name, six.string_types):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
super(Ulimit, self).__init__({
'Name': name,
'Soft': soft,
'Hard': hard
})
@property
def name(self):
return self['Name']
@name.setter
def name(self, value):
self['Name'] = value
@property
def soft(self):
return self.get('Soft')
@soft.setter
def soft(self, value):
self['Soft'] = value
@property
def hard(self):
return self.get('Hard')
@hard.setter
def hard(self, value):
self['Hard'] = value
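# Hedged usage sketch; per the checks above, name must be a string and
# soft/hard must be integers:
#   nofile = Ulimit(name='nofile', soft=1024, hard=2048)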
class HostConfig(dict):
def __init__(self, version, binds=None, port_bindings=None,
lxc_conf=None, publish_all_ports=False, links=None,
privileged=False, dns=None, dns_search=None,
volumes_from=None, network_mode=None, restart_policy=None,
cap_add=None, cap_drop=None, devices=None, extra_hosts=None,
read_only=None, pid_mode=None, ipc_mode=None,
security_opt=None, ulimits=None, log_config=None,
mem_limit=None, memswap_limit=None, mem_reservation=None,
kernel_memory=None, mem_swappiness=None, cgroup_parent=None,
group_add=None, cpu_quota=None, cpu_period=None,
blkio_weight=None, blkio_weight_device=None,
device_read_bps=None, device_write_bps=None,
device_read_iops=None, device_write_iops=None,
oom_kill_disable=False, shm_size=None, sysctls=None,
tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
cpuset_cpus=None, userns_mode=None, pids_limit=None,
isolation=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
if memswap_limit is not None:
self['MemorySwap'] = parse_bytes(memswap_limit)
if mem_reservation:
if version_lt(version, '1.21'):
raise host_config_version_error('mem_reservation', '1.21')
self['MemoryReservation'] = parse_bytes(mem_reservation)
if kernel_memory:
if version_lt(version, '1.21'):
raise host_config_version_error('kernel_memory', '1.21')
self['KernelMemory'] = parse_bytes(kernel_memory)
if mem_swappiness is not None:
if version_lt(version, '1.20'):
raise host_config_version_error('mem_swappiness', '1.20')
if not isinstance(mem_swappiness, int):
raise host_config_type_error(
'mem_swappiness', mem_swappiness, 'int'
)
self['MemorySwappiness'] = mem_swappiness
if shm_size is not None:
if isinstance(shm_size, six.string_types):
shm_size = parse_bytes(shm_size)
self['ShmSize'] = shm_size
if pid_mode:
if version_lt(version, '1.24') and pid_mode != 'host':
raise host_config_value_error('pid_mode', pid_mode)
self['PidMode'] = pid_mode
if ipc_mode:
self['IpcMode'] = ipc_mode
if privileged:
self['Privileged'] = privileged
if oom_kill_disable:
if version_lt(version, '1.20'):
                raise host_config_version_error('oom_kill_disable', '1.20')
self['OomKillDisable'] = oom_kill_disable
if oom_score_adj:
if version_lt(version, '1.22'):
raise host_config_version_error('oom_score_adj', '1.22')
if not isinstance(oom_score_adj, int):
raise host_config_type_error(
'oom_score_adj', oom_score_adj, 'int'
)
self['OomScoreAdj'] = oom_score_adj
if publish_all_ports:
self['PublishAllPorts'] = publish_all_ports
if read_only is not None:
self['ReadonlyRootfs'] = read_only
if dns_search:
self['DnsSearch'] = dns_search
if network_mode:
self['NetworkMode'] = network_mode
elif network_mode is None and version_gte(version, '1.20'):
self['NetworkMode'] = 'default'
if restart_policy:
if not isinstance(restart_policy, dict):
raise host_config_type_error(
'restart_policy', restart_policy, 'dict'
)
self['RestartPolicy'] = restart_policy
if cap_add:
self['CapAdd'] = cap_add
if cap_drop:
self['CapDrop'] = cap_drop
if devices:
self['Devices'] = parse_devices(devices)
if group_add:
if version_lt(version, '1.20'):
raise host_config_version_error('group_add', '1.20')
self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
if dns is not None:
self['Dns'] = dns
if dns_opt is not None:
if version_lt(version, '1.21'):
raise host_config_version_error('dns_opt', '1.21')
self['DnsOptions'] = dns_opt
if security_opt is not None:
if not isinstance(security_opt, list):
raise host_config_type_error(
'security_opt', security_opt, 'list'
)
self['SecurityOpt'] = security_opt
if sysctls:
if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict')
self['Sysctls'] = {}
for k, v in six.iteritems(sysctls):
self['Sysctls'][k] = six.text_type(v)
if volumes_from is not None:
if isinstance(volumes_from, six.string_types):
volumes_from = volumes_from.split(',')
self['VolumesFrom'] = volumes_from
if binds is not None:
self['Binds'] = convert_volume_binds(binds)
if port_bindings is not None:
self['PortBindings'] = convert_port_bindings(port_bindings)
if extra_hosts is not None:
if isinstance(extra_hosts, dict):
extra_hosts = [
'{0}:{1}'.format(k, v)
for k, v in sorted(six.iteritems(extra_hosts))
]
self['ExtraHosts'] = extra_hosts
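            # e.g. extra_hosts={'db': '10.0.0.1'} becomes ['db:10.0.0.1']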
if links is not None:
self['Links'] = normalize_links(links)
if isinstance(lxc_conf, dict):
formatted = []
for k, v in six.iteritems(lxc_conf):
formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted
if lxc_conf is not None:
self['LxcConf'] = lxc_conf
if cgroup_parent is not None:
self['CgroupParent'] = cgroup_parent
if ulimits is not None:
if not isinstance(ulimits, list):
raise host_config_type_error('ulimits', ulimits, 'list')
self['Ulimits'] = []
for l in ulimits:
if not isinstance(l, Ulimit):
l = Ulimit(**l)
self['Ulimits'].append(l)
if log_config is not None:
if not isinstance(log_config, LogConfig):
if not isinstance(log_config, dict):
raise host_config_type_error(
'log_config', log_config, 'LogConfig'
)
log_config = LogConfig(**log_config)
self['LogConfig'] = log_config
if cpu_quota:
if not isinstance(cpu_quota, int):
raise host_config_type_error('cpu_quota', cpu_quota, 'int')
if version_lt(version, '1.19'):
raise host_config_version_error('cpu_quota', '1.19')
self['CpuQuota'] = cpu_quota
if cpu_period:
if not isinstance(cpu_period, int):
raise host_config_type_error('cpu_period', cpu_period, 'int')
if version_lt(version, '1.19'):
raise host_config_version_error('cpu_period', '1.19')
self['CpuPeriod'] = cpu_period
if cpu_shares:
if version_lt(version, '1.18'):
raise host_config_version_error('cpu_shares', '1.18')
if not isinstance(cpu_shares, int):
raise host_config_type_error('cpu_shares', cpu_shares, 'int')
self['CpuShares'] = cpu_shares
if cpuset_cpus:
if version_lt(version, '1.18'):
raise host_config_version_error('cpuset_cpus', '1.18')
self['CpuSetCpus'] = cpuset_cpus
if blkio_weight:
if not isinstance(blkio_weight, int):
raise host_config_type_error(
'blkio_weight', blkio_weight, 'int'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight', '1.22')
self["BlkioWeight"] = blkio_weight
if blkio_weight_device:
if not isinstance(blkio_weight_device, list):
raise host_config_type_error(
'blkio_weight_device', blkio_weight_device, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight_device', '1.22')
self["BlkioWeightDevice"] = blkio_weight_device
if device_read_bps:
if not isinstance(device_read_bps, list):
raise host_config_type_error(
'device_read_bps', device_read_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_bps', '1.22')
self["BlkioDeviceReadBps"] = device_read_bps
if device_write_bps:
if not isinstance(device_write_bps, list):
raise host_config_type_error(
'device_write_bps', device_write_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_bps', '1.22')
self["BlkioDeviceWriteBps"] = device_write_bps
if device_read_iops:
if not isinstance(device_read_iops, list):
raise host_config_type_error(
'device_read_iops', device_read_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_iops', '1.22')
self["BlkioDeviceReadIOps"] = device_read_iops
if device_write_iops:
if not isinstance(device_write_iops, list):
raise host_config_type_error(
'device_write_iops', device_write_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_iops', '1.22')
self["BlkioDeviceWriteIOps"] = device_write_iops
if tmpfs:
if version_lt(version, '1.22'):
raise host_config_version_error('tmpfs', '1.22')
self["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
if userns_mode:
if version_lt(version, '1.23'):
raise host_config_version_error('userns_mode', '1.23')
if userns_mode != "host":
raise host_config_value_error("userns_mode", userns_mode)
self['UsernsMode'] = userns_mode
if pids_limit:
if not isinstance(pids_limit, int):
raise host_config_type_error('pids_limit', pids_limit, 'int')
if version_lt(version, '1.23'):
raise host_config_version_error('pids_limit', '1.23')
self["PidsLimit"] = pids_limit
if isolation:
if not isinstance(isolation, six.string_types):
raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24')
self['Isolation'] = isolation
def host_config_type_error(param, param_value, expected):
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
return TypeError(error_msg.format(param, expected, type(param_value)))
def host_config_version_error(param, version, less_than=True):
operator = '<' if less_than else '>'
error_msg = '{0} param is not supported in API versions {1} {2}'
return errors.InvalidVersion(error_msg.format(param, operator, version))
def host_config_value_error(param, param_value):
error_msg = 'Invalid value for {0} param: {1}'
return ValueError(error_msg.format(param, param_value))
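# Hedged illustration (not in the original module) of how HostConfig uses
# these helpers: requesting a feature below its minimum API version raises
# errors.InvalidVersion. For example, per the check in __init__ above,
#   HostConfig(version='1.20', mem_reservation='1g')
# raises InvalidVersion because mem_reservation requires API >= 1.21.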
class ContainerConfig(dict):
def __init__(
self, version, image, command, hostname=None, user=None, detach=False,
stdin_open=False, tty=False, mem_limit=None, ports=None, dns=None,
environment=None, volumes=None, volumes_from=None,
network_disabled=False, entrypoint=None, cpu_shares=None,
working_dir=None, domainname=None, memswap_limit=None, cpuset=None,
host_config=None, mac_address=None, labels=None, volume_driver=None,
stop_signal=None, networking_config=None, healthcheck=None,
):
if isinstance(command, six.string_types):
command = split_command(command)
if isinstance(entrypoint, six.string_types):
entrypoint = split_command(entrypoint)
if isinstance(environment, dict):
environment = format_environment(environment)
if labels is not None and version_lt(version, '1.18'):
raise errors.InvalidVersion(
'labels were only introduced in API version 1.18'
)
if cpuset is not None or cpu_shares is not None:
if version_gte(version, '1.18'):
warnings.warn(
'The cpuset_cpus and cpu_shares options have been moved to'
' host_config in API version 1.18, and will be removed',
DeprecationWarning
)
if stop_signal is not None and version_lt(version, '1.21'):
raise errors.InvalidVersion(
'stop_signal was only introduced in API version 1.21'
)
if healthcheck is not None and version_lt(version, '1.24'):
raise errors.InvalidVersion(
'Health options were only introduced in API version 1.24'
)
if version_lt(version, '1.19'):
if volume_driver is not None:
raise errors.InvalidVersion(
'Volume drivers were only introduced in API version 1.19'
)
mem_limit = mem_limit if mem_limit is not None else 0
memswap_limit = memswap_limit if memswap_limit is not None else 0
else:
if mem_limit is not None:
raise errors.InvalidVersion(
'mem_limit has been moved to host_config in API version'
' 1.19'
)
if memswap_limit is not None:
raise errors.InvalidVersion(
'memswap_limit has been moved to host_config in API '
'version 1.19'
)
if isinstance(labels, list):
labels = dict((lbl, six.text_type('')) for lbl in labels)
if mem_limit is not None:
mem_limit = parse_bytes(mem_limit)
if memswap_limit is not None:
memswap_limit = parse_bytes(memswap_limit)
if isinstance(ports, list):
exposed_ports = {}
for port_definition in ports:
port = port_definition
proto = 'tcp'
if isinstance(port_definition, tuple):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
exposed_ports['{0}/{1}'.format(port, proto)] = {}
ports = exposed_ports
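            # e.g. ports=[80, (443, 'udp')] becomes {'80/tcp': {}, '443/udp': {}}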
if isinstance(volumes, six.string_types):
volumes = [volumes, ]
if isinstance(volumes, list):
volumes_dict = {}
for vol in volumes:
volumes_dict[vol] = {}
volumes = volumes_dict
if volumes_from:
if not isinstance(volumes_from, six.string_types):
volumes_from = ','.join(volumes_from)
else:
# Force None, an empty list or dict causes client.start to fail
volumes_from = None
if healthcheck and isinstance(healthcheck, dict):
healthcheck = Healthcheck(**healthcheck)
attach_stdin = False
attach_stdout = False
attach_stderr = False
stdin_once = False
if not detach:
attach_stdout = True
attach_stderr = True
if stdin_open:
attach_stdin = True
stdin_once = True
if version_gte(version, '1.10'):
message = ('{0!r} parameter has no effect on create_container().'
' It has been moved to host_config')
if dns is not None:
raise errors.InvalidVersion(message.format('dns'))
if volumes_from is not None:
raise errors.InvalidVersion(message.format('volumes_from'))
self.update({
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
'User': six.text_type(user) if user else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
'Memory': mem_limit,
'AttachStdin': attach_stdin,
'AttachStdout': attach_stdout,
'AttachStderr': attach_stderr,
'Env': environment,
'Cmd': command,
'Dns': dns,
'Image': image,
'Volumes': volumes,
'VolumesFrom': volumes_from,
'NetworkDisabled': network_disabled,
'Entrypoint': entrypoint,
'CpuShares': cpu_shares,
'Cpuset': cpuset,
'CpusetCpus': cpuset,
'WorkingDir': working_dir,
'MemorySwap': memswap_limit,
'HostConfig': host_config,
'NetworkingConfig': networking_config,
'MacAddress': mac_address,
'Labels': labels,
'VolumeDriver': volume_driver,
'StopSignal': stop_signal,
'Healthcheck': healthcheck,
})
|
jarv/cmdchallenge-site
|
lambda_src/runcmd/docker/types/containers.py
|
Python
|
mit
| 20,507
|
from ast import literal_eval
from json import dumps, loads
from urllib2 import Request, urlopen
from requests import post, get
from p3lzstring import LZString
def user_input(data):
    i = 0
    while i < len(data):
        # strip the instance/stat name prefixes before checking the stat type
        stat_name = data[i]['dragondata']['sidekick_name'][9:][5:]
        if 'xp' in stat_name or 'maturity' in stat_name:
            data[i]['dragondata']['value'] = data[i]['dragondata']['maximum']
            print (data[i]['dragondata']['value'])
        i = i + 1
    return data
def lz_string_decode(lzstring):
lz_object = LZString.decompressFromBase64(lzstring)
return lz_object
def dict_loop(p, check_list, scheme_pid):
    # check_list and scheme_pid are kept for call-site compatibility but are
    # unused; state_dict is read from module scope. The original outer while
    # loop just repeated the same full scan, so a single pass is equivalent.
    for key in state_dict['instances'].iterkeys():
        if p in key:
            return state_dict['instances'][key]
    return 'Found Nothing'
def build_xpmat_list(state_dict):
    # single pass over all instances; renamed the builtin-shadowing 'list'
    # to 'results'. The original while wrapper only ever ran once because
    # i reached len(instances) inside the for loop.
    i = 0
    results = []
    for key in state_dict['instances'].iterkeys():
        pg = float((float(i) / float(len(state_dict['instances'])) * float(100)))
        # update_progress(pg)
        schemePID = state_dict['instances'][str(key)]['schemaPrimativeID']
        dict_index = state_dict['instances'][str(key)]
        if 'stats' in dict_index.keys() and 'sidekick' in schemePID:
            check_list = []
            stat_keys = dict_index['stats']
            for stats in stat_keys:
                data = dict_loop(stats, check_list, schemePID)
                check_list.append(schemePID)
                if 'maturity' in data['schemaPrimativeID']:
                    results.append({'Maturity': data})
                if 'xp' in data['schemaPrimativeID']:
                    results.append({'XP': data})
        i = i + 1
        print "%s / %s" % (i, len(state_dict['instances']))
    return results
def conv2Json(jsonString, *args, **kwargs):
jsonObject = literal_eval(jsonString)
return jsonObject
def conv2JsonStr(jsonObject):
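    # note: the double dumps() yields a JSON string literal whose contents
    # are themselves JSON text; assumption: the wintermute endpoint expects
    # the state in this doubly-encoded form.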
jsonString = dumps(dumps(jsonObject))
return jsonString
def ever_wing_token():
req = Request("https://wintermute-151001.appspot.com/game/session/everwing/" + uID)
response = urlopen(req)
data = response.read()
Token = conv2Json(data)
return Token
def ever_wing_defaultaction():
return
def lz_string_encode(object):
lzObject = LZString.compressToBase64(object)
print (lzObject)
return lzObject
def default_state():
url = 'https://wintermute-151001.appspot.com'
gamestate_url = '/game/state/everwing/default/' + uID
state = get(url + gamestate_url)
return state.content
def post_to_winter(user_data, Token):
user_data = unicode(user_data)
headers = {"Host": "wintermute-151001.appspot.com",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "application/json;charset=utf-8",
"x-wintermute-session": str(Token['token']),
"Connection": "keep-alive"}
print (user_data)
print (headers)
post_data = post('https://wintermute-151001.appspot.com/game/action', data=user_data, headers=headers)
return
def rebuild_loop(p, list, x, maturity, XP):
i = 0
if maturity == 'Maturity':
while i < len(state_dict):
for key in state_dict['instances'].iterkeys():
if p in key:
state_dict['instances'][key] = list[x][maturity]
i = i + 1
if XP == 'XP':
while i < len(state_dict):
for key in state_dict['instances'].iterkeys():
if p in key:
state_dict['instances'][key] = list[x][XP]
i = i + 1
return 'THIS IS IT'
def build_state_dict(list):
i = 0
while i < len(list):
try:
if list[i]["Maturity"]:
key_index = list[i]['Maturity']['key']
rebuild_loop(key_index, list, i, maturity='Maturity', XP=2)
pass
except KeyError:
if list[i]['XP']:
key_index = list[i]['XP']['key']
rebuild_loop(key_index, list, i, XP='XP', maturity=3)
i = i + 1
print '%s / %s' % (i, len(list))
return
def fuck_dat_currency():
for instance in state_dict['instances']:
try:
if state_dict['instances'][instance]['schemaPrimativeID'] == "currency:trophy":
state_dict['instances'][instance]['balance'] = 999999
if state_dict['instances'][instance]['schemaPrimativeID'] == "currency:coin":
state_dict['instances'][instance]['balance'] = 999999
except Exception as e:
print "%s" % e
return
def rebuild_state(list, state_dict):
i = 0
while i < len(list):
if list[i]['Maturity']['value']:
list[i]['Maturity']['value'] = 3
if list[i]['Maturity']['value'] == 3:
list[i + 1]['XP']['value'] = 125800
list[i + 1]['XP']['maximum'] = 125800
if list[i]['Maturity']['value'] == 2:
list[i + 1]['XP']['value'] = 62800
list[i + 1]['XP']['maximum'] = 62800
if list[i]['Maturity']['value'] == 1:
list[i + 1]['XP']['value'] = 62800
list[i + 1]['XP']['maximum'] = 62800
i = i + 2
return list
def get_dat_toonies():
characterStrings = ['character:lenore_item_character', 'character:coin_item_character',
'character:sophia_item_character', 'character:jade_item_character',
'character:arcana_item_character', 'character:fiona_item_character',
'character:standard_item_character', 'character:magnet_item_character']
for instance in state_dict['instances']:
try:
if state_dict['instances'][instance]['schemaPrimativeID'] in characterStrings:
characterStat = state_dict['instances'][instance]['stats'][0]
state_dict['instances'][characterStat]['value'] = state_dict['instances'][characterStat]['maximum']
if state_dict['instances'][instance]['state'] == 'locked':
state_dict['instances'][instance]['state'] = 'idle'
        except Exception as e:
            print(e)
return
if __name__ == '__main__':
uID = raw_input('Please Input User ID: ')
user_data = loads(default_state())
state = user_data['state'][11:]
print (state)
state = lz_string_decode(str(state))
state_json_str = conv2Json(state)
state_dict = loads(state_json_str)
input = raw_input('Do you wanna upgrade all current Dragons? (Y/n)')
if input == 'Y':
build_state_dict(rebuild_state(build_xpmat_list(state_dict), state_dict))
else:
print('-------------------------------')
print("You must enter a 'Y' or 'n'!!")
print('-------------------------------')
input = raw_input('Do you wanna fuck da currency? (Y/n)')
if input == 'Y':
fuck_dat_currency()
else:
print('-------------------------------')
print("You must enter a 'Y' or 'n'!!")
print('-------------------------------')
input = raw_input('Do you want all Characters / level 50? (Y/N)')
if input == 'Y':
get_dat_toonies()
else:
print('-------------------------------')
print("You must enter a 'Y' or 'n'!!")
print('-------------------------------')
a = open('statefile.txt', 'w')
a.write(dumps(state_dict, sort_keys=True, indent=4))
a.close()
state_dict = conv2JsonStr(state_dict)
encoded_state = lz_string_encode(state_dict)
encoded_state = 'lz-string::' + encoded_state
user_data['state'] = encoded_state
user_data['timestamp'] = round(float(get('https://wintermute-151001.appspot.com/game/time').content),
ndigits=0)
user_data['server_timestamp'] = round(
float(get('https://wintermute-151001.appspot.com/game/time').content), ndigits=0)
user_data = dumps(user_data)
post_to_winter(user_data, ever_wing_token())
print(user_data)
|
IPFR33LY/EverwingHax
|
Everwing_data.py
|
Python
|
mit
| 8,827
|
# -*- coding: utf-8 -*-
__author__ = 'Steven Brien'
__email__ = 'sbrien@hlkagency.com'
__version__ = '0.1.0'
|
spbrien/norman
|
norman/__init__.py
|
Python
|
mit
| 110
|
from __future__ import division
import myhdl
from myhdl import instance, delay
ClockList = []
class Clock(myhdl.SignalType):
def __init__(self, val, frequency=1, timescale='1ns'):
self._frequency = frequency
self._period = 1/frequency
self._timescale = timescale
self._hticks = 0
self._set_hticks()
myhdl.SignalType.__init__(self, bool(val))
ClockList.append(self)
@property
def timescale(self):
return self._timescale
@timescale.setter
def timescale(self, t):
self._timescale = t
@property
def frequency(self):
return self._frequency
@frequency.setter
def frequency(self, f):
self._frequency = f
self._period = 1/f
self._set_hticks()
@property
def period(self):
return self._period
def _set_hticks(self):
# self._nts = self._convert_timescale(self._timescale)
# self._hticks = int(round(self._period/self._nts))
self._hticks = 5
def _convert_timescale(self, ts):
# @todo: need to complete this, ts is in the form
# "[0-9]*["ms","us","ns","ps"], parse the text
# format and retrieve a numerical value
# separate the numerical and text
nts = 1e9
return nts
def gen(self, hticks=None):
if hticks is None:
hticks = self._hticks
else:
self._hticks = hticks
# print('hticks %d'%(hticks))
@instance
def gclock():
self.next = False
while True:
yield delay(hticks)
self.next = not self.val
return gclock
class Reset(myhdl.ResetSignal):
def __init__(self, val, active, async):
myhdl.ResetSignal.__init__(self, val, active, async)
def pulse(self, delays=10):
if isinstance(delays, int):
self.next = self.active
yield delay(delays)
self.next = not self.active
elif isinstance(delays, tuple):
assert len(delays) in (1, 2, 3), "Incorrect number of delays"
self.next = not self.active if len(delays) == 3 else self.active
for dd in delays:
yield delay(dd)
self.next = not self.val
self.next = not self.active
else:
raise ValueError("{} type not supported".format(type(delays)))
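# Illustrative testbench sketch (hypothetical signal wiring, assuming a
# myhdl simulation context):
#   clock = Clock(0, frequency=50e6)
#   reset = Reset(0, active=0, async=True)
#   @instance
#   def tbstim():
#       yield reset.pulse(10)   # hold reset active for 10 time steps
#       yield delay(100)
#   myhdl.Simulation(clock.gen(), tbstim).run()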
|
cfelton/myhdl_exercises
|
support/mysigs.py
|
Python
|
mit
| 2,461
|
import io
from pathlib import Path
from typing import Mapping
import fontTools.designspaceLib
import fontTools.ttLib
import fontTools.ttLib.tables._n_a_m_e
import fontTools.varLib
import ufo2ft
import ufoLib2
import statmake.classes
import statmake.lib
def dump_axes(font, axes_array):
dump_list = []
for axis in axes_array:
entry = {
"Name": statmake.lib._default_name_string(font, axis.AxisNameID),
"AxisTag": axis.AxisTag,
"AxisOrdering": axis.AxisOrdering,
}
dump_list.append(entry)
return dump_list
def dump_axis_values(font, axis_value_array):
dump_list = []
for axis in axis_value_array:
entry = {
"Format": axis.Format,
"Name": dump_name_ids(font, axis.ValueNameID),
"Flags": axis.Flags,
}
if axis.Format == 1:
entry["AxisIndex"] = axis.AxisIndex
entry["Value"] = axis.Value
elif axis.Format == 2:
entry["AxisIndex"] = axis.AxisIndex
entry["NominalValue"] = axis.NominalValue
entry["RangeMinValue"] = axis.RangeMinValue
entry["RangeMaxValue"] = axis.RangeMaxValue
elif axis.Format == 3:
entry["AxisIndex"] = axis.AxisIndex
entry["Value"] = axis.Value
entry["LinkedValue"] = axis.LinkedValue
elif axis.Format == 4:
entry["AxisValueRecord"] = [
(r.AxisIndex, r.Value) for r in axis.AxisValueRecord
]
else:
raise ValueError("Unknown format")
dump_list.append(entry)
return dump_list
def dump_name_ids(otfont: fontTools.ttLib.TTFont, name_id: int) -> Mapping[str, str]:
"""Return a mapping of language codes to name strings."""
name_mapping = fontTools.ttLib.tables._n_a_m_e._WINDOWS_LANGUAGES
name_table = otfont["name"].names
matches = {
name_mapping[n.langID]: n.toUnicode() for n in name_table if n.nameID == name_id
}
return matches
def empty_UFO(style_name: str):
ufo = ufoLib2.Font()
ufo.info.familyName = "Test"
ufo.info.styleName = style_name
ufo.info.unitsPerEm = 1000
ufo.info.ascender = 800
ufo.info.descender = -200
ufo.info.xHeight = 500
ufo.info.capHeight = 700
ufo.info.postscriptUnderlineThickness = 50
ufo.info.postscriptUnderlinePosition = -75
g = ufo.newGlyph("a")
g.width = 500
return ufo
def reload_font(font):
buf = io.BytesIO()
font.save(buf)
buf.seek(0)
return fontTools.ttLib.TTFont(buf)
def generate_variable_font(
designspace_path: Path, stylespace_path: Path, additional_locations=None
) -> fontTools.ttLib.TTFont:
designspace = fontTools.designspaceLib.DesignSpaceDocument.fromfile(
designspace_path
)
for source in designspace.sources:
source.font = empty_UFO(source.styleName)
ufo2ft.compileInterpolatableTTFsFromDS(designspace, inplace=True)
varfont, _, _ = fontTools.varLib.build(designspace)
stylespace = statmake.classes.Stylespace.from_file(stylespace_path)
if additional_locations is None:
additional_locations = designspace.lib.get(
"org.statmake.additionalLocations", {}
)
statmake.lib.apply_stylespace_to_variable_font(
stylespace, varfont, additional_locations
)
return reload_font(varfont)
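# Hypothetical usage sketch (file names are placeholders):
#   varfont = generate_variable_font(Path("Test.designspace"), Path("Test.stylespace"))
#   axes = dump_axes(varfont, varfont["STAT"].table.DesignAxisRecord.Axis)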
|
googlefonts/statmake
|
tests/testutil.py
|
Python
|
mit
| 3,392
|
#!/usr/bin/env python
# Usage:
# $ KEY=<YOUR APIKEY> DEVICE=<YOUR DEVICE ID> python example.py
import os
import sys
import pprint
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from m2x.client import M2XClient
KEY = os.environ['KEY']
DEVICE_ID = os.environ['DEVICE']
client = M2XClient(key=KEY)
device = client.device(DEVICE_ID)
pprint.pprint(device.data)
|
attm2x/m2x-python
|
examples/example.py
|
Python
|
mit
| 382
|
"""
Setup script for the Gimbal package
"""
from setuptools import setup
from setuptools import find_packages
def readme():
"""Returns the contents of the README without the header image."""
header = '======\nGimbal\n======\n'
with open('README.rst', 'r') as f:
f.readline()
return header + f.read()
def requirements():
"""Returns the requirement list."""
with open('requirements.txt', 'r') as f:
return [line.strip() for line in f.readlines()]
# read the current version number
exec(open('gimbal/_version.py').read())
setup(
name='gimbal',
version=__version__,
description=('Tools for importing, creating, editing and querying ' +
'molecular geometries'),
long_description=readme(),
long_description_content_type='text/x-rst',
keywords='gimbal molecule geometry displacement transformation 3D',
url='https://github.com/ryjmacdonell/gimbal',
author='Ryan J. MacDonell',
author_email='rmacd054@uottawa.ca',
license='MIT',
packages=find_packages(),
scripts=['bin/convgeom', 'bin/measure', 'bin/nudge', 'bin/subst'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Chemistry'
],
install_requires=requirements()
)
|
ryjmacdonell/geomtools
|
setup.py
|
Python
|
mit
| 1,588
|
import hashlib
import shutil
import os
from datetime import datetime
list_of_paths_and_strings = [
["assignment1.cpp", "main()"]
]
def main():
if acceptance_test():
make_txt_file()
zip_dir()
def get_md5_hash(file):
    # open in binary mode so the hash is stable across platforms
    file_to_hash = open(file, 'rb')
    read_file = file_to_hash.read()
    file_to_hash.close()
# get hash of file
md5_hash = hashlib.md5(read_file)
md5_hash_output = md5_hash.hexdigest()
# print file name and hash
print "File Name: %s" % file
print "MD5 Hash: %r" % md5_hash_output
# return hash
return file, md5_hash_output
def get_current_time():
print "The current time is " + " datetime.today()"
return datetime.today()
def acceptance_test():
# for each list of the list of paths and strings
# make sure that a file with that name exists within the folder
for my_list in list_of_paths_and_strings:
path = my_list[0]
list_of_strings = my_list[1:]
try:
with open(path) as file:
for string in list_of_strings:
if string in file.read():
print "Found " + string + " in file."
else:
                        print string + " not found in file."
return False
file.close()
return True
except:
print 'File does not exist. Please make sure all necessary files are in the correct place.'
return False
def make_txt_file():
# writes a text file with each of the hashes for each of the files using MD5
write_file = open("hash.txt", "w+")
write_file.write("Write time: " + str(get_current_time()) + '\n')
for file in os.listdir(os.getcwd()):
if "." in file:
f_name, file_hash = get_md5_hash(file)
write_file.write(f_name + '\n')
write_file.write(file_hash + '\n')
write_file.close()
def zip_dir():
# zips directory using shutil.make_archive()
zip_name = "submission"
directory_name = "./tmp"
os.mkdir("./tmp")
for file in os.listdir(os.getcwd()):
try:
if ".pdf" in file:
continue
elif "acceptancetest" in file:
continue
else:
shutil.copy(file, './tmp/')
except:
continue
shutil.make_archive(zip_name, 'zip', directory_name)
shutil.rmtree('./tmp')
if __name__ == '__main__':
main()
|
shanestockall/EECS-Grading-Rig
|
src/packages/EECS-211-Grader/acceptancetest.py
|
Python
|
mit
| 2,184
|
from django.db import models
from django.db.models import Count
from belt.managers import SearchQuerySetMixin
class PostQuerySet(SearchQuerySetMixin, models.QuerySet):
pass
class CategoryQuerySet(SearchQuerySetMixin, models.QuerySet):
pass
class BlogQuerySet(SearchQuerySetMixin, models.QuerySet):
def annotate_total_posts(self):
return self.annotate(total_posts=Count("posts"))
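# Usage sketch, assuming the queryset is attached to the model via
# `objects = BlogQuerySet.as_manager()`:
#   Blog.objects.annotate_total_posts().filter(total_posts__gt=0)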
|
marcosgabarda/django-belt
|
tests/app/managers.py
|
Python
|
mit
| 406
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from .generic import *
from criacao.forms import *
from criacao.models import *
from gerenciamento.models import *
logger = logging.getLogger(__name__)
class LinkView(GenericView):
def criar(self, request):
if request.method == 'POST':
try:
name = request.POST['name']
url = request.POST['url']
except Exception, e:
logger.error(str(e))
data = {
'leftover' : {
'alert-error' : 'Está faltando alguma informação, por favor, verifique os campos!',
}
}
else:
link = Link(name=name, url=url)
try:
link.save()
except Exception, e:
logger.error(str(e))
data = {
'leftover' : {
'alert-success' : 'Link criado com sucesso!',
'redirect' : '/criacao/link/listar/'
},
}
finally:
return data
else:
museu, museu_nome = UTIL_informacoes_museu()
form = LinkForm()
data = {
'template' : {
'request' : request,
'museu_nome' : museu_nome,
'form' : form,
},
}
return data
def visualizar(self, request):
try:
pk = self.kwargs['key']
except Exception, e:
logger.error(str(e))
data = {
'leftover' : {
'alert-error' : 'Não foi possível processar essa visualização.',
}
}
else:
museu, museu_nome = UTIL_informacoes_museu()
link = Link.objects.get(pk=pk)
data = {
'template' : {
'request' : request,
'museu_nome' : museu_nome,
'link' : link,
},
}
finally:
return data
def editar(self, request):
if request.method == 'POST':
try:
pk = self.kwargs['key']
name = request.POST['name']
url = request.POST['url']
except Exception, e:
logger.error(str(e))
data = {
'leftover' : {
'alert-error' : 'Não foi possível processar esta edição!',
}
}
else:
                link = Link.objects.get(pk=pk)
                link.name = name
                link.url = url
link.save()
data = {
'leftover' : {
'alert-success' : 'Link editada com sucesso!',
'redirect' : '/criacao/link/listar/'
},
}
finally:
return data
else:
try:
pk = self.kwargs['key']
except Exception, e:
logger.error(str(e))
data = {
'leftover' : {
'alert-error' : 'Não foi possível processar essa edição!',
}
}
else:
museu, museu_nome = UTIL_informacoes_museu()
                link = Link.objects.get(pk=pk)
form = LinkForm(initial={
'name': link.name,
'url': link.url,
})
data = {
'template' : {
'request' : request,
'museu_nome' : museu_nome,
'link' : link,
'form' : form,
},
}
finally:
return data
def excluir(self, request):
try:
pk = self.kwargs['key']
except Exception, e:
logger.error(str(e))
data = {
'leftover' : {
'alert-error' : 'Não foi possível processar essa exclusão!',
}
}
else:
Link.objects.get(pk=pk).delete()
data = {
'leftover' : {
'alert-success' : 'Link deletado com sucesso!',
},
}
finally:
return data
def listar(self, request):
museu, museu_nome = UTIL_informacoes_museu()
links = Link.objects.all()
try:
page = int(self.kwargs['key'])
except:
page = 1
finally:
links = paginate(obj=links, page=page, num_per_page=8)
data = {
'template' : {
'request' : request,
'museu' : museu,
'museu_nome' : museu_nome,
'links' : links,
},
}
return data
|
wendellpbarreto/mcc-django
|
criacao/views/link.py
|
Python
|
mit
| 3,504
|
#!/usr/bin/env python3
# Filters one or more input text files, building a collection of words from
# the corpora, and filtering them
#
# Copyright 2016 Mathew Hunter
import argparse
import nltk
import os
import re
# default regex that filters out links, numbers, and single characters
default_regex = "^(?!http|\d)\w{2,}"
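# e.g. "corpus" passes the filter, while "http://x", "42" and "a" are
# dropped (links, digit-led tokens, and single characters).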
# Filters the specified corpus
def filter_corpus_data(string_data, stopwords=[], use_nltk_default=True):
# Tokenize and get the words
tokenizer = nltk.tokenize.TweetTokenizer(preserve_case=False, strip_handles=True)
tokens = tokenizer.tokenize(string_data)
# Filter
filtered_words = filter_corpus_words(tokens, stopwords, use_nltk_default)
return " ".join(filtered_words)
# Filters the referenced files to produce a filtered corpus
def filter_corpus_files(filenames, stopwords=[], use_nltk_default=True):
# Process each file, filtering the data from it
filtered_corpus = ""
for filename in filenames:
# Read and filter the content from each
try:
with open(filename, "r") as file:
raw_data = str(file.read())
filtered_data = filter_corpus_data(raw_data, stopwords=stopwords, use_nltk_default=use_nltk_default)
filtered_corpus += " " + filtered_data
except Exception as e:
print("There was an error filtering the data from '" + filename + "': " + str(e))
raise
return filtered_corpus
# Filters the corpus words
def filter_corpus_words(words, stopwords=[], use_nltk_default=True):
stopwords_set = set(stopwords)
if use_nltk_default:
stopwords_set |= set(nltk.corpus.stopwords.words("english"))
filtered_words = [word for word in words if word not in stopwords_set and re.match(default_regex, word)]
return filtered_words
# Loads a set of stopwords from a file
def __load_stopwords_from_file(file):
stopwords = set()
with open(file, "r") as file:
for line in file:
if line.strip():
stopwords.add(line.strip())
return stopwords
if __name__ == "__main__":
# Create an argument parser and parse the args
parser = argparse.ArgumentParser(description="Filters the incoming corpus, removing stopwords")
parser.add_argument("source_file", nargs="+", help="the source file(s) to process")
parser.add_argument("--stopwords", type=str, help="a file that contains custom stopwords")
args = parser.parse_args()
# If there was a custom stopwords file specified, load it
stopwords = []
custom_stopwords_file = args.stopwords
if custom_stopwords_file:
stopwords = __load_stopwords_from_file(custom_stopwords_file)
# Filter the corpus files
filtered_corpus = filter_corpus_files(args.source_file, stopwords=stopwords)
print(filtered_corpus)
|
mdhunter/twitter-to-wordcloud
|
filter_corpus.py
|
Python
|
mit
| 2,888
|
import sys
import wx
import wx.dataview as dv
#import os; print('PID:'+str(os.getpid())); raw_input("Press enter...")
#----------------------------------------------------------------------
class MyCustomRenderer(dv.DataViewCustomRenderer):
def __init__(self, log, *args, **kw):
dv.DataViewCustomRenderer.__init__(self, *args, **kw)
self.log = log
self.value = None
def SetValue(self, value):
#self.log.write('MyCustomRenderer.SetValue: %s\n' % value)
self.value = value
return True
def GetValue(self):
#self.log.write('MyCustomRenderer.GetValue\n')
return self.value
def GetSize(self):
# Return the size needed to display the value. The renderer
# has a helper function we can use for measuring text that is
# aware of any custom attributes that may have been set for
# this item.
value = self.value if self.value else ""
size = self.GetTextExtent(value)
return size
def Render(self, rect, dc, state):
if state != 0:
self.log.write('Render: %s, %d\n' % (rect, state))
if not state & dv.DATAVIEW_CELL_SELECTED:
# we'll draw a shaded background to see if the rect correctly
# fills the cell
dc.SetBrush(wx.Brush('light grey'))
dc.SetPen(wx.TRANSPARENT_PEN)
rect.Deflate(1, 1)
dc.DrawRoundedRectangle(rect, 2)
# And then finish up with this helper function that draws the
# text for us, dealing with alignment, font and color
# attributes, etc
value = self.value if self.value else ""
self.RenderText(value,
4, # x-offset, to compensate for the rounded rectangles
rect,
dc,
state # wxDataViewCellRenderState flags
)
return True
# The HasEditorCtrl, CreateEditorCtrl and GetValueFromEditorCtrl
# methods need to be implemented if this renderer is going to
# support in-place editing of the cell value, otherwise they can
# be omitted.
def HasEditorCtrl(self):
self.log.write('HasEditorCtrl')
return True
def CreateEditorCtrl(self, parent, labelRect, value):
self.log.write('CreateEditorCtrl: %s' % labelRect)
ctrl = wx.TextCtrl(parent,
value=value,
pos=labelRect.Position,
size=labelRect.Size)
# select the text and put the caret at the end
ctrl.SetInsertionPointEnd()
ctrl.SelectAll()
return ctrl
def GetValueFromEditorCtrl(self, editor):
self.log.write('GetValueFromEditorCtrl: %s' % editor)
value = editor.GetValue()
return True, value
# The LeftClick and Activate methods serve as notifications
# letting you know that the user has either clicked or
# double-clicked on an item. Implementing them in your renderer
# is optional.
def LeftClick(self, pos, cellRect, model, item, col):
self.log.write('LeftClick')
return False
def Activate(self, cellRect, model, item, col):
self.log.write('Activate')
return False
#----------------------------------------------------------------------
# To help focus this sample on the custom renderer, we'll reuse the
# model class from another sample.
from IndexListModel import TestModel
class TestPanel(wx.Panel):
def __init__(self, parent, log, model=None, data=None):
self.log = log
wx.Panel.__init__(self, parent, -1)
# Create a dataview control
self.dvc = dv.DataViewCtrl(self, style=wx.BORDER_THEME
| dv.DV_ROW_LINES
#| dv.DV_HORIZ_RULES
| dv.DV_VERT_RULES
| dv.DV_MULTIPLE
)
# Create an instance of the model
if model is None:
self.model = TestModel(data, log)
else:
self.model = model
self.dvc.AssociateModel(self.model)
# Now we create some columns.
c0 = self.dvc.AppendTextColumn("Id", 0, width=40)
c0.Alignment = wx.ALIGN_RIGHT
c0.MinWidth = 40
# We'll use our custom renderer for these columns
for title, col, width in [ ('Artist', 1, 170),
('Title', 2, 260),
('Genre', 3, 80)]:
renderer = MyCustomRenderer(self.log, mode=dv.DATAVIEW_CELL_EDITABLE)
column = dv.DataViewColumn(title, renderer, col, width=width)
column.Alignment = wx.ALIGN_LEFT
self.dvc.AppendColumn(column)
# Layout
self.Sizer = wx.BoxSizer(wx.VERTICAL)
self.Sizer.Add(self.dvc, 1, wx.EXPAND)
#----------------------------------------------------------------------
def main():
from data import musicdata
app = wx.App()
frm = wx.Frame(None, title="CustomRenderer sample", size=(700,500))
pnl = TestPanel(frm, sys.stdout, data=musicdata)
frm.Show()
app.MainLoop()
if __name__ == '__main__':
main()
#----------------------------------------------------------------------
|
dnxbjyj/python-basic
|
gui/wxpython/wxPython-demo-4.0.1/samples/dataview/CustomRenderer.py
|
Python
|
mit
| 5,439
|
from findSentence import sentenceGrab
from phoneticWords import findPhonetics
from phoneticIndex import findPhoneticIndex
from random import randint
from math import floor
import sys
def main():
library = sys.argv[1]
subject = sys.argv[2]
dictionary = "/usr/share/dict/words"
phonetics = findPhonetics(subject, dictionary)
if len(phonetics) == 0:
print("Could not find any phonetic words.")
return
nearPhoneticNum = floor((phonetics[0][1] + phonetics[len(phonetics)-1][1]) / 2)
phonetics = [i for i in phonetics if i[1] <= nearPhoneticNum]
sentences = []
tries = 10
index = 0
while len(sentences) == 0 and index <= tries:
if len(phonetics) == 0:
print("No more phonetic words. Ending")
return
index += 1
punWord = phonetics[randint(0, floor(len(phonetics)/2))][0]
print(punWord)
sentences = sentenceGrab(punWord, library, True)
if len(sentences) == 0:
phonetics = [i for i in phonetics if i[0] != punWord]
print("Could not find sentence... Trying again")
if index >= tries:
print("Reached maximum tries. Ending")
return
punSentence = sentences[randint(0, len(sentences) - 1)]
sentenceIndex = punSentence.find(punWord)
punIndex = findPhoneticIndex(subject, punWord)
punSentence = punSentence[0:sentenceIndex + punIndex] + subject + punSentence[sentenceIndex + punIndex + len(subject):len(punSentence)]
print(punSentence)
if __name__ == "__main__":
main()
|
Jflinchum/pow-generator
|
powGenerator.py
|
Python
|
mit
| 1,567
|
import numpy as np
import util
from datetime import datetime
from scipy.stats import norm
import better_exceptions
from scipy.stats import multivariate_normal as mvn
class NaiveBayers(object):
def __init__(self):
# Gaussian deviation
self.gaussians = dict()
# Class priors
self.priors = dict()
def fit(self, X, Y, smoothing=10e-3):
N, D = X.shape
# 1,2,3,4,5,6,7,8,9,0 - is labels
labels = set(Y)
for c in labels:
# get the current slice [0:number] where X in our class
current_x = X[Y == c]
# Compute mean and variance. Store in the dictionary by class key
self.gaussians[c] = {
'mean': current_x.mean(axis=0),
'var': np.var(current_x.T) + smoothing,
}
# Simple calculate prior probability. Divide current class by all classes
self.priors[c] = float(len(Y[Y == c])) / len(Y)
def score(self, X, Y):
# Get the predictions
P = self.predict(X)
# Return mean of array
return np.mean(P == Y)
def predict(self, X):
# N - samples, D - features (classes)
N, D = X.shape
# Hyperparameter (10)
K = len(self.gaussians)
# Fill by Zeros
P = np.zeros((N, K))
# for each class and mean/covariance
for c, g in self.gaussians.items():
mean, var = g['mean'], g['var']
log = np.log(self.priors[c])
# Calculate Log of the probability density function, all at once
P[:, c] = mvn.logpdf(X, mean=mean, cov=var) + log
return np.argmax(P, axis=1)
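# Decision rule: predict() scores each class c with
# log P(c) + log N(x; mean_c, var_c) and takes the argmax, i.e. the
# standard MAP rule for Gaussian naive Bayes.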
if __name__ == '__main__':
# Get train data
X, Y = util.get_data(40000)
Ntrain = len(Y) // 2
Xtest, Ytest = util.get_test_data(40000)
Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]
# Xtest, Ytest = X[Ntrain:], Y[Ntrain:]
model = NaiveBayers()
t0 = datetime.now()
model.fit(Xtrain, Ytrain)
print("Training time: ", (datetime.now() - t0))
t0 = datetime.now()
print("Training accuracy: ", model.score(Xtrain, Ytrain))
print("Time to compute train accuracy: ", (datetime.now() - t0), "Train size: ", len(Ytrain))
t0 = datetime.now()
print("Test accuracy: ", model.score(Xtest, Ytest))
print("Time to compute test accuracy: ", (datetime.now() - t0), "Test size: ", len(Ytest))
|
adexin/Python-Machine-Learning-Samples
|
Naive_bayes_mnist/nb.py
|
Python
|
mit
| 2,416
|
import collections
import functools
from typing import List
from test_framework import generic_test
from test_framework.test_utils import enable_executor_hook
Rect = collections.namedtuple('Rect', ('left', 'right', 'height'))
def compute_skyline(buildings: List[Rect]) -> List[Rect]:
# TODO - you fill in here.
return []
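def _compute_skyline_sketch(buildings: List[Rect]) -> List[Rect]:
    # Hypothetical brute-force sketch (not the judge's reference solution),
    # assuming non-negative integer coordinates: rasterize the max height
    # over each unit interval, then merge runs of equal height into Rects.
    if not buildings:
        return []
    lo = min(b.left for b in buildings)
    hi = max(b.right for b in buildings)
    heights = [0] * (hi - lo)
    for b in buildings:
        for x in range(b.left, b.right):
            heights[x - lo] = max(heights[x - lo], b.height)
    result, start = [], 0
    for i in range(1, len(heights) + 1):
        if i == len(heights) or heights[i] != heights[start]:
            result.append(Rect(start + lo, i + lo, heights[start]))
            start = i
    return result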
@enable_executor_hook
def compute_skyline_wrapper(executor, buildings):
buildings = [Rect(*x) for x in buildings]
result = executor.run(functools.partial(compute_skyline, buildings))
return [(x.left, x.right, x.height) for x in result]
if __name__ == '__main__':
exit(
generic_test.generic_test_main('drawing_skyline.py',
'drawing_skyline.tsv',
compute_skyline_wrapper))
|
shobhitmishra/CodingProblems
|
epi_judge_python/drawing_skyline.py
|
Python
|
mit
| 812
|
import sys
import os
import traceback
from django import db
sys.path.append('/root/wisely/wisely_project/')
os.environ['DJANGO_SETTINGS_MODULE'] = 'wisely_project.settings.production'
from django.db.models import F, Q
from django.utils import timezone
from users.tasks import get_coursera_courses, get_edx_courses, get_udemy_courses
__author__ = 'tmehta'
from users.models import CourseraProfile, EdxProfile, UdemyProfile
while True:
try:
for connection in db.connections.all():
if len(connection.queries) > 100:
db.reset_queries()
for user in CourseraProfile.objects.filter(last_updated__lt=F('user__last_login')).filter(~Q(username='')).filter(
incorrect_login=False):
print user.username
print "Start coursera"
get_coursera_courses(user)
user.last_updated = timezone.now()
print "Done Coursera"
user.save()
for user in EdxProfile.objects.filter(last_updated__lt=F('user__last_login')).filter(~Q(email='')).filter(
incorrect_login=False):
print user.email
print "Start edx"
get_edx_courses(user)
print "Done EDx"
user.last_updated = timezone.now()
user.save()
for user in UdemyProfile.objects.filter(last_updated__lt=F('user__last_login')).filter(~Q(email='')).filter(
incorrect_login=False):
print user.email
print "Start udemy"
get_udemy_courses(user)
print "Done Udemy"
user.last_updated = timezone.now()
user.save()
except Exception as e:
print traceback.format_exc()
|
TejasM/wisely
|
wisely_project/get_courses_file.py
|
Python
|
mit
| 1,721
|
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------
# Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------------
import importlib
import json
import os
from datetime import datetime
from hackathon.log import log
try:
from config import Config
except ImportError:
from config_sample import Config
__all__ = [
"get_config",
"safe_get_config",
"get_class",
"load_template",
"call",
"get_now",
"Utility"
]
def get_config(key):
"""Get configured value from configuration file according to specified key
:type key: str or unicode
:param key: the search key, separate section with '.'. For example: "mysql.connection"
:Example:
get_config("mysql.connection")
:return configured value if specified key exists else None
:rtype str or unicode or dict
"""
ret = Config
for arg in key.split("."):
if arg in ret and isinstance(ret, dict):
ret = ret[arg]
else:
return None
return ret
def safe_get_config(key, default_value):
"""Get configured value from configuration file according to specified key and a default value
:type key: str | unicode
:param key: the search key, separate section with '.'. For example: "mysql.connection"
:type default_value: object
:param default_value: the default value if specified key cannot be found in configuration file
:Example:
safe_get_config("mysql.connection", "mysql://root:root@localhost:3306/db")
:return configured value if specified key exists else the default value
:rtype str or unicode or dict
"""
r = get_config(key)
return r if r else default_value
def get_class(kls):
"""Get the class object by it's name
:type kls: str or unicode
    :param kls: the full name, including module name, of a class object
:return the class object
:rtype classobj
:Example:
get_class("hackathon.user.UserManager")
:raise ModuleException if module cannot be imported
"""
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
def load_template(url):
"""Load hackathon template from file into a dict
:type url: str|unicode
:param url: the absolute path of the template.
:return dict indicates a hackathon template
:rtype dict
"""
try:
template = json.load(file(url))
except Exception as e:
log.error(e)
return None
return template
def call(mdl_cls_func, cls_args, func_args):
# todo refactoring the call method to use standard hackathon_scheduler
mdl_name = mdl_cls_func[0]
cls_name = mdl_cls_func[1]
func_name = mdl_cls_func[2]
log.debug('call: mdl_name [%s], cls_name [%s], func_name [%s]' % (mdl_name, cls_name, func_name))
mdl = importlib.import_module(mdl_name)
cls = getattr(mdl, cls_name)
func = getattr(cls(*cls_args), func_name)
func(*func_args)
def get_now():
"""Return the current local date and time without tzinfo"""
return datetime.utcnow() # tzinfo=None
class Utility(object):
"""An utility class for those commonly used methods"""
def get_now(self):
"""Return the current local date and time without tzinfo"""
return get_now()
def convert(self, value):
"""Convert unicode string to str"""
if isinstance(value, dict):
return {self.convert(key): self.convert(value) for key, value in value.iteritems()}
elif isinstance(value, list):
return [self.convert(element) for element in value]
elif isinstance(value, unicode):
return value.encode('utf-8')
else:
return value
def get_config(self, key):
"""Get configured value from configuration file according to specified key
.. seealso:: get_config outside Utility class
"""
return get_config(key)
def safe_get_config(self, key, default_value):
"""Get configured value from configuration file according to specified key and a default value
.. seealso:: safe_get_config outside Utility class
"""
return safe_get_config(key, default_value)
def mkdir_safe(self, path):
"""Create a directory if it doesn't exist
:return the directory path
"""
if path and not (os.path.exists(path)):
os.makedirs(path)
return path
|
Fendoe/open-hackathon-o
|
open-hackathon-server/src/hackathon/util.py
|
Python
|
mit
| 5,753
|
import unittest
import slowboy.gpu
import slowboy.interrupts
from tests.mock_interrupt_controller import MockInterruptController
STAT_IE_ALL_MASK = (slowboy.gpu.STAT_LYC_IE_MASK |
slowboy.gpu.STAT_OAM_IE_MASK |
slowboy.gpu.STAT_HBLANK_IE_MASK |
slowboy.gpu.STAT_VBLANK_IE_MASK)
class TestGPU(unittest.TestCase):
def setUp(self):
self.gpu = slowboy.gpu.GPU()
self.interrupt_controller = MockInterruptController()
def test_constructor(self):
self.assertEqual(len(self.gpu.vram), 0x2000)
self.assertEqual(len(self.gpu.oam), 0xa0)
self.assertEqual(self.gpu.lcdc, 0x91)
self.assertEqual(self.gpu.scy, 0x00)
self.assertEqual(self.gpu.scx, 0x00)
self.assertEqual(self.gpu.ly, 0x00)
self.assertEqual(self.gpu.lyc, 0x00)
self.assertEqual(self.gpu.bgp, 0xfc)
self.assertEqual(self.gpu.obp0, 0xff)
self.assertEqual(self.gpu.obp1, 0xff)
self.assertEqual(self.gpu.wy, 0x00)
self.assertEqual(self.gpu.wx, 0x00)
# LYC=LY, Mode.OAM_READ
self.assertEqual(self.gpu.stat, 0x04 | 0x02)
self.assertEqual(self.gpu.mode, slowboy.gpu.Mode.OAM_READ)
self.assertEqual(self.gpu.mode_clock, 0)
def test_mode(self):
# Force ClockListener.notify and verify mode state transitions
for i in range(144):
# OAM_READ (2)
self.assertEqual(self.gpu.mode, slowboy.gpu.Mode.OAM_READ)
self.assertEqual(self.gpu.mode_clock, 0)
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_MODE_MASK,
slowboy.gpu.Mode.OAM_READ.value)
# OAM_VRAM_READ (3)
self.gpu.notify(0, 80)
self.assertEqual(self.gpu.mode, slowboy.gpu.Mode.OAM_VRAM_READ)
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_MODE_MASK,
slowboy.gpu.Mode.OAM_VRAM_READ.value)
self.assertEqual(self.gpu.mode_clock, 0)
# HBLANK (0)
self.gpu.notify(0, 172)
self.assertEqual(self.gpu.mode, slowboy.gpu.Mode.H_BLANK)
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_MODE_MASK,
slowboy.gpu.Mode.H_BLANK.value)
self.assertEqual(self.gpu.mode_clock, 0)
self.gpu.notify(0, 204)
# VBLANK (1)
self.assertEqual(self.gpu.mode, slowboy.gpu.Mode.V_BLANK)
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_MODE_MASK,
slowboy.gpu.Mode.V_BLANK.value)
self.assertEqual(self.gpu.mode_clock, 0)
def test_stat_mode(self):
# Initial mode is OAM_READ
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_MODE_MASK,
slowboy.gpu.Mode.OAM_READ.value)
self.gpu.mode = slowboy.gpu.Mode.OAM_VRAM_READ
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_MODE_MASK,
slowboy.gpu.Mode.OAM_VRAM_READ.value)
self.gpu.mode = slowboy.gpu.Mode.H_BLANK
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_MODE_MASK,
slowboy.gpu.Mode.H_BLANK.value)
self.gpu.mode = slowboy.gpu.Mode.V_BLANK
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_MODE_MASK,
slowboy.gpu.Mode.V_BLANK.value)
def test_stat_oam_interrupt(self):
self.gpu.load_interrupt_controller(self.interrupt_controller)
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_OAM_IE_MASK, 0)
self.gpu.stat |= slowboy.gpu.STAT_OAM_IE_MASK
self.gpu.mode = slowboy.gpu.Mode.OAM_READ
self.assertEqual(self.interrupt_controller.last_interrupt,
slowboy.interrupts.InterruptType.stat)
def test_stat_lyc_interrupt(self):
self.gpu.load_interrupt_controller(self.interrupt_controller)
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_LYC_IE_MASK, 0)
self.gpu.stat |= slowboy.gpu.STAT_LYC_IE_MASK
self.gpu.ly = self.gpu.lyc
self.assertEqual(self.interrupt_controller.last_interrupt,
slowboy.interrupts.InterruptType.stat)
def test_stat_hblank_interrupt(self):
self.gpu.load_interrupt_controller(self.interrupt_controller)
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_HBLANK_IE_MASK, 0)
self.gpu.stat |= slowboy.gpu.STAT_HBLANK_IE_MASK
self.gpu.mode = slowboy.gpu.Mode.H_BLANK
self.assertEqual(self.interrupt_controller.last_interrupt,
slowboy.interrupts.InterruptType.stat)
def test_stat_vblank_interrupt(self):
self.gpu.load_interrupt_controller(self.interrupt_controller)
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_VBLANK_IE_MASK, 0)
self.gpu.stat |= slowboy.gpu.STAT_VBLANK_IE_MASK
self.gpu.mode = slowboy.gpu.Mode.V_BLANK
self.assertEqual(self.interrupt_controller.last_interrupt,
slowboy.interrupts.InterruptType.stat)
def test__update_vram(self):
# TODO
self.fail('Not implemented: test__update_vram')
def test_colorto8bit(self):
self.assertRaises(ValueError, slowboy.gpu.colorto8bit, 4)
self.assertEqual(slowboy.gpu.colorto8bit(0), 0xff)
self.assertEqual(slowboy.gpu.colorto8bit(1), 0xaa)
self.assertEqual(slowboy.gpu.colorto8bit(2), 0x55)
self.assertEqual(slowboy.gpu.colorto8bit(3), 0x00)
def test_bgp(self):
# 11 11 11 00
self.assertEqual(self.gpu.bgp, 0xfc)
self.assertEqual(self.gpu._palette, [0xff, 0x00, 0x00, 0x00])
# 00 01 10 11
self.gpu.bgp = 0x1b
self.assertEqual(self.gpu.bgp, 0x1b)
self.assertEqual(self.gpu._palette, [0x00, 0x55, 0xaa, 0xff])
def test_obp(self):
self.assertEqual(self.gpu.obp0, 0xff)
self.assertEqual(self.gpu._sprite_palette0, [0xff, 0x00, 0x00, 0x00])
self.assertEqual(self.gpu.obp1, 0xff)
self.assertEqual(self.gpu._sprite_palette1, [0xff, 0x00, 0x00, 0x00])
# 00 01 10 11
self.gpu.obp0 = 0x1b
self.assertEqual(self.gpu.obp0, 0x1b)
self.assertEqual(self.gpu._sprite_palette0, [0xff, 0x55, 0xaa, 0xff])
# 11 10 01 00
self.gpu.obp1 = 0xe4
self.assertEqual(self.gpu.obp1, 0xe4)
self.assertEqual(self.gpu._sprite_palette1, [0xff, 0xaa, 0x55, 0x00])
def test_ly_lyc(self):
self.assertEqual(self.gpu.ly, 0)
# Changing LYC so that LYC != LY should clear STAT LYC flag
self.gpu.lyc = 5
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_LYC_FLAG_MASK, 0)
# Make LY = LYC -- STAT LYC flag should be set
self.gpu.ly = 5
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_LYC_FLAG_MASK,
slowboy.gpu.STAT_LYC_FLAG_MASK)
# Changing LY so that LYC != LY should *also* clear STAT LYC flag
self.gpu.ly = 6
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_LYC_FLAG_MASK, 0)
# Make LYC = LY -- should also set STAT LYC flag
self.gpu.lyc = 6
self.assertEqual(self.gpu.stat & slowboy.gpu.STAT_LYC_FLAG_MASK,
slowboy.gpu.STAT_LYC_FLAG_MASK)
def test_wx_wy(self):
self.assertEqual(self.gpu.wx, 0)
self.assertEqual(self.gpu.wy, 0)
self.gpu.wx = 7
self.assertEqual(self.gpu._wx, 0)
self.gpu.wy = 0
self.assertEqual(self.gpu._wy, 0)
|
zmarvel/slowboy
|
tests/test_gpu.py
|
Python
|
mit
| 7,556
|
#!/usr/bin/python
import random
def roll(qty, sides):
return sum(random.randrange(1, sides + 1) for _ in range(qty))
def chances(qty, out_of):
"""'1 chance in 5' is chances(1, 5)"""
    # The chance succeeds when a 1..out_of roll lands at or below qty.
return roll(1, out_of) <= qty
|
bstpierre/yendor
|
yendor/dice.py
|
Python
|
mit
| 278
|
# -*- coding: utf-8 -*-
from datetime import datetime
from app.model import db
class FeedRecord(db.Model):
__tablename__ = 'feeds'
id = db.Column(db.Integer, primary_key=True)
    # Pass the callable, not its result, so each row gets its own timestamp
    time = db.Column(db.DateTime, default=datetime.now)
quant = db.Column(db.Integer)
def __repr__(self):
        return 'At %s ate %d ml' % (str(self.time), self.quant)
|
zeutrap/tiaotiao
|
app/model/feed.py
|
Python
|
mit
| 350
|
'''
Setup.py script.
'''
__author__ = 'riko'
from Cython.Build import cythonize
import numpy
from setuptools import setup, Extension, find_packages
try:
use_cython = True
from Cython.Distutils import build_ext
except ImportError:
use_cython = False
ext = '.pyx' if use_cython else '.c'
ext_modules = [Extension("calculations", ["models/calculations/calculations"+ext])]
include_dirs = []
cmdclass = {}
if use_cython:
print "Doing extensions: ", ext_modules
ext_modules = cythonize(ext_modules, include_dirs=[numpy.get_include()])
    include_dirs = [numpy.get_include()]
cmdclass.update({ 'build_ext': build_ext })
print ext_modules
setup(name='TennisModelling',
version='1.0',
description='Tennis modelling tool.',
author='Erik Grabljevec',
author_email='erikgrabljevec5@gmail.com',
url='https://github.com/erix5son/TennisModelling',
packages=['data_tools', 'models', 'ranking_systems'],
py_modules = ['settings'],
cmdclass=cmdclass,
ext_modules=ext_modules,
      include_dirs=include_dirs + [numpy.get_include()]
)
|
erix5son/Tennis-Modelling
|
setup.py
|
Python
|
mit
| 1,109
|
import pytest
TEST_HASHES = {
"test": "Jnh+8wNnELksNFVbxkya8RDrxJNL13dUWTXhp5DCx/quTM2/cYn7azzl2Uk3I2zc",
"test2": "sh33L5uQeLr//jJULb7mAnbVADkkWZrgcXx97DCacueGtEU5G2HtqUv73UTS0EI0",
"testing100" * 10: "5rznDSIcDPd/9rjom6P/qkJGtJSV47y/u5+KlkILROaqQ6axhEyVIQTahuBYerLG",
}
@pytest.mark.parametrize(('password', 'pwhash'), TEST_HASHES.items())
def test_edw_hash(password, pwhash):
from encoded.edw_hash import EDWHash
assert EDWHash.hash(password) == pwhash
|
ENCODE-DCC/encoded
|
src/encoded/tests/test_edw_hash.py
|
Python
|
mit
| 479
|
from __future__ import absolute_import
import csv
from django.contrib import admin
from django.http import HttpResponse
from .models import Donor, Donation
class DonationInline(admin.TabularInline):
model = Donation
extra = 0
@admin.register(Donor)
class DonorAdmin(admin.ModelAdmin):
inlines = [
DonationInline
]
# date_hierarchy = 'last_donation'
actions_on_bottom = True
list_display = 'name', 'business', 'last_donation', 'last_amount'
search_fields = 'name', 'business', 'email', 'address'
@staticmethod
def last_donation(obj):
return obj.donation_set.latest().when
@staticmethod
def last_amount(obj):
return obj.donation_set.latest().amount
actions = []
def make_list(self, request, queryset):
response = HttpResponse(content_type="text/plain")
response['Content-Disposition'] = 'attachment; filename=donors.txt'
for donor in queryset:
if donor.email:
response.write("{} <{}>\n".format(donor.name, donor.email))
return response
make_list.short_description = "Create email list (plain text)"
actions.append(make_list)
def make_csv(self, request, queryset):
fields = ('name', 'business', 'email', 'phone', 'address', 'last_donation', 'notes')
response = HttpResponse(content_type="text/csv")
response['Content-Disposition'] = 'attachment; filename=donors.csv'
writer = csv.DictWriter(response, fields, extrasaction='ignore')
writer.writeheader()
for donor in queryset:
row = {"last_donation": self.last_donation(donor)}
row.update(vars(donor))
writer.writerow(row)
return response
make_csv.short_description = "Create CSV"
actions.append(make_csv)
@admin.register(Donation)
class DonationAdmin(admin.ModelAdmin):
date_hierarchy = 'when'
actions_on_bottom = True
list_display = 'donor', 'when', 'amount', 'memo'
search_fields = 'donor', 'memo'
actions = []
def make_csv(self, request, queryset):
fields = ('name', 'business', 'when', 'amount', 'memo')
response = HttpResponse(content_type="text/csv")
response['Content-Disposition'] = 'attachment; filename=donations.csv'
writer = csv.DictWriter(response, fields, extrasaction='ignore')
writer.writeheader()
for donation in queryset:
row = {
"name": donation.donor.name,
"business": donation.donor.business,
}
row.update(vars(donation))
writer.writerow(row)
return response
make_csv.short_description = "Create CSV"
actions.append(make_csv)
|
thrive-refugee/thrive-refugee
|
donors/admin.py
|
Python
|
mit
| 2,727
|
import copy
def execute(moves):
players = [
{'name': 'Xena', 'score': 0},
{'name': 'Ophelia', 'score': 0},
]
idx = 0
first_player = 0
draw_count = 0
move_count = 0
init_map = [[False]*3, [False]*3, [False]*3]
map = copy.deepcopy(init_map)
for move in moves:
move = int(move)
player_idx = (idx + first_player) % 2
player = players[player_idx]
idx += 1
row = (move - 1) // 3
column = (move - 1) % 3
move_count += 1
map[row][column] = 'X' if player_idx == first_player else 'O'
done = False
if (check_winning(map)):
done = True
draw_count = 0
players[player_idx]['score'] += 1
first_player = 0 if player_idx else 1
print("win " + str(player_idx))
elif move_count == 9:
done = True
draw_count += 1
print("draw")
if draw_count == 3:
print("three draws, resetting")
draw_count = 0
first_player = 0 if first_player else 1
if done:
idx = 0
print_map(map)
move_count = 0
map = copy.deepcopy(init_map)
print(players)
def print_map(map):
for row in map:
for column in row:
print(column if column else '.', end='')
print('')
print('')
def check_winning(map):
if map[1][1] and map[0][0] == map[1][1] == map[2][2]:
print("win diag 1")
return map[0][0]
if map[1][1] and map[0][2] == map[1][1] == map[2][0]:
print("win diag 2")
return map[0][2]
for i in range(0, 3):
if map[i][0] and map[i][0] == map[i][1] == map[i][2]:
print("win vertical " + str(i))
return map[i][0]
if map[0][i] and map[0][i] == map[1][i] == map[2][i]:
print("win horizontal " + str(i))
return map[0][i]
return None
execute(open("input/dec23").read())
|
matslindh/codingchallenges
|
knowit2017/23.py
|
Python
|
mit
| 2,089
|
import thoonk
from thoonk.feeds import SortedFeed
import unittest
from ConfigParser import ConfigParser
class TestLeaf(unittest.TestCase):
def setUp(self):
conf = ConfigParser()
conf.read('test.cfg')
if conf.sections() == ['Test']:
self.ps = thoonk.Thoonk(host=conf.get('Test', 'host'),
port=conf.getint('Test', 'port'),
db=conf.getint('Test', 'db'))
self.ps.redis.flushdb()
else:
print 'No test configuration found in test.cfg'
exit()
def test_10_basic_sorted_feed(self):
"""Test basic sorted feed publish and retrieve."""
l = self.ps.sorted_feed("sortedfeed")
self.assertEqual(l.__class__, SortedFeed)
l.publish("hi")
l.publish("bye")
l.publish("thanks")
l.publish("you're welcome")
r = l.get_ids()
v = l.get_items()
items = {'1': 'hi',
'2': 'bye',
'3': 'thanks',
'4': "you're welcome"}
self.assertEqual(r, ['1', '2', '3', '4'], "Sorted feed results did not match publish: %s." % r)
self.assertEqual(v, items, "Sorted feed items don't match: %s" % v)
def test_20_sorted_feed_before(self):
"""Test addding an item before another item"""
l = self.ps.sorted_feed("sortedfeed")
l.publish("hi")
l.publish("bye")
l.publish_before('2', 'foo')
r = l.get_ids()
self.assertEqual(r, ['1', '3', '2'], "Sorted feed results did not match: %s." % r)
def test_30_sorted_feed_after(self):
"""Test adding an item after another item"""
l = self.ps.sorted_feed("sortedfeed")
l.publish("hi")
l.publish("bye")
l.publish_after('1', 'foo')
r = l.get_ids()
self.assertEqual(r, ['1', '3', '2'], "Sorted feed results did not match: %s." % r)
def test_40_sorted_feed_prepend(self):
"""Test addding an item to the front of the sorted feed"""
l = self.ps.sorted_feed("sortedfeed")
l.publish("hi")
l.publish("bye")
l.prepend('bar')
r = l.get_ids()
self.assertEqual(r, ['3', '1', '2'],
"Sorted feed results don't match: %s" % r)
def test_50_sorted_feed_edit(self):
"""Test editing an item in a sorted feed"""
l = self.ps.sorted_feed("sortedfeed")
l.publish("hi")
l.publish("bye")
l.edit('1', 'bar')
r = l.get_ids()
v = l.get_item('1')
vs = l.get_items()
items = {'1': 'bar',
'2': 'bye'}
self.assertEqual(r, ['1', '2'],
"Sorted feed results don't match: %s" % r)
self.assertEqual(v, 'bar', "Items don't match: %s" % v)
self.assertEqual(vs, items, "Sorted feed items don't match: %s" % vs)
def test_60_sorted_feed_retract(self):
"""Test retracting an item from a sorted feed"""
l = self.ps.sorted_feed("sortedfeed")
l.publish("hi")
l.publish("bye")
l.publish("thanks")
l.publish("you're welcome")
l.retract('3')
r = l.get_ids()
self.assertEqual(r, ['1', '2', '4'],
"Sorted feed results don't match: %s" % r)
def test_70_sorted_feed_move_first(self):
"""Test moving items around in the feed."""
l = self.ps.sorted_feed('sortedfeed')
l.publish("hi")
l.publish("bye")
l.publish("thanks")
l.publish("you're welcome")
l.move_first('4')
r = l.get_ids()
self.assertEqual(r, ['4', '1', '2', '3'],
"Sorted feed results don't match: %s" % r)
def test_71_sorted_feed_move_last(self):
"""Test moving items around in the feed."""
l = self.ps.sorted_feed('sortedfeed')
l.publish("hi")
l.publish("bye")
l.publish("thanks")
l.publish("you're welcome")
l.move_last('2')
r = l.get_ids()
self.assertEqual(r, ['1', '3', '4', '2'],
"Sorted feed results don't match: %s" % r)
def test_72_sorted_feed_move_before(self):
"""Test moving items around in the feed."""
l = self.ps.sorted_feed('sortedfeed')
l.publish("hi")
l.publish("bye")
l.publish("thanks")
l.publish("you're welcome")
l.move_before('1', '2')
r = l.get_ids()
self.assertEqual(r, ['2', '1', '3', '4'],
"Sorted feed results don't match: %s" % r)
def test_73_sorted_feed_move_after(self):
"""Test moving items around in the feed."""
l = self.ps.sorted_feed('sortedfeed')
l.publish("hi")
l.publish("bye")
l.publish("thanks")
l.publish("you're welcome")
l.move_after('1', '4')
r = l.get_ids()
self.assertEqual(r, ['1', '4', '2', '3'],
"Sorted feed results don't match: %s" % r)
suite = unittest.TestLoader().loadTestsFromTestCase(TestLeaf)
|
andyet/thoonk.py
|
tests/test_sorted_feed.py
|
Python
|
mit
| 5,064
|
from mininet.net import Mininet
from ..util import findPyroObjectOrNone
def monkeypatch(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
def getOneNodeByName(net, name):
if name in net.nameToNode:
return net.nameToNode[name]
else:
return findPyroObjectOrNone(name)
@monkeypatch(Mininet)
def intakeNodes(self, networks):
for netName in networks:
net = findPyroObjectOrNone(netName)
if self.name == net.getName():
continue
print "-- Intake nodes from %s" % net.getName()
for h in net.getHosts():
self.nameToNode[h.getName()] = h
self.hosts.append(h)
for s in net.getSwitches():
self.nameToNode[s.getName()] = s
self.hosts.append(s)
@monkeypatch(Mininet)
def remoteExecute(self, code):
print "-- Executing: %s" % code
exec(code, {'net': self})
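# Usage sketch: the code string runs with `net` bound to this Mininet, e.g.
#   net.remoteExecute("print len(net.hosts)")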
|
itszero/onenet
|
emulab/onenet/onenet/inject/infect.py
|
Python
|
mit
| 867
|
'''
Geometry Transform
'''
|
luwei14/ShapeAnalyzer
|
shapeanalyzer/transform.py
|
Python
|
mit
| 27
|
import numpy as np
import scipy.cluster.hierarchy as hr
import scipy.spatial as spa
import clustering
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
import filter
class textMiningEac:
def __init__(self,k,N,low,high=0):
self.k = k
        # Read data from file [temporary]
        #data = np.genfromtxt('iris.data',delimiter=',')
        #temp= spa.distance.pdist(data,'euclidean')
        #self.D = spa.distance.squareform(temp)
        self.D,self.tweets,self.words,self.freq = filter.filtertweets()
        # Compute the co-association matrix
self.loadEAC(N,low,high)
def loadEAC(self,N,low,high=0):
"""
Genera de vuelta la matriz de coasociacion
"""
m,n = self.D.shape
coasocMatrix = clustering.EAC(self.D,N,low,high)
print(coasocMatrix)
self.EAC_D = np.ones(n) - coasocMatrix
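        # EAC note: the co-association matrix counts how often each pair of
        # points is co-clustered across the N runs, so 1 - coassoc behaves
        # like a distance matrix for the final clustering step.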
def startPAM(self):
"""
Hace sobre PAM sobre la matriz de distancia del EAC
"""
(a,b,self.labels) = clustering.PAM(self.EAC_D, self.k,True)
return self.labels
def startHierarchical(self):
"""
Hace clustering Jerarquico sobre la matriz de distancia del EAC
"""
z = AgglomerativeClustering(n_clusters=self.k, linkage='ward').fit(self.EAC_D)
self.labels = z.labels_
return self.labels
def getClustersTweets(self):
"""
Obtiene clusters en relacion a la frecuencia de aparicion de las palabras
"""
labelsTweets = np.zeros(len(self.tweets),dtype=np.int)
for i in range(len(self.tweets)):
acum = np.zeros(2)
for j in range(len(self.labels)):
                # If the word appears in the tweet
                if(self.words[j] in self.tweets[i]):
                    # Accumulate its frequency under that word's cluster index
                    acum[self.labels[j]] += self.freq[j]
            # Assign the cluster with the highest accumulated frequency
labelsTweets[i] = np.argmax(acum)
lista = labelsTweets.tolist()
try:
saveFile = open('clustered.csv','w')
for i in range(len(self.tweets)):
saveFile.write(str(lista[i])+': '+' '.join(self.tweets[i])+'\n')
saveFile.close()
except Exception as e:
print("error: {0}".format(e))
return labelsTweets
def getPrecisionIris(self):
"""
Metodo de prueba
Calcula una precision de acierto. No es fiable.
"""
#Lee los cluster originales
originalClusters = np.genfromtxt('orCL.data',delimiter=',',dtype=None)
results ={}
j=0
for i in range(50,151,50):
            # Find the most frequent cluster label in this block
            unique, counts = np.unique(self.labels[i-50:i], return_counts=True)
print(unique)
print(counts)
maxvalue = np.amax(counts)
results[j]=maxvalue/50
j=j+1
print("Setosa= " + '%.2f' % results[0] + "\nVersicolor= " + '%.2f' % results[1] + "\nVirginica= " + '%.2f' % results[2])
def getSilhouette(self):
"""
Grafica silhouette
"""
clustering.Silhouette(self.D,self.labels,self.k)
|
fbr1/textmining-eac
|
main.py
|
Python
|
mit
| 3,609
|
#!/usr/bin/env python
#Copyright (C) 2013 by Thomas Keane (tk2@sanger.ac.uk)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import os
import re
import subprocess
import time
import sys
import random
import string
from Queue import Queue, Empty
from threading import Thread
from datetime import date
from sonLib.bioio import logger
from sonLib.bioio import system
from jobTree.batchSystems.abstractBatchSystem import AbstractBatchSystem
from jobTree.src.master import getParasolResultsFileName
class MemoryString:
def __init__(self, string):
if string[-1] == 'K' or string[-1] == 'M' or string[-1] == 'G':
self.unit = string[-1]
self.val = float(string[:-1])
else:
self.unit = 'B'
self.val = float(string)
self.bytes = self.byteVal()
def __str__(self):
if self.unit != 'B':
return str(self.val) + self.unit
else:
return str(self.val)
def byteVal(self):
if self.unit == 'B':
return self.val
elif self.unit == 'K':
return self.val * 1000
elif self.unit == 'M':
return self.val * 1000000
elif self.unit == 'G':
return self.val * 1000000000
def __cmp__(self, other):
return cmp(self.bytes, other.bytes)
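# e.g. MemoryString("1.5G").bytes == 1.5e9; comparisons use the byte value,
# so MemoryString("900M") < MemoryString("1G").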
def prepareBsub(cpu, mem):
mem = '' if mem is None else '-R "select[type==X86_64 && mem > ' + str(int(mem/ 1000000)) + '] rusage[mem=' + str(int(mem/ 1000000)) + ']" -M' + str(int(mem/ 1000000)) + '000'
cpu = '' if cpu is None else '-n ' + str(int(cpu))
bsubline = ["bsub", mem, cpu,"-cwd", ".", "-o", "/dev/null", "-e", "/dev/null"]
return bsubline
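# For example (illustrative values), prepareBsub(2, 4e9) produces a command line
# roughly equivalent to:
#   bsub -R "select[type==X86_64 && mem > 4000] rusage[mem=4000]" -M4000000 -n 2 -cwd . -o /dev/null -e /dev/null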
def bsub(bsubline):
process = subprocess.Popen(" ".join(bsubline), shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
liney = process.stdout.readline()
logger.info("BSUB: " + liney)
result = int(liney.strip().split()[1].strip('<>'))
logger.debug("Got the job id: %s" % (str(result)))
return result
def getjobexitcode(lsfJobID):
job, task = lsfJobID
#first try bjobs to find out job state
args = ["bjobs", "-l", str(job)]
logger.info("Checking job exit code for job via bjobs: " + str(job))
process = subprocess.Popen(" ".join(args), shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
started = 0
for line in process.stdout:
if line.find("Done successfully") > -1:
logger.info("bjobs detected job completed for job: " + str(job))
return 0
elif line.find("Completed <exit>") > -1:
logger.info("bjobs detected job failed for job: " + str(job))
return 1
elif line.find("New job is waiting for scheduling") > -1:
logger.info("bjobs detected job pending scheduling for job: " + str(job))
return None
elif line.find("PENDING REASONS") > -1:
logger.info("bjobs detected job pending for job: " + str(job))
return None
elif line.find("Started on ") > -1:
started = 1
if started == 1:
logger.info("bjobs detected job started but not completed: " + str(job))
return None
#if not found in bjobs, then try bacct (slower than bjobs)
logger.info("bjobs failed to detect job - trying bacct: " + str(job))
args = ["bacct", "-l", str(job)]
logger.info("Checking job exit code for job via bacct:" + str(job))
process = subprocess.Popen(" ".join(args), shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
for line in process.stdout:
if line.find("Completed <done>") > -1:
logger.info("Detected job completed for job: " + str(job))
return 0
elif line.find("Completed <exit>") > -1:
logger.info("Detected job failed for job: " + str(job))
return 1
logger.info("Cant determine exit code for job or job still running: " + str(job))
return None
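# Return contract for getjobexitcode: 0 for a job that completed successfully,
# 1 for a failed job, and None while the job is still pending/running or its
# state cannot be determined yet.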
class Worker(Thread):
def __init__(self, newJobsQueue, updatedJobsQueue, boss):
Thread.__init__(self)
self.newJobsQueue = newJobsQueue
self.updatedJobsQueue = updatedJobsQueue
self.currentjobs = list()
self.runningjobs = set()
self.boss = boss
def run(self):
while True:
# Load new job ids:
while not self.newJobsQueue.empty():
self.currentjobs.append(self.newJobsQueue.get())
# Launch jobs as necessary:
while len(self.currentjobs) > 0:
jobID, bsubline = self.currentjobs.pop()
lsfJobID = bsub(bsubline)
self.boss.jobIDs[(lsfJobID, None)] = jobID
self.boss.lsfJobIDs[jobID] = (lsfJobID, None)
self.runningjobs.add((lsfJobID, None))
# Test known job list
for lsfJobID in list(self.runningjobs):
exit = getjobexitcode(lsfJobID)
if exit is not None:
self.updatedJobsQueue.put((lsfJobID, exit))
self.runningjobs.remove(lsfJobID)
time.sleep(10)
class LSFBatchSystem(AbstractBatchSystem):
"""The interface for running jobs on lsf, runs all the jobs you
give it as they come in, but in parallel.
"""
@classmethod
def getDisplayNames(cls):
"""
Names used to select this batch system.
"""
return ["lsf","LSF"]
def __init__(self, config, maxCpus, maxMemory):
AbstractBatchSystem.__init__(self, config, maxCpus, maxMemory) #Call the parent constructor
self.lsfResultsFile = getParasolResultsFileName(config.attrib["job_tree"])
#Reset the job queue and results (initially, we do this again once we've killed the jobs)
self.lsfResultsFileHandle = open(self.lsfResultsFile, 'w')
self.lsfResultsFileHandle.close() #We lose any previous state in this file, and ensure the files existence
self.currentjobs = set()
self.obtainSystemConstants()
self.jobIDs = dict()
self.lsfJobIDs = dict()
self.nextJobID = 0
self.newJobsQueue = Queue()
self.updatedJobsQueue = Queue()
self.worker = Worker(self.newJobsQueue, self.updatedJobsQueue, self)
self.worker.setDaemon(True)
self.worker.start()
    def __del__(self):
#Closes the file handle associated with the results file.
self.lsfResultsFileHandle.close() #Close the results file, cos were done.
def issueJob(self, command, memory, cpu):
jobID = self.nextJobID
self.nextJobID += 1
self.currentjobs.add(jobID)
bsubline = prepareBsub(cpu, memory) + [command]
self.newJobsQueue.put((jobID, bsubline))
logger.info("Issued the job command: %s with job id: %s " % (command, str(jobID)))
return jobID
def getLsfID(self, jobID):
        if jobID not in self.lsfJobIDs:
            raise RuntimeError("Unknown jobID, could not be converted")
(job,task) = self.lsfJobIDs[jobID]
if task is None:
return str(job)
else:
return str(job) + "." + str(task)
def killJobs(self, jobIDs):
"""Kills the given job IDs.
"""
for jobID in jobIDs:
logger.info("DEL: " + str(self.getLsfID(jobID)))
self.currentjobs.remove(jobID)
process = subprocess.Popen(["bkill", self.getLsfID(jobID)])
del self.jobIDs[self.lsfJobIDs[jobID]]
del self.lsfJobIDs[jobID]
toKill = set(jobIDs)
while len(toKill) > 0:
for jobID in list(toKill):
if getjobexitcode(self.lsfJobIDs[jobID]) is not None:
toKill.remove(jobID)
if len(toKill) > 0:
logger.critical("Tried to kill some jobs, but something happened and they are still going, so I'll try again")
time.sleep(5)
def getIssuedJobIDs(self):
"""A list of jobs (as jobIDs) currently issued (may be running, or maybe
just waiting).
"""
return self.currentjobs
def getRunningJobIDs(self):
"""Gets a map of jobs (as jobIDs) currently running (not just waiting)
and a how long they have been running for (in seconds).
"""
times = {}
currentjobs = set(self.lsfJobIDs[x] for x in self.getIssuedJobIDs())
process = subprocess.Popen(["bjobs"], stdout = subprocess.PIPE)
        for currline in process.stdout:
            items = currline.strip().split()
            if len(items) > 9 and items[0].isdigit() and (int(items[0]), None) in currentjobs and items[2] == 'RUN':
                jobstart = "/".join(items[7:9]) + '/' + str(date.today().year)
                jobstart = jobstart + ' ' + items[9]
                jobstart = time.mktime(time.strptime(jobstart, "%b/%d/%Y %H:%M"))
                times[self.jobIDs[(int(items[0]), None)]] = time.time() - jobstart
return times
def getUpdatedJob(self, maxWait):
i = None
try:
sgeJobID, retcode = self.updatedJobsQueue.get(timeout=maxWait)
self.updatedJobsQueue.task_done()
i = (self.jobIDs[sgeJobID], retcode)
self.currentjobs -= set([self.jobIDs[sgeJobID]])
except Empty:
pass
return i
def getWaitDuration(self):
"""We give parasol a second to catch its breath (in seconds)
"""
#return 0.0
return 15
def getRescueJobFrequency(self):
"""Parasol leaks jobs, but rescuing jobs involves calls to parasol list jobs and pstat2,
making it expensive. We allow this every 10 minutes..
"""
return 1800
def obtainSystemConstants(self):
p = subprocess.Popen(["lshosts"], stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
line = p.stdout.readline()
items = line.strip().split()
num_columns = len(items)
cpu_index = None
mem_index = None
for i in range(num_columns):
if items[i] == 'ncpus':
cpu_index = i
elif items[i] == 'maxmem':
mem_index = i
if cpu_index is None or mem_index is None:
RuntimeError("lshosts command does not return ncpus or maxmem columns")
p.stdout.readline()
self.maxCPU = 0
self.maxMEM = MemoryString("0")
for line in p.stdout:
items = line.strip().split()
if len(items) < num_columns:
RuntimeError("lshosts output has a varying number of columns")
            if items[cpu_index] != '-' and int(items[cpu_index]) > self.maxCPU:
                self.maxCPU = int(items[cpu_index])
if items[mem_index] != '-' and MemoryString(items[mem_index]) > self.maxMEM:
self.maxMEM = MemoryString(items[mem_index])
        if self.maxCPU == 0 or self.maxMEM.bytes == 0:
            raise RuntimeError("lshosts returns null ncpus or maxmem info")
        logger.info("Got the maxCPU: %s and the maxMEM: %s" % (self.maxCPU, self.maxMEM))
def main():
pass
def _test():
import doctest
return doctest.testmod()
if __name__ == '__main__':
_test()
main()
|
harvardinformatics/jobTree
|
batchSystems/lsf.py
|
Python
|
mit
| 12,569
|
# -*- coding: utf-8 -*-
from django.conf.urls import re_path
from . import views
app_name = 'topic'
urlpatterns = [
re_path(r'^$', views.deleted, name='index'),
re_path(r'^deleted/$', views.deleted, name='deleted'),
re_path(r'^closed/$', views.closed, name='closed'),
re_path(r'^pinned/$', views.pinned, name='pinned'),
]
|
nitely/Spirit
|
spirit/topic/admin/urls.py
|
Python
|
mit
| 342
|
class NoResultScraped(Exception):
pass
class NotCompleteParse(Exception):
pass
class CouldNotAuthorize(Exception):
pass
|
pddg/qkouserver
|
qkoubot/my_exception.py
|
Python
|
mit
| 136
|
"""
WSGI config for c2asm project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "c2asm.settings")
application = get_wsgi_application()
|
vladimir-nazarenko/c2asm
|
c2asm/wsgi.py
|
Python
|
mit
| 388
|
from datetime import datetime
from xml.etree import ElementTree
import pkgutil
from json import loads as base_loads
from random import choice
import logging
import re
import urlparse
from sleekxmpp import ClientXMPP
from redis import Redis, ConnectionPool
import requests
from humanize import intcomma, naturaltime, intword
from pyzkb import ZKillboard
from eveapi import EVEAPIConnection
from dropbot.map import Map, base_range, ship_class_to_range
from dropbot.utils import EVEAPIRedisCache
from dropbot.stomp_listener import ZKillboardStompListener
urlparse.uses_netloc.append("redis")
zkillboard_regex = re.compile(r'http(s|):\/\/(?P<host>.*)\/kill\/(?P<killID>\d+)\/')
class UnknownCommandException(Exception):
pass
class DropBot(ClientXMPP):
def __init__(self, *args, **kwargs):
self.rooms = kwargs.pop('rooms', [])
self.nickname = kwargs.pop('nickname', 'Dropbot')
self.cmd_prefix = kwargs.pop('cmd_prefix', '!')
self.kos_url = kwargs.pop('kos_url', 'http://kos.cva-eve.org/api/')
self.hidden_commands = ['cmd_prefix']
self.last_killdate = datetime.utcnow()
self.kill_corps = [int(x) for x in kwargs.pop('kill_corps', [])]
self.kills_disabled = kwargs.pop('kills_disabled', '0') == '1'
self.kills_muted = False
self.office_api_key_keyid = kwargs.pop('office_api_keyid', None)
self.office_api_key_vcode = kwargs.pop('office_api_vcode', None)
self.market_systems = kwargs.pop('market_systems', ['Jita', 'Amarr', 'Rens', 'Dodixie', 'Hek'])
if 'redis_url' in kwargs:
self.redis_pool = ConnectionPool.from_url(kwargs.pop('redis_url', 'redis://localhost:6379/0'))
self.redis = Redis(connection_pool=self.redis_pool)
else:
logging.warning('No DROPBOT_REDIS_URL defined, EVE API calls will not be cached!')
self.redis = None
self.map = Map.from_json(pkgutil.get_data('dropbot', 'data/map.json'))
jid = kwargs.pop('jid', None)
password = kwargs.pop('password', None)
super(DropBot, self).__init__(jid, password)
self.register_plugin('xep_0030') # Service Discovery
self.register_plugin('xep_0045') # Multi-User Chat
self.register_plugin('xep_0199') # XMPP Ping
# Basic bot auto config
self.auto_subscribe = False
self.auto_authorize = True
# Handlers
self.add_event_handler('session_start', self.handle_session_start)
self.add_event_handler('message', self.handle_message)
# Reference Data
@property
def types(self):
if not hasattr(self, '_types'):
data = pkgutil.get_data('dropbot', 'data/types.json')
self._types = base_loads(data)
return self._types
@property
def stations(self):
if not hasattr(self, '_stations'):
data = pkgutil.get_data('dropbot', 'data/stations.json')
self._stations = base_loads(data)
logging.debug('Getting ConquerableStationList')
for x in self.get_eveapi().eve.ConquerableStationList().outposts:
self._stations[unicode(x.stationID)] = x.solarSystemID
return self._stations
# Command / Connection Handling
def handle_session_start(self, event):
self.get_roster()
self.send_presence()
# Join the defined MUC rooms
for room in self.rooms:
self.plugin['xep_0045'].joinMUC(room, self.nickname, wait=True)
# Start the killchecker if we have corps to monitor
if len(self.kill_corps) > 0 and not self.kills_disabled:
logging.info('Starting ZKB Stomp monitor for corps: {}'.format(', '.join(self.kill_corps)))
self.stomp = ZKillboardStompListener(self)
self.stomp.connect('tcp://eve-kill.net:61613')
else:
logging.info('Kill monitoring disabled.')
def call_command(self, command, *args, **kwargs):
if hasattr(self, 'cmd_%s' % command):
try:
resp = getattr(self, 'cmd_%s' % command)(*args, **kwargs)
except:
resp = 'Oops, something went wrong...'
logging.getLogger(__name__).exception('Error handling command')
if resp:
if isinstance(resp, tuple) and len(resp) == 2:
return resp
else:
return resp, None
else:
return None, None
else:
raise UnknownCommandException
def handle_message(self, msg):
args = msg['body'].split(' ')
cmd = args[0].lower()
args.pop(0)
if msg['type'] == 'groupchat':
if msg['mucnick'] == self.nickname:
return
if msg['body'][0] != self.cmd_prefix:
# If its not a command, check for ZKB urls
seen = set([])
response_lines = []
for match in zkillboard_regex.finditer(msg['body']):
kill_id = match.groupdict()['killID']
host = match.groupdict()['host']
logging.info('Found Kill ID {}'.format(kill_id))
if kill_id in seen:
continue
body, html = self.call_command('kill', [kill_id], msg, no_url=True, host=host)
response_lines.append(body)
seen.add(kill_id)
response_lines = [x for x in response_lines if x]
if len(response_lines):
msg.reply('\n'.join(response_lines)).send()
return
# Strip the cmd_prefix
cmd = cmd[1:]
# Call the command
try:
body, html = self.call_command(cmd, args, msg)
except UnknownCommandException:
if msg['type'] != 'groupchat':
msg.reply('Unknown command, use "help" to list all commands available').send()
else:
if body:
msg.reply(body).send()
# Helpers
def _system_picker(self, name):
systems = self.map.get_systems(name)
if len(systems) > 1:
if len(systems) > 10:
return 'More than 10 systems match {}, please provide a more complete name'.format(name)
return 'Did you mean: {}?'.format(', '.join([self.map.get_system_name(x) for x in systems]))
elif len(systems) == 0:
return 'No systems found matching {}'.format(name)
else:
return systems[0]
def _item_picker(self, item):
if item.strip() == '':
return 'Usage: !price <item>'
if item.lower() == 'plex':
return (u"29668", u"30 Day Pilot's License Extension (PLEX)")
types = dict([(i, v) for i, v in self.types.iteritems() if item.lower() in v.lower()])
if len(types) == 0:
return "No items named {} found".format(item)
elif len(types) > 1:
for i, v in types.iteritems():
if item.lower() == v.lower():
return (i, v)
else:
if len(types) > 10:
return "More than 10 items found, please narrow down what you want."
return "Did you mean: {}?".format(
', '.join(types.itervalues())
)
return types.popitem()
def _get_evecentral_price(self, type_id, system_id):
try:
resp = requests.get('http://api.eve-central.com/api/marketstat?typeid={}&usesystem={}'.format(type_id, system_id))
root = ElementTree.fromstring(resp.content)
except:
return None
return (float(root.findall("./marketstat/type[@id='{}']/sell/min".format(type_id))[0].text),
float(root.findall("./marketstat/type[@id='{}']/buy/max".format(type_id))[0].text))
def _system_price(self, args, msg, system, system_id):
item = ' '.join(args)
res = self._item_picker(item)
if isinstance(res, basestring):
return res
type_id, type_name = res
try:
resp = requests.get('http://api.eve-central.com/api/marketstat?typeid={}&usesystem={}'.format(type_id, system_id))
root = ElementTree.fromstring(resp.content)
except:
return "An error occurred tying to get the price for {}".format(type_name)
return "{} @ {} | Sell: {} | Buy: {}".format(
type_name,
system,
intcomma(float(root.findall("./marketstat/type[@id='{}']/sell/min".format(type_id))[0].text)),
intcomma(float(root.findall("./marketstat/type[@id='{}']/buy/max".format(type_id))[0].text)),
)
def _get_offices(self, keyid, vcode):
"""Returns a list of offices from a Corp API key"""
        logging.debug('Retrieving offices for {}/{}'.format(keyid, vcode))
if not keyid or not vcode:
return []
try:
assets = self.get_eveapi_auth(keyid, vcode).corp.AssetList()
except RuntimeError:
logging.exception('Unable to retrieve asset listing for {}/{}'.format(keyid, vcode))
return []
def location_to_station(location_id):
if location_id >= 67000000:
return location_id - 6000000
if location_id >= 66000000:
return location_id - 6000001
return location_id
return [self.stations[unicode(location_to_station(x.locationID))] for x in assets.assets if x.typeID == 27]
def get_eveapi(self):
if self.redis:
return EVEAPIConnection(cacheHandler=EVEAPIRedisCache(self.redis))
return EVEAPIConnection()
def get_eveapi_auth(self, keyid, vcode):
return self.get_eveapi().auth(keyID=keyid, vCode=vcode)
def check_eveapi_permission(self, keyid, vcode, bit):
try:
accessmask = int(self.get_eveapi_auth(keyid, vcode).account.APIKeyInfo().key.accessMask)
logging.debug('Key ID {} - Access Mask: {}'.format(keyid, accessmask))
except RuntimeError:
return False
mask = 1 << bit
return (accessmask & mask) > 0
# Commands
def cmd_help(self, args, msg):
if len(args) == 0:
if msg['type'] == 'groupchat':
return "Commands: {}\nAll commands are available in private chat without the {} prefix".format(
', '.join([self.cmd_prefix + x[4:] for x in dir(self) if x[:4] == 'cmd_' and x not in self.hidden_commands]),
self.cmd_prefix
)
else:
command_lines = ['{}{}: {}'.format(self.cmd_prefix, cmd[4:], getattr(self, cmd).__doc__ or 'No documentation available') for cmd in dir(self) if cmd[:4] == 'cmd_' and cmd not in self.hidden_commands]
return "Available Commands\n\n{}".format('\n'.join(command_lines))
cmd = args[0]
if hasattr(self, 'cmd_%s' % cmd):
if getattr(self, 'cmd_%s' % cmd).__doc__ is not None:
return '{}{}: {}'.format(
self.cmd_prefix,
cmd,
getattr(self, 'cmd_%s' % cmd).__doc__
)
else:
return 'This command has no documentation'
else:
return 'Unknown command'
def cmd_bestprice(self, args, msg):
"""Returns the best price for an item out of the current known market hub systems"""
item = ' '.join(args)
res = self._item_picker(item)
if isinstance(res, basestring):
return res
type_id, type_name = res
min_sell = 0
max_buy = 0
sell_sys = None
buy_sys = None
for name in self.market_systems:
sys_id = self.map.get_system_id(name)
if not sys_id:
continue
sell, buy = self._get_evecentral_price(type_id, sys_id)
if (sell < min_sell or min_sell == 0) and sell > 0:
min_sell = sell
sell_sys = name
if buy > max_buy:
max_buy = buy
buy_sys = name
return '{}\nBest Sell: {} @ {} ISK\nBest Buy: {} @ {} ISK'.format(
type_name,
sell_sys, intcomma(min_sell),
buy_sys, intcomma(max_buy)
)
def cmd_price(self, args, msg):
"""Returns the price of an item in a particular system"""
if len(args) < 2:
return '!price <system name> <item>'
item = ' '.join(args[1:])
system_id = self._system_picker(args[0])
if isinstance(system_id, basestring):
return system_id
item = self._item_picker(item)
if isinstance(item, basestring):
return item
type_id, type_name = item
sell, buy = self._get_evecentral_price(type_id, system_id)
        return '{} @ {} | Sell: {} | Buy: {}'.format(
type_name,
self.map.get_system_name(system_id),
intcomma(sell),
intcomma(buy)
)
def cmd_jita(self, args, msg):
"""Returns the price of a item in Jita"""
return self.cmd_price(['Jita'] + args, msg)
def cmd_amarr(self, args, msg):
"""Returns the price of a item in Amarr"""
return self.cmd_price(['Amarr'] + args, msg)
def cmd_rens(self, args, msg):
"""Returns the price of a item in Rens"""
return self.cmd_price(['Rens'] + args, msg)
def cmd_dodixie(self, args, msg):
"""Returns the price of a item in Dodixie"""
return self.cmd_price(['Dodixie'] + args, msg)
def cmd_hek(self, args, msg):
"""Returns the price of a item in Hek"""
return self.cmd_price(['Hek'] + args, msg)
def cmd_r(self, args, msg):
return self.cmd_redditimg(args, msg)
def cmd_redditimg(self, args, msg):
"""Shows a random picture from imgur.com reddit section"""
if len(args) == 0:
return "Usage: !redditimg <subreddit>"
imgs = []
for page in range(1, 11):
for img in requests.get("http://imgur.com/r/%s/top/all/page/%s.json" % (args[0], page)).json()['data']:
resp = "%s - http://i.imgur.com/%s%s" % (img['title'], img['hash'], img['ext'])
if img['nsfw']:
resp = resp + " :nsfw:"
imgs.append(resp)
if len(imgs):
return choice(imgs)
def cmd_kos(self, args, msg):
"""Checks the CVA KOS list for a name"""
arg = ' '.join(args)
resp = requests.get(self.kos_url, params={
'c': 'json',
'q': arg,
'type': 'unit',
'details': None
})
if resp.status_code != requests.codes.ok:
return "Something went wrong (Error %s)" % resp.status_code
try:
data = resp.json()
except:
return "KOS API returned invalid data."
if data['message'] != 'OK':
return "KOS API returned an error."
if data['total'] == 0:
return "KOS returned no results (Not on KOS)"
results = []
for result in data['results']:
text = '{} ({}) - {}'.format(
result['label'],
result['type'],
'KOS' if result['kos'] else 'Not KOS'
)
results.append(text)
return '\n'.join(results)
def cmd_range(self, args, msg):
"""Returns a count of the number of systems in jump range from a source system"""
if len(args) == 0 or len(args) > 2:
return '!range <system> <ship class>'
system = args[0]
if len(args) == 2:
ship_class = args[1].lower()
else:
ship_class = 'blackops'
if ship_class not in base_range.keys():
return 'Unknown class {}, please use one of: {}'.format(
ship_class,
', '.join(base_range.keys())
)
system_id = self._system_picker(system)
if isinstance(system_id, basestring):
return system_id
res = {}
systems = self.map.neighbors_jump(system_id, ship_class=ship_class)
for sys, range in systems:
if sys['region'] in res:
res[sys['region']] += 1
else:
res[sys['region']] = 1
return '{} systems in JDC5 {} range of {}:\n'.format(len(systems), ship_class, self.map.get_system_name(system_id)) + '\n'.join(['{} - {}'.format(x, y) for x, y in res.items()])
def cmd_route(self, args, msg):
"""Shows the shortest route between two sytems"""
if len(args) != 2:
return '!route <source> <destination>'
source, dest = args
source = self._system_picker(source)
if isinstance(source, basestring):
return source
dest = self._system_picker(dest)
if isinstance(dest, basestring):
return dest
route = self.map.route_gate(source, dest)
route_names = ' -> '.join(['{} ({})'.format(x['name'], round(x['security'], 2)) for x in [self.map.node[y] for y in route]])
return '{} jumps from {} to {}\n{}'.format(
len(route)-1,
self.map.get_system_name(source),
self.map.get_system_name(dest),
route_names
)
def cmd_addjb(self, args, msg):
"""Adds a jumpbridge to the internal map for routing purposes"""
if len(args) != 2:
return '!addjb <source> <destination>'
source, dest = args
source = self._system_picker(source)
if isinstance(source, basestring):
return source
dest = self._system_picker(dest)
if isinstance(dest, basestring):
return dest
self.map.add_jumpbridge(source, dest)
return "Done"
def cmd_listjbs(self, args, msg):
"""List all known jumpbridges stored in the map"""
resp_lines = []
for u, v, d in self.map.edges_iter(data=True):
if d['link_type'] == 'bridge':
line = '{} <-> {} ({}ly)'.format(
self.map.get_system_name(u),
self.map.get_system_name(v),
round(self.map.system_distance(u, v), 2),
)
resp_lines.append(line)
return '\n'.join(resp_lines)
def cmd_mapstats(self, args, msg):
"""Gives the current overview of the internal map"""
return '{} systems, {} gate jumps, {} jump bridges'.format(
len(self.map.nodes()),
len([u for u, v, d in self.map.edges_iter(data=True) if d['link_type'] == 'gate']),
len([u for u, v, d in self.map.edges_iter(data=True) if d['link_type'] == 'bridge'])
)
def cmd_hit(self, args, msg):
"""Details what class and JDC level is required to jump between two systems"""
if len(args) != 2:
return '!hit <source> <destination>'
source, dest = args
source = self._system_picker(source)
if isinstance(source, basestring):
return source
dest = self._system_picker(dest)
if isinstance(dest, basestring):
return dest
if self.map.node[dest]['security'] >= 0.5:
return '{} is a highsec system'.format(self.map.get_system_name(dest))
ly = self.map.system_distance(source, dest)
if ly > 6.5 * (1 + (0.25 * 5)):
return '{} to {} is greater than {}ly (maximum jump range of all ships)'.format(
self.map.get_system_name(source),
self.map.get_system_name(dest),
6.5 * (1 + (0.25 * 5))
)
res = []
for ship_class in base_range.keys():
res1 = []
for skill in [4, 5]:
if ship_class_to_range(ship_class, skill) >= ly:
res1.append('JDC{}'.format(skill))
if len(res1):
res.append('{}: {}'.format(ship_class, ', '.join(res1)))
return '{} -> {} ({}ly) Capable Ship Types:\n{}'.format(
self.map.get_system_name(source),
self.map.get_system_name(dest),
round(ly, 2),
'\n'.join(res)
)
def cmd_jump(self, args, msg):
"""Calculates the shortest jump route between two systems"""
if len(args) < 2:
return '!jump <source> <destination> (<ship class> <jdc level> <jfc level>)'
elif len(args) == 2:
source, dest = args
ship_class = 'blackops'
jdc = jfc = 5
elif len(args) == 3:
source, dest, ship_class = args
jdc = jfc = 5
elif len(args) == 4:
source, dest, ship_class, jdc = args
jfc = 5
        else:
            source, dest, ship_class, jdc, jfc = args
        jf = 5
source = self._system_picker(source)
if isinstance(source, basestring):
return source
dest = self._system_picker(dest)
if isinstance(dest, basestring):
return dest
if ship_class not in base_range.keys():
return 'Unknown class {}, please use one of: {}'.format(
ship_class,
', '.join(base_range.keys())
)
try:
int(jdc)
int(jfc)
except ValueError:
return 'Invalid JDC/JFC level'
route = self.map.route_jump(source, dest, ship_class=ship_class)
if len(route):
return '{} to {} ({}/{}/{}), {} jumps ({}ly / {} isotopes):\n{}'.format(
self.map.get_system_name(source),
self.map.get_system_name(dest),
ship_class,
jdc,
jfc,
len(route)-1,
round(self.map.route_jump_distance(route), 2),
round(self.map.route_jump_isotopes(route, int(jfc), ship_class=ship_class, jf_skill=jf), 0),
' -> '.join([self.map.get_system_name(x) for x in route])
)
else:
return 'No route found'
def cmd_id(self, args, msg):
"""Provides an overview of a character's activity in-game"""
if len(args) == 0:
return '!id <character name>'
char_name = ' '.join(args)
result = self.get_eveapi().eve.CharacterID(names=char_name.strip())
char_name = result.characters[0].name
char_id = result.characters[0].characterID
if char_id == 0:
return 'Unknown character {}'.format(char_name)
headers, res = ZKillboard().characterID(char_id).kills().pastSeconds(60 * 60 * 24 * 7).get()
from collections import defaultdict, Counter
kill_types = defaultdict(int)
ship_types = defaultdict(int)
alli_assoc = defaultdict(int)
sum_value = 0.0
for kill in res:
kill_type_id = int(kill['victim']['shipTypeID'])
if kill_type_id > 0:
kill_types[self.types[unicode(kill_type_id)]] += 1
sum_value += float(kill['zkb']['totalValue'])
for attk in kill['attackers']:
                if attk['allianceName'] is not None and attk['allianceName'].strip() != '':
alli_assoc[attk['allianceName']] += 1
if int(attk['characterID']) == char_id:
ship_type_id = int(attk['shipTypeID'])
if ship_type_id > 0:
ship_types[self.types[unicode(ship_type_id)]] += 1
break
if len(res) == 0:
return '{} has had no kills in the last week'.format(char_name)
kill_types = Counter(kill_types).most_common(5)
ship_types = Counter(ship_types).most_common(5)
alli_assoc = Counter(alli_assoc).most_common(5)
return '{}, {} kill(s) ({} ISK) in the last week\nActive Systems: {}\nTop 5 Killed Types: {}\nTop 5 Ship: {}\nTop 5 Associates: {}'.format(
char_name,
len(res),
intcomma(sum_value),
', '.join(set([self.map.node[int(x['solarSystemID'])]['name'] for x in res])),
', '.join(['{} ({})'.format(x, y) for x, y in kill_types]),
', '.join(['{} ({})'.format(x, y) for x, y in ship_types]),
', '.join([x for x, y in alli_assoc])
)
def cmd_kill(self, args, msg, no_url=False, raw=None, host=None):
"""Returns a summary of a zKillboard killmail"""
if not raw:
if len(args) == 0:
return '!kill <Kill ID/zKillboard URL>'
kill_id = args[0]
try:
kill_id = int(kill_id)
except ValueError:
m = zkillboard_regex.match(kill_id)
if m:
kill_id = m.groupdict()['killID']
host = m.groupdict()['host']
else:
return 'Invalid kill ID'
headers, data = ZKillboard(base_url='https://{}/api/'.format(host)).killID(kill_id).get()
kill = data[0]
else:
kill = raw
kill_id = raw['killID']
if no_url:
url = ''
else:
url = ' - https://{}/kill/{}/'.format(host, kill_id)
# Ignore kills over an hour old if they're from stomp
age = (datetime.utcnow() - datetime.strptime(kill['killTime'], '%Y-%m-%d %H:%M:%S'))
if age.total_seconds() > 60 * 60 and raw:
return
# Drop kills less than 1mil if they've come from stomp
if raw and float(kill['zkb']['totalValue']) < 1000000:
return
if 'zkb' in kill and 'totalValue' in kill['zkb']:
value_lost = intword(float(kill['zkb']['totalValue']))
else:
value_lost = '???'
return '{} ({}) in {}, {}, {} attacker(s), {} ISK lost{}'.format(
kill['victim']['characterName'],
self.types[unicode(kill['victim']['shipTypeID'])],
self.map.node[int(kill['solarSystemID'])]['name'],
naturaltime(age),
len(kill['attackers']),
value_lost,
url,
)
def cmd_mute(self, args, msg):
"""Mutes killmail broadcast for 30 minutes"""
self.kills_muted = True
def unmute(self):
self.kills_muted = False
self.schedule('unmute', 30 * 60, unmute, [self])
return 'Killmails muted, posting will resume automatically in 30 minutes'
def cmd_nearestoffice(self, args, msg):
if len(args) != 1:
return '!nearestoffice <system>'
source = args[0]
if not self.office_api_key_keyid or not self.office_api_key_vcode:
return 'No Corp API key is setup'
if not self.check_eveapi_permission(self.office_api_key_keyid, self.office_api_key_vcode, 1):
return "The API key setup doesn't have the correct permissions"
source = self._system_picker(source)
if isinstance(source, basestring):
return source
min_route = None
target_office = None
for office in self._get_offices(self.office_api_key_keyid, self.office_api_key_vcode):
if office == source:
return 'An office is in the target system'
route_length = len(self.map.route_gate(source, office)) - 1
if not min_route or (route_length) < min_route:
target_office = office
min_route = route_length
if target_office:
return 'Nearest Office to {} is {}, {} jump(s)'.format(
self.map.get_system_name(source),
self.map.get_system_name(target_office),
min_route,
)
return 'No known offices.'
def cmd_rageping(self, args, msg):
"""Ping spams everyone's name in a room, use with caution"""
if msg['type'] != 'groupchat':
return 'This only works in MUC rooms'
names = self.plugin['xep_0045'].getRoster(msg['from'].bare)
return 'RAGE PING: {} :frogsiren:'.format(', '.join(names))
|
nikdoof/dropbot
|
dropbot/bot.py
|
Python
|
mit
| 28,390
|
# coding: UTF-8
import os, sys
import mercadopago
def index(req, **kwargs):
mp = mercadopago.MP("CLIENT_ID", "CLIENT_SECRET")
topic = kwargs["topic"]
merchant_order_info = None
if topic == "payment"
payment_info = mp.get("/collections/notifications/"+kwargs["id"])
merchant_order_info = mp.get("/merchant_orders/"+payment_info["response"]["collection"]["merchant_order_id"])
elif topic == "merchant_order"
merchant_order_info = mp.get("/merchant_orders/"+kwargs["id"])
if merchant_order_info == None
raise ValueError("Error obtaining the merchant_order")
if merchant_order_info["status"] == 200
return {
"payment": merchant_order_info["response"]["payments"],
"shipment": merchant_order_info["response"]["shipments"]
}
|
matikbird/matikbird.github.io
|
portfolio/quay/back_end/payments2/mercadopago/api-mercadopago-master/templates/code-examples-master/mp-checkout/shipping/python/ipn_merchant_order.py
|
Python
|
mit
| 824
|
from pprint import pformat
from sqlalchemy import Column, ForeignKey, orm
from sqlalchemy.types import String, Integer, Boolean, Text
from sqlalchemy.schema import UniqueConstraint
from intranet3 import memcache
from intranet3.models import Base, User
from intranet3.log import WARN_LOG, INFO_LOG, DEBUG_LOG
LOG = INFO_LOG(__name__)
DEBUG = DEBUG_LOG(__name__)
WARN = WARN_LOG(__name__)
SELECTOR_CACHE_KEY = 'SELECTORS_FOR_TRACKER_%s'
STATUS = [
('1', 'Initialization'),
('2', 'Analysis'),
('3', 'Conception'),
('4', 'Realization'),
('5', 'Support'),
('6', 'Closed'),
]
def bugzilla_bug_list(tracker_url, bug_ids, project_selector=None):
query = '&'.join(['bug_id=%s' % bug_id for bug_id in bug_ids])
return tracker_url + '/buglist.cgi?%s' % query
def unfuddle_bug_list(tracker_url, bug_ids, project_selector=None):
suffix = '/a#/projects/%s/ticket_reports/dynamic?conditions_string=%s'
query = '|'.join(['number-eq-%s' % bug_id for bug_id in bug_ids])
return tracker_url + (suffix % (project_selector, query))
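# For instance (hypothetical values), bugzilla_bug_list('http://bugs.example.com', [1, 2])
# returns 'http://bugs.example.com/buglist.cgi?bug_id=1&bug_id=2'.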
class Project(Base):
__tablename__ = 'project'
BUG_LIST_URL_CONTRUCTORS = {
'bugzilla': bugzilla_bug_list,
'rockzilla': bugzilla_bug_list,
'igozilla': bugzilla_bug_list,
'trac': lambda *args: '#',
'cookie_trac': lambda *args: '#',
'bitbucket': lambda *args: '#',
'pivotaltracker': lambda *args: '#',
'unfuddle': unfuddle_bug_list,
}
id = Column(Integer, primary_key=True, index=True)
name = Column(String, nullable=False)
coordinator_id = Column(Integer, ForeignKey('user.id'), nullable=True, index=True)
client_id = Column(Integer, ForeignKey('client.id'), nullable=False, index=True)
tracker_id = Column(Integer, ForeignKey('tracker.id'), nullable=False, index=True)
turn_off_selectors = Column(Boolean, nullable=False, default=False)
project_selector = Column(String, nullable=True)
component_selector = Column(String, nullable=True)
ticket_id_selector = Column(String, nullable=True)
version_selector = Column(String, nullable=True)
active = Column(Boolean, nullable=False)
time_entries = orm.relationship('TimeEntry', backref='project', lazy='dynamic')
sprints = orm.relationship('Sprint', backref='project', lazy='dynamic')
google_card = Column(String, nullable=True)
google_wiki = Column(String, nullable=True)
status = Column(Integer, nullable=True)
mailing_url = Column(String, nullable=True)
working_agreement = Column(Text, nullable=False, default='')
definition_of_done = Column(Text, nullable=False, default='')
definition_of_ready = Column(Text, nullable=False, default='')
continuous_integration_url = Column(String, nullable=False, default='')
backlog_url = Column(String, nullable=False, default='')
__table_args__ = (UniqueConstraint('name', 'client_id', name='project_name_client_id_unique'), {})
def format_selector(self):
if self.turn_off_selectors:
return u'Turned off'
if self.ticket_id_selector:
return u'Tickets: %s' % (self.ticket_id_selector, )
else:
return u'%s / %s / %s' % (
self.project_selector or u'*',
self.component_selector or u'*',
self.version_selector or u'*',
)
def get_selector_tuple(self):
"""
Returns selector tuple
([ticket_ids], project_selector, component_selector)
"""
ticket_ids = [
int(v.strip()) for v in self.ticket_id_selector.split(',')
] if self.ticket_id_selector else None
components = [
v.strip() for v in self.component_selector.split(',')
] if self.component_selector else []
versions = [
v.strip() for v in self.version_selector.split(',')
] if self.version_selector else []
return (
ticket_ids,
self.project_selector,
components,
versions,
)
def get_new_bug_url(self):
"""
Returns url for create new bug in project
"""
component_selector = self.component_selector if self.component_selector is not None and not self.component_selector.count(',') else None
return self.tracker.get_new_bug_url(self.project_selector, component_selector)
def get_bug_list_url(self, bug_ids):
constructor = self.BUG_LIST_URL_CONTRUCTORS[self.tracker.type]
return constructor(self.tracker.url, bug_ids, self.project_selector)
@property
def status_name(self):
if self.status and len(STATUS) >= self.status:
return STATUS[self.status-1][1]
return None
@property
def coordinator(self):
if self.coordinator_id is not None:
return User.query.filter(User.id==self.coordinator_id).one()
else:
return self.client.coordinator
class SelectorMapping(object):
""" Simple storage for cached project selectors """
def __init__(self, tracker):
"""
Creates a selector mapping for given tracker
None -> project_id
project_name -> project_id
(project_name, component_name) -> project_id
"""
self.tracker = tracker
self.by_ticket_id = {}
self.default = None
self.by_project = {} # key: project_name
self.by_component = {} # key: project_name, component_name
self.by_version = {} # key: project_name, version
self.by_component_version = {} # key: project_name, component_name, version
cache_key = SELECTOR_CACHE_KEY % tracker.id
mapping = memcache.get(cache_key)
if mapping:
self.clone(mapping)
return
projects = Project.query.filter(Project.tracker_id == tracker.id) \
.filter(Project.turn_off_selectors == False) \
.filter(Project.active == True)
self.projects = dict([(project.id, project.name) for project in projects])
for project in projects:
self._create_for_project(project)
memcache.set(cache_key, self)
DEBUG('Created selector mapping for tracker %s: %s, %s' % (
tracker.id, pformat(self.by_ticket_id), pformat(self.by_component))
)
    def clone(self, mapping):
        self.default = mapping.default
        self.by_ticket_id = mapping.by_ticket_id
        self.projects = mapping.projects
        self.by_project = mapping.by_project
        self.by_component = mapping.by_component
        self.by_version = mapping.by_version
        self.by_component_version = mapping.by_component_version
def _check_ticket_id_existance(self, ticket_id):
if ticket_id in self.by_ticket_id:
WARN(u'Overriding ticket ID for tracker from %s to %s' % (
self.by_ticket_id[ticket_id], ticket_id))
def _check_project_component_existance(self, project_component, project):
"""
Warn if we override a project
"""
if project_component is None:
if None in self.by_component:
WARN(u'Overriding default project for tracker [%s] from [%s] to [%s]' % (
self.tracker.name,
self.projects[self.by_component[None]],
project.name
))
elif isinstance(project_component, (str, unicode)):
project_name = project_component
if project_name in self.by_component:
WARN(u'Overriding project [%s] for tracker [%s] from [%s] to [%s]' % (
project_name,
self.tracker.name,
self.projects[self.by_component[project_name]],
project.name
))
else:
project_name, component_name = project_component
if (project_name, component_name) in self.by_component:
WARN(u'Overriding project [%s] and component [%s] for tracker [%s] from [%s] to [%s]' % (
project_name,
component_name,
self.tracker.name,
self.projects[self.by_component[(project_name, component_name)]],
project.name
))
def _create_for_project(self, project):
ticket_ids, project_name, component_names, versions = project.get_selector_tuple()
if ticket_ids:
for ticket_id in ticket_ids:
self._check_ticket_id_existance(ticket_id)
self.by_ticket_id[ticket_id] = project.id
        # Possible selector combinations:
        # none
        # project only
        # project + component
        # project + version
        # project + component + version
if not project_name:
            # none
self._check_project_component_existance(None, project)
self.default = project.id
elif not component_names:
if versions:
                # project + version
for version in versions:
self.by_version[(project_name, version)] = project.id
else:
                # project only
self._check_project_component_existance(project_name, project)
self.by_project[project_name] = project.id
elif not versions:
            # project + component
for component_name in component_names:
self._check_project_component_existance((project_name, component_name), project)
self.by_component[(project_name, component_name)] = project.id
else:
            # project + component + version
for component_name in component_names:
for version in versions:
self.by_component_version[(project_name, component_name, version)] = project.id
def match(self, id_, project, component, version=None):
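        # Resolution order: explicit ticket id first, then the most specific
        # selector (project+component+version, project+component,
        # project+version, plain project), and finally the tracker default.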
if id_ in self.by_ticket_id:
return self.by_ticket_id[id_]
project_id = self.by_component_version.get((project, component, version))
if project_id:
return project_id
project_id = self.by_component.get((project, component))
if project_id:
return project_id
project_id = self.by_version.get((project, version))
if project_id:
return project_id
project_id = self.by_project.get(project)
if project_id:
return project_id
if self.default:
return self.default
WARN(u'map_to_project: Mapping to project/component/tracker %s/%s/%s failed' % (project, component, self.tracker.name))
@staticmethod
def invalidate_for(tracker_id):
memcache.delete(SELECTOR_CACHE_KEY % tracker_id)
DEBUG(u'Invalidated selector mapping cache for tracker %s' % (tracker_id, ))
|
pytlakp/intranetref
|
src/intranet3/models/project.py
|
Python
|
mit
| 10,842
|
import math
def add_vectors(vector1, vector2):
    #Note: the vectors are tuples of (angle, magnitude)
x = math.sin(vector1[0]) * vector1[1] + math.sin(vector2[0]) * vector2[1]
y = math.cos(vector1[0]) * vector1[1] + math.cos(vector2[0]) * vector2[1]
mag = math.hypot(x, y)
angle = (math.pi/2) - math.atan2(y, x)
return (angle, mag)
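# Worked example (illustrative): add_vectors((0, 1), (math.pi/2, 1)) returns
# (math.pi/4, math.sqrt(2)); in this convention an angle of 0 points along the
# positive y-axis and angles grow towards the positive x-axis.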
|
shantanu69/pygame-physics
|
PyParticleSystem/PyParticles/utils.py
|
Python
|
mit
| 330
|
import numpy as np
from numpy import ma
from matplotlib import pyplot as plt
from mpl_toolkits.basemap import Basemap as bm
from mpl_toolkits.basemap import addcyclic
import palettable
class vector_plot:
def __init__(self, ucompos, vcompos):
self.ucompos = ucompos
self.vcompos = vcompos
self.uanoms = self.ucompos.dset['composite_anomalies']
self.vanoms = self.vcompos.dset['composite_anomalies']
self.windspeed = np.sqrt(np.power(self.uanoms, 2) + np.power(self.vanoms, 2))
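        # wind speed = magnitude of the (u, v) anomaly vector at each grid point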
def plotmap(self, domain = [0., 360., -90., 90.], res='c', stepp=2, scale=20):
latitudes = self.windspeed.latitudes.data
longitudes = self.windspeed.longitudes.data
m = bm(projection='cyl',llcrnrlat=latitudes.min(),urcrnrlat=latitudes.max(),\
llcrnrlon=longitudes.min(),urcrnrlon=longitudes.max(),\
lat_ts=0, resolution=res)
lons, lats = np.meshgrid(longitudes, latitudes)
cmap = palettable.colorbrewer.sequential.Oranges_9.mpl_colormap
f, ax = plt.subplots(figsize=(10,6))
m.ax = ax
x, y = m(lons, lats)
im = m.pcolormesh(lons, lats, self.windspeed.data, cmap=cmap)
cb = m.colorbar(im)
cb.set_label('wind speed (m/s)', fontsize=14)
Q = m.quiver(x[::stepp,::stepp], y[::stepp,::stepp], \
self.uanoms.data[::stepp,::stepp], self.vanoms.data[::stepp,::stepp], \
pivot='middle', scale=scale)
l,b,w,h = ax.get_position().bounds
qk = plt.quiverkey(Q, l+w-0.1, b-0.03, 5, "5 m/s", labelpos='E', fontproperties={'size':14}, coordinates='figure')
m.drawcoastlines()
return f
|
nicolasfauchereau/paleopy
|
paleopy/plotting/vector_plot.py
|
Python
|
mit
| 1,697
|
from django.test import TestCase
from ...models import Office
class PublishedMixinTest(TestCase):
def test_only_published_manager_and_queryset_default_datetime(self):
data = [
dict(published=False, address='office', office='not_published'),
dict(office='published', address='some adress'),
dict(office='published again', address='some address')
]
published_offices = []
not_published_offices = []
for item in data:
office = Office.objects.create(**item)
if office.is_published():
published_offices.append(office)
else:
not_published_offices.append(office)
self.assertQuerysetEqual(
Office.objects.published(), reversed(published_offices),
transform=lambda o: o)
self.assertQuerysetEqual(
Office.published_objects.all(), reversed(published_offices),
transform=lambda o: o)
|
mtrgroup/django-mtr-utils
|
tests/app/tests/models/mixins.py
|
Python
|
mit
| 991
|
# Theme-specific settings
SITENAME = "Marc Sleegers"
DOMAIN = "marcsleegers.com"
BIO_TEXT = "Infrequent ramblings."
FOOTER_TEXT = '© 2022 Marc Sleegers. Licensed <a href="https://creativecommons.org/licenses/by/4.0/">CC BY 4.0</a>.'
SITE_AUTHOR = "Marc Sleegers"
TWITTER_USERNAME = "@marcardioid"
GOOGLE_PLUS_URL = ""
INDEX_DESCRIPTION = "Lead Data Engineer at Nike, especially interested in driving growth through insights – not just metrics. These are my infrequent ramblings."
INDEX_KEYWORDS = [
"Marc",
"Sleegers",
"About",
"Blog",
"Resume",
"CV",
"Portfolio",
"Marcardioid",
"Pumpkinsoup",
"AWildPumpkin",
"Computer",
"Science",
"Developer",
"Programmer",
"Software",
"Data",
"Engineer",
"Technology",
]
NAVIGATION_ITEMS = []
# NAVIGATION_ITEMS = [
# ('/blog/', 'blog', 'Blog'),
# ('/blog/archive/', 'archive', 'Archive'),
# ]
ICONS_PATH = "images/icons"
GOOGLE_FONTS = [
"Inter",
"Source Code Pro",
]
SOCIAL_ICONS = [
# (
# "mailto:mail@marcsleegers.com",
# "Contact (mail@marcsleegers.com)",
# "fa-envelope-square",
# ),
# ('https://facebook.com/marc.sleegers', 'Facebook', 'fa-facebook-square'),
# ('https://twitter.com/marcardioid', 'Twitter', 'fa-twitter-square'),
# ("https://github.com/marcardioid", "GitHub", "fa-github-square"),
# ('/files/CV_Marc-Sleegers_2015_EN_WEB.pdf', 'Resume', 'fa-check-square'),
# ("/atom.xml", "RSS (Atom Feed)", "fa-rss-square"),
]
THEME_COLOR = "#052"
ASSET_URL = "/theme/style.min.css"
# Pelican settings
RELATIVE_URLS = False
SITEURL = "http://localhost:8000"
TIMEZONE = "Europe/Amsterdam"
DEFAULT_DATE = "fs"
DEFAULT_DATE_FORMAT = "%B %d, %Y"
DEFAULT_PAGINATION = False
SUMMARY_MAX_LENGTH = 50
THEME = "themes/pneumatic"
# Relocate blog directory
BLOG_URL = "blog/"
BLOG_DESCRIPTION = "These are my infrequent ramblings."
ARTICLE_URL = BLOG_URL + "{date:%Y}/{date:%m}/{slug}/"
ARTICLE_SAVE_AS = ARTICLE_URL + "index.html"
DRAFT_URL = BLOG_URL + "drafts/{date:%Y}/{date:%m}/{slug}/"
DRAFT_SAVE_AS = DRAFT_URL + "index.html"
PAGE_URL = "{slug}/"
PAGE_SAVE_AS = PAGE_URL + "index.html"
ARCHIVES_SAVE_AS = BLOG_URL + "archive/index.html"
ARCHIVES_DESCRIPTION = "These are the archives of my infrequent ramblings."
YEAR_ARCHIVE_SAVE_AS = BLOG_URL + "{date:%Y}/index.html"
MONTH_ARCHIVE_SAVE_AS = BLOG_URL + "{date:%Y}/{date:%m}/index.html"
# Disable authors, categories, tags, and category pages
DIRECT_TEMPLATES = ["index", "archives"]
INDEX_SAVE_AS = BLOG_URL + "index.html"
CATEGORY_SAVE_AS = ""
# Feed generation: keep the main Atom feed, disable category/translation feeds
FEED_ATOM = "atom.xml"
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
TYPOGRIFY = True
MARKDOWN = {
"extension_configs": {
"markdown.extensions.codehilite": {"linenums": "True"},
"markdown.extensions.admonition": {},
"markdown.extensions.extra": {},
"markdown.extensions.toc": {"anchorlink": "True"},
"markdown.extensions.footnotes": {},
"markdown.extensions.meta": {},
},
"output_format": "html5",
}
JINJA_ENVIRONMENT = {"trim_blocks": True, "lstrip_blocks": True}
CACHE_CONTENT = False
DELETE_OUTPUT_DIRECTORY = False
OUTPUT_PATH = "output/develop/"
PATH = "content"
templates = ["404.html"]
TEMPLATE_PAGES = {page: page for page in templates}
STATIC_PATHS = ["images", "extra"]
IGNORE_FILES = ["style.css"]
extras = ["favicon.ico", "robots.txt", "humans.txt"]
EXTRA_PATH_METADATA = {"extra/%s" % file: {"path": file} for file in extras}
PLUGINS = ["neighbors", "sitemap", "webassets", "share_post", "series"]
GOOGLE_ANALYTICS = "UA-72969416-1"
SITEMAP = {
"format": "xml",
"priorities": {"articles": 0.5, "indexes": 0.5, "pages": 0.5},
"changefreqs": {"articles": "monthly", "indexes": "weekly", "pages": "monthly"},
}
WEBASSETS_CONFIG = [
("cache", False),
("manifest", False),
("url_expire", False),
("versions", False),
]
|
marcardioid/marcsleegers.com
|
pelicanconf.py
|
Python
|
mit
| 3,987
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# returns a random d dimensional vector, a direction to perturb in
def direction(d,t):
# if type == uniform
if(t == 'u'):
return np.random.uniform(-1/np.sqrt(2), 1/np.sqrt(2), d)
elif(t == 'n'):
return np.random.normal(0, 1/np.sqrt(d), d)
elif(t == 's'):
        # a point on the N-Sphere (d-1 angles parameterise d coordinates)
        angles = np.random.uniform(0, np.pi, d-1)
x = np.zeros(d)
x[0] = np.cos(angles[0])
for i in range(1,d-1):
temp = 1
for j in range(i):
temp = temp * np.sin(angles[j])
x[i] = temp*np.cos(angles[i])
x[d-1] = x[d-2]*np.tan(angles[d-2])
return x
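# Sanity check (illustrative): with t='n' each component is N(0, 1/sqrt(d)), so
# the expected squared norm is d * (1/d) = 1, i.e. perturbation directions have
# unit length on average in any dimension.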
fig = plt.figure()
ax = plt.axes(projection='3d')
for i in range(1000):
R = np.random.uniform(0,1,1)[0]
R2 = np.random.uniform(0,1,1)[0]
xs = np.sin(np.arccos(1-2*R))*np.cos(2*np.pi*R2)
ys = np.sin(np.arccos(1-2*R))*np.sin(2*np.pi*R2)
zs = 1- 2*R
ax.scatter3D(xs, ys, zs, cmap='Greens')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
|
Aditya8795/Python-Scripts
|
peturb.py
|
Python
|
mit
| 1,147
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
from snisi_reprohealth.models.PFActivities import (PFActivitiesR, AggPFActivitiesR)
# from snisi_reprohealth.models.ChildrenMortality import (ChildrenDeathR, AggChildrenDeathR)
# from snisi_reprohealth.models.MaternalMortality import (MaternalDeathR, AggMaternalDeathR)
# from snisi_reprohealth.models.Commodities import (RHProductsR, AggRHProductsR)
|
yeleman/snisi
|
snisi_reprohealth/models/__init__.py
|
Python
|
mit
| 540
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import collections
import logging
import os
import platform
import re
import subprocess
import types
import util
import json
from ebstall.versions import Version
from ebstall.util import normalize_string
logger = logging.getLogger(__name__)
CLI_DEFAULTS_DEFAULT = dict(
packager='source'
)
CLI_DEFAULTS_DEBIAN = dict(
packager='apt-get'
)
CLI_DEFAULTS_CENTOS = dict(
packager='yum'
)
CLI_DEFAULTS_DARWIN = dict(
packager='source'
)
FLAVORS = {
'debian': 'debian',
'ubuntu': 'debian',
'kubuntu': 'debian',
'kali': 'debian',
'centos': 'redhat',
'centos linux': 'redhat',
'fedora': 'redhat',
'red hat enterprise linux server': 'redhat',
'rhel': 'redhat',
'amazon': 'redhat',
'amzn': 'redhat',
'gentoo': 'gentoo',
'gentoo base system': 'gentoo',
'darwin': 'darwin',
'opensuse': 'suse',
'suse': 'suse',
}
CLI_DEFAULTS = {
"default": CLI_DEFAULTS_DEFAULT,
"debian": CLI_DEFAULTS_DEBIAN,
"ubuntu": CLI_DEFAULTS_DEBIAN,
"centos": CLI_DEFAULTS_CENTOS,
"centos linux": CLI_DEFAULTS_CENTOS,
"fedora": CLI_DEFAULTS_CENTOS,
"red hat enterprise linux server": CLI_DEFAULTS_CENTOS,
"rhel": CLI_DEFAULTS_CENTOS,
"amazon": CLI_DEFAULTS_CENTOS,
"amzn": CLI_DEFAULTS_CENTOS,
"gentoo": CLI_DEFAULTS_DEFAULT,
"gentoo base system": CLI_DEFAULTS_DEFAULT,
"darwin": CLI_DEFAULTS_DARWIN,
"opensuse": CLI_DEFAULTS_DEFAULT,
"suse": CLI_DEFAULTS_DEFAULT,
}
"""CLI defaults."""
# Start system
START_INITD = 'init.d'
START_SYSTEMD = 'systemd'
# Pkg manager
PKG_YUM = 'yum'
PKG_APT = 'apt-get'
FAMILY_REDHAT = 'redhat'
FAMILY_DEBIAN = 'debian'
# redhat / debian
YUMS = ['redhat', 'fedora', 'centos', 'rhel', 'amzn', 'amazon']
DEBS = ['debian', 'ubuntu', 'kali']
class OSInfo(object):
"""OS information, name, version, like - similarity"""
def __init__(self, name=None, version=None, version_major=None, like=None, family=None,
packager=None, start_system=None, has_os_release=False, fallback_detection=False, long_name=None,
*args, **kwargs):
self.name = name
self.long_name = long_name
self.version_major = version_major
self.version = version
self.like = like
self.family = family
self.packager = packager
self.start_system = start_system
self.has_os_release = has_os_release
self.fallback_detection = fallback_detection
def __str__(self):
return 'OSInfo(%r)' % json.dumps(self.to_json())
def __repr__(self):
return 'OSInfo(%r)' % json.dumps(self.to_json())
def to_json(self):
"""
Converts to the JSON
:return:
"""
js = collections.OrderedDict()
js['name'] = self.name
js['long_name'] = self.long_name
js['version_major'] = self.version_major
js['version'] = self.version
js['like'] = self.like
js['family'] = self.family
js['packager'] = self.packager
js['start_system'] = self.start_system
js['has_os_release'] = self.has_os_release
js['fallback_detection'] = self.fallback_detection
return js
class PackageInfo(object):
"""
Basic information about particular package
"""
def __init__(self, name, version, arch, repo, size=None, section=None):
self._version = None
self.name = name
self.version = version
self.arch = arch
self.repo = repo
self.size = size
self.section = section
@property
def version(self):
return self._version
@version.setter
def version(self, val):
self._version = Version(val)
def __str__(self):
return '%s-%s.%s' % (self.name, self.version, self.arch)
def __repr__(self):
return 'PackageInfo(name=%r, version=%r, arch=%r, repo=%r, size=%r, section=%r)' \
% (self.name, self.version, self.arch, self.repo, self.size, self.section)
def to_json(self):
"""
Converts to the JSON
:return:
"""
js = collections.OrderedDict()
js['name'] = self.name
js['version'] = str(self.version)
js['arch'] = self.arch
js['repo'] = self.repo
if self.size is not None:
js['size'] = self.size
if self.section is not None:
js['section'] = self.section
return js
@classmethod
def from_json(cls, js):
"""
Converts json dict to the object
:param js:
:return:
"""
obj = cls(name=js['name'], version=js['version'], arch=js['arch'], repo=js['repo'])
if 'size' in js:
obj.size = js['size']
if 'section' in js:
obj.section = js['section']
return obj
def get_os():
"""
Returns basic information about the OS.
:return: OSInfo
"""
# At first - parse os-release
ros = OSInfo()
os_release_path = '/etc/os-release'
if os.path.isfile(os_release_path):
ros.name = _get_systemd_os_release_var("ID", filepath=os_release_path)
ros.version = _get_systemd_os_release_var("VERSION_ID", filepath=os_release_path)
ros.like = _get_systemd_os_release_var("ID_LIKE", os_release_path).split(" ")
ros.long_name = _get_systemd_os_release_var("PRETTY_NAME", filepath=os_release_path)
ros.has_os_release = True
if not ros.long_name:
ros.long_name = _get_systemd_os_release_var("NAME", filepath=os_release_path)
# Try /etc/redhat-release and /etc/debian_version
if not ros.has_os_release or ros.like is None or ros.version is None or ros.name is None:
os_redhat_release(ros)
os_debian_version(ros)
os_issue(ros)
# like detection
os_like_detect(ros)
os_family_detect(ros)
# Major version
os_major_version(ros)
# Packager detection - yum / apt-get
os_packager(ros)
# Start system - init.d / systemd
os_start_system(ros)
return ros
def os_family_detect(ros):
"""
OS Family (redhat, debian, ...)
:param ros:
:return:
"""
if util.startswith(ros.like, YUMS):
ros.family = FAMILY_REDHAT
if util.startswith(ros.like, DEBS):
ros.family = FAMILY_DEBIAN
if ros.family is not None:
if sum([1 for x in YUMS if ros.name.lower().startswith(x)]) > 0:
ros.family = FAMILY_REDHAT
if sum([1 for x in DEBS if ros.name.lower().startswith(x)]) > 0:
ros.family = FAMILY_DEBIAN
return
def os_packager(ros):
if ros.like is not None:
if util.startswith(ros.like, YUMS):
ros.packager = PKG_YUM
if util.startswith(ros.like, DEBS):
ros.packager = PKG_APT
return ros
if ros.name is not None:
if sum([1 for x in YUMS if ros.name.lower().startswith(x)]) > 0:
ros.packager = PKG_YUM
if sum([1 for x in DEBS if ros.name.lower().startswith(x)]) > 0:
ros.packager = PKG_APT
return
if os.path.exists('/etc/yum'):
ros.packager = PKG_YUM
if os.path.exists('/etc/apt/sources.list'):
ros.packager = PKG_APT
def os_start_system(ros):
if os.path.exists('/etc/systemd'):
ros.start_system = START_SYSTEMD
else:
ros.start_system = START_INITD
return ros
def os_issue(ros):
if os.path.exists('/etc/issue'):
with open('/etc/issue', 'r') as fh:
issue = fh.readline().strip()
issue = re.sub(r'\\[a-z]', '', issue).strip()
match1 = re.match(r'^(.+?)\s+release\s+(.+?)$', issue, re.IGNORECASE)
match2 = re.match(r'^(.+?)\s+([0-9.]+)\s*(LTS)?$', issue, re.IGNORECASE)
if match1:
ros.long_name = match1.group(1).strip()
ros.version = match1.group(2).strip()
elif match2:
ros.long_name = match2.group(1).strip()
ros.version = match2.group(2).strip()
else:
ros.long_name = issue
return ros
def os_debian_version(ros):
if os.path.exists('/etc/debian_version'):
with open('/etc/debian_version', 'r') as fh:
debver = fh.readline().strip()
ros.like = 'debian'
ros.family = FAMILY_DEBIAN
if ros.version is None:
ros.version = debver.strip()
return ros
def os_redhat_release(ros):
if os.path.exists('/etc/redhat-release'):
with open('/etc/redhat-release', 'r') as fh:
redhatrel = fh.readline().strip()
ros.like = 'redhat'
ros.family = FAMILY_REDHAT
match = re.match(r'^(.+?)\s+release\s+(.+?)$', redhatrel, re.IGNORECASE)
if match is not None:
ros.long_name = match.group(1).strip()
ros.version = match.group(2).strip()
else:
ros.long_name = redhatrel
return ros
def os_like_detect(ros):
if not ros.like and ros.name is not None:
try:
ros.like = FLAVORS[ros.name.lower()]
        except KeyError:
pass
if not ros.like and ros.long_name is not None:
try:
ros.like = FLAVORS[ros.long_name.lower()]
        except KeyError:
pass
return ros
def os_major_version(ros):
if ros.version is not None:
match = re.match(r'(.+?)[/.]', ros.version)
if match:
ros.version_major = match.group(1)
return ros
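# For example, a version string of '7.4.1708' yields version_major '7'.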
def get_os_info(filepath="/etc/os-release"):
"""
Get OS name and version
:param str filepath: File path of os-release file
:returns: (os_name, os_version)
:rtype: `tuple` of `str`
"""
if os.path.isfile(filepath):
# Systemd os-release parsing might be viable
os_name, os_version = get_systemd_os_info(filepath=filepath)
if os_name:
return (os_name, os_version)
# Fallback to platform module
return get_python_os_info()
def get_os_info_ua(filepath="/etc/os-release"):
"""
Get OS name and version string for User Agent
:param str filepath: File path of os-release file
:returns: os_ua
:rtype: `str`
"""
if os.path.isfile(filepath):
os_ua = _get_systemd_os_release_var("PRETTY_NAME", filepath=filepath)
if not os_ua:
os_ua = _get_systemd_os_release_var("NAME", filepath=filepath)
if os_ua:
return os_ua
# Fallback
return " ".join(get_python_os_info())
def get_systemd_os_info(filepath="/etc/os-release"):
"""
Parse systemd /etc/os-release for distribution information
:param str filepath: File path of os-release file
:returns: (os_name, os_version)
:rtype: `tuple` of `str`
"""
os_name = _get_systemd_os_release_var("ID", filepath=filepath)
os_version = _get_systemd_os_release_var("VERSION_ID", filepath=filepath)
return (os_name, os_version)
def get_systemd_os_like(filepath="/etc/os-release"):
"""
Get a list of strings that indicate the distribution likeness to
other distributions.
:param str filepath: File path of os-release file
:returns: List of distribution acronyms
:rtype: `list` of `str`
"""
return _get_systemd_os_release_var("ID_LIKE", filepath).split(" ")
def _get_systemd_os_release_var(varname, filepath="/etc/os-release"):
"""
Get single value from systemd /etc/os-release
:param str varname: Name of variable to fetch
:param str filepath: File path of os-release file
:returns: requested value
:rtype: `str`
"""
var_string = varname+"="
if not os.path.isfile(filepath):
return ""
with open(filepath, 'r') as fh:
contents = fh.readlines()
for line in contents:
if line.strip().startswith(var_string):
# Return the value of var, normalized
return normalize_string(line.strip()[len(var_string):])
return ""
def get_python_os_info():
"""
Get Operating System type/distribution and major version
using python platform module
:returns: (os_name, os_version)
:rtype: `tuple` of `str`
"""
info = platform.system_alias(
platform.system(),
platform.release(),
platform.version()
)
os_type, os_ver, _ = info
os_type = os_type.lower()
if os_type.startswith('linux'):
info = platform.linux_distribution()
# On arch, platform.linux_distribution() is reportedly ('','',''),
# so handle it defensively
if info[0]:
os_type = info[0]
if info[1]:
os_ver = info[1]
elif os_type.startswith('darwin'):
os_ver = subprocess.Popen(
["sw_vers", "-productVersion"],
stdout=subprocess.PIPE
).communicate()[0].rstrip('\n')
elif os_type.startswith('freebsd'):
# eg "9.3-RC3-p1"
os_ver = os_ver.partition("-")[0]
os_ver = os_ver.partition(".")[0]
elif platform.win32_ver()[1]:
os_ver = platform.win32_ver()[1]
else:
# Cases known to fall here: Cygwin python
os_ver = ''
return os_type, os_ver
def os_like(key):
"""
Tries to transform OS ID to LIKE_ID
:param key:
:return: string or None
"""
try:
return FLAVORS[key.lower()]
except KeyError:
return None
def os_constant(key):
"""
Get a constant value for operating system
:param key: name of cli constant
:return: value of constant for active os
"""
os_info = get_os_info()
try:
constants = CLI_DEFAULTS[os_info[0].lower()]
except KeyError:
constants = os_like_constants()
if not constants:
constants = CLI_DEFAULTS["default"]
return constants[key]
def os_like_constants():
"""
Try to get constants for distribution with
similar layout and configuration, indicated by
/etc/os-release variable "LIKE"
:returns: Constants dictionary
:rtype: `dict`
"""
os_like = get_systemd_os_like()
if os_like:
for os_name in os_like:
            if os_name in CLI_DEFAULTS:
return CLI_DEFAULTS[os_name]
return {}
def get_yum_packages(out):
"""
    Parses the full package listing from yum output.
:param out:
:return:
"""
ret = []
lines = out if isinstance(out, types.ListType) else out.split('\n')
for line in lines:
line = line.strip()
match = re.match(r'^([a-zA-Z0-9.\-_]+)[\s\t]+([a-zA-Z0-9.:\-_]+)[\s\t]+([@a-zA-Z0-9.\-_]+)$', line)
if match is None:
continue
package = match.group(1).strip()
version = match.group(2).strip()
repo = match.group(3).strip()
arch = None
# Architecture extract
match_arch = re.match(r'^(.+?)\.([^.]+)$', package)
if match_arch:
package = match_arch.group(1).strip()
arch = match_arch.group(2).strip()
pkg = PackageInfo(name=package, version=version, arch=arch, repo=repo)
ret.append(pkg)
return ret
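def _example_yum_list_line():
    # Hedged sample of the yum listing line shape parsed above; the package
    # values are made up for illustration.
    pkgs = get_yum_packages(['nginx.x86_64    1:1.10.2-1.el7    @epel'])
    # -> [PackageInfo(name='nginx', version='1:1.10.2-1.el7',
    #                 arch='x86_64', repo='@epel')]
    return pkgs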
def get_yum_packages_update(out):
"""
    Parses the list of packages to update from yum output.
:param out:
:return:
"""
ret = []
eqline = 0
cur_section = None
lines = out if isinstance(out, types.ListType) else out.split('\n')
for line in lines:
line = line.strip()
if line.startswith('====='):
eqline += 1
continue
# Process lines only after 2nd ====== line - should be the package list.
if eqline != 2:
continue
lmatch = re.match(r'^([a-zA-Z\s]+):$', line)
if lmatch is not None:
cur_section = lmatch.group(1)
continue
match = re.match(r'^([a-zA-Z0-9.\-_]+)[\s\t]+([a-zA-Z0-9.\-_]+)[\s\t]+([a-zA-Z0-9.:\-_]+)'
r'[\s\t]+([@a-zA-Z0-9.:\-_]+)[\s\t]+([a-zA-Z0-9.\-_\s]+?)$', line)
if match is None:
continue
package = match.group(1).strip()
version = match.group(3).strip()
repo = match.group(4).strip()
arch = match.group(2).strip()
size = match.group(5).strip()
pkg = PackageInfo(name=package, version=version, arch=arch, repo=repo, size=size, section=cur_section)
ret.append(pkg)
return ret
def check_package_restrictions(yum_output_packages, allowed_packages):
"""
    Checks the list of yum output packages against the allowed packages.
    :param yum_output_packages:
    :param allowed_packages:
:return: (conflicting packages, new packages)
"""
new_packages = []
conflicting_packages = []
for out_package in yum_output_packages:
allowed_list = [x for x in allowed_packages if x.name == out_package.name]
if len(allowed_list) == 0:
new_packages.append(out_package)
continue
# Sort packages based on the version, highest first.
if len(allowed_list) > 1:
allowed_list.sort(key=lambda x: x.version, reverse=True)
allowed = allowed_list[0]
if out_package.version > allowed.version:
conflicting_packages.append(out_package)
return conflicting_packages, new_packages
def package_diff(a, b, only_in_b=False):
"""
    Package diff a - b.
    A package x in a is dropped from the result if the same package (or a
    higher version of it) is present in b. When b holds several versions of
    the package, the highest one is compared against.
    Used for removing already installed packages (b) from the packages to
    install (a).
    :param a:
    :param b:
    :param only_in_b: if True, a package from a is kept only when b contains it in a lower version.
:return:
"""
res = []
for pkg in a:
b_filtered = [x for x in b if x.name == pkg.name and x.arch == pkg.arch]
# New package, not in b
if len(b_filtered) == 0:
if not only_in_b:
res.append(pkg)
continue
# Sort packages based on the version, highest first.
if len(b_filtered) > 1:
b_filtered.sort(key=lambda x: x.version, reverse=True)
# b contains smaller version of the package, add to the result
if b_filtered[0].version < pkg.version:
res.append(pkg)
return res
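def _example_package_diff():
    # Sketch assuming PackageInfo (used above) accepts these keyword
    # arguments; note the version comparison above is plain string ordering.
    installed = [PackageInfo(name='openssl', version='1.0.1e', arch='x86_64', repo='@base')]
    wanted = [PackageInfo(name='openssl', version='1.0.2k', arch='x86_64', repo='base'),
              PackageInfo(name='nginx', version='1.10.2', arch='x86_64', repo='epel')]
    # openssl stays ('1.0.1e' < '1.0.2k'), nginx is new -> both returned.
    return package_diff(wanted, installed)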
|
EnigmaBridge/ebstall.py
|
ebstall/osutil.py
|
Python
|
mit
| 18,396
|
#!/usr/bin/env python
import boto
from boto.s3.connection import OrdinaryCallingFormat
from fabric.api import prompt
def confirm(message):
"""
Verify a users intentions.
"""
answer = prompt(message, default="Not at all")
if answer.lower() not in ('y', 'yes', 'buzz off', 'screw you'):
exit()
def replace_in_file(filename, find, replace):
with open(filename, 'r') as f:
contents = f.read()
contents = contents.replace(find, replace)
with open(filename, 'w') as f:
f.write(contents)
def get_bucket(bucket_name):
"""
    Establishes a connection and gets the s3 bucket.
"""
if '.' in bucket_name:
s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())
else:
s3 = boto.connect_s3()
return s3.get_bucket(bucket_name)
|
swastvedt/dailygraphics
|
fabfile/utils.py
|
Python
|
mit
| 813
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the instance-groups managed update-instances commands."""
import re
from googlecloudsdk.calliope import exceptions
STANDBY_NAME = 'standby'
TARGET_SIZE_NAME = 'target-size'
TEMPLATE_NAME = 'template'
def _ParseFixed(fixed_or_percent_str):
"""Retrieves int value from string."""
if re.match(r'^\d+$', fixed_or_percent_str):
return int(fixed_or_percent_str)
return None
def _ParsePercent(fixed_or_percent_str):
"""Retrieves percent value from string."""
if re.match(r'^\d+%$', fixed_or_percent_str):
percent = int(fixed_or_percent_str[:-1])
return percent
return None
def ParseFixedOrPercent(flag_name, flag_param_name,
fixed_or_percent_str, messages):
"""Retrieves value: number or percent.
Args:
flag_name: name of the flag associated with the parsed string.
flag_param_name: name of the inner parameter of the flag.
fixed_or_percent_str: string containing fixed or percent value.
messages: module containing message classes.
Returns:
FixedOrPercent message object.
"""
if fixed_or_percent_str is None:
return None
fixed = _ParseFixed(fixed_or_percent_str)
if fixed is not None:
return messages.FixedOrPercent(fixed=fixed)
percent = _ParsePercent(fixed_or_percent_str)
if percent is not None:
if percent > 100:
raise exceptions.InvalidArgumentException(
flag_name, 'percentage cannot be higher than 100%.')
return messages.FixedOrPercent(percent=percent)
raise exceptions.InvalidArgumentException(
flag_name,
flag_param_name + ' has to be non-negative integer number or percent.')
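# Hedged sketch of ParseFixedOrPercent (the flag name and the messages
# module are placeholders normally supplied by the caller):
#   ParseFixedOrPercent('--max-surge', 'max-surge', '3', messages)
#       -> messages.FixedOrPercent(fixed=3)
#   ParseFixedOrPercent('--max-surge', 'max-surge', '10%', messages)
#       -> messages.FixedOrPercent(percent=10)
#   '150%' raises InvalidArgumentException (percent > 100).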
def ParseUpdatePolicyType(flag_name, policy_type_str, messages):
"""Retrieves value of update policy type: opportunistic or proactive.
Args:
flag_name: name of the flag associated with the parsed string.
policy_type_str: string containing update policy type.
messages: module containing message classes.
Returns:
InstanceGroupManagerUpdatePolicy.TypeValueValuesEnum message enum value.
"""
if policy_type_str == 'opportunistic':
return (messages.InstanceGroupManagerUpdatePolicy
.TypeValueValuesEnum.OPPORTUNISTIC)
elif policy_type_str == 'proactive':
return (messages.InstanceGroupManagerUpdatePolicy
.TypeValueValuesEnum.PROACTIVE)
raise exceptions.InvalidArgumentException(flag_name, 'unknown update policy.')
def ValidateUpdateInstancesArgs(args):
"""Validates update arguments provided by the user.
Args:
args: arguments provided by the user.
"""
if args.action == 'restart':
if args.version_original:
raise exceptions.InvalidArgumentException(
'--version-original', 'can\'t be specified for --action restart.')
if args.version_new:
raise exceptions.InvalidArgumentException(
'--version-new', 'can\'t be specified for --action restart.')
elif args.action == 'replace':
if not args.version_new:
raise exceptions.RequiredArgumentException(
'--version-new',
'must be specified for --action replace (or default).')
if not args.version_original and (TARGET_SIZE_NAME in args.version_new):
if args.version_new[TARGET_SIZE_NAME] == '100%':
del args.version_new[TARGET_SIZE_NAME]
else:
raise exceptions.InvalidArgumentException(
'--version-new',
'target-size can\'t be specified if there is only one version.')
if (args.version_original and args.version_new and
(TARGET_SIZE_NAME in args.version_original)
== (TARGET_SIZE_NAME in args.version_new)):
raise exceptions.ToolException(
'Exactly one version must have the target-size specified.')
def ParseVersion(flag_name, version_map, resources, messages):
"""Retrieves version from input map.
Args:
flag_name: name of the flag associated with the parsed string.
version_map: map containing version data provided by the user.
resources: provides reference for instance template resource.
messages: module containing message classes.
Returns:
InstanceGroupManagerVersion message object.
"""
if TEMPLATE_NAME not in version_map:
raise exceptions.InvalidArgumentException(flag_name,
'template has to be specified.')
template_ref = resources.Parse(
version_map[TEMPLATE_NAME], collection='compute.instanceTemplates')
if TARGET_SIZE_NAME in version_map:
target_size = ParseFixedOrPercent(flag_name, TARGET_SIZE_NAME,
version_map[TARGET_SIZE_NAME], messages)
else:
target_size = None
name = version_map.get('name')
return messages.InstanceGroupManagerVersion(
instanceTemplate=template_ref.SelfLink(),
targetSize=target_size,
name=name)
def ValidateCanaryVersionFlag(flag_name, version_map):
"""Retrieves canary version from input map.
Args:
flag_name: name of the flag associated with the parsed string.
version_map: map containing version data provided by the user.
"""
if version_map and TARGET_SIZE_NAME not in version_map:
raise exceptions.RequiredArgumentException(
'{} {}={}'.format(flag_name, TARGET_SIZE_NAME,
TARGET_SIZE_NAME.upper()),
'target size must be specified for canary version')
|
Sorsly/subtle
|
google-cloud-sdk/lib/googlecloudsdk/command_lib/compute/managed_instance_groups/update_instances_utils.py
|
Python
|
mit
| 5,956
|
from reads.conf.development import * # NOQA
|
martinbalfanz/reads-api
|
src/reads/settings.py
|
Python
|
mit
| 45
|
eveapi = local_import('eveapi')
def is_igb():
return 'http_eve_charid' in request.env
def auto_vivify(charid):
if db(db.char.charid==charid).count():
return # character id already exists in db
api = eveapi.EVEAPIConnection()
try:
el = api.eve.CharacterName(ids=str(charid))
charname = el.characters[0].name
db.char.insert(charid=charid, charname=charname, isk=0, oogpasshash=None)
return True
except eveapi.Error:
return False
def get_char_row_from_id(charid):
q = db(db.char.charid==charid)
if not q.count():
if auto_vivify(charid):
q = db(db.char.charid==charid)
else:
raise ValueError('Invalid character id:', charid)
char = q.select().first()
return char
def get_name_from_id(charid):
return get_char_row_from_id(charid).charname
def get_id_from_name(charname):
q = db(db.char.charname==charname)
if not q.count():
return None
charid = q.select()[0].charid
return charid
TYPENAME = 'evedb.typename'
PACKCOUNT = 'prizepack.count'
PACKIMG = 'prizepack.image_url'
PACKDATA_CACHE = {} # md5 : extracted pack
def extract_pack(prizeid):
q = db(db.prize.id==prizeid)
prize = q.select().first()
packdata = prize.pack
if not packdata: return None
from hashlib import md5
packhash = md5(packdata).hexdigest()
if packhash in PACKDATA_CACHE:
return PACKDATA_CACHE[packhash]
from json import loads
try:
data = loads(packdata)
    except ValueError:
return "invalid pack data: "+packdata
output = {}
for typeid in data:
fragment = {}
fragment[TYPENAME] = data[typeid]['NAME']
fragment[PACKCOUNT] = data[typeid]['COUNT']
url = data[typeid]['DISPLAY']
if url=='Type':
url = 'http://image.eveonline.com/Type/%s_64.png' % typeid
elif url=='Render':
            # Render images live under /Render/ (cf. prize_imgurl below).
            url = 'http://image.eveonline.com/Render/%s_64.png' % typeid
fragment[PACKIMG] = url
output[typeid] = fragment
PACKDATA_CACHE[packhash] = output
return output
def prize_details(prizeid):
q = db(db.prize.id==prizeid)
prize = q.select().first()
pack = extract_pack(prizeid)
if pack:
details = {'NAME': prize.name}
details.update(pack)
return (True, details)
if prize.iskprize > 0.01:
return (False, "Credit: "+ iskfmt(prize.iskprize) +" ISK")
return (False, prize.name)
def iskfmt(isk):
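    # Recursive lambda: peel off three digits at a time, joining groups with commas.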
commafy = lambda x: commafy(x/1000)+','+'0'*(3-len(str(x%1000)))+str(x%1000) if x>999 else str(x)
decimals = ("%.2f" % isk).split('.')[1]
return commafy(int(isk))+'.'+decimals
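# Worked example of iskfmt() under the grouping above:
#   iskfmt(1234567.891) -> '1,234,567.89'
#   iskfmt(5.0)         -> '5.00'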
def prize_imgurl(prizeid, size=128):
prize = db.prize(prizeid)
url = prize.imgurl
if url=='Type' or url=='Render':
typeid = prize.typeid
if url=='Type' and size > 64:
size = 64
url = "http://image.eveonline.com/%s/%s_%s.png" % (url, typeid, size)
return url
def prize_imgmeta(prizeid, size=128):
packoverlay = URL("static","images/pack64.png")
if size > 64:
packoverlay = URL("static","images/pack128.png")
imgurl = prize_imgurl(prizeid, size)
pack = bool(extract_pack(prizeid))
return (imgurl, pack, packoverlay)
def prize_price(prize):
typeid = prize.typeid
price = 0
if typeid and (not prize.iskprize or prize.iskprize<0.01):
price = fetch_buy_price(typeid, rounding=False, bonusing=False)
else:
price = prize.iskprize
return price
def spin_reel(reel_id):
prizes = db(db.prize.reel == reel_id).select()
reel = []
for prize in prizes:
reel += prize.repeat * [prize.id]
from random import choice as random_choice
from random import shuffle
shuffle(reel)
c1 = random_choice(reel)
c2 = random_choice(reel)
c3 = random_choice(reel)
return c1, c2, c3
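def _triple_match_chance(reel_id):
    # Hedged helper making the math behind spin_reel() explicit: three
    # independent draws with replacement, so the win probability is the sum
    # over prizes of (repeat / total_slots) ** 3.
    prizes = db(db.prize.reel == reel_id).select()
    total = float(sum(p.repeat for p in prizes))
    return sum((p.repeat / total) ** 3 for p in prizes)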
def chance_statistics(reel_id, spinnum=10000, respin=True, cashin=False, verbose=True):
reel = db.reel(reel_id)
cost = reel.spincost
isk = cost*spinnum
startisk = isk
print "Starting with %s ISK" % iskfmt(startisk)
iskwon = 0
prizes = db(db.prize.reel==reel_id).select()
iskprizes = {}
for prize in prizes:
if prize.iskprize>0.01:
iskprizes[prize.id] = prize.iskprize
if verbose:
print "isk prize", prize
results = {}
actualspins = 0
loss_streaks = []
losses = 0
def spin():
c1, c2, c3 = spin_reel(reel_id)
if c1==c2 and c2==c3:
if c1 not in results:
results[c1] = 0
results[c1] += 1
if c1 in iskprizes:
return True, iskprizes[c1], None
return True, 0, c1
return False, 0, None
standby_prizes = []
investisk = isk
while isk>=cost:
if verbose and int(isk)%int(cost*spinnum/10)==0:
print "%.2f" % (isk/cost)
isk -= cost
(win, iskwin, prize) = spin()
if win:
loss_streaks.append(losses)
losses = 0
if not iskwin:
standby_prizes.append(prize)
else:
losses += 1
if respin:
isk += iskwin
iskwon += iskwin
        actualspins += 1
if isk<cost and cashin: # If we're about to lose, try to cash in items
cashed_prize_isk = 0
sell_prize_isk = 0
for p in standby_prizes:
if not p:
print "Null prize?", p
continue
p = db.prize(p)
cash_value = buyout_price(None, p)
cashed_prize_isk += cash_value
typeids = [p.typeid] if not p.pack else extract_pack(p)
for t in typeids:
sell_prize_isk += get_sell_prices(typeids)[t]
print "Cashed in: %s ISK (%.2f%%)" % (iskfmt(cashed_prize_isk), 100*cashed_prize_isk/investisk)
investisk = cashed_prize_isk
if cashed_prize_isk+isk>startisk:
print "ALERT: %s ISK after cash-in > %s ISK start ISK" % (iskfmt(isk+cashed_prize_isk), iskfmt(startisk))
if sell_prize_isk>startisk:
print "XXX PLAYER WINNING: %s ISK > %s ISK" % (iskfmt(sell_prize_isk), iskfmt(startisk))
isk += cashed_prize_isk
cashed_prize_isk = 0
standby_prizes = []
for key in results:
name = db(db.prize.id==key).select().first().name
if verbose:
print key, results[key], name
if verbose:
print "Spins:", actualspins
print "Average loss streak", float(sum(loss_streaks))/len(loss_streaks)
print "Shortest loss streak:", min(loss_streaks)
print "Longest loss streak:", max(loss_streaks)
print "Start ISK:", iskfmt(startisk)
print "ISK paid out:", iskfmt(iskwon)
return results
def get_time():
from datetime import datetime
return datetime.utcnow()
def login_redirect():
redirect(URL('char', 'auth', vars={'next': URL()}))
|
fsufitch/eve-shipspinning
|
models/utils.py
|
Python
|
mit
| 7,220
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qrtextedit import ScanQRTextEdit
import re
from decimal import Decimal
from electrum import bitcoin
import util
RE_ADDRESS = '[1-9A-HJ-NP-Za-km-z]{26,}'
RE_ALIAS = '(.*?)\s*\<([1-9A-HJ-NP-Za-km-z]{26,})\>'
frozen_style = "QWidget { background-color:none; border:none;}"
normal_style = "QPlainTextEdit { }"
class PayToEdit(ScanQRTextEdit):
def __init__(self, win):
ScanQRTextEdit.__init__(self)
self.win = win
self.amount_edit = win.amount_e
self.document().contentsChanged.connect(self.update_size)
self.heightMin = 0
self.heightMax = 150
self.c = None
self.textChanged.connect(self.check_text)
self.outputs = []
self.errors = []
self.is_pr = False
self.is_alias = False
self.scan_f = win.pay_to_URI
self.update_size()
self.payto_address = None
self.previous_payto = ''
def setFrozen(self, b):
self.setReadOnly(b)
self.setStyleSheet(frozen_style if b else normal_style)
for button in self.buttons:
button.setHidden(b)
def setGreen(self):
self.setStyleSheet(util.GREEN_BG)
def setExpired(self):
self.setStyleSheet(util.RED_BG)
def parse_address_and_amount(self, line):
x, y = line.split(',')
out_type, out = self.parse_output(x)
amount = self.parse_amount(y)
return out_type, out, amount
def parse_output(self, x):
try:
address = self.parse_address(x)
return bitcoin.TYPE_ADDRESS, address
except:
script = self.parse_script(x)
return bitcoin.TYPE_SCRIPT, script
def parse_script(self, x):
from electrum.transaction import opcodes, push_script
script = ''
for word in x.split():
if word[0:3] == 'OP_':
assert word in opcodes.lookup
script += chr(opcodes.lookup[word])
else:
script += push_script(word).decode('hex')
return script
def parse_amount(self, x):
if x.strip() == '!':
return '!'
p = pow(10, self.amount_edit.decimal_point())
return int(p * Decimal(x.strip()))
def parse_address(self, line):
r = line.strip()
m = re.match('^'+RE_ALIAS+'$', r)
address = str(m.group(2) if m else r)
assert bitcoin.is_address(address)
return address
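    # Illustrative alias form accepted by parse_address(); the base58 string
    # is sample data only and still has to pass bitcoin.is_address():
    #   'donations <1BoatSLRHtKNngkdXEeobR76b53LETtpyT>'
    #       -> '1BoatSLRHtKNngkdXEeobR76b53LETtpyT'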
def check_text(self):
self.errors = []
if self.is_pr:
return
# filter out empty lines
lines = filter(lambda x: x, self.lines())
outputs = []
total = 0
self.payto_address = None
if len(lines) == 1:
data = lines[0]
if data.startswith("myriadcoin:"):
self.scan_f(data)
return
try:
self.payto_address = self.parse_output(data)
except:
pass
if self.payto_address:
self.win.lock_amount(False)
return
is_max = False
for i, line in enumerate(lines):
try:
_type, to_address, amount = self.parse_address_and_amount(line)
except:
self.errors.append((i, line.strip()))
continue
outputs.append((_type, to_address, amount))
if amount == '!':
is_max = True
else:
total += amount
self.win.is_max = is_max
self.outputs = outputs
self.payto_address = None
if self.win.is_max:
self.win.do_update_fee()
else:
self.amount_edit.setAmount(total if outputs else None)
self.win.lock_amount(total or len(lines)>1)
def get_errors(self):
return self.errors
def get_recipient(self):
return self.payto_address
def get_outputs(self, is_max):
if self.payto_address:
if is_max:
amount = '!'
else:
amount = self.amount_edit.get_amount()
_type, addr = self.payto_address
self.outputs = [(_type, addr, amount)]
return self.outputs[:]
def lines(self):
return unicode(self.toPlainText()).split('\n')
def is_multiline(self):
return len(self.lines()) > 1
def paytomany(self):
self.setText("\n\n\n")
self.update_size()
def update_size(self):
docHeight = self.document().size().height()
h = docHeight*17 + 11
if self.heightMin <= h <= self.heightMax:
self.setMinimumHeight(h)
self.setMaximumHeight(h)
self.verticalScrollBar().hide()
def setCompleter(self, completer):
self.c = completer
self.c.setWidget(self)
self.c.setCompletionMode(QCompleter.PopupCompletion)
self.c.activated.connect(self.insertCompletion)
def insertCompletion(self, completion):
if self.c.widget() != self:
return
tc = self.textCursor()
extra = completion.length() - self.c.completionPrefix().length()
tc.movePosition(QTextCursor.Left)
tc.movePosition(QTextCursor.EndOfWord)
tc.insertText(completion.right(extra))
self.setTextCursor(tc)
def textUnderCursor(self):
tc = self.textCursor()
tc.select(QTextCursor.WordUnderCursor)
return tc.selectedText()
def keyPressEvent(self, e):
if self.isReadOnly():
return
if self.c.popup().isVisible():
if e.key() in [Qt.Key_Enter, Qt.Key_Return]:
e.ignore()
return
if e.key() in [Qt.Key_Tab]:
e.ignore()
return
if e.key() in [Qt.Key_Down, Qt.Key_Up] and not self.is_multiline():
e.ignore()
return
QPlainTextEdit.keyPressEvent(self, e)
        # Bitwise test: plain 'and'/'or' would treat any pressed modifier as Ctrl/Shift.
        ctrlOrShift = e.modifiers() & (Qt.ControlModifier | Qt.ShiftModifier)
if self.c is None or (ctrlOrShift and e.text().isEmpty()):
return
eow = QString("~!@#$%^&*()_+{}|:\"<>?,./;'[]\\-=")
        hasModifier = (e.modifiers() != Qt.NoModifier) and not ctrlOrShift
completionPrefix = self.textUnderCursor()
if hasModifier or e.text().isEmpty() or completionPrefix.length() < 1 or eow.contains(e.text().right(1)):
self.c.popup().hide()
return
if completionPrefix != self.c.completionPrefix():
            self.c.setCompletionPrefix(completionPrefix)
self.c.popup().setCurrentIndex(self.c.completionModel().index(0, 0))
cr = self.cursorRect()
cr.setWidth(self.c.popup().sizeHintForColumn(0) + self.c.popup().verticalScrollBar().sizeHint().width())
self.c.complete(cr)
def qr_input(self):
data = super(PayToEdit,self).qr_input()
if data.startswith("myriadcoin:"):
self.scan_f(data)
# TODO: update fee
def resolve(self):
self.is_alias = False
if self.hasFocus():
return
if self.is_multiline(): # only supports single line entries atm
return
if self.is_pr:
return
key = str(self.toPlainText())
if key == self.previous_payto:
return
self.previous_payto = key
        if not ('.' in key and '<' not in key and ' ' not in key):
return
try:
data = self.win.contacts.resolve(key)
except:
return
if not data:
return
self.is_alias = True
address = data.get('address')
name = data.get('name')
new_url = key + ' <' + address + '>'
self.setText(new_url)
self.previous_payto = new_url
#if self.win.config.get('openalias_autoadd') == 'checked':
self.win.contacts[key] = ('openalias', name)
self.win.contact_list.on_update()
self.setFrozen(True)
if data.get('type') == 'openalias':
self.validated = data.get('validated')
if self.validated:
self.setGreen()
else:
self.setExpired()
else:
self.validated = None
|
cryptapus/electrum-myr
|
gui/qt/paytoedit.py
|
Python
|
mit
| 9,506
|
#!/usr/bin/env python3
import Parser, Lexer
from DSL import _lexerParser, _lexerLexer
from DSL import _parserLexer, _parserParser
from DSL import makeParser, makeLexer
lexerLexerConfig = r"""#dsl
%keys ::= '%ignore' '%keys' '::='
comment ::= /#[^\n]*\n/
identifier ::= /[_a-zA-Z][_a-zA-Z0-9]*/
sqString ::= /'[^']*'/
dqString ::= /"[^"\\]*(\\\\.[^"\\]*)*"/
reString ::= /\/[^\/\\]*(\\\\.[^\/\\]*)*\//
%ignore ::= comment
"""
lexerLexer = makeLexer(lexerLexerConfig)
lexerParserConfig = r"""#dsl
LexRules ::= rule*
rule ::= identifier '::=' (sqString | dqString | reString)
| '%keys' '::=' (sqString | dqString)+
| '%ignore' '::=' (identifier | sqString | dqString)+
%ignore ::= '::='
"""
lexerParser = makeParser(lexerParserConfig)
parserLexerConfig = r"""#dsl
%keys ::= '$' '|' '::=' '(' ')' '*' '+' '?'
identifier ::= /[_a-zA-Z][_a-zA-Z0-9]*/
configType ::= /%(ignore|expandSingle|expand)/
sqString ::= /'[^']*'/
dqString ::= /"[^"\\]*(\\\\.[^"\\]*)*"/
comment ::= /#[^\n]*\n/
%ignore ::= comment
"""
parserLexer = makeLexer(parserLexerConfig)
parserParserConfig = r"""#dsl
ParseRules ::= rule*
rule ::= identifier '::=' alternate ('|' alternate)*
| configType '::=' simpleItem+
alternate ::= '$' | rhsItem+
rhsItem ::= itemValue ('?' | '+' | '*')?
itemValue ::= simpleItem | '(' alternate ('|' alternate)* ')'
simpleItem ::= identifier | dqString | sqString
%ignore ::= '::=' '|' '$' '(' ')'
%expand ::= simpleItem
"""
parserParser = makeParser(parserParserConfig)
realOutput = _lexerParser.parse(_lexerLexer.parse(lexerLexerConfig))
testOutput = lexerParser.parse(lexerLexer.parse(lexerLexerConfig))
assert(str(realOutput) == str(testOutput))
realOutput = _lexerParser.parse(_lexerLexer.parse(parserLexerConfig))
testOutput = lexerParser.parse(lexerLexer.parse(parserLexerConfig))
assert(str(realOutput) == str(testOutput))
realOutput = _parserParser.parse(_parserLexer.parse(lexerParserConfig))
testOutput = parserParser.parse(parserLexer.parse(lexerParserConfig))
assert(str(realOutput) == str(testOutput))
realOutput = _parserParser.parse(_parserLexer.parse(parserParserConfig))
testOutput = parserParser.parse(parserLexer.parse(parserParserConfig))
assert(str(realOutput) == str(testOutput))
print('YA!')
|
coquelicot/PyDSL
|
src/test.py
|
Python
|
mit
| 2,354
|
#coding: utf-8
from __future__ import absolute_import, unicode_literals, print_function
import cProfile
from datetime import datetime
import re
import os
import codecs
import timeit
import pymorphy
from pymorphy.contrib.tokenizers import extract_words
DICT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'dicts', 'converted'))
def total_seconds(delta):
    return delta.days * 3600 * 24 + delta.seconds + delta.microseconds / 1000000.0
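# Note: this mirrors timedelta.total_seconds(), built in since Python 2.7;
# the helper presumably remains for older interpreters.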
def get_words(text):
# return list(extract_words(text))
r = re.compile('[\W+-]',re.U)
return [word for word in r.split(text.upper()) if word]
def do_normalize(words, morph):
for word in words:
forms = morph.normalize(word)
def do_pluralize(words, morph):
for word in words:
forms = morph.pluralize_ru(word)
def do_all(words, morph):
do_normalize(words, morph)
# do_pluralize(words, morph)
def load_words(fn):
filename = os.path.abspath(os.path.join('text', fn))
with codecs.open(filename, encoding='utf-8') as f:
text = f.read()
return get_words(text)
def get_morph(backend, **kwargs):
if backend == 'pickle':
path = os.path.join(DICT_PATH, 'ru', 'morphs.pickle')
else:
path = os.path.join(DICT_PATH,'ru')
return pymorphy.get_morph(path, backend, **kwargs)
def get_mem_usage():
try:
import psutil
proc = psutil.Process(os.getpid())
return proc.get_memory_info()
except ImportError:
from collections import namedtuple
Info = namedtuple('Info', 'vms rss')
return Info(0, 0)
def print_memory_usage(old=None):
info = get_mem_usage()
M = 1024*1024.0
if old:
print("RSS: %0.1fM (+%0.1fM), VMS: %0.1fM (+%0.1fM)" % (
info.rss/M, (info.rss-old.rss)/M, info.vms/M, (info.vms-old.vms)/M),
)
else:
print("RSS: %0.1fM, VMS: %0.1fM" % (info.rss/M, info.vms/M))
def bench(filename, backend='shelve', use_psyco=True, use_cache=True, profile=True):
if profile:
words = load_words(filename)
print ('Text is loaded (%d words)' % len(words))
usage = get_mem_usage()
morph = get_morph(backend, cached=use_cache)
prof = cProfile.Profile()
prof = prof.runctx('do_all(words, morph)', globals = globals(), locals=locals())
prof.print_stats(1)
print_memory_usage(usage)
else:
# prep = """
#from bench import do_all, load_words, get_morph
#words = load_words('%s')
#morph = get_morph('%s', cached=%s)
# """ % (file, backend, use_cache)
# res = timeit.timeit('do_all(words, morph)', prep, number=1)
# print '%s => %s (cache: %s) => %.2f sec' % (file, backend, use_cache, res)
start = datetime.now()
words = load_words(filename)
usage = get_mem_usage()
morph = get_morph(backend, cached=use_cache)
loaded = datetime.now()
do_all(words, morph)
parsed = datetime.now()
load_time = total_seconds(loaded-start)
parse_time = total_seconds(parsed-loaded)
wps = len(words)/parse_time
print ('%s => %s (cache: %s) => load: %.2f sec, parse: %0.2f sec (%d words/sec)' % (
filename, backend, use_cache, load_time, parse_time, wps))
print_memory_usage(usage)
|
kmike/pymorphy
|
bench/__init__.py
|
Python
|
mit
| 3,351
|
import calendar
import datetime
import platform
import time
import os
import ssl
import socket
import urllib
import urlparse
import warnings
import shippo
from shippo import error, http_client, version, util, certificate_blacklist
def _encode_datetime(dttime):
if dttime.tzinfo and dttime.tzinfo.utcoffset(dttime) is not None:
utc_timestamp = calendar.timegm(dttime.utctimetuple())
else:
utc_timestamp = time.mktime(dttime.timetuple())
return int(utc_timestamp)
def _api_encode(data):
for key, value in data.iteritems():
key = util.utf8(key)
if value is None:
continue
elif hasattr(value, 'shippo_id'):
yield (key, value.shippo_id)
elif isinstance(value, list) or isinstance(value, tuple):
for subvalue in value:
yield ("%s[]" % (key,), util.utf8(subvalue))
elif isinstance(value, dict):
subdict = dict(('%s[%s]' % (key, subkey), subvalue) for
subkey, subvalue in value.iteritems())
for subkey, subvalue in _api_encode(subdict):
yield (subkey, subvalue)
elif isinstance(value, datetime.datetime):
yield (key, _encode_datetime(value))
else:
yield (key, util.utf8(value))
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlparse.urlsplit(url)
if base_query:
query = '%s&%s' % (base_query, query)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
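# Sketch of the query merging above (illustrative URL and params):
#   _build_api_url('https://api.goshippo.com/v1/addresses?page=2', 'results=10')
#       -> 'https://api.goshippo.com/v1/addresses?page=2&results=10'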
class APIRequestor(object):
_CERTIFICATE_VERIFIED = False
def __init__(self, auth=None, client=None):
self.auth = auth
from shippo import verify_ssl_certs
self._client = client or http_client.new_default_http_client(
verify_ssl_certs=verify_ssl_certs)
def request(self, method, url, params=None):
self._check_ssl_cert()
rbody, rcode, my_auth = self.request_raw(
method.lower(), url, params)
resp = self.interpret_response(rbody, rcode)
return resp, my_auth
def handle_api_error(self, rbody, rcode, resp):
try:
err = resp['error']
except (KeyError, TypeError):
raise error.APIError(
"Invalid response object from API: %r (HTTP response code "
"was %d)" % (rbody, rcode),
rbody, rcode, resp)
if rcode in [400, 404]:
raise error.InvalidRequestError(
err.get('message'), err.get('param'), rbody, rcode, resp)
elif rcode == 401:
raise error.AuthenticationError(
err.get('message'), rbody, rcode, resp)
elif rcode == 402:
raise error.CardError(err.get('message'), err.get('param'),
err.get('code'), rbody, rcode, resp)
else:
raise error.APIError(err.get('message'), rbody, rcode, resp)
def request_raw(self, method, url, params=None):
"""
Mechanism for issuing an API call
"""
from shippo import api_version
if self.auth:
my_auth = self.auth
else:
from shippo import auth
my_auth = auth
if my_auth is None:
raise error.AuthenticationError(
'No API key provided. (HINT: set your API key using '
'"shippo.auth = (<username>, <password>)"). You can generate API keys '
'from the Shippo web interface. See https://goshippo.com/api '
'for details, or email support@shippo.com if you have any '
'questions.')
abs_url = '%s%s' % (shippo.api_base, url)
encoded_params = urllib.urlencode(list(_api_encode(params or {})))
if method == 'get' or method == 'delete':
if params:
abs_url = _build_api_url(abs_url, encoded_params)
post_data = None
elif method == 'post':
post_data = encoded_params
else:
raise error.APIConnectionError(
'Unrecognized HTTP method %r. This may indicate a bug in the '
'Shippo bindings. Please contact support@shippo.com for '
'assistance.' % (method,))
ua = {
'bindings_version': version.VERSION,
'lang': 'python',
'publisher': 'shippo',
'httplib': self._client.name,
}
for attr, func in [['lang_version', platform.python_version],
['platform', platform.platform],
['uname', lambda: ' '.join(platform.uname())]]:
try:
val = func()
except Exception, e:
val = "!! %s" % (e,)
ua[attr] = val
headers = {
'X-Shippo-Client-User-Agent': util.json.dumps(ua),
'User-Agent': 'Shippo/v1 PythonBindings/%s' % (version.VERSION,),
'Authorization': 'Bearer %s' % (my_auth,)
}
if api_version is not None:
headers['Shippo-Version'] = api_version
rbody, rcode = self._client.request(
method, abs_url, headers, post_data)
util.logger.info(
'API request to %s returned (response code, response body) of '
'(%d, %r)',
abs_url, rcode, rbody)
return rbody, rcode, my_auth
def interpret_response(self, rbody, rcode):
try:
if hasattr(rbody, 'decode'):
rbody = rbody.decode('utf-8')
resp = util.json.loads(rbody)
except Exception:
raise error.APIError(
"Invalid response body from API: %s "
"(HTTP response code was %d)" % (rbody, rcode),
rbody, rcode)
if not (200 <= rcode < 300):
self.handle_api_error(rbody, rcode, resp)
return resp
def _check_ssl_cert(self):
"""Preflight the SSL certificate presented by the backend.
This isn't 100% bulletproof, in that we're not actually validating the
transport used to communicate with Shippo, merely that the first
        attempt does not use a revoked certificate.
Unfortunately the interface to OpenSSL doesn't make it easy to check
the certificate before sending potentially sensitive data on the wire.
This approach raises the bar for an attacker significantly."""
from shippo import verify_ssl_certs
if verify_ssl_certs and not self._CERTIFICATE_VERIFIED:
uri = urlparse.urlparse(shippo.api_base)
try:
certificate = ssl.get_server_certificate(
(uri.hostname, uri.port or 443))
der_cert = ssl.PEM_cert_to_DER_cert(certificate)
except socket.error, e:
raise error.APIConnectionError(e)
except TypeError:
# The Google App Engine development server blocks the C socket
# module which causes a type error when using the SSL library
if ('APPENGINE_RUNTIME' in os.environ and
'Dev' in os.environ.get('SERVER_SOFTWARE', '')):
self._CERTIFICATE_VERIFIED = True
warnings.warn(
'We were unable to verify Shippo\'s SSL certificate '
'due to a bug in the Google App Engine development '
'server. Please alert us immediately at '
'support@shippo.com if this message appears in your '
'production logs.')
return
else:
raise
self._CERTIFICATE_VERIFIED = certificate_blacklist.verify(
uri.hostname, der_cert)
|
bosswissam/shippo-python
|
shippo/api_requestor.py
|
Python
|
mit
| 7,865
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._management_locks_operations import build_create_or_update_at_resource_group_level_request, build_create_or_update_at_resource_level_request, build_create_or_update_at_subscription_level_request, build_create_or_update_by_scope_request, build_delete_at_resource_group_level_request, build_delete_at_resource_level_request, build_delete_at_subscription_level_request, build_delete_by_scope_request, build_get_at_resource_group_level_request, build_get_at_resource_level_request, build_get_at_subscription_level_request, build_get_by_scope_request, build_list_at_resource_group_level_request, build_list_at_resource_level_request, build_list_at_subscription_level_request, build_list_by_scope_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagementLocksOperations:
"""ManagementLocksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.locks.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def create_or_update_at_resource_group_level(
self,
resource_group_name: str,
lock_name: str,
parameters: "_models.ManagementLockObject",
**kwargs: Any
) -> "_models.ManagementLockObject":
"""Creates or updates a management lock at the resource group level.
When you apply a lock at a parent scope, all child resources inherit the same lock. To create
management locks, you must have access to Microsoft.Authorization/\ * or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param resource_group_name: The name of the resource group to lock.
:type resource_group_name: str
:param lock_name: The lock name. The lock name can be a maximum of 260 characters. It cannot
        contain <, >, %, &, :, \, ?, /, or any control characters.
:type lock_name: str
:param parameters: The management lock parameters.
:type parameters: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagementLockObject')
request = build_create_or_update_at_resource_group_level_request(
resource_group_name=resource_group_name,
lock_name=lock_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update_at_resource_group_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_at_resource_group_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
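    # Hedged usage sketch, assuming the async ManagementLockClient from
    # azure.mgmt.resource.locks.aio exposes this group as `management_locks`:
    #   lock = await client.management_locks.create_or_update_at_resource_group_level(
    #       'example-rg', 'example-lock', {'level': 'CanNotDelete'})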
@distributed_trace_async
async def delete_at_resource_group_level(
self,
resource_group_name: str,
lock_name: str,
**kwargs: Any
) -> None:
"""Deletes a management lock at the resource group level.
To delete management locks, you must have access to Microsoft.Authorization/\ * or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param resource_group_name: The name of the resource group containing the lock.
:type resource_group_name: str
:param lock_name: The name of lock to delete.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_at_resource_group_level_request(
resource_group_name=resource_group_name,
lock_name=lock_name,
subscription_id=self._config.subscription_id,
template_url=self.delete_at_resource_group_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_at_resource_group_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
@distributed_trace_async
async def get_at_resource_group_level(
self,
resource_group_name: str,
lock_name: str,
**kwargs: Any
) -> "_models.ManagementLockObject":
"""Gets a management lock at the resource group level.
:param resource_group_name: The name of the locked resource group.
:type resource_group_name: str
:param lock_name: The name of the lock to get.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_at_resource_group_level_request(
resource_group_name=resource_group_name,
lock_name=lock_name,
subscription_id=self._config.subscription_id,
template_url=self.get_at_resource_group_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_resource_group_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
@distributed_trace_async
async def create_or_update_by_scope(
self,
scope: str,
lock_name: str,
parameters: "_models.ManagementLockObject",
**kwargs: Any
) -> "_models.ManagementLockObject":
"""Create or update a management lock by scope.
:param scope: The scope for the lock. When providing a scope for the assignment, use
'/subscriptions/{subscriptionId}' for subscriptions,
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups, and
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePathIfPresent}/{resourceType}/{resourceName}'
for resources.
:type scope: str
:param lock_name: The name of lock.
:type lock_name: str
:param parameters: Create or update management lock parameters.
:type parameters: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagementLockObject')
request = build_create_or_update_by_scope_request(
scope=scope,
lock_name=lock_name,
content_type=content_type,
json=_json,
template_url=self.create_or_update_by_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_by_scope.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
@distributed_trace_async
async def delete_by_scope(
self,
scope: str,
lock_name: str,
**kwargs: Any
) -> None:
"""Delete a management lock by scope.
:param scope: The scope for the lock.
:type scope: str
:param lock_name: The name of lock.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_by_scope_request(
scope=scope,
lock_name=lock_name,
template_url=self.delete_by_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_by_scope.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
@distributed_trace_async
async def get_by_scope(
self,
scope: str,
lock_name: str,
**kwargs: Any
) -> "_models.ManagementLockObject":
"""Get a management lock by scope.
:param scope: The scope for the lock.
:type scope: str
:param lock_name: The name of lock.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_by_scope_request(
scope=scope,
lock_name=lock_name,
template_url=self.get_by_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_scope.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
@distributed_trace_async
async def create_or_update_at_resource_level(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
lock_name: str,
parameters: "_models.ManagementLockObject",
**kwargs: Any
) -> "_models.ManagementLockObject":
"""Creates or updates a management lock at the resource level or any level below the resource.
When you apply a lock at a parent scope, all child resources inherit the same lock. To create
management locks, you must have access to Microsoft.Authorization/\ * or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param resource_group_name: The name of the resource group containing the resource to lock.
:type resource_group_name: str
:param resource_provider_namespace: The resource provider namespace of the resource to lock.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to lock.
:type resource_type: str
:param resource_name: The name of the resource to lock.
:type resource_name: str
:param lock_name: The name of lock. The lock name can be a maximum of 260 characters. It cannot
        contain <, >, %, &, :, \, ?, /, or any control characters.
:type lock_name: str
:param parameters: Parameters for creating or updating a management lock.
:type parameters: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagementLockObject')
request = build_create_or_update_at_resource_level_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
lock_name=lock_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update_at_resource_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_at_resource_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
@distributed_trace_async
async def delete_at_resource_level(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
lock_name: str,
**kwargs: Any
) -> None:
"""Deletes the management lock of a resource or any level below the resource.
To delete management locks, you must have access to Microsoft.Authorization/\ * or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param resource_group_name: The name of the resource group containing the resource with the
lock to delete.
:type resource_group_name: str
:param resource_provider_namespace: The resource provider namespace of the resource with the
lock to delete.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource with the lock to delete.
:type resource_type: str
:param resource_name: The name of the resource with the lock to delete.
:type resource_name: str
:param lock_name: The name of the lock to delete.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_at_resource_level_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
lock_name=lock_name,
subscription_id=self._config.subscription_id,
template_url=self.delete_at_resource_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_at_resource_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
@distributed_trace_async
async def get_at_resource_level(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
lock_name: str,
**kwargs: Any
) -> "_models.ManagementLockObject":
"""Get the management lock of a resource or any level below resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: An extra path parameter needed in some services, like SQL
Databases.
:type parent_resource_path: str
:param resource_type: The type of the resource.
:type resource_type: str
:param resource_name: The name of the resource.
:type resource_name: str
:param lock_name: The name of lock.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_at_resource_level_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
lock_name=lock_name,
subscription_id=self._config.subscription_id,
template_url=self.get_at_resource_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_resource_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
@distributed_trace_async
async def create_or_update_at_subscription_level(
self,
lock_name: str,
parameters: "_models.ManagementLockObject",
**kwargs: Any
) -> "_models.ManagementLockObject":
"""Creates or updates a management lock at the subscription level.
When you apply a lock at a parent scope, all child resources inherit the same lock. To create
management locks, you must have access to Microsoft.Authorization/\ * or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param lock_name: The name of lock. The lock name can be a maximum of 260 characters. It cannot
contain <, > %, &, :, \, ?, /, or any control characters.
:type lock_name: str
:param parameters: The management lock parameters.
:type parameters: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagementLockObject')
request = build_create_or_update_at_subscription_level_request(
lock_name=lock_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update_at_subscription_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_at_subscription_level.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
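    # Hedged usage sketch (editor's addition, not part of the generated SDK):
    # assuming an authenticated aio ManagementLockClient `client` and the
    # v2016_09_01 models, a subscription-level lock could be created like this;
    # `client` and the field values are illustrative only.
    #
    #   lock = await client.management_locks.create_or_update_at_subscription_level(
    #       lock_name="do-not-delete",
    #       parameters=ManagementLockObject(level="CanNotDelete", notes="set by ops"),
    #   )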
@distributed_trace_async
async def delete_at_subscription_level(
self,
lock_name: str,
**kwargs: Any
) -> None:
"""Deletes the management lock at the subscription level.
To delete management locks, you must have access to Microsoft.Authorization/\ * or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param lock_name: The name of lock to delete.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_at_subscription_level_request(
lock_name=lock_name,
subscription_id=self._config.subscription_id,
template_url=self.delete_at_subscription_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_at_subscription_level.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
@distributed_trace_async
async def get_at_subscription_level(
self,
lock_name: str,
**kwargs: Any
) -> "_models.ManagementLockObject":
"""Gets a management lock at the subscription level.
:param lock_name: The name of the lock to get.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_at_subscription_level_request(
lock_name=lock_name,
subscription_id=self._config.subscription_id,
template_url=self.get_at_subscription_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_level.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
@distributed_trace
def list_at_resource_group_level(
self,
resource_group_name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ManagementLockListResult"]:
"""Gets all the management locks for a resource group.
:param resource_group_name: The name of the resource group containing the locks to get.
:type resource_group_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagementLockListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_at_resource_group_level_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=self.list_at_resource_group_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_at_resource_group_level_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagementLockListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_resource_group_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks'} # type: ignore
@distributed_trace
def list_at_resource_level(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ManagementLockListResult"]:
"""Gets all the management locks for a resource or any level below resource.
:param resource_group_name: The name of the resource group containing the locked resource. The
name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the locked resource.
:type resource_type: str
:param resource_name: The name of the locked resource.
:type resource_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagementLockListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_at_resource_level_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=self.list_at_resource_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_at_resource_level_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagementLockListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_resource_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks'} # type: ignore
@distributed_trace
def list_at_subscription_level(
self,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ManagementLockListResult"]:
"""Gets all the management locks for a subscription.
:param filter: The filter to apply on the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagementLockListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_at_subscription_level_request(
subscription_id=self._config.subscription_id,
filter=filter,
template_url=self.list_at_subscription_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_at_subscription_level_request(
subscription_id=self._config.subscription_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagementLockListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_subscription_level.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks'} # type: ignore
@distributed_trace
def list_by_scope(
self,
scope: str,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ManagementLockListResult"]:
"""Gets all the management locks for a scope.
:param scope: The scope for the lock. When providing a scope for the assignment, use
'/subscriptions/{subscriptionId}' for subscriptions,
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups, and
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePathIfPresent}/{resourceType}/{resourceName}'
for resources.
:type scope: str
:param filter: The filter to apply on the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagementLockListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_scope_request(
scope=scope,
filter=filter,
template_url=self.list_by_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_scope_request(
scope=scope,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagementLockListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_scope.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/locks'} # type: ignore
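# Hedged usage sketch (editor's addition): the scope-based pager above can be
# consumed with `async for`; the subscription id below is a placeholder and
# `client` is an assumed, authenticated aio ManagementLockClient.
#
#   async for lock in client.management_locks.list_by_scope(
#           scope="/subscriptions/00000000-0000-0000-0000-000000000000"):
#       print(lock.name, lock.level)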
|
Azure/azure-sdk-for-python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/aio/operations/_management_locks_operations.py
|
Python
|
mit
| 49,288
|
from src.settings import Colors
def league_color(league: str) -> Colors:
if league in [
]:
return Colors.GREEN
if league in [
'1 CFL (Montenegro)',
'A Lyga (Lithuania)',
'Bikar (Iceland)',
'Coupe de la Ligue (France)',
'EURO Qualifiers (Europe)',
'FA Cup (England)',
'J-League (Japan)',
'J-League 2 (Japan)',
'K-League (South Korea)',
'Landspokal (Denmark)',
'League Cup (Scotland)',
'Meistriliiga (Estonia)',
'OFB Cup (Austria)',
'Pohar CMFS (Czech Republic)',
'Premier League (Wales)',
'Primera Division (Chile)',
'Proximus League (Belgium)',
'Serie A (Italy)',
'S-League (Singapore)',
'Slovensky Pohar (Slovakia)',
'Svenska Cupen (Sweden)',
'Swiss Cup (Switzerland)',
'Virsliga (Latvia)',
'Vyscha Liga (Ukraine)',
'Úrvalsdeild (Iceland)',
]:
return Colors.RED
if league in [
]:
return Colors.YELLOW
return Colors.EMPTY
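# Hedged usage sketch (editor's addition): `league_color` maps a league label to
# a Colors member and falls through to Colors.EMPTY for anything unlisted; the
# GREEN and YELLOW branches are empty in this version, so only RED ever matches.
#
#   assert league_color('Serie A (Italy)') == Colors.RED
#   assert league_color('Some Unknown League') == Colors.EMPTY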
|
vapkarian/soccer-analyzer
|
src/colors/v20/default.py
|
Python
|
mit
| 1,079
|
import platform
from .clipboard import Clipboard
__version__ = '1.0.0'
def get_clipboard(handle=None):
system = platform.system().lower()
if 'windows' == system:
from . import win_clipboard
clip = win_clipboard.WinClipboard(handle)
elif 'darwin' == system:
raise NotImplementedError("Clipboard not available for MacOS yet")
else:
        try:
            from . import gtk_clipboard
            clip = gtk_clipboard.GTKClipboard(handle)
        except Exception:  # GTK import failed; the Qt fallback is not implemented yet
            raise NotImplementedError("Clipboard for Qt not available yet")
return clip
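# Hedged usage sketch (editor's addition): the factory hides backend selection
# behind platform.system(); `handle` is only meaningful for the Windows backend.
#
#   clip = get_clipboard()
#   # interact with `clip` through the common Clipboard interface from .clipboard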
|
sebastiandev/clipton
|
clipton/__init__.py
|
Python
|
mit
| 598
|
from collections import namedtuple
from cairo import LINE_JOIN_ROUND
from zorro.di import di, dependency, has_dependencies
from tilenol.groups import GroupManager
from tilenol.commands import CommandDispatcher
from .base import Widget
from tilenol.theme import Theme
from tilenol.window import Window
GroupState = namedtuple(
'GroupState',
('name', 'empty', 'active', 'visible', 'urgent')
)
@has_dependencies
class State(object):
commander = dependency(CommandDispatcher, 'commander')
gman = dependency(GroupManager, 'group-manager')
def __init__(self):
self._state = None
def dirty(self):
return self._state != self._read()
def update(self):
nval = self._read()
if nval != self._state:
self._state = nval
return True
def _read(self):
cur = self.commander.get('group')
visgr = self.gman.current_groups.values()
return tuple(GroupState(g.name, g.empty, g is cur, g in visgr,
g.has_urgent_windows)
for g in self.gman.groups)
@property
def groups(self):
return self._state
@has_dependencies
class Groupbox(Widget):
theme = dependency(Theme, 'theme')
def __init__(self, *, filled=False, first_letter=False, right=False):
super().__init__(right=right)
self.filled = filled
self.first_letter = first_letter
def __zorro_di_done__(self):
self.state = di(self).inject(State())
bar = self.theme.bar
self.font = bar.font
self.inactive_color = bar.dim_color_pat
self.urgent_color = bar.bright_color_pat
self.active_color = bar.text_color_pat
self.selected_color = bar.active_border_pat
self.subactive_color = bar.subactive_border_pat
self.padding = bar.text_padding
self.border_width = bar.border_width
self.state.gman.group_changed.listen(self.bar.redraw.emit)
Window.any_window_changed.listen(self.check_state)
def check_state(self):
        # dirty() is a method, not a property; call it so the state comparison runs
        if self.state.dirty():
self.bar.redraw.emit()
def draw(self, canvas, l, r):
self.state.update()
assert not self.right, "Sorry, right not implemented"
self.font.apply(canvas)
canvas.set_line_join(LINE_JOIN_ROUND)
canvas.set_line_width(self.border_width)
x = l
between = self.padding.right + self.padding.left
for gs in self.state.groups:
gname = gs.name
if self.first_letter:
gname = gname[0]
sx, sy, w, h, ax, ay = canvas.text_extents(gname)
if gs.active:
canvas.set_source(self.selected_color)
if self.filled:
canvas.rectangle(x, 0, ax + between, self.height)
canvas.fill()
else:
canvas.rectangle(
x + 2, 2, ax + between - 4, self.height - 4
)
canvas.stroke()
elif gs.visible:
canvas.set_source(self.subactive_color)
if self.filled:
canvas.rectangle(x, 0, ax + between, self.height)
canvas.fill()
else:
canvas.rectangle(
x + 2, 2, ax + between - 4, self.height - 4
)
canvas.stroke()
if gs.urgent:
canvas.set_source(self.urgent_color)
elif gs.empty:
canvas.set_source(self.inactive_color)
else:
canvas.set_source(self.active_color)
canvas.move_to(x + self.padding.left,
self.height - self.padding.bottom)
canvas.show_text(gname)
x += ax + between
return x, r
|
tailhook/tilenol
|
tilenol/widgets/groupbox.py
|
Python
|
mit
| 3,880
|
"""
Trabalho T2 da disciplina Teoria dos Grafos, ministrada em 2014/02
'All Hail Gabe Newell'
Alunos:
Daniel Nobusada 344443
Thales Eduardo Adair Menato 407976
Jorge Augusto Bernardo 407844
"""
import networkx as nx
import numpy as np
import plotly.plotly as py
from plotly.graph_objs import *
py.sign_in("thamenato", "aq0t3czzut")
# Import the Zachary's Karate Club graph
graphG = nx.read_gml('karate.gml')
"""
1) Computacao da distribuicao estacionaria teorica (steady state) do grafo
w(i) = d(vi) / 2|E|
"""
w_real = []
for i in graphG.nodes_iter():
aux = float(graphG.degree(i)) / float((2 * graphG.number_of_edges()))
w_real.append(aux)
"""
2) Calcular The Power Method
http://college.cengage.com/mathematics/larson/elementary_linear/4e/shared/
downloads/c10s3.pdf
"""
# Matrix P receives the adjacency matrix of graphG
matrixP = nx.adjacency_matrix(graphG)
# The sum of each row is computed
sum_linha = []
for i in matrixP:
sum_linha.append(i.sum())
# For each p(i,j) of P we set p(i,j) = p(i,j) / sum_linha(i)
for i in range(0, matrixP.shape[0]):
for j in range(0, matrixP.shape[1]):
matrixP[i, j] = float(matrixP[i, j]) / float(sum_linha[i])
# Initial vector w_inicial whose entries sum to 1, uniformly split as 1/G.order()
# For the graph used here, G.order() = 34
w_inicial = np.array([1.0/float(graphG.order())
for i in range(0, graphG.order())])
# Compute w_power5
w_power5 = np.dot(w_inicial, matrixP)
for i in range(0, 4):
w_power5 = np.dot(w_power5, matrixP)
# Compute w_power100
w_power100 = np.dot(w_inicial, matrixP)
for i in range(0, 99):
w_power100 = np.dot(w_power100, matrixP)
# The sum of all the elements of these vectors is 1
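# Editor's addition: a quick sanity check of the claim above; a row-stochastic P
# preserves the total probability mass of each power-method iterate.
assert np.allclose(w_power5.sum(), 1.0) and np.allclose(w_power100.sum(), 1.0)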
"""
3) Escolha de 2 vertices distintos e realizar a caminhada aleatoria de ambos
"""
# Random Walk function
def random_walk(node, numPassos):
    # Vector whose number of positions = number of vertices (nodes)
caminhada = [0.0 for i in range(0, graphG.number_of_nodes())]
    # For the desired number of steps, the list of the current node's neighbors
    # is fetched, a random index of that list is selected as the next node,
    # which then becomes the current node, and that node's visit count is incremented
for i in range(0, numPassos):
vizinhos = graphG.neighbors(node)
proxNo = vizinhos[np.random.randint(0, len(vizinhos))]
node = proxNo
caminhada[node-1] += 1
    # Divide every entry of the list by the number of steps
for i in range(0, len(caminhada)):
caminhada[i] /= numPassos
    # Return a vector containing (number of visits / number of steps) for each vertex (node)
return caminhada
# Pick two random vertices (nodes)
nodeA = np.random.random_integers(1, graphG.number_of_nodes())
nodeB = np.random.random_integers(1, graphG.number_of_nodes())
# If vertex B equals A, draw new values until the two are distinct
# (editor's fix: `is` compared object identity of numpy ints and never matched)
while nodeB == nodeA:
nodeB = np.random.random_integers(1, graphG.number_of_nodes())
# 2 random walks of length N = 100
w_random100a = random_walk(nodeA, 100)
w_random100b = random_walk(nodeB, 100)
# 2 random walks of length N = 10000
w_random10000a = random_walk(nodeA, 10000)
w_random10000b = random_walk(nodeB, 10000)
# Print all the collected data to the console
print "w_power5: "
w_power5_lista = []
for i in range(0, w_power5.size):
w_power5_lista.append('%.4f'%w_power5[0, i])
print w_power5_lista
print "w_power100: "
w_power100_lista = []
for i in range(0, w_power100.size):
w_power100_lista.append('%.4f'%w_power100[0, i])
print w_power100_lista
print "w_random100a:"
print w_random100a
print "w_random100b:"
print w_random100b
print "w_random10000a:"
print w_random10000a
print "w_random10000b:"
print w_random10000b
# To see the plot at https://plot.ly/~thamenato/2/t2-random-walk/
# just uncomment the block below and run the code again.
# The library must be installed (https://plot.ly/python/getting-started/);
# on Windows, open the Python(x,y) menu, choose interactive consoles: IPython(sh)
# and run: pip install plotly
"""
trace_power5 = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_power5)),
name = 'w_power5',
marker = Marker(
color='rgb(51,102,255)'
)
)
trace_power100 = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_power100)),
name = 'w_power100',
marker = Marker(
color='rgb(0,184,245)'
)
)
trace_random100a = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_random100a)),
name = 'w_random100a',
marker = Marker(
color='rgb(138,184,0)'
)
)
trace_random100b = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_random100b)),
name = 'w_random100b',
marker = Marker(
color='rgb(184,245,0)'
)
)
trace_random10000a = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_random10000a)),
name = 'w_random10000a',
marker = Marker(
color='rgb(245,184,0)'
)
)
trace_random10000b = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_random10000b)),
name = 'w_random10000b',
marker = Marker(
color='rgb(255,102,51)'
)
)
data = Data([trace_power5, trace_power100, trace_random100a,
trace_random100b, trace_random10000a, trace_random10000b])
layout = Layout(
title = 'T2: Random Walk',
xaxis = XAxis(
title = 'Nodes',
titlefont = Font(
size = 16,
color = 'rgb(107, 107, 107)'
),
tickfont = Font(
size = 14,
color = 'rgb(107, 107, 107)'
)
),
yaxis = YAxis(
title = 'Probability',
titlefont = Font(
size = 16,
color = 'rgb(107, 107, 107)'
),
tickfont = Font(
size = 14,
color = 'rgb(107, 107, 107)'
)
),
legend = Legend(
x = 0.25,
y = 1.0,
bgcolor = 'rgba(255, 255, 255, 0)',
bordercolor = 'rgba(255, 255, 255, 0)'
),
barmode = 'group',
bargap = 0.15,
bargroupgap = 0.1
)
fig = Figure(data = data, layout = layout)
plot_url = py.plot(fig, filename='T2_Random_Walks')
"""
|
UFSCar-CS-011/graph-theory-2012-2
|
tasks/task2/t2-random-walks.py
|
Python
|
mit
| 6,342
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class IpGroupsOperations(object):
"""IpGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
ip_groups_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.IpGroup"
"""Gets the specified ipGroups.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_groups_name: The name of the ipGroups.
:type ip_groups_name: str
:param expand: Expands resourceIds (of Firewalls/Network Security Groups etc.) back referenced
by the IpGroups resource.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IpGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.IpGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IpGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
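    # Hedged usage sketch (editor's addition): `expand` asks the service to also
    # return the ids of firewalls / network security groups referencing the group.
    # `network_client` is an assumed, authenticated NetworkManagementClient and
    # the expand value shown is illustrative only.
    #
    #   group = network_client.ip_groups.get(
    #       "my-rg", "my-ipgroup", expand="firewalls")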
def _create_or_update_initial(
self,
resource_group_name, # type: str
ip_groups_name, # type: str
parameters, # type: "_models.IpGroup"
**kwargs # type: Any
):
# type: (...) -> "_models.IpGroup"
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'IpGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('IpGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('IpGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
ip_groups_name, # type: str
parameters, # type: "_models.IpGroup"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.IpGroup"]
"""Creates or updates an ipGroups in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_groups_name: The name of the ipGroups.
:type ip_groups_name: str
:param parameters: Parameters supplied to the create or update IpGroups operation.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.IpGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either IpGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.IpGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ip_groups_name=ip_groups_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('IpGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
def update_groups(
self,
resource_group_name, # type: str
ip_groups_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.IpGroup"
"""Updates tags of an IpGroups resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_groups_name: The name of the ipGroups.
:type ip_groups_name: str
:param parameters: Parameters supplied to the update ipGroups operation.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IpGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.IpGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_groups.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IpGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
ip_groups_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
ip_groups_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified ipGroups.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_groups_name: The name of the ipGroups.
:type ip_groups_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
ip_groups_name=ip_groups_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IpGroupListResult"]
"""Gets all IpGroups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IpGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.IpGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IpGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IpGroupListResult"]
"""Gets all IpGroups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IpGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.IpGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IpGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ipGroups'} # type: ignore
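# Hedged usage sketch (editor's addition): begin_create_or_update returns an
# LROPoller, so the long-running PUT is driven to completion with .result();
# `network_client` and the IpGroup payload below are placeholders.
#
#   poller = network_client.ip_groups.begin_create_or_update(
#       "my-rg", "my-ipgroup",
#       IpGroup(location="westus", ip_addresses=["10.0.0.0/24"]))
#   ip_group = poller.result()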
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/operations/_ip_groups_operations.py
|
Python
|
mit
| 27,449
|
# pylint: disable=missing-docstring
# pylint: disable=wildcard-import
from .test_mocks import *
from .cpython.testmock import *
from .cpython.testwith import *
|
nivbend/mock-open
|
src/mock_open/test/__init__.py
|
Python
|
mit
| 161
|