| repo_name (stringlengths 5–92) | path (stringlengths 4–221) | copies (stringclasses, 19 values) | size (stringlengths 4–6) | content (stringlengths 766–896k) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| ByrdOfAFeather/AlphaTrion | Community/migrations/0034_auto_20171121_1316.py | 1 | 1619 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-21 18:16
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Community', '0033_auto_20171112_1742'),
]
operations = [
migrations.CreateModel(
name='SongSuggestions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('suggestions', models.TextField(help_text="Please list links to songs, we can't play it with just a name")),
('community', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Community.CommunityInst')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterField(
model_name='communityextraratings',
name='overall_rating',
            field=models.PositiveIntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')], default=5),
),
migrations.AlterField(
model_name='communitygameratings',
name='game_rating',
            field=models.PositiveIntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')], default=5),
),
]
| mit | 4,240,877,986,806,022,000 | 42.756757 | 169 | 0.575664 | false | 3.597778 | false | false | false |
| sergpolly/FluUtils | FluDB_coding_aln/subsample_msa_random.py | 1 | 2581 |
import re
import os
import sys
from Bio import Seq
from Bio import SeqIO
import pandas as pd
import itertools
import numpy as np
import random
import subprocess as sub
def get_least_gaps_seq(seq_dict,length,side='left'):
middle = length/2
min_gaps = length
min_key = ''
# for all sequences check number of gaps in either half and return its id ...
for seq_id in seq_dict:
seq_half = seq_dict[seq_id].seq[:middle] if side=='left' else seq_dict[seq_id].seq[middle:]
num_gaps = seq_half.count('-')
# reassign the min gaps counts and id in a procedural fashion ...
if num_gaps < min_gaps:
min_gaps = num_gaps
min_key = seq_id
# return ...
return (min_key, min_gaps)
# command to clust sequences and get a draft alignment ...
# usearch -cluster_fast seg1.fasta -id 0.993 -centroids nr.fasta -uc clust.uc
path = "/home/venevs/fludb_pH1N1"
if len(sys.argv) < 3:
    print "Call signature is: \"%s msa_fname subs_size\""%sys.argv[0]
    sys.exit(1)
msa_fname = sys.argv[1]
subs_size = int(sys.argv[2])
msa = SeqIO.parse(msa_fname,"fasta")
msa = SeqIO.to_dict(msa)
# chosen msa keys ...
chosen_keys = random.sample(msa,subs_size)
# add sequences with the longest UTRs as well ...
alignment_len = len(msa[chosen_keys[0]].seq)
# find sequence with the least gaps in the left half of the sequence ...
# supposedly - longest left-UTR
left_utr_key,_ = get_least_gaps_seq(msa,alignment_len,side='left')
# find sequence with the least gaps in the right half of the sequence ...
# supposedly - longest right-UTR
right_utr_key,_ = get_least_gaps_seq(msa,alignment_len,side='right')
# include those 2 if they are not yet in the subsampled alignment ...
if left_utr_key not in chosen_keys:
chosen_keys += [left_utr_key, ]
if right_utr_key not in chosen_keys:
chosen_keys += [right_utr_key, ]
# now extract aligned sequences ...
alignment_out = [msa[sid] for sid in chosen_keys]
# output the alignment now ...
tmp_afa_fname = "tmp.afa"
SeqIO.write(alignment_out,tmp_afa_fname,"fasta")
# htm out fname :
out_htm = os.path.basename(msa_fname)+'.htm'
cmd = "mview -in fasta -ruler on -moltype dna -coloring consensus -threshold 60 -consensus on -con_threshold 60 -html head %s > %s"%(tmp_afa_fname,out_htm)
print
print cmd
print
retcode = sub.call(cmd,shell=True)
if retcode == 0:
print "Complete ..."
else:
print "mview retcode was %s"%str(retcode)
# #
# # remove temporary file here ...
# os.remove(tmp_afa_fname)
# print "tmp file removed ..."
# # now make an html alignment using mview ...
| mit | -1,590,030,344,426,314,000 | 12.728723 | 155 | 0.673382 | false | 2.913093 | false | false | false |
| dtroyer/python-openstacksdk | openstack/identity/v3/role_domain_group_assignment.py | 1 | 1276 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.identity import identity_service
from openstack import resource
class RoleDomainGroupAssignment(resource.Resource):
resource_key = 'role'
resources_key = 'roles'
base_path = '/domains/%(domain_id)s/groups/%(group_id)s/roles'
service = identity_service.IdentityService()
# capabilities
allow_list = True
# Properties
#: name of the role *Type: string*
name = resource.Body('name')
#: The links for the service resource.
links = resource.Body('links')
#: The ID of the domain to list assignment from. *Type: string*
domain_id = resource.URI('domain_id')
#: The ID of the group to list assignment from. *Type: string*
group_id = resource.URI('group_id')
| apache-2.0 | -8,509,374,455,824,138,000 | 36.529412 | 75 | 0.717085 | false | 4.037975 | false | false | false |
| YouAreTheHat/pngr | pngr.py | 1 | 17939 |
### ### ### ### ### ### ### IMPORTANT ### ### ### ### ### ### ###
# #
# This module is a work in progress. Comments, particularly the #
# ones preceding a class or function definition, are intended #
# to represent the desired end result of this code. Until such #
# time as a version 1.0 of this work is published, with this #
# warning removed, all the contents and functions herein are to #
# be considered experimental, incomplete, and mutable. All #
# comments outside of this box are to be considered lies and #
# wishful thinking, not accurate documentation. #
# #
### ### ### ### ### ### ### #### #### ### ### ### ### ### ### ###
import math, zlib
# A custom error raised for issues with this module only.
class PngError(Exception):
def __init__(self, message=None):
if message is None:
message = "an unspecified error has occurred"
self.message = message
super(PngError, self).__init__(self.message)
# Reads PNG files.
# Largely acts as a wrapper for open(), automatically
# reading in positions and increments appropriate for the PNG format.
# Is also capable of spawning PngChunks.
class PngReader:
""" !!! WIP !!!
Reads PNG files and returns chunks of information.
"""
def __init__(self, pngfile):
self.png_path = pngfile #path to PNG file
self.png = None
# This will hold the file's first 8 bytes; in a PNG, these should
# always be the static PNG signature
self.png_sig = b''
# Check if the passed file really is a PNG; if not, raise error
if not self.is_valid():
raise PngError("file {} is corrupt or not a PNG".format(\
self.png_path))
# For using the 'with' statement to initialize
def __enter__(self):
self.open_png()
return self
# For using the 'with' statement to initialize
def __exit__(self, type, value, traceback):
self.close_png()
# Checks if the file location passed at init refers to a valid PNG.
# Never call this if the file is already open
def is_valid(self):
# This is the signature of all properly constructed PNGs; if the first
# 8 bytes of the file are not this, it isn't a PNG
sig = b'\x89PNG\r\n\x1a\n'
with open(self.png_path, 'rb') as f:
self.png_sig = f.read(8)
f.seek(0)
if self.png_sig == sig:
return True
else:
return False
# Acts as a wrapper for open(); also records the cursor position
def open_png(self):
if (self.png is None) or (self.png and self.png.closed):
self.png = open(self.png_path, 'rb')
self.last_pos = self.png.tell()
# Closes the PNG
def close_png(self):
if self.png and not self.png.closed:
self.png.close()
# Allows an instance to resume reading a file from the position in which
# it was after its last successful open_png() or next_chunk() call.
def resume(self):
if self.png and not self.png.closed:
self.png.seek(self.last_pos)
# Reads the next chunk in the file and returns a PngChunk object.
# If at the beginning of a file, it will skip the PNG signature.
# It will fail if its associated PNG is not opened for reading.
def next_chunk(self):
# Skip the PNG signature because it is not a chunk
if self.png.tell() == 0:
self.png.seek(8)
# Make a list to hold the chunk
self.cur_chunk = []
# Read the length, type, data, and crc
self.cur_chunk.append(self.png.read(4))
self.cur_chunk.append(self.png.read(4))
self.cur_chunk.append(self.png.read(\
int.from_bytes(self.cur_chunk[0], 'big')))
self.cur_chunk.append(self.png.read(4))
# Record the cursor position
self.last_pos = self.png.tell()
try:
# Return a PngChunk for the read bytes
return PngChunk(self.cur_chunk)
finally:
# We've finished reading, so forget about the current chunk
# (since it's no longer "current")
del self.cur_chunk
# Check if there is at least one more chunk.
# It will fail if its associated PNG is not opened for reading.
def has_more(self):
if len(self.png.read(12)) < 12:
self.png.seek(self.last_pos)
return False
else:
self.png.seek(self.last_pos)
return True
# Stores organized data for a single chunk of a PNG.
# Superclass for specific chunk types.
# The 'meta' dict is used to store the attributes which the chunk itself
# stores (length, type, CRC).
# Subclasses should extend the 'info' dict with the parsed information the
# chunk actually carries (e.g., IHDR adds 'Width', 'Height', etc).
class PngChunk:
""" !!! WIP !!!
Stores organized data on a PNG chunk.
"""
# Must be passed the entire binary chunk as a list
def __init__(self, c_bytes):
self.meta = {}
self.meta['Length'] = int.from_bytes(c_bytes[0], 'big')
self.meta['Type'] = c_bytes[1].decode()
self.meta['CRC'] = c_bytes[3]
self.data = bytearray(c_bytes[2])
self.info = {}
# Getter for chunk meta-data
def get_meta(self, property_name=None):
"""\tReturns dict of chunk length, type, and CRC.
Specify a key to return only that value."""
if property_name is None:
return self.meta
return self.meta[property_name]
# Getter for raw data
def get_data(self, buffer=None):
"""\tReturns generator over unparsed data, <buffer> bytes at a time.
Defaults to entire data field at once.
This does not include the length, type, or CRC fields.
Use get_raw() for a binary version of the entire chunk.
WARNING: may be up to 2^31 bytes long w/o buffer, use with caution"""
return self._raw_generator(buffer, 8, -4)
# Getter for parsed contents; most useful for subtypes
def get_info(self, info_name=None):
"""\tReturns parsed chunk data as dict (may be empty).
For known chunk types, this should return their stored information
in human-readable form."""
if info_name is None:
return self.info
return self.info[info_name]
# Getter for the binary data of the entire chunk
def get_raw(self, buffer: '4 to 2147483659'=None):
"""\tReturns generator over binary chunk, <buffer> bytes at a time.
Defaults to entire chunk at once.
WARNING: may be over 2^31 bytes long w/o buffer, use with caution"""
if buffer is not None:
if buffer < 4:
raise PngError("buffer length out of range")
return self._raw_generator(buffer)
# Makes generator over binary form of chunk (or part of chunk)
def _raw_generator(self, buffer, start=0, end=0):
l = 12 + len(self.data)
if end < 0:
l += end
if start >= 0:
num = start
elif abs(start) <= l:
num = l + start
if buffer is None:
buffer = l
while num < l:
result, toread = b'', buffer
while toread > 0:
b_l = len(result)
if num < 4:
result += self.meta['Length'].to_bytes(4, 'big')\
[num:num + toread]
elif num >= 4 and num < 8:
result += bytes(self.meta['Type'], 'utf8')\
[num - 4:num - 4 + toread]
elif num >= 8 and num < (l - 4):
result += self.data[num - 8:num - 8 + toread]
elif num - l + toread < 0:
result += self.meta['CRC'][num - l:num - l + toread]
else:
result += self.meta['CRC'][num - l:]
toread = 0
num += len(result) - b_l
toread -= len(result) - b_l
yield result
# Sets the 'Length' to the actual length of its raw data
def set_length(self):
"""\tSet 'Length' to length of raw data.
Returns difference between new and old lengths."""
        # The PNG 'Length' field counts only the data bytes (not type or CRC)
        if self.meta['Length'] != len(self.data):
            oldlen = self.meta['Length']
            self.meta['Length'] = len(self.data)
            return (self.meta['Length'] - oldlen)
        return 0
# Stores parsed data from the IHDR chunk.
# PngData objects can use IHDR info dict to extract image properties
class IHDR(PngChunk):
# IHDR can extract all of its info at init
def __init__(self, genchunk):
if not isinstance(genchunk, PngChunk):
raise PngError("expected PngChunk, but {} found"\
.format(type(genchunk).__name__))
self.meta = genchunk.meta
self.data = genchunk.data
self.info = genchunk.info
self.info['Width'] = int.from_bytes(self.data[:4], 'big')
self.info['Height'] = int.from_bytes(self.data[4:8], 'big')
self.info['Bit depth'] = self.data[8]
self.info['Color type'] = self.data[9]
self.info['Interlace'] = self.data[-1]
# Stores parsed data from an IDAT chunk.
class IDAT(PngChunk):
# Init does not parse info because info from other chunks (IHDR and
# possibly others) is needed to understand the formatting.
# Plus, it's kind of a large and memory-intensive process.
def __init__(self, genchunk):
if not isinstance(genchunk, PngChunk):
raise PngError("expected PngChunk, but {} found"\
.format(type(genchunk).__name__))
self.meta = genchunk.meta
self.data = genchunk.data
self.info = genchunk.info
class PLTE(PngChunk):
pass
class IEND(PngChunk):
pass
# Stores PngChunks and analyzes their attributes.
# Acts as an object representation of the PNG file, since it holds all of the
# file's data in chunk form.
# Generic PngChunks should be passed to it through the 'addchunk' method;
# it will convert them to an appropriate subtype if one is defined.
class PngData:
""" !!! WIP !!!
Stores and analyzes PngChunks and prints their data.
"""
# Static mapping of chunk types to chunk subclasses.
# Used to replace generic chunks with their specific classes for
# analyzation.
# Critical chunks are unconditionally supported; ancillary chunks will
# be supported selectively as they are developed and added to the module.
chunktypes = {'IHDR': IHDR,
'IDAT': IDAT,
'PLTE': PLTE,
'IEND': IEND}
# Static mapping of color types to their sample information.
# The first value in the tuple is the number of samples/channels in the
# decompressed IDAT stream. This should be used for parsing the filter
# and, consequently, the scanlines.
# The second value reflects the presence of a PLTE. True means that a PLTE
# must appear; False means it must not appear; None means it may appear,
# but may also be safely ignored.
# Note that type 3 implies that the pixels in PLTE are 3-tuples of 1-byte
# samples (a bit depth less than 8 just adds leading zeroes).
colortypes = {0: (1, False),
2: (3, None),
3: (1, True),
4: (2, False),
6: (4, None)}
# Static PNG signature; it will be needed when writing
signature = b'\x89PNG\r\n\x1a\n'
def __init__(self):
self.chunks = []
self.ihdr_pos = None
self.plte_pos = None
def add_chunk(self, chunk):
if not isinstance(chunk, PngChunk):
raise PngError("expected PngChunk, but {} found"\
.format(type(chunk).__name__))
ctype = chunk.get_meta('Type')
if ctype in self.chunktypes.keys():
if ctype == 'IHDR':
self.ihdr_pos = len(self.chunks)
elif ctype == 'PLTE':
self.plte_pos = len(self.chunks)
self.chunks.append(self.chunktypes[ctype](chunk))
else:
self.chunks.append(chunk)
# Rough unfiltering method.
# Currently works naively on an array of scanlines.
# No support for interlacing. Requires precalculated pixel depth. May
# work improperly on color type 0 for bit depths less than 8.
def _unfilter(self, lines, px_depth):
for i in range(len(lines)):
l = bytearray(lines[i])
if l[0] == 0: #filter 'none'
pass
elif l[0] == 1: #filter 'sub'
for j in range((1 + px_depth), len(l)):
l[j] = (l[j] + l[j - px_depth])%256
elif l[0] == 2: #filter 'up'
for j in range(1, len(l)):
if i == 0:
prior = 0
else:
prior = lines[i - 1][j - 1]
l[j] = (l[j] + prior)%256
elif l[0] == 3: #filter 'average'
for j in range(1, len(l)):
if j in range(1, (1 + px_depth)):
prev = 0
else:
prev = l[j - px_depth]
if i == 0:
prior = 0
else:
prior = lines[i - 1][j - 1]
l[j] = (l[j] + math.floor((prev + prior)/2))%256
elif l[0] == 4: #filter 'Paeth'
for j in range(1, len(l)):
flg = False
if j in range(1, (1 + px_depth)):
prev = 0
flg = True
else:
prev = l[j - px_depth]
if i == 0:
prior = 0
flg = True
else:
prior = lines[i - 1][j - 1]
if flg:
prevpri = 0
else:
prevpri = lines[i - 1][(j - 1) - px_depth]
                    # Paeth predictor: p = left + up - upper_left
                    p_p = prev + prior - prevpri
                    p_d = []
                    for p_v in [prev, prior, prevpri]:
                        p_d.append(abs(p_p - p_v))
if p_d[0] <= p_d[1] and p_d[0] <= p_d[2]:
paeth = prev
elif p_d[1] <= p_d[2]:
paeth = prior
else:
paeth = prevpri
l[j] = (l[j] + paeth)%256
l = l[1:]
lines[i] = l
return lines
# Rough method for extracting pixel data from IDATs
# Currently works naively on all data at once, returns array. No support
# for interlacing. May work improperly on color type 0 for bit depths less
# than 8.
def get_scanlines(self):
info = self.chunks[self.ihdr_pos].get_info()
if info['Interlace']:
raise PngError("interlacing not supported")
c_count = self.colortypes[info['Color type']][0]
c_depth = max([info['Bit depth']//8, 1])
p_depth = c_depth * c_count
p_w, p_h = info['Width'], info['Height']
cmp = b''
for chunk in [c for c in self.chunks if isinstance(c, IDAT)]:
for d in chunk.get_data():
cmp += d
dcmp = zlib.decompress(cmp)
scanlines = []
for i in range(0, len(dcmp), ((p_depth * p_w) + 1)):
scanlines.append(dcmp[i:i + ((p_depth * p_w) + 1)])
scanlines = self._unfilter(scanlines, p_depth)
return scanlines
## Notes
# pngr_test.py has some testing, basic implementations, etc
# add PngChunk subclasses for each critical type (and hopefully important
# ancillary types as well). use them for analyzing chunks more effectively.
# project purpose has been changed: the goal is now to make a PNG decoder,
# including parsing, modification, and re-writing
# for the above goal:
# - data class would hold info attributes (probably)
# - only chunks which affect the reading/writing of IDAT/pixel data would need
# to be parsed (others are optional)
# - only critical info/data would need to be stored
# - maybe a gateway to stegosaurus?
# make chunk subtypes able to init with bin arrays from reader
# ...because reasons?
# OR
# eliminate subtypes and meta array, trust 'Type' for chunk typing, have data
# class parse and store information to avoid redundant storage. this may
# be necessary for cat'ing IDATs and using IHDR and PLTE info anyway
# for the above, only certain data has to be stored; chunks can still be
# mostly responsible for themselves.
# keep mem usage in mind. at minimum, entire file is in mem. decompressing
# IDAT(s) all at once nearly doubles that. copying decomp'd data to array
# doubles decomp'd data length, which is already longer than IDAT. working
# with data in place as much as possible would be wise.
# the above may be complicated in the case of Adam7 interlacing
# (de)compression, making scanlines, and (un)filtering may also benefit from
# generators/buffered IO (or be impossible - look into that)
# scanline and unfiltering functions are very rough; revise to ensure they are
# compatible with color types and bit depths. also include a buffered read
# by way of a generator.
# for above, carefully consider how decompression and unfiltering will work;
# the compressed data must be at least 2 scanlines long to be useful for
# unfiltering.
# if this will work as a proper PNG decoder, ensure that all requirements from
# the PNG standard are followed.
##
| lgpl-3.0 | -588,470,887,037,475,200 | 38.864444 | 78 | 0.564468 | false | 3.910835 | false | false | false |
| be-cloud-be/horizon-addons | partner-contact/partner_external_map/tests/test_partner_external_map.py | 1 | 3467 |
# -*- coding: utf-8 -*-
# © 2016 Pedro M. Baeza <pedro.baeza@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp.tests import common
from openerp.exceptions import Warning as UserError
from ..hooks import set_default_map_settings
class TestPartnerExternalMap(common.TransactionCase):
def setUp(self):
super(TestPartnerExternalMap, self).setUp()
self.user = self.env['res.users'].create({
'name': 'Test user',
'login': 'test_login',
'context_map_website_id': self.ref(
'partner_external_map.google_maps'),
'context_route_map_website_id': self.ref(
'partner_external_map.google_maps'),
})
self.user.partner_id.city = 'Tomelloso'
self.partner = self.env['res.partner'].create({
'name': 'Test partner',
'city': 'Madrid',
})
def test_post_init_hook(self):
# Call this again for coverage purposes, but it has been already run
set_default_map_settings(self.cr, self.registry)
self.assertTrue(self.env.user.context_map_website_id)
self.assertTrue(self.env.user.context_route_map_website_id)
self.assertEqual(self.env.user.partner_id,
self.env.user.context_route_start_partner_id)
def test_create_user(self):
self.assertEqual(
self.user.partner_id, self.user.context_route_start_partner_id)
def test_open_map(self):
action = self.partner.sudo(self.user.id).open_map()
self.assertEqual(
action['url'], "https://www.google.com/maps?ie=UTF8&q=Madrid")
def test_open_route_map(self):
action = self.partner.sudo(self.user.id).open_route_map()
self.assertEqual(
action['url'], "https://www.google.com/maps?saddr=Tomelloso&daddr="
"Madrid&directionsmode=driving")
def test_open_map_with_coordinates(self):
# Simulate that we have the base_geolocalize module installed creating
# by hand the variables - This can't be done with routes
partner = self.partner.sudo(self.user.id)
partner.partner_latitude = 39.15837
partner.partner_longitude = -3.02145
action = partner.open_map()
self.assertEqual(
action['url'],
"https://www.google.com/maps?z=15&q=39.15837,-3.02145")
def test_exception_no_map_website(self):
self.user.context_map_website_id = False
with self.assertRaises(UserError):
self.partner.sudo(self.user.id).open_map()
def test_exception_no_map_route_website(self):
self.user.context_route_start_partner_id = False
with self.assertRaises(UserError):
self.partner.sudo(self.user.id).open_route_map()
def test_exception_no_starting_partner(self):
self.user.context_route_map_website_id = False
with self.assertRaises(UserError):
self.partner.sudo(self.user.id).open_route_map()
def test_exception_no_address_url(self):
self.user.context_map_website_id.address_url = False
with self.assertRaises(UserError):
self.partner.sudo(self.user.id).open_map()
def test_exception_no_route_address_url(self):
self.user.context_map_website_id.route_address_url = False
with self.assertRaises(UserError):
self.partner.sudo(self.user.id).open_route_map()
| agpl-3.0 | -35,478,124,546,341,290 | 40.261905 | 79 | 0.632141 | false | 3.55123 | true | false | false |
| Linaro/lava-dispatcher | lava_dispatcher/test/test_uboot_ums.py | 1 | 2817 |
# Copyright (C) 2018 Linaro Limited
#
# Author: Matthew Hart <matthew.hart@linaro.org>
#
# This file is part of LAVA Dispatcher.
#
# LAVA Dispatcher is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LAVA Dispatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
import os
import unittest
from lava_dispatcher.device import NewDevice
from lava_dispatcher.parser import JobParser
from lava_dispatcher.test.test_basic import Factory, StdoutTestCase
from lava_dispatcher.test.utils import DummyLogger, infrastructure_error
class UBootUMSFactory(Factory): # pylint: disable=too-few-public-methods
"""
Not Model based, this is not a Django factory.
Factory objects are dispatcher based classes, independent
of any database objects.
"""
def create_warp7_job(self, filename): # pylint: disable=no-self-use
device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/imx7s-warp-01.yaml'))
bbb_yaml = os.path.join(os.path.dirname(__file__), filename)
with open(bbb_yaml) as sample_job_data:
parser = JobParser()
job = parser.parse(sample_job_data, device, 4212, None, "")
job.logger = DummyLogger()
return job
class TestUbootUMSAction(StdoutTestCase): # pylint: disable=too-many-public-methods
def setUp(self):
super(TestUbootUMSAction, self).setUp()
self.factory = UBootUMSFactory()
@unittest.skipIf(infrastructure_error('dd'), "dd not installed")
def test_ums_action(self):
job = self.factory.create_warp7_job('sample_jobs/warp7-ums.yaml')
self.assertIsNotNone(job)
description_ref = self.pipeline_reference('uboot-ums.yaml', job=job)
self.assertEqual(description_ref, job.pipeline.describe(False))
self.assertIsNone(job.validate())
self.assertEqual(job.device['device_type'], 'imx7s-warp')
uboot = [action for action in job.pipeline.actions if action.name == 'uboot-action'][0]
retry = [action for action in uboot.internal_pipeline.actions if action.name == 'uboot-retry'][0]
flash = [action for action in retry.internal_pipeline.actions if action.name == 'flash-uboot-ums'][0]
self.assertEqual("ums", flash.parameters['commands'])
self.assertEqual("/dev/vde", flash.usb_mass_device)
| gpl-2.0 | 2,561,922,625,065,863,000 | 42.338462 | 109 | 0.707845 | false | 3.701708 | true | false | false |
| php1ic/inch | scripts/randomChart.py | 1 | 7115 |
#!/usr/bin/env python3
"""
Create chart(s) with random parameters
Using either the executable provided, or
searching for it in standard locations,
and farm the creation on multiple threads
"""
import argparse
import multiprocessing
import os
import random
import shutil
import subprocess
import colorama
from joblib import Parallel, delayed
def getExecutableName():
"""
Get the name of the executable that is going to be used.
@param: None
@return[success] The basename of the executable that will be used
@return[failure] The default value
"""
    # Set a default program name in case this function fails
programName = "inch"
    # Store where we are so we can come back
currentdir = os.getcwd()
# Get the path of this script
scriptdir = os.path.realpath(__file__)
    # Move into the script directory as it's guaranteed to be part of the git repo
os.chdir(os.path.dirname(scriptdir))
# Use git to get the repo directory name, assume this is also the exe name
gitExe = shutil.which("git")
if gitExe is None:
print("Looks like git is not installed on this system")
print(f"Using the default {programName} as the executable name")
return programName
output = subprocess.run([gitExe, "rev-parse", "--show-toplevel"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
programName = os.path.basename(output.stdout.strip().decode())
os.chdir(currentdir)
return programName
# -------------------------------------------------
def validateExecutable(exe):
"""
Check the provided executable exists, otherwise look for it in the
'standard' locations
@param: File path
@return[success]: Path to valid executable
@return[failure]: None
"""
colorama.init()
if exe is not None:
if os.path.isfile(exe):
return exe
print(colorama.Fore.YELLOW + "WARNING: " + colorama.Style.RESET_ALL
          + f"{exe} does not exist. "
          "Looking for executable in standard build locations")
exeName = getExecutableName()
scriptdir = os.path.realpath(os.path.dirname(__file__))
commonPath = os.path.join("bin", exeName)
gnumakePath = os.path.abspath(os.path.join(scriptdir, "..", commonPath))
cmakePath = os.path.abspath(os.path.join(scriptdir, "..", "..", "build", commonPath))
fullExe = None
if os.path.isfile(gnumakePath):
fullExe = gnumakePath
elif os.path.isfile(cmakePath):
fullExe = cmakePath
else:
print(colorama.Fore.RED + "ERROR: " + colorama.Style.RESET_ALL
+ f" Couldn't find an executable to use")
colorama.deinit()
return fullExe
# -------------------------------------------------
def createSingleChart(MAX_LOW_Z, MAX_Z):
"""
Generate a single chart with random parameters.
Limit the Z range to [MAX_LOW_Z,MAX_Z]
@param: Highest value of Z to use as Zmin
@param: Largest value of Z allowed
@return: Nothing
"""
    # This script removes the ability to interact with the program, so we need to make sure
    # that the file we are trying to create does not already exist. Otherwise the script will
    # get stuck waiting for a user input that will never come
while True:
# Randomly pick 0,1,2
experimental = random.choice(range(0, 3))
# If the experimental option is '1' i.e theoretical, there is one less property
        # to colour by, so randomly pick from a, b, c, d and possibly e
choice = random.choice(range(0, 4 if experimental == 1 else 5))
minZ = random.randrange(MAX_LOW_Z)
maxZ = minZ + random.randrange(MAX_Z - minZ)
name = f"Zmin-{minZ:03d}_Zmax-{maxZ:03d}_Exp-{experimental}_Type-{choice}"
if not os.path.isfile(name+".eps"):
break
print(f"Creating - {name}")
with open(name+".in", 'w') as ofile:
ofile.write(f"section=1\n"
f"Zmin={minZ}\n"
f"Zmax={maxZ}\n"
f"required=0\n"
f"type={experimental}\n"
f"choice={choice}\n")
ofile.close()
subprocess.run([exe, "-o", name, "-i", name+".in"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
# -------------------------------------------------
def runExecutable(exe, number, threads):
"""
Run <exe> <number> times, randomising the input parameters.
Each execution is independent so allow <exe> to be run over <threads> threads
@param: Executable to run
@param: Number of times to run <exe>
@param: Number of threads to concurrently use
@return: Nothing
"""
# We could read this from ../includes/inputs.h
MAX_Z = 118
# Set the minimum possible Z range
MAX_LOW_Z = MAX_Z - 1
colorama.init()
print(f"\nUsing: "
+ colorama.Fore.GREEN + exe + colorama.Style.RESET_ALL
+ " to create "
+ colorama.Fore.GREEN + str(number) + colorama.Style.RESET_ALL
+ " chart(s)\n")
colorama.deinit()
Parallel(threads)(delayed(createSingleChart)(MAX_LOW_Z, MAX_Z) for i in range(0, number))
print()
# -------------------------------------------------
def check_positive(value):
"""
Check that the value is positive while also converting to an int.
Use to ensure that the number of charts option make sense.
@param: Number
@return[success]: The integer version of the number
@return[failure]: ArgparseTypeError
"""
intValue = int(value)
if intValue <= 0:
raise argparse.ArgumentTypeError(f"{intValue} is an invalid positive int value")
return intValue
# -------------------------------------------------
def parse_arguments():
"""
Encapsulate the use of argparse
@param: None
@return: An instance of argparse
"""
parser = argparse.ArgumentParser(description="Create some random charts")
# Required
# Nothing
# Optional
parser.add_argument("-e", "--executable",
help="The executable to use [default: None]",
type=str,
default=None)
parser.add_argument("-n", "--number",
help="Number of charts to randomly create [default: %(default)s]",
type=check_positive,
default=1)
parser.add_argument("-t", "--threads",
help="Number of threads to use [default: %(default)s]",
type=int,
default=multiprocessing.cpu_count()-1,
choices=range(1, multiprocessing.cpu_count()))
return parser.parse_args()
# -------------------------------------------------
if __name__ == "__main__":
colorama.init()
args = parse_arguments()
exe = validateExecutable(args.executable)
if exe is not None:
runExecutable(exe, args.number, args.threads)
colorama.deinit()
# -------------------------------------------------
| gpl-3.0 | -8,901,250,184,690,402,000 | 28.279835 | 93 | 0.583275 | false | 4.122248 | false | false | false |
| neelchauhan/OnionLauncher | OnionLauncher/main.py | 1 | 2902 |
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox
from PyQt5.uic import loadUi
from var import values, version
import torctl
from fn_handle import detect_filename
class MainWindow(QMainWindow):
def __init__(self, *args):
super(MainWindow, self).__init__(*args)
# Load .ui file
loadUi(detect_filename("ui_files/main.ui"), self)
# Define buttons
buttons = {
self.tbAdd: self.addRow,
self.tbRemove: self.removeRow,
self.btnSwitchTor: self.switchTor,
self.btnAbout: self.showAbout
}
self.evAddClick(buttons)
# Function to connect objects from dictionary
def evAddClick(self, obj_dict):
for obj in obj_dict:
obj.clicked.connect(obj_dict[obj])
# Function to set objects enabled or not
def evSetListEnabled(self, lst, state):
for item in lst:
item.setEnabled(state)
# Function to add a blank row
def addRow(self):
rowPos = self.twSettings.rowCount() # Get position
self.twSettings.insertRow(rowPos)
# Function to delete a selected row
def removeRow(self):
rows = sorted(set(index.row() for index in self.twSettings.selectedIndexes())) # Get selected rows
rows.reverse() # Reverse rows (we're deleting from last->first)
for row in rows:
self.twSettings.removeRow(row)
	def optToDict(self): # Function to convert options in a QTableWidget to a Python Dictionary
rows = self.twSettings.rowCount() # Row count (we're iterating the hard way)
output_dict = {}
for row in range(rows):
# Get values in two variables
setting = self.twSettings.item(row, 0)
parameter = self.twSettings.item(row, 1)
# Add them to dictionary
if setting is not None and parameter is not None:
output_dict[setting.text()] = parameter.text().split()
return output_dict
def switchTor(self): # Enable (or Disable) Tor
modList = [
self.twSettings,
self.tbAdd,
self.tbRemove
]
if values["torEnabled"]: # Turn off if Tor is on
values["torEnabled"] = False
self.btnSwitchTor.setText("Start Tor")
self.lblSwitchTor.setText("Tor Not Running")
self.evSetListEnabled(modList, True)
torctl.stopTor(values["process_desc"])
else: # Turn on Tor
values["process_desc"] = torctl.startTor(self, self.optToDict())
# If Tor started correctly, then mark as "on"
if values["process_desc"] != None:
values["torEnabled"] = True
self.btnSwitchTor.setText("Stop Tor")
self.lblSwitchTor.setText("Tor Running")
self.evSetListEnabled(modList, False)
# Refresh elements
QApplication.processEvents()
def showAbout(self): # Show about dialog
message = "About OnionLauncher " + version + "\n\n" \
"Copyright 2016 Neel Chauhan\n" \
"https://github.com/neelchauhan/OnionLauncher"
QMessageBox.information(self, "Information", message)
def main_loop():
app = QApplication(sys.argv)
mw = MainWindow()
mw.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main_loop()
| bsd-2-clause | 8,074,531,801,412,537,000 | 29.547368 | 100 | 0.709511 | false | 3.192519 | false | false | false |
| bsarsgard/blackrocktickets | texas/forms.py | 1 | 1312 |
"""
Texas - Ticket Sales System
Copyright (C) 2010 Ben Sarsgard
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django import forms
class LoginForm(forms.Form):
username = forms.CharField(label='Email Address', max_length=75)
password = forms.CharField(widget=forms.PasswordInput(render_value=False))
class SaleForm(forms.Form):
tickets = forms.CharField(label='Number of Tickets', max_length=2)
name = forms.CharField(label='Full Name', max_length=50)
email = forms.CharField(label='Email', max_length=75)
class ChanceForm(forms.Form):
name = forms.CharField(label='Full Name', max_length=255)
email = forms.CharField(label='Primary Email', max_length=255)
| apache-2.0 | -4,711,378,940,113,325,000 | 40 | 78 | 0.730945 | false | 4.049383 | false | false | false |
| cbouilla/3sum-pool | share.py | 1 | 5451 |
import time
import struct
import random
from hashlib import sha256
from binascii import hexlify, unhexlify
JOB_TYPES = ['FOO', 'BAR', 'FOOBAR']
def sha256d(x):
return sha256(sha256(x).digest()).digest()
def swap_endian_words(hex_words):
    '''Swaps the endianness of a hexadecimal string of words and converts to a binary string.'''
message = unhexlify(hex_words)
if len(message) % 4 != 0: raise ValueError('Must be 4-byte word aligned')
return b''.join([ message[4 * i: 4 * i + 4][::-1] for i in range(0, len(message) // 4) ])
def version_prev_block(kind):
"""Return the "block version" & the "hash of previous block" according to our categories (FOO, BAR, FOOBAR)"""
if kind == 0: # 'FOO'
block_version = hexlify(b'-OOF').decode()
prev_block_hash = hexlify(swap_endian_words(hexlify(b' Charles Bouillaguet'))).decode()
elif kind == 1: # 'BAR'
block_version = hexlify(b'-RAB').decode()
prev_block_hash = hexlify(swap_endian_words(hexlify(b' Pierre-Alain Fouque'))).decode()
elif kind == 2: # 'FOOBAR'
block_version = hexlify(b'BOOF').decode()
prev_block_hash = hexlify(swap_endian_words(hexlify(b'AR- Claire Delaplace'))).decode()
return (block_version, prev_block_hash)
class JobContext:
extranonce1 = None
kind = None
D = None
def __init__(self, extranonce1, D):
self.extranonce1 = extranonce1
self.kind = random.randrange(3)
self.D = D
def work_parameters(self):
block_version, prev_block_hash = version_prev_block(self.kind)
ntime = "{:08x}".format(int(time.time()))
return [prev_block_hash, Share.coinbase_1, Share.coinbase_2, [], block_version, Share.ndiff, ntime]
class Share:
"""representation of a full share (i.e. a block whose hash is correct)"""
# variable part. Strings, in hex.
extranonce1 = None
extranonce2 = None
nonce = None
ntime = None # network time
# metadata
D = None # actual difficulty of the share
kind = None # 0==FOO, 1==BAR, 2==FOOBAR
# static values. These choices yields invalid bitcoin blocks.
# This means that we don't actually mine bitcoins.
ndiff = "efbeadde" # encoded network difficulty
extraNonce2_size = 4
coinbase_1 = "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff20020862062f503253482f04b8864e5008"
coinbase_2 = "072f736c7573682f000000000100f2052a010000001976a914d23fcdf86f7e756a64a7a9688ef9903327048ed988ac00000000"
def __init__(self, extranonce2, nonce, ntime, job_context=None, kind=None, D=None, extranonce1=None):
self.extranonce2 = extranonce2
self.nonce = nonce
self.ntime = ntime
self._hash = None
if job_context:
self.extranonce1 = job_context.extranonce1
self.kind = job_context.kind
self.D = job_context.D
else:
self.extranonce1 = extranonce1
self.kind = kind
self.D = D
def block(self):
"""build the (binary) block this shares represent"""
block_version, prev_block_hash = version_prev_block(self.kind)
coinbase = self.coinbase_1 + self.extranonce1 + self.extranonce2 + self.coinbase_2
coinbase_hash_bin = sha256d(unhexlify(coinbase))
merkle_root = hexlify(coinbase_hash_bin)
version_bin = struct.pack("<I", int(block_version, base=16))
prev_hash_bin = swap_endian_words(prev_block_hash) # must be LE
mrt_bin = unhexlify(merkle_root) # must be LE
time_bin = struct.pack("<I", int(self.ntime, base=16))
diff_bin = struct.pack("<I", int(self.ndiff, base=16))
nonce_bin = struct.pack("<I", int(self.nonce, base=16))
return version_bin + prev_hash_bin + mrt_bin + time_bin + diff_bin + nonce_bin
def __str__(self):
return "({} / D={} / {} / {} / {})".format(JOB_TYPES[self.kind], self.D, self.extranonce1, self.extranonce2, self.nonce)
def block_hash(self):
if not self._hash:
self._hash = sha256d(self.block())
return self._hash
def valid(self):
#print(hexlify(self.block()).decode())
#print(self.formated_hex_block())
block_hash = self.block_hash()
#print(hexlify(block_hash).decode())
return block_hash[28:] == bytes([0,0,0,0])
def formated_hex_block(self):
h = hexlify(self.block()).decode()
return "{} {} {} {} {} {}".format(h[0:8], h[8:72], h[72:136], h[136:144], h[144:152], h[152:160])
def serialize(self):
"""dump this share into 160 bits"""
return struct.pack('<HHIIII', self.kind, self.D, int(self.extranonce2, base=16),
int(self.extranonce1, base=16), int(self.nonce, base=16), int(self.ntime, base=16))
@staticmethod
    def unserialize(buf):
        """Generate a Share object given a 160-bit serialized share"""
kind, D, extranonce2_bin, extranonce1_bin, nonce_bin, ntime_bin = struct.unpack('<HHIIII', buf)
extranonce1 = "{:08x}".format(extranonce1_bin)
extranonce2 = "{:08x}".format(extranonce2_bin)
nonce = "{:08x}".format(nonce_bin)
ntime = "{:08x}".format(ntime_bin)
return Share(extranonce2, nonce, ntime, D=D, kind=kind, extranonce1=extranonce1)
| gpl-3.0 | -3,494,661,404,866,030,600 | 39.080882 | 135 | 0.616401 | false | 3.307646 | false | false | false |
| stackforge/python-monascaclient | monascaclient/tests/v2_0/shell/test_alarm_definitions.py | 1 | 5743 |
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslotest import base
from monascaclient.osc import migration as migr
from monascaclient.v2_0 import alarm_definitions as ad
from monascaclient.v2_0 import shell
class FakeV2Client(object):
def __init__(self):
super(FakeV2Client, self).__init__()
self.alarm_definitions = mock.Mock(
spec=ad.AlarmDefinitionsManager)
class TestAlarmDefinitionShellV2(base.BaseTestCase):
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_update(self, mc):
mc.return_value = c = FakeV2Client()
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
ad_name = 'alarm_name'
ad_desc = 'test_alarm_definition'
ad_expr = 'avg(Test_Metric_1)>=10'
ad_action_id = '16012650-0b62-4692-9103-2d04fe81cc93'
ad_action_enabled = 'True'
ad_match_by = 'hostname'
ad_severity = 'CRITICAL'
raw_args = [
ad_id, ad_name, ad_desc, ad_expr,
ad_action_id, ad_action_id, ad_action_id, ad_action_enabled,
ad_match_by, ad_severity
]
name, cmd_clazz = migr.create_command_class(
'do_alarm_definition_update',
shell
)
cmd = cmd_clazz(mock.Mock(), mock.Mock())
parser = cmd.get_parser(name)
parsed_args = parser.parse_args(raw_args)
cmd.run(parsed_args)
c.alarm_definitions.update.assert_called_once_with(
actions_enabled=True,
alarm_actions=[ad_action_id],
alarm_id=ad_id,
description=ad_desc,
expression=ad_expr,
match_by=[ad_match_by],
name=ad_name,
ok_actions=[ad_action_id],
severity=ad_severity,
undetermined_actions=[ad_action_id]
)
@mock.patch('monascaclient.osc.migration.make_client')
def test_alarm_definitions_list(self, mc):
mc.return_value = c = FakeV2Client()
c.alarm_definitions.list.return_value = [{
"name": "ntp_sync_check",
"id": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",
"expression": "(max(ntp.offset{}, deterministic)>=1)",
"match_by": ['hostname'],
"description": "NTP time sync check",
"actions_enabled": True,
"deterministic": True,
"alarm_actions": ['aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'],
"ok_actions": [],
"undetermined_actions": [],
"severity": "HIGH",
}]
name, cmd_class = migr.create_command_class(
'do_alarm_definition_list',
shell
)
cmd = cmd_class(mock.Mock(), mock.Mock())
parser = cmd.get_parser(name)
raw_args = []
parsed_args = parser.parse_args(raw_args)
cmd.run(parsed_args)
c.alarm_definitions.list.assert_called_once()
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_patch_name(self, mc):
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
ad_name = 'patch_name'
raw_args = '{0} --name {1}'.format(ad_id, ad_name).split(' ')
self._patch_test(mc, raw_args, alarm_id=ad_id, name=ad_name)
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_patch_actions(self, mc):
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
ad_action_id = '16012650-0b62-4692-9103-2d04fe81cc93'
actions = ['alarm-actions', 'ok-actions',
'undetermined-actions']
for action in actions:
raw_args = ('{0} --{1} {2}'.format(ad_id, action, ad_action_id)
.split(' '))
self._patch_test(mc, raw_args, **{
'alarm_id': ad_id,
action.replace('-', '_'): [ad_action_id]
})
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_patch_severity(self, mc):
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
severity_types = ['LOW', 'MEDIUM', 'HIGH', 'CRITICAL']
for st in severity_types:
raw_args = ('{0} --severity {1}'.format(ad_id, st)
.split(' '))
self._patch_test(mc, raw_args, alarm_id=ad_id, severity=st)
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_not_patch_unknown_severity(self, mc):
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
st = 'foo'
raw_args = ('{0} --severity {1}'.format(ad_id, st)
.split(' '))
self._patch_test(mc, raw_args, called=False)
@staticmethod
def _patch_test(mc, args, called=True, **kwargs):
mc.return_value = c = FakeV2Client()
name, cmd_clazz = migr.create_command_class(
'do_alarm_definition_patch',
shell
)
cmd = cmd_clazz(mock.Mock(), mock.Mock())
parser = cmd.get_parser(name)
parsed_args = parser.parse_args(args)
cmd.run(parsed_args)
if called:
c.alarm_definitions.patch.assert_called_once_with(**kwargs)
else:
c.alarm_definitions.patch.assert_not_called()
| apache-2.0 | -7,564,279,838,991,846,000 | 33.806061 | 75 | 0.587672 | false | 3.368328 | true | false | false |
| dahebolangkuan/ToughRADIUS | radiusd/plugins/acct_stop_process.py | 1 | 3010 |
#!/usr/bin/env python
#coding=utf-8
from twisted.python import log
from pyrad import packet
from store import store
from settings import *
import logging
import decimal
import datetime
import utils
decimal.getcontext().prec = 11
decimal.getcontext().rounding = decimal.ROUND_UP
def process(req=None,user=None,runstat=None):
if not req.get_acct_status_type() == STATUS_TYPE_STOP:
return
runstat.acct_stop += 1
ticket = req.get_ticket()
if not ticket.nas_addr:
ticket.nas_addr = req.source[0]
_datetime = datetime.datetime.now()
online = store.get_online(ticket.nas_addr,ticket.acct_session_id)
if not online:
session_time = ticket.acct_session_time
stop_time = _datetime.strftime( "%Y-%m-%d %H:%M:%S")
start_time = (_datetime - datetime.timedelta(seconds=int(session_time))).strftime( "%Y-%m-%d %H:%M:%S")
ticket.acct_start_time = start_time
ticket.acct_stop_time = stop_time
ticket.start_source= STATUS_TYPE_STOP
ticket.stop_source = STATUS_TYPE_STOP
store.add_ticket(ticket)
else:
store.del_online(ticket.nas_addr,ticket.acct_session_id)
ticket.acct_start_time = online['acct_start_time']
ticket.acct_stop_time= _datetime.strftime( "%Y-%m-%d %H:%M:%S")
ticket.start_source = online['start_source']
ticket.stop_source = STATUS_TYPE_STOP
store.add_ticket(ticket)
if not user:return
product = store.get_product(user['product_id'])
if product and product['product_policy'] == FEE_TIMES:
# PrePay fee times policy
user_balance = store.get_user_balance(user['account_number'])
sessiontime = decimal.Decimal(req.get_acct_sessiontime())
billing_times = decimal.Decimal(online['billing_times'])
acct_length = sessiontime-billing_times
fee_price = decimal.Decimal(product['fee_price'])
usedfee = acct_length/decimal.Decimal(3600) * fee_price
usedfee = actual_fee = int(usedfee.to_integral_value())
balance = user_balance - usedfee
if balance < 0 :
balance = 0
actual_fee = user_balance
store.update_billing(utils.Storage(
account_number = online['account_number'],
nas_addr = online['nas_addr'],
acct_session_id = online['acct_session_id'],
acct_start_time = online['acct_start_time'],
acct_session_time = req.get_acct_sessiontime(),
acct_length = int(acct_length.to_integral_value()),
acct_fee = usedfee,
actual_fee = actual_fee,
balance = balance,
is_deduct = 1,
create_time = datetime.datetime.now().strftime( "%Y-%m-%d %H:%M:%S")
),False)
log.msg('%s Accounting stop request, remove online'%req.get_user_name(),level=logging.INFO)
| bsd-2-clause | -922,994,961,154,856,700 | 36.17284 | 111 | 0.599336 | false | 3.781407 | false | false | false |
| alvason/infectious-pulse | code/sir_array_cross_immunity.py | 1 | 10364 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # Infectious Pulse
# https://github.com/alvason/infectious-pulse/
#
# ### Many-strain SIR evolution --- its equilibrium state and infectious pulse due to mutation and cross-immunity
# <codecell>
'''
author: Alvason Zhenhua Li
date: 03/23/2015
'''
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import alva_machinery as alva
AlvaFontSize = 23
AlvaFigSize = (9, 7)
numberingFig = 0
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize=(12, 6))
plt.axis('off')
plt.title(r'$ Many-strain \ SIR \ equations \ (mutation \ and \ cross-immunity) $',fontsize = AlvaFontSize)
plt.text(0, 4.0/6,r'$ \frac{\partial S_n(t)}{\partial t} = \
-\beta S_n(t)\sum_{\eta = n_{min}}^{n_{max}} (1 - \frac{|n - \eta|}{r + |n - \eta|})I_{\eta}(t) + \mu N - \mu S_n(t)$'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 2.0/6, r'$ \frac{\partial I_n(t)}{\partial t} = \
+\beta S_n(t)I_n(t) - \gamma I_n(t) - \mu I_n(t) \
+ m \frac{I_{n - 1}(t) - 2I_n(t) + I_{n + 1}(t)}{(\Delta n)^2} $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 0.0/6,r'$ \frac{\partial R_n(t)}{\partial t} = \
+\gamma I_n(t) - \mu R_n(t) - \beta S_n(t)I_n(t)\
+ \beta S_n(t)\sum_{\eta = n_{min}}^{n_{max}} (1 - \frac{|n - \eta|}{r + |n - \eta|})I_{\eta}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.show()
# define many-strain S-I-R equation
def dSdt_array(SIRxt = [], *args):
# naming
S = SIRxt[0]
I = SIRxt[1]
R = SIRxt[2]
x_totalPoint = SIRxt.shape[1]
# there are n dSdt
dS_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
for xn in range(x_totalPoint):
dS_dt_array[xn] = -infecRate*S[xn]*crossInfect(cross_radius, x_totalPoint, I, xn) + inOutRate*totalSIR - inOutRate*S[xn]
return(dS_dt_array)
def dIdt_array(SIRxt = [], *args):
# naming
S = SIRxt[0]
I = SIRxt[1]
R = SIRxt[2]
x_totalPoint = SIRxt.shape[1]
# there are n dIdt
dI_dt_array = np.zeros(x_totalPoint)
# each dIdt with the same equation form
Icopy = np.copy(I)
centerX = Icopy[:]
leftX = np.roll(Icopy[:], 1)
rightX = np.roll(Icopy[:], -1)
    leftX[0] = centerX[0]
rightX[-1] = centerX[-1]
for xn in range(x_totalPoint):
dI_dt_array[xn] = +infecRate*S[xn]*I[xn] - recovRate*I[xn] - inOutRate*I[xn] + mutatRate*(leftX[xn]
- 2*centerX[xn]
+ rightX[xn])/(dx**2)
return(dI_dt_array)
def dRdt_array(SIRxt = [], *args):
# naming
S = SIRxt[0]
I = SIRxt[1]
R = SIRxt[2]
x_totalPoint = SIRxt.shape[1]
# there are n dRdt
dR_dt_array = np.zeros(x_totalPoint)
# each dIdt with the same equation form
for xn in range(x_totalPoint):
dR_dt_array[xn] = +recovRate*I[xn] - inOutRate*R[xn] + \
(-infecRate*S[xn]*I[xn] + infecRate*S[xn]*crossInfect(cross_radius, x_totalPoint, I, xn))
return(dR_dt_array)
def monodA(r, i):
outM = np.absolute(i)/(r + np.absolute(i))
return (outM)
def crossInfect(cross_radius, cross_range, infect, current_i):
invertM = np.zeros(cross_range)
cross = 0.0
for neighbor in range(cross_range):
invertM[neighbor] = 1 - monodA(cross_radius, dx*(current_i - neighbor))
cross = cross + invertM[neighbor]*infect[neighbor]
# print (neighbor, invertM[neighbor], cross) # for checking purpose
# plt.plot(gridX, invertM, marker = 'o') # for checking purpose
if cross_radius < 0.1: cross = infect[current_i]
return (cross)
# <codecell>
# setting parameter
timeUnit = 'year'
if timeUnit == 'day':
day = 1
year = 365
elif timeUnit == 'year':
year = 1
day = float(1)/365
totalSIR = float(1) # total population
reprodNum = 1.8 # basic reproductive number R0: one infected person will transmit to 1.8 person
recovRate = float(1)/(4*day) # 4 days per period ==> rate/year = 365/4
inOutRate = float(1)/(30*year) # birth rate per year
infecRate = reprodNum*(recovRate + inOutRate)/totalSIR # per year, per person, per total-population
mutatRate = float(1)/(10**17) # mutation rate
cross_radius = float(5) # radius of cross-immunity (the distance of half-of-value in the Monod equation)
# time boundary and griding condition
minT = float(0)*year
maxT = float(40)*year
totalGPoint_T = int(1*10**3 + 1)
spacingT = np.linspace(minT, maxT, num = totalGPoint_T, retstep = True)
gridT = spacingT[0]
dt = spacingT[1]
# space boundary and griding condition
minX = float(0)
maxX = float(40)
totalGPoint_X = int(maxX + 1)
gridingX = np.linspace(minX, maxX, num = totalGPoint_X, retstep = True)
gridX = gridingX[0]
dx = gridingX[1]
gridS_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridI_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridR_array = np.zeros([totalGPoint_X, totalGPoint_T])
# initial output condition (only one virus in equilibrium condition)
# for fast switching from one-virus equilibrium to many-virus equilibrium, invert-Monod distribution of S and R are applied
gridI_array[0, 0] = inOutRate*totalSIR*(reprodNum - 1)/infecRate # only one virus exists
gridR_array[:, 0] = recovRate*totalSIR*(reprodNum - 1)/infecRate * (1 - monodA(cross_radius, gridX))
gridS_array[:, 0] = totalSIR - gridI_array[:, 0] - gridR_array[:, 0]
# Runge Kutta numerical solution
pde_array = np.array([dSdt_array, dIdt_array, dRdt_array])
startingOut_Value = np.array([gridS_array, gridI_array, gridR_array])
gridOut_array = alva.AlvaRungeKutta4ArrayXT(pde_array, startingOut_Value, minX, maxX, totalGPoint_X, minT, maxT, totalGPoint_T)
# plotting
gridS = gridOut_array[0]
gridI = gridOut_array[1]
gridR = gridOut_array[2]
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.contourf(gridT, gridX, gridI, levels = np.arange(0, gridI_array[0, 0]*4, gridI_array[0, 0]/100))
plt.title(r'$ Infectious \ pulse \ by \ mutation \ and \ cross-immunity $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize);
plt.ylabel(r'$ discrete \ space \ (strain) $', fontsize = AlvaFontSize);
plt.colorbar()
plt.text(maxT*4.0/3, maxX*5.0/6, r'$ R_0 = %f $'%(reprodNum), fontsize = AlvaFontSize)
plt.text(maxT*4.0/3, maxX*4.0/6, r'$ \gamma = %f $'%(recovRate), fontsize = AlvaFontSize)
plt.text(maxT*4.0/3, maxX*3.0/6, r'$ \beta = %f $'%(infecRate), fontsize = AlvaFontSize)
plt.text(maxT*4.0/3, maxX*2.0/6, r'$ \mu = %f $'%(inOutRate), fontsize = AlvaFontSize)
plt.text(maxT*4.0/3, maxX*1.0/6, r'$ m = %f $'%(mutatRate*10**14), fontsize = AlvaFontSize)
plt.text(maxT*4.0/3, maxX*0.0/6, r'$ r = %f $'%(cross_radius), fontsize = AlvaFontSize)
plt.show()
# <codecell>
# plot by listing each strain
numberingFig = numberingFig + 1;
for i in range(0, totalGPoint_X, int(totalGPoint_X/10)):
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gridT, gridS[i], label = r'$ S_{%i}(t) $'%(i))
plt.plot(gridT, gridR[i], label = r'$ R_{%i}(t) $'%(i))
plt.plot(gridT, gridI[i], label = r'$ I_{%i}(t) $'%(i))
plt.plot(gridT, infecRate*gridS[i].T*gridI[i].T*day, label = r'$ \beta \ S_{%i}(t)I_{%i}(t) $'%(i, i)
, linestyle = 'dashed', color = 'red')
plt.plot(gridT, (gridS[i] + gridI[i] + gridR[i]).T, label = r'$ S_{%i}(t)+I_{%i}(t)+R_{%i}(t) $'%(i, i, i)
, color = 'black')
plt.grid(True)
plt.title(r'$ Prevalence \ and \ incidence \ of \ SIR $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize);
plt.ylabel(r'$ Proportion \ of \ population $', fontsize = AlvaFontSize);
plt.text(maxT, totalSIR*7.0/6, r'$ R_0 = %f $'%(reprodNum), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*6.0/6, r'$ \gamma = %f $'%(recovRate), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*5.0/6, r'$ \beta = %f $'%(infecRate), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*4.0/6, r'$ \mu = %f $'%(inOutRate), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*3.0/6, r'$ m = %f $'%(mutatRate), fontsize = AlvaFontSize)
plt.legend(loc = (1,0))
plt.show()
# <codecell>
# 3D plotting
# define GridXX function for making 2D-grid from 1D-grid
def AlvaGridXX(gridX, totalGPoint_Y):
gridXX = gridX;
for n in range(totalGPoint_Y - 1):
gridXX = np.vstack((gridXX, gridX))
return gridXX
# for 3D plotting
X = AlvaGridXX(gridT, totalGPoint_X)
Y = AlvaGridXX(gridX, totalGPoint_T).T
Z = gridI
numberingFig = numberingFig + 1
figure = plt.figure(numberingFig, figsize=(16, 7))
figure1 = figure.add_subplot(1,2,1, projection='3d')
figure1.view_init(30, -80)
figure1.plot_wireframe(X, Y, Z, cstride = totalGPoint_T, rstride = int(dx))
plt.xlabel(r'$t \ (time)$', fontsize = AlvaFontSize)
plt.ylabel(r'$x \ (virus \ space)$', fontsize = AlvaFontSize)
figure2 = figure.add_subplot(1,2,2, projection='3d')
figure2.view_init(30, 10)
figure2.plot_wireframe(X, Y, Z, cstride = totalGPoint_T/20, rstride = int(maxX))
plt.xlabel(r'$t \ (time)$', fontsize = AlvaFontSize)
plt.ylabel(r'$x \ (virus \ space)$', fontsize = AlvaFontSize)
figure.tight_layout()
plt.show()
# <codecell>
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gridT, gridS.T)
plt.plot(gridT, gridR.T)
plt.plot(gridT, gridI.T)
plt.plot(gridT, (gridS + gridI + gridR).T, label = r'$ S(t)+I(t)+R(t) $', color = 'black')
plt.title(r'$ Many-strain \ SIR $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Proportion \ of \ population $', fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*6.0/6, r'$ R_0 = %f $'%(reprodNum), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*5.0/6, r'$ \gamma = %f $'%(recovRate), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*4.0/6, r'$ \beta = %f $'%(infecRate), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*3.0/6, r'$ \mu = %f $'%(inOutRate), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*2.0/6, r'$ m = %f $'%(mutatRate), fontsize = AlvaFontSize)
plt.show()
# <codecell>
|
gpl-2.0
| 7,623,518,905,974,450,000
| 39.484375
| 231
| 0.623794
| false
| 2.53151
| false
| false
| false
|
e7dal/hexy
|
hexy/commands/cmd_point.py
|
1
|
1476
|
# -*- coding: utf-8 -*-
# Part of hexy. See LICENSE file for full copyright and licensing details.
import click
from ..cli import pass_hexy
from .. import Hexy
@click.command('point',
short_help='Put a single point on a grid and show grid in hexy tool')
@click.option('--xsize',
'-x',
type=int,
default=10,
help='set the x size (horizontal) for the grid')
@click.option('--ysize',
'-y',
type=int,
default=10,
help='set the y size (vertical) for the grid')
@click.option('--xpos',
'-i',
type=int,
default=3,
help='set the x position for the point')
@click.option('--ypos',
'-j',
type=int,
default=3,
              help='set the y position for the point')
@click.option('--char',
'-c',
type=str,
default='x',
help='the character to put in the given point i,j')
@pass_hexy
def cli(ctx, xsize,ysize,xpos,ypos,char):
"""Show example for doing some task in hexy(experimental)"""
ctx.say('grid', stuff=(xsize,ysize),verbosity=100)
ctx.say('point',stuff=(xpos,ypos,char),verbosity=100)
if len(char)>1:
ctx.mumble('point, the character is longer than one, using first char',verbosity=100)
char=char[0]
g=Hexy(x=xsize,y=ysize)
g.point(xpos=xpos,ypos=ypos,char=char)
click.echo(g.show())
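# Hypothetical command-line usage (option names come from the declarations above;
# the `hexy` entry-point name is an assumption):
#   hexy point --xsize 12 --ysize 8 --xpos 4 --ypos 5 --char '*'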
|
gpl-3.0
| -5,836,573,175,946,536,000
| 29.75
| 87
| 0.553523
| false
| 3.49763
| false
| false
| false
|
unstko/adventofcode2016
|
01/solution.py
|
1
|
3446
|
from lib import solution
from lib.point import Point2D
from lib.map import Map
import copy
class Solution(solution.Solution):
def __init__(self, nr):
super().__init__(nr)
self.instructions = []
self.directions = ['N', 'E', 'S', 'W']
self.face = 0
self.source = Point2D(0, 0)
self.destination = Point2D(0, 0)
self.distance = 0
self.map = Map('RGB', (350, 350), 0, 'center')
def calculate(self, test=False):
self.test = test
self.map_init()
self.read_instructions()
self.calc_destination()
self.calc_distance()
self.map_result()
def map_init(self):
self.map.set_point(self.source, (0, 255, 0))
def map_result(self):
if not self.test:
self.map.show()
self.map.print_min_and_max()
def read_instructions(self):
self.read_input()
self.instructions = self.input.split(', ')
def calc_destination(self):
for instruction in self.instructions:
self.calc_face(instruction)
self.move_destination(instruction)
self.set_and_check_path()
def calc_face(self, instruction):
turn = instruction[0]
move = 1
if turn == 'L':
move = -1
self.face = (self.face + move) % len(self.directions)
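        # Illustrative only: starting from face 0 ('N'), an 'L' turn gives
        # (0 - 1) % 4 == 3, i.e. 'W', so the modulo wraps left turns past north.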
def move_destination(self, instruction):
blocks = int(instruction[1:])
direction = self.get_direction()
self.source = copy.copy(self.destination)
if direction == 'N':
self.destination.move(0, blocks)
elif direction == 'S':
self.destination.move(0, -1 * blocks)
elif direction == 'E':
self.destination.move(blocks, 0)
elif direction == 'W':
self.destination.move(-1 * blocks, 0)
def get_direction(self):
return self.directions[self.face]
def calc_distance(self):
self.distance = self.destination.manhattan_distance(Point2D(0, 0))
self.set_solution(1, self.distance)
def set_and_check_path(self):
if not self.is_calculated(2):
x_src = self.source.get_x()
y_src = self.source.get_y()
x_dst = self.destination.get_x()
y_dst = self.destination.get_y()
direction = self.get_direction()
step = 1
if direction == 'S' or direction == 'W':
step = -1
range_x = range(x_src, x_dst+step, step)
range_y = range(y_src, y_dst+step, step)
for x in range_x:
if x == x_src:
continue
point = Point2D(x, y_dst)
check = self.set_and_check_point(point)
if check:
return
for y in range_y:
if y == y_src:
continue
point = Point2D(x_dst, y)
check = self.set_and_check_point(point)
if check:
return
def set_and_check_point(self, point: Point2D):
check = False
if self.map.get_point(point) == (255, 255, 255):
self.map.set_point(point, (255, 0, 0))
distance = point.manhattan_distance(Point2D(0, 0))
self.set_solution(2, distance)
check = True
else:
self.map.set_point(point, (255, 255, 255))
return check
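# A hedged illustration of the rules implemented above (Point2D and its
# manhattan_distance signature are assumed from lib.point): starting at the
# origin facing north, the instructions "R2, L3" end at (2, 3), so the part-one
# answer would be abs(2) + abs(3) == 5 blocks.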
|
mit
| -315,879,149,843,139,140
| 31.205607
| 74
| 0.525537
| false
| 3.762009
| false
| false
| false
|
wadobo/socializa
|
backend/socializa/settings.py
|
1
|
9337
|
"""
Django settings for socializa project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import logging
# Silence oauthlib logging when running tests. To enable debug output, change logging.ERROR to
# logging.DEBUG
log = logging.getLogger('oauthlib')
log.setLevel(logging.ERROR)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gh)^9&mtcp($nlm-zvlnb(lpe+b8kgbk(l30@u%xdpk@w5@n%j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEV = False
ALLOWED_HOSTS = []
ADMINS = (
('wadobo', 'socializa@wadobo.com'),
)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'rest_framework',
'rest_framework.authtoken',
'oauth2_provider',
'social_django',
'rest_framework_social_oauth2',
'rest_framework_swagger',
'django_nose',
'frontend',
'player',
'event',
'game',
'clue',
'store',
'editor',
'landing',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'socializa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'socializa.wsgi.application'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
#'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
#],
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
'rest_framework_social_oauth2.authentication.SocialAuthentication',
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'TEST_REQUEST_DEFAULT_FORMAT': 'json'
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'socializa',
'USER': 'socializa',
'PASSWORD': 'socializa',
'HOST': 'localhost',
'PORT': '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'en-us'
LANGUAGES = [
('en', _('English')),
('es', _('Spanish')),
]
LOCALE_PATHS = (os.path.join(BASE_DIR, "locale"), )
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
# Dev active
if DEV:
MIDDLEWARE += ('silk.middleware.SilkyMiddleware',)
INSTALLED_APPS += ('silk', 'django_extensions')
SILKY_PYTHON_PROFILER = True
SILKY_META = True
SILKY_DYNAMIC_PROFILING = [
{'module': 'player.views', 'function': 'PlayersNear.get', 'name': 'near players'},
{'module': 'player.views', 'function': 'MeetingCreate.post', 'name': 'meeting players'}
]
GRAPH_MODELS = {
'all_applications': False,
'group_models': True,
}
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
BASE_URL = 'https://socializa.wadobo.com'
DEFAULT_FROM_EMAIL = 'socializa@wadobo.com'
# SOCIAL AUTHENTICATION
AUTHENTICATION_BACKENDS = (
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.facebook.FacebookOAuth2',
'rest_framework_social_oauth2.backends.DjangoOAuth2',
'django.contrib.auth.backends.ModelBackend'
)
SOCIAL_AUTH_PIPELINE = (
# Get the information we can about the user and return it in a simple
# format to create the user instance later. On some cases the details are
# already part of the auth response from the provider, but sometimes this
# could hit a provider API.
'social.pipeline.social_auth.social_details',
# Get the social uid from whichever service we're authing thru. The uid is
# the unique identifier of the given user in the provider.
'social.pipeline.social_auth.social_uid',
# Verifies that the current auth process is valid within the current
# project, this is where emails and domains whitelists are applied (if
# defined).
'social.pipeline.social_auth.auth_allowed',
# Checks if the current social-account is already associated in the site.
'social.pipeline.social_auth.social_user',
# Make up a username for this person, appends a random string at the end if
# there's any collision.
'social.pipeline.user.get_username',
# Send a validation email to the user to verify its email address.
# Disabled by default.
'social.pipeline.mail.mail_validation',
# Associates the current social details with another user account with
# a similar email address. Disabled by default.
'social.pipeline.social_auth.associate_by_email',
# Create a user account if we haven't found one yet.
'social.pipeline.user.create_user',
# Custom function
'player.utils.create_player',
# Create the record that associates the social account with the user.
'social.pipeline.social_auth.associate_user',
# Populate the extra_data field in the social record with the values
# specified by settings (and the default ones like access_token, etc).
'social.pipeline.social_auth.load_extra_data',
# Update the user record with any changed info from the auth service.
'social.pipeline.user.user_details',
)
PROPRIETARY_BACKEND_NAME = 'Django'
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True
OAUTH2_PROVIDER = {
'ACCESS_TOKEN_EXPIRE_SECONDS': 24 * 60 * 60 * 365, # a whole year
}
# DEBUG SOCIAL_AUTH
SOCIAL_AUTH_RAISE_EXCEPTIONS = False
# GOOGLE
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = 'update me'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'update me'
GOOGLE_APIKEY = 'update me'
# FACEBOOK
SOCIAL_AUTH_FACEBOOK_KEY = 'update me'
SOCIAL_AUTH_FACEBOOK_SECRET = 'update me'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id,name,email',
}
SWAGGER_SETTINGS = {
'DOC_EXPANSION': 'list',
'APIS_SORTER': 'alpha',
'JSON_EDITOR': True,
'OPERATIONS_SORTER': 'alpha',
'USE_SESSION_AUTH': False,
'SHOW_REQUEST_HEADERS': True,
'SECURITY_DEFINITIONS': {
'token': {
'type': 'apiKey',
'name': 'authorization',
'description': 'The valid api_key should be: "Token xxxxxxxxxxx"',
'in': 'header'
},
},
}
# For celery
CELERY_BROKER_URL = 'amqp://localhost'
CELERY_RESULT_BACKEND = 'amqp://localhost'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
DEFAULT_VISION_DISTANCE = 1000 # m
DEFAULT_MEETING_DISTANCE = 10 # m
QR_LENGTH = 16
DEFAULT_PAGINATION = 20
try:
from local_settings import *
except:
print("NO LOCAL SETTINGS")
|
agpl-3.0
| 583,695,365,934,439,000
| 28.269592
| 95
| 0.678912
| false
| 3.556952
| false
| false
| false
|
mfussenegger/python-prompt-toolkit
|
prompt_toolkit/key_bindings/vi.py
|
1
|
35929
|
from __future__ import unicode_literals
from ..enums import IncrementalSearchDirection, InputMode
from ..keys import Keys
from ..line import ClipboardData, ClipboardDataType, SelectionType, indent, unindent
from ..selection import SelectionType
from .basic import basic_bindings
from .utils import create_handle_decorator
import codecs
__all__ = (
'vi_bindings',
)
class CursorRegion(object):
"""
Return struct for functions wrapped in ``change_delete_move_yank_handler``.
"""
def __init__(self, start, end=0):
self.start = start
self.end = end
def sorted(self):
"""
Return a (start, end) tuple where start <= end.
"""
if self.start < self.end:
return self.start, self.end
else:
return self.end, self.start
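        # Illustrative only: CursorRegion(5, 2).sorted() == (2, 5), and
        # CursorRegion(2, 5).sorted() == (2, 5) as well.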
def vi_bindings(registry, cli_ref):
"""
Vi extensions.
# Overview of Readline Vi commands:
# http://www.catonmat.net/download/bash-vi-editing-mode-cheat-sheet.pdf
"""
basic_bindings(registry, cli_ref)
line = cli_ref().line
search_line = cli_ref().lines['search']
handle = create_handle_decorator(registry, line)
_last_character_find = [None] # (char, backwards) tuple
_search_direction = [IncrementalSearchDirection.FORWARD]
vi_transform_functions = [
# Rot 13 transformation
(('g', '?'), lambda string: codecs.encode(string, 'rot_13')),
# To lowercase
(('g', 'u'), lambda string: string.lower()),
# To uppercase.
(('g', 'U'), lambda string: string.upper()),
# Swap case.
# (XXX: If we would implement 'tildeop', the 'g' prefix is not required.)
(('g', '~'), lambda string: string.swapcase()),
]
@registry.add_after_handler_callback
def check_cursor_position(event):
"""
After every command, make sure that if we are in navigation mode, we
never put the cursor after the last character of a line. (Unless it's
an empty line.)
"""
if (
event.input_processor.input_mode == InputMode.VI_NAVIGATION and
line.document.is_cursor_at_the_end_of_line and
len(line.document.current_line) > 0):
line.cursor_position -= 1
@handle(Keys.Escape)
def _(event):
"""
Escape goes to vi navigation mode.
"""
if event.input_processor.input_mode in (InputMode.INSERT,
InputMode.VI_REPLACE):
line.cursor_position += line.document.get_cursor_left_position()
if event.input_processor.input_mode == InputMode.SELECTION:
line.exit_selection()
event.input_processor.pop_input_mode()
else:
event.input_processor.input_mode = InputMode.VI_NAVIGATION
@handle(Keys.Up, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Arrow up in navigation mode.
"""
line.auto_up(count=event.arg)
@handle(Keys.Down, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Arrow down in navigation mode.
"""
line.auto_down(count=event.arg)
@handle(Keys.Backspace, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
In navigation-mode, move cursor.
"""
line.cursor_position += line.document.get_cursor_left_position(count=event.arg)
@handle(Keys.ControlV, Keys.Any, in_mode=InputMode.INSERT)
def _(event):
"""
Insert a character literally (quoted insert).
"""
line.insert_text(event.data, overwrite=False)
@handle(Keys.ControlN, in_mode=InputMode.INSERT)
def _(event):
line.complete_next()
@handle(Keys.ControlN, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Control-N: Next completion.
"""
line.auto_down()
@handle(Keys.ControlP, in_mode=InputMode.INSERT)
def _(event):
"""
Control-P: To previous completion.
"""
line.complete_previous()
@handle(Keys.ControlY, in_mode=InputMode.INSERT)
def _(event):
"""
Accept current completion.
"""
line.complete_state = None
@handle(Keys.ControlE, in_mode=InputMode.INSERT)
def _(event):
"""
Cancel completion. Go back to originally typed text.
"""
line.cancel_completion()
@handle(Keys.ControlP, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
CtrlP in navigation mode goes up.
"""
line.auto_up()
@handle(Keys.ControlJ, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlM, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
In navigation mode, pressing enter will always return the input.
"""
if line.validate():
line.add_to_history()
cli_ref().set_return_value(line.document)
# ** In navigation mode **
# List of navigation commands: http://hea-www.harvard.edu/~fine/Tech/vi.html
@handle('a', in_mode=InputMode.VI_NAVIGATION)
def _(event):
line.cursor_position += line.document.get_cursor_right_position()
event.input_processor.input_mode = InputMode.INSERT
@handle('A', in_mode=InputMode.VI_NAVIGATION)
def _(event):
line.cursor_position += line.document.get_end_of_line_position()
event.input_processor.input_mode = InputMode.INSERT
@handle('C', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
# Change to end of line.
# Same as 'c$' (which is implemented elsewhere.)
"""
deleted = line.delete(count=line.document.get_end_of_line_position())
if deleted:
data = ClipboardData(deleted)
line.set_clipboard(data)
event.input_processor.input_mode = InputMode.INSERT
@handle('c', 'c', in_mode=InputMode.VI_NAVIGATION)
@handle('S', in_mode=InputMode.VI_NAVIGATION)
def _(event): # TODO: implement 'arg'
"""
Change current line
"""
# We copy the whole line.
data = ClipboardData(line.document.current_line, ClipboardDataType.LINES)
line.set_clipboard(data)
# But we delete after the whitespace
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
line.delete(count=line.document.get_end_of_line_position())
event.input_processor.input_mode = InputMode.INSERT
@handle('D', in_mode=InputMode.VI_NAVIGATION)
def _(event):
deleted = line.delete(count=line.document.get_end_of_line_position())
line.set_clipboard(ClipboardData(deleted))
@handle('d', 'd', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Delete line. (Or the following 'n' lines.)
"""
# Split string in before/deleted/after text.
lines = line.document.lines
before = '\n'.join(lines[:line.document.cursor_position_row])
deleted = '\n'.join(lines[line.document.cursor_position_row: line.document.cursor_position_row + event.arg])
after = '\n'.join(lines[line.document.cursor_position_row + event.arg:])
# Set new text.
if before and after:
before = before + '\n'
line.text = before + after
# Set cursor position. (At the start of the first 'after' line, after the leading whitespace.)
line.cursor_position = len(before) + len(after) - len(after.lstrip(' '))
# Set clipboard data
line.set_clipboard(ClipboardData(deleted, ClipboardDataType.LINES))
@handle('G', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
If an argument is given, move to this line in the history. (for
        example, 15G). Otherwise, go to the last line of the current string.
"""
# If an arg has been given explicitely.
if event._arg:
line.go_to_history(event.arg - 1)
# Otherwise this goes to the last line of the file.
else:
line.cursor_position = len(line.text)
@handle('i', in_mode=InputMode.VI_NAVIGATION)
def _(event):
event.input_processor.input_mode = InputMode.INSERT
@handle('I', in_mode=InputMode.VI_NAVIGATION)
def _(event):
event.input_processor.input_mode = InputMode.INSERT
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
@handle('J', in_mode=InputMode.VI_NAVIGATION)
def _(event):
for i in range(event.arg):
line.join_next_line()
@handle('n', in_mode=InputMode.VI_NAVIGATION)
def _(event): # XXX: use `change_delete_move_yank_handler` and implement 'arg'
"""
Search next.
"""
line.incremental_search(_search_direction[0])
@handle('N', in_mode=InputMode.VI_NAVIGATION)
def _(event): # TODO: use `change_delete_move_yank_handler` and implement 'arg'
"""
Search previous.
"""
if _search_direction[0] == IncrementalSearchDirection.BACKWARD:
line.incremental_search(IncrementalSearchDirection.FORWARD)
else:
line.incremental_search(IncrementalSearchDirection.BACKWARD)
@handle('p', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Paste after
"""
for i in range(event.arg):
line.paste_from_clipboard()
@handle('P', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Paste before
"""
for i in range(event.arg):
line.paste_from_clipboard(before=True)
@handle('r', Keys.Any, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Replace single character under cursor
"""
line.insert_text(event.data * event.arg, overwrite=True)
line.cursor_position -= 1
@handle('R', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Go to 'replace'-mode.
"""
event.input_processor.input_mode = InputMode.VI_REPLACE
@handle('s', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Substitute with new text
(Delete character(s) and go to insert mode.)
"""
data = ClipboardData(''.join(line.delete() for i in range(event.arg)))
line.set_clipboard(data)
event.input_processor.input_mode = InputMode.INSERT
@handle('u', in_mode=InputMode.VI_NAVIGATION)
def _(event):
for i in range(event.arg):
line.undo()
@handle('v', in_mode=InputMode.VI_NAVIGATION)
def _(event):
line.open_in_editor()
# @handle('v', in_mode=InputMode.VI_NAVIGATION)
# def _(event):
# """
# Start characters selection.
# """
# line.start_selection(selection_type=SelectionType.CHARACTERS)
# event.input_processor.push_input_mode(InputMode.SELECTION)
@handle('V', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Start lines selection.
"""
line.start_selection(selection_type=SelectionType.LINES)
event.input_processor.push_input_mode(InputMode.SELECTION)
@handle('a', 'w', in_mode=InputMode.SELECTION)
@handle('a', 'W', in_mode=InputMode.SELECTION)
def _(event):
"""
Switch from visual linewise mode to visual characterwise mode.
"""
if line.selection_state and line.selection_state.type == SelectionType.LINES:
line.selection_state.type = SelectionType.CHARACTERS
@handle('x', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Delete character.
"""
data = ClipboardData(line.delete(count=event.arg))
line.set_clipboard(data)
@handle('x', in_mode=InputMode.SELECTION)
@handle('d', 'd', in_mode=InputMode.SELECTION)
def _(event):
"""
Cut selection.
"""
selection_type = line.selection_state.type
deleted = line.cut_selection()
line.set_clipboard(ClipboardData(deleted, selection_type))
event.input_processor.pop_input_mode()
@handle('c', in_mode=InputMode.SELECTION)
def _(event):
"""
Change selection (cut and go to insert mode).
"""
selection_type = line.selection_state.type
deleted = line.cut_selection()
line.set_clipboard(ClipboardData(deleted, selection_type))
event.input_processor.pop_input_mode()
event.input_processor.input_mode = InputMode.INSERT
@handle('y', in_mode=InputMode.SELECTION)
def _(event):
"""
Copy selection.
"""
selection_type = line.selection_state.type
deleted = line.copy_selection()
line.set_clipboard(ClipboardData(deleted, selection_type))
event.input_processor.pop_input_mode()
@handle('X', in_mode=InputMode.VI_NAVIGATION)
def _(event):
data = line.delete_before_cursor()
line.set_clipboard(data)
@handle('y', 'y', in_mode=InputMode.VI_NAVIGATION)
@handle('Y', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Yank the whole line.
"""
text = '\n'.join(line.document.lines_from_current[:event.arg])
data = ClipboardData(text, ClipboardDataType.LINES)
line.set_clipboard(data)
@handle('+', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Move to first non whitespace of next line
"""
line.cursor_position += line.document.get_cursor_down_position(count=event.arg)
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
@handle('-', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Move to first non whitespace of previous line
"""
line.cursor_position += line.document.get_cursor_up_position(count=event.arg)
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
@handle('>', '>', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Indent lines.
"""
current_row = line.document.cursor_position_row
indent(line, current_row, current_row + event.arg)
@handle('<', '<', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Unindent lines.
"""
current_row = line.document.cursor_position_row
unindent(line, current_row, current_row + event.arg)
@handle('>', in_mode=InputMode.SELECTION)
def _(event):
"""
Indent selection
"""
selection_type = line.selection_state.type
if selection_type == SelectionType.LINES:
from_, to = line.document.selection_range()
from_, _ = line.document.translate_index_to_position(from_)
to, _ = line.document.translate_index_to_position(to)
indent(line, from_ - 1, to, count=event.arg) # XXX: why does translate_index_to_position return 1-based indexing???
event.input_processor.pop_input_mode()
@handle('<', in_mode=InputMode.SELECTION)
def _(event):
"""
Unindent selection
"""
selection_type = line.selection_state.type
if selection_type == SelectionType.LINES:
from_, to = line.document.selection_range()
from_, _ = line.document.translate_index_to_position(from_)
to, _ = line.document.translate_index_to_position(to)
unindent(line, from_ - 1, to, count=event.arg)
event.input_processor.pop_input_mode()
@handle('O', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Open line above and enter insertion mode
"""
line.insert_line_above()
event.input_processor.input_mode = InputMode.INSERT
@handle('o', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Open line below and enter insertion mode
"""
line.insert_line_below()
event.input_processor.input_mode = InputMode.INSERT
@handle('~', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Reverse case of current character and move cursor forward.
"""
c = line.document.current_char
if c is not None and c != '\n':
c = (c.upper() if c.islower() else c.lower())
line.insert_text(c, overwrite=True)
@handle('/', in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlS, in_mode=InputMode.INSERT)
@handle(Keys.ControlS, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlS, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Vi-style forward search.
"""
_search_direction[0] = direction = IncrementalSearchDirection.FORWARD
line.incremental_search(direction)
if event.input_processor.input_mode != InputMode.VI_SEARCH:
event.input_processor.push_input_mode(InputMode.VI_SEARCH)
@handle('?', in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlR, in_mode=InputMode.INSERT)
@handle(Keys.ControlR, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlR, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Vi-style backward search.
"""
_search_direction[0] = direction = IncrementalSearchDirection.BACKWARD
line.incremental_search(direction)
if event.input_processor.input_mode != InputMode.VI_SEARCH:
event.input_processor.push_input_mode(InputMode.VI_SEARCH)
@handle('#', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
        Go to the previous occurrence of this word.
"""
pass
@handle('*', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
        Go to the next occurrence of this word.
"""
pass
@handle('(', in_mode=InputMode.VI_NAVIGATION)
def _(event):
# TODO: go to begin of sentence.
pass
@handle(')', in_mode=InputMode.VI_NAVIGATION)
def _(event):
# TODO: go to end of sentence.
pass
def change_delete_move_yank_handler(*keys, **kw):
"""
        Register change/delete/move/yank handlers, e.g. 'dw'/'cw'/'w'/'yw'.
The decorated function should return a ``CursorRegion``.
This decorator will create both the 'change', 'delete' and move variants,
based on that ``CursorRegion``.
"""
no_move_handler = kw.pop('no_move_handler', False)
# TODO: Also do '>' and '<' indent/unindent operators.
# TODO: Also "gq": text formatting
# See: :help motion.txt
def decorator(func):
if not no_move_handler:
@handle(*keys, in_mode=InputMode.VI_NAVIGATION)
@handle(*keys, in_mode=InputMode.SELECTION)
def move(event):
""" Create move handler. """
region = func(event)
line.cursor_position += region.start
def create_transform_handler(transform_func, *a):
@handle(*(a + keys), in_mode=InputMode.VI_NAVIGATION)
def _(event):
""" Apply transformation (uppercase, lowercase, rot13, swap case). """
region = func(event)
start, end = region.sorted()
# Transform.
line.transform_region(
line.cursor_position + start,
line.cursor_position + end,
transform_func)
# Move cursor
line.cursor_position += (region.end or region.start)
for k, f in vi_transform_functions:
create_transform_handler(f, *k)
@handle('y', *keys, in_mode=InputMode.VI_NAVIGATION)
def yank_handler(event):
""" Create yank handler. """
region = func(event)
start, end = region.sorted()
substring = line.text[line.cursor_position + start: line.cursor_position + end]
if substring:
line.set_clipboard(ClipboardData(substring))
def create(delete_only):
""" Create delete and change handlers. """
@handle('cd'[delete_only], *keys, in_mode=InputMode.VI_NAVIGATION)
@handle('cd'[delete_only], *keys, in_mode=InputMode.VI_NAVIGATION)
def _(event):
region = func(event)
deleted = ''
if region:
start, end = region.sorted()
# Move to the start of the region.
line.cursor_position += start
# Delete until end of region.
deleted = line.delete(count=end-start)
# Set deleted/changed text to clipboard.
if deleted:
line.set_clipboard(ClipboardData(''.join(deleted)))
# Only go back to insert mode in case of 'change'.
if not delete_only:
event.input_processor.input_mode = InputMode.INSERT
create(True)
create(False)
return func
return decorator
@change_delete_move_yank_handler('b')
def _(event):
""" Move one word or token left. """
return CursorRegion(line.document.find_start_of_previous_word(count=event.arg) or 0)
@change_delete_move_yank_handler('B')
def _(event):
""" Move one non-blank word left """
return CursorRegion(line.document.find_start_of_previous_word(count=event.arg, WORD=True) or 0)
@change_delete_move_yank_handler('$')
def key_dollar(event):
""" 'c$', 'd$' and '$': Delete/change/move until end of line. """
return CursorRegion(line.document.get_end_of_line_position())
@change_delete_move_yank_handler('w')
def _(event):
""" 'word' forward. 'cw', 'dw', 'w': Delete/change/move one word. """
return CursorRegion(line.document.find_next_word_beginning(count=event.arg) or
line.document.end_position)
@change_delete_move_yank_handler('W')
def _(event):
""" 'WORD' forward. 'cW', 'dW', 'W': Delete/change/move one WORD. """
return CursorRegion(line.document.find_next_word_beginning(count=event.arg, WORD=True) or
line.document.end_position)
@change_delete_move_yank_handler('e')
def _(event):
""" End of 'word': 'ce', 'de', 'e' """
end = line.document.find_next_word_ending(count=event.arg)
return CursorRegion(end - 1 if end else 0)
@change_delete_move_yank_handler('E')
def _(event):
""" End of 'WORD': 'cE', 'dE', 'E' """
end = line.document.find_next_word_ending(count=event.arg, WORD=True)
return CursorRegion(end - 1 if end else 0)
@change_delete_move_yank_handler('i', 'w', no_move_handler=True)
def _(event):
""" Inner 'word': ciw and diw """
start, end = line.document.find_boundaries_of_current_word()
return CursorRegion(start, end)
@change_delete_move_yank_handler('a', 'w', no_move_handler=True)
def _(event):
""" A 'word': caw and daw """
start, end = line.document.find_boundaries_of_current_word(include_trailing_whitespace=True)
return CursorRegion(start, end)
@change_delete_move_yank_handler('i', 'W', no_move_handler=True)
def _(event):
""" Inner 'WORD': ciW and diW """
start, end = line.document.find_boundaries_of_current_word(WORD=True)
return CursorRegion(start, end)
@change_delete_move_yank_handler('a', 'W', no_move_handler=True)
def _(event):
""" A 'WORD': caw and daw """
start, end = line.document.find_boundaries_of_current_word(WORD=True, include_trailing_whitespace=True)
return CursorRegion(start, end)
@change_delete_move_yank_handler('^')
def key_circumflex(event):
""" 'c^', 'd^' and '^': Soft start of line, after whitespace. """
return CursorRegion(line.document.get_start_of_line_position(after_whitespace=True))
@change_delete_move_yank_handler('0', no_move_handler=True)
def key_zero(event):
"""
'c0', 'd0': Hard start of line, before whitespace.
(The move '0' key is implemented elsewhere, because a '0' could also change the `arg`.)
"""
return CursorRegion(line.document.get_start_of_line_position(after_whitespace=False))
def create_ci_ca_handles(ci_start, ci_end, inner):
# TODO: 'dab', 'dib', (brackets or block) 'daB', 'diB', Braces.
# TODO: 'dat', 'dit', (tags (like xml)
"""
        Delete/Change the string between this start and stop character. The 'i' (inner) variants keep these delimiters; the 'a' variants include them.
This implements all the ci", ci<, ci{, ci(, di", di<, ca", ca<, ... combinations.
"""
@change_delete_move_yank_handler('ai'[inner], ci_start, no_move_handler=True)
@change_delete_move_yank_handler('ai'[inner], ci_end, no_move_handler=True)
def _(event):
start = line.document.find_backwards(ci_start, in_current_line=True)
end = line.document.find(ci_end, in_current_line=True)
if start is not None and end is not None:
offset = 0 if inner else 1
return CursorRegion(start + 1 - offset, end + offset)
for inner in (False, True):
for ci_start, ci_end in [('"', '"'), ("'", "'"), ("`", "`"),
('[', ']'), ('<', '>'), ('{', '}'), ('(', ')')]:
create_ci_ca_handles(ci_start, ci_end, inner)
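    # Illustrative only: with the pairs registered above, ci( changes the text
    # between the parentheses while keeping them, and ca( changes the text
    # including the parentheses themselves.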
@change_delete_move_yank_handler('{') # TODO: implement 'arg'
def _(event):
"""
Move to previous blank-line separated section.
Implements '{', 'c{', 'd{', 'y{'
"""
line_index = line.document.find_previous_matching_line(
lambda text: not text or text.isspace())
if line_index:
index = line.document.get_cursor_up_position(count=-line_index)
else:
index = 0
return CursorRegion(index)
@change_delete_move_yank_handler('}') # TODO: implement 'arg'
def _(event):
"""
Move to next blank-line separated section.
Implements '}', 'c}', 'd}', 'y}'
"""
line_index = line.document.find_next_matching_line(
lambda text: not text or text.isspace())
if line_index:
index = line.document.get_cursor_down_position(count=line_index)
else:
index = 0
return CursorRegion(index)
@change_delete_move_yank_handler('f', Keys.Any)
def _(event):
"""
        Go to the next occurrence of a character. Typing 'fx' will move the
        cursor to the next occurrence of the character 'x'.
"""
_last_character_find[0] = (event.data, False)
match = line.document.find(event.data, in_current_line=True, count=event.arg)
return CursorRegion(match or 0)
@change_delete_move_yank_handler('F', Keys.Any)
def _(event):
"""
        Go to the previous occurrence of a character. Typing 'Fx' will move the
        cursor to the previous occurrence of the character 'x'.
"""
_last_character_find[0] = (event.data, True)
return CursorRegion(line.document.find_backwards(event.data, in_current_line=True, count=event.arg) or 0)
@change_delete_move_yank_handler('t', Keys.Any)
def _(event):
"""
        Move right to the next occurrence of c, then one char backward.
"""
_last_character_find[0] = (event.data, False)
match = line.document.find(event.data, in_current_line=True, count=event.arg)
return CursorRegion(match - 1 if match else 0)
@change_delete_move_yank_handler('T', Keys.Any)
def _(event):
"""
        Move left to the previous occurrence of c, then one char forward.
"""
_last_character_find[0] = (event.data, True)
match = line.document.find_backwards(event.data, in_current_line=True, count=event.arg)
return CursorRegion(match + 1 if match else 0)
def repeat(reverse):
"""
Create ',' and ';' commands.
"""
@change_delete_move_yank_handler(',' if reverse else ';')
def _(event):
# Repeat the last 'f'/'F'/'t'/'T' command.
pos = 0
if _last_character_find[0]:
char, backwards = _last_character_find[0]
if reverse:
backwards = not backwards
if backwards:
pos = line.document.find_backwards(char, in_current_line=True, count=event.arg)
else:
pos = line.document.find(char, in_current_line=True, count=event.arg)
return CursorRegion(pos or 0)
repeat(True)
repeat(False)
@change_delete_move_yank_handler('h')
@change_delete_move_yank_handler(Keys.Left)
def _(event):
""" Implements 'ch', 'dh', 'h': Cursor left. """
return CursorRegion(line.document.get_cursor_left_position(count=event.arg))
@change_delete_move_yank_handler('j')
def _(event):
""" Implements 'cj', 'dj', 'j', ... Cursor up. """
return CursorRegion(line.document.get_cursor_down_position(count=event.arg))
@change_delete_move_yank_handler('k')
def _(event):
""" Implements 'ck', 'dk', 'k', ... Cursor up. """
return CursorRegion(line.document.get_cursor_up_position(count=event.arg))
@change_delete_move_yank_handler('l')
@change_delete_move_yank_handler(' ')
@change_delete_move_yank_handler(Keys.Right)
def _(event):
""" Implements 'cl', 'dl', 'l', 'c ', 'd ', ' '. Cursor right. """
return CursorRegion(line.document.get_cursor_right_position(count=event.arg))
@change_delete_move_yank_handler('H')
def _(event):
""" Implements 'cH', 'dH', 'H'. """
# Vi moves to the start of the visible region.
# cursor position 0 is okay for us.
return CursorRegion(-len(line.document.text_before_cursor))
@change_delete_move_yank_handler('L')
def _(event):
# Vi moves to the end of the visible region.
# cursor position 0 is okay for us.
return CursorRegion(len(line.document.text_after_cursor))
@change_delete_move_yank_handler('%')
def _(event):
"""
Implements 'c%', 'd%', '%, 'y%' (Move to corresponding bracket.)
        If an 'arg' has been given, go to this percentage position in the file.
"""
if event._arg:
# If 'arg' has been given, the meaning of % is to go to the 'x%'
# row in the file.
if 0 < event.arg <= 100:
absolute_index = line.document.translate_row_col_to_index(
int(event.arg * line.document.line_count / 100), 0)
return CursorRegion(absolute_index - line.document.cursor_position)
else:
return CursorRegion(0) # Do nothing.
else:
# Move to the corresponding opening/closing bracket (()'s, []'s and {}'s).
return CursorRegion(line.document.matching_bracket_position)
@change_delete_move_yank_handler('|')
def _(event):
# Move to the n-th column (you may specify the argument n by typing
# it on number keys, for example, 20|).
return CursorRegion(line.document.get_column_cursor_position(event.arg))
@change_delete_move_yank_handler('g', 'g')
def _(event):
"""
Implements 'gg', 'cgg', 'ygg'
"""
# Move to the top of the input.
return CursorRegion(line.document.home_position)
@handle('!', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
'!' opens the system prompt.
"""
event.input_processor.push_input_mode(InputMode.SYSTEM)
@handle(Keys.Any, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.Any, in_mode=InputMode.SELECTION)
def _(event):
"""
        Always handle numeric keys in navigation mode as arg.
"""
if event.data in '123456789' or (event._arg and event.data == '0'):
event.append_to_arg_count(event.data)
elif event.data == '0':
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=False)
@handle(Keys.Any, in_mode=InputMode.VI_REPLACE)
def _(event):
"""
Insert data at cursor position.
"""
line.insert_text(event.data, overwrite=True)
@handle(Keys.Any, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Insert text after the / or ? prompt.
"""
search_line.insert_text(event.data)
line.set_search_text(search_line.text)
@handle(Keys.ControlJ, in_mode=InputMode.VI_SEARCH)
@handle(Keys.ControlM, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Enter at the / or ? prompt.
"""
        # Add query to the history of the search line.
search_line.add_to_history()
search_line.reset()
# Go back to navigation mode.
event.input_processor.pop_input_mode()
@handle(Keys.Backspace, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Backspace at the vi-search prompt.
"""
if search_line.text:
search_line.delete_before_cursor()
line.set_search_text(search_line.text)
else:
# If no text after the prompt, cancel search.
line.exit_isearch(restore_original_line=True)
search_line.reset()
event.input_processor.pop_input_mode()
@handle(Keys.Up, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Go to the previous history item at the search prompt.
"""
search_line.auto_up()
line.set_search_text(search_line.text)
@handle(Keys.Down, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Go to the next history item at the search prompt.
"""
search_line.auto_down()
search_line.cursor_position = len(search_line.text)
line.set_search_text(search_line.text)
@handle(Keys.Left, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Arrow left at the search prompt.
"""
search_line.cursor_left()
@handle(Keys.Right, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Arrow right at the search prompt.
"""
search_line.cursor_right()
@handle(Keys.ControlC, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Cancel search.
"""
line.exit_isearch(restore_original_line=True)
search_line.reset()
event.input_processor.pop_input_mode()
def create_selection_transform_handler(keys, transform_func):
"""
Apply transformation on selection (uppercase, lowercase, rot13, swap case).
"""
@handle(*keys, in_mode=InputMode.SELECTION)
def _(event):
range = line.document.selection_range()
if range:
line.transform_region(range[0], range[1], transform_func)
event.input_processor.pop_input_mode()
for k, f in vi_transform_functions:
create_selection_transform_handler(k, f)
@handle(Keys.ControlX, Keys.ControlL, in_mode=InputMode.INSERT)
def _(event):
"""
Pressing the ControlX - ControlL sequence in Vi mode does line
completion based on the other lines in the document and the history.
"""
line.start_history_lines_completion()
@handle(Keys.ControlX, Keys.ControlF, in_mode=InputMode.INSERT)
def _(event):
"""
Complete file names.
"""
# TODO
pass
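# A hedged wiring sketch (the registry class and the CLI object come from the
# surrounding prompt_toolkit codebase; exact import paths, constructor arguments
# and the weakref style of cli_ref are assumptions, not taken from this file):
#   registry = Registry()
#   vi_bindings(registry, weakref.ref(cli))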
|
bsd-3-clause
| 7,872,446,468,992,112,000
| 34.19001
| 128
| 0.583234
| false
| 3.73017
| false
| false
| false
|
google/vae-seq
|
vaeseq/examples/text/text.py
|
1
|
4187
|
# Copyright 2018 Google, Inc.,
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model sequences of text, character-by-character."""
from __future__ import print_function
import argparse
import itertools
import sys
import tensorflow as tf
from vaeseq.examples.text import hparams as hparams_mod
from vaeseq.examples.text import model as model_mod
def train(flags):
if flags.vocab_corpus is None:
print("NOTE: no --vocab-corpus supplied; using",
repr(flags.train_corpus), "for vocabulary.")
model = model_mod.Model(
hparams=hparams_mod.make_hparams(flags.hparams),
session_params=flags,
vocab_corpus=flags.vocab_corpus or flags.train_corpus)
model.train(flags.train_corpus, flags.num_steps,
valid_dataset=flags.valid_corpus)
def evaluate(flags):
model = model_mod.Model(
hparams=hparams_mod.make_hparams(flags.hparams),
session_params=flags,
vocab_corpus=flags.vocab_corpus)
model.evaluate(flags.eval_corpus, flags.num_steps)
def generate(flags):
hparams = hparams_mod.make_hparams(flags.hparams)
hparams.sequence_size = flags.length
model = model_mod.Model(
hparams=hparams,
session_params=flags,
vocab_corpus=flags.vocab_corpus)
for i, string in enumerate(itertools.islice(model.generate(),
flags.num_samples)):
print("#{:02d}: {}\n".format(i + 1, string))
# Argument parsing code below.
def common_args(args, require_vocab):
model_mod.Model.SessionParams.add_parser_arguments(args)
args.add_argument(
"--hparams", default="",
help="Model hyperparameter overrides.")
args.add_argument(
"--vocab-corpus",
help="Path to the corpus used for vocabulary generation.",
required=require_vocab)
def train_args(args):
common_args(args, require_vocab=False)
args.add_argument(
"--train-corpus",
help="Location of the training text.",
required=True)
args.add_argument(
"--valid-corpus",
help="Location of the validation text.")
args.add_argument(
"--num-steps", type=int, default=int(1e6),
help="Number of training iterations.")
args.set_defaults(entry=train)
def eval_args(args):
common_args(args, require_vocab=True)
args.add_argument(
"--eval-corpus",
help="Location of the training text.",
required=True)
args.add_argument(
"--num-steps", type=int, default=int(1e3),
help="Number of eval iterations.")
args.set_defaults(entry=evaluate)
def generate_args(args):
common_args(args, require_vocab=True)
args.add_argument(
"--length", type=int, default=1000,
help="Length of the generated strings.")
args.add_argument(
"--num-samples", type=int, default=20,
help="Number of strings to generate.")
args.set_defaults(entry=generate)
def main():
args = argparse.ArgumentParser()
subcommands = args.add_subparsers(title="subcommands")
train_args(subcommands.add_parser(
"train", help="Train a model."))
eval_args(subcommands.add_parser(
"evaluate", help="Evaluate a trained model."))
generate_args(subcommands.add_parser(
"generate", help="Generate some text."))
flags, unparsed_args = args.parse_known_args(sys.argv[1:])
if not hasattr(flags, "entry"):
args.print_help()
return 1
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main=lambda _unused_argv: flags.entry(flags),
argv=[sys.argv[0]] + unparsed_args)
if __name__ == "__main__":
main()
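# Hypothetical invocations (the script name and corpus paths are placeholders;
# the flags come from the argparse definitions above):
#   python text.py train --train-corpus train.txt --valid-corpus valid.txt --num-steps 100000
#   python text.py evaluate --vocab-corpus train.txt --eval-corpus test.txt
#   python text.py generate --vocab-corpus train.txt --length 500 --num-samples 5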
|
apache-2.0
| -6,422,594,320,282,468,000
| 31.207692
| 74
| 0.657511
| false
| 3.775473
| false
| false
| false
|
3cky/netdata
|
collectors/python.d.plugin/python_modules/bases/charts.py
|
1
|
12932
|
# -*- coding: utf-8 -*-
# Description:
# Author: Ilya Mashchenko (ilyam8)
# SPDX-License-Identifier: GPL-3.0-or-later
from bases.collection import safe_print
CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type', 'hidden']
DIMENSION_PARAMS = ['id', 'name', 'algorithm', 'multiplier', 'divisor', 'hidden']
VARIABLE_PARAMS = ['id', 'value']
CHART_TYPES = ['line', 'area', 'stacked']
DIMENSION_ALGORITHMS = ['absolute', 'incremental', 'percentage-of-absolute-row', 'percentage-of-incremental-row']
CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n'
CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
"{chart_type} {priority} {update_every} '{hidden}' 'python.d.plugin' '{module_name}'\n"
CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
"{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden} {obsolete}'\n"
DIMENSION_SET = "SET '{id}' = {value}\n"
CHART_VARIABLE_SET = "VARIABLE CHART '{id}' = {value}\n"
RUNTIME_CHART_CREATE = "CHART netdata.runtime_{job_name} '' 'Execution time for {job_name}' 'ms' 'python.d' " \
"netdata.pythond_runtime line 145000 {update_every}\n" \
"DIMENSION run_time 'run time' absolute 1 1\n"
def create_runtime_chart(func):
"""
Calls a wrapped function, then prints runtime chart to stdout.
Used as a decorator for SimpleService.create() method.
    The whole point of making the 'create runtime chart' functionality a decorator was
    to help users who re-implement create() in their classes.
:param func: class method
:return:
"""
def wrapper(*args, **kwargs):
self = args[0]
ok = func(*args, **kwargs)
if ok:
safe_print(RUNTIME_CHART_CREATE.format(job_name=self.name,
update_every=self._runtime_counters.update_every))
return ok
return wrapper
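# A hedged usage sketch (SimpleService, self.charts and self._runtime_counters are
# assumed from the surrounding python.d framework, not defined in this module):
#   class MyService(SimpleService):
#       @create_runtime_chart
#       def create(self):
#           return bool(self.charts)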
class ChartError(Exception):
"""Base-class for all exceptions raised by this module"""
class DuplicateItemError(ChartError):
"""Occurs when user re-adds a chart or a dimension that has already been added"""
class ItemTypeError(ChartError):
"""Occurs when user passes value of wrong type to Chart, Dimension or ChartVariable class"""
class ItemValueError(ChartError):
"""Occurs when user passes inappropriate value to Chart, Dimension or ChartVariable class"""
class Charts:
"""Represent a collection of charts
    All charts are stored in a dict.
    Each chart is an instance of the Chart class.
    Charts must be added using the Charts.add_chart() method only"""
def __init__(self, job_name, priority, cleanup, get_update_every, module_name):
"""
:param job_name: <bound method>
:param priority: <int>
:param get_update_every: <bound method>
"""
self.job_name = job_name
self.priority = priority
self.cleanup = cleanup
self.get_update_every = get_update_every
self.module_name = module_name
self.charts = dict()
def __len__(self):
return len(self.charts)
def __iter__(self):
return iter(self.charts.values())
def __repr__(self):
return 'Charts({0})'.format(self)
def __str__(self):
return str([chart for chart in self.charts])
def __contains__(self, item):
return item in self.charts
def __getitem__(self, item):
return self.charts[item]
def __delitem__(self, key):
del self.charts[key]
def __bool__(self):
return bool(self.charts)
def __nonzero__(self):
return self.__bool__()
def add_chart(self, params):
"""
Create Chart instance and add it to the dict
Manually adds job name, priority and update_every to params.
:param params: <list>
:return:
"""
params = [self.job_name()] + params
new_chart = Chart(params)
new_chart.params['update_every'] = self.get_update_every()
new_chart.params['priority'] = self.priority
new_chart.params['module_name'] = self.module_name
self.priority += 1
self.charts[new_chart.id] = new_chart
return new_chart
def active_charts(self):
return [chart.id for chart in self if not chart.flags.obsoleted]
class Chart:
"""Represent a chart"""
def __init__(self, params):
"""
:param params: <list>
"""
if not isinstance(params, list):
raise ItemTypeError("'chart' must be a list type")
if not len(params) >= 8:
raise ItemValueError("invalid value for 'chart', must be {0}".format(CHART_PARAMS))
self.params = dict(zip(CHART_PARAMS, (p or str() for p in params)))
self.name = '{type}.{id}'.format(type=self.params['type'],
id=self.params['id'])
if self.params.get('chart_type') not in CHART_TYPES:
self.params['chart_type'] = 'absolute'
hidden = str(self.params.get('hidden', ''))
self.params['hidden'] = 'hidden' if hidden == 'hidden' else ''
self.dimensions = list()
self.variables = set()
self.flags = ChartFlags()
self.penalty = 0
def __getattr__(self, item):
try:
return self.params[item]
except KeyError:
raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
attr=item))
def __repr__(self):
return 'Chart({0})'.format(self.id)
def __str__(self):
return self.id
def __iter__(self):
return iter(self.dimensions)
def __contains__(self, item):
return item in [dimension.id for dimension in self.dimensions]
def add_variable(self, variable):
"""
:param variable: <list>
:return:
"""
self.variables.add(ChartVariable(variable))
def add_dimension(self, dimension):
"""
:param dimension: <list>
:return:
"""
dim = Dimension(dimension)
if dim.id in self:
raise DuplicateItemError("'{dimension}' already in '{chart}' dimensions".format(dimension=dim.id,
chart=self.name))
self.refresh()
self.dimensions.append(dim)
return dim
def del_dimension(self, dimension_id, hide=True):
if dimension_id not in self:
return
idx = self.dimensions.index(dimension_id)
dimension = self.dimensions[idx]
if hide:
dimension.params['hidden'] = 'hidden'
dimension.params['obsolete'] = 'obsolete'
self.create()
self.dimensions.remove(dimension)
def hide_dimension(self, dimension_id, reverse=False):
if dimension_id not in self:
return
idx = self.dimensions.index(dimension_id)
dimension = self.dimensions[idx]
dimension.params['hidden'] = 'hidden' if not reverse else str()
self.refresh()
def create(self):
"""
:return:
"""
chart = CHART_CREATE.format(**self.params)
dimensions = ''.join([dimension.create() for dimension in self.dimensions])
variables = ''.join([var.set(var.value) for var in self.variables if var])
self.flags.push = False
self.flags.created = True
safe_print(chart + dimensions + variables)
def can_be_updated(self, data):
for dim in self.dimensions:
if dim.get_value(data) is not None:
return True
return False
def update(self, data, interval):
updated_dimensions, updated_variables = str(), str()
for dim in self.dimensions:
value = dim.get_value(data)
if value is not None:
updated_dimensions += dim.set(value)
for var in self.variables:
value = var.get_value(data)
if value is not None:
updated_variables += var.set(value)
if updated_dimensions:
since_last = interval if self.flags.updated else 0
if self.flags.push:
self.create()
chart_begin = CHART_BEGIN.format(type=self.type, id=self.id, since_last=since_last)
safe_print(chart_begin, updated_dimensions, updated_variables, 'END\n')
self.flags.updated = True
self.penalty = 0
else:
self.penalty += 1
self.flags.updated = False
return bool(updated_dimensions)
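        # Illustrative only: a successful update of chart 'example.cpu' emits a block like
        #   BEGIN example.cpu <interval>
        #   SET 'user' = 12
        #   END
        # where <interval> is the since_last value passed in by the caller.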
def obsolete(self):
self.flags.obsoleted = True
if self.flags.created:
safe_print(CHART_OBSOLETE.format(**self.params))
def refresh(self):
self.penalty = 0
self.flags.push = True
self.flags.obsoleted = False
class Dimension:
"""Represent a dimension"""
def __init__(self, params):
"""
:param params: <list>
"""
if not isinstance(params, list):
raise ItemTypeError("'dimension' must be a list type")
if not params:
raise ItemValueError("invalid value for 'dimension', must be {0}".format(DIMENSION_PARAMS))
self.params = dict(zip(DIMENSION_PARAMS, (p or str() for p in params)))
self.params['name'] = self.params.get('name') or self.params['id']
if self.params.get('algorithm') not in DIMENSION_ALGORITHMS:
self.params['algorithm'] = 'absolute'
if not isinstance(self.params.get('multiplier'), int):
self.params['multiplier'] = 1
if not isinstance(self.params.get('divisor'), int):
self.params['divisor'] = 1
self.params.setdefault('hidden', '')
self.params.setdefault('obsolete', '')
def __getattr__(self, item):
try:
return self.params[item]
except KeyError:
raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
attr=item))
def __repr__(self):
return 'Dimension({0})'.format(self.id)
def __str__(self):
return self.id
def __eq__(self, other):
if not isinstance(other, Dimension):
return self.id == other
return self.id == other.id
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(repr(self))
def create(self):
return DIMENSION_CREATE.format(**self.params)
def set(self, value):
"""
:param value: <str>: must be a digit
:return:
"""
return DIMENSION_SET.format(id=self.id,
value=value)
def get_value(self, data):
try:
return int(data[self.id])
except (KeyError, TypeError):
return None
class ChartVariable:
"""Represent a chart variable"""
def __init__(self, params):
"""
:param params: <list>
"""
if not isinstance(params, list):
raise ItemTypeError("'variable' must be a list type")
if not params:
raise ItemValueError("invalid value for 'variable' must be: {0}".format(VARIABLE_PARAMS))
self.params = dict(zip(VARIABLE_PARAMS, params))
self.params.setdefault('value', None)
def __getattr__(self, item):
try:
return self.params[item]
except KeyError:
raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
attr=item))
def __bool__(self):
return self.value is not None
def __nonzero__(self):
return self.__bool__()
def __repr__(self):
return 'ChartVariable({0})'.format(self.id)
def __str__(self):
return self.id
def __eq__(self, other):
if isinstance(other, ChartVariable):
return self.id == other.id
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(repr(self))
def set(self, value):
return CHART_VARIABLE_SET.format(id=self.id,
value=value)
def get_value(self, data):
try:
return int(data[self.id])
except (KeyError, TypeError):
return None
class ChartFlags:
def __init__(self):
self.push = True
self.created = False
self.updated = False
self.obsoleted = False
|
gpl-3.0
| -8,566,432,299,610,067,000
| 30.773956
| 113
| 0.564259
| false
| 4.093701
| false
| false
| false
|
roaet/wafflehaus.neutron
|
tests/test_neutron_context.py
|
1
|
11693
|
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from wafflehaus.try_context import context_filter
import webob.exc
from tests import test_base
class TestNeutronContext(test_base.TestBase):
def setUp(self):
super(TestNeutronContext, self).setUp()
adv_svc_patch = mock.patch(
"neutron.policy.check_is_advsvc")
self.adv_svc = adv_svc_patch.start()
self.adv_svc.return_value = False
self.app = mock.Mock()
self.app.return_value = "OK"
self.start_response = mock.Mock()
self.neutron_cls = "wafflehaus.neutron.context.%s.%s" % (
"neutron_context", "NeutronContextFilter")
self.strat_neutron = {"context_strategy": self.neutron_cls,
'enabled': 'true'}
self.strat_neutron_a = {"context_strategy": self.neutron_cls,
'enabled': 'true',
'require_auth_info': 'true'}
def test_create_strategy_neutron(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json',
'X_USER_ID': 'derp', }
result.__call__.request('/', method='HEAD', headers=headers)
context = result.strat_instance.context
self.assertTrue(context.is_admin)
def test_create_strategy_neutron_no_user_no_role(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json', }
resp = result.__call__.request('/', method='HEAD', headers=headers)
context = result.strat_instance.context
self.assertTrue(context.is_admin)
self.assertEqual(self.app, resp)
def test_create_strategy_neutron_with_no_roles(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json',
'X_ROLES': None, }
resp = result.__call__.request('/', method='HEAD', headers=headers)
context = result.strat_instance.context
self.assertTrue(context.is_admin)
self.assertEqual(self.app, resp)
def test_create_strategy_neutron_with_empty_roles(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json',
'X_ROLES': '', }
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
context = result.strat_instance.context
self.assertTrue(context.is_admin)
self.assertTrue(hasattr(context, 'roles'))
def test_create_strategy_neutron_with_role(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json',
'X_ROLES': 'testrole', }
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
context = result.strat_instance.context
self.assertTrue(context.is_admin)
self.assertTrue(hasattr(context, 'roles'))
self.assertTrue('testrole' in context.roles)
def test_create_strategy_neutron_with_roles(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json',
'X_ROLES': 'testrole, testrole2', }
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
context = result.strat_instance.context
self.assertTrue(hasattr(context, 'roles'))
self.assertTrue('testrole' in context.roles)
self.assertTrue('testrole2' in context.roles)
self.assertTrue(context.is_admin)
self.assertEqual(2, len(context.roles))
def test_requires_auth_will_fail_without_info(self):
result = context_filter.filter_factory(self.strat_neutron_a)(self.app)
self.assertIsNotNone(result)
headers = {'Content-Type': 'application/json',
'X_ROLES': 'testrole, testrole2', }
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertTrue(isinstance(resp, webob.exc.HTTPForbidden))
def test_requires_auth_is_admin(self):
result = context_filter.filter_factory(self.strat_neutron_a)(self.app)
self.assertIsNotNone(result)
headers = {'Content-Type': 'application/json',
'X_TENANT_ID': '123456',
'X_USER_ID': 'foo',
'X_ROLES': 'testrole, testrole2', }
policy_check = self.create_patch('neutron.policy.check_is_admin')
policy_check.return_value = True
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
self.assertEqual(1, policy_check.call_count)
context = result.strat_instance.context
self.assertTrue(hasattr(context, 'roles'))
self.assertTrue('testrole' in context.roles)
self.assertTrue('testrole2' in context.roles)
self.assertTrue(context.is_admin)
self.assertEqual(2, len(context.roles))
def test_requires_auth_is_not_admin(self):
result = context_filter.filter_factory(self.strat_neutron_a)(self.app)
self.assertIsNotNone(result)
headers = {'Content-Type': 'application/json',
'X_TENANT_ID': '123456',
'X_USER_ID': 'foo',
'X_ROLES': 'testrole, testrole2', }
policy_check = self.create_patch('neutron.policy.check_is_admin')
policy_check.return_value = False
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
self.assertEqual(2, policy_check.call_count)
context = result.strat_instance.context
self.assertTrue(hasattr(context, 'roles'))
self.assertTrue('testrole' in context.roles)
self.assertTrue('testrole2' in context.roles)
self.assertFalse(context.is_admin)
self.assertEqual(2, len(context.roles))
def test_verify_non_duplicate_request_id_non_admin(self):
result = context_filter.filter_factory(self.strat_neutron_a)(self.app)
self.assertIsNotNone(result)
headers = {'Content-Type': 'application/json',
'X_TENANT_ID': '123456',
'X_USER_ID': 'foo',
'X_ROLES': 'testrole, testrole2', }
policy_check = self.create_patch('neutron.policy.check_is_admin')
policy_check.return_value = False
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
self.assertEqual(2, policy_check.call_count)
context = result.strat_instance.context
self.assertTrue(hasattr(context, 'roles'))
self.assertTrue('testrole' in context.roles)
self.assertTrue('testrole2' in context.roles)
self.assertFalse(context.is_admin)
self.assertEqual(2, len(context.roles))
# Generate another call in order to force oslo.context to refresh
# the _request_store, which in turn generates a new request_id
resp = result.__call__.request('/', method='HEAD', headers=headers)
context1 = result.strat_instance.context
self.assertNotEqual(context.request_id, context1.request_id)
def test_verify_non_duplicate_request_id_admin(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json', }
resp = result.__call__.request('/', method='HEAD', headers=headers)
context = result.strat_instance.context
self.assertTrue(context.is_admin)
self.assertEqual(self.app, resp)
# Generate another call in order to force oslo.context to refresh
# the _request_store, which in turn generates a new request_id
resp = result.__call__.request('/', method='HEAD', headers=headers)
context1 = result.strat_instance.context
self.assertNotEqual(context.request_id, context1.request_id)
def test_is_not_admin_policy_check_true(self):
result = context_filter.filter_factory(self.strat_neutron_a)(self.app)
self.assertIsNotNone(result)
headers = {'Content-Type': 'application/json',
'X_TENANT_ID': '123456',
'X_USER_ID': 'foo',
'X_ROLES': 'testrole, testrole2', }
policy_check = self.create_patch('neutron.policy.check_is_admin')
# First return value sets is_admin to False, second value sets
# is_admin to True
policy_check.side_effect = [False, True]
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
self.assertEqual(2, policy_check.call_count)
context = result.strat_instance.context
self.assertTrue(hasattr(context, 'roles'))
self.assertTrue('testrole' in context.roles)
self.assertTrue('testrole2' in context.roles)
self.assertTrue(context.is_admin)
self.assertEqual(2, len(context.roles))
def test_advsvc_is_false_when_admin_and_not_advsvc_role(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json'}
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
context = result.strat_instance.context
self.assertFalse(context.is_advsvc)
def test_advsvc_is_true_when_policy_says_it_is(self):
self.adv_svc.return_value = True
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json'}
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
context = result.strat_instance.context
self.assertTrue(context.is_advsvc)
|
apache-2.0
| -5,572,070,798,425,745,000
| 48.130252
| 78
| 0.642863
| false
| 3.934388
| true
| false
| false
|
wgwoods/fedup2
|
setup.py
|
1
|
2636
|
#!/usr/bin/python
from distutils.core import setup, Command
from distutils.util import convert_path
from distutils.command.build_scripts import build_scripts
from distutils import log
import os
from os.path import join, basename
from subprocess import check_call
class Gettext(Command):
description = "Use po/POTFILES.in to generate po/<name>.pot"
user_options = []
def initialize_options(self):
self.encoding = 'UTF-8'
self.po_dir = 'po'
self.add_comments = True
def finalize_options(self):
pass
def _xgettext(self, opts):
name = self.distribution.get_name()
version = self.distribution.get_version()
email = self.distribution.get_author_email()
cmd = ['xgettext', '--default-domain', name, '--package-name', name,
'--package-version', version, '--msgid-bugs-address', email,
'--from-code', self.encoding,
'--output', join(self.po_dir, name + '.pot')]
if self.add_comments:
cmd.append('--add-comments')
check_call(cmd + opts)
def run(self):
self._xgettext(['-f', 'po/POTFILES.in'])
class Msgfmt(Command):
description = "Generate po/*.mo from po/*.po"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
po_dir = 'po'
for po in os.listdir(po_dir):
po = join(po_dir, po)
if po.endswith('.po'):
mo = po[:-3]+'.mo'
check_call(['msgfmt', '-vv', po, '-o', mo])
class BuildScripts(build_scripts):
def run(self):
build_scripts.run(self)
for script in self.scripts:
script = convert_path(script)
outfile = join(self.build_dir, basename(script))
if os.path.exists(outfile) and outfile.endswith(".py"):
newfile = outfile[:-3] # drop .py
log.info("renaming %s -> %s", outfile, basename(newfile))
os.rename(outfile, newfile)
version='v0'
try:
exec(open("fedup2/version.py").read())
except IOError:
pass
setup(name="fedup2",
version=version,
description="Fedora Upgrade",
long_description="",
author="Will Woods",
author_email="wwoods@redhat.com",
url="https://github.com/wgwoods/fedup2",
download_url="https://github.com/wgwoods/fedup2/downloads",
license="GPLv2+",
packages=["fedup2"],
scripts=["fedup2.py"],
cmdclass={
'gettext': Gettext,
'msgfmt': Msgfmt,
'build_scripts': BuildScripts,
}
)
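# Illustrative use of the custom commands registered in cmdclass above
# (run from the project root; paths follow the 'po' defaults in the classes):
#   python setup.py gettext   # writes po/fedup2.pot from po/POTFILES.in
#   python setup.py msgfmt    # compiles every po/*.po into a po/*.mo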
|
gpl-2.0
| -929,589,590,876,869,800
| 28.954545
| 76
| 0.581942
| false
| 3.691877
| false
| false
| false
|
hopshadoop/hops-util-py
|
hops/experiment_impl/distribute/parameter_server_reservation.py
|
1
|
11016
|
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import logging
import pickle
import select
import socket
import struct
import threading
import time
from hops import util
from hops.experiment_impl.util import experiment_utils
MAX_RETRIES = 3
BUFSIZE = 1024*2
class Reservations:
"""Thread-safe store for node reservations."""
def __init__(self, required):
"""
Args:
:required: expected number of node reservations.
"""
self.required = required
self.lock = threading.RLock()
self.reservations = []
self.cluster_spec = {}
self.check_done = False
def add(self, meta):
"""Add a reservation.
Args:
:meta: a dictionary of metadata about a node
"""
with self.lock:
self.reservations.append(meta)
if self.remaining() == 0:
gpus_present = False
for entry in self.reservations:
if entry["gpus_present"] == True:
gpus_present = True
break
cluster_spec = {"chief": [], "ps": [], "worker": []}
if not gpus_present:
added_chief=False
for entry in self.reservations:
if entry["task_type"] == "ps":
cluster_spec["ps"].append(entry["host_port"])
elif added_chief == False and entry["task_type"] == "worker":
cluster_spec["chief"].append(entry["host_port"])
added_chief = True
else:
cluster_spec["worker"].append(entry["host_port"])
else:
added_chief=False
# switch Worker without GPU with PS with GPU
for possible_switch in self.reservations:
if possible_switch["task_type"] == "worker" and possible_switch["gpus_present"] == False:
for candidate in self.reservations:
if candidate["task_type"] == "ps" and candidate["gpus_present"] == True:
candidate["task_type"] = "worker"
possible_switch["task_type"] = "ps"
break
for entry in self.reservations:
if entry["task_type"] == "worker" and entry["gpus_present"] == True and added_chief == False:
added_chief=True
cluster_spec["chief"].append(entry["host_port"])
elif entry["task_type"] == "worker" and entry["gpus_present"] == True:
cluster_spec["worker"].append(entry["host_port"])
elif entry["task_type"] == "ps" and entry["gpus_present"] == False:
cluster_spec["ps"].append(entry["host_port"])
self.cluster_spec = cluster_spec
self.check_done = True
def done(self):
"""Returns True if the ``required`` number of reservations have been fulfilled."""
with self.lock:
return self.check_done
def get(self):
"""Get the list of current reservations."""
with self.lock:
return self.cluster_spec
def remaining(self):
"""Get a count of remaining/unfulfilled reservations."""
with self.lock:
num_registered = len(self.reservations)
return self.required - num_registered
class WorkerFinished:
"""Thread-safe store for node reservations."""
def __init__(self, required):
"""
Args:
:required: expected number of nodes in the cluster.
"""
self.required = required
self.lock = threading.RLock()
self.finished = 0
self.check_done = False
def add(self):
"""Add a reservation.
Args:
:meta: a dictonary of metadata about a node
"""
with self.lock:
self.finished = self.finished + 1
if self.remaining() == 0:
self.check_done = True
def done(self):
"""Returns True if the ``required`` number of reservations have been fulfilled."""
with self.lock:
return self.check_done
def remaining(self):
"""Get a count of remaining/unfulfilled reservations."""
with self.lock:
return self.required - self.finished
class MessageSocket(object):
"""Abstract class w/ length-prefixed socket send/receive functions."""
def receive(self, sock):
"""
Receive a message on ``sock``
Args:
sock:
Returns:
"""
msg = None
data = b''
recv_done = False
recv_len = -1
while not recv_done:
buf = sock.recv(BUFSIZE)
if buf is None or len(buf) == 0:
raise Exception("socket closed")
if recv_len == -1:
recv_len = struct.unpack('>I', buf[:4])[0]
data += buf[4:]
recv_len -= len(data)
else:
data += buf
recv_len -= len(buf)
recv_done = (recv_len == 0)
msg = pickle.loads(data)
return msg
def send(self, sock, msg):
"""
Send ``msg`` to destination ``sock``.
Args:
sock:
msg:
Returns:
"""
data = pickle.dumps(msg)
buf = struct.pack('>I', len(data)) + data
sock.sendall(buf)
class Server(MessageSocket):
"""Simple socket server with length prefixed pickle messages"""
reservations = None
done = False
def __init__(self, count):
"""
Args:
count:
"""
assert count > 0
self.reservations = Reservations(count)
self.worker_finished = WorkerFinished(util.num_executors() - util.num_param_servers())
def await_reservations(self, sc, status={}, timeout=600):
"""
Block until all reservations are received.
Args:
sc:
status:
timeout:
Returns:
"""
timespent = 0
while not self.reservations.done():
logging.info("waiting for {0} reservations".format(self.reservations.remaining()))
# check status flags for any errors
if 'error' in status:
sc.cancelAllJobs()
#sc.stop()
#sys.exit(1)
time.sleep(1)
timespent += 1
if (timespent > timeout):
raise Exception("timed out waiting for reservations to complete")
logging.info("all reservations completed")
return self.reservations.get()
def _handle_message(self, sock, msg):
"""
Args:
sock:
msg:
Returns:
"""
logging.debug("received: {0}".format(msg))
msg_type = msg['type']
if msg_type == 'REG':
self.reservations.add(msg['data'])
MessageSocket.send(self, sock, 'OK')
elif msg_type == 'REG_DONE':
self.worker_finished.add()
MessageSocket.send(self, sock, 'OK')
elif msg_type == 'QUERY':
MessageSocket.send(self, sock, self.reservations.done())
elif msg_type == 'QUERY_DONE':
MessageSocket.send(self, sock, self.worker_finished.done())
elif msg_type == 'QINFO':
rinfo = self.reservations.get()
MessageSocket.send(self, sock, rinfo)
elif msg_type == 'STOP':
logging.info("setting server.done")
MessageSocket.send(self, sock, 'OK')
self.done = True
else:
MessageSocket.send(self, sock, 'ERR')
def start(self):
"""
Start listener in a background thread
Returns:
address of the Server as a tuple of (host, port)
"""
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_sock.bind(('', 0))
server_sock.listen(10)
# hostname may not be resolvable but IP address probably will be
host = experiment_utils._get_ip_address()
port = server_sock.getsockname()[1]
addr = (host,port)
def _listen(self, sock):
CONNECTIONS = []
CONNECTIONS.append(sock)
while not self.done:
read_socks, write_socks, err_socks = select.select(CONNECTIONS, [], [], 60)
for sock in read_socks:
if sock == server_sock:
client_sock, client_addr = sock.accept()
CONNECTIONS.append(client_sock)
logging.debug("client connected from {0}".format(client_addr))
else:
try:
msg = self.receive(sock)
self._handle_message(sock, msg)
except Exception as e:
logging.debug(e)
sock.close()
CONNECTIONS.remove(sock)
server_sock.close()
t = threading.Thread(target=_listen, args=(self, server_sock))
t.daemon = True
t.start()
return addr
def stop(self):
"""Stop the Server's socket listener."""
self.done = True
class Client(MessageSocket):
"""Client to register and await node reservations.
Args:
:server_addr: a tuple of (host, port) pointing to the Server.
"""
sock = None #: socket to server TCP connection
server_addr = None #: address of server
def __init__(self, server_addr):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(server_addr)
self.server_addr = server_addr
logging.info("connected to server at {0}".format(server_addr))
def _request(self, msg_type, msg_data=None):
"""Helper function to wrap msg w/ msg_type."""
msg = {}
msg['type'] = msg_type
if msg_data or ((msg_data == True) or (msg_data == False)):
msg['data'] = msg_data
done = False
tries = 0
while not done and tries < MAX_RETRIES:
try:
MessageSocket.send(self, self.sock, msg)
done = True
except socket.error as e:
tries += 1
if tries >= MAX_RETRIES:
raise
print("Socket error: {}".format(e))
self.sock.close()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(self.server_addr)
logging.debug("sent: {0}".format(msg))
resp = MessageSocket.receive(self, self.sock)
logging.debug("received: {0}".format(resp))
return resp
def close(self):
"""Close the client socket."""
self.sock.close()
def register(self, reservation):
"""
Register ``reservation`` with server.
Args:
reservation:
Returns:
"""
resp = self._request('REG', reservation)
return resp
def register_worker_finished(self):
"""
Register ``worker as finished`` with server.
Returns:
"""
resp = self._request('REG_DONE')
return resp
def await_all_workers_finished(self):
"""
Poll until all reservations completed, then return cluster_info.
Returns:
"""
done = False
while not done:
done = self._request('QUERY_DONE')
time.sleep(5)
return True
def get_reservations(self):
"""
Get current list of reservations.
Returns:
"""
cluster_info = self._request('QINFO')
return cluster_info
def await_reservations(self):
"""Poll until all reservations completed, then return cluster_info."""
done = False
while not done:
done = self._request('QUERY')
time.sleep(1)
reservations = self.get_reservations()
return reservations
def request_stop(self):
"""Request server stop."""
resp = self._request('STOP')
return resp
|
apache-2.0
| 5,554,518,278,654,147,000
| 25.291169
| 105
| 0.590051
| false
| 3.97546
| false
| false
| false
|
sloria/osf.io
|
osf/migrations/0113_add_view_collectionprovider_to_admin_perm.py
|
1
|
2258
|
from __future__ import unicode_literals
import logging
from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations
from django.db.models import Q
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
logger = logging.getLogger(__file__)
def get_new_read_only_permissions():
return Permission.objects.filter(
Q(codename='view_collectionprovider')
)
def get_new_admin_permissions():
return Permission.objects.filter(
Q(codename='change_collectionprovider') |
Q(codename='delete_collectionprovider')
)
def add_group_permissions(*args):
# this is to make sure that the permissions created in an earlier migration exist!
emit_post_migrate_signal(2, False, 'default')
# Add permissions for the read only group
read_only_group = Group.objects.get(name='read_only')
[read_only_group.permissions.add(perm) for perm in get_new_read_only_permissions()]
read_only_group.save()
logger.info('Collection Provider permissions added to read only group')
# Add permissions for new OSF Admin group - can perform actions
admin_group = Group.objects.get(name='osf_admin')
[admin_group.permissions.add(perm) for perm in get_new_read_only_permissions()]
[admin_group.permissions.add(perm) for perm in get_new_admin_permissions()]
admin_group.save()
logger.info('Administrator permissions for Collection Providers added to admin group')
def remove_group_permissions(*args):
# remove the read only group
read_only_group = Group.objects.get(name='read_only')
[read_only_group.permissions.remove(perm) for perm in get_new_read_only_permissions()]
read_only_group.save()
# remove the osf admin group
admin_group = Group.objects.get(name='osf_admin')
[admin_group.permissions.remove(perm) for perm in get_new_read_only_permissions()]
[admin_group.permissions.remove(perm) for perm in get_new_admin_permissions()]
admin_group.save()
class Migration(migrations.Migration):
dependencies = [
('osf', '0112_alter_collectionprovider_permissions'),
]
operations = [
migrations.RunPython(add_group_permissions, remove_group_permissions),
]
|
apache-2.0
| 860,804,156,702,227,200
| 33.738462
| 90
| 0.723206
| false
| 3.866438
| false
| false
| false
|
rlbabyuk/integration_tests
|
scripts/cleanup_edomain_templates.py
|
1
|
12391
|
#!/usr/bin/env python2
"""This script takes an provider and edomain as optional parameters, and
searches for old templates on specified provider's export domain and deletes
them. If no --provider parameter is specified, then this script
traverses all the rhevm providers in cfme_data.
"""
import argparse
import datetime
import pytz
from threading import Lock, Thread
from utils import net
from utils.conf import cfme_data, credentials
from utils.log import logger
from utils.providers import get_mgmt
from utils.ssh import SSHClient
from utils.wait import wait_for
lock = Lock()
def make_ssh_client(provider_mgmt):
creds = credentials[provider_mgmt.kwargs.get('ssh_creds', None)]
connect_kwargs = {
'username': creds['username'],
'password': creds['password'],
'hostname': provider_mgmt.kwargs.get('ipaddress')
}
return SSHClient(**connect_kwargs)
def parse_cmd_line():
parser = argparse.ArgumentParser(argument_default=None)
parser.add_argument("--edomain", dest="edomain",
help="Export domain for the template", default=None)
parser.add_argument("--provider", dest="provider",
help="Rhevm provider (to look for in cfme_data)",
default=None)
parser.add_argument("--days-old", dest="days_old",
help="number of days old for templates to be deleted, "
"e.g. --days-old 4 deletes templates created more than 4 days ago",
default=3)
parser.add_argument("--max-templates", dest="max_templates",
help="max number of templates to be deleted at a time, "
"e.g. --max-templates 6 deletes 6 templates at a time",
default=5)
args = parser.parse_args()
return args
def is_ovirt_engine_running(provider_mgmt):
try:
with make_ssh_client(provider_mgmt) as ssh_client:
stdout = ssh_client.run_command('systemctl status ovirt-engine')[1]
# fallback to sysV commands if necessary
if 'command not found' in stdout:
stdout = ssh_client.run_command('service ovirt-engine status')[1]
return 'running' in stdout
except Exception as e:
logger.exception(e)
return False
def change_edomain_state(provider_mgmt, state, edomain):
try:
# fetch name for logging
provider_name = provider_mgmt.kwargs.get('name', None)
log_args = (provider_name, edomain, state)
api = provider_mgmt.api
dcs = api.datacenters.list()
for dc in dcs:
export_domain = dc.storagedomains.get(edomain)
if export_domain:
if state == 'maintenance' and export_domain.get_status().state == 'active':
dc.storagedomains.get(edomain).deactivate()
elif state == 'active' and export_domain.get_status().state != 'active':
dc.storagedomains.get(edomain).activate()
wait_for(is_edomain_in_state, [api, state, edomain], fail_condition=False, delay=5)
print('RHEVM:{}, domain {} set to "{}" state'.format(*log_args))
return True
return False
except Exception as e:
print(e)
print('RHEVM:{} Exception setting domain {} to "{}" state'.format(*log_args))
return False
def is_edomain_in_state(api, state, edomain):
dcs = api.datacenters.list()
for dc in dcs:
export_domain = dc.storagedomains.get(edomain)
if export_domain:
return export_domain.get_status().state == state
return False
# get the domain edomain path on the rhevm
def get_edomain_path(api, edomain):
edomain_id = api.storagedomains.get(edomain).get_id()
edomain_conn = api.storagedomains.get(edomain).storageconnections.list()[0]
return ('{}/{}'.format(edomain_conn.get_path(), edomain_id),
edomain_conn.get_address())
def cleanup_empty_dir_on_edomain(provider_mgmt, edomain):
"""Cleanup all the empty directories on the edomain/edomain_id/master/vms
else api calls will result in 400 Error with ovf not found,
Args:
provider_mgmt: provider object under execution
edomain: domain on which to operate
"""
try:
# We'll use this for logging
provider_name = provider_mgmt.kwargs.get('name', None)
# get path first
path, edomain_ip = get_edomain_path(provider_mgmt.api, edomain)
edomain_path = '{}:{}'.format(edomain_ip, path)
command = 'mkdir -p ~/tmp_filemount &&'
command += 'mount -O tcp {} ~/tmp_filemount &&'.format(edomain_path)
command += 'find ~/tmp_filemount/master/vms/ -maxdepth 1 -type d -empty -delete &&'
command += 'cd ~ && umount ~/tmp_filemount &&'
command += 'find . -maxdepth 1 -name tmp_filemount -type d -empty -delete'
print('RHEVM:{} Deleting empty directories on edomain/vms file path {}'
.format(provider_name, path))
with make_ssh_client(provider_mgmt) as ssh_client:
exit_status, output = ssh_client.run_command(command)
if exit_status != 0:
print('RHEVM:{} Error deleting empty directories on path {}'
.format(provider_name, path))
print(output)
return False
print('RHEVM:{} successfully deleted empty directories on path {}'
.format(provider_name, path))
except Exception as e:
print(e)
return False
def is_edomain_template_deleted(api, name, edomain):
"""Checks for the templates delete status on edomain.
Args:
api: API for RHEVM.
name: template_name
edomain: Export domain of selected RHEVM provider.
"""
return not api.storagedomains.get(edomain).templates.get(name)
def delete_edomain_templates(api, template, edomain):
"""deletes the template on edomain.
Args:
api: API for RHEVM.
template: the template object to delete
edomain: Export domain of selected RHEVM provider.
"""
with lock:
creation_time = template.get_creation_time().strftime("%d %B-%Y")
name = template.get_name()
print('Deleting {} created on {} ...'.format(name, creation_time))
try:
template.delete()
print('waiting for {} to be deleted..'.format(name))
wait_for(is_edomain_template_deleted, [api, name, edomain], fail_condition=False, delay=5)
print('RHEVM: successfully deleted template {} on domain {}'.format(name, edomain))
except Exception as e:
with lock:
print('RHEVM: Exception deleting template {} on domain {}'.format(name, edomain))
logger.exception(e)
def cleanup_templates(api, edomain, days, max_templates):
try:
templates = api.storagedomains.get(edomain).templates.list()
thread_queue = []
delete_templates = []
for template in templates:
delta = datetime.timedelta(days=days)
now = datetime.datetime.now(pytz.utc)
template_creation_time = template.get_creation_time().astimezone(pytz.utc)
if template.get_name().startswith('auto-tmp'):
if now > (template_creation_time + delta):
delete_templates.append(template)
if not delete_templates:
print("RHEVM: No old templates to delete in {}".format(edomain))
for delete_template in delete_templates[:max_templates]:
thread = Thread(target=delete_edomain_templates,
args=(api, delete_template, edomain))
thread.daemon = True
thread_queue.append(thread)
thread.start()
for thread in thread_queue:
thread.join()
except Exception as e:
logger.exception(e)
return False
def api_params_resolution(item_list, item_name, item_param):
"""Picks and prints info about parameter obtained by api call.
Args:
item_list: List of possible candidates to pick from.
item_name: Name of parameter obtained by api call.
item_param: Name of parameter representing data in the script.
"""
if len(item_list) == 0:
print("RHEVM: Cannot find {} ({}) automatically.".format(item_name, item_param))
print("Please specify it by cmd-line parameter '--{}' or in cfme_data.".format(item_param))
return None
elif len(item_list) > 1:
print("RHEVM: Found multiple of {}. Picking first, '{}'.".format(item_name, item_list[0]))
else:
print("RHEVM: Found {}: '{}'.".format(item_name, item_list[0]))
return item_list[0]
def get_edomain(api):
"""Discovers suitable export domain automatically.
Args:
api: API to RHEVM instance.
"""
edomain_names = []
for domain in api.storagedomains.list(status=None):
if domain.get_type() == 'export':
edomain_names.append(domain.get_name())
return api_params_resolution(edomain_names, 'export domain', 'edomain')
def make_kwargs(args, cfme_data, **kwargs):
"""Assembles all the parameters in case of running as a standalone script.
Makes sure, that the parameters given by command-line arguments
have higher priority.Makes sure, that all the needed parameters
have proper values.
Args:
args: Arguments given from command-line.
cfme_data: Data in cfme_data.yaml
kwargs: Kwargs generated from
cfme_data['template_upload']['template_upload_rhevm']
"""
args_kwargs = dict(args._get_kwargs())
if not kwargs:
return args_kwargs
template_name = kwargs.get('template_name', None)
if template_name is None:
template_name = cfme_data['basic_info']['appliance_template']
kwargs.update(template_name=template_name)
for kkey, kval in kwargs.items():
for akey, aval in args_kwargs.items():
if aval and kkey == akey and kval != aval:
kwargs[akey] = aval
for akey, aval in args_kwargs.items():
if akey not in kwargs.keys():
kwargs[akey] = aval
return kwargs
def run(**kwargs):
"""Calls the functions needed to cleanup templates on RHEVM providers.
This is called either by template_upload_all script, or by main
function.
Args:
**kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm'].
"""
providers = cfme_data['management_systems']
for provider in [prov for prov in providers if providers[prov]['type'] == 'rhevm']:
# If a provider was passed, only cleanup on it, otherwise all rhevm providers
cli_provider = kwargs.get('provider', None)
if cli_provider and cli_provider != provider:
continue
provider_mgmt = get_mgmt(provider)
if not net.is_pingable(provider_mgmt.kwargs.get('ipaddress', None)):
continue
elif not is_ovirt_engine_running(provider_mgmt):
print('ovirt-engine service not running..')
continue
try:
print('connecting to provider, to establish api handler')
edomain = kwargs.get('edomain', None)
if not edomain:
edomain = provider_mgmt.kwargs['template_upload']['edomain']
except Exception as e:
logger.exception(e)
continue
try:
print("\n--------Start of {}--------".format(provider))
cleanup_templates(provider_mgmt.api,
edomain,
kwargs.get('days_old'),
kwargs.get('max_templates'))
finally:
change_edomain_state(provider_mgmt,
'maintenance',
edomain)
cleanup_empty_dir_on_edomain(provider_mgmt, edomain)
change_edomain_state(provider_mgmt,
'active',
edomain)
print("--------End of {}--------\n".format(provider))
print("Provider Execution completed")
if __name__ == "__main__":
args = parse_cmd_line()
kwargs = cfme_data['template_upload']['template_upload_rhevm']
final_kwargs = make_kwargs(args, cfme_data, **kwargs)
run(**final_kwargs)
|
gpl-2.0
| -5,913,989,060,310,183,000
| 35.444118
| 99
| 0.60665
| false
| 4.04406
| false
| false
| false
|
katakumpo/niceredis
|
tests/test_lock.py
|
1
|
5521
|
from __future__ import with_statement
import time
import pytest
from redis.exceptions import LockError, ResponseError
from redis.lock import Lock, LuaLock
class TestLock(object):
lock_class = Lock
def get_lock(self, redis, *args, **kwargs):
kwargs['lock_class'] = self.lock_class
return redis.lock(*args, **kwargs)
def test_lock(self, sr):
lock = self.get_lock(sr, 'foo')
assert lock.acquire(blocking=False)
assert sr.get('foo') == lock.local.token
assert sr.ttl('foo') == -1
lock.release()
assert sr.get('foo') is None
def test_competing_locks(self, sr):
lock1 = self.get_lock(sr, 'foo')
lock2 = self.get_lock(sr, 'foo')
assert lock1.acquire(blocking=False)
assert not lock2.acquire(blocking=False)
lock1.release()
assert lock2.acquire(blocking=False)
assert not lock1.acquire(blocking=False)
lock2.release()
def test_timeout(self, sr):
lock = self.get_lock(sr, 'foo', timeout=10)
assert lock.acquire(blocking=False)
assert 8 < sr.ttl('foo') <= 10
lock.release()
def test_float_timeout(self, sr):
lock = self.get_lock(sr, 'foo', timeout=9.5)
assert lock.acquire(blocking=False)
assert 8 < sr.pttl('foo') <= 9500
lock.release()
def test_blocking_timeout(self, sr):
lock1 = self.get_lock(sr, 'foo')
assert lock1.acquire(blocking=False)
lock2 = self.get_lock(sr, 'foo', blocking_timeout=0.2)
start = time.time()
assert not lock2.acquire()
assert (time.time() - start) > 0.2
lock1.release()
def test_context_manager(self, sr):
# blocking_timeout prevents a deadlock if the lock can't be acquired
# for some reason
with self.get_lock(sr, 'foo', blocking_timeout=0.2) as lock:
assert sr.get('foo') == lock.local.token
assert sr.get('foo') is None
def test_high_sleep_raises_error(self, sr):
"If sleep is higher than timeout, it should raise an error"
with pytest.raises(LockError):
self.get_lock(sr, 'foo', timeout=1, sleep=2)
def test_releasing_unlocked_lock_raises_error(self, sr):
lock = self.get_lock(sr, 'foo')
with pytest.raises(LockError):
lock.release()
def test_releasing_lock_no_longer_owned_raises_error(self, sr):
lock = self.get_lock(sr, 'foo')
lock.acquire(blocking=False)
# manually change the token
sr.set('foo', 'a')
with pytest.raises(LockError):
lock.release()
# even though we errored, the token is still cleared
assert lock.local.token is None
def test_extend_lock(self, sr):
lock = self.get_lock(sr, 'foo', timeout=10)
assert lock.acquire(blocking=False)
assert 8000 < sr.pttl('foo') <= 10000
assert lock.extend(10)
assert 16000 < sr.pttl('foo') <= 20000
lock.release()
def test_extend_lock_float(self, sr):
lock = self.get_lock(sr, 'foo', timeout=10.0)
assert lock.acquire(blocking=False)
assert 8000 < sr.pttl('foo') <= 10000
assert lock.extend(10.0)
assert 16000 < sr.pttl('foo') <= 20000
lock.release()
def test_extending_unlocked_lock_raises_error(self, sr):
lock = self.get_lock(sr, 'foo', timeout=10)
with pytest.raises(LockError):
lock.extend(10)
def test_extending_lock_with_no_timeout_raises_error(self, sr):
lock = self.get_lock(sr, 'foo')
assert lock.acquire(blocking=False)
with pytest.raises(LockError):
lock.extend(10)
lock.release()
def test_extending_lock_no_longer_owned_raises_error(self, sr):
lock = self.get_lock(sr, 'foo')
assert lock.acquire(blocking=False)
sr.set('foo', 'a')
with pytest.raises(LockError):
lock.extend(10)
class TestLuaLock(TestLock):
lock_class = LuaLock
class TestLockClassSelection(object):
def test_lock_class_argument(self, sr):
lock = sr.lock('foo', lock_class=Lock)
assert type(lock) == Lock
lock = sr.lock('foo', lock_class=LuaLock)
assert type(lock) == LuaLock
def test_cached_lualock_flag(self, sr):
try:
sr._use_lua_lock = True
lock = sr.lock('foo')
assert type(lock) == LuaLock
finally:
sr._use_lua_lock = None
def test_cached_lock_flag(self, sr):
try:
sr._use_lua_lock = False
lock = sr.lock('foo')
assert type(lock) == Lock
finally:
sr._use_lua_lock = None
def test_lua_compatible_server(self, sr, monkeypatch):
@classmethod
def mock_register(cls, redis):
return
monkeypatch.setattr(LuaLock, 'register_scripts', mock_register)
try:
lock = sr.lock('foo')
assert type(lock) == LuaLock
assert sr._use_lua_lock is True
finally:
sr._use_lua_lock = None
def test_lua_unavailable(self, sr, monkeypatch):
@classmethod
def mock_register(cls, redis):
raise ResponseError()
monkeypatch.setattr(LuaLock, 'register_scripts', mock_register)
try:
lock = sr.lock('foo')
assert type(lock) == Lock
assert sr._use_lua_lock is False
finally:
sr._use_lua_lock = None
|
mit
| -6,382,778,191,388,930,000
| 31.863095
| 76
| 0.587031
| false
| 3.580415
| true
| false
| false
|
apipanda/openssl
|
app/helpers/marshmallow/convert.py
|
1
|
10248
|
# -*- coding: utf-8 -*-
import functools
import inspect
import uuid
import marshmallow as ma
import sqlalchemy as sa
from marshmallow import fields, validate
from sqlalchemy.dialects import mssql, mysql, postgresql
from .exceptions import ModelConversionError
from .fields import Related
def _is_field(value):
return (
isinstance(value, type) and
issubclass(value, fields.Field)
)
def _has_default(column):
return (
column.default is not None or
column.server_default is not None or
_is_auto_increment(column)
)
def _is_auto_increment(column):
return (
column.table is not None and
column is column.table._autoincrement_column
)
def _postgres_array_factory(converter, data_type):
return functools.partial(
fields.List,
converter._get_field_class_for_data_type(data_type.item_type),
)
def _should_exclude_field(column, fields=None, exclude=None):
if fields and column.key not in fields:
return True
if exclude and column.key in exclude:
return True
return False
class ModelConverter(object):
"""Class that converts a SQLAlchemy model into a dictionary of corresponding
marshmallow `Fields <marshmallow.fields.Field>`.
"""
SQLA_TYPE_MAPPING = {
sa.Enum: fields.Field,
postgresql.BIT: fields.Integer,
postgresql.UUID: fields.UUID,
postgresql.MACADDR: fields.String,
postgresql.INET: fields.String,
postgresql.JSON: fields.Raw,
postgresql.JSONB: fields.Raw,
postgresql.HSTORE: fields.Raw,
postgresql.ARRAY: _postgres_array_factory,
mysql.BIT: fields.Integer,
mysql.YEAR: fields.Integer,
mysql.SET: fields.List,
mysql.ENUM: fields.Field,
mssql.BIT: fields.Integer,
}
if hasattr(sa, 'JSON'):
SQLA_TYPE_MAPPING[sa.JSON] = fields.Raw
DIRECTION_MAPPING = {
'MANYTOONE': False,
'MANYTOMANY': True,
'ONETOMANY': True,
}
def __init__(self, schema_cls=None):
self.schema_cls = schema_cls
@property
def type_mapping(self):
if self.schema_cls:
return self.schema_cls.TYPE_MAPPING
else:
return ma.Schema.TYPE_MAPPING
def fields_for_model(self, model, include_fk=False, fields=None, exclude=None, base_fields=None,
dict_cls=dict):
result = dict_cls()
base_fields = base_fields or {}
for prop in model.__mapper__.iterate_properties:
if _should_exclude_field(prop, fields=fields, exclude=exclude):
continue
if hasattr(prop, 'columns'):
if not include_fk:
# Only skip a column if there is no overridden column
# which does not have a Foreign Key.
for column in prop.columns:
if not column.foreign_keys:
break
else:
continue
field = base_fields.get(prop.key) or self.property2field(prop)
if field:
result[prop.key] = field
return result
def fields_for_table(self, table, include_fk=False, fields=None, exclude=None, base_fields=None,
dict_cls=dict):
result = dict_cls()
base_fields = base_fields or {}
for column in table.columns:
if _should_exclude_field(column, fields=fields, exclude=exclude):
continue
if not include_fk and column.foreign_keys:
continue
field = base_fields.get(column.key) or self.column2field(column)
if field:
result[column.key] = field
return result
def property2field(self, prop, instance=True, field_class=None, **kwargs):
field_class = field_class or self._get_field_class_for_property(prop)
if not instance:
return field_class
field_kwargs = self._get_field_kwargs_for_property(prop)
field_kwargs.update(kwargs)
ret = field_class(**field_kwargs)
if (
hasattr(prop, 'direction') and
self.DIRECTION_MAPPING[prop.direction.name] and
prop.uselist is True
):
ret = fields.List(ret, **kwargs)
return ret
def column2field(self, column, instance=True, **kwargs):
field_class = self._get_field_class_for_column(column)
if not instance:
return field_class
field_kwargs = self.get_base_kwargs()
self._add_column_kwargs(field_kwargs, column)
field_kwargs.update(kwargs)
return field_class(**field_kwargs)
def field_for(self, model, property_name, **kwargs):
prop = model.__mapper__.get_property(property_name)
return self.property2field(prop, **kwargs)
def _get_field_class_for_column(self, column):
return self._get_field_class_for_data_type(column.type)
def _get_field_class_for_data_type(self, data_type):
field_cls = None
types = inspect.getmro(type(data_type))
# First search for a field class from self.SQLA_TYPE_MAPPING
for col_type in types:
if col_type in self.SQLA_TYPE_MAPPING:
field_cls = self.SQLA_TYPE_MAPPING[col_type]
if callable(field_cls) and not _is_field(field_cls):
field_cls = field_cls(self, data_type)
break
else:
# Try to find a field class based on the column's python_type
try:
python_type = data_type.python_type
except NotImplementedError:
python_type = None
if python_type in self.type_mapping:
field_cls = self.type_mapping[python_type]
else:
if hasattr(data_type, 'impl'):
return self._get_field_class_for_data_type(data_type.impl)
raise ModelConversionError(
'Could not find field column of type {0}.'.format(types[0]))
return field_cls
def _get_field_class_for_property(self, prop):
if hasattr(prop, 'direction'):
field_cls = Related
else:
column = prop.columns[0]
field_cls = self._get_field_class_for_column(column)
return field_cls
def _get_field_kwargs_for_property(self, prop):
kwargs = self.get_base_kwargs()
if hasattr(prop, 'columns'):
column = prop.columns[0]
self._add_column_kwargs(kwargs, column)
if hasattr(prop, 'direction'): # Relationship property
self._add_relationship_kwargs(kwargs, prop)
if getattr(prop, 'doc', None): # Useful for documentation generation
kwargs['description'] = prop.doc
return kwargs
def _add_column_kwargs(self, kwargs, column):
"""Add keyword arguments to kwargs (in-place) based on the passed in
`Column <sqlalchemy.schema.Column>`.
"""
if column.nullable:
kwargs['allow_none'] = True
kwargs['required'] = not column.nullable and not _has_default(column)
if hasattr(column.type, 'enums'):
kwargs['validate'].append(
validate.OneOf(choices=column.type.enums))
# Add a length validator if a max length is set on the column
# Skip UUID columns
if hasattr(column.type, 'length'):
try:
python_type = column.type.python_type
except (AttributeError, NotImplementedError):
python_type = None
if not python_type or not issubclass(python_type, uuid.UUID):
kwargs['validate'].append(
validate.Length(max=column.type.length))
if hasattr(column.type, 'scale'):
kwargs['places'] = getattr(column.type, 'scale', None)
def _add_relationship_kwargs(self, kwargs, prop):
"""Add keyword arguments to kwargs (in-place) based on the passed in
relationship `Property`.
"""
nullable = True
for pair in prop.local_remote_pairs:
if not pair[0].nullable:
if prop.uselist is True:
nullable = False
break
kwargs.update({
'allow_none': nullable,
'required': not nullable,
})
def get_base_kwargs(self):
return {
'validate': []
}
default_converter = ModelConverter()
fields_for_model = default_converter.fields_for_model
"""Generate a dict of field_name: `marshmallow.fields.Field` pairs for the
given model.
:param model: The SQLAlchemy model
:param bool include_fk: Whether to include foreign key fields in the output.
:return: dict of field_name: Field instance pairs
"""
property2field = default_converter.property2field
"""Convert a SQLAlchemy `Property` to a field instance or class.
:param Property prop: SQLAlchemy Property.
:param bool instance: If `True`, return `Field` instance, computing relevant kwargs
from the given property. If `False`, return the `Field` class.
:param kwargs: Additional keyword arguments to pass to the field constructor.
:return: A `marshmallow.fields.Field` class or instance.
"""
column2field = default_converter.column2field
"""Convert a SQLAlchemy `Column <sqlalchemy.schema.Column>` to a field instance or class.
:param sqlalchemy.schema.Column column: SQLAlchemy Column.
:param bool instance: If `True`, return `Field` instance, computing relevant kwargs
from the given property. If `False`, return the `Field` class.
:return: A `marshmallow.fields.Field` class or instance.
"""
field_for = default_converter.field_for
"""Convert a property for a mapped SQLAlchemy class to a marshmallow `Field`.
Example: ::
date_created = field_for(Author, 'date_created', dump_only=True)
author = field_for(Book, 'author')
:param type model: A SQLAlchemy mapped class.
:param str property_name: The name of the property to convert.
:param kwargs: Extra keyword arguments to pass to `property2field`
:return: A `marshmallow.fields.Field` class or instance.
"""
|
mit
| -2,935,554,774,744,383,000
| 33.857143
| 100
| 0.613388
| false
| 4.104125
| false
| false
| false
|
kkanellis/uthportal-server
|
uthportal/tasks/base.py
|
1
|
11926
|
import sys
from abc import ABCMeta, abstractmethod
from datetime import datetime
import feedparser
import requests
from requests.exceptions import ConnectionError, Timeout
from uthportal.database.mongo import MongoDatabaseManager
from uthportal.logger import get_logger
from uthportal.util import truncate_str
class BaseTask(object):
__metaclass__ = ABCMeta
def __init__(self, path, settings, database_manager, pushd_client, **kwargs):
self.settings = settings
self.path = path
self.id = path.split('.')[-1]
self.logger = get_logger(self.id, self.settings)
self.timeout = self.settings['network']['timeout']
self.database_manager = database_manager
self.pushd_client = pushd_client
self.db_collection = '.'.join( path.split('.')[:-1] )
self.db_query = { }
for (key, value) in self.db_query_format.iteritems():
if not hasattr(self, value):
self.logger.error('Missing "%s" field defined in db_query_format' % value)
sys.exit(1)
self.db_query[key] = getattr(self, value)
# Load and update database document (if necessary)
self.document = self.load()
if not self.document:
if hasattr(self, 'document_prototype'):
self.logger.info('No document found in database. Using prototype')
self.document = self.document_prototype
self.save()
else:
self.logger.error('No document_prototype is available!')
return
def __call__(self):
"""This is the method called from the Scheduler when this object is
next in queue (and about to be executed)."""
if not hasattr(self, 'document') or not self.document:
self.logger.error('Task has no document attribute or document is empty. Task stalled!')
else:
self.load()
self.update()
def fetch(self, link, session=None, *args, **kwargs):
"""
Fetch a remote document to be parsed later.
This function is called as is from subclasses
"""
if not session:
session = requests.Session()
self.logger.debug('Fetching "%s" ...' % link)
try:
page = session.get(link, *args, timeout=self.timeout, **kwargs)
except ConnectionError:
self.logger.warning('%s: Connection error' % link)
return None
except Timeout:
self.logger.warning('%s: Timeout [%d]' % (link, self.timeout))
return None
if page.status_code is not (200 or 301):
self.logger.warning('%s: Returned [%d]' % (link, page.status_code))
return None
self.logger.debug('Fetched successfully! [%d]' % page.status_code)
# Change page encoding to utf-8 so no special handling for encoding is needed
page.encoding = 'utf-8'
return page.text
@abstractmethod
def update(self, *args, **kwargs):
"""This function is called from __call__. Takes as a key-word argument (kwargs) a dictionary called
new_fields where new data are stored after fecthing procedures. These are compared with the
current data (stored in self.document)"""
# Check if 'new_fields' arg is present
if 'new_fields' in kwargs:
new_fields = kwargs['new_fields']
else:
self.logger.warning('Update method called without "new_fields" dict')
return
# Check if 'new_fields' has the necessary fields
for field in self.update_fields:
if field not in new_fields:
self.logger.error('Field "%s" not present in "new_fields" dict. Stalling task!' % field)
return
# Get self.document's update_fields
old_fields = { field: self._get_document_field(self.document, field)
for field in self.update_fields }
# Check if new data is available
(data_differ, should_notify) = self.process_new_data(new_fields, old_fields)
now = datetime.now()
if data_differ:
self.logger.debug('Archiving old document...')
self.archive()
# Update new fields
self.logger.debug('Updating new fields...')
for field in self.update_fields:
self._set_document_field(self.document, field, new_fields[field])
# Update remaining fields
self._set_document_field(self.document, 'first_updated', now)
self._set_document_field(self.document, 'last_updated', now)
self.logger.debug('Transmitting new document...')
self.transmit()
if should_notify:
self.notify()
else:
self.logger.debug('No new entries found')
self._set_document_field(self.document, 'last_updated', now)
self.save()
self.logger.debug('Task updated successfully!')
self.post_process()
def process_new_data(self, new_fields, old_fields):
"""
Returns tuple (data_differ[bool], should_notify[bool])
data_differ: True if we have differences between new and old data
should_notify: True if we have to send push notification to the client
"""
data_differ = should_notify = False
# Checking for differences in the according update_fields
for field in self.update_fields:
(old_data, new_data) = (old_fields[field], new_fields[field])
if old_data:
if new_data:
if type(old_data) == type(new_data):
if isinstance(new_data, list):
last_updated = self._get_document_field(self.document, 'last_updated')
# Check if new list entries are present in new_data since last update
new_entries = [ entry for entry in new_data if entry not in old_data ]
if new_entries:
differ = True
notify = False
for entry in new_entries:
assert ('date' in entry and 'has_time' in entry)
# Check if entry was published after last update date
# NOTE: Avoid comparing times because of small time
# changes which may occur in production and would
# cause clients not to be notified properly
if entry['date'].date() >= last_updated.date():
notify = True
break
else:
differ = notify = False
else:
differ = notify = True if old_data != new_data else False
else:
self.logger.warning(
'Different type (%s - %s) for the same update field [%s]'
% (type(old_data), type(new_data), field)
)
differ = notify = True
else:
# We shouldn't notify the user because it may be a server error:
# e.g. a problematic parser or an invalid link
differ = True
notify = False
else:
# Data differ only if new_data exist
differ = True if new_data else False
# We notify the user if and only if:
# a) data differ and
# b) task is NOT run for the first time
notify = True if differ and 'first_updated' in self.document else False
if differ:
self.logger.info(
truncate_str( 'New entries in field "%s"' % field, 150 )
)
data_differ = data_differ or differ
should_notify = should_notify or notify
return (data_differ, should_notify)
def notify(self):
self.logger.debug('Notifying clients...')
event_name = self.path
data = {
'event': event_name
}
var = { }
if hasattr(self, 'notify_fields'):
var = {
field: self._get_document_field(self.document, field)
for field in self.notify_fields
}
if not all(var.values()):
self.logger.warning('notify: some var values are None')
success = False
try:
success = self.pushd_client.events[event_name].send(data, var=var)
except KeyError:
self.logger.error('No valid event template exists. Notification NOT sent!')
except ValueError:
self.logger.error('Event name is empty/None. Notification NOT sent!')
if success:
self.logger.info('Notification sent!')
else:
self.logger.error('Notification NOT sent! Check notifier logs')
def post_process(self):
pass
""" Database related method """
def save(self, *args, **kwargs):
"""Save result dictionary in database"""
if not self.database_manager.update_document(
self.db_collection,
self.db_query,
self.document.copy(),
upsert=True,
*args,
**kwargs):
self.logger.warning('Could not save document "%s"' % self.path)
def archive(self, *args, **kwargs):
""" Save the current document into the history collection """
if not self.database_manager.insert_document(
'history.%s' % self.db_collection,
self.document.copy(),
*args,
**kwargs):
self.logger.warning('Could not archive document "%s"' % self.path)
def transmit(self, *args, **kwargs):
""" Save the current document into the server collection free of uneccessary fields """
#TODO: Implement ignore_fields
if not self.database_manager.update_document(
'server.%s' % self.db_collection,
self.db_query,
self.document,
*args, upsert=True, **kwargs):
self.logger.warning('Could not transmit document "%s"' % self.path)
pass
def load(self, *args, **kwargs):
"""Load old dictionary from database"""
document = self.database_manager.find_document(
self.db_collection,
self.db_query,
*args,
**kwargs)
if document and '_id' in document:
del document['_id']
return document
""" Helper methods """
def _set_document_field(self, document, field, value):
""" Sets the field (dot notation format) in the provided document """
keys = field.split('.')
for key in keys[:-1]:
if key not in document:
self.logger.warning('Key "%s" not found in field "%s"' % (key, field))
return
document = document[key]
# Set the field
document[keys[-1]] = value
def _get_document_field(self, document, field):
""" Gets the field (dot notation format) in the provided document """
keys = field.split('.')
for key in keys[:-1]:
if key not in document:
self.logger.warning('Key "%s" not found in field "%s"' % (key, field))
return
document = document[key]
if keys[-1] in document:
return document[keys[-1]]
else:
return None
|
gpl-3.0
| -1,328,568,271,007,929,600
| 35.470948
| 107
| 0.536643
| false
| 4.686051
| false
| false
| false
|
crccheck/project_runpy
|
project_runpy/heidi.py
|
1
|
4119
|
"""
Heidi: Helpers related to visuals.
"""
import logging
__all__ = ['ColorizingStreamHandler', 'ReadableSqlFilter']
# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved. Licensed under the new BSD license.
# https://gist.github.com/758430
class ColorizingStreamHandler(logging.StreamHandler):
# color names to indices
color_map = {
'black': 0,
'red': 1,
'green': 2,
'yellow': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7,
}
# levels to (background, foreground, bold/intense)
level_map = {
logging.DEBUG: (None, 'blue', False),
logging.INFO: (None, 'white', False),
logging.WARNING: (None, 'yellow', False),
logging.ERROR: (None, 'red', False),
logging.CRITICAL: ('red', 'white', True),
}
csi = '\x1b['
reset = '\x1b[0m'
@property
def is_tty(self):
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty()
def emit(self, record):
try:
message = self.format(record)
stream = self.stream
if not self.is_tty:
stream.write(message)
else:
self.output_colorized(message)
stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except: # noqa: E722
self.handleError(record)
def output_colorized(self, message):
self.stream.write(message)
def colorize(self, message, record):
if record.levelno in self.level_map:
bg, fg, bold = self.level_map[record.levelno]
params = []
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
if bold:
params.append('1')
if params:
message = ''.join((self.csi, ';'.join(params),
'm', message, self.reset))
return message
def format(self, record):
message = logging.StreamHandler.format(self, record)
if self.is_tty:
# Don't colorize any traceback
parts = message.split('\n', 1)
parts[0] = self.colorize(parts[0], record)
message = '\n'.join(parts)
return message
# LOGGING FILTERS
#################
class ReadableSqlFilter(logging.Filter):
"""
A filter for more readable sql by stripping out the SELECT ... columns.
Modeled after how debug toolbar displays SQL. This code should be optimized
for performance. For example, I don't check to make sure record.name is
'django.db.backends' because I assume you put this filter alongside it.
Sample Usage in Django's `settings.py`:
LOGGING = {
...
'filters': {
'readable_sql': {
'()': 'project_runpy.ReadableSqlFilter',
},
},
'loggers': {
'django.db.backends': {
'filters': ['readable_sql'],
...
},
...
},
}
"""
def filter(self, record):
# https://github.com/django/django/blob/febe136d4c3310ec8901abecca3ea5ba2be3952c/django/db/backends/utils.py#L106-L131
duration, sql, *__ = record.args
if not sql or 'SELECT' not in sql[:28]:
# WISHLIST what's the most performant way to see if 'SELECT' was
# used?
return super().filter(record)
begin = sql.index('SELECT')
try:
end = sql.index('FROM', begin + 6)
except ValueError: # not all SELECT statements also have a FROM
return super().filter(record)
sql = '{0}...{1}'.format(sql[:begin + 6], sql[end:])
# Drop "; args=%s" to shorten logging output
record.msg = '(%.3f) %s'
record.args = (duration, sql)
return super().filter(record)
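# A minimal usage sketch for the handler above, assuming a TTY for coloured
# output; the logger name is illustrative only.
if __name__ == '__main__':
    _demo = logging.getLogger('project_runpy.demo')
    _demo.setLevel(logging.DEBUG)
    _demo.addHandler(ColorizingStreamHandler())
    _demo.debug('debug renders blue on a TTY')
    _demo.warning('warnings render yellow')
    _demo.critical('critical renders bold white on red')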
|
apache-2.0
| -28,892,168,273,902,668
| 30.442748
| 126
| 0.531925
| false
| 3.975869
| false
| false
| false
|
kimlaborg/NGSKit
|
ngskit/utils/__pycache__/fasta_tools.py
|
1
|
2263
|
"""Fasta Tools
"""
def write_fasta_sequence(sequence_data, file_output, write_mode='a'):
"""Add sequences to a file, in Fasta Format.
Parameters
----------
    sequence_data : str
        Sequence to add to the fasta file. If only the sequence is provided,
        the header is assumed to be irrelevant and a sequence-based one is
        created to avoid collisions.
    sequence_data : array_like
        sequence_data[0] == Header or id of the sequence; if it does not
        contain '>', it will be added.
        sequence_data[1] == Sequence
    file_output: str, obj
        This function can receive either a file handle or a file name. In the
        latter case it will create a file handle, and in both cases the handle
        is left open to improve I/O.
Returns
-------
file_handle : obj
returns the file handler.
Raises
------
ValueError
        Sequence_data should contain two items: header, sequence
Examples
--------
>>> write_fasta_sequence('ATGATGATGA','my_file.fasta')
>>> write_fasta_sequence('ATGATGATGA',open('my_file.fasta', 'a'))
>>> write_fasta_sequence(['SEQ_1', 'ATGATGATGA'],'my_file.fasta')
"""
    # Check the input sequence
    if isinstance(sequence_data, str):
        # create a header using the first 100 characters of the sequence.
        header = sequence_data.strip('\n').strip()[:100]
        sequence_data = [header,
                         sequence_data.strip('\n').strip()]
    if len(sequence_data) < 2:
        raise ValueError("Sequence data must contain at least header and sequence")
# check if a file handelr has been provided
if isinstance(file_output, str):
file_handle = open(file_output, write_mode)
else:
file_handle = file_output
# write the sequence
file_handle.write(">{0}\n{1}\n".format(*sequence_data))
return file_handle
def to_fasta(grp_seq, output, header=False):
"""Transform a batch of sequnces to a fasta format file.
Parameters
----------
    grp_seq : array_like
        Iterable object with sequences
    output : str, obj
        File name or file handle passed on to `write_fasta_sequence`
    """
    if not header:
        for sequence in grp_seq:
            output = write_fasta_sequence(sequence, output, write_mode='a')
    output.close()
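# A minimal usage sketch; file names and sequences below are illustrative only.
#
#   handle = write_fasta_sequence(['SEQ_1', 'ATGATGATGA'], 'example.fasta')
#   handle = write_fasta_sequence(['SEQ_2', 'CCGTAACGT'], handle)
#   handle.close()
#
#   to_fasta(['ATGATGATGA', 'CCGTAACGT'], 'batch.fasta')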
|
mit
| 7,169,794,591,417,631,000
| 26.26506
| 83
| 0.613787
| false
| 4.055556
| false
| false
| false
|
Shpilevskiy/mobile_family_budget
|
backend/django_service/mobile_family_budget/purchaseManager/migrations/0001_initial.py
|
1
|
1567
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-21 21:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Purchase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('count', models.PositiveIntegerField(default=1)),
('price', models.FloatField(default=0)),
('current_count', models.PositiveIntegerField(default=0)),
('status', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='PurchaseList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='Мой список покупок', max_length=30)),
('budget_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.BudgetGroup')),
],
),
migrations.AddField(
model_name='purchase',
name='purchase_list',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='purchaseManager.PurchaseList'),
),
]
|
gpl-3.0
| 7,832,930,612,681,314,000
| 35.928571
| 123
| 0.583495
| false
| 4.237705
| false
| false
| false
|
intelaligent/tctb
|
tctb/classes/agent.py
|
1
|
4353
|
#!/usr/bin/env python2
"""
@file connection.py
@author Bo Gao
@date 2017-07-25
@version alpha
Intersection control agent
Copyright (C) 2017 Transport Research Group, University of Southampton
Intersection Control Test Bed
"""
# if "SUMO_HOME" in os.environ:
# tools = os.path.join(os.environ["SUMO_HOME"], "tools")
# sys.path.append(tools)
# from sumolib import checkBinary # sumo, sumo-gui
# else:
# sys.exit("please declare environment variable 'SUMO_HOME'")
# import traci
import os
import sys
from subprocess import call
class Agent(object):
# _name = ""
def init():
raise NotImplementedError( "Method init not implemented." )
class Agent_Tools:
"""
doc: http://www.sumo.dlr.de/userdoc/DUAROUTER.html
duarouter
-n data/map.sumo.net.xml
-t demands/odTrips.demand.xml
-d add.vTypes.xml
-o demands/odTrips.rou.xml
"""
def trip_to_route(self, scn):
_binary_name = "duarouter"
if "SUMO_HOME" in os.environ:
tools = os.path.join(os.environ["SUMO_HOME"], "tools")
sys.path.append(tools)
from sumolib import checkBinary # sumo, sumo-gui
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
output_route_file = os.path.join(scn.get("dir"), "duarouter_out.rou.xml")
command = []
command.append(checkBinary(_binary_name))
command.append("--net-file")
command.append(scn.get("net_file"))
# command.append("--trip-files")
# command.append(scn.get("demand_file"))
command.append("--additional-files")
command.append(scn.get("add_files"))
command.append("--output-file")
command.append(output_route_file)
print("".join(elt + " " for elt in command))
call(command)
return output_route_file
class Agent_Sumo_Coordinator(Agent):
"""
doc: http://www.sumo.dlr.de/userdoc/Tools/tls.html#tlsCoordinator.py
tlsCoordinator.py
-n data/map.sumo.net.xml
-r demands/randomTrips.rou.xml
-o tls/tls.coordinated.xml
"""
_script_name = "tlsCoordinator.py"
def init(self, scn) :
command = []
tls_offset_file = os.path.join(scn.get("dir"), "tls.offset.sumo_coordinator.xml" )
if "SUMO_HOME" in os.environ:
command.append(os.path.join(os.environ["SUMO_HOME"], "tools", self._script_name))
else:
sys.exit("Agent_Sumo_Coordinator requires environment variable 'SUMO_HOME'")
command.append("--net-file")
command.append(scn.get("net_file"))
command.append("--route-file")
command.append(Agent_Tools().trip_to_route(scn))
command.append("--output-file")
command.append(tls_offset_file)
print("".join(elt + " " for elt in command))
call(command)
scn.add_additional_file(tls_offset_file)
class Agent_Sumo_Cycle_Adaptation(Agent):
"""
doc: http://www.sumo.dlr.de/userdoc/Tools/tls.html#tlsCycleAdaptation.py
tlsCycleAdaptation.py
-n data/map.sumo.net.xml
-r demands/odTrips.rou.xml
-o tls/tls.ca.od.xml
"""
_script_name = "tlsCycleAdaptation.py"
def init(self, scn) :
command = []
tls_new_program_file = os.path.join(scn.get("dir"), "tls.offset.sumo_cycle_adaptation.xml" )
if "SUMO_HOME" in os.environ:
command.append(os.path.join(os.environ["SUMO_HOME"], "tools", self._script_name))
else:
sys.exit("Agent_Sumo_Coordinator requires environment variable 'SUMO_HOME'")
command.append("--net-file")
command.append(scn.get("net_file"))
command.append("--route-file")
command.append(Agent_Tools().trip_to_route(scn))
command.append("--output-file")
command.append(tls_new_program_file)
print("".join(elt + " " for elt in command))
call(command)
scn.add_additional_file(tls_new_program_file)
class AgentManager:
def initialise_agent_for_scenario(self, scn):
return {
"tls_sumo_coordinator" : lambda scn : Agent_Sumo_Coordinator().init(scn),
"tls_sumo_cycle_adaptation" : lambda scn : Agent_Sumo_Cycle_Adaptation().init(scn)
}[scn.get("agent_type")](scn)
|
gpl-3.0
| -1,551,814,368,528,722,000
| 28.02
| 100
| 0.612221
| false
| 3.305239
| false
| false
| false
|
grnet/synnefo
|
docs/conf.py
|
1
|
1973
|
import sys, os
sys.path.insert(0, os.path.abspath('../snf-cyclades-app'))
import synnefo
reload(synnefo)
import synnefo.versions
reload(synnefo.versions)
from synnefo.versions.app import __version__
project = u'synnefo'
copyright = u'2010-2017, GRNET S.A.'
version = __version__
release = __version__
html_title = 'synnefo ' + version
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_theme_options = {
'sidebarwidth': '300',
'collapsiblesidebar': 'true',
'footerbgcolor': '#55b577',
'footertextcolor': '#000000',
'sidebarbgcolor': '#ffffff',
'sidebarbtncolor': '#f2f2f2',
'sidebartextcolor': '#000000',
'sidebarlinkcolor': '#328e4a',
'relbarbgcolor': '#55b577',
'relbartextcolor': '#ffffff',
'relbarlinkcolor': '#ffffff',
'bgcolor': '#ffffff',
'textcolor': '#000000',
'headbgcolor': '#ffffff',
'headtextcolor': '#000000',
'headlinkcolor': '#c60f0f',
'linkcolor': '#328e4a',
'visitedlinkcolor': '#63409b',
'codebgcolor': '#eeffcc',
'codetextcolor': '#333333'
}
htmlhelp_basename = 'synnefodoc'
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'django': ('https://docs.djangoproject.com/en/dev/',
'https://docs.djangoproject.com/en/dev/_objects/')
}
SYNNEFO_PROJECTS = ['synnefo', 'archipelago', 'kamaki', 'snf-image',
'snf-image-creator', 'nfdhcpd', 'snf-vncauthproxy',
'snf-network']
SYNNEFO_DOCS_BASEURL = 'https://www.synnefo.org/docs/%s/latest/objects.inv'
for project in SYNNEFO_PROJECTS:
project_url = SYNNEFO_DOCS_BASEURL % project
intersphinx_mapping[project.replace('-', '')] = (os.path.dirname(project_url), project_url)
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode']
|
gpl-3.0
| -5,990,170,465,250,381,000
| 28.893939
| 95
| 0.631019
| false
| 2.9273
| false
| false
| false
|
FogCreek/solari-board
|
example_realtime/liveFogbugz.py
|
1
|
1937
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
import sys, os
import cgi
import time
def cgi_callback():
params = cgi.parse_qs(os.environ['QUERY_STRING'])
# lastts is the last modified date that this browser has already loaded, 0 means this is an initial request
lastts = 0
if params.has_key('ts'):
lastts = int(params['ts'][0])
# keep count of the number of times waiting so this takes no longer than 30 sec to respond
attempt = 0
ts = ''
while ts == '':
attempt += 1
try:
stats = os.stat('fogbugz.json')
if (attempt > 56 or int(stats.st_mtime) > lastts):
# the file either has new data, or we've been waiting long enough, exit the loop
ts = int(stats.st_mtime)
else:
# the file has no new data, wait a half a second and try again
time.sleep(0.5)
except:
break
if ts == "":
# a file was not found, return invalid JSON to raise an error in the UI
json = 'Show fail whale because refreshFogBugz.py has never been called'
ts = 0
else:
f = open('fogbugz.json')
json = f.read()
f.close()
if json == '':
json = '[]'
print('Content-Type: application/javascript\n')
# remember this last modified ts, so future requests can tell if there's new data
print('URL_SUFFIX = "&ts=%s";' % (ts))
# if responding immediately then kick off another read
if attempt == 1 and not params.has_key('ts'):
print('setTimeout(updateSolariBoard, 1000);')
# support a callback param, or default to "void"
callback = 'void'
if params.has_key('callback'):
callback = params['callback'][0]
# send the json to jQuery's callback
print('%s(%s);' % (callback,json))
if __name__ == '__main__':
cgi_callback()
|
mit
| -8,826,270,226,747,974,000
| 29.746032
| 111
| 0.579763
| false
| 3.874
| false
| false
| false
|
ucloud/uai-sdk
|
examples/tensorflow-2.0/imagenet/train/code/imagenet.py
|
1
|
3432
|
import os
import vgg_preprocessing
import tensorflow as tf
_DEFAULT_IMAGE_SIZE = 224
_NUM_CHANNELS = 3
_LABEL_CLASSES = 1001
_FILE_SHUFFLE_BUFFER = 1024
_SHUFFLE_BUFFER = 1500
class ImagenetDataSet(object):
"""Imagenet data set
"""
def __init__(self, data_dir, subset='train', use_distortion=True):
self.data_dir = data_dir
self.subset = subset
self.use_distortion = use_distortion
def filenames(self, is_training, data_dir):
"""Return filenames for dataset."""
if is_training:
return [
os.path.join(data_dir, 'train-%05d-of-01024' % i)
for i in range(1024)]
else:
return [
os.path.join(data_dir, 'validation-%05d-of-00128' % i)
for i in range(128)]
def parser(self, value, is_training):
"""Parse an ImageNet record from `value`."""
keys_to_features = {
'image/encoded':
tf.compat.v1.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.compat.v1.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/class/label':
tf.compat.v1.FixedLenFeature([], dtype=tf.int64, default_value=-1),
'image/class/text':
tf.compat.v1.FixedLenFeature([], dtype=tf.string, default_value=''),
'image/object/bbox/xmin':
tf.compat.v1.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin':
tf.compat.v1.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax':
tf.compat.v1.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax':
tf.compat.v1.VarLenFeature(dtype=tf.float32),
'image/object/class/label':
tf.compat.v1.VarLenFeature(dtype=tf.int64),
}
parsed = tf.compat.v1.parse_single_example(value, keys_to_features)
image = tf.image.decode_image(
tf.reshape(parsed['image/encoded'], shape=[]),
_NUM_CHANNELS)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = vgg_preprocessing.preprocess_image(
image=image,
output_height=_DEFAULT_IMAGE_SIZE,
output_width=_DEFAULT_IMAGE_SIZE,
is_training=is_training)
label = tf.cast(
tf.reshape(parsed['image/class/label'], shape=[]),
dtype=tf.int32)
return image, label #tf.one_hot(label, _LABEL_CLASSES)
def make_dataset(self, batch_size, is_training, num_shards, num_epochs=1):
data_dir = self.data_dir
shards_batch_size = int(batch_size / num_shards)
"""Input function which provides batches for train or eval."""
dataset = tf.data.Dataset.from_tensor_slices(self.filenames(is_training, data_dir))
if is_training:
dataset = dataset.shuffle(buffer_size=(_FILE_SHUFFLE_BUFFER * num_shards))
dataset = dataset.flat_map(tf.data.TFRecordDataset)
dataset = dataset.map(lambda value: self.parser(value, is_training),
num_parallel_calls=5)
dataset = dataset.prefetch(batch_size * 2)
if is_training:
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes have better performance.
dataset = dataset.shuffle(buffer_size=_SHUFFLE_BUFFER)
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(shards_batch_size)
return dataset
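# A minimal usage sketch; the TFRecord directory and hyper-parameters below are
# illustrative only.
#
#   imagenet = ImagenetDataSet('/path/to/imagenet-tfrecords', subset='train')
#   train_ds = imagenet.make_dataset(batch_size=256, is_training=True,
#                                    num_shards=8, num_epochs=90)
#   images, labels = next(iter(train_ds))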
|
apache-2.0
| -8,043,587,197,140,686,000
| 33.676768
| 87
| 0.644814
| false
| 3.498471
| false
| false
| false
|
OpenTwinCities/site_bot
|
app/Meetup/RSS.py
|
1
|
1767
|
# -*- coding: utf8 -*-
import feedparser
from bs4 import BeautifulSoup
from datetime import datetime
from time import mktime
class MeetupRSS:
MEETUP_DOMAIN = 'www.meetup.com'
def __init__(self, group_id):
self.group_id = group_id
self.__rss__ = None
self.__events__ = None
@property
def rss_url(self):
return 'https://%s/%s/events/rss' % (self.MEETUP_DOMAIN, self.group_id)
def fetch_rss(self):
"""Use feedparser to get entries from the RSS feed"""
return feedparser.parse(self.rss_url)
def update_entries(self):
"""Fetch entries from RSS feed and store them"""
self.__rss__ = self.fetch_rss()
self.__events__ = self.__rss__.entries
def parse_event(self, e):
"""Helper function to convert RSS event data to
expected data for MeetupEvent"""
event = {}
event['title'] = e.title
event['id'] = e.guid.rsplit('/')[-2] if e.guid.endswith('/') else e.guid.rsplit('/', 1)[-1]
# published_parsed has the date in struct_time
# Convert to datetime for better output
event['time'] = datetime.fromtimestamp(mktime(e.published_parsed))
# Find a better way to parse this specific element
html = BeautifulSoup(e.summary, 'html.parser')
event['excerpt'] = None
for tag in html.find_all('p'):
for p in tag.find_all('p'):
event['excerpt'] = str(p)
break
event['venue_name'] = None
event['venue_location'] = None
return event
@property
def events(self):
"""Stored entries from the RSS feed"""
if self.__events__ is None:
self.update_entries()
return self.__events__
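# A minimal usage sketch; fetching requires network access to www.meetup.com
# and the group id below is illustrative only.
#
#   feed = MeetupRSS('OpenTwinCities')
#   for entry in feed.events:
#       event = feed.parse_event(entry)
#       print(event['title'], event['time'])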
|
mit
| -47,412,677,332,010,296
| 28.949153
| 99
| 0.578947
| false
| 3.91796
| false
| false
| false
|
cernbox/smashbox
|
lib/test_slowwrite.py
|
1
|
2941
|
import os
import time
import tempfile
__doc__ = """
Synchronize local folder while writing into the file.
This is a testcase for:
https://github.com/owncloud/mirall/issues/2210 (corrupted file upload if file modified during transfer)
owncloudcmd will delay syncing of the file if the file is modified every 2 seconds or less (slowWrite < 2)
"""
from smashbox.utilities import *
from smashbox.utilities.hash_files import *
do_not_report_as_failure()
MB = 1024*1000
filesizeKB = int(config.get('slowwrite_filesizeKB',10000))
blockSize = int(config.get('slowwrite_blockSize',MB))
slowWrite = int(config.get('slowwrite_slowWrite',1))
nfiles=1
testsets = [
{ 'slowwrite_filesizeKB': 2,
'slowwrite_blockSize': 200,
'slowwrite_slowWrite':1.5
},
{ 'slowwrite_filesizeKB': 5000,
'slowwrite_blockSize': MB,
'slowwrite_slowWrite':1
},
{ 'slowwrite_filesizeKB': 11000,
'slowwrite_blockSize': MB,
'slowwrite_slowWrite':1
},
{ 'slowwrite_filesizeKB': 25000,
'slowwrite_blockSize': MB,
'slowwrite_slowWrite':1
},
{ 'slowwrite_filesizeKB': 50000,
'slowwrite_blockSize': MB,
'slowwrite_slowWrite':1
}
]
@add_worker
def writer(step):
ver=ocsync_version()
# sync client version 2.x.x and below were syncing indefinitely in case of local errors, so eventually the files got synced
# for newer clients, the default number of sync retries is 3 which is not enough to get the file synced if the writes are really slow
# so for newer client we set the --max-sync-retries explicitly to a higher number (this is a new option)
# ref: https://github.com/owncloud/client/issues/4586
if ver[0] >= 2:
config.oc_sync_cmd += " --max-sync-retries 20"
# do not cleanup server files from previous run
reset_owncloud_account()
# cleanup all local files for the test
reset_rundir()
step(1,'Preparation')
    d = make_workdir('writer') # both writer and synchronizer share the same workdir
run_ocsync(d)
k0 = count_files(d)
step(2,'Add %s files and check if we still have k1+nfiles after resync'%nfiles)
create_hashfile(d,size=filesizeKB*1000,bs=blockSize,slow_write=slowWrite) #config.hashfile_size)
@add_worker
def synchronizer(step):
step(2,'Sync the file as it is being written by writer')
sleep(slowWrite*2)
    d = make_workdir('writer') # both writer and synchronizer share the same workdir
run_ocsync(d)
@add_worker
def checker(step):
step(1,'Preparation')
d = make_workdir()
run_ocsync(d)
k0 = count_files(d)
step(3,'Resync and check files added by synchronizer')
run_ocsync(d)
analyse_hashfiles(d)
k1 = count_files(d)
error_check(k1-k0==nfiles,'Expecting to have %d files more: see k1=%d k0=%d'%(nfiles,k1,k0))
|
agpl-3.0
| -6,707,239,179,517,909,000
| 24.353448
| 137
| 0.658279
| false
| 3.407879
| false
| false
| false
|
MIT-LCP/false-alarm-reduction
|
pyfar/utils.py
|
1
|
5165
|
from __future__ import print_function
import wfdb
import json
def abs_value(x, y):
return abs(x-y)
def is_true_alarm_fields(fields):
return fields['comments'][1] == 'True alarm'
def is_true_alarm(data_path, sample_name):
sig, fields = wfdb.srdsamp(data_path + sample_name)
return is_true_alarm_fields(fields)
# start and end in seconds
def get_annotation(sample, ann_type, ann_fs, start, end):
try:
annotation = wfdb.rdann(sample, ann_type, sampfrom=start*ann_fs, sampto=end*ann_fs)
except Exception as e:
annotation = []
print(e)
return annotation
## Returns type of arrhythmia alarm
# output types include: 'a', 'b', 't', 'v', 'f'
def get_arrhythmia_type(fields):
"""Returns type of arrhythmia based on fields of the sample
Arguments
---------
fields: fields of sample read from wfdb.rdsamp
Returns
-------
Type of arrhythmia
'a': asystole
'b': bradycardia
't': tachycardia
'f': ventricular fibrillation
'v': ventricular tachycardia
"""
arrhythmias = {
'Asystole': 'a',
'Bradycardia': 'b',
'Tachycardia': 't',
'Ventricular_Tachycardia': 'v',
'Ventricular_Flutter_Fib': 'f'
}
arrhythmia_type = fields['comments'][0]
return arrhythmias[arrhythmia_type]
def get_channel_type(channel_name, sigtypes_filename):
"""Returns type of channel
Arguments
---------
channel_name: name of channel (e.g. "II", "V", etc.)
sigtypes_filename: file mapping channel names to channel
types
Returns
-------
Type of channel (e.g. "ECG", "BP", "PLETH", "Resp")
"""
channel_types_dict = {}
with open(sigtypes_filename, "r") as f:
for line in f:
splitted_line = line.split("\t")
channel = splitted_line[-1].rstrip()
channel_type = splitted_line[0]
channel_types_dict[channel] = channel_type
if channel_name in channel_types_dict.keys():
return channel_types_dict[channel_name]
raise Exception("Unknown channel name")
def get_samples_of_type(samples_dict, arrhythmia_type):
"""Returns a sub-dictionary of only the given arrhythmia type
Arguments
---------
samples_dict: dictionary mapping sample names to data associated
with the given sample
arrhythmia_type:
'a': asystole
'b': bradycardia
't': tachycardia
'f': ventricular fibrillation
'v': ventricular tachycardia
Returns
-------
a sub-dictionary with keys of only the given arrhythmia
"""
subdict = {}
for sample_name in samples_dict.keys():
if sample_name[0] == arrhythmia_type:
subdict[sample_name] = samples_dict[sample_name]
return subdict
def write_json(dictionary, filename):
with open(filename, "w") as f:
json.dump(dictionary, f)
def read_json(filename):
with open(filename, "r") as f:
dictionary = json.load(f)
return dictionary
def get_classification_accuracy(matrix):
num_correct = len(matrix["TP"]) + len(matrix["TN"])
num_total = len(matrix["FP"]) + len(matrix["FN"]) + num_correct
return float(num_correct) / num_total
def calc_sensitivity(counts):
tp = counts["TP"]
fn = counts["FN"]
return tp / float(tp + fn)
def calc_specificity(counts):
tn = counts["TN"]
fp = counts["FP"]
return tn / float(tn + fp)
def calc_ppv(counts):
tp = counts["TP"]
fp = counts["FP"]
return tp / float(tp + fp)
def calc_npv(counts):
tn = counts["TN"]
fn = counts["FN"]
return tn / float(tn + fn)
def calc_f1(counts):
sensitivity = calc_sensitivity(counts)
ppv = calc_ppv(counts)
return 2 * sensitivity * ppv / float(sensitivity + ppv)
def print_stats(counts):
try:
sensitivity = calc_sensitivity(counts)
specificity = calc_specificity(counts)
ppv = calc_ppv(counts)
npv = calc_npv(counts)
f1 = calc_f1(counts)
    except Exception as e:
        # bail out early: the metrics below are undefined if any ratio failed
        print(e)
        print("counts: ", counts)
        return
print("sensitivity: ", sensitivity)
print("specificity: ", specificity)
print("ppv: ", ppv)
print("npv: ", npv)
print("f1: ", f1)
def get_matrix_classification(actual, predicted):
if actual and predicted:
return "TP"
elif actual and not predicted:
return "FN"
elif not actual and predicted:
return "FP"
return "TN"
def get_score(matrix):
numerator = len(matrix["TP"]) + len(matrix["TN"])
denominator = len(matrix["FP"]) + 5*len(matrix["FN"]) + numerator
return float(numerator) / denominator
def get_by_arrhythmia(confusion_matrix, arrhythmia_prefix):
counts_by_arrhythmia = {}
matrix_by_arrhythmia = {}
for classification_type in confusion_matrix.keys():
sample_list = [ sample for sample in confusion_matrix[classification_type] if sample[0] == arrhythmia_prefix]
counts_by_arrhythmia[classification_type] = len(sample_list)
matrix_by_arrhythmia[classification_type] = sample_list
return counts_by_arrhythmia, matrix_by_arrhythmia
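# A worked example for the metric helpers above, using hypothetical counts:
#
#   counts = {"TP": 40, "FN": 10, "FP": 20, "TN": 30}
#   calc_sensitivity(counts)  # 40 / 50  = 0.8
#   calc_specificity(counts)  # 30 / 50  = 0.6
#   calc_ppv(counts)          # 40 / 60  ~ 0.667
#   calc_npv(counts)          # 30 / 40  = 0.75
#   calc_f1(counts)           # 2 * 0.8 * 0.667 / (0.8 + 0.667) ~ 0.727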
|
mit
| -8,308,017,992,821,407,000
| 23.712919
| 117
| 0.619942
| false
| 3.298212
| false
| false
| false
|
max4260/HydroP
|
python/script.py
|
1
|
1197
|
import webiopi
import datetime
import sqlite3
import subprocess
import sys
GPIO = webiopi.GPIO
AUTO = 1
SCRIPT_PATH = "/home/pi/HydroP/python/"
LIGHT1 = 17 # GPIO pin using BCM numbering
dbconn = sqlite3.connect(SCRIPT_PATH + "hydro.db")
dbconn.row_factory = sqlite3.Row
dbcursor = dbconn.cursor()
# setup function is automatically called at WebIOPi startup
def setup():
# set the GPIO used by the light to output
GPIO.setFunction(LIGHT1, GPIO.OUT)
# retrieve current datetime
now = datetime.datetime.now()
dbcursor.execute("SELECT status, interval FROM devices WHERE name = \"LIGHT1\"")
lightDevice = dbcursor.fetchone()
if (lightDevice != None) and (lightDevice["status"] == AUTO):
setLightInterval(lightDevice['interval'])
# loop function is repeatedly called by WebIOPi
def loop():
# retrieve current datetime
now = datetime.datetime.now()
# gives CPU some time before looping again
webiopi.sleep(1)
# destroy function is called at WebIOPi shutdown
def destroy():
GPIO.digitalWrite(LIGHT1, GPIO.LOW)
@webiopi.macro
def setLightInterval(interval):
subprocess.Popen(["python",SCRIPT_PATH + "light_loop.py",str(interval)])
|
gpl-2.0
| 1,823,126,588,863,554,300
| 25.622222
| 84
| 0.721805
| false
| 3.530973
| false
| false
| false
|
zhenjiawang157/BART_Py2
|
BART/AUCcalc.py
|
1
|
6820
|
# Time-stamp: <2017-08-10>
'''Module for calculating ROC-AUC values for all TF datasets
Copyright (c) 2017, 2018 Chongzhi Zang, Zhenjia Wang <zhenjia@virginia.edu>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License.
@status: release candidate
@version: $Id$
@author: Chongzhi Zang, Zhenjia Wang
@contact: zhenjia@virginia.edu
'''
from __future__ import division
import os,sys,os.path
import argparse,time
import configparser
import re
import multiprocessing
from BART.StatTest import stat_test
from BART.OptValidator import opt_validate,conf_validate
from BART.ReadCount import read_count_on_DHS
import bz2
def get_file(filedir,match):
mark = re.compile(match)
names = os.listdir(filedir)
files = []
for name in names:
if mark.search(name):
files.append('_'.join(name.split('_')[:-2]))
files = sorted(files)
return files
def get_position_list(margefile):
'''
Get the ID list of DHS, according to the decreasingly sorted scores in MARGE
'''
fin = open(margefile,'rb')
line = fin.readline()
score = {}
while line:
line = line.strip().split()
try:
score[line[-2]]=float(line[-1])
except:
pass
line = fin.readline()
fin.close()
return sorted(score.keys(),key=score.get,reverse=True)
def get_match_list(tf, tfdir,positions):
'''
Return the binding info on DHS
'''
## .txt format
# fname = tf+'_DHS_01.txt'
# tf = open(os.path.join(tfdir,fname), 'rb')
# lines = tf.raw.read()
## .bz2 format
fname = tf+'_DHS_01.txt.bz2'
tf = bz2.BZ2File(os.path.join(tfdir,fname),'r')
lines = tf.read()
match = [ord(lines[2*position-2])-ord('0') for position in positions]
tf.close()
return match
def partion(match):
sub_t = 0
list_t = []
list_f = []
total = len(match)
groupsize=10000
groups = int(total/groupsize)
for i in range(groups):
sub_t = sum(match[i*groupsize:(i+1)*groupsize])
sub_f = groupsize - sub_t
list_t.append(sub_t)
list_f.append(sub_f)
sub_t = sum(match[groups*groupsize:])
sub_f = total -groups*groupsize-sub_t
list_t.append(sub_t)
list_f.append(sub_f)
return total, list_t, list_f
def roc_auc(total, list_t, list_f):
list_x = [0.0]
list_y = [0.0]
assert len(list_t)==len(list_f)
for i in range(len(list_t)):
list_x.append(list_f[i]+list_x[i])
list_y.append(list_t[i]+list_y[i])
total_t = list_y[-1]
list_x = [i/(total - total_t) for i in list_x]
list_y = [i/total_t for i in list_y]
auc = 0.0
for i in range(1,len(list_x)):
width = list_x[i]-list_x[i-1]
height = (list_y[i]+list_y[i-1])/2
auc += height*width
return list_x, list_y, auc
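# partion() buckets the ranked binding indicator into groups of 10,000 DHS
# positions (plus a final remainder bucket) and counts true/false hits per
# bucket; roc_auc() then walks those buckets as cumulative (FP, TP) steps,
# normalises both axes and integrates the ROC curve with the trapezoid rule.
# With a single bucket the curve degenerates to the diagonal and the AUC is 0.5.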
def cal_auc_for_tf(tf_p):
tf,tfdir,positions = tf_p
# time1 = time.time()
match = get_match_list(tf,tfdir,positions)
(t, lt, lf) = partion(match)
(list_x, list_y, auc) = roc_auc(t, lt,lf)
# time2 = time.time()
# print(time2-time1)
return tf,auc
def run(options):
args = opt_validate(options)
tfs = get_file(args.tfdir,'DHS_01')
if len(tfs) == 0:
sys.stderr.write('Please specify correct directory of TF binding profiles!')
sys.exit(1)
try:
os.makedirs(args.outdir)
except:
        sys.exit('Output directory: {} already exists, please select another directory.'.format(args.outdir))
# This part is for the auc.txt input
#if args.auc:
# AUCs = {}
# with open(args.infile,'r') as auc_infile:
# for auc_line in auc_infile.readlines():
# auc_info = auc_line.strip().split()
# AUCs[auc_info[0]] = float(auc_info[-1])
# #print(AUCs)
# stat_test(AUCs,args)
# exit(0)
# print(args,'\n')
if args.subcommand_name == 'geneset':
print('Prediction starts...\n\nRank all DHS...\n')
margefile = args.infile
positions = get_position_list(args.infile)
positions = [int(i) for i in positions]
#print(type(positions[1]));exit(0)
elif args.subcommand_name == 'profile':
print('Start mapping the {} file...\n'.format(args.format.upper()))
counting = read_count_on_DHS(args)
positions = sorted(counting.keys(),key=counting.get,reverse=True)
positions = [int(i) for i in positions]
#print([[i,counting[i]] for i in positions[:30]])
print('Prediction starts...\n\nRank all DHS...\n')
if len(positions) == 0:
        sys.stderr.write('Input file might not be in the right format!\n')
sys.exit(1)
# output file of AUC-ROC values for all TFs
aucfile = args.outdir+os.sep+args.ofilename+'_auc.txt'
sys.stdout.write("Calculating ROC-AUC values for all transcription factors:\n\n")
print(args)
tf_ps = [(tf,args.tfdir,positions) for tf in tfs]
print(len(tf_ps),'#TF datasets')###########
AUCs = {}
# always multiprocessing
if args.processes:
        print('--Number of CPUs in use: {}\n'.format(args.processes))
pool = multiprocessing.Pool(processes=args.processes)
tf_aucs = pool.map_async(cal_auc_for_tf,tf_ps,chunksize=1)
total=len(tf_ps)
#print(total)
#import tqdm ##pbar
#pbar = tqdm.tqdm(total=total) ##pbar
#last=total ##pbar
while not tf_aucs.ready(): # print percentage of work has been done
remaining=tf_aucs._number_left
#pbar.update(last-remaining) ##pbar
#last=remaining ##pbar
sys.stdout.write('\n Processing...{:.1f}% finished'.format(100*(total-remaining)/total)) ##print
i=0
while not tf_aucs.ready() and i<24:
sys.stdout.write('.')
sys.stdout.flush()
#print(".",end='',flush=True) for py3
i+=1
time.sleep(5)
#pbar.update(remaining) ##pbar
#pbar.close() ##pbar
print('\n Processing...100.0% finished.') ##print
pool.close()
pool.join()
# save the AUCs
for tfauc in tf_aucs.get():
AUCs[tfauc[0]]=tfauc[1]
#print(AUCs)
else:
for tf_p in tf_ps:
AUCs[tf_p[0]]=cal_auc_for_tf(tf_p)[1]
with open(aucfile, 'w') as aucf:
for tf_key in sorted(AUCs.keys(),key=AUCs.get,reverse=True):
aucf.write('{}\tAUC = {:.3f}\n'.format(tf_key,AUCs[tf_key]))
print('\n--ROC-AUC calculation finished!\n--Results saved in file: {}\n'.format(aucfile))
stat_test(AUCs,args)
|
bsd-2-clause
| -6,984,467,226,184,872,000
| 29.58296
| 109
| 0.57478
| false
| 3.12844
| false
| false
| false
|
timbooo/traktforalfred
|
trakt/mapper/summary.py
|
1
|
3161
|
from trakt.mapper.core.base import Mapper
class SummaryMapper(Mapper):
@classmethod
def movies(cls, client, items, **kwargs):
if not items:
return None
return [cls.movie(client, item, **kwargs) for item in items]
@classmethod
def movie(cls, client, item, **kwargs):
if not item:
return None
if 'movie' in item:
i_movie = item['movie']
else:
i_movie = item
# Retrieve item keys
pk, keys = cls.get_ids('movie', i_movie)
if pk is None:
return None
# Create object
movie = cls.construct(client, 'movie', i_movie, keys, **kwargs)
# Update with root info
if 'movie' in item:
movie._update(item)
return movie
@classmethod
def shows(cls, client, items, **kwargs):
if not items:
return None
return [cls.show(client, item, **kwargs) for item in items]
@classmethod
def show(cls, client, item, **kwargs):
if not item:
return None
if 'show' in item:
i_show = item['show']
else:
i_show = item
# Retrieve item keys
pk, keys = cls.get_ids('show', i_show)
if pk is None:
return None
# Create object
show = cls.construct(client, 'show', i_show, keys, **kwargs)
# Update with root info
if 'show' in item:
show._update(item)
return show
@classmethod
def seasons(cls, client, items, **kwargs):
if not items:
return None
return [cls.season(client, item, **kwargs) for item in items]
@classmethod
def season(cls, client, item, **kwargs):
if not item:
return None
if 'season' in item:
i_season = item['season']
else:
i_season = item
# Retrieve item keys
pk, keys = cls.get_ids('season', i_season)
if pk is None:
return None
# Create object
season = cls.construct(client, 'season', i_season, keys, **kwargs)
# Update with root info
if 'season' in item:
season._update(item)
return season
@classmethod
def episodes(cls, client, items, **kwargs):
if not items:
return None
return [cls.episode(client, item, **kwargs) for item in items]
@classmethod
def episode(cls, client, item, **kwargs):
if not item:
return None
if 'episode' in item:
i_episode = item['episode']
else:
i_episode = item
# Retrieve item keys
pk, keys = cls.get_ids('episode', i_episode)
if pk is None:
return None
# Create object
episode = cls.construct(client, 'episode', i_episode, keys, **kwargs)
# Update with root info
if 'episode' in item:
episode._update(item)
return episode
|
mit
| 3,495,584,519,128,426,500
| 22.129771
| 77
| 0.506485
| false
| 4.427171
| false
| false
| false
|
mmetince/akgulyzer
|
akgulyzer.py
|
1
|
3279
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Mehmet Dursun Ince
from argparse import ArgumentParser
from random import choice, randint
import locale
locale.setlocale(locale.LC_ALL, "tr_TR")
class Akgulyzer(object):
def __init__(self, args):
"""
        Akgulyzer constructor
:param args:
:return:
"""
if args.encode:
self.encode(args.encode)
elif args.random:
self.random(args.random)
def random(self, level):
"""
        Depending on the level given as a parameter, randomly picks an Akgul
        text that has been seen by at least one person and prints it.
:param level:
:return:
"""
akgul_db = {
'low': [
"Seneye bekleriz. LKD uyelei lutfen dvet beklemeyin parcasıdı. LKD akadeiik Bilşimin organik bir parcasıdır. Mustafa Akgul",
"Aslında tum seminerler icin bilmeyene hitap edecek , yeterli detayda, seminmer ozeti isterim. Baslik ve konusmaci ve epostasi belli olduktan sonra bir 'stub acip, opemnconf uzeriden girilmesini rica ediyorum.",
""
],
'medium': [
"Bilgisayar Mugendislii/Bilim egitini, Yürkieyenin yazılım startejisi ve belki Ümiveristelrde özgür yzılım kullanımı konularınd apanel olacak LKD den konusmaci istiyoruz.",
"Okudugunu anlamayanlar ülkesi: katılamayacaklar oln mail atsin diyoruz. su ikiis yanlis gondermis - cikattmıyoru",
"bu ucune sşizn kurs iicn ben kabul mektubu uretip dizne koyacagimsiz github'a eklediniz dimi?"
],
'hardcore': ["Erdem Bayer'e Akgül hocadan gelen mesajı okuyanlar anlar.."]
}
print choice(akgul_db[level])
def encode(self, text):
"""
        Basically splits the text on whitespace and subjects the words to a
        random rotation depending on their length.
:param text:
:return:
"""
words = []
char_from_previous = None
for word in text.split():
if randint(0, 10) < 2 and len(word) > 3:
                # with roughly 20% probability, misorder the characters
word = self.__swap(word, randint(1, len(word)-2))
if char_from_previous:
                # if the last letter of the previous word was carried over, prepend it
word = char_from_previous + word
char_from_previous = None
elif randint(0, 10) < 2:
char_from_previous = word[-1]
word = word[:-1]
words.append(word)
print " ".join(words)
def __swap(self, strg, n):
"""
        Swap two adjacent characters at the given position.
:param strg:
:param n:
:return:
"""
return strg[:n] + strg[n+1] + strg[n] + strg[n+2:]
if __name__ == "__main__":
parser = ArgumentParser(description="Mustafa Akgül'un askerleriyiz..!")
parser.add_argument("-e", "--encode", help="Verilen metni Akgüller.")
parser.add_argument("-r", "--random", choices=['low', 'medium', 'hardcore'], default="low",
help="Bilinen Akgül metinlerinden birini rastgele seçer.")
args = parser.parse_args()
main = Akgulyzer(args)
|
gpl-2.0
| 1,414,536,402,941,925,000
| 38.192771
| 231
| 0.583948
| false
| 2.997235
| false
| false
| false
|
janpipek/pyearcal
|
pyearcal/image_sources.py
|
1
|
1485
|
import abc
import os
import fnmatch
import random
from typing import Dict
from collections import OrderedDict
class ImageDirectory(abc.ABC):
def __getitem__(self, index: int) -> str:
return self.images[index]
images: Dict[int, str]
def __iter__(self):
# yield from self.images.values()
for image in self.images.values():
yield image
class SortedImageDirectory(ImageDirectory):
def __init__(self, dirname=".", extension=".jpg"):
self.dirname = dirname
self.extension = extension
self.read_images()
def read_images(self):
self.images = OrderedDict()
for index in range(1, 13):
path = os.path.join(self.dirname, str(index) + self.extension)
if os.path.exists(path):
self.images[index] = path
else:
raise Exception("File does not exist: " + path)
class UnsortedImageDirectory(ImageDirectory):
def __init__(self, dirname=".", pattern="*.jpg"):
self.dirname = dirname
self.pattern = pattern
self.read_images()
def read_images(self):
self.images = OrderedDict()
all_file_names = [
fn for fn in os.listdir(self.dirname) if fnmatch.fnmatch(fn, self.pattern)
]
sampled_file_names = random.sample(all_file_names, 12)
for index, name in enumerate(sampled_file_names):
self.images[index + 1] = os.path.join(self.dirname, name)
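# A minimal usage sketch; the directory name is illustrative and is expected to
# contain "1.jpg" .. "12.jpg" for the sorted variant, or at least twelve jpg
# files for the random one.
#
#   months = SortedImageDirectory("photos", extension=".jpg")
#   months[4]                                    # path of the image for April
#   shuffled = UnsortedImageDirectory("photos", pattern="*.jpg")
#   list(shuffled)                               # twelve randomly sampled paths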
|
mit
| -7,820,184,735,028,925,000
| 28.117647
| 86
| 0.607407
| false
| 4.046322
| false
| false
| false
|
wdv4758h/rst2html5slides
|
test/test_output_dir.py
|
1
|
4833
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from io import open
from os import makedirs, urandom
from os.path import join, exists
from shutil import rmtree
from tempfile import mkdtemp
from rst2html5slides import SlideWriter
from docutils.core import publish_file, publish_string
presentation = '''.. presentation::
:distribution: grid
.. role:: small
.. class:: capa
Presentation Title
==================
Agenda
======
* Topic 1
* Topic 2
* Topic 3
|logotipo|
.. class:: chapter
Chapter 1
=========
Schema
======
|tdd cycle|
----
|python logo|
.. include:: junit.rst
.. title:: Testes Automatizados de Software
.. meta::
:generator: rst2html5slides https://bitbucket.org/andre_felipe_dias/rst2html5slides
:author: André Felipe Dias
.. |logotipo| image:: imagens/logotipo.png
.. |tdd cycle| image:: imagens/tdd_cycle.png
.. |python logo| image:: https://www.python.org/static/community_logos/python-logo-master-v3-TM.png
'''
junit = '''JUnit
=====
JUnit is a testing framework'''
css = 'div {background-color: red}'
source_dir = mkdtemp()
source_path = join(source_dir, 'presentation.rst')
def setup():
makedirs(join(source_dir, 'imagens'))
makedirs(join(source_dir, 'css'))
with open(source_path, 'w', encoding='utf-8') as f:
f.write(presentation)
with open(join(source_dir, 'junit.rst'), 'w', encoding='utf-8') as f:
f.write(junit)
with open(join(source_dir, 'css', 'style.css'), 'w', encoding='utf-8') as f:
f.write(css)
with open(join(source_dir, 'imagens', 'tdd_cycle.png'), 'wb') as f:
f.write(urandom(2 ** 16))
with open(join(source_dir, 'imagens', 'not_used.png'), 'wb') as f:
f.write(urandom(2 ** 11))
with open(join(source_dir, 'imagens', 'logotipo.png'), 'wb') as f:
f.write(urandom(2 ** 15))
def teardown():
rmtree(source_dir)
def test_destination_dir():
dest_dir = mkdtemp()
output = publish_file(
writer=SlideWriter(), source_path=source_path,
destination_path=dest_dir,
settings_overrides={'stylesheet': [join('css', 'style.css')], 'presentation': 'jmpress.js'}
)
assert exists(join(dest_dir, 'presentation.html'))
assert exists(join(dest_dir, 'css', 'style.css'))
assert exists(join(dest_dir, 'imagens', 'tdd_cycle.png'))
assert exists(join(dest_dir, 'imagens', 'logotipo.png'))
assert exists(join(dest_dir, 'css', 'slides.css'))
assert exists(join(dest_dir, 'js'))
assert not exists(join(dest_dir, 'imagens', 'not_used.png'))
assert str('<link href="css/slides.css"') in output
assert str('<script src="js/jquery.min.js">') in output
assert str('<link href="css/style.css"') in output
assert str('src="https://www.python.org') in output
rmtree(dest_dir)
def test_destination_path():
dest_dir = mkdtemp()
output = publish_file(
writer=SlideWriter(), source_path=source_path,
destination_path=join(dest_dir, 'slides.html'),
settings_overrides={'stylesheet': [join('css', 'style.css')], 'presentation': 'jmpress.js'}
)
assert exists(join(dest_dir, 'slides.html'))
assert not exists(join(dest_dir, 'presentation.html'))
assert exists(join(dest_dir, 'css', 'style.css'))
assert exists(join(dest_dir, 'imagens', 'tdd_cycle.png'))
assert exists(join(dest_dir, 'imagens', 'logotipo.png'))
assert not exists(join(dest_dir, 'imagens', 'not_used.png'))
assert exists(join(dest_dir, 'css', 'slides.css'))
assert exists(join(dest_dir, 'js'))
assert str('<link href="css/slides.css"') in output
assert str('<script src="js/jquery.min.js">') in output
assert str('<link href="css/style.css"') in output
assert str('src="https://www.python.org') in output
rmtree(dest_dir)
def test_no_destination():
dest_dir = mkdtemp()
os.chdir(dest_dir)
output = publish_string(
writer=SlideWriter(), source=presentation, source_path=source_path,
settings_overrides={'stylesheet': [join('css', 'style.css')],
'output_encoding': 'unicode',
'presentation': 'jmpress.js'}
)
assert not exists(join(dest_dir, 'presentation.html'))
assert not exists(join(dest_dir, 'css', 'style.css'))
assert not exists(join(dest_dir, 'imagens', 'tdd_cycle.png'))
assert not exists(join(dest_dir, 'imagens', 'logotipo.png'))
assert not exists(join(dest_dir, 'imagens', 'not_used.png'))
assert not exists(join(dest_dir, 'css', 'slides.css'))
assert not exists(join(dest_dir, 'js'))
assert str('<link href="css/slides.css"') in output
assert str('<script src="js/jquery.min.js">') in output
assert '<link href="css/style.css"' in output
assert 'src="https://www.python.org' in output
rmtree(dest_dir)
|
mit
| 6,010,415,876,726,808,000
| 29.974359
| 99
| 0.641349
| false
| 3.2
| false
| false
| false
|
RexFuzzle/sfepy
|
sfepy/discrete/iga/extmods/setup.py
|
1
|
1281
|
#!/usr/bin/env python
def configuration(parent_package='', top_path=None):
import os.path as op
from numpy.distutils.misc_util import Configuration
from sfepy import Config
site_config = Config()
os_flag = {'posix' : 0, 'windows' : 1}
auto_dir = op.dirname(__file__)
auto_name = op.split(auto_dir)[-1]
config = Configuration(auto_name, parent_package, top_path)
defines = [('__SDIR__', "'\"%s\"'" % auto_dir),
('SFEPY_PLATFORM', os_flag[site_config.system()])]
if '-DDEBUG_FMF' in site_config.debug_flags():
defines.append(('DEBUG_FMF', None))
common_path = '../../common/extmods'
fem_src = ['fmfield.c', 'geommech.c', 'common_python.c']
fem_src = [op.join(common_path, ii) for ii in fem_src]
src = ['igac.pyx', 'nurbs.c']
config.add_extension('igac',
sources=src + fem_src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir, common_path],
define_macros=defines)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
| 5,189,118,304,611,245,000
| 32.710526
| 72
| 0.568306
| false
| 3.490463
| true
| false
| false
|
c0710204/mirrorsBistu
|
pypi/bandersnatch/lib/python2.7/site-packages/pyrepl/reader.py
|
1
|
20539
|
# Copyright 2000-2010 Michael Hudson-Doyle <micahel@gmail.com>
# Antonio Cuni
# Armin Rigo
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
import unicodedata
from pyrepl import commands
from pyrepl import input
try:
unicode
except NameError:
unicode = str
unichr = chr
basestring = bytes, str
def _make_unctrl_map():
uc_map = {}
for c in map(unichr, range(256)):
if unicodedata.category(c)[0] != 'C':
uc_map[c] = c
for i in range(32):
c = unichr(i)
uc_map[c] = '^' + unichr(ord('A') + i - 1)
uc_map[b'\t'] = ' ' # display TABs as 4 characters
uc_map[b'\177'] = unicode('^?')
for i in range(256):
c = unichr(i)
if c not in uc_map:
uc_map[c] = unicode('\\%03o') % i
return uc_map
def _my_unctrl(c, u=_make_unctrl_map()):
if c in u:
return u[c]
else:
if unicodedata.category(c).startswith('C'):
return b'\u%04x' % ord(c)
else:
return c
def disp_str(buffer, join=''.join, uc=_my_unctrl):
""" disp_str(buffer:string) -> (string, [int])
    Return the string that should be the printed representation of
|buffer| and a list detailing where the characters of |buffer|
get used up. E.g.:
>>> disp_str(chr(3))
('^C', [1, 0])
the list always contains 0s or 1s at present; it could conceivably
go higher as and when unicode support happens."""
# disp_str proved to be a bottleneck for large inputs,
# so it needs to be rewritten in C; it's not required though.
s = [uc(x) for x in buffer]
b = [] # XXX: bytearray
for x in s:
b.append(1)
b.extend([0] * (len(x) - 1))
return join(s), b
del _my_unctrl
del _make_unctrl_map
# syntax classes:
[SYNTAX_WHITESPACE,
SYNTAX_WORD,
SYNTAX_SYMBOL] = range(3)
def make_default_syntax_table():
# XXX perhaps should use some unicodedata here?
st = {}
for c in map(unichr, range(256)):
st[c] = SYNTAX_SYMBOL
for c in [a for a in map(unichr, range(256)) if a.isalpha()]:
st[c] = SYNTAX_WORD
st[unicode('\n')] = st[unicode(' ')] = SYNTAX_WHITESPACE
return st
default_keymap = tuple(
[(r'\C-a', 'beginning-of-line'),
(r'\C-b', 'left'),
(r'\C-c', 'interrupt'),
(r'\C-d', 'delete'),
(r'\C-e', 'end-of-line'),
(r'\C-f', 'right'),
(r'\C-g', 'cancel'),
(r'\C-h', 'backspace'),
(r'\C-j', 'accept'),
(r'\<return>', 'accept'),
(r'\C-k', 'kill-line'),
(r'\C-l', 'clear-screen'),
(r'\C-m', 'accept'),
(r'\C-q', 'quoted-insert'),
(r'\C-t', 'transpose-characters'),
(r'\C-u', 'unix-line-discard'),
(r'\C-v', 'quoted-insert'),
(r'\C-w', 'unix-word-rubout'),
(r'\C-x\C-u', 'upcase-region'),
(r'\C-y', 'yank'),
(r'\C-z', 'suspend'),
(r'\M-b', 'backward-word'),
(r'\M-c', 'capitalize-word'),
(r'\M-d', 'kill-word'),
(r'\M-f', 'forward-word'),
(r'\M-l', 'downcase-word'),
(r'\M-t', 'transpose-words'),
(r'\M-u', 'upcase-word'),
(r'\M-y', 'yank-pop'),
(r'\M--', 'digit-arg'),
(r'\M-0', 'digit-arg'),
(r'\M-1', 'digit-arg'),
(r'\M-2', 'digit-arg'),
(r'\M-3', 'digit-arg'),
(r'\M-4', 'digit-arg'),
(r'\M-5', 'digit-arg'),
(r'\M-6', 'digit-arg'),
(r'\M-7', 'digit-arg'),
(r'\M-8', 'digit-arg'),
(r'\M-9', 'digit-arg'),
#(r'\M-\n', 'insert-nl'),
('\\\\', 'self-insert')] + \
[(c, 'self-insert')
for c in map(chr, range(32, 127)) if c != '\\'] + \
[(c, 'self-insert')
for c in map(chr, range(128, 256)) if c.isalpha()] + \
[(r'\<up>', 'up'),
(r'\<down>', 'down'),
(r'\<left>', 'left'),
(r'\<right>', 'right'),
(r'\<insert>', 'quoted-insert'),
(r'\<delete>', 'delete'),
(r'\<backspace>', 'backspace'),
(r'\M-\<backspace>', 'backward-kill-word'),
(r'\<end>', 'end-of-line'), # was 'end'
(r'\<home>', 'beginning-of-line'), # was 'home'
(r'\<f1>', 'help'),
(r'\EOF', 'end'), # the entries in the terminfo database for xterms
(r'\EOH', 'home'), # seem to be wrong. this is a less than ideal
# workaround
])
if 'c' in globals(): # only on python 2.x
del c # from the listcomps
class Reader(object):
"""The Reader class implements the bare bones of a command reader,
handling such details as editing and cursor motion. What it does
not support are such things as completion or history support -
these are implemented elsewhere.
Instance variables of note include:
* buffer:
A *list* (*not* a string at the moment :-) containing all the
characters that have been entered.
* console:
Hopefully encapsulates the OS dependent stuff.
* pos:
A 0-based index into `buffer' for where the insertion point
is.
* screeninfo:
Ahem. This list contains some info needed to move the
insertion point around reasonably efficiently. I'd like to
get rid of it, because its contents are obtuse (to put it
mildly) but I haven't worked out if that is possible yet.
* cxy, lxy:
the position of the insertion point in screen ... XXX
* syntax_table:
Dictionary mapping characters to `syntax class'; read the
emacs docs to see what this means :-)
* commands:
Dictionary mapping command names to command classes.
* arg:
The emacs-style prefix argument. It will be None if no such
argument has been provided.
* dirty:
True if we need to refresh the display.
* kill_ring:
The emacs-style kill-ring; manipulated with yank & yank-pop
* ps1, ps2, ps3, ps4:
prompts. ps1 is the prompt for a one-line input; for a
multiline input it looks like:
ps2> first line of input goes here
ps3> second and further
ps3> lines get ps3
...
ps4> and the last one gets ps4
As with the usual top-level, you can set these to instances if
you like; str() will be called on them (once) at the beginning
of each command. Don't put really long or newline containing
strings here, please!
This is just the default policy; you can change it freely by
overriding get_prompt() (and indeed some standard subclasses
do).
* finished:
handle1 will set this to a true value if a command signals
that we're done.
"""
help_text = """\
This is pyrepl. Hear my roar.
Helpful text may appear here at some point in the future when I'm
feeling more loquacious than I am now."""
msg_at_bottom = True
def __init__(self, console):
self.buffer = []
self.ps1 = "->> "
self.ps2 = "/>> "
self.ps3 = "|.. "
self.ps4 = "\__ "
self.kill_ring = []
self.arg = None
self.finished = 0
self.console = console
self.commands = {}
self.msg = ''
for v in vars(commands).values():
if (isinstance(v, type)
and issubclass(v, commands.Command)
and v.__name__[0].islower()):
self.commands[v.__name__] = v
self.commands[v.__name__.replace('_', '-')] = v
self.syntax_table = make_default_syntax_table()
self.input_trans_stack = []
self.keymap = self.collect_keymap()
self.input_trans = input.KeymapTranslator(
self.keymap,
invalid_cls='invalid-key',
character_cls='self-insert')
def collect_keymap(self):
return default_keymap
def calc_screen(self):
"""The purpose of this method is to translate changes in
self.buffer into changes in self.screen. Currently it rips
everything down and starts from scratch, which whilst not
especially efficient is certainly simple(r).
"""
lines = self.get_unicode().split("\n")
screen = []
screeninfo = []
w = self.console.width - 1
p = self.pos
for ln, line in zip(range(len(lines)), lines):
ll = len(line)
if 0 <= p <= ll:
if self.msg and not self.msg_at_bottom:
for mline in self.msg.split("\n"):
screen.append(mline)
screeninfo.append((0, []))
self.lxy = p, ln
prompt = self.get_prompt(ln, ll >= p >= 0)
while '\n' in prompt:
pre_prompt, _, prompt = prompt.partition('\n')
screen.append(pre_prompt)
screeninfo.append((0, []))
p -= ll + 1
prompt, lp = self.process_prompt(prompt)
l, l2 = disp_str(line)
wrapcount = (len(l) + lp) // w
if wrapcount == 0:
screen.append(prompt + l)
screeninfo.append((lp, l2 + [1]))
else:
screen.append(prompt + l[:w - lp] + "\\")
screeninfo.append((lp, l2[:w - lp]))
for i in range(-lp + w, -lp + wrapcount * w, w):
screen.append(l[i:i + w] + "\\")
screeninfo.append((0, l2[i:i + w]))
screen.append(l[wrapcount * w - lp:])
screeninfo.append((0, l2[wrapcount * w - lp:] + [1]))
self.screeninfo = screeninfo
self.cxy = self.pos2xy(self.pos)
if self.msg and self.msg_at_bottom:
for mline in self.msg.split("\n"):
screen.append(mline)
screeninfo.append((0, []))
return screen
def process_prompt(self, prompt):
""" Process the prompt.
This means calculate the length of the prompt. The character \x01
and \x02 are used to bracket ANSI control sequences and need to be
excluded from the length calculation. So also a copy of the prompt
is returned with these control characters removed. """
out_prompt = ''
l = len(prompt)
pos = 0
while True:
s = prompt.find('\x01', pos)
if s == -1:
break
e = prompt.find('\x02', s)
if e == -1:
break
# Found start and end brackets, subtract from string length
l = l - (e - s + 1)
out_prompt += prompt[pos:s] + prompt[s + 1:e]
pos = e + 1
out_prompt += prompt[pos:]
return out_prompt, l
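    # A worked example for process_prompt, with a hypothetical coloured prompt:
    #   process_prompt('\x01\x1b[31m\x02->> \x01\x1b[0m\x02')
    #   -> ('\x1b[31m->> \x1b[0m', 4)
    # i.e. the ANSI escapes are kept for display, but only the visible
    # "->> " counts towards the prompt length.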
def bow(self, p=None):
"""Return the 0-based index of the word break preceding p most
immediately.
p defaults to self.pos; word boundaries are determined using
self.syntax_table."""
if p is None:
p = self.pos
st = self.syntax_table
b = self.buffer
p -= 1
while p >= 0 and st.get(b[p], SYNTAX_WORD) != SYNTAX_WORD:
p -= 1
while p >= 0 and st.get(b[p], SYNTAX_WORD) == SYNTAX_WORD:
p -= 1
return p + 1
def eow(self, p=None):
"""Return the 0-based index of the word break following p most
immediately.
p defaults to self.pos; word boundaries are determined using
self.syntax_table."""
if p is None:
p = self.pos
st = self.syntax_table
b = self.buffer
while p < len(b) and st.get(b[p], SYNTAX_WORD) != SYNTAX_WORD:
p += 1
while p < len(b) and st.get(b[p], SYNTAX_WORD) == SYNTAX_WORD:
p += 1
return p
def bol(self, p=None):
"""Return the 0-based index of the line break preceding p most
immediately.
p defaults to self.pos."""
# XXX there are problems here.
if p is None:
p = self.pos
b = self.buffer
p -= 1
while p >= 0 and b[p] != '\n':
p -= 1
return p + 1
def eol(self, p=None):
"""Return the 0-based index of the line break following p most
immediately.
p defaults to self.pos."""
if p is None:
p = self.pos
b = self.buffer
while p < len(b) and b[p] != '\n':
p += 1
return p
def get_arg(self, default=1):
"""Return any prefix argument that the user has supplied,
returning `default' if there is None. `default' defaults
(groan) to 1."""
if self.arg is None:
return default
else:
return self.arg
def get_prompt(self, lineno, cursor_on_line):
"""Return what should be in the left-hand margin for line
`lineno'."""
if self.arg is not None and cursor_on_line:
return "(arg: %s) " % self.arg
if "\n" in self.buffer:
if lineno == 0:
res = self.ps2
elif lineno == self.buffer.count("\n"):
res = self.ps4
else:
res = self.ps3
else:
res = self.ps1
# Lazily call str() on self.psN, and cache the results using as key
# the object on which str() was called. This ensures that even if the
# same object is used e.g. for ps1 and ps2, str() is called only once.
if res not in self._pscache:
self._pscache[res] = str(res)
return self._pscache[res]
def push_input_trans(self, itrans):
self.input_trans_stack.append(self.input_trans)
self.input_trans = itrans
def pop_input_trans(self):
self.input_trans = self.input_trans_stack.pop()
def pos2xy(self, pos):
"""Return the x, y coordinates of position 'pos'."""
# this *is* incomprehensible, yes.
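        # Added explanatory comments (best-effort reading of calc_screen above):
        # self.screeninfo holds one (prompt_length, cells) pair per screen line,
        # where each 1 in `cells` marks a column that consumes one buffer
        # position. Counting those 1s line by line is what lets pos2xy translate
        # a buffer offset into an (x, y) screen coordinate.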
y = 0
assert 0 <= pos <= len(self.buffer)
if pos == len(self.buffer):
y = len(self.screeninfo) - 1
p, l2 = self.screeninfo[y]
return p + len(l2) - 1, y
else:
for p, l2 in self.screeninfo:
l = l2.count(1)
if l > pos:
break
else:
pos -= l
y += 1
c = 0
i = 0
while c < pos:
c += l2[i]
i += 1
while l2[i] == 0:
i += 1
return p + i, y
def insert(self, text):
"""Insert 'text' at the insertion point."""
self.buffer[self.pos:self.pos] = list(text)
self.pos += len(text)
self.dirty = 1
def update_cursor(self):
"""Move the cursor to reflect changes in self.pos"""
self.cxy = self.pos2xy(self.pos)
self.console.move_cursor(*self.cxy)
def after_command(self, cmd):
"""This function is called to allow post command cleanup."""
if getattr(cmd, "kills_digit_arg", 1):
if self.arg is not None:
self.dirty = 1
self.arg = None
def prepare(self):
"""Get ready to run. Call restore when finished. You must not
write to the console in between the calls to prepare and
restore."""
try:
self.console.prepare()
self.arg = None
self.screeninfo = []
self.finished = 0
del self.buffer[:]
self.pos = 0
self.dirty = 1
self.last_command = None
self._pscache = {}
except:
self.restore()
raise
def last_command_is(self, klass):
if not self.last_command:
return 0
return issubclass(klass, self.last_command)
def restore(self):
"""Clean up after a run."""
self.console.restore()
def finish(self):
"""Called when a command signals that we're finished."""
pass
def error(self, msg="none"):
self.msg = "! " + msg + " "
self.dirty = 1
self.console.beep()
def update_screen(self):
if self.dirty:
self.refresh()
def refresh(self):
"""Recalculate and refresh the screen."""
# this call sets up self.cxy, so call it first.
screen = self.calc_screen()
self.console.refresh(screen, self.cxy)
self.dirty = 0 # forgot this for a while (blush)
def do_cmd(self, cmd):
#print cmd
if isinstance(cmd[0], basestring):
#XXX: unify to text
cmd = self.commands.get(cmd[0],
commands.invalid_command)(self, *cmd)
elif isinstance(cmd[0], type):
cmd = cmd[0](self, cmd)
else:
return # nothing to do
cmd.do()
self.after_command(cmd)
if self.dirty:
self.refresh()
else:
self.update_cursor()
if not isinstance(cmd, commands.digit_arg):
self.last_command = cmd.__class__
self.finished = cmd.finish
if self.finished:
self.console.finish()
self.finish()
def handle1(self, block=1):
"""Handle a single event. Wait as long as it takes if block
is true (the default), otherwise return None if no event is
pending."""
if self.msg:
self.msg = ''
self.dirty = 1
while 1:
event = self.console.get_event(block)
if not event: # can only happen if we're not blocking
return None
translate = True
if event.evt == 'key':
self.input_trans.push(event)
elif event.evt == 'scroll':
self.refresh()
elif event.evt == 'resize':
self.refresh()
else:
translate = False
if translate:
cmd = self.input_trans.get()
else:
cmd = event.evt, event.data
if cmd is None:
if block:
continue
else:
return None
self.do_cmd(cmd)
return 1
def push_char(self, char):
self.console.push_char(char)
self.handle1(0)
def readline(self, returns_unicode=False, startup_hook=None):
"""Read a line. The implementation of this method also shows
how to drive Reader if you want more control over the event
loop."""
self.prepare()
try:
if startup_hook is not None:
startup_hook()
self.refresh()
while not self.finished:
self.handle1()
if returns_unicode:
return self.get_unicode()
return self.get_buffer()
finally:
self.restore()
def bind(self, spec, command):
self.keymap = self.keymap + ((spec, command),)
self.input_trans = input.KeymapTranslator(
self.keymap,
invalid_cls='invalid-key',
character_cls='self-insert')
def get_buffer(self, encoding=None):
if encoding is None:
encoding = self.console.encoding
return unicode('').join(self.buffer).encode(encoding)
def get_unicode(self):
"""Return the current buffer as a unicode string."""
return unicode('').join(self.buffer)
def test():
from pyrepl.unix_console import UnixConsole
reader = Reader(UnixConsole())
reader.ps1 = "**> "
reader.ps2 = "/*> "
reader.ps3 = "|*> "
reader.ps4 = "\*> "
while reader.readline():
pass
if __name__ == '__main__':
test()
|
mit
| 4,925,212,020,770,701,000
| 31.19279
| 78
| 0.528263
| false
| 3.775551
| false
| false
| false
|
mmisiewicz/slask
|
limbo/plugins/pager.py
|
1
|
3469
|
import re
import urllib2
import json
def pager_response(text):
"""!oncall|!pager (add "link" for url to pager rotation page)"""
match = re.match('!oncall|!pager', text, re.IGNORECASE)
if not match:
return False
if "link" in match.string:
return "https://corpwiki.appnexus.com/x/xxsaAQ"
return maestro_pager_response() or "Not Found"
# r = requests.get(URL, auth=(username, password), verify=False)
# soup = BeautifulSoup(r.text)
# tables = soup.find_all('table', 'confluenceTable')
# table_call = tables[0].find_all('td')
# list_call = [i.text for i in table_call]
# reg = re.compile("(\d+)\D+(\d+)\D+(\d+)\D+(\d+)")
# def time_range(t):
# month = datetime.now().month
# day = datetime.now().day
# return (int(t[0]) < month <=int(t[2]) and int(t[3]) >= day) \
# or (int(t[0]) <= month < int(t[2]) and int(t[1]) <= day) \
# or (int(t[0]) <= month <= int(t[2]) and (int(t[3]) >= day >= int(t[1])))
#
# response = None
# for i in range(0, len(list_call), 3):
# match = reg.match(list_call[i])
# if time_range(match.groups()):
# response = "Primary: {}, Secondary: {}".format(list_call[i+1], list_call[i+2])
# return response or "Not Found"
# maestro pager code borrowed from data-bot.
def __join_oncall_info(user_infos):
""" does the joining across the rseponse from maestro3's usergroup map service
and the timeperiods service, returning a tuple3 of (username, timeperiod_name, hours)
where hours are on call for day_of_week. If hours is null or the user is deleted
an entry is not returned day_of_week is expected to be lower case"""
results = []
for user_info in user_infos:
results.append(user_info['username'])
# if not user_info['deleted']:
# # XXX: ignoring out of bounds for now
# period = periods[user_info['nagios_timeperiod_id']]
# on_call_timerange = period[day_of_week]
# if on_call_timerange:
# results.append((user_info['username'], period['timeperiod_name'], on_call_timerange))
return results
# def __get_timeperiods_dict():
# timeperiods_resp = urllib2.urlopen('http://maestro3-api.adnxs.net/nagios-timeperiod').read()
# periods = {}
# for period in json.loads(timeperiods_resp)['response']['nagios_timeperiods']:
# periods[period['id']] = period
# return periods
def maestro_pager_response():
# periods = __get_timeperiods_dict()
# day_of_week = datetime.now().strftime("%A").lower()
on_pager_resp = urllib2.urlopen('http://maestro3-api.adnxs.net/nagios-usergroup-map?nagios_usergroup_id=20&pager=1').read()
on_pagers = __join_oncall_info(json.loads(on_pager_resp)['response']['nagios_usergroup_maps'])
on_escalation_resp = urllib2.urlopen('http://maestro3-api.adnxs.net/nagios-usergroup-map?nagios_usergroup_id=20&escalation=1').read()
on_escalations = __join_oncall_info(json.loads(on_escalation_resp)['response']['nagios_usergroup_maps'])
on_pager_section = ','.join([' %s' % on_pager for on_pager in on_pagers])
on_escalation_section = ','.join([' %s' % on_escalation for on_escalation in on_escalations])
reply = '```Primary:%s\nSecondary:%s```' % (on_pager_section, on_escalation_section)
return reply
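# Added illustrative example (hypothetical usernames): the reply built above
# looks like
# ```Primary: alice, bob
# Secondary: carol```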
def on_message(msg, server):
text = msg.get("text", "")
return pager_response(text)
|
mit
| -5,478,028,948,386,106,000
| 42.911392
| 137
| 0.62237
| false
| 3.153636
| false
| false
| false
|
mathiasertl/django-xmpp-server-list
|
xmpplist/settings.py
|
1
|
8688
|
# This file is part of django-xmpp-server-list
# (https://github.com/mathiasertl/django-xmpp-server-list).
#
# django-xmpp-server-list is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# xmpplist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with django-xmpp-server-list. If not, see <http://www.gnu.org/licenses/>.
"""Django settings for django-xmpp-server-list project."""
import os
from datetime import timedelta
from celery.schedules import crontab
from django.contrib.messages import constants as messages
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
#'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3'
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'xmpplist.sqlite3', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
'ATOMIC_REQUESTS': True,
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = [
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
]
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware'
]
ROOT_URLCONF = 'xmpplist.urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
# style
'bootstrapform',
'core',
'server',
'account',
'api',
'confirm',
)
if DEBUG:
LOG_LEVEL = 'DEBUG'
else:
LOG_LEVEL = 'ERROR'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
LOGIN_URL = '/user/login/'
LOGIN_REDIRECT_URL = '/user/'
AUTH_USER_MODEL = 'account.LocalUser'
DEFAULT_FROM_EMAIL = 'test@example.com'
INTERNAL_IPS = ('127.0.0.1')
USE_HTTPS = False
USE_IP4 = True
USE_IP6 = True
GEOIP_CONFIG_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'geoip'))
CONFIRMATION_TIMEOUT = timedelta(hours=48)
CERTIFICATES_PATH = 'static/certs'
LOGOUT_REDIRECT_URL = 'home' # only used when next queryparam is not set
# Message tags updated to match bootstrap alert classes
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
##########
# Celery #
##########
CELERY_BEAT_SCHEDULE = {
'refresh geoip': {
'task': 'core.tasks.refresh_geoip_database',
'schedule': crontab(hour=3, minute=0, day_of_week=1),
},
'verify servers': {
'task': 'server.tasks.verify_servers',
'schedule': crontab(hour=3, minute=10),
},
'remove old servers': {
'task': 'server.tasks.remove_old_servers',
'schedule': crontab(hour=3, minute=5),
},
'moderation mails': {
'task': 'server.tasks.moderation_mails',
'schedule': crontab(hour=8, minute=0),
},
}
try:
from .localsettings import * # NOQA
except ImportError:
pass
GEOIP_COUNTRY_DB = os.path.join(GEOIP_CONFIG_ROOT, 'GeoLite2-Country.mmdb')
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
},
'simple': {
'format': '%(levelname)-8s %(message)s',
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'mail_admins': {
'level': LOG_LEVEL,
'class': 'django.utils.log.AdminEmailHandler',
'formatter': 'simple',
'filters': ['require_debug_false'],
},
'console': {
'level': LOG_LEVEL,
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'server': {
'handlers': ['console'],
'level': LOG_LEVEL,
'propagate': False,
},
'sleekxmpp': {
'handlers': ['console'],
'level': 'CRITICAL',
'propagate': False,
},
'xmpp': {
'handlers': ['console'],
'level': LOG_LEVEL,
'propagate': False,
},
},
'root': {
'handlers': ['console'],
'level': 'INFO',
}
}
|
gpl-3.0
| 222,943,038,920,344,300
| 30.478261
| 108
| 0.637776
| false
| 3.735168
| false
| false
| false
|
muharif/vpp
|
vpp-api/java/jvpp/gen/jvpp_gen.py
|
1
|
5128
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import importlib
import sys
import callback_gen
import notification_gen
import dto_gen
import jvpp_callback_facade_gen
import jvpp_future_facade_gen
import jvpp_impl_gen
import jvpp_c_gen
import util
# Invocation:
# ~/Projects/vpp/vpp-api/jvpp/gen$ mkdir -p java/org/openvpp/jvpp && cd java/org/openvpp/jvpp
# ~/Projects/vpp/vpp-api/jvpp/gen/java/org/openvpp/jvpp$ ../../../../jvpp_gen.py -idefs_api_vpp_papi.py
#
# Compilation:
# ~/Projects/vpp/vpp-api/jvpp/gen/java/org/openvpp/jvpp$ javac *.java dto/*.java callback/*.java
#
# where
# defs_api_vpp_papi.py - vpe.api in python format (generated by vppapigen)
from util import vpp_2_jni_type_mapping
parser = argparse.ArgumentParser(description='VPP Java API generator')
parser.add_argument('-i', action="store", dest="inputfile")
args = parser.parse_args()
sys.path.append(".")
inputfile = args.inputfile.replace('.py', '')
cfg = importlib.import_module(inputfile, package=None)
# FIXME: functions unsupported due to problems with vpe.api
def is_supported(f_name):
return f_name not in {'vnet_ip4_fib_counters', 'vnet_ip6_fib_counters'}
def is_request_field(field_name):
return field_name not in {'_vl_msg_id', 'client_index', 'context'}
def is_response_field(field_name):
return field_name not in {'_vl_msg_id'}
def get_args(t, filter):
arg_list = []
for i in t:
if not filter(i[1]):
continue
arg_list.append(i[1])
return arg_list
def get_types(t, filter):
types_list = []
c_types_list = []
lengths_list = []
for i in t:
if not filter(i[1]):
continue
if len(i) == 3: # array type
types_list.append(vpp_2_jni_type_mapping[i[0]] + 'Array')
c_types_list.append(i[0] + '[]')
lengths_list.append((i[2], False))
elif len(i) == 4: # variable length array type
types_list.append(vpp_2_jni_type_mapping[i[0]] + 'Array')
c_types_list.append(i[0] + '[]')
lengths_list.append((i[3], True))
else: # primitive type
types_list.append(vpp_2_jni_type_mapping[i[0]])
c_types_list.append(i[0])
lengths_list.append((0, False))
return types_list, c_types_list, lengths_list
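# Added illustrative note (field names are hypothetical, not from vpe.api):
# for an entry like ['my_msg', ['u32', 'client_index'], ['u8', 'name', 64]],
# get_types() treats the 2-element field as a scalar and the 3-element field
# as a fixed-length array of 64; a 4-element field would be a variable-length
# array whose length marker is taken from its last item.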
def get_definitions():
# Pass 1
func_list = []
func_name = {}
for a in cfg.vppapidef:
if not is_supported(a[0]):
continue
java_name = util.underscore_to_camelcase(a[0])
# For replies include all the arguments except message_id
if util.is_reply(java_name):
types, c_types, lengths = get_types(a[1:], is_response_field)
func_name[a[0]] = dict(
[('name', a[0]), ('java_name', java_name),
('args', get_args(a[1:], is_response_field)), ('full_args', get_args(a[1:], lambda x: True)),
('types', types), ('c_types', c_types), ('lengths', lengths)])
# For requests skip message_id, client_id and context
else:
types, c_types, lengths = get_types(a[1:], is_request_field)
func_name[a[0]] = dict(
[('name', a[0]), ('java_name', java_name),
('args', get_args(a[1:], is_request_field)), ('full_args', get_args(a[1:], lambda x: True)),
('types', types), ('c_types', c_types), ('lengths', lengths)])
# Indexed by name
func_list.append(func_name[a[0]])
return func_list, func_name
func_list, func_name = get_definitions()
base_package = 'org.openvpp.jvpp'
dto_package = 'dto'
callback_package = 'callback'
notification_package = 'notification'
future_package = 'future'
# TODO find better package name
callback_facade_package = 'callfacade'
dto_gen.generate_dtos(func_list, base_package, dto_package, args.inputfile)
jvpp_impl_gen.generate_jvpp(func_list, base_package, dto_package, args.inputfile)
callback_gen.generate_callbacks(func_list, base_package, callback_package, dto_package, args.inputfile)
notification_gen.generate_notification_registry(func_list, base_package, notification_package, callback_package, dto_package, args.inputfile)
jvpp_c_gen.generate_jvpp(func_list, args.inputfile)
jvpp_future_facade_gen.generate_jvpp(func_list, base_package, dto_package, callback_package, notification_package, future_package, args.inputfile)
jvpp_callback_facade_gen.generate_jvpp(func_list, base_package, dto_package, callback_package, notification_package, callback_facade_package, args.inputfile)
|
apache-2.0
| 5,746,196,769,455,145,000
| 35.368794
| 157
| 0.656786
| false
| 3.165432
| false
| false
| false
|
ferdyrod/basic-ecommerce
|
ecommerce/settings/local.py
|
1
|
2918
|
# Django settings for ecommerce project.
from os.path import dirname, abspath, join
PROJECT_ROOT = dirname(dirname(dirname(abspath(__file__))))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'ecommerce.sqlite', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = join(PROJECT_ROOT, 'static', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = join(PROJECT_ROOT, 'static', 'static-only')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
join(PROJECT_ROOT, 'static', 'static'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_DIRS = (
join(PROJECT_ROOT, 'static', 'templates')
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'south',
'registration',
'products',
'contact',
'cart',
'profiles',
'orders',
)
ACCOUNT_ACTIVATION_DAYS = 7
AUTH_PROFILE_MODULE = 'profiles.profile'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'Your_Email_Here'
EMAIL_HOST_PASSWORD = 'Your_Password_Here'
EMAIL_USE_TLS = True
|
apache-2.0
| 275,103,391,602,662,940
| 31.422222
| 127
| 0.665182
| false
| 3.602469
| false
| false
| false
|
google/iree-llvm-sandbox
|
runners/test/python/experts.py
|
1
|
3643
|
#
# import time
from typing import List
from search import *
from transforms import *
class Assignments:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Expert:
def __init__(self, **asignments):
self.assignments = Assignments(**asignments)
def _pre_transform(self, module, boilerplate_code):
# TODO: Allow cloning functions from one module to another.
# Atm we have to resort to string concatenation.
module = Module.parse(
str(module.operation.regions[0].blocks[0].operations[0].operation) +
boilerplate_code)
return module
def __call__(self, module, boilerplate_code):
module = self._pre_transform(module, boilerplate_code)
for transform in self.transforms():
transform(module, 'matmul_on_tensors')
return module
def transforms(self) -> List[Transform]:
'Abstract method that returns a list of transforms for a given expert.'
class ExpertCompiler1(Expert):
variables = {
'sizes1': TilingSizesVariable,
'sizes2': TilingSizesVariable,
'sizes3': TilingSizesVariable,
'pad': BoolVariable,
'hoist_padding': HoistPaddingVariable,
}
def transforms(self) -> List[Transform]:
v = self.assignments
return [
TileAndPad('matmul_on_tensors', 'linalg.matmul', v.sizes1),
TileAndPad('matmul_on_tensors', 'linalg.matmul', v.sizes2),
TileAndPad(
'matmul_on_tensors',
'linalg.matmul',
v.sizes3,
pad=v.pad,
hoist_padding=v.hoist_padding),
Vectorize('matmul_on_tensors', 'linalg.matmul'),
Bufferize(),
LowerToLLVM(),
]
class ExpertCompiler2(Expert):
variables = {
'sizes1': TilingSizesVariable,
'sizes2': TilingSizesVariable,
'sizes3': TilingSizesVariable,
}
def transforms(self) -> List[Transform]:
v = self.assignments
return [
Fuse('matmul_on_tensors', 'linalg.matmul', v.sizes1),
Fuse('matmul_on_tensors', 'linalg.matmul', v.sizes2),
TileAndPad('matmul_on_tensors', 'linalg.matmul', v.sizes3),
Vectorize('matmul_on_tensors', 'linalg.matmul'),
Vectorize('matmul_on_tensors', 'linalg.fill'),
Bufferize(),
LowerToLLVM(),
]
class ExpertCompiler3(Expert):
variables = {
'sizes1': TilingSizesVariable,
'sizes2': TilingSizesVariable,
'sizes3': TilingSizesVariable,
'pad': BoolVariable,
'hoist_padding': HoistPaddingVariable,
}
def transforms(self) -> List[Transform]:
v = self.assignments
return [
Fuse('matmul_on_tensors', 'linalg.matmul', v.sizes1),
TileAndPad(
'matmul_on_tensors',
'linalg.matmul',
v.sizes2,
pad=v.pad,
hoist_padding=v.hoist_padding),
Vectorize('matmul_on_tensors', 'linalg.matmul'),
TileAndPad('matmul_on_tensors', 'linalg.fill', v.sizes3),
Vectorize('matmul_on_tensors', 'linalg.fill'),
Bufferize(),
LowerToLLVM(),
]
class ExpertSparseCompiler(Expert):
variables = {'options': str}
def transforms(self) -> List[Transform]:
v = self.assignments
self.options = v.options
return [
Sparsify(v.options),
]
expert_compilerr_1 = ExpertCompiler1(
sizes1=[256, 256, 256],
sizes2=[64, 64, 64],
sizes3=[8, 16, 32],
pad=True,
hoist_padding=2)
expert_compilerr_2 = ExpertCompiler2(
sizes1=[256, 256], sizes2=[8, 16], sizes3=[0, 0, 32])
expert_compilerr_3 = ExpertCompiler3(
sizes1=[256, 256],
sizes2=[8, 16, 32],
sizes3=[8, 32],
pad=True,
hoist_padding=3)
|
apache-2.0
| 5,863,396,034,825,331,000
| 25.208633
| 76
| 0.622289
| false
| 3.513018
| false
| false
| false
|
ubvu/orcid-monitor
|
orcid-usage/analyze.py
|
1
|
6196
|
import codecs
import os
import sys
from multiprocessing import Process, Queue
from lxml import etree
import tablib
COLNAME_MODIFIED_DATE = 'last modified date'
COLNAME_CREATION_DATE = 'creation date'
COLNAME_KEYWORDS = 'keywords'
COLNAME_EMAIL = 'email'
COLNAME_WORKS = 'works'
COLNAME_FUNDING = 'funding'
COLNAME_AFFILIATIONS = 'affiliations'
COLNAME_OTHER_NAMES = 'other-names'
COLNAME_CREDIT_NAME = 'credit-name'
COLNAME_FAMILY_NAME = 'family-name'
COLNAME_ORCID = 'orcid'
COLNAME_GIVEN_NAMES = 'given-names'
COLUMN_INTERNAL = 'Internal (by disam. source id)'
nsmap = {
'x': 'http://www.orcid.org/ns/orcid'
}
def save_to_file(persons, dest):
column_names = [
COLNAME_ORCID,
COLNAME_GIVEN_NAMES,
COLNAME_FAMILY_NAME,
COLNAME_CREDIT_NAME,
COLNAME_OTHER_NAMES,
COLNAME_AFFILIATIONS,
COLUMN_INTERNAL,
COLNAME_FUNDING,
COLNAME_WORKS,
COLNAME_EMAIL,
COLNAME_KEYWORDS,
COLNAME_CREATION_DATE,
COLNAME_MODIFIED_DATE,
]
# Add column names for (initially unknown) external identifiers
all_col_names = {x for person in persons for x in person.keys()}
ext_id_col_names = {x for x in all_col_names if x not in column_names}
column_names.extend(ext_id_col_names)
dataset = tablib.Dataset(column_names, title='ORCID analyse')
for person in persons:
person_data = map(lambda x: person.get(x, ''), column_names)
dataset.append(person_data)
file_path = os.path.join(os.getcwd(), 'data', dest + '.csv')
with open(file_path, 'wb') as f:
f.write(dataset.csv)
with open('organization_ids.txt') as f:
internal_org_ids = {tuple(line.rstrip('\r\n').split(',')) for line in f}
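# Added note (assumption about the input format): organization_ids.txt is
# expected to hold one "source,identifier" pair per line, e.g. a line such as
#   RINGGOLD,1234
# becomes the tuple ('RINGGOLD', '1234') in internal_org_ids, matching the
# (disambiguation-source, disambiguated-organization-identifier) pairs
# checked further down.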
def parse_person(filehandle):
person = {}
root = etree.parse(filehandle).getroot()
person[COLNAME_ORCID] = root.xpath('//x:orcid-identifier/x:path/text()', namespaces=nsmap)[0]
print person[COLNAME_ORCID]
#print sys.getsizeof(root)
person[COLNAME_AFFILIATIONS] = len(root.xpath('//x:affiliation[x:type[text()=\'employment\']]', namespaces=nsmap))
person[COLNAME_FUNDING] = len(root.xpath('//x:funding', namespaces=nsmap))
person[COLNAME_WORKS] = len(root.xpath('//x:orcid-works/x:orcid-work', namespaces=nsmap))
given_name_elems = root.xpath('//x:personal-details/x:given-names/text()', namespaces=nsmap)
if len(given_name_elems) > 0:
person[COLNAME_GIVEN_NAMES] = given_name_elems[0]
person[COLNAME_OTHER_NAMES] = len(root.xpath('//x:personal-details/x:other-names/x:other-name', namespaces=nsmap))
family_name_elems = root.xpath('//x:personal-details/x:family-name/text()', namespaces=nsmap)
if len(family_name_elems) > 0:
person[COLNAME_FAMILY_NAME] = family_name_elems[0]
credit_name_elems = root.xpath('//x:personal-details/x:credit-name/text()', namespaces=nsmap)
if len(credit_name_elems) > 0:
person[COLNAME_CREDIT_NAME] = credit_name_elems[0]
email_elems = root.xpath('//x:contact-details/x:email/text()', namespaces=nsmap)
if len(email_elems) > 0:
person[COLNAME_EMAIL] = email_elems[0]
keywords_elems = root.xpath('//x:keywords/x:keyword', namespaces=nsmap)
person[COLNAME_KEYWORDS] = 'No' if len(keywords_elems) == 0 else 'Yes'
person[COLNAME_CREATION_DATE] = root.xpath('//x:submission-date/text()', namespaces=nsmap)[0][:10]
person[COLNAME_MODIFIED_DATE] = root.xpath('//x:last-modified-date/text()', namespaces=nsmap)[0][:10]
for ext_id_node in root.xpath('//x:external-identifier', namespaces=nsmap):
source = ext_id_node.find('x:external-id-common-name', nsmap).text
reference = ext_id_node.find('x:external-id-reference', nsmap).text
person[source] = reference
employment_affiliations = root.xpath('//x:affiliation[x:type[text()=\'employment\']]', namespaces=nsmap)
person[COLNAME_AFFILIATIONS] = len(employment_affiliations)
person[COLUMN_INTERNAL] = 'N'
# find the source without an enddate
curr_affls = 0
for affiliation in employment_affiliations:
disam_org_identifier = affiliation.xpath(
'.//x:disambiguated-organization/x:disambiguated-organization-identifier', namespaces=nsmap)
disam_org_source = affiliation.xpath('.//x:disambiguated-organization/x:disambiguation-source',
namespaces=nsmap)
org_name = affiliation.xpath('.//x:organization/x:name/text()', namespaces=nsmap)[0]
org_name = org_name.lower()
end_date = affiliation.xpath('.//x:end-date', namespaces=nsmap)
end_year = affiliation.xpath('.//x:end-date/x:year/text()', namespaces=nsmap)
if len(end_date) == 0:
colname = 'affl' + str(curr_affls)
if org_name.find('amsterdam') > -1 or org_name.find('vu') > -1 or org_name.find('free') > -1 or org_name.find('vrije') > -1:
person[colname] = org_name
curr_affls = curr_affls + 1
# check for RINNGOLD ID and strings VU University or Vrije Universiteit
if len(end_date) == 0: # current employer
print org_name
if disam_org_identifier and disam_org_source:
if (disam_org_source[0].text, disam_org_identifier[0].text) in internal_org_ids:
person[COLUMN_INTERNAL] = 'Y'
if (org_name.find('vu university') > -1 and org_name.find('vu university medical center')==-1) or org_name.find('vrije universiteit amsterdam') > -1 or org_name.find('free university amsterdam') > -1:
print '****YES****'
person[COLUMN_INTERNAL] = 'Y'
return person
if __name__ == '__main__':
try:
path = sys.argv[1]
except:
path = '0217'
source = os.path.join(os.getcwd(), 'data', 'downloads', path)
persons = []
for fn in os.listdir(source):
f = codecs.open(os.path.join(source, fn), 'r', 'utf-8')
# with open(os.path.join(source, fn), 'r') as f:
# result = executor.submit(persons.append(parse_person(f)), *args, **kwargs).result()
persons.append(parse_person(f))
f.close
save_to_file(persons, path)
|
mit
| 147,770,138,063,816,770
| 40.583893
| 212
| 0.643157
| false
| 3.187243
| false
| false
| false
|
szendrei/django-unleashed
|
codesnippets/snippets/models.py
|
1
|
1482
|
from django.db import models
from pygments.lexers import get_all_lexers
from django.core.urlresolvers import reverse
LEXERS = [item for item in get_all_lexers() if item[1]]
LANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])
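# Added illustrative note: get_all_lexers() yields tuples of roughly the form
# (name, aliases, filename_patterns, mimetypes), e.g. ('Python', ('python',
# 'py', ...), ...), so each LANGUAGE_CHOICES entry ends up as a pair like
# ('python', 'Python') -- (first alias, display name).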
class Snippet(models.Model):
create_date = models.DateTimeField('created date', auto_now_add=True)
title = models.CharField(max_length=63, blank=True, default='')
code = models.TextField()
language = models.CharField(choices=LANGUAGE_CHOICES, default='python',
max_length=100)
author = models.CharField(max_length=32, blank=True, default='Anonymus')
slug = models.CharField(max_length=63, unique=True, blank=True, default='')
class Meta:
ordering = ['-create_date']
def __str__(self):
if len(self.title):
return self.title
else:
return "{} code created at {}".format(self.language,
self.create_date)
def get_absolute_url(self):
return reverse('snippets_snippet_detail',kwargs={'slug':self.slug})
def get_category_url(self):
return reverse('snippets_snippet_category_list',
kwargs={'language':self.language})
def get_update_url(self):
return reverse('snippets_snippet_update',kwargs={'slug':self.slug})
def get_delete_url(self):
return reverse('snippets_snippet_delete',kwargs={'slug':self.slug})
|
gpl-3.0
| -2,873,390,856,656,632,000
| 37
| 79
| 0.62888
| false
| 4.005405
| false
| false
| false
|
enen92/script.sportscenter
|
resources/lib/calendar.py
|
1
|
14046
|
import xbmc,xbmcgui,xbmcaddon,xbmcplugin
import urllib
import thesportsdb
import datetime
import os
import re
import threading
from random import randint
from centerutils.common_variables import *
from centerutils.datemanipulation import *
import competlist as competlist
import teamview as teamview
import contextmenubuilder
import tweetbuild
def start(data_list):
window = dialog_calendar('DialogCalendar.xml',addonpath,'Default',str(data_list))
window.doModal()
class dialog_calendar(xbmcgui.WindowXML):
def __init__( self, *args, **kwargs ):
xbmcgui.WindowXML.__init__(self)
self.date_string = eval(args[3])
def onInit(self):
self.getControl(911).setImage(addon_fanart)
self.getControl(333).setLabel('Calendar View')
self.ignored_leagues = os.listdir(ignoredleaguesfolder)
self.rmleaguescalendar = os.listdir(ignoreleaguecalendar)
#Change background if custom is defined
if settings.getSetting('calendar-background-type') == '1' and settings.getSetting('calendar-background-custom') != '':
self.getControl(912).setImage(settings.getSetting('calendar-background-custom'))
#Populate week days
menu = []
#grab datetime now and transform into a timezone object based on user timezone
date_now = datetime.datetime.now()
date_now_mytz = pytz.timezone(str(pytz.timezone(str(my_location)))).localize(date_now)
#convert datetime timezone object to the timezone of the database
date_now_tsdb = date_now_mytz.astimezone(my_location)
menu.append(('Today, %s' % (date_now_mytz.day),'%s-%s-%s' % (str(date_now_tsdb.year),str(date_now_tsdb.month),str(date_now_tsdb.day))))
for i in range(7):
date_now_mytz += datetime.timedelta(days=1)
date_now_tsdb += datetime.timedelta(days=1)
if i == 0: day_string ='%s, %s' % ('Tomorrow',date_now_mytz.day)
else:
day_string = '%s, %s' % (get_weekday(date_now_mytz.weekday()),date_now_mytz.day)
date_string = '%s-%s-%s' % (str(date_now_tsdb.year),str(date_now_tsdb.month),str(date_now_tsdb.day))
menu.append((day_string,date_string))
self.getControl(983).reset()
for data_string,date in menu:
menu_entry = xbmcgui.ListItem(data_string)
menu_entry.setProperty('menu_entry', data_string)
menu_entry.setProperty('entry_date', date)
self.getControl(983).addItem(menu_entry)
#use this to direct navigation to a given date! -TODO
threading.Thread(name='watcher', target=self.watcher).start()
if not self.date_string:
self.setFocusId(983)
self.getControl(983).selectItem(0)
self.date_string = menu[0][1]
self.fill_calendar(self.date_string)
def fill_calendar(self,datestring):
self.getControl(93).setVisible(False)
items_to_add = []
self.getControl(94).setPercent(0)
self.getControl(92).setImage(os.path.join(addonpath,art,'busy.png'))
xbmc.executebuiltin("SetProperty(loading,1,home)")
self.getControl(987).reset()
#next matches stuff
event_next_list = thesportsdb.Schedules(tsdbkey).eventsday(datestring,None,None)["events"]
j = 0
if event_next_list:
total_events = len(event_next_list)
for event in event_next_list:
event_sport = thesportsdb.Events().get_sport(event)
event_id = thesportsdb.Events().get_eventid(event)
#check if event belongs to blocked sport strSport
if event_sport == 'Soccer' and settings.getSetting('enable-football') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
elif event_sport == 'Basketball' and settings.getSetting('enable-basketball') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
elif event_sport == 'Ice Hockey' and settings.getSetting('enable-icehockey') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
elif event_sport == 'Baseball' and settings.getSetting('enable-baseball') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
elif event_sport == 'Motorsport' and settings.getSetting('enable-motorsport') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
elif event_sport == 'Rugby' and settings.getSetting('enable-rugby') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
elif event_sport == 'Golf' and settings.getSetting('enable-golf') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
elif event_sport == 'American Football' and settings.getSetting('enable-amfootball') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
else:
#get league id and check if the league is not ignored
league_id = thesportsdb.Events().get_leagueid(event)
if ((league_id + '.txt') in self.ignored_leagues and settings.getSetting('calendar-disabledleagues') == 'true') or ((league_id + '.txt') in self.rmleaguescalendar): pass
else:
event_fullname = thesportsdb.Events().get_eventtitle(event)
event_race = thesportsdb.Events().get_racelocation(event)
event_league = thesportsdb.Events().get_league(event)
event_sport = thesportsdb.Events().get_sport(event)
if event_sport == 'Soccer': sport_logo = os.path.join(addonpath,art,'loadingsports','soccer.png')
elif event_sport == 'Basketball': sport_logo = os.path.join(addonpath,art,'loadingsports','basketball.png')
elif event_sport == 'Ice Hockey': sport_logo = os.path.join(addonpath,art,'loadingsports','ice%20hockey.png')
elif event_sport == 'Baseball': sport_logo = os.path.join(addonpath,art,'loadingsports','baseball.png')
elif event_sport == 'Motorsport': sport_logo = os.path.join(addonpath,art,'loadingsports','motorsport.png')
elif event_sport == 'Rugby': sport_logo = os.path.join(addonpath,art,'loadingsports','rugby.png')
elif event_sport == 'Golf': sport_logo = os.path.join(addonpath,art,'loadingsports','golf.png')
elif event_sport == 'American Football': sport_logo = os.path.join(addonpath,art,'loadingsports','american%20football.png')
fmt = "%y-%m-%d"
fmt_time = "%H:%M"
event_datetime = thesportsdb.Events().get_datetime_object(event)
if event_datetime:
#datetime object conversion goes here
db_time = pytz.timezone(str(pytz.timezone(tsdbtimezone))).localize(event_datetime)
event_datetime=db_time.astimezone(my_location)
event_strtime = thesportsdb.Events().get_time(event)
if event_strtime and event_strtime != 'null' and event_strtime != 'None':
event_time = event_datetime.strftime(fmt_time)
if len(str(event_datetime.minute)) == 1: event_minute = str(event_datetime.minute) + '0'
else: event_minute = str(event_datetime.minute)
event_order = int(str(event_datetime.hour) + str(event_minute))
else:
event_time = 'N/A'
event_order = 30000
else:
event_time = 'N/A'
event_order = 30000
if event_race:
home_team_logo = os.path.join(addonpath,art,'raceflag.png')
event_name = thesportsdb.Events().get_eventtitle(event)
event_round = ''
else:
home_team_id = thesportsdb.Events().get_hometeamid(event)
home_team_dict = thesportsdb.Lookups(tsdbkey).lookupteam(home_team_id)["teams"][0]
if settings.getSetting('team-naming')=='0': home_team_name = thesportsdb.Teams().get_name(home_team_dict)
else: team_name = home_team_name = thesportsdb.Teams().get_alternativefirst(home_team_dict)
home_team_logo = thesportsdb.Teams().get_badge(home_team_dict)
stadium_fanart = thesportsdb.Teams().get_stadium_thumb(home_team_dict)
away_team_id = thesportsdb.Events().get_awayteamid(event)
away_team_dict = thesportsdb.Lookups(tsdbkey).lookupteam(away_team_id)["teams"][0]
if settings.getSetting('team-naming')=='0': away_team_name = thesportsdb.Teams().get_name(away_team_dict)
else: away_team_name = thesportsdb.Teams().get_alternativefirst(away_team_dict)
away_team_logo = thesportsdb.Teams().get_badge(away_team_dict)
event_round = thesportsdb.Events().get_round(event)
if event_round and event_round != '0':
round_label = ' - Round ' + str(event_round)
event_league = event_league + round_label
game = xbmcgui.ListItem(event_fullname)
game.setProperty('HomeTeamLogo',home_team_logo)
game.setProperty('league',event_league)
game.setProperty('sport_logo',sport_logo)
game.setProperty('sport',event_sport)
game.setProperty('event_time',event_time)
game.setProperty('event_order',str(event_order))
game.setProperty('event_id',event_id)
if not event_race:
if ' ' in home_team_name:
if len(home_team_name) > 12: game.setProperty('HomeTeamLong',home_team_name)
else: game.setProperty('HomeTeamShort',home_team_name)
else: game.setProperty('HomeTeamShort',home_team_name)
game.setProperty('AwayTeamLogo',away_team_logo)
if ' ' in away_team_name:
if len(away_team_name) > 12: game.setProperty('AwayTeamLong',away_team_name)
else: game.setProperty('AwayTeamShort',away_team_name)
else: game.setProperty('AwayTeamShort',away_team_name)
game.setProperty('StadiumThumb',stadium_fanart)
game.setProperty('vs','VS')
try: game.setProperty('date',event_datetime.strftime(fmt))
except: pass
if event_race:
game.setProperty('EventName',event_name)
try:
date_now_mytz = pytz.timezone(str(pytz.timezone(str(my_location)))).localize(datetime.datetime.now())
if event_datetime > date_now_mytz:
hour_diff = (event_datetime-date_now_mytz).seconds/3600
else: hour_diff = ((date_now_mytz-event_datetime).seconds/3600)*(-1)
if settings.getSetting('calendar-disabledpassed') == 'true' and hour_diff > int(settings.getSetting('calendar-disabledpassed-delay')): pass
else: items_to_add.append(game)
except:items_to_add.append(game)
#try to set progress bar here
#for the events presented
j+=1
self.getControl(94).setPercent(int(float(j)/total_events*100))
#for the events not presented
j+=1
self.getControl(94).setPercent(int(float(j)/total_events*100))
#order the items here by start time
time_array = []
items_to_add_processed = []
for item in items_to_add:
time_array.append(int(item.getProperty('event_order')))
for timestmp in sorted(time_array):
for item in items_to_add:
itemorder = int(item.getProperty('event_order'))
if itemorder == timestmp:
items_to_add_processed.append(item)
items_to_add.remove(item)
if items_to_add_processed: self.getControl(987).addItems(items_to_add_processed)
else:
self.getControl(93).setVisible(True)
self.getControl(93).setLabel('No events available!')
else:
self.getControl(93).setVisible(True)
self.getControl(93).setLabel('No events available!')
xbmc.executebuiltin("ClearProperty(loading,Home)")
xbmc.executebuiltin("ClearProperty(lastmatchview,Home)")
xbmc.executebuiltin("ClearProperty(plotview,Home)")
xbmc.executebuiltin("ClearProperty(bannerview,Home)")
xbmc.executebuiltin("ClearProperty(nextview,Home)")
xbmc.executebuiltin("ClearProperty(videosview,Home)")
xbmc.executebuiltin("ClearProperty(jerseyview,Home)")
xbmc.executebuiltin("ClearProperty(badgeview,Home)")
xbmc.executebuiltin("ClearProperty(newsview,Home)")
xbmc.executebuiltin("SetProperty(nextmatchview,1,home)")
settings.setSetting("view_type_league",'nextmatchview')
self.getControl(2).setLabel("League: NextMatchView")
def watcher(self,):
while not xbmc.abortRequested:
rmleaguescalendar = os.listdir(ignoreleaguecalendar)
if self.rmleaguescalendar != rmleaguescalendar:
self.rmleaguescalendar = rmleaguescalendar
self.fill_calendar(self.date_string)
xbmc.sleep(200)
def onAction(self,action):
if action.getId() == 92 or action.getId() == 10:
self.close()
elif action.getId() == 117: #contextmenu
if xbmc.getCondVisibility("Control.HasFocus(987)"): container = 987
self.specific_id = self.getControl(container).getSelectedItem().getProperty('event_id')
contextmenubuilder.start(['calendaritem',self.specific_id])
def onClick(self,controlId):
if controlId == 983:
listControl = self.getControl(controlId)
selected_date=listControl.getSelectedItem().getProperty('entry_date')
self.date_string = selected_date
self.fill_calendar(selected_date)
elif controlId == 980 or controlId == 984 or controlId == 985 or controlId == 981:
self.team = self.getControl(controlId).getSelectedItem().getProperty('team_id')
teamview.start([self.team,self.sport,'','plotview'])
elif controlId == 2:
active_view_type = self.getControl(controlId).getLabel()
if active_view_type == "League: PlotView":
self.setvideosview()
elif active_view_type == "League: VideosView":
self.setbannerview()
elif active_view_type == "League: BannerView":
self.setbadgeview()
elif active_view_type == "League: BadgeView":
self.setjerseyview()
elif active_view_type == "League: JerseyView":
self.setnewsview()
elif active_view_type == "League: NewsView":
self.setnextmatchview()
elif active_view_type == "League: NextMatchView":
self.setlastmatchview()
elif active_view_type == "League: LastMatchView":
self.setplotview()
elif controlId == 989:
youtube_id = self.getControl(989).getSelectedItem().getProperty('video_id')
xbmc.executebuiltin('PlayMedia(plugin://plugin.video.youtube/play/?video_id='+youtube_id+')')
elif controlId == 986:
news_content = self.getControl(986).getSelectedItem().getProperty('content')
news_title = self.getControl(986).getSelectedItem().getProperty('title')
news_image = self.getControl(986).getSelectedItem().getProperty('news_img')
self.getControl(939).setImage(news_image)
self.getControl(937).setText(news_content)
self.getControl(938).setLabel(news_title)
|
gpl-2.0
| -670,272,421,011,220,700
| 45.664452
| 174
| 0.698206
| false
| 3.177828
| false
| false
| false
|
ramineni/my_congress
|
congress/tests/datasources/fakes.py
|
1
|
6195
|
# Copyright (c) 2014 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
class NovaFakeClient(mock.MagicMock):
# TODO(rajdeepd): Replace Fake with mocks directly in test_neutron_driver
def __init__(self, *args, **kwargs):
super(NovaFakeClient, self).__init__(*args, **kwargs)
self.servers = mock.MagicMock()
self.servers.list.return_value = self.get_server_list()
self.flavors = mock.MagicMock()
self.flavors.list.return_value = self.get_flavor_list()
self.hosts = mock.MagicMock()
self.hosts.list.return_value = self.get_host_list()
self.services = mock.MagicMock()
self.services.list.return_value = self.get_service_list()
self.availability_zones = mock.MagicMock()
self.availability_zones.list.return_value = self.get_zone_list()
def get_mock_server(self, id, name, host_id, status, tenant_id, user_id,
flavor, image, zone=None, host_name=None):
server = mock.MagicMock()
server.id = id
server.hostId = host_id
server.tenant_id = tenant_id
server.user_id = user_id
server.status = status
server.name = name
server.image = image
server.flavor = flavor
if zone is not None:
setattr(server, 'OS-EXT-AZ:availability_zone', zone)
else:
# This ensures that the magic mock raises an AttributeError
delattr(server, 'OS-EXT-AZ:availability_zone')
if host_name is not None:
setattr(server, 'OS-EXT-SRV-ATTR:hypervisor_hostname',
host_name)
else:
# This ensures that the magic mock raises an AttributeError
delattr(server, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
return server
def get_server_list(self):
server_one = (
self.get_mock_server(1234, 'sample-server',
"e4d909c290d0fb1ca068ffaddf22cbd0",
'BUILD',
'50e14867-7c64-4ec9-be8d-ed2470ca1d24',
'33ea0494-2bdf-4382-a445-9068997430b9',
{"id": 1}, {"id": 2}, 'default', 'host1'))
server_two = (
self.get_mock_server(5678, 'sample-server2',
"9e107d9d372bb6826bd81d3542a419d6",
'ACTIVE',
'50e14867-7c64-4ec9-be8d-ed2470ca1d24',
'33ea0494-2bdf-4382-a445-9068997430b9',
{"id": 1}, {"id": 2}))
server_three = (
self.get_mock_server(9012, 'sample-server3',
"9e107d9d372bb6826bd81d3542a419d6",
'ACTIVE',
'50e14867-7c64-4ec9-be8d-ed2470ca1d24',
'33ea0494-2bdf-4382-a445-9068997430b9',
{"id": 1}, {"id": 2}, 'foo', 'host2'))
return [server_one, server_two, server_three]
def get_flavor(self, id, name, vcpus, ram, disk, ephemeral, rxtx_factor):
f = mock.MagicMock()
f.id = id
f.name = name
f.vcpus = vcpus
f.ram = ram
f.disk = disk
f.ephemeral = ephemeral
f.rxtx_factor = rxtx_factor
return f
def get_flavor_list(self):
flavor_one = self.get_flavor(1, "256 MB Server", 1, 256, 10, 10, 1.0)
flavor_two = self.get_flavor(2, "512 MB Server", 2, 512, 20, 20, 1.0)
flavor_three = self.get_flavor(3, "128 MB Server", 4, 128, 0, 0, 3.0)
flavor_four = self.get_flavor(4, "1024 MB Server", 3, 1024, 10, 10,
2.0)
return [flavor_one, flavor_two, flavor_three, flavor_four]
def get_host(self, host_name, service, zone):
h = mock.MagicMock()
h.host_name = host_name
h.service = service
h.zone = zone
return h
def get_host_list(self):
h_one = self.get_host('host1', 'nova-compute', 'nova1')
h_two = self.get_host('host2', 'nova-cert', 'nova1')
return [h_one, h_two]
def get_service(self, id, binary, host, zone, status, state,
updated_at, disabled_reason):
s = mock.MagicMock()
s.id = id
s.binary = binary
s.host = host
s.zone = zone
s.status = status
s.state = state
s.updated_at = updated_at
s.disabled_reason = disabled_reason
return s
def get_service_list(self):
service_one = self.get_service(1, 'nova-compute', 'nova',
'nova1', 'enabled', 'up',
'2015-07-28T08:28:37.000000', None)
service_two = self.get_service(2, 'nova-schedule', 'nova',
'nova1', 'disabled', 'up',
'2015-07-28T08:28:38.000000',
'daily maintenance')
return [service_one, service_two]
def get_availability_zone(self, name, state):
zone = mock.MagicMock()
zone.zoneName = name
zone.zoneState = state
return zone
def get_zone_list(self):
zone_one = self.get_availability_zone('AZ1', 'available')
zone_two = self.get_availability_zone('AZ2', 'not available')
return [zone_one, zone_two]
|
apache-2.0
| 7,589,944,185,511,851,000
| 37.962264
| 78
| 0.54318
| false
| 3.722957
| false
| false
| false
|
caperren/Archives
|
OSU Robotics Club/Mars Rover 2016-2017/common/soil-sensor-test.py
|
1
|
2721
|
#!/usr/bin/env python
import sys
import math
import struct
import serial
import signal
import os
import time
import struct
SerialPath="/dev/ttyUSB0"
class SoilSensor():
def __init__(self, path):
self.path = path
self.__tty = serial.Serial(port=self.path,
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=0.2)
def set_mode_rx(self):
"""Set the transceiver to receive mode."""
#Set DTR for receive mode, clear for transmit
self.__tty.setDTR(True)
def set_mode_tx(self):
"""Set the transceiver to transmit mode."""
self.__tty.setDTR(False)
def send_command(self, addr_str, command_str):
"""Send a command to the soil sensor."""
self.set_mode_tx()
time.sleep(.04)
self.__tty.write(addr_str + command_str + "\r")
self.__tty.flush()
time.sleep(.05)
self.__tty.write("\n")
time.sleep(.005)
self.set_mode_rx()
reply = self.__tty.read(size=10000000)
return reply
def set_data(self, addr_str, command_str, data_str):
"""Set data in the soil sensor."""
self.set_mode_tx()
time.sleep(.04)
self.__tty.write(addr_str + command_str + "=" + data_str + "\r\n")
self.__tty.flush()
time.sleep(.05)
self.__tty.write("\n")
time.sleep(.005)
self.set_mode_rx()
reply = self.__tty.read(size=10000000)
return reply
def get_data(self, addr_str, command_str):
"""Get data from the sensor, returning the data.
command_str is the two-character string."""
self.set_mode_tx()
time.sleep(.04)
self.__tty.write(addr_str + command_str + "=?" + "\r")
self.__tty.flush()
time.sleep(.05)
self.__tty.write("\n")
time.sleep(.005)
self.set_mode_rx()
reply = self.__tty.read(size=10000000)
return reply
def get_measurement(self, addr_str):
"""Take and return a soil measurement."""
addr = addr_str
self.send_command(addr, "TR")
time.sleep(1)
data = self.send_command(addr, "T3")
print data
# reading set 3 comes back as a comma-separated string after a 3-character prefix
data = data[3:]
data = data.split(",")
print "Raw Values (reading set 3):", data
TempC = float(data[0])
Moisture = float(data[2])
Cond = float(data[4])
PermR = float(data[6])
PermI = float(data[8])
Salinity = Cond * 6.4
return {"TempC":TempC, "Moisture":Moisture, "Salinity":Salinity}
def main():
s = SoilSensor(SerialPath)
addr = s.get_data("///", "SN")[0:3]
s.set_data(addr, "PE", "1")
time.sleep(1)
while True:
print s.get_measurement(addr)
print ""
time.sleep(10)
if __name__ == "__main__":
main()
|
gpl-3.0
| 7,409,158,072,236,962,000
| 25.23
| 68
| 0.586182
| false
| 2.907051
| false
| false
| false
|
YzPaul3/h2o-3
|
h2o-py/h2o/connection.py
|
1
|
29700
|
"""
An H2OConnection represents the latest active handle to a cloud. No more than a single
H2OConnection object will be active at any one time.
"""
from __future__ import print_function
from __future__ import absolute_import
import requests
import math
import tempfile
import os
import re
import sys
import time
import subprocess
import atexit
import warnings
import site
from .display import H2ODisplay
from .h2o_logging import _is_logging, _log_rest
from .two_dim_table import H2OTwoDimTable
from .utils.shared_utils import quote
from six import iteritems, PY3
from string import ascii_lowercase, digits
from random import choice
warnings.simplefilter('always', UserWarning)
try:
warnings.simplefilter('ignore', requests.packages.urllib3.exceptions.InsecureRequestWarning)
except:
pass
__H2OCONN__ = None # the single active connection to H2O cloud
__H2O_REST_API_VERSION__ = 3 # const for the version of the rest api
class H2OConnection(object):
"""
H2OConnection is a class that represents a connection to the H2O cluster.
It is specified by an IP address and a port number.
Objects of type H2OConnection are not instantiated directly!
This class contains static methods for performing the common REST methods
GET, POST, and DELETE.
"""
__ENCODING__ = "utf-8"
__ENCODING_ERROR__ = "replace"
def __init__(self, ip, port, start_h2o, enable_assertions, license, nthreads, max_mem_size, min_mem_size, ice_root,
strict_version_check, proxy, https, insecure, username, password, max_mem_size_GB, min_mem_size_GB, proxies, size):
"""
Instantiate the package handle to the H2O cluster.
:param ip: An IP address, default is "localhost"
:param port: A port, default is 54321
:param start_h2o: A boolean dictating whether this module should start the H2O jvm. An attempt is made anyway if _connect fails.
:param enable_assertions: If start_h2o, pass `-ea` as a VM option.
:param license: If not None, is a path to a license file.
:param nthreads: Number of threads in the thread pool. This relates very closely to the number of CPUs used.
-1 means use all CPUs on the host. A positive integer specifies the number of CPUs directly. This value is only used when Python starts H2O.
:param max_mem_size: Maximum heap size (jvm option Xmx) in gigabytes.
:param min_mem_size: Minimum heap size (jvm option Xms) in gigabytes.
:param ice_root: A temporary directory (default location is determined by tempfile.mkdtemp()) to hold H2O log files.
:param strict_version_check: Setting this to False is unsupported and should only be done when advised by technical support.
:param proxy: A dictionary with keys 'ftp', 'http', 'https' and values that correspond to a proxy path.
:param https: Set this to True to use https instead of http.
:param insecure: Set this to True to disable SSL certificate checking.
:param username: Username to login with.
:param password: Password to login with.
:param max_mem_size_GB: DEPRECATED. Use max_mem_size.
:param min_mem_size_GB: DEPRECATED. Use min_mem_size.
:param proxies: DEPRECATED. Use proxy.
:param size: DEPRECATED.
:return: None
"""
port = as_int(port)
    if not (isinstance(port, int) and 0 <= port <= sys.maxsize): raise ValueError("Port out of range, " + str(port))
if https != insecure: raise ValueError("`https` and `insecure` must both be True to enable HTTPS")
#Deprecated params
if max_mem_size_GB is not None:
warnings.warn("`max_mem_size_GB` is deprecated. Use `max_mem_size` instead.", category=DeprecationWarning)
max_mem_size = max_mem_size_GB
if min_mem_size_GB is not None:
warnings.warn("`min_mem_size_GB` is deprecated. Use `min_mem_size` instead.", category=DeprecationWarning)
min_mem_size = min_mem_size_GB
if proxies is not None:
warnings.warn("`proxies` is deprecated. Use `proxy` instead.", category=DeprecationWarning)
proxy = proxies
if size is not None:
warnings.warn("`size` is deprecated.", category=DeprecationWarning)
global __H2OCONN__
self._cld = None
self._ip = ip
self._port = port
self._proxy = proxy
self._https = https
self._insecure = insecure
self._username = username
self._password = password
self._session_id = None
self._rest_version = __H2O_REST_API_VERSION__
self._child = getattr(__H2OCONN__, "_child") if hasattr(__H2OCONN__, "_child") else None
__H2OCONN__ = self
#Give user warning if proxy environment variable is found. PUBDEV-2504
for name, value in os.environ.items():
if name.lower()[-6:] == '_proxy' and value:
warnings.warn("Proxy environment variable `" + name + "` with value `" + value + "` found. This may interfere with your H2O Connection.")
jarpaths = H2OConnection.jar_paths()
if os.path.exists(jarpaths[0]): jar_path = jarpaths[0]
elif os.path.exists(jarpaths[1]): jar_path = jarpaths[1]
elif os.path.exists(jarpaths[2]): jar_path = jarpaths[2]
elif os.path.exists(jarpaths[3]): jar_path = jarpaths[3]
elif os.path.exists(jarpaths[4]): jar_path = jarpaths[4]
else: jar_path = jarpaths[5]
try:
cld = self._connect()
except:
# try to start local jar or re-raise previous exception
if not start_h2o: raise ValueError("Cannot connect to H2O server. Please check that H2O is running at {}".format(H2OConnection.make_url("")))
print()
print()
print("No instance found at ip and port: " + ip + ":" + str(port) + ". Trying to start local jar...")
print()
print()
path_to_jar = os.path.exists(jar_path)
if path_to_jar:
if not ice_root:
ice_root = tempfile.mkdtemp()
cld = self._start_local_h2o_jar(max_mem_size, min_mem_size, enable_assertions, license, ice_root, jar_path, nthreads)
else:
print("No jar file found. Could not start local instance.")
print("Jar Paths searched: ")
for jp in jarpaths:
print("\t" + jp)
print()
raise
__H2OCONN__._cld = cld
if strict_version_check and os.environ.get('H2O_DISABLE_STRICT_VERSION_CHECK') is None:
ver_h2o = cld['version']
from .__init__ import __version__
ver_pkg = "UNKNOWN" if __version__ == "SUBST_PROJECT_VERSION" else __version__
if ver_h2o != ver_pkg:
try:
branch_name_h2o = cld['branch_name']
except KeyError:
branch_name_h2o = None
else:
branch_name_h2o = cld['branch_name']
try:
build_number_h2o = cld['build_number']
except KeyError:
build_number_h2o = None
else:
build_number_h2o = cld['build_number']
if build_number_h2o is None:
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, str(ver_pkg)))
elif build_number_h2o == 'unknown':
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, str(ver_pkg)))
elif build_number_h2o == '99999':
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"This is a developer build, please contact your developer."
"".format(ver_h2o, str(ver_pkg)))
else:
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Install the matching h2o-Python version from - "
"http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html."
"".format(ver_h2o, str(ver_pkg),branch_name_h2o, build_number_h2o))
self._session_id = H2OConnection.get_json(url_suffix="InitID")["session_key"]
H2OConnection._cluster_info()
@staticmethod
def default():
H2OConnection.__ENCODING__ = "utf-8"
H2OConnection.__ENCODING_ERROR__ = "replace"
@staticmethod
def jar_paths():
sys_prefix1 = sys_prefix2 = sys.prefix
if sys_prefix1.startswith('/Library'): sys_prefix2 = '/System'+sys_prefix1
elif sys_prefix1.startswith('/System'): sys_prefix2 = sys_prefix1.split('/System')[1]
return [os.path.join(sys_prefix1, "h2o_jar", "h2o.jar"),
os.path.join(os.path.sep,"usr","local","h2o_jar","h2o.jar"),
os.path.join(sys_prefix1, "local", "h2o_jar", "h2o.jar"),
os.path.join(site.USER_BASE, "h2o_jar", "h2o.jar"),
os.path.join(sys_prefix2, "h2o_jar", "h2o.jar"),
os.path.join(sys_prefix2, "h2o_jar", "h2o.jar"),
]
@staticmethod
def _cluster_info():
global __H2OCONN__
cld = __H2OCONN__._cld
ncpus = sum([n['num_cpus'] for n in cld['nodes']])
allowed_cpus = sum([n['cpus_allowed'] for n in cld['nodes']])
mfree = sum([n['free_mem'] for n in cld['nodes']])
cluster_health = all([n['healthy'] for n in cld['nodes']])
ip = "127.0.0.1" if __H2OCONN__._ip=="localhost" else __H2OCONN__._ip
cluster_info = [
["H2O cluster uptime: ", get_human_readable_time(cld["cloud_uptime_millis"])],
["H2O cluster version: ", cld["version"]],
["H2O cluster name: ", cld["cloud_name"]],
["H2O cluster total nodes: ", cld["cloud_size"]],
["H2O cluster total free memory: ", get_human_readable_size(mfree)],
["H2O cluster total cores: ", str(ncpus)],
["H2O cluster allowed cores: ", str(allowed_cpus)],
["H2O cluster healthy: ", str(cluster_health)],
["H2O Connection ip: ", ip],
["H2O Connection port: ", __H2OCONN__._port],
["H2O Connection proxy: ", __H2OCONN__._proxy],
["Python Version: ", sys.version.split()[0]],
]
__H2OCONN__._cld = H2OConnection.get_json(url_suffix="Cloud") # update the cached version of cld
H2ODisplay(cluster_info)
def _connect(self, size=1, max_retries=5, print_dots=False):
"""
    Does not actually "connect"; it simply tests that the cluster can be reached,
    is of a certain size, and is accepting basic status commands.
:param size: The number of H2O instances in the cloud.
:return: The JSON response from a "stable" cluster.
"""
retries = 0
while True:
retries += 1
if print_dots:
self._print_dots(retries)
try:
cld = H2OConnection.get_json(url_suffix="Cloud")
if not cld['cloud_healthy']:
raise ValueError("Cluster reports unhealthy status", cld)
if cld['cloud_size'] >= size and cld['consensus']:
if print_dots: print(" Connection successful!")
return cld
except EnvironmentError:
pass
# Cloud too small or voting in progress; sleep; try again
time.sleep(0.1)
if retries > max_retries:
raise EnvironmentError("Max retries exceeded. Could not establish link to the H2O cloud @ " + str(self._ip) + ":" + str(self._port))
def _print_dots(self, retries):
sys.stdout.write("\rStarting H2O JVM and connecting: {}".format("." * retries))
sys.stdout.flush()
def _start_local_h2o_jar(self, mmax, mmin, ea, license, ice, jar_path, nthreads):
command = H2OConnection._check_java()
if license:
if not os.path.exists(license):
raise ValueError("License file not found (" + license + ")")
if not ice:
raise ValueError("`ice_root` must be specified")
stdout = open(H2OConnection._tmp_file("stdout"), 'w')
stderr = open(H2OConnection._tmp_file("stderr"), 'w')
print("Using ice_root: " + ice)
print()
jver = subprocess.check_output([command, "-version"], stderr=subprocess.STDOUT)
if PY3: jver = str(jver, H2OConnection.__ENCODING__)
print()
print("Java Version: " + jver)
print()
if "GNU libgcj" in jver:
raise ValueError("Sorry, GNU Java is not supported for H2O.\n"+
"Please download the latest Java SE JDK 7 from the following URL:\n"+
"http://www.oracle.com/technetwork/java/javase/downloads/jdk7-downloads-1880260.html")
if "Client VM" in jver:
print("WARNING: ")
print("You have a 32-bit version of Java. H2O works best with 64-bit Java.")
print("Please download the latest Java SE JDK 7 from the following URL:")
print("http://www.oracle.com/technetwork/java/javase/downloads/jdk7-downloads-1880260.html")
print()
vm_opts = []
if mmin: vm_opts += ["-Xms{}g".format(mmin)]
if mmax: vm_opts += ["-Xmx{}g".format(mmax)]
if ea: vm_opts += ["-ea"]
h2o_opts = ["-verbose:gc",
"-XX:+PrintGCDetails",
"-XX:+PrintGCTimeStamps",
"-jar", jar_path,
"-name", "H2O_started_from_python_"
+ re.sub("[^A-Za-z0-9]", "_",
(os.getenv("USERNAME") if sys.platform == "win32" else os.getenv("USER")) or "unknownUser")
+ "_" + "".join([choice(ascii_lowercase) for _ in range(3)] + [choice(digits) for _ in range(3)]),
"-ip", "127.0.0.1",
"-port", "54321",
"-ice_root", ice,
]
if nthreads > 0: h2o_opts += ["-nthreads", str(nthreads)]
if license: h2o_opts += ["-license", license]
cmd = [command] + vm_opts + h2o_opts
cwd = os.path.abspath(os.getcwd())
if sys.platform == "win32":
self._child = subprocess.Popen(args=cmd,stdout=stdout,stderr=stderr,cwd=cwd,creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
self._child = subprocess.Popen(args=cmd, stdout=stdout, stderr=stderr, cwd=cwd, preexec_fn=os.setsid)
cld = self._connect(1, 30, True)
return cld
@staticmethod
def _check_java():
# *WARNING* some over-engineering follows... :{
# is java in PATH?
if H2OConnection._pwhich("java"):
return H2OConnection._pwhich("java")
# check if JAVA_HOME is set (for windoz)
if os.getenv("JAVA_HOME"):
return os.path.join(os.getenv("JAVA_HOME"), "bin", "java.exe")
# check /Program Files/ and /Program Files (x86)/ if os is windoz
if sys.platform == "win32":
program_folder = os.path.join("C:", "{}", "Java")
program_folders = [program_folder.format("Program Files"),
program_folder.format("Program Files (x86)")]
# check both possible program files...
for folder in program_folders:
# hunt down the jdk directory
        possible_jdk_dir = [d for d in os.listdir(folder) if 'jdk' in d] if os.path.isdir(folder) else []
# if got a non-empty list of jdk directory candidates
if len(possible_jdk_dir) != 0:
# loop over and check if the java.exe exists
for jdk in possible_jdk_dir:
path = os.path.join(folder, jdk, "bin", "java.exe")
if os.path.exists(path):
return path
# check for JRE and warn
for folder in program_folders:
path = os.path.join(folder, "jre7", "bin", "java.exe")
if os.path.exists(path):
raise ValueError("Found JRE at " + path + "; but H2O requires the JDK to run.")
else:
raise ValueError("Cannot find Java. Please install the latest JDK from\n"
+"http://www.oracle.com/technetwork/java/javase/downloads/index.html" )
@staticmethod
def _pwhich(e):
"""
POSIX style which
"""
ok = os.X_OK
if e:
if os.access(e, ok):
return e
for path in os.getenv('PATH').split(os.pathsep):
full_path = os.path.join(path, e)
if os.access(full_path, ok):
return full_path
return None
@staticmethod
def _tmp_file(type):
usr = re.sub("[^A-Za-z0-9]", "_", (os.getenv("USERNAME") if sys.platform == "win32" else os.getenv("USER")) or "unknownUser")
if type == "stdout":
path = os.path.join(tempfile.mkdtemp(), "h2o_{}_started_from_python.out".format(usr))
print("JVM stdout: " + path)
return path
if type == "stderr":
path = os.path.join(tempfile.mkdtemp(), "h2o_{}_started_from_python.err".format(usr))
print("JVM stderr: " + path)
return path
if type == "pid":
return os.path.join(tempfile.mkdtemp(), "h2o_{}_started_from_python.pid".format(usr))
    raise ValueError("Unknown type in H2OConnection._tmp_file call: " + type)
@staticmethod
def _shutdown(conn, prompt):
"""
Shut down the specified instance. All data will be lost.
This method checks if H2O is running at the specified IP address and port, and if it is, shuts down that H2O
instance.
:param conn: An H2OConnection object containing the IP address and port of the server running H2O.
:param prompt: A logical value indicating whether to prompt the user before shutting down the H2O server.
:return: None
"""
global __H2OCONN__
if conn is None: raise ValueError("There is no H2O instance running.")
try:
if not conn.cluster_is_up(conn): raise ValueError("There is no H2O instance running at ip: {0} and port: "
"{1}".format(conn.ip(), conn.port()))
except:
#H2O is already shutdown on the java side
ip = conn.ip()
port = conn.port()
__H2OCONN__= None
raise ValueError("The H2O instance running at {0}:{1} has already been shutdown.".format(ip, port))
if not isinstance(prompt, bool): raise ValueError("`prompt` must be TRUE or FALSE")
if prompt:
question = "Are you sure you want to shutdown the H2O instance running at {0}:{1} (Y/N)? ".format(conn.ip(), conn.port())
response = input(question) if PY3 else raw_input(question)
else: response = "Y"
if response == "Y" or response == "y":
conn.post(url_suffix="Shutdown")
__H2OCONN__ = None #so that the "Did you run `h2o.init()`" ValueError is triggered
@staticmethod
def rest_version(): return __H2OCONN__._rest_version
@staticmethod
def session_id(): return __H2OCONN__._session_id
@staticmethod
def port(): return __H2OCONN__._port
@staticmethod
def ip(): return __H2OCONN__._ip
@staticmethod
def https(): return __H2OCONN__._https
@staticmethod
def username(): return __H2OCONN__._username
@staticmethod
def password(): return __H2OCONN__._password
@staticmethod
def insecure(): return __H2OCONN__._insecure
@staticmethod
def current_connection(): return __H2OCONN__
@staticmethod
def check_conn():
if not __H2OCONN__:
raise EnvironmentError("No active connection to an H2O cluster. Try calling `h2o.init()`")
return __H2OCONN__
@staticmethod
def cluster_is_up(conn):
"""
Determine if an H2O cluster is up or not
:param conn: An H2OConnection object containing the IP address and port of the server running H2O.
:return: TRUE if the cluster is up; FALSE otherwise
"""
if not isinstance(conn, H2OConnection): raise ValueError("`conn` must be an H2OConnection object")
rv = conn.current_connection()._attempt_rest(url=("https" if conn.https() else "http") +"://{0}:{1}/".format(conn.ip(), conn.port()), method="GET",
post_body="", file_upload_info="")
if rv.status_code == 401: warnings.warn("401 Unauthorized Access. Did you forget to provide a username and password?")
return rv.status_code == 200 or rv.status_code == 301
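  # Hypothetical example (sketch, not from the original source): checking
  # liveness of the currently active connection.
  #   conn = H2OConnection.current_connection()
  #   if conn is not None and H2OConnection.cluster_is_up(conn):
  #     print("H2O cluster is reachable")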
"""
Below is the REST implementation layer:
_attempt_rest -- GET, POST, DELETE
_do_raw_rest
get
post
get_json
post_json
All methods are static and rely on an active __H2OCONN__ object.
"""
@staticmethod
def make_url(url_suffix, _rest_version=None):
scheme = "https" if H2OConnection.https() else "http"
_rest_version = _rest_version or H2OConnection.rest_version()
return "{}://{}:{}/{}/{}".format(scheme,H2OConnection.ip(),H2OConnection.port(),_rest_version,url_suffix)
@staticmethod
def get(url_suffix, **kwargs):
if __H2OCONN__ is None:
raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
return __H2OCONN__._do_raw_rest(url_suffix, "GET", None, **kwargs)
@staticmethod
def post(url_suffix, file_upload_info=None, **kwargs):
if __H2OCONN__ is None:
raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
return __H2OCONN__._do_raw_rest(url_suffix, "POST", file_upload_info, **kwargs)
@staticmethod
def delete(url_suffix, **kwargs):
if __H2OCONN__ is None:
raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
return __H2OCONN__._do_raw_rest(url_suffix, "DELETE", None, **kwargs)
@staticmethod
def get_json(url_suffix, **kwargs):
if __H2OCONN__ is None:
raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
return __H2OCONN__._rest_json(url_suffix, "GET", None, **kwargs)
@staticmethod
def post_json(url_suffix, file_upload_info=None, **kwargs):
if __H2OCONN__ is None:
raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
return __H2OCONN__._rest_json(url_suffix, "POST", file_upload_info, **kwargs)
def _rest_json(self, url_suffix, method, file_upload_info, **kwargs):
raw_txt = self._do_raw_rest(url_suffix, method, file_upload_info, **kwargs)
return self._process_tables(raw_txt.json())
# Massage arguments into place, call _attempt_rest
def _do_raw_rest(self, url_suffix, method, file_upload_info, **kwargs):
if not url_suffix:
raise ValueError("No url suffix supplied.")
# allow override of REST version, currently used for Rapids which is /99
if '_rest_version' in kwargs:
_rest_version = kwargs['_rest_version']
del kwargs['_rest_version']
else:
_rest_version = self._rest_version
url = H2OConnection.make_url(url_suffix,_rest_version)
query_string = ""
for k,v in iteritems(kwargs):
if v is None: continue #don't send args set to None so backend defaults take precedence
if isinstance(v, list):
x = '['
for l in v:
if isinstance(l,list):
x += '['
x += ','.join([str(e) if PY3 else str(e).encode(H2OConnection.__ENCODING__, errors=H2OConnection.__ENCODING_ERROR__) for e in l])
x += ']'
else:
x += str(l) if PY3 else str(l).encode(H2OConnection.__ENCODING__, errors=H2OConnection.__ENCODING_ERROR__)
x += ','
x = x[:-1]
x += ']'
else:
x = str(v) if PY3 else str(v).encode(H2OConnection.__ENCODING__, errors=H2OConnection.__ENCODING_ERROR__)
query_string += k+"="+quote(x)+"&"
query_string = query_string[:-1] # Remove trailing extra &
post_body = ""
if not file_upload_info:
if method == "POST":
post_body = query_string
elif query_string != '':
url = "{}?{}".format(url, query_string)
else:
if not method == "POST":
raise ValueError("Received file upload info and expected method to be POST. Got: " + str(method))
if query_string != '':
url = "{}?{}".format(url, query_string)
if _is_logging():
_log_rest("------------------------------------------------------------\n")
_log_rest("\n")
      _log_rest("Time:     {0}\n".format(time.strftime('%Y-%m-%d %H:%M:%S')))
_log_rest("\n")
_log_rest("{0} {1}\n".format(method, url))
_log_rest("postBody: {0}\n".format(post_body))
global _rest_ctr; _rest_ctr = _rest_ctr+1
begin_time_seconds = time.time()
http_result = self._attempt_rest(url, method, post_body, file_upload_info)
end_time_seconds = time.time()
elapsed_time_seconds = end_time_seconds - begin_time_seconds
elapsed_time_millis = elapsed_time_seconds * 1000
if not http_result.ok:
detailed_error_msgs = []
try:
result = http_result.json()
if 'messages' in result.keys():
detailed_error_msgs = '\n'.join([m['message'] for m in result['messages'] if m['message_type'] in ['ERRR']])
elif 'exception_msg' in result.keys():
detailed_error_msgs = result['exception_msg']
except ValueError:
pass
raise EnvironmentError(("h2o-py got an unexpected HTTP status code:\n {} {} (method = {}; url = {}). \n"+ \
"detailed error messages: {}")
.format(http_result.status_code,http_result.reason,method,url,detailed_error_msgs))
if _is_logging():
_log_rest("\n")
_log_rest("httpStatusCode: {0}\n".format(http_result.status_code))
_log_rest("httpStatusMessage: {0}\n".format(http_result.reason))
_log_rest("millis: {0}\n".format(elapsed_time_millis))
_log_rest("\n")
_log_rest("{0}\n".format(http_result.json()))
_log_rest("\n")
return http_result
# Low level request call
def _attempt_rest(self, url, method, post_body, file_upload_info):
auth = (self._username, self._password)
verify = not self._insecure
headers = {'User-Agent': 'H2O Python client/'+sys.version.replace('\n','')}
try:
if method == "GET":
return requests.get(url, headers=headers, proxies=self._proxy, auth=auth, verify=verify)
elif file_upload_info:
files = {file_upload_info["file"] : open(file_upload_info["file"], "rb")}
return requests.post(url, files=files, headers=headers, proxies=self._proxy, auth=auth, verify=verify)
elif method == "POST":
headers["Content-Type"] = "application/x-www-form-urlencoded"
return requests.post(url, data=post_body, headers=headers, proxies=self._proxy, auth=auth, verify=verify)
elif method == "DELETE":
return requests.delete(url, headers=headers, proxies=self._proxy, auth=auth, verify=verify)
else:
raise ValueError("Unknown HTTP method " + method)
except requests.ConnectionError as e:
raise EnvironmentError("h2o-py encountered an unexpected HTTP error:\n {}".format(e))
# TODO:
# @staticmethod
# def _process_matrices(x=None):
# if x:
# if isinstance(x, "dict"):
#
# return x
@staticmethod
def _process_tables(x=None):
if x:
if isinstance(x, dict):
has_meta = "__meta" in x
has_schema_type = has_meta and "schema_type" in x["__meta"]
have_table = has_schema_type and x["__meta"]["schema_type"] == "TwoDimTable"
if have_table:
col_formats = [c["format"] for c in x["columns"]]
table_header = x["name"]
table_descr = x["description"]
col_types = [c["type"] for c in x["columns"]]
col_headers = [c["name"] for c in x["columns"]]
row_headers = ["" for i in range(len(col_headers))]
cell_values = x["data"]
tbl = H2OTwoDimTable(row_header=row_headers, col_header=col_headers,
col_types=col_types, table_header=table_header,
raw_cell_values=cell_values,
col_formats=col_formats,table_description=table_descr)
x = tbl
else:
for k in x:
x[k] = H2OConnection._process_tables(x[k])
if isinstance(x, list):
for it in range(len(x)):
x[it] = H2OConnection._process_tables(x[it])
return x
global _rest_ctr
_rest_ctr = 0
@staticmethod
def rest_ctr(): global _rest_ctr; return _rest_ctr
# On exit, close the session to allow H2O to cleanup any temps
def end_session():
try:
H2OConnection.delete(url_suffix="InitID")
    print("Successfully closed the H2O Session.")
except:
pass
def get_human_readable_size(num):
exp_str = [(0, 'B'), (10, 'KB'), (20, 'MB'), (30, 'GB'), (40, 'TB'), (50, 'PB'), ]
i = 0
rounded_val = 0
while i + 1 < len(exp_str) and num >= (2 ** exp_str[i + 1][0]):
i += 1
rounded_val = round(float(num) / 2 ** exp_str[i][0], 2)
return '%s %s' % (rounded_val, exp_str[i][1])
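# Worked example (illustrative comment, not in the original source):
#   get_human_readable_size(1536) -> '1.5 KB'   since 1536 / 2**10 == 1.5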
def get_human_readable_time(epochTimeMillis):
days = epochTimeMillis/(24*60*60*1000.0)
hours = (days-math.floor(days))*24
minutes = (hours-math.floor(hours))*60
seconds = (minutes-math.floor(minutes))*60
milliseconds = (seconds-math.floor(seconds))*1000
duration_vec = [int(math.floor(t)) for t in [days,hours,minutes,seconds,milliseconds]]
names_duration_vec = ["days","hours","minutes","seconds","milliseconds"]
duration_dict = dict(zip(names_duration_vec, duration_vec))
readable_time = ""
for name in names_duration_vec:
if duration_dict[name] > 0:
readable_time += str(duration_dict[name]) + " " + name + " "
return readable_time
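# Illustrative note (sketch): an uptime given in milliseconds is rendered as a
# space-separated string of its non-zero units, e.g. something of the form
# '2 days 3 hours 15 minutes ...' for cld["cloud_uptime_millis"].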
def is_int(possible_int):
try:
int(possible_int)
return True
except ValueError:
return False
def as_int(the_int):
if not is_int(the_int):
raise ValueError("Not a valid int value: " + str(the_int))
return int(the_int)
def _kill_jvm_fork():
global __H2OCONN__
if __H2OCONN__ is not None:
if __H2OCONN__._child:
__H2OCONN__._child.kill()
print("Successfully stopped H2O JVM started by the h2o python module.")
atexit.register(_kill_jvm_fork)
atexit.register(end_session)
|
apache-2.0
| -7,047,899,726,068,955,000
| 39.189445
| 151
| 0.615623
| false
| 3.491243
| false
| false
| false
|
ebigelow/LOTlib
|
LOTlib/Inference/Samplers/AdaptiveParallelTempering.py
|
1
|
3107
|
from scipy import interpolate
from ParallelTempering import ParallelTemperingSampler
class AdaptiveParallelTemperingSampler(ParallelTemperingSampler):
"""
Adaptive setting of the temperatures via
Katzgraber, H. G., Trebst, S., Huse, D. A., & Troyer, M. (2006). Feedback-optimized parallel tempering monte carlo. Journal of Statistical Mechanics: Theory and Experiment, 2006, P03018
"""
def __init__(self, make_h0, data, adapt_at=[50000, 100000, 200000, 300000, 500000, 1000000], **kwargs):
ParallelTemperingSampler.__init__(self, make_h0, data, **kwargs)
self.adapt_at = adapt_at
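    # Sketch of the feedback-optimization idea (comment only; assumptions noted):
    # ParallelTemperingSampler tracks how many replicas diffusing "up" versus
    # "down" in temperature visit each chain (self.nup / self.ndown). get_hist()
    # is assumed to return the resulting per-chain fraction f(T);
    # adapt_temperatures() interpolates the inverse of f and chooses new
    # temperatures so that f becomes approximately linear across chain indices,
    # which is the Katzgraber et al. (2006) feedback-optimized spacing.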
def adapt_temperatures(self, epsilon=0.001):
"""
        Adapt our temperatures, given self.nup and self.ndown.
This follows ComputeAdaptedTemperatures in https://github.com/stuhlmueller/mcnets/blob/master/mcnets/tempering.py
:return:
"""
hist = self.get_hist()
linear_hist = [x/float(self.nchains-1) for x in reversed(range(self.nchains))]
monotonic_hist = [x*float(1.-epsilon) + y*epsilon for x, y in zip(hist, linear_hist)]
# print "Linear:", linear_hist
# print "Monotonic:", monotonic_hist
# Hmm force monotonic to have 0,1?
monotonic_hist[0], monotonic_hist[-1] = 1.0, 0.0
f = interpolate.interp1d(list(reversed(monotonic_hist)), list(reversed(self.temperatures)))
newt = [self.temperatures[0]]
for i in reversed(range(2, self.nchains)):
# print i, float(i-1) / (self.nchains-1), frac(float(i-1) / (self.nchains-1))
newt.append(f([float(i-1.) / (self.nchains-1)])[0])
# keep the old temps
newt.append(self.temperatures[-1])
self.temperatures = newt
print "# Adapting temperatures to ", self.temperatures
print "# Acceptance ratio:", self.acceptance_ratio()
# And set each temperature chain
for c, t in zip(self.chains, self.temperatures):
            c.likelihood_temperature = t
def next(self):
ret = ParallelTemperingSampler.next(self)
if self.nsamples in self.adapt_at: ## TODO: Maybe make this faster?
self.adapt_temperatures()
return ret
if __name__ == "__main__":
from LOTlib import break_ctrlc
from LOTlib.Examples.Number2015.Model import generate_data, make_h0
data = generate_data(300)
from LOTlib.MCMCSummary.Z import Z
from LOTlib.MCMCSummary.TopN import TopN
z = Z(unique=True)
tn = TopN(N=10)
from LOTlib.Miscellaneous import logrange
sampler = AdaptiveParallelTemperingSampler(make_h0, data, steps=1000000, \
yield_only_t0=False, whichtemperature='acceptance_temperature', \
temperatures=logrange(1.0, 10.0, 10))
for h in break_ctrlc(tn(z(sampler))):
# print sampler.chain_idx, h.posterior_score, h
pass
for x in tn.get_all(sorted=True):
print x.posterior_score, x
print z
print sampler.nup, sampler.ndown
print sampler.get_hist()
|
gpl-3.0
| 4,413,269,540,533,469,700
| 32.419355
| 189
| 0.632121
| false
| 3.498874
| false
| false
| false
|
dnsbob/pynet_testz
|
test_telnet.py
|
1
|
1310
|
#!/usr/bin/env python
# test_telnet.py
import telnetlib
import time
import socket
import sys
TELNET_PORT=23
TELNET_TIMEOUT=6
def telnet_connect(ip_addr, TELNET_PORT, TELNET_TIMEOUT):
try:
return telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
except socket.timeout:
sys.exit("Connection timed out")
def login(remote_conn, username, password):
output=remote_conn.read_until("sername:", TELNET_TIMEOUT)
remote_conn.write(username + '\n')
output += remote_conn.read_until("ssword:", TELNET_TIMEOUT)
remote_conn.write(password + '\n')
return output
def send_command(remote_conn, cmd):
cmd=cmd.rstrip() # remove trailing linefeed if any
remote_conn.write(cmd + '\n')
time.sleep(1)
return remote_conn.read_very_eager()
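# Example usage (sketch): after login(), commands can be issued one at a time,
# e.g. output = send_command(remote_conn, 'show ip interface brief')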
def main():
ip_addr='184.105.247.70'
username='pyclass'
password='88newclass'
remote_conn=telnet_connect(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
output=login(remote_conn, username, password)
print output
time.sleep(1)
output=remote_conn.read_very_eager()
print output
output=send_command(remote_conn, 'terminal length 0')
print output
output=send_command(remote_conn, 'show version')
print output
    remote_conn.close()
if __name__ == '__main__':
main()
|
apache-2.0
| 3,303,492,153,968,261,600
| 22.818182
| 69
| 0.676336
| false
| 3.25062
| false
| false
| false
|
saullocastro/pyNastran
|
pyNastran/bdf/bdf_interface/write_mesh.py
|
1
|
45439
|
# coding: utf-8
"""
This file defines:
- WriteMesh
"""
from __future__ import (nested_scopes, generators, division, absolute_import,
print_function, unicode_literals)
import sys
import io
from codecs import open
from six import string_types, iteritems, itervalues, PY2, StringIO
#from pyNastran.utils import is_file_obj
from pyNastran.bdf.utils import print_filename
from pyNastran.utils.gui_io import save_file_dialog
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.bdf_interface.attributes import BDFAttributes
class WriteMesh(BDFAttributes):
"""
Defines methods for writing cards
Major methods:
- model.write_bdf(...)
- model.echo_bdf(...)
- model.auto_reject_bdf(...)
"""
def __init__(self):
"""creates methods for writing cards"""
BDFAttributes.__init__(self)
self._auto_reject = True
self.cards_to_read = set([])
#def echo_bdf(self, infile_name):
#"""
#This method removes all comment lines from the bdf
        #A write method is still required.
#.. todo:: maybe add the write method
#.. code-block:: python
#model = BDF()
#model.echo_bdf(bdf_filename)
#"""
#self.deprecated('self.echo_bdf()', 'removed...', '0.8')
#self.cards_to_read = set([])
#return self.read_bdf(infile_name)
#def auto_reject_bdf(self, infile_name):
#"""
#This method parses supported cards, but does not group them into
#nodes, elements, properties, etc.
#.. todo:: maybe add the write method
#"""
#self.deprecated('self.auto_reject_bdf()', 'removed...', '0.8')
#self._auto_reject = True
#return self.read_bdf(infile_name)
def get_encoding(self, encoding=None):
if encoding is not None:
pass
else:
encoding = self._encoding
if encoding is None:
encoding = sys.getdefaultencoding()
return encoding
def _output_helper(self, out_filename, interspersed, size, is_double):
"""
Performs type checking on the write_bdf inputs
"""
if out_filename is None:
wildcard_wx = "Nastran BDF (*.bdf; *.dat; *.nas; *.pch)|" \
"*.bdf;*.dat;*.nas;*.pch|" \
"All files (*.*)|*.*"
wildcard_qt = "Nastran BDF (*.bdf *.dat *.nas *.pch);;All files (*)"
title = 'Save BDF/DAT/PCH'
out_filename = save_file_dialog(title, wildcard_wx, wildcard_qt)
assert out_filename is not None, out_filename
if PY2:
#if not is_file_obj(out_filename):
if not (hasattr(out_filename, 'read') and hasattr(out_filename, 'write')
) or isinstance(out_filename, (file, StringIO)):
return out_filename
elif not isinstance(out_filename, string_types):
msg = 'out_filename=%r must be a string; type=%s' % (
out_filename, type(out_filename))
raise TypeError(msg)
else:
if not(hasattr(out_filename, 'read') and hasattr(out_filename, 'write')
) or isinstance(out_filename, io.IOBase):
return out_filename
elif not isinstance(out_filename, string_types):
msg = 'out_filename=%r must be a string; type=%s' % (
out_filename, type(out_filename))
raise TypeError(msg)
if size == 8:
assert is_double is False, 'is_double=%r' % is_double
elif size == 16:
assert is_double in [True, False], 'is_double=%r' % is_double
else:
assert size in [8, 16], size
assert isinstance(interspersed, bool)
fname = print_filename(out_filename, self._relpath)
self.log.debug("***writing %s" % fname)
return out_filename
def write_caero_model(self, caero_bdf_filename='caero.bdf'):
"""write the CAERO cards as CQUAD4s that can be visualized"""
bdf_file = open(caero_bdf_filename, 'w')
bdf_file.write('CEND\n')
bdf_file.write('BEGIN BULK\n')
bdf_file.write('$ punch=True\n')
i = 1
mid = 1
bdf_file.write('MAT1,%s,3.0E7,,0.3\n' % mid)
for aesurf_id, aesurf in iteritems(self.aesurf):
cid = aesurf.cid1
bdf_file.write('PSHELL,%s,%s,0.1\n' % (aesurf_id, aesurf_id))
#print(cid)
#ax, ay, az = cid.i
#bx, by, bz = cid.j
#cx, cy, cz = cid.k
#bdf_file.write('CORD2R,%s,,%s,%s,%s,%s,%s,%s\n' % (cid, ax, ay, az, bx, by, bz))
#bdf_file.write(',%s,%s,%s\n' % (cx, cy, cz))
#print(cid)
bdf_file.write(str(cid))
#aesurf.elements
for eid, caero in sorted(iteritems(self.caeros)):
assert eid != 1, 'CAERO eid=1 is reserved for non-flaps'
scaero = str(caero).rstrip().split('\n')
bdf_file.write('$ ' + '\n$ '.join(scaero) + '\n')
points, elements = caero.panel_points_elements()
npoints = points.shape[0]
#nelements = elements.shape[0]
for ipoint, point in enumerate(points):
x, y, z = point
bdf_file.write('GRID,%s,,%s,%s,%s\n' % (i + ipoint, x, y, z))
pid = eid
mid = eid
#if 0:
#bdf_file.write('PSHELL,%s,%s,0.1\n' % (pid, mid))
#bdf_file.write('MAT1,%s,3.0E7,,0.3\n' % mid)
#else:
bdf_file.write('PSHELL,%s,%s,0.1\n' % (1, 1))
bdf_file.write('MAT1,%s,3.0E7,,0.3\n' % 1)
j = 0
for elem in elements + i:
p1, p2, p3, p4 = elem
eid2 = j + eid
pidi = None
for aesurf_id, aesurf in iteritems(self.aesurf):
aelist_id = aesurf.AELIST_id1()
aelist = self.aelists[aelist_id]
if eid2 in aelist.elements:
pidi = aesurf_id
break
if pidi is None:
#pidi = pid
pidi = 1
bdf_file.write('CQUAD4,%s,%s,%s,%s,%s,%s\n' % (j + eid, pidi, p1, p2, p3, p4))
j += 1
i += npoints
#break
#j += nelements
bdf_file.write('ENDDATA\n')
def write_bdf(self, out_filename=None, encoding=None,
size=8, is_double=False,
interspersed=False, enddata=None, close=True):
"""
Writes the BDF.
Parameters
----------
out_filename : varies; default=None
str - the name to call the output bdf
file - a file object
StringIO() - a StringIO object
None - pops a dialog
encoding : str; default=None -> system specified encoding
the unicode encoding
latin1, and utf8 are generally good options
size : int; {8, 16}
the field size
is_double : bool; default=False
False : small field
True : large field
        interspersed : bool; default=False
Writes a bdf with properties & elements
interspersed like how Patran writes the bdf. This takes
slightly longer than if interspersed=False, but makes it
much easier to compare to a Patran-formatted bdf and is
more clear.
enddata : bool; default=None
bool - enable/disable writing ENDDATA
None - depends on input BDF
close : bool; default=True
should the output file be closed
"""
#self.write_caero_model()
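        # Illustrative usage (a sketch, not from the original file):
        #   model = BDF()
        #   model.read_bdf('model_in.bdf')
        #   model.write_bdf('model_out.bdf', size=16, is_double=True)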
out_filename = self._output_helper(out_filename,
interspersed, size, is_double)
self.log.debug('---starting BDF.write_bdf of %s---' % out_filename)
encoding = self.get_encoding(encoding)
#assert encoding.lower() in ['ascii', 'latin1', 'utf8'], encoding
if hasattr(out_filename, 'read') and hasattr(out_filename, 'write'):
bdf_file = out_filename
else:
if PY2:
wb = 'wb'
else:
wb = 'w'
bdf_file = open(out_filename, 'w', encoding=encoding)
self._write_header(bdf_file, encoding)
self._write_params(bdf_file, size, is_double)
self._write_nodes(bdf_file, size, is_double)
if interspersed:
self._write_elements_properties(bdf_file, size, is_double)
else:
self._write_elements(bdf_file, size, is_double)
self._write_properties(bdf_file, size, is_double)
self._write_materials(bdf_file, size, is_double)
self._write_masses(bdf_file, size, is_double)
self._write_common(bdf_file, size, is_double)
if (enddata is None and 'ENDDATA' in self.card_count) or enddata:
bdf_file.write('ENDDATA\n')
if close:
bdf_file.close()
def _write_header(self, bdf_file, encoding):
"""
Writes the executive and case control decks.
"""
if self.punch is None:
# writing a mesh without using read_bdf
if self.executive_control_lines or self.case_control_deck:
self.punch = False
else:
self.punch = True
if self.nastran_format:
bdf_file.write('$pyNastran: version=%s\n' % self.nastran_format)
bdf_file.write('$pyNastran: punch=%s\n' % self.punch)
bdf_file.write('$pyNastran: encoding=%s\n' % encoding)
bdf_file.write('$pyNastran: nnodes=%s\n' % len(self.nodes))
bdf_file.write('$pyNastran: nelements=%s\n' % len(self.elements))
if not self.punch:
self._write_executive_control_deck(bdf_file)
self._write_case_control_deck(bdf_file)
def _write_executive_control_deck(self, bdf_file):
"""
Writes the executive control deck.
"""
if self.executive_control_lines:
msg = '$EXECUTIVE CONTROL DECK\n'
if self.sol == 600:
new_sol = 'SOL 600,%s' % self.sol_method
else:
new_sol = 'SOL %s' % self.sol
if self.sol_iline is not None:
self.executive_control_lines[self.sol_iline] = new_sol
for line in self.executive_control_lines:
msg += line + '\n'
bdf_file.write(msg)
def _write_case_control_deck(self, bdf_file):
"""
Writes the Case Control Deck.
"""
if self.case_control_deck:
msg = '$CASE CONTROL DECK\n'
msg += str(self.case_control_deck)
assert 'BEGIN BULK' in msg, msg
bdf_file.write(''.join(msg))
def _write_elements(self, bdf_file, size=8, is_double=False):
"""
Writes the elements in a sorted order
"""
if self.elements:
bdf_file.write('$ELEMENTS\n')
if self.is_long_ids:
for (eid, element) in sorted(iteritems(self.elements)):
bdf_file.write(element.write_card_16(is_double))
else:
for (eid, element) in sorted(iteritems(self.elements)):
try:
bdf_file.write(element.write_card(size, is_double))
except:
print('failed printing element...'
'type=%s eid=%s' % (element.type, eid))
raise
def _write_elements_properties(self, bdf_file, size=8, is_double=False):
"""
Writes the elements and properties in and interspersed order
"""
missing_properties = []
if self.properties:
bdf_file.write('$ELEMENTS_WITH_PROPERTIES\n')
eids_written = []
pids = sorted(self.properties.keys())
pid_eids = self.get_element_ids_dict_with_pids(pids)
msg = []
#failed_element_types = set([])
for (pid, eids) in sorted(iteritems(pid_eids)):
prop = self.properties[pid]
if eids:
msg.append(prop.write_card(size, is_double))
eids.sort()
for eid in eids:
element = self.Element(eid)
try:
msg.append(element.write_card(size, is_double))
except:
print('failed printing element...' 'type=%r eid=%s'
% (element.type, eid))
raise
eids_written += eids
else:
missing_properties.append(prop.write_card(size, is_double))
bdf_file.write(''.join(msg))
eids_missing = set(self.elements.keys()).difference(set(eids_written))
if eids_missing:
msg = ['$ELEMENTS_WITH_NO_PROPERTIES '
'(PID=0 and unanalyzed properties)\n']
for eid in sorted(eids_missing):
element = self.Element(eid, msg='')
try:
msg.append(element.write_card(size, is_double))
except:
print('failed printing element...'
'type=%s eid=%s' % (element.type, eid))
raise
bdf_file.write(''.join(msg))
if missing_properties or self.pdampt or self.pbusht or self.pelast:
msg = ['$UNASSOCIATED_PROPERTIES\n']
for card in sorted(itervalues(self.pbusht)):
msg.append(card.write_card(size, is_double))
for card in sorted(itervalues(self.pdampt)):
msg.append(card.write_card(size, is_double))
for card in sorted(itervalues(self.pelast)):
msg.append(card.write_card(size, is_double))
for card in missing_properties:
# this is a string...
#print("missing_property = ", card
msg.append(card)
bdf_file.write(''.join(msg))
def _write_aero(self, bdf_file, size=8, is_double=False):
"""Writes the aero cards"""
if self.caeros or self.paeros or self.monitor_points or self.splines:
msg = ['$AERO\n']
for (unused_id, caero) in sorted(iteritems(self.caeros)):
msg.append(caero.write_card(size, is_double))
for (unused_id, paero) in sorted(iteritems(self.paeros)):
msg.append(paero.write_card(size, is_double))
for (unused_id, spline) in sorted(iteritems(self.splines)):
msg.append(spline.write_card(size, is_double))
for monitor_point in self.monitor_points:
msg.append(monitor_point.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_aero_control(self, bdf_file, size=8, is_double=False):
"""Writes the aero control surface cards"""
if(self.aecomps or self.aefacts or self.aeparams or self.aelinks or
self.aelists or self.aestats or self.aesurf or self.aesurfs):
msg = ['$AERO CONTROL SURFACES\n']
for (unused_id, aelinks) in sorted(iteritems(self.aelinks)):
for aelink in aelinks:
msg.append(aelink.write_card(size, is_double))
for (unused_id, aecomp) in sorted(iteritems(self.aecomps)):
msg.append(aecomp.write_card(size, is_double))
for (unused_id, aeparam) in sorted(iteritems(self.aeparams)):
msg.append(aeparam.write_card(size, is_double))
for (unused_id, aestat) in sorted(iteritems(self.aestats)):
msg.append(aestat.write_card(size, is_double))
for (unused_id, aelist) in sorted(iteritems(self.aelists)):
msg.append(aelist.write_card(size, is_double))
for (unused_id, aesurf) in sorted(iteritems(self.aesurf)):
msg.append(aesurf.write_card(size, is_double))
for (unused_id, aesurfs) in sorted(iteritems(self.aesurfs)):
msg.append(aesurfs.write_card(size, is_double))
for (unused_id, aefact) in sorted(iteritems(self.aefacts)):
msg.append(aefact.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_static_aero(self, bdf_file, size=8, is_double=False):
"""Writes the static aero cards"""
if self.aeros or self.trims or self.divergs:
msg = ['$STATIC AERO\n']
# static aero
if self.aeros:
msg.append(self.aeros.write_card(size, is_double))
for (unused_id, trim) in sorted(iteritems(self.trims)):
msg.append(trim.write_card(size, is_double))
for (unused_id, diverg) in sorted(iteritems(self.divergs)):
msg.append(diverg.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _find_aero_location(self):
"""Determines where the AERO card should be written"""
write_aero_in_flutter = False
write_aero_in_gust = False
if self.aero:
if self.flfacts or self.flutters or self.mkaeros:
write_aero_in_flutter = True
elif self.gusts:
write_aero_in_gust = True
else:
# an AERO card exists, but no FLUTTER, FLFACT, MKAEROx or GUST card
write_aero_in_flutter = True
return write_aero_in_flutter, write_aero_in_gust
def _write_flutter(self, bdf_file, size=8, is_double=False, write_aero_in_flutter=True):
"""Writes the flutter cards"""
if (write_aero_in_flutter and self.aero) or self.flfacts or self.flutters or self.mkaeros:
msg = ['$FLUTTER\n']
if write_aero_in_flutter:
msg.append(self.aero.write_card(size, is_double))
for (unused_id, flutter) in sorted(iteritems(self.flutters)):
msg.append(flutter.write_card(size, is_double))
for (unused_id, flfact) in sorted(iteritems(self.flfacts)):
msg.append(flfact.write_card(size, is_double))
for mkaero in self.mkaeros:
msg.append(mkaero.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_gust(self, bdf_file, size=8, is_double=False, write_aero_in_gust=True):
"""Writes the gust cards"""
if (write_aero_in_gust and self.aero) or self.gusts:
msg = ['$GUST\n']
if write_aero_in_gust:
for (unused_id, aero) in sorted(iteritems(self.aero)):
msg.append(aero.write_card(size, is_double))
for (unused_id, gust) in sorted(iteritems(self.gusts)):
msg.append(gust.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_common(self, bdf_file, size=8, is_double=False):
"""
Write the common outputs so none get missed...
Parameters
----------
bdf_file : file
the file object
size : int (default=8)
the field width
is_double : bool (default=False)
is this double precision
Returns
-------
msg : str
part of the bdf
"""
self._write_rigid_elements(bdf_file, size, is_double)
self._write_dmigs(bdf_file, size, is_double)
self._write_loads(bdf_file, size, is_double)
self._write_dynamic(bdf_file, size, is_double)
self._write_aero(bdf_file, size, is_double)
self._write_aero_control(bdf_file, size, is_double)
self._write_static_aero(bdf_file, size, is_double)
write_aero_in_flutter, write_aero_in_gust = self._find_aero_location()
self._write_flutter(bdf_file, size, is_double, write_aero_in_flutter)
self._write_gust(bdf_file, size, is_double, write_aero_in_gust)
self._write_thermal(bdf_file, size, is_double)
self._write_thermal_materials(bdf_file, size, is_double)
self._write_constraints(bdf_file, size, is_double)
self._write_optimization(bdf_file, size, is_double)
self._write_tables(bdf_file, size, is_double)
self._write_sets(bdf_file, size, is_double)
self._write_superelements(bdf_file, size, is_double)
self._write_contact(bdf_file, size, is_double)
self._write_rejects(bdf_file, size, is_double)
self._write_coords(bdf_file, size, is_double)
def _write_constraints(self, bdf_file, size=8, is_double=False):
"""Writes the constraint cards sorted by ID"""
if self.suport or self.suport1:
msg = ['$CONSTRAINTS\n']
for suport in self.suport:
msg.append(suport.write_card(size, is_double))
for suport_id, suport in sorted(iteritems(self.suport1)):
msg.append(suport.write_card(size, is_double))
bdf_file.write(''.join(msg))
if self.spcs or self.spcadds:
#msg = ['$SPCs\n']
#str_spc = str(self.spcObject) # old
#if str_spc:
#msg.append(str_spc)
#else:
msg = ['$SPCs\n']
for (unused_id, spcadd) in sorted(iteritems(self.spcadds)):
msg.append(str(spcadd))
for (unused_id, spcs) in sorted(iteritems(self.spcs)):
for spc in spcs:
msg.append(str(spc))
bdf_file.write(''.join(msg))
if self.mpcs or self.mpcadds:
msg = ['$MPCs\n']
for (unused_id, mpcadd) in sorted(iteritems(self.mpcadds)):
msg.append(str(mpcadd))
for (unused_id, mpcs) in sorted(iteritems(self.mpcs)):
for mpc in mpcs:
msg.append(mpc.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_contact(self, bdf_file, size=8, is_double=False):
"""Writes the contact cards sorted by ID"""
is_contact = (self.bcrparas or self.bctadds or self.bctparas
or self.bctsets or self.bsurf or self.bsurfs)
if is_contact:
msg = ['$CONTACT\n']
for (unused_id, bcrpara) in sorted(iteritems(self.bcrparas)):
msg.append(bcrpara.write_card(size, is_double))
for (unused_id, bctadds) in sorted(iteritems(self.bctadds)):
msg.append(bctadds.write_card(size, is_double))
for (unused_id, bctpara) in sorted(iteritems(self.bctparas)):
msg.append(bctpara.write_card(size, is_double))
for (unused_id, bctset) in sorted(iteritems(self.bctsets)):
msg.append(bctset.write_card(size, is_double))
for (unused_id, bsurfi) in sorted(iteritems(self.bsurf)):
msg.append(bsurfi.write_card(size, is_double))
for (unused_id, bsurfsi) in sorted(iteritems(self.bsurfs)):
msg.append(bsurfsi.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_coords(self, bdf_file, size=8, is_double=False):
"""Writes the coordinate cards in a sorted order"""
msg = []
if len(self.coords) > 1:
msg.append('$COORDS\n')
for (unused_id, coord) in sorted(iteritems(self.coords)):
if unused_id != 0:
msg.append(coord.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_dmigs(self, bdf_file, size=8, is_double=False):
"""
Writes the DMIG cards
Parameters
----------
size : int
large field (16) or small field (8)
Returns
-------
msg : str
string representation of the DMIGs
"""
msg = []
for (unused_name, dmig) in sorted(iteritems(self.dmigs)):
msg.append(dmig.write_card(size, is_double))
for (unused_name, dmi) in sorted(iteritems(self.dmis)):
msg.append(dmi.write_card(size, is_double))
for (unused_name, dmij) in sorted(iteritems(self.dmijs)):
msg.append(dmij.write_card(size, is_double))
for (unused_name, dmiji) in sorted(iteritems(self.dmijis)):
msg.append(dmiji.write_card(size, is_double))
for (unused_name, dmik) in sorted(iteritems(self.dmiks)):
msg.append(dmik.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_dynamic(self, bdf_file, size=8, is_double=False):
"""Writes the dynamic cards sorted by ID"""
is_dynamic = (self.dareas or self.dphases or self.nlparms or self.frequencies or
self.methods or self.cMethods or self.tsteps or self.tstepnls or
self.transfer_functions or self.delays or self.rotors)
if is_dynamic:
msg = ['$DYNAMIC\n']
for (unused_id, method) in sorted(iteritems(self.methods)):
msg.append(method.write_card(size, is_double))
for (unused_id, cmethod) in sorted(iteritems(self.cMethods)):
msg.append(cmethod.write_card(size, is_double))
for (unused_id, darea) in sorted(iteritems(self.dareas)):
msg.append(darea.write_card(size, is_double))
for (unused_id, dphase) in sorted(iteritems(self.dphases)):
msg.append(dphase.write_card(size, is_double))
for (unused_id, nlparm) in sorted(iteritems(self.nlparms)):
msg.append(nlparm.write_card(size, is_double))
for (unused_id, nlpci) in sorted(iteritems(self.nlpcis)):
msg.append(nlpci.write_card(size, is_double))
for (unused_id, tstep) in sorted(iteritems(self.tsteps)):
msg.append(tstep.write_card(size, is_double))
for (unused_id, tstepnl) in sorted(iteritems(self.tstepnls)):
msg.append(tstepnl.write_card(size, is_double))
for (unused_id, freq) in sorted(iteritems(self.frequencies)):
msg.append(freq.write_card(size, is_double))
for (unused_id, delay) in sorted(iteritems(self.delays)):
msg.append(delay.write_card(size, is_double))
for (unused_id, rotor) in sorted(iteritems(self.rotors)):
msg.append(rotor.write_card(size, is_double))
for (unused_id, tfs) in sorted(iteritems(self.transfer_functions)):
for tf in tfs:
msg.append(tf.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_loads(self, bdf_file, size=8, is_double=False):
"""Writes the load cards sorted by ID"""
if self.loads or self.tempds:
msg = ['$LOADS\n']
for (key, loadcase) in sorted(iteritems(self.loads)):
for load in loadcase:
try:
msg.append(load.write_card(size, is_double))
except:
print('failed printing load...type=%s key=%r'
% (load.type, key))
raise
for key, tempd in sorted(iteritems(self.tempds)):
msg.append(tempd.write_card(size, is_double))
bdf_file.write(''.join(msg))
if self.dloads or self.dload_entries:
msg = ['$DLOADS\n']
for (key, loadcase) in sorted(iteritems(self.dloads)):
for load in loadcase:
try:
msg.append(load.write_card(size, is_double))
except:
print('failed printing load...type=%s key=%r'
% (load.type, key))
raise
for (key, loadcase) in sorted(iteritems(self.dload_entries)):
for load in loadcase:
try:
msg.append(load.write_card(size, is_double))
except:
print('failed printing load...type=%s key=%r'
% (load.type, key))
raise
bdf_file.write(''.join(msg))
def _write_masses(self, bdf_file, size=8, is_double=False):
"""Writes the mass cards sorted by ID"""
if self.properties_mass:
bdf_file.write('$PROPERTIES_MASS\n')
for (pid, mass) in sorted(iteritems(self.properties_mass)):
try:
bdf_file.write(mass.write_card(size, is_double))
except:
print('failed printing mass property...'
'type=%s eid=%s' % (mass.type, pid))
raise
if self.masses:
bdf_file.write('$MASSES\n')
for (eid, mass) in sorted(iteritems(self.masses)):
try:
bdf_file.write(mass.write_card(size, is_double))
except:
print('failed printing masses...'
'type=%s eid=%s' % (mass.type, eid))
raise
def _write_materials(self, bdf_file, size=8, is_double=False):
"""Writes the materials in a sorted order"""
is_materials = (self.materials or self.hyperelastic_materials or self.creep_materials or
self.MATS1 or self.MATS3 or self.MATS8 or self.MATT1 or
self.MATT2 or self.MATT3 or self.MATT4 or self.MATT5 or
self.MATT8 or self.MATT9)
if is_materials:
msg = ['$MATERIALS\n']
for (unused_mid, material) in sorted(iteritems(self.materials)):
msg.append(material.write_card(size, is_double))
for (unused_mid, material) in sorted(iteritems(self.hyperelastic_materials)):
msg.append(material.write_card(size, is_double))
for (unused_mid, material) in sorted(iteritems(self.creep_materials)):
msg.append(material.write_card(size, is_double))
for (unused_mid, material) in sorted(iteritems(self.MATS1)):
msg.append(material.write_card(size, is_double))
for (unused_mid, material) in sorted(iteritems(self.MATS3)):
msg.append(material.write_card(size, is_double))
for (unused_mid, material) in sorted(iteritems(self.MATS8)):
msg.append(material.write_card(size, is_double))
for (unused_mid, material) in sorted(iteritems(self.MATT1)):
msg.append(material.write_card(size, is_double))
for (unused_mid, material) in sorted(iteritems(self.MATT2)):
msg.append(material.write_card(size, is_double))
for (unused_mid, material) in sorted(iteritems(self.MATT3)):
msg.append(material.write_card(size, is_double))
for (unused_mid, material) in sorted(iteritems(self.MATT4)):
msg.append(material.write_card(size, is_double))
for (unused_mid, material) in sorted(iteritems(self.MATT5)):
msg.append(material.write_card(size, is_double))
for (unused_mid, material) in sorted(iteritems(self.MATT8)):
msg.append(material.write_card(size, is_double))
for (unused_mid, material) in sorted(iteritems(self.MATT9)):
msg.append(material.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_nodes(self, bdf_file, size=8, is_double=False):
"""
Writes the NODE-type cards
"""
if self.spoints:
msg = []
msg.append('$SPOINTS\n')
msg.append(self.spoints.write_card(size, is_double))
bdf_file.write(''.join(msg))
if self.epoints:
msg = []
msg.append('$EPOINTS\n')
msg.append(self.epoints.write_card(size, is_double))
bdf_file.write(''.join(msg))
if self.points:
msg = []
msg.append('$POINTS\n')
for point_id, point in sorted(iteritems(self.points)):
msg.append(point.write_card(size, is_double))
bdf_file.write(''.join(msg))
if self.nodes:
msg = []
msg.append('$NODES\n')
if self.grdset:
msg.append(self.grdset.print_card(size))
if self.is_long_ids:
for (unused_nid, node) in sorted(iteritems(self.nodes)):
msg.append(node.write_card_16(is_double))
else:
for (unused_nid, node) in sorted(iteritems(self.nodes)):
msg.append(node.write_card(size, is_double))
bdf_file.write(''.join(msg))
#if 0: # not finished
#self._write_nodes_associated(bdf_file, size, is_double)
#def _write_nodes_associated(self, bdf_file, size=8, is_double=False):
#"""
#Writes the NODE-type in associated and unassociated groups.
#.. warning:: Sometimes crashes, probably on invalid BDFs.
#"""
#msg = []
#associated_nodes = set([])
#for (eid, element) in iteritems(self.elements):
#associated_nodes = associated_nodes.union(set(element.node_ids))
#all_nodes = set(self.nodes.keys())
#unassociated_nodes = list(all_nodes.difference(associated_nodes))
##missing_nodes = all_nodes.difference(
## TODO: this really shouldn't be a list...???
#associated_nodes = list(associated_nodes)
#if associated_nodes:
#msg += ['$ASSOCIATED NODES\n']
#if self.grdset:
#msg.append(self.grdset.write_card(size, is_double))
## TODO: this really shouldn't be a dictionary...???
#for key, node in sorted(iteritems(associated_nodes)):
#msg.append(node.write_card(size, is_double))
#if unassociated_nodes:
#msg.append('$UNASSOCIATED NODES\n')
#if self.grdset and not associated_nodes:
#msg.append(self.grdset.write_card(size, is_double))
#for key, node in sorted(iteritems(unassociated_nodes)):
#if key in self.nodes:
#msg.append(node.write_card(size, is_double))
#else:
#msg.append('$ Missing NodeID=%s' % key)
#bdf_file.write(''.join(msg))
def _write_optimization(self, bdf_file, size=8, is_double=False):
"""Writes the optimization cards sorted by ID"""
is_optimization = (self.dconadds or self.dconstrs or self.desvars or self.ddvals or
self.dresps or
self.dvprels or self.dvmrels or self.dvcrels or self.doptprm or
self.dlinks or self.dequations or self.dtable is not None or
self.dvgrids)
if is_optimization:
msg = ['$OPTIMIZATION\n']
for (unused_id, dconadd) in sorted(iteritems(self.dconadds)):
msg.append(dconadd.write_card(size, is_double))
for (unused_id, dconstrs) in sorted(iteritems(self.dconstrs)):
for dconstr in dconstrs:
msg.append(dconstr.write_card(size, is_double))
for (unused_id, desvar) in sorted(iteritems(self.desvars)):
msg.append(desvar.write_card(size, is_double))
for (unused_id, ddval) in sorted(iteritems(self.ddvals)):
msg.append(ddval.write_card(size, is_double))
for (unused_id, dlink) in sorted(iteritems(self.dlinks)):
msg.append(dlink.write_card(size, is_double))
for (unused_id, dresp) in sorted(iteritems(self.dresps)):
msg.append(dresp.write_card(size, is_double))
for (unused_id, dvcrel) in sorted(iteritems(self.dvcrels)):
msg.append(dvcrel.write_card(size, is_double))
for (unused_id, dvmrel) in sorted(iteritems(self.dvmrels)):
msg.append(dvmrel.write_card(size, is_double))
for (unused_id, dvprel) in sorted(iteritems(self.dvprels)):
msg.append(dvprel.write_card(size, is_double))
for (unused_id, dvgrids) in sorted(iteritems(self.dvgrids)):
for dvgrid in dvgrids:
msg.append(dvgrid.write_card(size, is_double))
for (unused_id, equation) in sorted(iteritems(self.dequations)):
msg.append(str(equation))
if self.dtable is not None:
msg.append(self.dtable.write_card(size, is_double))
if self.doptprm is not None:
msg.append(self.doptprm.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_params(self, bdf_file, size=8, is_double=False):
"""
Writes the PARAM cards
"""
if self.params:
msg = ['$PARAMS\n']
if self.is_long_ids:
for (unused_key, param) in sorted(iteritems(self.params)):
msg.append(param.write_card(16, is_double))
else:
for (unused_key, param) in sorted(iteritems(self.params)):
msg.append(param.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_properties(self, bdf_file, size=8, is_double=False):
"""Writes the properties in a sorted order"""
if self.properties:
msg = ['$PROPERTIES\n']
prop_groups = (self.properties, self.pelast, self.pdampt, self.pbusht)
if self.is_long_ids:
for prop_group in prop_groups:
for unused_pid, prop in sorted(iteritems(prop_group)):
msg.append(prop.write_card_16(is_double))
#except:
#print('failed printing property type=%s' % prop.type)
#raise
else:
for prop_group in prop_groups:
for unused_pid, prop in sorted(iteritems(prop_group)):
msg.append(prop.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_rejects(self, bdf_file, size=8, is_double=False):
"""
Writes the rejected (processed) cards and the rejected unprocessed
cardlines
"""
if size == 8:
print_func = print_card_8
else:
print_func = print_card_16
msg = []
if self.reject_cards:
msg.append('$REJECTS\n')
for reject_card in self.reject_cards:
try:
msg.append(print_func(reject_card))
except RuntimeError:
for field in reject_card:
if field is not None and '=' in field:
raise SyntaxError('cannot reject equal signed '
'cards\ncard=%s\n' % reject_card)
raise
if self.rejects:
msg.append('$REJECT_LINES\n')
for reject_lines in self.reject_lines:
if isinstance(reject_lines, (list, tuple)):
for reject in reject_lines:
reject2 = reject.rstrip()
if reject2:
msg.append('%s\n' % reject2)
elif isinstance(reject_lines, string_types):
reject2 = reject_lines.rstrip()
if reject2:
msg.append('%s\n' % reject2)
else:
raise TypeError(reject_lines)
bdf_file.write(''.join(msg))
def _write_rigid_elements(self, bdf_file, size=8, is_double=False):
"""Writes the rigid elements in a sorted order"""
if self.rigid_elements:
bdf_file.write('$RIGID ELEMENTS\n')
if self.is_long_ids:
for (eid, element) in sorted(iteritems(self.rigid_elements)):
try:
bdf_file.write(element.write_card_16(is_double))
except:
print('failed printing element...'
'type=%s eid=%s' % (element.type, eid))
raise
else:
for (eid, element) in sorted(iteritems(self.rigid_elements)):
try:
bdf_file.write(element.write_card(size, is_double))
except:
print('failed printing element...'
'type=%s eid=%s' % (element.type, eid))
raise
if self.plotels:
bdf_file.write('$PLOT ELEMENTS\n')
for (eid, element) in sorted(iteritems(self.plotels)):
bdf_file.write(element.write_card(size, is_double))
def _write_sets(self, bdf_file, size=8, is_double=False):
"""Writes the SETx cards sorted by ID"""
is_sets = (self.sets or self.asets or self.bsets or self.csets or self.qsets
or self.usets)
if is_sets:
msg = ['$SETS\n']
for (unused_id, set_obj) in sorted(iteritems(self.sets)): # dict
msg.append(set_obj.write_card(size, is_double))
for set_obj in self.asets: # list
msg.append(set_obj.write_card(size, is_double))
for set_obj in self.bsets: # list
msg.append(set_obj.write_card(size, is_double))
for set_obj in self.csets: # list
msg.append(set_obj.write_card(size, is_double))
for set_obj in self.qsets: # list
msg.append(set_obj.write_card(size, is_double))
for name, usets in sorted(iteritems(self.usets)): # dict
for set_obj in usets: # list
msg.append(set_obj.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_superelements(self, bdf_file, size=8, is_double=False):
"""Writes the SETx cards sorted by ID"""
is_sets = (self.se_sets or self.se_bsets or self.se_csets or self.se_qsets
or self.se_usets)
if is_sets:
msg = ['$SUPERELEMENTS\n']
for set_obj in self.se_bsets: # list
msg.append(set_obj.write_card(size, is_double))
for set_obj in self.se_csets: # list
msg.append(set_obj.write_card(size, is_double))
for set_obj in self.se_qsets: # list
msg.append(set_obj.write_card(size, is_double))
for (set_id, set_obj) in sorted(iteritems(self.se_sets)): # dict
msg.append(set_obj.write_card(size, is_double))
for name, usets in sorted(iteritems(self.se_usets)): # dict
for set_obj in usets: # list
msg.append(set_obj.write_card(size, is_double))
for suport in self.se_suport: # list
msg.append(suport.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_tables(self, bdf_file, size=8, is_double=False):
"""Writes the TABLEx cards sorted by ID"""
if self.tables or self.tables_sdamping:
msg = ['$TABLES\n']
for (unused_id, table) in sorted(iteritems(self.tables)):
msg.append(table.write_card(size, is_double))
for (unused_id, table) in sorted(iteritems(self.tables_sdamping)):
msg.append(table.write_card(size, is_double))
bdf_file.write(''.join(msg))
if self.random_tables:
msg = ['$RANDOM TABLES\n']
for (unused_id, table) in sorted(iteritems(self.random_tables)):
msg.append(table.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_thermal(self, bdf_file, size=8, is_double=False):
"""Writes the thermal cards"""
# PHBDY
if self.phbdys or self.convection_properties or self.bcs:
# self.thermalProperties or
msg = ['$THERMAL\n']
for (unused_key, phbdy) in sorted(iteritems(self.phbdys)):
msg.append(phbdy.write_card(size, is_double))
#for unused_key, prop in sorted(iteritems(self.thermalProperties)):
# msg.append(str(prop))
for (unused_key, prop) in sorted(iteritems(self.convection_properties)):
msg.append(prop.write_card(size, is_double))
# BCs
for (unused_key, bcs) in sorted(iteritems(self.bcs)):
for boundary_condition in bcs: # list
msg.append(boundary_condition.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_thermal_materials(self, bdf_file, size=8, is_double=False):
"""Writes the thermal materials in a sorted order"""
if self.thermal_materials:
msg = ['$THERMAL MATERIALS\n']
for (unused_mid, material) in sorted(iteritems(self.thermal_materials)):
msg.append(material.write_card(size, is_double))
bdf_file.write(''.join(msg))
|
lgpl-3.0
| 4,066,461,600,381,241,000
| 43.201362
| 98
| 0.54286
| false
| 3.635411
| false
| false
| false
|
SUSE/kiwi
|
test/unit/xml_state_test.py
|
1
|
39351
|
import logging
from collections import namedtuple
from mock import (
patch, Mock
)
from pytest import (
raises, fixture
)
from kiwi.xml_state import XMLState
from kiwi.xml_description import XMLDescription
from kiwi.exceptions import (
KiwiTypeNotFound,
KiwiDistributionNameError,
KiwiProfileNotFound
)
class TestXMLState:
@fixture(autouse=True)
def inject_fixtures(self, caplog):
self._caplog = caplog
@patch('platform.machine')
def setup(self, mock_machine):
mock_machine.return_value = 'x86_64'
self.description = XMLDescription(
'../data/example_config.xml'
)
self.state = XMLState(
self.description.load()
)
boot_description = XMLDescription(
'../data/isoboot/example-distribution/config.xml'
)
self.boot_state = XMLState(
boot_description.load()
)
no_image_packages_description = XMLDescription(
'../data/example_no_image_packages_config.xml'
)
self.no_image_packages_boot_state = XMLState(
no_image_packages_description.load()
)
self.bootloader = Mock()
self.bootloader.get_name.return_value = 'some-loader'
self.bootloader.get_timeout.return_value = 'some-timeout'
self.bootloader.get_timeout_style.return_value = 'some-style'
self.bootloader.get_targettype.return_value = 'some-target'
self.bootloader.get_console.return_value = 'some-console'
self.bootloader.get_serial_line.return_value = 'some-serial'
def test_get_description_section(self):
description = self.state.get_description_section()
assert description.author == 'Marcus'
assert description.contact == 'ms@suse.com'
assert description.specification == \
'Testing various configuration states'
@patch('platform.machine')
def test_get_preferences_by_architecture(self, mock_machine):
mock_machine.return_value = 'aarch64'
state = XMLState(
self.description.load()
)
preferences = state.get_preferences_sections()
assert len(preferences) == 3
assert preferences[2].get_arch() == 'aarch64'
assert state.get_build_type_name() == 'iso'
def test_build_type_primary_selected(self):
assert self.state.get_build_type_name() == 'oem'
def test_build_type_first_selected(self):
self.state.xml_data.get_preferences()[2].get_type()[0].set_primary(
False
)
assert self.state.get_build_type_name() == 'oem'
@patch('kiwi.xml_state.XMLState.get_preferences_sections')
def test_get_rpm_excludedocs_without_entry(self, mock_preferences):
mock_preferences.return_value = []
assert self.state.get_rpm_excludedocs() is False
def test_get_rpm_excludedocs(self):
assert self.state.get_rpm_excludedocs() is True
@patch('kiwi.xml_state.XMLState.get_preferences_sections')
def test_get_rpm_check_signatures_without_entry(self, mock_preferences):
mock_preferences.return_value = []
assert self.state.get_rpm_check_signatures() is False
def test_get_rpm_check_signatures(self):
assert self.state.get_rpm_check_signatures() is True
def test_get_package_manager(self):
assert self.state.get_package_manager() == 'zypper'
@patch('kiwi.xml_state.XMLState.get_preferences_sections')
def test_get_default_package_manager(self, mock_preferences):
mock_preferences.return_value = []
assert self.state.get_package_manager() == 'dnf'
def test_get_image_version(self):
assert self.state.get_image_version() == '1.13.2'
def test_get_bootstrap_packages(self):
assert self.state.get_bootstrap_packages() == [
'filesystem', 'zypper'
]
assert self.state.get_bootstrap_packages(plus_packages=['vim']) == [
'filesystem', 'vim', 'zypper'
]
assert self.no_image_packages_boot_state.get_bootstrap_packages() == [
'patterns-openSUSE-base'
]
def test_get_system_packages(self):
assert self.state.get_system_packages() == [
'gfxboot-branding-openSUSE',
'grub2-branding-openSUSE',
'ifplugd',
'iputils',
'kernel-default',
'openssh',
'plymouth-branding-openSUSE',
'vim'
]
@patch('platform.machine')
def test_get_system_packages_some_arch(self, mock_machine):
mock_machine.return_value = 's390'
state = XMLState(
self.description.load()
)
assert state.get_system_packages() == [
'foo',
'gfxboot-branding-openSUSE',
'grub2-branding-openSUSE',
'ifplugd',
'iputils',
'kernel-default',
'openssh',
'plymouth-branding-openSUSE',
'vim'
]
def test_get_system_collections(self):
assert self.state.get_system_collections() == [
'base'
]
def test_get_system_products(self):
assert self.state.get_system_products() == [
'openSUSE'
]
def test_get_system_archives(self):
assert self.state.get_system_archives() == [
'/absolute/path/to/image.tgz'
]
def test_get_system_ignore_packages(self):
assert self.state.get_system_ignore_packages() == [
'bar', 'baz', 'foo'
]
self.state.host_architecture = 'aarch64'
assert self.state.get_system_ignore_packages() == [
'baz', 'foo'
]
self.state.host_architecture = 's390'
assert self.state.get_system_ignore_packages() == [
'baz'
]
def test_get_system_collection_type(self):
assert self.state.get_system_collection_type() == 'plusRecommended'
def test_get_bootstrap_collections(self):
assert self.state.get_bootstrap_collections() == [
'bootstrap-collection'
]
def test_get_bootstrap_products(self):
assert self.state.get_bootstrap_products() == ['kiwi']
def test_get_bootstrap_archives(self):
assert self.state.get_bootstrap_archives() == ['bootstrap.tgz']
def test_get_bootstrap_collection_type(self):
assert self.state.get_bootstrap_collection_type() == 'onlyRequired'
def test_set_repository(self):
self.state.set_repository('repo', 'type', 'alias', 1, True, False)
assert self.state.xml_data.get_repository()[0].get_source().get_path() \
== 'repo'
assert self.state.xml_data.get_repository()[0].get_type() == 'type'
assert self.state.xml_data.get_repository()[0].get_alias() == 'alias'
assert self.state.xml_data.get_repository()[0].get_priority() == 1
assert self.state.xml_data.get_repository()[0] \
.get_imageinclude() is True
assert self.state.xml_data.get_repository()[0] \
.get_package_gpgcheck() is False
def test_add_repository(self):
self.state.add_repository('repo', 'type', 'alias', 1, True)
assert self.state.xml_data.get_repository()[3].get_source().get_path() \
== 'repo'
assert self.state.xml_data.get_repository()[3].get_type() == 'type'
assert self.state.xml_data.get_repository()[3].get_alias() == 'alias'
assert self.state.xml_data.get_repository()[3].get_priority() == 1
assert self.state.xml_data.get_repository()[3] \
.get_imageinclude() is True
def test_add_repository_with_empty_values(self):
self.state.add_repository('repo', 'type', '', '', True)
assert self.state.xml_data.get_repository()[3].get_source().get_path() \
== 'repo'
assert self.state.xml_data.get_repository()[3].get_type() == 'type'
assert self.state.xml_data.get_repository()[3].get_alias() == ''
assert self.state.xml_data.get_repository()[3].get_priority() is None
assert self.state.xml_data.get_repository()[3] \
.get_imageinclude() is True
def test_get_to_become_deleted_packages(self):
assert self.state.get_to_become_deleted_packages() == [
'kernel-debug'
]
def test_get_build_type_vagrant_config_section(self):
vagrant_config = self.state.get_build_type_vagrant_config_section()
assert vagrant_config.get_provider() == 'libvirt'
assert self.boot_state.get_build_type_vagrant_config_section() is None
def test_virtualbox_guest_additions_vagrant_config_section(self):
assert not self.state.get_vagrant_config_virtualbox_guest_additions()
def test_virtualbox_guest_additions_vagrant_config_section_missing(self):
self.state. \
get_build_type_vagrant_config_section() \
.virtualbox_guest_additions_present = True
assert self.state.get_vagrant_config_virtualbox_guest_additions()
def test_get_build_type_system_disk_section(self):
assert self.state.get_build_type_system_disk_section().get_name() == \
'mydisk'
def test_get_build_type_vmdisk_section(self):
assert self.state.get_build_type_vmdisk_section().get_id() == 0
assert self.boot_state.get_build_type_vmdisk_section() is None
def test_get_build_type_vmnic_entries(self):
assert self.state.get_build_type_vmnic_entries()[0].get_interface() \
== ''
assert self.boot_state.get_build_type_vmnic_entries() == []
def test_get_build_type_vmdvd_section(self):
assert self.state.get_build_type_vmdvd_section().get_id() == 0
assert self.boot_state.get_build_type_vmdvd_section() is None
def test_get_volume_management(self):
assert self.state.get_volume_management() == 'lvm'
def test_get_volume_management_none(self):
assert self.boot_state.get_volume_management() is None
def test_get_volume_management_btrfs(self):
description = XMLDescription('../data/example_btrfs_config.xml')
xml_data = description.load()
state = XMLState(xml_data)
assert state.get_volume_management() == 'btrfs'
def test_get_volume_management_lvm_prefer(self):
description = XMLDescription('../data/example_lvm_preferred_config.xml')
xml_data = description.load()
state = XMLState(xml_data)
assert state.get_volume_management() == 'lvm'
def test_get_volume_management_lvm_default(self):
description = XMLDescription('../data/example_lvm_default_config.xml')
xml_data = description.load()
state = XMLState(xml_data)
assert state.get_volume_management() == 'lvm'
def test_build_type_explicitly_selected(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['vmxFlavour'], 'oem')
assert state.get_build_type_name() == 'oem'
def test_build_type_not_found(self):
xml_data = self.description.load()
with raises(KiwiTypeNotFound):
XMLState(xml_data, ['vmxFlavour'], 'foo')
def test_build_type_not_found_no_default_type(self):
description = XMLDescription('../data/example_no_default_type.xml')
xml_data = description.load()
with raises(KiwiTypeNotFound):
XMLState(xml_data, ['minimal'])
def test_profile_not_found(self):
xml_data = self.description.load()
with raises(KiwiProfileNotFound):
XMLState(xml_data, ['foo'])
def test_profile_requires(self):
xml_data = self.description.load()
xml_state = XMLState(xml_data, ['composedProfile'])
assert xml_state.profiles == [
'composedProfile', 'vmxSimpleFlavour', 'xenDomUFlavour'
]
def test_get_volumes_custom_root_volume_name(self):
description = XMLDescription(
'../data/example_lvm_custom_rootvol_config.xml'
)
xml_data = description.load()
state = XMLState(xml_data)
volume_type = namedtuple(
'volume_type', [
'name',
'size',
'realpath',
'mountpoint',
'fullsize',
'label',
'attributes',
'is_root_volume'
]
)
assert state.get_volumes() == [
volume_type(
name='myroot', size='freespace:500',
realpath='/',
mountpoint=None, fullsize=False,
label=None,
attributes=[],
is_root_volume=True
)
]
def test_get_volumes(self):
description = XMLDescription('../data/example_lvm_default_config.xml')
xml_data = description.load()
state = XMLState(xml_data)
volume_type = namedtuple(
'volume_type', [
'name',
'size',
'realpath',
'mountpoint',
'fullsize',
'label',
'attributes',
'is_root_volume'
]
)
assert state.get_volumes() == [
volume_type(
name='usr_lib', size='size:1024',
realpath='usr/lib',
mountpoint='usr/lib',
fullsize=False,
label='library',
attributes=[],
is_root_volume=False
),
volume_type(
name='LVRoot', size='freespace:500',
realpath='/',
mountpoint=None, fullsize=False,
label=None,
attributes=[],
is_root_volume=True
),
volume_type(
name='etc_volume', size='freespace:30',
realpath='etc',
mountpoint='etc', fullsize=False,
label=None,
attributes=['no-copy-on-write'],
is_root_volume=False
),
volume_type(
name='bin_volume', size=None,
realpath='/usr/bin',
mountpoint='/usr/bin', fullsize=True,
label=None,
attributes=[],
is_root_volume=False
),
volume_type(
name='LVSwap', size='size:128',
realpath='swap',
mountpoint=None, fullsize=False,
label='SWAP',
attributes=[],
is_root_volume=False
)
]
def test_get_volumes_no_explicit_root_setup(self):
description = XMLDescription('../data/example_lvm_no_root_config.xml')
xml_data = description.load()
state = XMLState(xml_data)
volume_type = namedtuple(
'volume_type', [
'name',
'size',
'realpath',
'mountpoint',
'fullsize',
'label',
'attributes',
'is_root_volume'
]
)
assert state.get_volumes() == [
volume_type(
name='LVRoot', size=None, realpath='/',
mountpoint=None, fullsize=True,
label=None,
attributes=[],
is_root_volume=True
),
volume_type(
name='LVSwap', size='size:128',
realpath='swap',
mountpoint=None, fullsize=False,
label='SWAP',
attributes=[],
is_root_volume=False
)
]
def test_get_volumes_no_explicit_root_setup_other_fullsize_volume(self):
description = XMLDescription(
'../data/example_lvm_no_root_full_usr_config.xml'
)
xml_data = description.load()
state = XMLState(xml_data)
volume_type = namedtuple(
'volume_type', [
'name',
'size',
'realpath',
'mountpoint',
'fullsize',
'label',
'attributes',
'is_root_volume'
]
)
assert state.get_volumes() == [
volume_type(
name='usr', size=None, realpath='usr',
mountpoint='usr', fullsize=True,
label=None,
attributes=[],
is_root_volume=False
),
volume_type(
name='LVRoot', size='freespace:30', realpath='/',
mountpoint=None, fullsize=False,
label=None,
attributes=[],
is_root_volume=True
),
volume_type(
name='LVSwap', size='size:128',
realpath='swap',
mountpoint=None, fullsize=False,
label='SWAP',
attributes=[],
is_root_volume=False
)
]
@patch('kiwi.xml_state.XMLState.get_build_type_system_disk_section')
def test_get_empty_volumes(self, mock_system_disk):
mock_system_disk.return_value = None
assert self.state.get_volumes() == []
def test_get_strip_files_to_delete(self):
assert self.state.get_strip_files_to_delete() == ['del-a', 'del-b']
def test_get_strip_tools_to_keep(self):
assert self.state.get_strip_tools_to_keep() == ['tool-a', 'tool-b']
def test_get_strip_libraries_to_keep(self):
assert self.state.get_strip_libraries_to_keep() == ['lib-a', 'lib-b']
def test_get_build_type_machine_section(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['vmxSimpleFlavour'], 'oem')
assert state.get_build_type_machine_section().get_guestOS() == 'suse'
def test_get_drivers_list(self):
assert self.state.get_drivers_list() == \
['crypto/*', 'drivers/acpi/*', 'bar']
def test_get_build_type_oemconfig_section(self):
xml_data = self.description.load()
state = XMLState(xml_data, None, 'oem')
assert state.get_build_type_oemconfig_section().get_oem_swap()[0] is \
True
def test_get_oemconfig_oem_resize(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['vmxFlavour'], 'oem')
assert state.get_oemconfig_oem_resize() is True
description = XMLDescription(
'../data/example_multiple_users_config.xml'
)
xml_data = description.load()
state = XMLState(xml_data)
assert state.get_oemconfig_oem_resize() is False
def test_get_oemconfig_oem_multipath_scan(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['vmxFlavour'], 'oem')
assert state.get_oemconfig_oem_multipath_scan() is False
description = XMLDescription(
'../data/example_disk_config.xml'
)
xml_data = description.load()
state = XMLState(xml_data)
assert state.get_oemconfig_oem_multipath_scan() is False
def test_get_oemconfig_swap_mbytes(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['containerFlavour'], 'docker')
assert state.get_oemconfig_swap_mbytes() is None
state = XMLState(xml_data, ['vmxFlavour'], 'oem')
assert state.get_oemconfig_swap_mbytes() == 42
def test_get_oemconfig_swap_name(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['containerFlavour'], 'docker')
assert state.get_oemconfig_swap_name() == 'LVSwap'
state = XMLState(xml_data, ['vmxFlavour'], 'oem')
assert state.get_oemconfig_swap_name() == 'swap'
def test_get_oemconfig_swap_mbytes_default(self):
description = XMLDescription(
'../data/example_btrfs_config.xml'
)
xml_data = description.load()
state = XMLState(xml_data)
assert state.get_oemconfig_swap_mbytes() == 128
def test_get_users_sections(self):
assert self.state.get_users_sections()[0].get_user()[0].get_name() == \
'root'
def test_get_users(self):
description = XMLDescription(
'../data/example_multiple_users_config.xml'
)
xml_data = description.load()
state = XMLState(xml_data)
users = state.get_users()
assert len(users) == 3
assert any(u.get_name() == 'root' for u in users)
assert any(u.get_name() == 'tux' for u in users)
assert any(u.get_name() == 'kiwi' for u in users)
def test_get_user_groups(self):
description = XMLDescription(
'../data/example_multiple_users_config.xml'
)
xml_data = description.load()
state = XMLState(xml_data)
assert len(state.get_user_groups('root')) == 0
assert len(state.get_user_groups('tux')) == 1
assert any(grp == 'users' for grp in state.get_user_groups('tux'))
assert len(state.get_user_groups('kiwi')) == 3
assert any(grp == 'users' for grp in state.get_user_groups('kiwi'))
assert any(grp == 'kiwi' for grp in state.get_user_groups('kiwi'))
assert any(grp == 'admin' for grp in state.get_user_groups('kiwi'))
def test_copy_displayname(self):
self.state.copy_displayname(self.boot_state)
assert self.boot_state.xml_data.get_displayname() == 'Bob'
def test_copy_drivers_sections(self):
self.state.copy_drivers_sections(self.boot_state)
assert 'bar' in self.boot_state.get_drivers_list()
def test_copy_systemdisk_section(self):
self.state.copy_systemdisk_section(self.boot_state)
systemdisk = self.boot_state.get_build_type_system_disk_section()
assert systemdisk.get_name() == 'mydisk'
@patch('kiwi.xml_parse.type_.get_bootloader')
def test_copy_bootloader_section(self, mock_bootloader):
mock_bootloader.return_value = [self.bootloader]
self.state.copy_bootloader_section(self.boot_state)
assert self.boot_state.get_build_type_bootloader_section() == \
self.bootloader
def test_copy_strip_sections(self):
self.state.copy_strip_sections(self.boot_state)
assert 'del-a' in self.boot_state.get_strip_files_to_delete()
def test_copy_machine_section(self):
self.state.copy_machine_section(self.boot_state)
machine = self.boot_state.get_build_type_machine_section()
assert machine.get_memory() == 512
def test_copy_oemconfig_section(self):
self.state.copy_oemconfig_section(self.boot_state)
oemconfig = self.boot_state.get_build_type_oemconfig_section()
assert oemconfig.get_oem_systemsize()[0] == 2048
def test_copy_repository_sections(self):
self.state.copy_repository_sections(self.boot_state, True)
repository = self.boot_state.get_repository_sections()[0]
assert repository.get_source().get_path() == 'iso:///image/CDs/dvd.iso'
def test_copy_preferences_subsections(self):
self.state.copy_preferences_subsections(
['bootsplash_theme'], self.boot_state
)
preferences = self.boot_state.get_preferences_sections()[0]
assert preferences.get_bootsplash_theme()[0] == 'openSUSE'
def test_copy_build_type_attributes(self):
self.state.copy_build_type_attributes(
['firmware'], self.boot_state
)
assert self.boot_state.build_type.get_firmware() == 'efi'
def test_copy_bootincluded_packages_with_no_image_packages(self):
self.state.copy_bootincluded_packages(self.boot_state)
bootstrap_packages = self.boot_state.get_bootstrap_packages()
assert 'plymouth-branding-openSUSE' in bootstrap_packages
assert 'grub2-branding-openSUSE' in bootstrap_packages
assert 'gfxboot-branding-openSUSE' in bootstrap_packages
to_delete_packages = self.boot_state.get_to_become_deleted_packages()
assert 'gfxboot-branding-openSUSE' not in to_delete_packages
def test_copy_bootincluded_packages_with_image_packages(self):
boot_description = XMLDescription(
'../data/isoboot/example-distribution/config.xml'
)
boot_state = XMLState(boot_description.load(), ['std'])
self.state.copy_bootincluded_packages(boot_state)
image_packages = boot_state.get_system_packages()
assert 'plymouth-branding-openSUSE' in image_packages
assert 'grub2-branding-openSUSE' in image_packages
assert 'gfxboot-branding-openSUSE' in image_packages
to_delete_packages = boot_state.get_to_become_deleted_packages()
assert 'gfxboot-branding-openSUSE' not in to_delete_packages
def test_copy_bootincluded_archives(self):
self.state.copy_bootincluded_archives(self.boot_state)
bootstrap_archives = self.boot_state.get_bootstrap_archives()
assert '/absolute/path/to/image.tgz' in bootstrap_archives
def test_copy_bootdelete_packages(self):
self.state.copy_bootdelete_packages(self.boot_state)
to_delete_packages = self.boot_state.get_to_become_deleted_packages()
assert 'vim' in to_delete_packages
def test_copy_bootdelete_packages_no_delete_section_in_boot_descr(self):
boot_description = XMLDescription(
'../data/isoboot/example-distribution-no-delete-section/config.xml'
)
boot_state = XMLState(
boot_description.load()
)
self.state.copy_bootdelete_packages(boot_state)
to_delete_packages = boot_state.get_to_become_deleted_packages()
assert 'vim' in to_delete_packages
def test_build_type_size(self):
result = self.state.get_build_type_size()
assert result.mbytes == 1024
assert result.additive
def test_build_type_size_with_unpartitioned(self):
state = XMLState(self.description.load(), ['vmxSimpleFlavour'], 'oem')
result = state.get_build_type_size()
assert result.mbytes == 3072
assert not result.additive
result = state.get_build_type_size(include_unpartitioned=True)
assert result.mbytes == 4096
assert not result.additive
def test_get_build_type_unpartitioned_bytes(self):
assert self.state.get_build_type_unpartitioned_bytes() == 0
state = XMLState(self.description.load(), ['vmxSimpleFlavour'], 'oem')
assert state.get_build_type_unpartitioned_bytes() == 1073741824
state = XMLState(self.description.load(), ['vmxFlavour'], 'oem')
assert state.get_build_type_unpartitioned_bytes() == 0
state = XMLState(self.description.load(), ['ec2Flavour'], 'oem')
assert state.get_build_type_unpartitioned_bytes() == 0
def test_get_volume_group_name(self):
assert self.state.get_volume_group_name() == 'mydisk'
def test_get_volume_group_name_default(self):
assert self.boot_state.get_volume_group_name() == 'systemVG'
def test_get_distribution_name_from_boot_attribute(self):
assert self.state.get_distribution_name_from_boot_attribute() == \
'distribution'
def test_get_fs_mount_option_list(self):
assert self.state.get_fs_mount_option_list() == ['async']
def test_get_fs_create_option_list(self):
assert self.state.get_fs_create_option_list() == ['-O', '^has_journal']
@patch('kiwi.xml_parse.type_.get_boot')
def test_get_distribution_name_from_boot_attribute_no_boot(self, mock_boot):
mock_boot.return_value = None
with raises(KiwiDistributionNameError):
self.state.get_distribution_name_from_boot_attribute()
@patch('kiwi.xml_parse.type_.get_boot')
def test_get_distribution_name_from_boot_attribute_invalid_boot(
self, mock_boot
):
mock_boot.return_value = 'invalid'
with raises(KiwiDistributionNameError):
self.state.get_distribution_name_from_boot_attribute()
def test_delete_repository_sections(self):
self.state.delete_repository_sections()
assert self.state.get_repository_sections() == []
def test_delete_repository_sections_used_for_build(self):
self.state.delete_repository_sections_used_for_build()
assert self.state.get_repository_sections()[0].get_imageonly()
def test_get_build_type_vmconfig_entries(self):
assert self.state.get_build_type_vmconfig_entries() == []
def test_get_build_type_vmconfig_entries_for_simple_disk(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['vmxSimpleFlavour'], 'oem')
assert state.get_build_type_vmconfig_entries() == [
'numvcpus = "4"', 'cpuid.coresPerSocket = "2"'
]
def test_get_build_type_vmconfig_entries_no_machine_section(self):
description = XMLDescription('../data/example_disk_config.xml')
xml_data = description.load()
state = XMLState(xml_data)
assert state.get_build_type_vmconfig_entries() == []
def test_get_build_type_docker_containerconfig_section(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['containerFlavour'], 'docker')
containerconfig = state.get_build_type_containerconfig_section()
assert containerconfig.get_name() == \
'container_name'
assert containerconfig.get_maintainer() == \
'tux'
assert containerconfig.get_workingdir() == \
'/root'
def test_set_container_tag(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['containerFlavour'], 'docker')
state.set_container_config_tag('new_tag')
config = state.get_container_config()
assert config['container_tag'] == 'new_tag'
def test_add_container_label(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['containerFlavour'], 'docker')
state.add_container_config_label('somelabel', 'overwrittenvalue')
state.add_container_config_label('new_label', 'new value')
config = state.get_container_config()
assert config['labels'] == {
'somelabel': 'overwrittenvalue',
'someotherlabel': 'anotherlabelvalue',
'new_label': 'new value'
}
    def test_add_container_label_without_containerconfig(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['xenDom0Flavour'], 'docker')
state.add_container_config_label('somelabel', 'newlabelvalue')
config = state.get_container_config()
assert config['labels'] == {
'somelabel': 'newlabelvalue'
}
def test_add_container_label_no_container_image_type(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['vmxFlavour'], 'oem')
state.add_container_config_label('somelabel', 'newlabelvalue')
with self._caplog.at_level(logging.WARNING):
config = state.get_container_config()
assert config == {
'history': {'author': 'Marcus <ms@suse.com>'},
'maintainer': 'Marcus <ms@suse.com>'
}
def test_set_container_tag_not_applied(self):
with self._caplog.at_level(logging.WARNING):
self.state.set_container_config_tag('new_tag')
def test_get_container_config(self):
expected_config = {
'labels': {
'somelabel': 'labelvalue',
'someotherlabel': 'anotherlabelvalue'
},
'maintainer': 'tux',
'entry_subcommand': ['ls', '-l'],
'container_name': 'container_name',
'container_tag': 'container_tag',
'additional_tags': ['current', 'foobar'],
'workingdir': '/root',
'environment': {
'PATH': '/bin:/usr/bin:/home/user/bin',
'SOMEVAR': 'somevalue'
},
'user': 'root',
'volumes': ['/tmp', '/var/log'],
'entry_command': ['/bin/bash', '-x'],
'expose_ports': ['80', '8080'],
'history': {
'author': 'history author',
'comment': 'This is a comment',
'created_by': 'created by text',
'application_id': '123',
'package_version': '2003.12.0.0',
'launcher': 'app'
}
}
xml_data = self.description.load()
state = XMLState(xml_data, ['containerFlavour'], 'docker')
assert state.get_container_config() == expected_config
def test_get_container_config_clear_commands(self):
expected_config = {
'maintainer': 'tux',
'entry_subcommand': [],
'container_name': 'container_name',
'container_tag': 'container_tag',
'workingdir': '/root',
'user': 'root',
'entry_command': [],
'history': {'author': 'Marcus <ms@suse.com>'}
}
xml_data = self.description.load()
state = XMLState(xml_data, ['derivedContainer'], 'docker')
assert state.get_container_config() == expected_config
def test_get_spare_part(self):
assert self.state.get_build_type_spare_part_size() == 200
assert self.state.get_build_type_spare_part_fs_attributes() == [
'no-copy-on-write'
]
def test_get_build_type_format_options(self):
assert self.state.get_build_type_format_options() == {
'super': 'man',
'force_size': None
}
def test_get_derived_from_image_uri(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['derivedContainer'], 'docker')
assert state.get_derived_from_image_uri().uri == \
'obs://project/repo/image#mytag'
def test_set_derived_from_image_uri(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['derivedContainer'], 'docker')
state.set_derived_from_image_uri('file:///new_uri')
assert state.get_derived_from_image_uri().translate() == '/new_uri'
def test_set_derived_from_image_uri_not_applied(self):
with self._caplog.at_level(logging.WARNING):
self.state.set_derived_from_image_uri('file:///new_uri')
def test_is_xen_server(self):
assert self.state.is_xen_server() is True
def test_is_xen_guest_by_machine_setup(self):
assert self.state.is_xen_guest() is True
def test_is_xen_guest_no_xen_guest_setup(self):
assert self.boot_state.is_xen_guest() is False
@patch('platform.machine')
def test_is_xen_guest_by_firmware_setup(self, mock_platform_machine):
mock_platform_machine.return_value = 'x86_64'
xml_data = self.description.load()
state = XMLState(xml_data, ['ec2Flavour'], 'oem')
assert state.is_xen_guest() is True
@patch('platform.machine')
def test_is_xen_guest_by_architecture(self, mock_platform_machine):
mock_platform_machine.return_value = 'unsupported'
xml_data = self.description.load()
state = XMLState(xml_data, ['ec2Flavour'], 'oem')
assert state.is_xen_guest() is False
def test_get_initrd_system(self):
xml_data = self.description.load()
state = XMLState(xml_data, ['vmxFlavour'], 'oem')
assert state.get_initrd_system() == 'dracut'
state = XMLState(xml_data, ['vmxSimpleFlavour'], 'iso')
assert state.get_initrd_system() == 'dracut'
state = XMLState(xml_data, ['containerFlavour'], 'docker')
assert state.get_initrd_system() is None
state = XMLState(xml_data, [], 'oem')
assert state.get_initrd_system() == 'dracut'
def test_get_rpm_locale_filtering(self):
assert self.state.get_rpm_locale_filtering() is True
assert self.boot_state.get_rpm_locale_filtering() is False
def test_get_locale(self):
assert self.state.get_locale() == ['en_US', 'de_DE']
assert self.boot_state.get_locale() is None
def test_get_rpm_locale(self):
assert self.state.get_rpm_locale() == [
'POSIX', 'C', 'C.UTF-8', 'en_US', 'de_DE'
]
assert self.boot_state.get_rpm_locale() is None
def test_set_root_partition_uuid(self):
assert self.state.get_root_partition_uuid() is None
self.state.set_root_partition_uuid('some-id')
assert self.state.get_root_partition_uuid() == 'some-id'
def test_set_root_filesystem_uuid(self):
assert self.state.get_root_filesystem_uuid() is None
self.state.set_root_filesystem_uuid('some-id')
assert self.state.get_root_filesystem_uuid() == 'some-id'
@patch('kiwi.xml_parse.type_.get_bootloader')
def test_get_build_type_bootloader_name(self, mock_bootloader):
mock_bootloader.return_value = [None]
assert self.state.get_build_type_bootloader_name() == 'grub2'
mock_bootloader.return_value = [self.bootloader]
assert self.state.get_build_type_bootloader_name() == 'some-loader'
@patch('kiwi.xml_parse.type_.get_bootloader')
def test_get_build_type_bootloader_console(self, mock_bootloader):
mock_bootloader.return_value = [self.bootloader]
assert self.state.get_build_type_bootloader_console() == \
'some-console'
@patch('kiwi.xml_parse.type_.get_bootloader')
def test_get_build_type_bootloader_serial_line_setup(self, mock_bootloader):
mock_bootloader.return_value = [self.bootloader]
assert self.state.get_build_type_bootloader_serial_line_setup() == \
'some-serial'
mock_bootloader.return_value = [None]
assert self.state.get_build_type_bootloader_serial_line_setup() \
is None
@patch('kiwi.xml_parse.type_.get_bootloader')
def test_get_build_type_bootloader_timeout(self, mock_bootloader):
mock_bootloader.return_value = [self.bootloader]
assert self.state.get_build_type_bootloader_timeout() == \
'some-timeout'
@patch('kiwi.xml_parse.type_.get_bootloader')
def test_get_build_type_bootloader_timeout_style(self, mock_bootloader):
mock_bootloader.return_value = [self.bootloader]
assert self.state.get_build_type_bootloader_timeout_style() == \
'some-style'
mock_bootloader.return_value = [None]
assert self.state.get_build_type_bootloader_timeout_style() \
is None
@patch('kiwi.xml_parse.type_.get_bootloader')
def test_get_build_type_bootloader_targettype(self, mock_bootloader):
mock_bootloader.return_value = [self.bootloader]
assert self.state.get_build_type_bootloader_targettype() == \
'some-target'
    def test_get_installmedia_initrd_modules(self):
        assert self.state.get_installmedia_initrd_modules('add') == \
            ['network-legacy']
        assert self.state.get_installmedia_initrd_modules('set') == []
        assert self.state.get_installmedia_initrd_modules('omit') == []
        xml_data = self.description.load()
        state = XMLState(xml_data, ['vmxSimpleFlavour'], 'oem')
        assert state.get_installmedia_initrd_modules('add') == []
|
gpl-3.0
| -1,623,195,503,704,412,200
| 38.628399
| 80
| 0.597139
| false
| 3.673201
| true
| false
| false
|
ironsmile/tank4eta
|
pathfinding/core/node.py
|
1
|
1197
|
# -*- coding: utf-8 -*-
class Node(object):
"""
    basic node; stores its X and Y coordinates on some grid and records
    whether it is walkable.
"""
def __init__(self, x=0, y=0, walkable=True, see_through=True):
# Coordinates
self.x = x
self.y = y
# Whether this node can be walked through.
self.walkable = walkable
# Whether this node is visible for line of sight calculations
self.see_through = see_through
# values used in the finder
# cost from this node to the goal
self.h = 0.0
# cost from the start node to this node
self.g = 0.0
        # estimated total cost of the path through this node (f = g + h)
self.f = 0.0
self.opened = 0
self.closed = False
# used for backtracking to the start point
self.parent = None
        # used for recursion tracking of IDA*
self.retain_count = 0
# used for IDA* and Jump-Point-Search
self.tested = False
def __lt__(self, other):
"""
nodes are sorted by f value (see a_star.py)
        :param other: the Node to compare against
        :return: True if this node's f value is lower
"""
return self.f < other.f
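# Illustrative usage sketch (not part of the original module): because Node
# implements __lt__ on its f value, a binary heap of nodes always pops the
# node with the lowest estimated total cost first, which is how an A* open
# list typically consumes them.
if __name__ == '__main__':
    import heapq
    open_list = []
    for f_value in (7.0, 3.5, 5.0):
        node = Node()
        node.f = f_value
        heapq.heappush(open_list, node)
    while open_list:
        print(heapq.heappop(open_list).f)  # 3.5, then 5.0, then 7.0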
|
mit
| -6,834,358,347,663,112,000
| 24.468085
| 71
| 0.552214
| false
| 3.92459
| false
| false
| false
|
speric/simplehttp
|
pysimplehttp/src/file_to_simplequeue.py
|
1
|
6682
|
import tornado.ioloop
import tornado.httpclient
import os
import functools
import gzip
import logging
import urllib
try:
import ujson as json
except ImportError:
import json
class FileToSimplequeue(object):
http = tornado.httpclient.AsyncHTTPClient(max_simultaneous_connections=50, max_clients=50)
def __init__(self, input_file, max_concurrent, max_queue_depth, simplequeue_urls,
check_simplequeue_interval, stats_interval, filter_require=None, filter_exclude=None, io_loop=None):
assert isinstance(simplequeue_urls, (list, tuple))
assert isinstance(max_queue_depth, int)
assert isinstance(max_concurrent, int)
assert isinstance(check_simplequeue_interval, int)
assert isinstance(stats_interval, int)
assert isinstance(filter_require, (None.__class__, list, tuple))
assert isinstance(filter_exclude, (None.__class__, list, tuple))
for entry in simplequeue_urls:
assert entry.startswith("http://") or entry.startswith("https://"), "simplequeue url %s is not valid" % entry
self.simplequeue_urls = simplequeue_urls
self.input = self.open_file(input_file)
self.concurrent = 0
self.finished = False
self.fill_check = False
self.max_queue_depth = max_queue_depth
self.max_concurrent = max_concurrent
self.check_simplequeue_interval = check_simplequeue_interval
self.pending = dict([[simplequeue, 0] for simplequeue in simplequeue_urls])
self.stats_interval = stats_interval
self.filter_require = dict([data.split('=', 1) for data in (filter_require or [])])
for key, value in self.filter_require.items():
logging.info("requiring json key=%s value=%s" % (key, value) )
self.filter_exclude = dict([data.split('=', 1) for data in (filter_exclude or [])])
for key, value in self.filter_exclude.items():
logging.info("excluding json key=%s value=%s" % (key, value) )
self.stats_reset()
self.io_loop = io_loop or tornado.ioloop.IOLoop.instance()
def stats_incr(self, successful=True, filtered=False):
if filtered:
self.filtered += 1
return
if successful:
self.success += 1
else:
self.failed += 1
def stats_reset(self):
self.success = 0
self.failed = 0
self.filtered = 0
def print_and_reset_stats(self):
logging.warning('success: %5d failed: %5d filtered: %5d concurrent: %2d' % (self.success, self.failed, self.filtered, self.concurrent))
self.stats_reset()
def start(self):
self.stats_timer = tornado.ioloop.PeriodicCallback(self.print_and_reset_stats, self.stats_interval * 1000)
self.stats_timer.start()
self.check_timer = tornado.ioloop.PeriodicCallback(self.check_simplequeue_depth, self.check_simplequeue_interval * 1000)
self.check_timer.start()
self.check_simplequeue_depth() # seed the loop
self.io_loop.start()
def open_file(self, filename):
assert os.path.exists(filename), "%r is not accessible" % filename
if filename.endswith('.gz'):
return gzip.open(filename, 'rb')
else:
return open(filename, 'rb')
def check_simplequeue_depth(self):
"""query the simplequeue and fill it based on where it's dept should be"""
if self.finished:
return self.finish()
for simplequeue in self.simplequeue_urls:
self.http.fetch(simplequeue + '/stats?format=json',
callback=functools.partial(self.finish_check_simplequeue_depth, simplequeue=simplequeue))
def finish_check_simplequeue_depth(self, response, simplequeue):
if response.code != 200:
logging.error('failed checking simplequeue depth %s/stats?format=json' % simplequeue)
self.continue_fill()
return
stats = json.loads(response.body)
entries_needed = self.max_queue_depth - stats['depth']
entries_needed = max(0, entries_needed)
logging.info('%s needs %d entries' % (simplequeue, entries_needed))
self.pending[simplequeue] = entries_needed
self.continue_fill()
def continue_fill(self):
if not self.fill_check:
self.fill_check = True
self.io_loop.add_callback(self.fill_as_needed)
def fill_as_needed(self):
"""
        fill each simplequeue as needed, based on how many more entries it should receive and the currently available concurrency
"""
self.fill_check = False
if self.finished:
return self.finish()
available_concurrency = self.max_concurrent - self.concurrent
for simplequeue in self.pending.keys():
while available_concurrency and self.pending[simplequeue] > 0:
if self.fill_one(simplequeue):
available_concurrency -= 1
self.pending[simplequeue] -= 1
def fill_one(self, endpoint):
"""read one line from `self.input` and send it to a simplequeue"""
data = self.input.readline()
if not data:
if not self.finished:
logging.info('at end of input stream')
self.finish()
return True
if self.filter_require or self.filter_exclude:
try:
msg = json.loads(data)
except Exception:
logging.error('failed json.loads(%r)' % data)
self.stats_incr(successful=False)
return False
for key, value in self.filter_require.items():
if msg.get(key) != value:
self.stats_incr(filtered=True)
return False
for key, value in self.filter_exclude.items():
if msg.get(key) == value:
self.stats_incr(filtered=True)
return False
self.concurrent += 1
url = endpoint + '/put?' + urllib.urlencode(dict(data=data))
self.http.fetch(url, self.finish_fill_one)
return True
def finish_fill_one(self, response):
self.concurrent -= 1
if response.code != 200:
logging.info(response)
self.failed += 1
else:
self.success += 1
# continue loop
if self.max_concurrent > self.concurrent:
self.continue_fill()
def finish(self):
self.finished = True
if self.concurrent == 0:
logging.info('stopping ioloop')
self.io_loop.stop()
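# Illustrative usage sketch (not part of the original module): the file name,
# endpoint URL and numeric settings below are made-up values that only show
# how the pieces fit together; start() blocks on the tornado IOLoop until the
# input file has been drained into the simplequeue.
if __name__ == '__main__':
    pump = FileToSimplequeue(
        input_file='events.json.gz',                  # hypothetical input file
        max_concurrent=20,
        max_queue_depth=10000,
        simplequeue_urls=['http://127.0.0.1:8080'],   # hypothetical endpoint
        check_simplequeue_interval=5,
        stats_interval=30,
        filter_require=['action=click'],              # only forward these messages
    )
    pump.start()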
|
mit
| 1,708,702,651,177,100,500
| 39.993865
| 143
| 0.601766
| false
| 4.173641
| false
| false
| false
|
bitcraft/pyglet
|
pyglet/media/avbin.py
|
1
|
20165
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Use avbin to decode audio and video media.
"""
import struct
import ctypes
import threading
import time
import pyglet
import pyglet.lib
from pyglet import gl
from pyglet.gl import gl_info
from pyglet import image
from pyglet.compat import asbytes, asbytes_filename
from pyglet.media import (MediaFormatException, StreamingSource, VideoFormat,
AudioFormat, AudioData, MediaEvent, WorkerThread,
SourceInfo)
if pyglet.compat_platform.startswith('win') and struct.calcsize('P') == 8:
av = 'avbin64'
else:
av = 'avbin'
av = pyglet.lib.load_library(av)
AVBIN_RESULT_ERROR = -1
AVBIN_RESULT_OK = 0
AVbinResult = ctypes.c_int
AVBIN_STREAM_TYPE_UNKNOWN = 0
AVBIN_STREAM_TYPE_VIDEO = 1
AVBIN_STREAM_TYPE_AUDIO = 2
AVbinStreamType = ctypes.c_int
AVBIN_SAMPLE_FORMAT_U8 = 0
AVBIN_SAMPLE_FORMAT_S16 = 1
AVBIN_SAMPLE_FORMAT_S24 = 2
AVBIN_SAMPLE_FORMAT_S32 = 3
AVBIN_SAMPLE_FORMAT_FLOAT = 4
AVbinSampleFormat = ctypes.c_int
AVBIN_LOG_QUIET = -8
AVBIN_LOG_PANIC = 0
AVBIN_LOG_FATAL = 8
AVBIN_LOG_ERROR = 16
AVBIN_LOG_WARNING = 24
AVBIN_LOG_INFO = 32
AVBIN_LOG_VERBOSE = 40
AVBIN_LOG_DEBUG = 48
AVbinLogLevel = ctypes.c_int
AVbinFileP = ctypes.c_void_p
AVbinStreamP = ctypes.c_void_p
Timestamp = ctypes.c_int64
class AVbinFileInfo(ctypes.Structure):
_fields_ = [
('structure_size', ctypes.c_size_t),
('n_streams', ctypes.c_int),
('start_time', Timestamp),
('duration', Timestamp),
('title', ctypes.c_char * 512),
('author', ctypes.c_char * 512),
('copyright', ctypes.c_char * 512),
('comment', ctypes.c_char * 512),
('album', ctypes.c_char * 512),
('year', ctypes.c_int),
('track', ctypes.c_int),
('genre', ctypes.c_char * 32),
]
class _AVbinStreamInfoVideo8(ctypes.Structure):
_fields_ = [
('width', ctypes.c_uint),
('height', ctypes.c_uint),
('sample_aspect_num', ctypes.c_uint),
('sample_aspect_den', ctypes.c_uint),
('frame_rate_num', ctypes.c_uint),
('frame_rate_den', ctypes.c_uint),
]
class _AVbinStreamInfoAudio8(ctypes.Structure):
_fields_ = [
('sample_format', ctypes.c_int),
('sample_rate', ctypes.c_uint),
('sample_bits', ctypes.c_uint),
('channels', ctypes.c_uint),
]
class _AVbinStreamInfoUnion8(ctypes.Union):
_fields_ = [
('video', _AVbinStreamInfoVideo8),
('audio', _AVbinStreamInfoAudio8),
]
class AVbinStreamInfo8(ctypes.Structure):
_fields_ = [
('structure_size', ctypes.c_size_t),
('type', ctypes.c_int),
('u', _AVbinStreamInfoUnion8)
]
class AVbinPacket(ctypes.Structure):
_fields_ = [
('structure_size', ctypes.c_size_t),
('timestamp', Timestamp),
('stream_index', ctypes.c_int),
('data', ctypes.POINTER(ctypes.c_uint8)),
('size', ctypes.c_size_t),
]
AVbinLogCallback = ctypes.CFUNCTYPE(None, ctypes.c_char_p, ctypes.c_int,
ctypes.c_char_p)
av.avbin_get_version.restype = ctypes.c_int
av.avbin_get_ffmpeg_revision.restype = ctypes.c_int
av.avbin_get_audio_buffer_size.restype = ctypes.c_size_t
av.avbin_have_feature.restype = ctypes.c_int
av.avbin_have_feature.argtypes = [ctypes.c_char_p]
av.avbin_init.restype = AVbinResult
av.avbin_set_log_level.restype = AVbinResult
av.avbin_set_log_level.argtypes = [AVbinLogLevel]
av.avbin_set_log_callback.argtypes = [AVbinLogCallback]
av.avbin_open_filename.restype = AVbinFileP
av.avbin_open_filename.argtypes = [ctypes.c_char_p]
av.avbin_close_file.argtypes = [AVbinFileP]
av.avbin_seek_file.argtypes = [AVbinFileP, Timestamp]
av.avbin_file_info.argtypes = [AVbinFileP, ctypes.POINTER(AVbinFileInfo)]
av.avbin_stream_info.argtypes = [AVbinFileP, ctypes.c_int,
ctypes.POINTER(AVbinStreamInfo8)]
av.avbin_open_stream.restype = ctypes.c_void_p
av.avbin_open_stream.argtypes = [AVbinFileP, ctypes.c_int]
av.avbin_close_stream.argtypes = [AVbinStreamP]
av.avbin_read.argtypes = [AVbinFileP, ctypes.POINTER(AVbinPacket)]
av.avbin_read.restype = AVbinResult
av.avbin_decode_audio.restype = ctypes.c_int
av.avbin_decode_audio.argtypes = [AVbinStreamP,
ctypes.c_void_p, ctypes.c_size_t,
ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)]
av.avbin_decode_video.restype = ctypes.c_int
av.avbin_decode_video.argtypes = [AVbinStreamP,
ctypes.c_void_p, ctypes.c_size_t,
ctypes.c_void_p]
if True:
# TODO: lock all avbin calls. not clear from ffmpeg documentation if this
    # is necessary. leaving it on while debugging to rule out the possibility
# of a problem.
def synchronize(func, lock):
def f(*args):
lock.acquire()
result = func(*args)
lock.release()
return result
return f
avbin_lock = threading.Lock()
for name in dir(av):
if name.startswith('avbin_'):
setattr(av, name, synchronize(getattr(av, name), avbin_lock))
def get_version():
return av.avbin_get_version()
class AVbinException(MediaFormatException):
pass
def timestamp_from_avbin(timestamp):
return float(timestamp) / 1000000
def timestamp_to_avbin(timestamp):
return int(timestamp * 1000000)
class VideoPacket:
_next_id = 0
def __init__(self, packet):
self.timestamp = timestamp_from_avbin(packet.timestamp)
self.data = (ctypes.c_uint8 * packet.size)()
self.size = packet.size
ctypes.memmove(self.data, packet.data, self.size)
# Decoded image. 0 == not decoded yet; None == Error or discarded
self.image = 0
self.id = self._next_id
self.__class__._next_id += 1
class AVbinSource(StreamingSource):
def __init__(self, filename, file=None):
if file is not None:
raise NotImplementedError(
'Loading from file stream is not supported')
self._file = av.avbin_open_filename(asbytes_filename(filename))
if not self._file:
raise AVbinException('Could not open "%s"' % filename)
self._video_stream = None
self._video_stream_index = -1
self._audio_stream = None
self._audio_stream_index = -1
self._audio_packet_size = 0
file_info = AVbinFileInfo()
file_info.structure_size = ctypes.sizeof(file_info)
av.avbin_file_info(self._file, ctypes.byref(file_info))
self._duration = timestamp_from_avbin(file_info.duration)
self.info = SourceInfo()
self.info.title = file_info.title
self.info.author = file_info.author
self.info.copyright = file_info.copyright
self.info.comment = file_info.comment
self.info.album = file_info.album
self.info.year = file_info.year
self.info.track = file_info.track
self.info.genre = file_info.genre
# Pick the first video and audio streams found, ignore others.
for i in range(file_info.n_streams):
info = AVbinStreamInfo8()
info.structure_size = ctypes.sizeof(info)
av.avbin_stream_info(self._file, i, info)
if (info.type == AVBIN_STREAM_TYPE_VIDEO and
not self._video_stream):
stream = av.avbin_open_stream(self._file, i)
if not stream:
continue
self.video_format = VideoFormat(
width=info.u.video.width,
height=info.u.video.height)
if info.u.video.sample_aspect_num != 0:
self.video_format.sample_aspect = (
float(info.u.video.sample_aspect_num) /
info.u.video.sample_aspect_den)
if _have_frame_rate:
self.video_format.frame_rate = (
float(info.u.video.frame_rate_num) /
info.u.video.frame_rate_den)
self._video_stream = stream
self._video_stream_index = i
elif (info.type == AVBIN_STREAM_TYPE_AUDIO and
info.u.audio.sample_bits in (8, 16) and
info.u.audio.channels in (1, 2) and
not self._audio_stream):
stream = av.avbin_open_stream(self._file, i)
if not stream:
continue
self.audio_format = AudioFormat(
channels=info.u.audio.channels,
sample_size=info.u.audio.sample_bits,
sample_rate=info.u.audio.sample_rate)
self._audio_stream = stream
self._audio_stream_index = i
self._packet = AVbinPacket()
self._packet.structure_size = ctypes.sizeof(self._packet)
self._packet.stream_index = -1
self._events = list()
# Timestamp of last video packet added to decoder queue.
self._video_timestamp = 0
self._buffered_audio_data = list()
if self.audio_format:
self._audio_buffer = \
(ctypes.c_uint8 * av.avbin_get_audio_buffer_size())()
if self.video_format:
self._video_packets = list()
self._decode_thread = WorkerThread()
self._decode_thread.start()
self._condition = threading.Condition()
def __del__(self):
if _debug:
print('del avbin source')
try:
if self._video_stream:
av.avbin_close_stream(self._video_stream)
if self._audio_stream:
av.avbin_close_stream(self._audio_stream)
av.avbin_close_file(self._file)
except:
pass
    # TODO: call this / add to source api
def delete(self):
if self.video_format:
self._decode_thread.stop()
def seek(self, timestamp):
if _debug:
print('AVbin seek', timestamp)
av.avbin_seek_file(self._file, timestamp_to_avbin(timestamp))
self._audio_packet_size = 0
del self._events[:]
del self._buffered_audio_data[:]
if self.video_format:
self._video_timestamp = 0
self._condition.acquire()
for packet in self._video_packets:
packet.image = None
self._condition.notify()
self._condition.release()
del self._video_packets[:]
self._decode_thread.clear_jobs()
def _get_packet(self):
"""Read a packet into self._packet.
Returns True if OK, False if no more packets are in stream.
"""
return av.avbin_read(self._file, self._packet) == AVBIN_RESULT_OK
def _process_packet(self):
"""Returns (packet_type, packet)
where packet_type = 'video' or 'audio'; and packet is VideoPacket or
AudioData. In either case, packet is buffered or queued for decoding;
no further action is necessary.
Returns (None, None) if packet was neither type.
"""
if self._packet.stream_index == self._video_stream_index:
if self._packet.timestamp < 0:
# TODO: TODO
# AVbin needs hack to decode timestamp for B frames in
# some containers (OGG?). See
# http://www.dranger.com/ffmpeg/tutorial05.html
# For now we just drop these frames.
return None, None
video_packet = VideoPacket(self._packet)
if _debug:
print('Created and queued frame %d (%f)' %
(video_packet.id, video_packet.timestamp))
self._video_timestamp = max(self._video_timestamp,
video_packet.timestamp)
self._video_packets.append(video_packet)
self._decode_thread.put_job(
lambda: self._decode_video_packet(video_packet))
return 'video', video_packet
elif self._packet.stream_index == self._audio_stream_index:
audio_data = self._decode_audio_packet()
if audio_data:
if _debug:
print('Got an audio packet at', audio_data.timestamp)
self._buffered_audio_data.append(audio_data)
return 'audio', audio_data
return None, None
def get_audio_data(self, bytes_):
try:
audio_data = self._buffered_audio_data.pop(0)
audio_data_timeend = audio_data.timestamp + audio_data.duration
except IndexError:
audio_data = None
audio_data_timeend = self._video_timestamp + 1
if _debug:
print('get_audio_data')
have_video_work = False
# Keep reading packets until we have an audio packet and all the
# associated video packets have been enqueued on the decoder thread.
while not audio_data or (
self._video_stream and self._video_timestamp < audio_data_timeend):
if not self._get_packet():
break
packet_type, packet = self._process_packet()
if packet_type == 'video':
have_video_work = True
elif not audio_data and packet_type == 'audio':
audio_data = self._buffered_audio_data.pop(0)
if _debug:
print(
'Got requested audio packet at', audio_data.timestamp)
audio_data_timeend = audio_data.timestamp + audio_data.duration
if have_video_work:
# Give decoder thread a chance to run before we return this audio
# data.
time.sleep(0)
if not audio_data:
if _debug:
print('get_audio_data returning None')
return None
while self._events and self._events[0].timestamp <= audio_data_timeend:
event = self._events.pop(0)
if event.timestamp >= audio_data.timestamp:
event.timestamp -= audio_data.timestamp
audio_data.events.append(event)
if _debug:
print('get_audio_data returning ts %f with events' %
audio_data.timestamp, audio_data.events)
print('remaining events are', self._events)
return audio_data
def _decode_audio_packet(self):
packet = self._packet
size_out = ctypes.c_int(len(self._audio_buffer))
while True:
audio_packet_ptr = ctypes.cast(packet.data, ctypes.c_void_p)
audio_packet_size = packet.size
used = av.avbin_decode_audio(self._audio_stream,
audio_packet_ptr, audio_packet_size,
self._audio_buffer, size_out)
if used < 0:
self._audio_packet_size = 0
break
audio_packet_ptr.value += used
audio_packet_size -= used
if size_out.value <= 0:
continue
# TODO: how did this ever work? replaced with copy below
# buffer = ctypes.string_at(self._audio_buffer, size_out)
# TODO: to actually copy the data.. but it never used to crash, so
# maybe I'm missing something
buffer = ctypes.create_string_buffer(size_out.value)
ctypes.memmove(buffer, self._audio_buffer, len(buffer))
buffer = buffer.raw
duration = float(len(buffer)) / self.audio_format.bytes_per_second
self._audio_packet_timestamp = \
timestamp = timestamp_from_avbin(packet.timestamp)
return AudioData(buffer, len(buffer), timestamp, duration, list())
def _decode_video_packet(self, packet):
width = self.video_format.width
height = self.video_format.height
pitch = width * 3
buffer = (ctypes.c_uint8 * (pitch * height))()
result = av.avbin_decode_video(self._video_stream,
packet.data, packet.size,
buffer)
if result < 0:
image_data = None
else:
image_data = image.ImageData(width, height, 'RGB', buffer, pitch)
packet.image = image_data
# Notify get_next_video_frame() that another one is ready.
self._condition.acquire()
self._condition.notify()
self._condition.release()
def _ensure_video_packets(self):
"""Process packets until a video packet has been queued (and begun
decoding). Return False if EOS.
"""
if not self._video_packets:
if _debug:
print('No video packets...')
# Read ahead until we have another video packet
self._get_packet()
packet_type, _ = self._process_packet()
while packet_type and packet_type != 'video':
self._get_packet()
packet_type, _ = self._process_packet()
if not packet_type:
return False
if _debug:
print('Queued packet', _)
return True
def get_next_video_timestamp(self):
if not self.video_format:
return
if self._ensure_video_packets():
if _debug:
print(
'Next video timestamp is', self._video_packets[0].timestamp)
return self._video_packets[0].timestamp
def get_next_video_frame(self):
if not self.video_format:
return
if self._ensure_video_packets():
packet = self._video_packets.pop(0)
if _debug:
print('Waiting for', packet)
# Block until decoding is complete
self._condition.acquire()
while packet.image == 0:
self._condition.wait()
self._condition.release()
if _debug:
print('Returning', packet)
return packet.image
av.avbin_init()
if pyglet.options['debug_media']:
_debug = True
av.avbin_set_log_level(AVBIN_LOG_DEBUG)
else:
_debug = False
av.avbin_set_log_level(AVBIN_LOG_QUIET)
_have_frame_rate = av.avbin_have_feature(asbytes('frame_rate'))
|
bsd-3-clause
| -6,378,432,242,768,156,000
| 33.235993
| 83
| 0.583635
| false
| 3.923152
| false
| false
| false
|
anetasie/sherpa
|
sherpa/astro/xspec/utils.py
|
3
|
5433
|
#
# Copyright (C) 2017, 2018, 2019 Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from distutils.version import LooseVersion
from . import _xspec
__all__ = ['ModelMeta', 'include_if', 'version_at_least']
XSPEC_VERSION = LooseVersion(_xspec.get_xsversion())
class ModelMeta(type):
"""
Metaclass for xspec models. The __function__ member in xspec model classes is seamlessly
transformed from a string representing the low level function in the sherpa xspec extension
into a proper call, taking into account error cases (e.g. the function cannot be found in the
xspec extension at runtime).
"""
NOT_COMPILED_FUNCTION_MESSAGE = "Calling an xspec function that was not compiled"
def __init__(cls, *args, **kwargs):
if hasattr(cls, '__function__'):
try:
cls._calc = getattr(_xspec, cls.__function__)
except AttributeError:
# Error handling: the model meets the condition expressed in the decorator
# but the low level function is not included in the xspec extension
cls._calc = ModelMeta._not_compiled
# The `__function__` member signals that `cls` is a model that needs the `_calc` method
# to be generated.
        # If the class does not have the `__function__` member, then we assume the class provides
# a `_calc` method itself, or it does not need it to begin with. This is the case for
# some classes extending `XSModel` but that are base classes themselves,
# like `XSAdditiveModel`, or they have a more complex `_calc` implementation, like `XSTableModel`.
# In principle there is room for mistakes, i.e. a proper model class might be defined without
# the `__function__` member. Tests should make sure this is not the case. `test_xspec_models`
# is indeed such a test, because it calls all models making sure they are usable. A model without
        # the `_calc` method or the `__function__` member would fail the test.
# The alternative would be to include more logic to handle the error cases, but that would require
# more tests, making this choice impractical.
super(ModelMeta, cls).__init__(*args, **kwargs)
@staticmethod
def _not_compiled(*args, **kwargs):
raise AttributeError(ModelMeta.NOT_COMPILED_FUNCTION_MESSAGE)
def equal_or_greater_than(version_string):
"""
Utility function that compares a version string with the version of the current xspec instance.
    For better or worse the current xspec instance is not cached across calls. It probably could be,
    but it seems safer not to, and any overhead is incurred at model initialization only.
The comparison is made in terms of the `distutils.version.LooseVersion` class.
:param version_string: the version against which to compare the current xspec version
:return: `True` if the version of xspec is equal or greater than the argument, `False` otherwise
"""
return XSPEC_VERSION >= LooseVersion(version_string)
class include_if():
"""
Generic decorator for including xspec models conditionally. It takes a boolean condition as an argument.
If the boolean condition is not met, then the model is not included, and its function is replaced with a
dummy function that throws an exception.
If the model is disabled, then its class's `version_enabled` attribute is set to `False`.
"""
DISABLED_MODEL_MESSAGE = "Model {} is disabled because of an unmet condition"
def __init__(self, condition):
self.condition = condition
def __call__(self, model_class):
if not self.condition:
model_class.version_enabled = False
model_class._calc = self._disabled(self.get_message(model_class))
return model_class
def get_message(self, model_class):
return self.DISABLED_MODEL_MESSAGE.format(model_class.__name__)
@staticmethod
def _disabled(message):
def wrapped(*args, **kwargs):
raise AttributeError(message)
return wrapped
class version_at_least(include_if):
"""
Decorator which takes a version string as an argument and enables a model only if
the xspec version detected at runtime is equal or greater than the one provided to the decorator.
"""
DISABLED_MODEL_MESSAGE = "Model {} is disabled because XSPEC version >= {} is required"
def __init__(self, version_string):
include_if.__init__(self, equal_or_greater_than(version_string))
self.version_string = version_string
def get_message(self, model_class):
return self.DISABLED_MODEL_MESSAGE.format(model_class.__name__, self.version_string)
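# --- Illustrative usage sketch (not part of the original module) ---
# The class and low-level function names below are invented purely to show the
# behaviour of ModelMeta and include_if; real model classes live elsewhere in
# the xspec package.
if __name__ == "__main__":
    # A model whose compiled function is missing falls back to _not_compiled.
    _DemoModel = ModelMeta("_DemoModel", (object,),
                           {"__function__": "no_such_compiled_function"})
    try:
        _DemoModel()._calc()
    except AttributeError as exc:
        print(exc)  # -> "Calling an xspec function that was not compiled"

    # A model excluded by an unmet condition is marked as disabled.
    @include_if(False)
    class _Disabled(object):
        pass

    print(_Disabled.version_enabled)  # -> False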
|
gpl-3.0
| 8,876,311,769,032,798,000
| 42.814516
| 108
| 0.692987
| false
| 4.271226
| false
| false
| false
|
KorolevskyMax/TestFrameworkTemplate
|
pages/base_page.py
|
1
|
1954
|
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from webium import BasePage as WebiumBasePage, Find
class BasePage(WebiumBasePage):
url_path = None
a_tag = "//a[contains(.,'{link_text}')]"
login_link = Find(by=By.XPATH, value=a_tag.format(link_text='Sign in'))
logout_btn = Find(by=By.XPATH, value="//button[contains(.,'Sign out')]")
account_options_btn = Find(by=By.XPATH, value=a_tag.replace('.', '@aria-label').format(link_text='View profile and more'))
loader_xpath = "//div[@id='prestatus']"
def clear_send_keys(self, element_name, kwargs):
value = kwargs.get(element_name)
element = getattr(self, element_name)
element.clear()
element.send_keys(value)
def hover(self, element):
hov = ActionChains(self._driver).move_to_element(element)
hov.perform()
self.wait_for_loading()
self.wait_for_loader_disappear()
def get_login_status(self):
try:
self.account_options_btn.click()
            return 'logged in' if self.logout_btn.is_displayed() else 'logged out'
except NoSuchElementException:
return 'logged out'
def wait_for_loading(self, seconds=180):
wait = WebDriverWait(self._driver, seconds)
wait.until(lambda x: self._driver.execute_script('return jQuery.active == 0') is True)
def replace_bad_elements(self, css_locator):
self._driver.execute_script("$('{}').remove()".format(css_locator))
def is_loader_displayed(self, *args):
return self._driver.find_element_by_xpath(self.loader_xpath).is_displayed()
def wait_for_loader_disappear(self):
WebDriverWait(self._driver, timeout=500).until_not(
self.is_loader_displayed, "Timeout waiting for loader disappear")
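# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical page object built on BasePage; the URL path and locator below
# are invented and would differ for a real application page.
class ExampleSearchPage(BasePage):
    url_path = '/search'
    search_input = Find(by=By.ID, value='search-query')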
|
mit
| 8,327,454,099,560,124,000
| 38.877551
| 126
| 0.669396
| false
| 3.618519
| false
| false
| false
|
eltoncarr/tubular
|
tubular/scripts/retrieve_base_ami.py
|
1
|
2964
|
#! /usr/bin/env python3
"""
Command-line script used to retrieve the last base AMI ID used for an environment/deployment/play.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from os import path
import io
import sys
import logging
import traceback
import click
import yaml
# Add top-level module path to sys.path before importing tubular code.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from tubular import ec2 # pylint: disable=wrong-import-position
logging.basicConfig(level=logging.INFO)
@click.command()
@click.option(
'--environment', '-e',
help='Environment for AMI, e.g. prod, stage',
)
@click.option(
'--deployment', '-d',
help='Deployment for AMI e.g. edx, edge',
)
@click.option(
'--play', '-p',
help='Play for AMI, e.g. edxapp, insights, discovery',
)
@click.option(
'--override',
help='Override AMI id to use',
)
@click.option(
'--out_file',
help='Output file for the AMI information yaml.',
default=None
)
def retrieve_base_ami(environment, deployment, play, override, out_file):
"""
Method used to retrieve the last base AMI ID used for an environment/deployment/play.
"""
has_edp = environment is not None or deployment is not None or play is not None
if has_edp and override is not None:
logging.error("--environment, --deployment and --play are mutually exclusive with --override.")
sys.exit(1)
if not has_edp and override is None:
logging.error("Either --environment, --deployment and --play or --override are required.")
sys.exit(1)
try:
if override:
ami_id = override
else:
ami_id = ec2.active_ami_for_edp(environment, deployment, play)
ami_info = {
# This is passed directly to an ansible script that expects a base_ami_id variable
'base_ami_id': ami_id,
# This matches the key produced by the create_ami.yml ansible play to make
# generating release pages easier.
'ami_id': ami_id,
}
ami_info.update(ec2.tags_for_ami(ami_id))
logging.info("Found active AMI ID for {env}-{dep}-{play}: {ami_id}".format(
env=environment, dep=deployment, play=play, ami_id=ami_id
))
if out_file:
with io.open(out_file, 'w') as stream:
yaml.safe_dump(ami_info, stream, default_flow_style=False, explicit_start=True)
else:
print(yaml.safe_dump(ami_info, default_flow_style=False, explicit_start=True))
except Exception as err: # pylint: disable=broad-except
traceback.print_exc()
click.secho('Error finding base AMI ID.\nMessage: {}'.format(err), fg='red')
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
retrieve_base_ami() # pylint: disable=no-value-for-parameter
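# Example invocations (illustrative; the EDP values and AMI id below are
# placeholders, and the EC2 lookup needs working AWS credentials):
#   retrieve_base_ami.py --environment prod --deployment edx --play edxapp --out_file base_ami.yml
#   retrieve_base_ami.py --override ami-0123456789abcdef0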
|
agpl-3.0
| 7,992,221,647,808,419,000
| 30.2
| 103
| 0.643387
| false
| 3.654747
| false
| false
| false
|
tim-janik/tobin
|
LogParser.py
|
1
|
3770
|
# Licensed GNU Affero GPL v3 or later: http://www.gnu.org/licenses/agpl.html
import sys, calendar, re, heapq, tempfile
_month_dict = { 'Jan' : 1, 'Feb' : 2, 'Mar' : 3, 'Apr' : 4, 'May' : 5, 'Jun' : 6,
'Jul' : 7, 'Aug' : 8, 'Sep' : 9, 'Oct' : 10, 'Nov' : 11, 'Dec' : 12 }
def _parse_logtime (string):
# e.g. string = '07/Aug/2013:21:14:18 +0200'
tup = (int (string[7:11]), _month_dict[string[3:6]], int (string[:2]),
int (string[12:14]), int (string[15:17]), int (string[18:20]))
tzone = int (string[22:24]) * 3600 + int (string[24:26]) * 60 # TZ offset in seconds
seconds = calendar.timegm (tup) # this is faster than using strptime
if string[21] == '+':
seconds -= tzone
else:
seconds += tzone
return seconds # unix time stamp in UTC
def _timestamp_from_logline (line):
b1 = line.find ('[')
b2 = line.find (']', b1)
return _parse_logtime (line[b1+1:b2]) if b2 - b1 == 27 else -1
def _log_file_sorter (logfile):
sorted_files, lines = [], []
for line in logfile:
line = '%08x|' % _timestamp_from_logline (line) + line
lines.append (line)
if len (lines) >= 1000000:
lines.sort()
f = tempfile.TemporaryFile()
f.writelines (lines)
f.seek (0)
sorted_files.append (f)
del lines[:]
if lines:
lines.sort()
f = tempfile.TemporaryFile()
f.writelines (lines)
f.seek (0)
sorted_files.append (f)
return sorted_files
def log_file_sort_pool (filelist):
sorted_files = []
for ff in filelist:
sorted_files += _log_file_sorter (open (ff))
return sorted_files
def log_file_parse_pool (sorted_files):
s = r'\s+' # separator
ip = r'([0-9.abcdef:ABCDEF]{7,39})' # ip4/ip6 addresses
#idt = r'([\w\d/.$+-]+)' # unquoted identifier (too strict for some corrupted user names)
idt = r'([^\s]+)' # space separated string
num = r'([0-9]{1,9})' # integer
xnum = r'(-|[0-9]{1,9})' # maybe integer
dt = r'\[\d\d/\w\w\w/\d{4}:\d\d:\d\d:\d\d\s[+-]\d{4}\]' # [dd/MMM/yyyy:hh:mm:ss +-zone]
#qx = r'"((?:[^"\\]|\\.)*)"' # quoted text (slow), allows escaped quotes
qx = r'"([^"\\]*(?:[^"\\]|\\.)*)"' # fast quoted text, unconditionalize/speed up the common case
logpattern = re.compile (ip + s + idt + s + idt + s + dt + s + qx + s + num + s + xnum + '(?:' + s + qx + s + qx + ')?')
urlpattern = re.compile (r'([A-Z]+)\s(.*)\s(HTTP[0-9./]*)$')
for line in heapq.merge (*sorted_files):
# extract timestamp from line in sorted pool
timestamp, line = int (line[:8], 16), line[9:]
# parse common log format
m = logpattern.match (line)
u = urlpattern.match (m.group (3 + 1)) if m else None
if not m or not u:
print >>sys.stderr, '%s: malformed input: %s' % (sys.argv[0], line.rstrip())
continue
hit = m.groups()
time_stamp_usec = 1000000 * timestamp
http_status = int (hit[4]) # http_status
tx_bytes = 0 if hit[5] == '-' else int (hit[5]) # tx_bytes
referrer = '' if hit[6] == '-' else hit[6] # referrer
uagent = '' if hit[7] == '-' else hit[7] # uagent
# split request URL
method = u.group (1)
url = u.group (2)
protocol = u.group (3)
qpos = url.find ('?')
resource, query = (url[:qpos], url[qpos:]) if qpos >= 0 else (url, '')
# yield result
yield (hit[0], hit[1], hit[2], time_stamp_usec, method, resource, query, protocol, http_status, tx_bytes, referrer, uagent)
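if __name__ == '__main__':
    # Illustrative driver (not part of the original module): sort the access
    # logs named on the command line and print each parsed hit tuple.
    sorted_pool = log_file_sort_pool (sys.argv[1:])
    for hit in log_file_parse_pool (sorted_pool):
        print hit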
|
agpl-3.0
| 5,767,667,281,494,441,000
| 44.421687
| 128
| 0.513793
| false
| 3.097781
| false
| false
| false
|
InUrSys/PescArt2.0
|
src/Reports/Relatorio_SaidasPorProvinica.py
|
1
|
1633
|
'''
Created on 01/02/2018
@author: chernomirdinmacuvele
'''
import ReportAPI
from ui_Relatorio_SaidasPorProvincia import Ui_Form
import FuncSQL
from PyQt5.Qt import QPlainTextEdit, QComboBox
class Relatorio_SaidasPorProvincia(ReportAPI.JasperReports, Ui_Form):
def __init__(self, parent=None, dbcon=None):
super(Relatorio_SaidasPorProvincia, self).__init__(parent)
self.setupUi(self)
self.dbcon = dbcon
self.relatorio = 'Saidas_Distrito'
self.setForm()
def setForm(self):
self.LEFormato.setText(self.getFormat())
self.getInfoReport()
self.setProvincias()
self.PBGerar.clicked.connect(self.generateReport)
def getInfoReport(self):
quer = "SELECT nome, descricao FROM public.prc_relatorios where nome = '{nome}'".format(nome = self.relatorio)
bok, valOut = FuncSQL.anySelectScript(scpt= quer)
if bok:
self.LENome.setText(str(valOut[0]))
self.PTEDescricao.setPlainText(str(valOut[1]))
def setProvincias(self):
quer = "select distinct provincia from view_saidas_provincias"
lstOut = []
bok, valOut = FuncSQL.multLineSelect(scpt=quer)
if bok:
for val in valOut:
lstOut.append(val[0])
self.CBProvincia.addItems(lstOut)
def generateReport(self):
file = self.LENome.text()
formato = self.LEFormato.text().lower()
provincia = [self.CBProvincia.currentText()]
self.getTemplateFile(file=file, format=formato, parametro=provincia)
|
gpl-3.0
| 6,905,557,344,856,947,000
| 31.68
| 118
| 0.63319
| false
| 3.312373
| false
| false
| false
|
voanna/Deep-Features-or-Not
|
src/extract_features_no_finetune_temperature.py
|
1
|
1123
|
#!/usr/bin/env python
from __future__ import print_function
from extractCaffeActivations import features
import argparse
import HONHelpers as hon
import itertools
import os
import glob
layers = [
'pool1',
'pool2',
'pool3',
'pool4',
'pool5',
'fc6',
'fc7',
]
parser = argparse.ArgumentParser()
parser.add_argument("job_id", help="indexes the job of extracting features", type=int)
args = parser.parse_args()
job_config_list = [pair for pair in itertools.product(hon.webcams, ['train', 'test'])]
# grid engine jobs start with 1
job_id = args.job_id - 1
job_config = job_config_list[job_id]
webcam, split = job_config
print(webcam, split)
finetune_root = os.path.join(hon.experiment_root, 'finetune-temperature', 'no-finetune-features')
img_fnames = sorted(glob.glob(os.path.join(hon.hon_data_root, webcam, 'imgs_align', '*' + split + '*.png')))
deploy = hon.VGG16_deploy_path
weights = hon.VGG16_caffemodel_path
layer = 'fc7'
save_directory = os.path.join(finetune_root, webcam)
_ = features(deploy, weights, img_fnames, layer, save_directory, layers, mean_npy = None)
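# Example submission (illustrative): each array-job index picks one
# (webcam, split) pair from job_config_list, e.g. the first pair:
#   python extract_features_no_finetune_temperature.py 1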
|
mit
| -2,117,628,950,225,300,000
| 23.413043
| 108
| 0.693678
| false
| 2.955263
| false
| false
| false
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/motifs/meme.py
|
1
|
11510
|
# Copyright 2008 by Bartek Wilczynski
# Adapted from Bio.MEME.Parser by Jason A. Hackney. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from __future__ import print_function
from Bio.Alphabet import IUPAC
from Bio import Seq
from Bio import motifs
def read(handle):
"""Parses the text output of the MEME program into a meme.Record object.
Example:
>>> from Bio.motifs import meme
>>> with open("meme.output.txt") as f:
... record = meme.read(f)
>>> for motif in record:
... for instance in motif.instances:
... print(instance.motif_name, instance.sequence_name, instance.strand, instance.pvalue)
"""
record = Record()
__read_version(record, handle)
__read_datafile(record, handle)
__read_alphabet(record, handle)
__read_sequences(record, handle)
__read_command(record, handle)
for line in handle:
if line.startswith('MOTIF 1'):
break
else:
raise ValueError('Unexpected end of stream')
alphabet = record.alphabet
revcomp = 'revcomp' in record.command
while True:
motif_number, length, num_occurrences, evalue = __read_motif_statistics(line)
name = __read_motif_name(handle)
instances = __read_motif_sequences(handle, name, alphabet, length, revcomp)
motif = Motif(alphabet, instances)
motif.length = length
motif.num_occurrences = num_occurrences
motif.evalue = evalue
motif.name = name
record.append(motif)
assert len(record)==motif_number
__skip_unused_lines(handle)
try:
line = next(handle)
except StopIteration:
raise ValueError('Unexpected end of stream: Expected to find new motif, or the summary of motifs')
if line.startswith("SUMMARY OF MOTIFS"):
break
if not line.startswith('MOTIF'):
raise ValueError("Line does not start with 'MOTIF':\n%s" % line)
return record
class Motif(motifs.Motif):
"""A subclass of Motif used in parsing MEME (and MAST) output.
This subclass defines functions and data specific to MEME motifs.
This includes the motif name, the evalue for a motif, and its number
of occurrences.
"""
def __init__(self, alphabet=None, instances=None):
motifs.Motif.__init__(self, alphabet, instances)
self.evalue = 0.0
self.num_occurrences = 0
self.name = None
class Instance(Seq.Seq):
"""A class describing the instances of a MEME motif, and the data thereof.
"""
def __init__(self, *args, **kwds):
Seq.Seq.__init__(self, *args, **kwds)
self.sequence_name = ""
self.start = 0
self.pvalue = 1.0
self.strand = 0
self.length = 0
self.motif_name = ""
class Record(list):
"""A class for holding the results of a MEME run.
A meme.Record is an object that holds the results from running
MEME. It implements no methods of its own.
The meme.Record class inherits from list, so you can access individual
motifs in the record by their index. Alternatively, you can find a motif
by its name:
>>> from Bio import motifs
>>> with open("meme.output.txt") as f:
... record = motifs.parse(f, 'MEME')
>>> motif = record[0]
>>> print(motif.name)
Motif 1
>>> motif = record['Motif 1']
>>> print(motif.name)
Motif 1
"""
def __init__(self):
"""__init__ (self)"""
self.version = ""
self.datafile = ""
self.command = ""
self.alphabet = None
self.sequences = []
def __getitem__(self, key):
if isinstance(key, str):
for motif in self:
if motif.name == key:
return motif
else:
return list.__getitem__(self, key)
# Everything below is private
def __read_version(record, handle):
for line in handle:
if line.startswith('MEME version'):
break
else:
raise ValueError("Improper input file. File should contain a line starting MEME version.")
line = line.strip()
ls = line.split()
record.version = ls[2]
def __read_datafile(record, handle):
for line in handle:
if line.startswith('TRAINING SET'):
break
else:
raise ValueError("Unexpected end of stream: 'TRAINING SET' not found.")
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with '****'")
if not line.startswith('****'):
raise ValueError("Line does not start with '****':\n%s" % line)
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'DATAFILE'")
if not line.startswith('DATAFILE'):
raise ValueError("Line does not start with 'DATAFILE':\n%s" % line)
line = line.strip()
line = line.replace('DATAFILE= ', '')
record.datafile = line
def __read_alphabet(record, handle):
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'ALPHABET'")
if not line.startswith('ALPHABET'):
raise ValueError("Line does not start with 'ALPHABET':\n%s" % line)
line = line.strip()
line = line.replace('ALPHABET= ', '')
if line == 'ACGT':
al = IUPAC.unambiguous_dna
else:
al = IUPAC.protein
record.alphabet = al
def __read_sequences(record, handle):
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'Sequence name'")
if not line.startswith('Sequence name'):
raise ValueError("Line does not start with 'Sequence name':\n%s" % line)
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with '----'")
if not line.startswith('----'):
raise ValueError("Line does not start with '----':\n%s" % line)
for line in handle:
if line.startswith('***'):
break
line = line.strip()
ls = line.split()
record.sequences.append(ls[0])
if len(ls) == 6:
record.sequences.append(ls[3])
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with '***'")
def __read_command(record, handle):
for line in handle:
if line.startswith('command:'):
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'command'")
line = line.strip()
line = line.replace('command: ', '')
record.command = line
def __read_motif_statistics(line):
    # Depending on the version of MEME, this line either looks like
# MOTIF 1 width = 19 sites = 3 llr = 43 E-value = 6.9e-002
# or like
# MOTIF 1 MEME width = 19 sites = 3 llr = 43 E-value = 6.9e-002
words = line.split()
assert words[0]=='MOTIF'
motif_number = int(words[1])
if words[2]=='MEME':
key_values = words[3:]
else:
key_values = words[2:]
keys = key_values[::3]
equal_signs = key_values[1::3]
values = key_values[2::3]
assert keys==['width', 'sites', 'llr', 'E-value']
for equal_sign in equal_signs:
assert equal_sign=='='
length = int(values[0])
num_occurrences = int(values[1])
evalue = float(values[3])
return motif_number, length, num_occurrences, evalue
def __read_motif_name(handle):
for line in handle:
if 'sorted by position p-value' in line:
break
else:
raise ValueError('Unexpected end of stream: Failed to find motif name')
line = line.strip()
words = line.split()
name = " ".join(words[0:2])
return name
def __read_motif_sequences(handle, motif_name, alphabet, length, revcomp):
try:
line = next(handle)
except StopIteration:
raise ValueError('Unexpected end of stream: Failed to find motif sequences')
if not line.startswith('---'):
raise ValueError("Line does not start with '---':\n%s" % line)
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'Sequence name'")
if not line.startswith('Sequence name'):
raise ValueError("Line does not start with 'Sequence name':\n%s" % line)
try:
line = next(handle)
except StopIteration:
raise ValueError('Unexpected end of stream: Failed to find motif sequences')
if not line.startswith('---'):
raise ValueError("Line does not start with '---':\n%s" % line)
instances = []
for line in handle:
if line.startswith('---'):
break
line = line.strip()
words = line.split()
if revcomp:
strand = words.pop(1)
else:
strand = '+'
sequence = words[4]
assert len(sequence) == length
instance = Instance(sequence, alphabet)
instance.motif_name = motif_name
instance.sequence_name = words[0]
instance.start = int(words[1])
instance.pvalue = float(words[2])
instance.strand = strand
instance.length = length
instances.append(instance)
else:
raise ValueError('Unexpected end of stream')
return motifs.Instances(instances, alphabet)
def __skip_unused_lines(handle):
for line in handle:
if line.startswith('log-odds matrix'):
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'log-odds matrix'")
for line in handle:
if line.startswith('---'):
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with '---'")
for line in handle:
if line.startswith('letter-probability matrix'):
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'letter-probability matrix'")
for line in handle:
if line.startswith('---'):
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with '---'")
for line in handle:
if line.startswith('Time'):
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'Time'")
try:
line = next(handle)
except StopIteration:
raise ValueError('Unexpected end of stream: Expected to find blank line')
if line.strip():
raise ValueError("Expected blank line, but got:\n%s" % line)
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with '***'")
if not line.startswith('***'):
raise ValueError("Line does not start with '***':\n%s" % line)
for line in handle:
if line.strip():
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with '***'")
if not line.startswith('***'):
raise ValueError("Line does not start with '***':\n%s" % line)
|
apache-2.0
| 5,834,909,790,318,722,000
| 32.753666
| 117
| 0.611295
| false
| 3.960771
| false
| false
| false
|
CaliOpen/CaliOpen
|
src/backend/components/py.pi/caliopen_pi/tests/test_spam.py
|
1
|
1631
|
"""Test spam privacy feature extraction."""
import unittest
import os
from caliopen_storage.config import Configuration
if 'CALIOPEN_BASEDIR' in os.environ:
conf_file = '{}/src/backend/configs/caliopen.yaml.template'. \
format(os.environ['CALIOPEN_BASEDIR'])
else:
conf_file = '../../../../../configs/caliopen.yaml.template'
Configuration.load(conf_file, 'global')
from mailbox import Message
from caliopen_pi.features.helpers.spam import SpamScorer
def load_mail(filename):
"""Read email from fixtures of an user."""
# XXX tofix: set fixtures in a more convenient way to not
# have dirty hacking on relative path
dir_path = os.path.dirname(os.path.realpath(__file__))
path = '{}/fixtures'.format(dir_path)
with open('{}/{}'.format(path, filename)) as f:
data = f.read()
return Message(data)
class TestSpamScorer(unittest.TestCase):
"""Test spam scorer."""
def test_spam1(self):
mail = load_mail('spam1.eml')
scorer = SpamScorer(mail)
self.assertFalse(scorer.is_spam)
self.assertEqual(scorer.method, 'score')
self.assertEqual(scorer.score, 0.0)
def test_spam2(self):
mail = load_mail('spam2.eml')
scorer = SpamScorer(mail)
self.assertTrue(scorer.is_spam)
self.assertEqual(scorer.method, 'status')
self.assertEqual(scorer.score, 51.0)
def test_spam3(self):
mail = load_mail('spam3.eml')
scorer = SpamScorer(mail)
self.assertTrue(scorer.is_spam)
self.assertEqual(scorer.method, 'status')
self.assertEqual(scorer.score, 97.0)
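if __name__ == '__main__':
    # Allow running this test module directly as well as via a test runner.
    unittest.main()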
|
gpl-3.0
| -8,242,803,574,089,239,000
| 29.773585
| 66
| 0.652361
| false
| 3.522678
| true
| false
| false
|
cmancone/mygrations
|
mygrations/formats/mysql/file_reader/database.py
|
1
|
3315
|
import os
import glob
from .reader import reader as sql_reader
from mygrations.formats.mysql.definitions.database import database as database_definition
class database(database_definition):
def __init__(self, strings):
""" Constructor. Accepts a string or list of strings with different possible contents
Strings can be one of the following:
================== ====================
Type Value
================== ====================
string SQL to parse
string A filename to read and to parse as SQL
string A directory name to search for .sql files, parsing each one
list A list of strings, with each element corresponding to any of the above
================== ====================
:param strings: A string or list of strings corresponding to one of the allowed input types
:type strings: string|list
"""
self._warnings = []
self._errors = []
self._tables = {}
self._rows = []
if isinstance(strings, str):
strings = [strings]
for string in strings:
self.process(string)
self.store_rows_with_tables()
def process(self, string):
""" Processes a string.
Strings can be either SQL to parse, the location of an SQL file, or a directory containing SQL files
:param string: A string containing one of the above
:type string: string
"""
if os.path.isdir(string):
self._process_directory(string)
elif os.path.isfile(string):
self._read(string)
else:
self._read(string)
def _process_directory(self, directory):
""" Processes a directory.
Finds all SQL files in the directory and calls `_read()` on them,
which results in the file being parsed and its tables/rows added to the
record of database tables/rows.
:param string: A string containing one of the above
:type string: string
"""
if directory[-1] != os.sep:
directory += os.sep
for filename in glob.glob('%s*.sql' % directory):
self._read(filename)
def _read(self, contents):
""" Processes a file or string of SQL.
Creates a reader object (which accepts files or a string of SQL)
to parse its input and stores the tables/rows in the database
object.
:param contents: A string containing a filename or SQL
:type contents: string
"""
try:
reader = sql_reader()
reader.parse(contents)
except ValueError as e:
print("Error in file %s: %s" % (contents, e))
# pull in all errors and warnings
self._errors.extend(reader.errors)
self._warnings.extend(reader.warnings)
# keep rows and tables separate while we are reading
for (table_name, table) in reader.tables.items():
if table.name in self._tables:
                self._errors.append('Found two definitions for table %s' % table.name)
self._tables[table.name] = table
for (table_name, rows) in reader.rows.items():
self._rows.extend(rows)
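if __name__ == '__main__':
    # Illustrative only: build a definition from a directory of .sql files
    # (the path below is hypothetical) and summarise what was parsed.
    parsed = database('migrations/')
    print('tables found: %s' % ', '.join(sorted(parsed._tables)))
    print('errors: %s' % parsed._errors)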
|
mit
| -7,812,337,197,021,024,000
| 32.15
| 108
| 0.569231
| false
| 4.742489
| false
| false
| false
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-dns/azure/mgmt/dns/models/srv_record.py
|
1
|
1365
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SrvRecord(Model):
"""An SRV record.
:param priority: The priority value for this SRV record.
:type priority: int
:param weight: The weight value for this SRV record.
:type weight: int
:param port: The port value for this SRV record.
:type port: int
:param target: The target domain name for this SRV record.
:type target: str
"""
_attribute_map = {
'priority': {'key': 'priority', 'type': 'int'},
'weight': {'key': 'weight', 'type': 'int'},
'port': {'key': 'port', 'type': 'int'},
'target': {'key': 'target', 'type': 'str'},
}
def __init__(self, priority=None, weight=None, port=None, target=None):
super(SrvRecord, self).__init__()
self.priority = priority
self.weight = weight
self.port = port
self.target = target
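# --- Illustrative usage sketch (not part of the generated code) ---
# A minimal SRV record as it could be embedded in a record set; the values
# below are placeholders.
if __name__ == '__main__':
    record = SrvRecord(priority=10, weight=5, port=5060, target='sip.example.com')
    print("{0} {1}".format(record.priority, record.target))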
|
mit
| 6,262,782,456,267,802,000
| 33.125
| 76
| 0.562637
| false
| 4.23913
| false
| false
| false
|
dodobas/osm-export-tool2
|
jobs/presets.py
|
1
|
11550
|
# -*- coding: utf-8 -*-
import logging
import pdb
from collections import OrderedDict
from StringIO import StringIO
from lxml import etree
logger = logging.getLogger(__name__)
class PresetParser():
types = {
'node': 'point',
'way': 'line',
'area': 'polygon',
'closedway': 'polygon',
'relation': 'polygon'
}
namespaces = {'ns': 'http://josm.openstreetmap.de/tagging-preset-1.0'}
def __init__(self, preset=None, *args, **kwargs):
self.preset = preset
self.tags = []
def parse(self,):
"""
Reads in the JOSM Preset.
Picks out all <item> elements.
For each <item>, gets the 'type' attribute and maps the
geometry type to the <item>'s 'key' element (tag name).
Ignores <item>'s with no 'type' attribute.
"""
f = open(self.preset)
xml = f.read()
tree = etree.parse(StringIO(xml))
items = tree.xpath('//ns:item', namespaces=self.namespaces)
for item in items:
self.process_item_and_children(item)
# tags = OrderedDict(sorted(self.tags.items()))
return self.tags
def process_item_and_children(self, item, geometrytype=None):
geometrytypes = None
if item.get('type'):
item_type = item.get('type')
geometrytypes = self.get_geometrytype(item_type)
keys = item.xpath('./ns:key', namespaces=self.namespaces)
item_groups = {}
groups = []
for group in item.iterancestors(tag='{http://josm.openstreetmap.de/tagging-preset-1.0}group'):
groups.append(group.get('name'))
if len(keys) > 0 and geometrytypes:
key = keys[0].get('key')
value = keys[0].get('value')
tag = {}
tag['name'] = item.get('name')
tag['key'] = key
tag['value'] = value
geom_types = []
for geomtype in geometrytypes:
geom_types.append(geomtype)
tag['geom_types'] = list(set(geom_types))
tag['groups'] = list(reversed(groups))
self.tags.append(tag)
for child in list(item):
self.process_item_and_children(child)
def get_geometrytype(self, item_type):
geometrytypes = []
osmtypes = item_type.split(',')
for osmtype in osmtypes:
geometrytypes.append(self.types[osmtype])
return geometrytypes
def build_hdm_preset_dict(self, ):
hdm = {}
xml = StringIO(open(self.preset).read())
tree = etree.parse(xml)
groups = tree.xpath('./ns:group', namespaces=self.namespaces)
for group in groups:
name = group.get('name')
group_dict = {}
hdm[name] = group_dict
self._parse_group(group, group_dict)
return OrderedDict(sorted(hdm.items()))
def _parse_group(self, group, group_dict):
items = group.xpath('./ns:item', namespaces=self.namespaces)
for item in items:
item_dict = {}
name = item.get('name')
types = item.get('type') # get the type attr on the item element
            if types is None:
continue # pass those items with no geom type
geom_types = self.get_geometrytype(types)
keys = item.xpath('./ns:key', namespaces=self.namespaces)
if not len(keys) > 0:
continue
key = keys[0]
item_dict['displayName'] = name
item_dict['tag'] = '{0}:{1}'.format(key.get('key'), key.get('value'))
item_dict['geom'] = geom_types
group_dict[name] = OrderedDict(sorted(item_dict.items()))
groups = group.xpath('./ns:group', namespaces=self.namespaces)
for sub_group in groups:
sub_group_dict = {}
name = sub_group.get('name')
group_dict[name] = sub_group_dict
self._parse_group(sub_group, sub_group_dict)
class UnfilteredPresetParser():
types = {
'node': 'point',
'way': 'line',
'area': 'polygon',
'closedway': 'polygon',
'relation': 'polygon'
}
supported_elements = ['key', 'text', 'combo', 'multiselect', 'check']
namespaces = {'ns': 'http://josm.openstreetmap.de/tagging-preset-1.0'}
def __init__(self, preset=None, *args, **kwargs):
self.preset = preset
self.tags = []
self.keys = []
def parse(self,):
"""
Reads in the JOSM Preset.
Picks out all <item> elements.
For each <item>, gets the 'type' attribute and maps the
geometry type to the <item>'s 'key' attribute (tag name).
Ignores <item>'s with no 'type' attribute.
"""
f = open(self.preset)
xml = f.read()
tree = etree.parse(StringIO(xml))
items = tree.xpath('//ns:item', namespaces=self.namespaces)
for item in items:
self.process_item_and_children(item)
# tags = OrderedDict(sorted(self.tags.items()))
return self.tags
def process_item_and_children(self, item, geometrytype=None):
geometrytypes = None
if item.get('type'):
item_type = item.get('type')
geometrytypes = self.get_geometrytype(item_type)
elements = item.xpath('./ns:*', namespaces=self.namespaces)
item_groups = {}
groups = []
for group in item.iterancestors(tag='{http://josm.openstreetmap.de/tagging-preset-1.0}group'):
groups.append(group.get('name'))
if len(elements) > 0 and geometrytypes:
for element in elements:
name = element.xpath('local-name()')
if name in self.supported_elements:
key = element.get('key')
if key in self.keys:
continue # skip key if already parsed
tag = {}
tag['name'] = item.get('name')
tag['key'] = key
tag['value'] = '' # select all not-null values
geom_types = []
for geomtype in geometrytypes:
geom_types.append(geomtype)
tag['geom_types'] = list(set(geom_types))
tag['groups'] = list(reversed(groups))
self.tags.append(tag)
self.keys.append(key)
"""
if len(elements) > 0 and geometrytypes:
for key_ele in elements:
key = key_ele.get('key')
value = key_ele.get('value')
tag = {}
tag['name'] = item.get('name')
tag['key'] = key
tag['value'] = value
geom_types = []
for geomtype in geometrytypes:
geom_types.append(geomtype)
tag['geom_types'] = list(set(geom_types))
tag['groups'] = list(reversed(groups))
self.tags.append(tag)
"""
"""
if keys[0].get('key'):
# get kv pair
key = keys[0].get('key')
value = keys[0].get('value')
tag = {}
tag['name'] = item.get('name')
tag['key'] = key
tag['value'] = value
geom_types = []
for geomtype in geometrytypes:
geom_types.append(geomtype)
tag['geom_types'] = list(set(geom_types))
tag['groups'] = list(reversed(groups))
self.tags.append(tag)
"""
for child in list(item):
self.process_item_and_children(child)
def get_geometrytype(self, item_type):
geometrytypes = []
osmtypes = item_type.split(',')
for osmtype in osmtypes:
geometrytypes.append(self.types[osmtype])
return geometrytypes
def build_hdm_preset_dict(self, ):
hdm = {}
xml = StringIO(open(self.preset).read())
tree = etree.parse(xml)
groups = tree.xpath('./ns:group', namespaces=self.namespaces)
for group in groups:
name = group.get('name')
group_dict = {}
hdm[name] = group_dict
self._parse_group(group, group_dict)
return OrderedDict(sorted(hdm.items()))
def _parse_group(self, group, group_dict):
items = group.xpath('./ns:item', namespaces=self.namespaces)
for item in items:
item_dict = {}
name = item.get('name')
types = item.get('type') # get the type attr on the item element
            if types is None:
continue # pass those items with no geom type
geom_types = self.get_geometrytype(types)
keys = item.xpath('./ns:key', namespaces=self.namespaces)
if not len(keys) > 0:
continue
key = keys[0]
item_dict['displayName'] = name
item_dict['tag'] = '{0}:{1}'.format(key.get('key'), key.get('value'))
item_dict['geom'] = geom_types
group_dict[name] = OrderedDict(sorted(item_dict.items()))
groups = group.xpath('./ns:group', namespaces=self.namespaces)
for sub_group in groups:
sub_group_dict = {}
name = sub_group.get('name')
group_dict[name] = sub_group_dict
self._parse_group(sub_group, sub_group_dict)
class TagParser():
namespaces = {'ns': 'http://josm.openstreetmap.de/tagging-preset-1.0'}
nsmap = {None: 'http://josm.openstreetmap.de/tagging-preset-1.0'}
types = {
'point': 'node',
'line': 'way',
'polygon': 'area,closedway,relation',
}
def __init__(self, tags=None, *args, **kwargs):
self.tags = tags
def parse_tags(self, ):
root = etree.Element('presets', nsmap=self.nsmap)
doc = etree.ElementTree(root)
for tag in self.tags:
groups = self._add_groups(root, tag)
xml = etree.tostring(doc, xml_declaration=True, encoding='UTF-8', pretty_print=True)
return xml
def _add_groups(self, parent, tag):
for group in tag.groups:
# check if element exists if not create it
found_groups = parent.xpath('group[@name="' + group + '"]', namespaces=self.namespaces)
if len(found_groups) == 0:
grp = etree.SubElement(parent, 'group', name=group)
tag.groups.pop(0)
if len(tag.groups) == 0:
geom_types = self._get_types(tag.geom_types)
item = etree.SubElement(grp, 'item', name=tag.name, type=geom_types)
etree.SubElement(item, 'key', key=tag.key, value=tag.value)
self._add_groups(grp, tag)
else:
tag.groups.pop(0)
if len(tag.groups) == 0:
geom_types = self._get_types(tag.geom_types)
item = etree.SubElement(found_groups[0], 'item', name=tag.name, type=geom_types)
etree.SubElement(item, 'key', key=tag.key, value=tag.value)
self._add_groups(found_groups[0], tag)
def _get_types(self, geom_types):
types = []
for geom_type in geom_types:
gtype = self.types.get(geom_type)
if gtype is not None:
types.append(self.types[geom_type])
return ','.join(types)
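if __name__ == '__main__':
    # Illustrative only: parse a JOSM preset file (the path is hypothetical)
    # and print the key/value pair and geometry types of every tag found.
    parser = PresetParser(preset='hdm_presets.xml')
    for tag in parser.parse():
        print('{0}={1} {2}'.format(tag['key'], tag['value'], tag['geom_types']))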
|
bsd-3-clause
| 1,925,800,538,163,020,300
| 36.5
| 102
| 0.524848
| false
| 3.962264
| false
| false
| false
|
lukasjuhrich/sipa
|
sipa/blueprints/generic.py
|
1
|
10785
|
# -*- coding: utf-8 -*-
import logging
import os
from flask import render_template, request, redirect, \
url_for, flash, session, abort, current_app, jsonify
from flask.blueprints import Blueprint
from flask_babel import gettext, format_date
from flask_login import current_user, login_user, logout_user, \
login_required
from sqlalchemy.exc import DatabaseError
from ldap3.core.exceptions import LDAPCommunicationError
from sipa.forms import flash_formerrors, LoginForm, AnonymousContactForm, \
OfficialContactForm
from sipa.mail import send_official_contact_mail, send_contact_mail
from sipa.model import backends
from sipa.units import dynamic_unit, format_money
from sipa.utils import get_user_name, redirect_url
from sipa.model.exceptions import UserNotFound, InvalidCredentials
from sipa.utils.git_utils import get_repo_active_branch, get_latest_commits
logger = logging.getLogger(__name__)
bp_generic = Blueprint('generic', __name__)
@bp_generic.before_app_request
def log_request():
if 'sentry' in current_app.extensions:
current_app.extensions['sentry'].client.extra_context({
'current_user': get_user_name(current_user),
'ip_user': get_user_name(backends.user_from_ip(request.remote_addr))
})
logging.getLogger(__name__ + '.http').debug(
'Incoming request: %s %s', request.method, request.path,
extra={'tags': {'user': get_user_name(current_user),
'ip': request.remote_addr}}
)
@bp_generic.app_errorhandler(401)
@bp_generic.app_errorhandler(403)
@bp_generic.app_errorhandler(404)
def error_handler_redirection(e):
"""Handles errors by flashing an according message
:param e: The error
:return: A flask response with the according HTTP error code
"""
if e.code == 401:
message = gettext("Bitte melde Dich an, um die Seite zu sehen.")
elif e.code == 403:
message = gettext("Diese Funktion wird in deinem Wohnheim "
"nicht unterstützt.")
elif e.code == 404:
message = gettext("Das von Dir angeforderte Dokument gibt es nicht.")
else:
message = gettext("Es ist ein Fehler aufgetreten!")
return render_template(
'error.html',
errorcode=e.code,
message=message
), e.code
@bp_generic.app_errorhandler(DatabaseError)
def exceptionhandler_sql(ex):
"""Handles global Database errors like:
Server down, Lock wait timeout exceeded, …
"""
flash(gettext("Es gab einen Fehler bei der Datenbankabfrage. "
"Bitte probiere es in ein paar Minuten noch mal."),
"error")
logger.critical('DatabaseError caught',
extra={'data': {'exception_args': ex.args}},
exc_info=True)
return redirect(url_for('generic.index'))
@bp_generic.app_errorhandler(LDAPCommunicationError)
def exceptionhandler_ldap(ex):
"""Handles global LDAPCommunicationError exceptions.
The session must be reset, because if the user is logged in and
the server fails during his session, it would cause a redirect
loop. This also resets the language choice, btw.
The alternative would be a try-except catch block in load_user,
but login also needs a handler.
"""
session.clear()
flash(gettext("Verbindung zum LDAP-Server "
"konnte nicht hergestellt werden!"),
'error')
logger.critical(
'Unable to connect to LDAP server',
extra={'data': {'exception_args': ex.args}},
exc_info=True,
)
return redirect(url_for('generic.index'))
@bp_generic.app_errorhandler(ConnectionError)
def exceptionhandler_gerok(ex):
"""Handles ConnectionErrors
Session is cleared to avoid redirect loops, as above.
"""
flash(gettext("Es gab einen internen Fehler. "
"Bitte probiere es in ein paar Minuten noch mal."))
session.clear()
return redirect(url_for('generic.index'))
@bp_generic.route('/index.php')
@bp_generic.route('/')
def index():
return redirect(url_for('news.show'))
@bp_generic.route("/login", methods=['GET', 'POST'])
def login():
"""Login page for users
"""
form = LoginForm()
if form.validate_on_submit():
dormitory = backends.get_dormitory(form.dormitory.data)
username = form.username.data
password = form.password.data
remember = form.remember.data
User = dormitory.datasource.user_class
valid_suffix = "@{}".format(dormitory.datasource.mail_server)
if username.endswith(valid_suffix):
username = username[:-len(valid_suffix)]
try:
user = User.authenticate(username, password)
except InvalidCredentials as e:
cause = "username" if isinstance(e, UserNotFound) else "password"
logger.info("Authentication failed: Wrong %s", cause, extra={
'tags': {'user': username, 'rate_critical': True}
})
flash(gettext("Anmeldedaten fehlerhaft!"), "error")
else:
if isinstance(user, User):
session['dormitory'] = dormitory.name
login_user(user, remember=remember)
logger.info('Authentication successful',
extra={'tags': {'user': username}})
flash(gettext("Anmeldung erfolgreich!"), "success")
elif form.is_submitted():
flash_formerrors(form)
if current_user.is_authenticated:
return redirect(url_for('usersuite.index'))
return render_template('login.html', form=form,
unsupported=backends.premature_dormitories)
@bp_generic.route("/logout")
@login_required
def logout():
logger.info("Logging out",
extra={'tags': {'user': current_user.uid}})
logout_user()
flash(gettext("Abmeldung erfolgreich!"), 'success')
return redirect(url_for('.index'))
bp_generic.add_app_template_filter(dynamic_unit, name='unit')
@bp_generic.app_template_filter('traffic_color')
def traffic_color(amount, daily_credit):
return ("" if amount < daily_credit
else "bg-warning" if amount < 2 * daily_credit
else "bg-danger")
@bp_generic.app_template_filter('gib')
def to_gigabytes(number):
"""Convert a number from KiB to GiB
This is used mainly for the gauge, everything else uses the dynamic
`unit` function.
"""
return number / 1024 ** 2
@bp_generic.app_template_filter('date')
def jinja_format_date(date):
return format_date(date)
bp_generic.add_app_template_filter(format_money, name='money')
@bp_generic.route("/usertraffic")
def usertraffic():
"""Show a user's traffic on a static site just as in the usersuite.
If a user is logged but the ip corresponds to another user, a hint
is flashed and the traffic of the `ip_user` is displayed.
"""
ip_user = backends.user_from_ip(request.remote_addr)
chosen_user = None
if current_user.is_authenticated:
chosen_user = current_user
if not current_user.has_connection and not ip_user.is_authenticated:
flash(gettext("Aufgrund deines Nutzerstatus kannst Du "
"keine Trafficdaten einsehen."), "info")
return redirect(url_for('generic.index'))
if ip_user.is_authenticated:
chosen_user = ip_user
if current_user.is_authenticated:
if current_user != ip_user:
flash(gettext("Ein anderer Nutzer als der für diesen "
"Anschluss Eingetragene ist angemeldet!"),
'warning')
flash(gettext("Hier werden die Trafficdaten "
"dieses Anschlusses angezeigt."), "info")
if chosen_user:
user_id = chosen_user.id.value if chosen_user.id.supported else None
return render_template("usertraffic.html",
user_id=user_id,
traffic_user=chosen_user)
abort(401)
@bp_generic.route('/usertraffic/json')
def traffic_api():
user = (current_user if current_user.is_authenticated
else backends.user_from_ip(request.remote_addr))
if not user.is_authenticated:
return jsonify(version=0)
traffic_history = ({
'in': x['input'],
'out': x['output'],
} for x in reversed(user.traffic_history))
trafficdata = {
'quota': user.credit,
# `next` gets the first entry (“today”)
'traffic': next(traffic_history),
'history': list(traffic_history),
}
return jsonify(version=2, **trafficdata)
@bp_generic.route('/contact', methods=['GET', 'POST'])
def contact():
form = AnonymousContactForm()
if form.validate_on_submit():
success = send_contact_mail(
sender=form.email.data,
subject=form.subject.data,
name=form.name.data,
message=form.message.data,
dormitory_name=form.dormitory.data,
)
if success:
flash(gettext("Nachricht wurde versandt."), "success")
else:
flash(gettext("Es gab einen Fehler beim Versenden der Nachricht."),
'error')
return redirect(url_for('.index'))
elif form.is_submitted():
flash_formerrors(form)
elif current_user.is_authenticated:
flash(gettext("Sicher, dass Du das anonyme Formular "
"benutzen möchtest? Dies ist nur erforderlich, wenn Du "
"Administratoren eines anderen Wohnheims "
"kontaktieren willst."), 'info')
return render_template('anonymous_contact.html', form=form)
@bp_generic.route('/contact_official', methods=['GET', 'POST'])
def contact_official():
form = OfficialContactForm()
if form.validate_on_submit():
success = send_official_contact_mail(
sender=form.email.data,
subject=form.subject.data,
name=form.name.data,
message=form.message.data,
)
if success:
flash(gettext("Nachricht wurde versandt."), "success")
else:
flash(gettext("Es gab einen Fehler beim Versenden der Nachricht."),
'error')
return redirect(url_for('.index'))
elif form.is_submitted():
flash_formerrors(form)
return render_template(
'official_contact.html',
form=form
)
@bp_generic.route('/version')
def version():
""" Display version information from local repo """
sipa_dir = os.getcwd()
return render_template(
'version.html',
active_branch=get_repo_active_branch(sipa_dir),
commits=get_latest_commits(sipa_dir, 20),
)
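if __name__ == '__main__':
    # Illustrative only: sipa normally wires this blueprint up through its own
    # application factory; a bare registration (without the login manager and
    # backends that the views rely on) looks like this.
    from flask import Flask
    _demo_app = Flask(__name__)
    _demo_app.register_blueprint(bp_generic)
    print(_demo_app.url_map)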
|
mit
| 2,989,018,427,990,838,000
| 31.753799
| 80
| 0.627691
| false
| 3.740368
| false
| false
| false
|
brunobraga/termsaver
|
termsaverlib/plugins/exampleplugin/constants.py
|
1
|
2886
|
###############################################################################
#
# file: constants.py
#
# Purpose: refer to module documentation for details
#
# Note: This file is part of Termsaver-Example plugin, and should not be
# used or executed separately.
#
###############################################################################
#
# Copyright 2012 Termsaver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###############################################################################
"""
Holds constant values used throughout termsaver-exampleplugin plugin.
"""
#
# Termsaver modules
#
from termsaverlib.constants import PropertyClass
class Plugin(PropertyClass):
"""
Holds application related properties used by termsaver-exampleplugin plugin
screens. Refer to each of the available properties for detailed
documentation.
"""
VERSION = "0.1"
"""
Defines the version of termsaver-exampleplugin plugin. This is accessed during
install process, and to any help and usage messages informed by it.
Refer to CHANGELOG file for a complete history about this project.
"""
NAME = 'termsaver-exampleplugin'
"""
Defines the termsaver-exampleplugin plugin, usually the plugin package name.
"""
TITLE = 'TermSaver Example Plugin'
"""
Defines the termsaver-exampleplugin plugin's official name as it should appear
in documentation.
"""
DESCRIPTION = 'A set of screens for showing an example termsaver plugin.'
"""
Defines the main description of the termsaver-exampleplugin plugin.
"""
URL = 'http://www.termsaver.info/plugins'
"""
Defines the termsaver-exampleplugin plugin official website address.
"""
SOURCE_URL = 'http://github.com/brunobraga/termsaver'
"""
Defines the termsaver-exampleplugin plugin official source-code control site,
hosted on GitHub.
"""
AUTHORS = ['Bruno Braga <bruno.braga@gmail.com>']
"""
Defines a list of all authors contributing to the termsaver-exampleplugin plugin.
"""
class Settings(PropertyClass):
"""
Holds configuration settings used by termsaver-exampleplugin plugin. Refer to each
of the available properties for detailed documentation.
Follow the formatting:
SETTING_NAME = VALUE
\"\"\"
document it!
\"\"\"
"""
pass
|
apache-2.0
| 6,038,243,330,889,457,000
| 28.44898
| 86
| 0.646223
| false
| 4.544882
| false
| false
| false
|
bl4ckh0l3z/droidtrail
|
droidtrail/trails/androguard/arscrestableconfig.py
|
1
|
2720
|
# This file is part of DroidTrail.
#
# bl4ckh0l3 <bl4ckh0l3z at gmail.com>
#
# DroidTrail is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# DroidTrail is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DroidTrail. If not, see <http://www.gnu.org/licenses/>.
#
# **********************************************************************
# NOTE: This file is part of Androguard;
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# It is a modified and sanitized version for DroidTrail,
# created by bl4ckh0l3 <bl4ckh0l3z at gmail.com>.
# **********************************************************************
#
__author__ = 'desnos'
__license__ = 'GPL v2'
__maintainer__ = 'bl4ckh0l3'
__email__ = 'bl4ckh0l3z@gmail.com'
import logging
from struct import unpack
class ARSCResTableConfig:
def __init__(self, buff):
self.start = buff.get_idx()
self.size = unpack('<i', buff.read(4))[0]
self.imsi = unpack('<i', buff.read(4))[0]
self.locale = unpack('<i', buff.read(4))[0]
self.screenType = unpack('<i', buff.read(4))[0]
self.input = unpack('<i', buff.read(4))[0]
self.screenSize = unpack('<i', buff.read(4))[0]
self.version = unpack('<i', buff.read(4))[0]
self.screenConfig = 0
self.screenSizeDp = 0
if self.size >= 32:
self.screenConfig = unpack('<i', buff.read(4))[0]
if self.size >= 36:
self.screenSizeDp = unpack('<i', buff.read(4))[0]
self.exceedingSize = self.size - 36
if self.exceedingSize > 0:
logging.warning("too much bytes !")
self.padding = buff.read(self.exceedingSize)
#print "ARSCResTableConfig", hex(self.start), hex(self.size), hex(self.imsi), hex(self.locale), repr(self.get_language()), repr(self.get_country()), hex(self.screenType), hex(self.input), hex(self.screenSize), hex(self.version), hex(self.screenConfig), hex(self.screenSizeDp)
def get_language(self):
x = self.locale & 0x0000ffff
return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
def get_country(self):
x = (self.locale & 0xffff0000) >> 16
return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
|
gpl-2.0
| -5,226,607,345,617,743,000
| 38.434783
| 283
| 0.60625
| false
| 3.358025
| false
| false
| false
|
reynoldpj/sysadmrepo
|
mariadb/dump_stat_info.py
|
1
|
3676
|
#!/usr/bin/python
import MySQLdb
import json
from ConfigParser import ConfigParser
LIMIT = 7
user_and_client_stat_columns = ('TOTAL_CONNECTIONS', 'CONCURRENT_CONNECTIONS', 'CONNECTED_TIME', 'BUSY_TIME', 'CPU_TIME', 'BYTES_RECEIVED', 'BYTES_SENT', 'BINLOG_BYTES_WRITTEN', 'ROWS_READ', 'ROWS_SENT', 'ROWS_DELETED', 'ROWS_INSERTED', 'ROWS_UPDATED', 'SELECT_COMMANDS', 'UPDATE_COMMANDS', 'OTHER_COMMANDS', 'COMMIT_TRANSACTIONS', 'ROLLBACK_TRANSACTIONS', 'DENIED_CONNECTIONS', 'LOST_CONNECTIONS', 'ACCESS_DENIED', 'EMPTY_QUERIES')
# data holding dicts
data_user_stat = {}
data_client_stat = {}
data_index_stat = {}
data_table_stat = {}
try:
# Configuration parsers
cfg = ConfigParser()
cfg.read('/root/.my.cnf')
# Connect to mysql db and get cursor info
db = MySQLdb.connect(host = cfg.get(section='client',option='host'), db = 'INFORMATION_SCHEMA', user = cfg.get(section='client',option='user'), passwd = cfg.get(section='client',option ='password'))
cur = db.cursor()
#gather USER_STATISTICS and CLIENT_STATISTICS info
for col in user_and_client_stat_columns:
cur.execute("SELECT USER,%s FROM USER_STATISTICS ORDER BY %s DESC LIMIT %d" % (col, col, LIMIT))
data_user_stat[col] = cur.fetchall()
cur.execute("SELECT CLIENT,%s FROM CLIENT_STATISTICS ORDER BY %s DESC LIMIT %d" % (col, col, LIMIT))
data_client_stat[col] = cur.fetchall()
# gather INDEX_STATISTICS
cur.execute("select TABLE_SCHEMA, TABLE_NAME, INDEX_NAME, ROWS_READ from INDEX_STATISTICS order by ROWS_READ desc limit %d" % LIMIT)
data_index_stat['ROWS_READ'] = cur.fetchall()
# gather TABLE_STATISTICS
cur.execute("select TABLE_SCHEMA,TABLE_NAME,ROWS_CHANGED from TABLE_STATISTICS order by ROWS_CHANGED desc limit %d" % LIMIT)
data_table_stat['ROWS_CHANGED'] = cur.fetchall()
cur.execute("select TABLE_SCHEMA,TABLE_NAME,ROWS_READ from TABLE_STATISTICS order by ROWS_READ desc limit %d" % LIMIT)
data_table_stat['ROWS_READ'] = cur.fetchall()
cur.execute("select TABLE_SCHEMA,TABLE_NAME,ROWS_CHANGED_X_INDEXES from TABLE_STATISTICS order by ROWS_CHANGED_X_INDEXES desc limit %d" % LIMIT)
data_table_stat['ROWS_CHANGED_X_INDEXES'] = cur.fetchall()
cur.execute("select TABLE_SCHEMA,TABLE_NAME,ROWS_READ from TABLE_STATISTICS where TABLE_NAME like '%s' order by ROWS_READ desc limit %d" % ("%comments%",LIMIT))
data_table_stat['ROWS_READ_comments'] = cur.fetchall()
cur.execute("select TABLE_SCHEMA,TABLE_NAME,ROWS_CHANGED from TABLE_STATISTICS where TABLE_NAME REGEXP 'gast|guest|gasten|gjeste|gbook|gaest' order by ROWS_CHANGED desc limit %d" % LIMIT)
data_table_stat['ROWS_CHANGED_guestbook'] = cur.fetchall()
querystring = {'ROWS_CHANGED_comments':'%comments%' , 'ROWS_CHANGED_phpbbuser': 'phpbb%user%', 'ROWS_CHANGED_phpbbloginattempt':'phpbb%login%attempt%','ROWS_CHANGED_phpbbpost': 'phpbb%post%', 'ROWS_CHANGED_wpcomments': '%wp%comments%', 'ROWS_CHANGED_wpposts':'%wp%posts%', 'ROWS_CHANGED_wpusers': '%wp%users%','ROWS_CHANGED_users': 'users%', 'ROWS_CHANGED_session':'%session%', 'ROWS_CHANGED_friend': '%friend%' }
for key in querystring.keys():
cur.execute("select TABLE_SCHEMA,TABLE_NAME,ROWS_CHANGED from TABLE_STATISTICS where TABLE_NAME like '%s' order by ROWS_CHANGED desc limit %d" % (querystring[key], LIMIT))
data_table_stat[key] = cur.fetchall()
print json.dumps({'USER_STATISTICS': data_user_stat, 'CLIENT_STATISTICS': data_client_stat, 'INDEX_STATISTICS': data_index_stat ,'TABLE_STATISTICS': data_table_stat})
except Exception,e:
print e.message
finally:
    # close cursor and db connection; they may not exist if connecting failed
    try:
        cur.close()
        db.close()
    except NameError:
        pass
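# Usage sketch (illustrative; assumes the MariaDB user/client/table/index statistics
# are enabled on the server and credentials live in /root/.my.cnf):
#   ./dump_stat_info.py > stats.json
# The script prints a single JSON document keyed by USER_STATISTICS,
# CLIENT_STATISTICS, INDEX_STATISTICS and TABLE_STATISTICS.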
|
gpl-2.0
| 2,162,547,570,938,539,800
| 53.058824
| 432
| 0.701034
| false
| 3.190972
| false
| false
| false
|
lmyrefelt/CouchPotatoServer
|
couchpotato/core/notifications/plex/main.py
|
1
|
2834
|
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from urllib2 import URLError
from xml.dom import minidom
import traceback
log = CPLog(__name__)
class Plex(Notification):
def __init__(self):
super(Plex, self).__init__()
addEvent('renamer.after', self.addToLibrary)
def addToLibrary(self, message = None, group = {}):
if self.isDisabled(): return
log.info('Sending notification to Plex')
hosts = [cleanHost(x.strip() + ':32400') for x in self.conf('host').split(",")]
for host in hosts:
source_type = ['movie']
base_url = '%slibrary/sections' % host
refresh_url = '%s/%%s/refresh' % base_url
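            # Illustrative: with a configured host of 10.0.0.5 this builds
            #   http://10.0.0.5:32400/library/sections            (list sections)
            #   http://10.0.0.5:32400/library/sections/1/refresh  (refresh section key 1)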
try:
sections_xml = self.urlopen(base_url)
xml_sections = minidom.parseString(sections_xml)
sections = xml_sections.getElementsByTagName('Directory')
for s in sections:
if s.getAttribute('type') in source_type:
url = refresh_url % s.getAttribute('key')
x = self.urlopen(url)
except:
log.error('Plex library update failed for %s, Media Server not running: %s', (host, traceback.format_exc(1)))
return False
return True
def notify(self, message = '', data = {}, listener = None):
hosts = [x.strip() + ':3000' for x in self.conf('host').split(",")]
successful = 0
for host in hosts:
if self.send({'command': 'ExecBuiltIn', 'parameter': 'Notification(CouchPotato, %s)' % message}, host):
successful += 1
return successful == len(hosts)
def send(self, command, host):
url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, tryUrlencode(command))
headers = {}
try:
self.urlopen(url, headers = headers, show_error = False)
except URLError:
log.error("Couldn't sent command to Plex, probably just running Media Server")
return False
except:
log.error("Couldn't sent command to Plex: %s", traceback.format_exc())
return False
log.info('Plex notification to %s successful.', host)
return True
def test(self):
test_type = self.testNotifyName()
log.info('Sending test to %s', test_type)
success = self.notify(
message = self.test_message,
data = {},
listener = 'test'
)
success2 = self.addToLibrary()
return {
'success': success or success2
}
|
gpl-3.0
| -4,482,584,000,044,181,500
| 30.488889
| 125
| 0.575865
| false
| 4.242515
| true
| false
| false
|
mission-peace/interview
|
python/dynamic/coin_change_num_ways.py
|
1
|
2081
|
"""
Problem Statement
=================
Given a total and coins of certain denominations find number of ways total can be formed from coins assuming infinity
supply of coins.
Analysis
--------
* Runtime : O(num_of_coins * total)
Video
-----
* https://youtu.be/_fgjrs570YE
Reference
---------
* http://www.geeksforgeeks.org/dynamic-programming-set-7-coin-change/
"""
def coin_changing_num_ways(coins, total):
cols = total + 1 # 1 for value 0 in total
rows = len(coins)
T = [[1 if col == 0 else 0 for col in range(cols)] for _ in range(rows)]
    for i in range(rows):
        for j in range(1, cols):
            if i == 0:
                # base row: only the first coin is available
                T[i][j] = T[i][j - coins[i]] if j >= coins[i] else 0
            elif j < coins[i]:
                T[i][j] = T[i - 1][j]
            else:
                T[i][j] = T[i - 1][j] + T[i][j - coins[i]]
return T[rows - 1][cols - 1]
def coin_changing_num_ways2(coins, total):
cols = total + 1
num_coins = len(coins)
# Using 1-D Array instead of 2-D Array. Approach is same as coin_changing_num_ways.
T = [1 if col == 0 else 0 for col in range(cols)]
for i in range(num_coins):
for col in range(1, cols):
if col >= coins[i]:
T[col] += T[col - coins[i]]
return T[cols - 1]
def print_coin_changes_recursive(coins, total, results_stack, pos):
if total == 0:
for coin in results_stack:
print "%d " % coin,
print
for idx in range(pos, len(coins)):
if total >= coins[idx]:
results_stack.append(coins[idx])
print_coin_changes_recursive(coins, total - coins[idx], results_stack, idx)
results_stack.pop() # Remove last inserted coin from stack to use new coin with different index.
def print_coin_changes(coins, total):
print_coin_changes_recursive(coins, total, list(), 0)
if __name__ == '__main__':
coins = [1, 2, 3]
total = 5
expected = 5
assert expected == coin_changing_num_ways(coins, total)
assert expected == coin_changing_num_ways2(coins, total)
print_coin_changes(coins, total)
|
apache-2.0
| -4,793,228,212,112,853,000
| 26.025974
| 117
| 0.575204
| false
| 3.236392
| false
| false
| false
|
JackGavin13/octoprint-test-not-finished
|
src/octoprint/plugins/pluginmanager/__init__.py
|
1
|
39848
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2015 The OctoPrint Project - Released under terms of the AGPLv3 License"
from past.builtins import basestring
import octoprint.plugin
import octoprint.plugin.core
from octoprint.settings import valid_boolean_trues
from octoprint.server.util.flask import restricted_access, with_revalidation_checking, check_etag
from octoprint.server import admin_permission, VERSION
from octoprint.util.pip import LocalPipCaller, UnknownPip
from flask import jsonify, make_response
from flask.ext.babel import gettext
from collections import OrderedDict
import logging
import sarge
import sys
import requests
import re
import os
import pkg_resources
import copy
import dateutil.parser
import time
import threading
class PluginManagerPlugin(octoprint.plugin.SimpleApiPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.SettingsPlugin,
octoprint.plugin.StartupPlugin,
octoprint.plugin.BlueprintPlugin,
octoprint.plugin.EventHandlerPlugin):
ARCHIVE_EXTENSIONS = (".zip", ".tar.gz", ".tgz", ".tar")
OPERATING_SYSTEMS = dict(windows=["win32"],
linux=lambda x: x.startswith("linux"),
macos=["darwin"],
freebsd=lambda x: x.startswith("freebsd"))
PIP_INAPPLICABLE_ARGUMENTS = dict(uninstall=["--user"])
RECONNECT_HOOKS = ["octoprint.comm.protocol.*",]
def __init__(self):
self._pending_enable = set()
self._pending_disable = set()
self._pending_install = set()
self._pending_uninstall = set()
self._pip_caller = None
self._repository_available = False
self._repository_plugins = []
self._repository_cache_path = None
self._repository_cache_ttl = 0
self._notices = dict()
self._notices_available = False
self._notices_cache_path = None
self._notices_cache_ttl = 0
self._console_logger = None
def initialize(self):
self._console_logger = logging.getLogger("octoprint.plugins.pluginmanager.console")
self._repository_cache_path = os.path.join(self.get_plugin_data_folder(), "plugins.json")
self._repository_cache_ttl = self._settings.get_int(["repository_ttl"]) * 60
self._notices_cache_path = os.path.join(self.get_plugin_data_folder(), "notices.json")
self._notices_cache_ttl = self._settings.get_int(["notices_ttl"]) * 60
self._pip_caller = LocalPipCaller(force_user=self._settings.get_boolean(["pip_force_user"]))
self._pip_caller.on_log_call = self._log_call
self._pip_caller.on_log_stdout = self._log_stdout
self._pip_caller.on_log_stderr = self._log_stderr
##~~ Body size hook
def increase_upload_bodysize(self, current_max_body_sizes, *args, **kwargs):
# set a maximum body size of 50 MB for plugin archive uploads
return [("POST", r"/upload_archive", 50 * 1024 * 1024)]
##~~ StartupPlugin
def on_after_startup(self):
from octoprint.logging.handlers import CleaningTimedRotatingFileHandler
console_logging_handler = CleaningTimedRotatingFileHandler(self._settings.get_plugin_logfile_path(postfix="console"), when="D", backupCount=3)
console_logging_handler.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
console_logging_handler.setLevel(logging.DEBUG)
self._console_logger.addHandler(console_logging_handler)
self._console_logger.setLevel(logging.DEBUG)
self._console_logger.propagate = False
# decouple repository fetching from server startup
self._fetch_all_data(async=True)
##~~ SettingsPlugin
def get_settings_defaults(self):
return dict(
repository="http://plugins.octoprint.org/plugins.json",
repository_ttl=24*60,
notices="http://plugins.octoprint.org/notices.json",
notices_ttl=6*60,
pip_args=None,
pip_force_user=False,
dependency_links=False,
hidden=[]
)
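        # Illustrative: in a stock OctoPrint install these defaults end up under the
        # plugins.pluginmanager section of config.yaml (e.g. repository_ttl: 1440).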
def on_settings_save(self, data):
octoprint.plugin.SettingsPlugin.on_settings_save(self, data)
self._repository_cache_ttl = self._settings.get_int(["repository_ttl"]) * 60
self._notices_cache_ttl = self._settings.get_int(["notices_ttl"]) * 60
self._pip_caller.force_user = self._settings.get_boolean(["pip_force_user"])
##~~ AssetPlugin
def get_assets(self):
return dict(
js=["js/pluginmanager.js"],
css=["css/pluginmanager.css"],
less=["less/pluginmanager.less"]
)
##~~ TemplatePlugin
def get_template_configs(self):
return [
dict(type="settings", name=gettext("Plugin Manager"), template="pluginmanager_settings.jinja2", custom_bindings=True),
dict(type="about", name="Plugin Licenses", template="pluginmanager_about.jinja2")
]
def get_template_vars(self):
plugins = sorted(self._get_plugins(), key=lambda x: x["name"].lower())
return dict(
all=plugins,
thirdparty=filter(lambda p: not p["bundled"], plugins),
archive_extensions=self.__class__.ARCHIVE_EXTENSIONS
)
def get_template_types(self, template_sorting, template_rules, *args, **kwargs):
return [
("about_thirdparty", dict(), dict(template=lambda x: x + "_about_thirdparty.jinja2"))
]
##~~ BlueprintPlugin
@octoprint.plugin.BlueprintPlugin.route("/upload_archive", methods=["POST"])
@restricted_access
@admin_permission.require(403)
def upload_archive(self):
import flask
input_name = "file"
input_upload_path = input_name + "." + self._settings.global_get(["server", "uploads", "pathSuffix"])
input_upload_name = input_name + "." + self._settings.global_get(["server", "uploads", "nameSuffix"])
if input_upload_path not in flask.request.values or input_upload_name not in flask.request.values:
return flask.make_response("No file included", 400)
upload_path = flask.request.values[input_upload_path]
upload_name = flask.request.values[input_upload_name]
exts = filter(lambda x: upload_name.lower().endswith(x), self.__class__.ARCHIVE_EXTENSIONS)
if not len(exts):
return flask.make_response("File doesn't have a valid extension for a plugin archive", 400)
ext = exts[0]
import tempfile
import shutil
import os
archive = tempfile.NamedTemporaryFile(delete=False, suffix="{ext}".format(**locals()))
try:
archive.close()
shutil.copy(upload_path, archive.name)
return self.command_install(path=archive.name, force="force" in flask.request.values and flask.request.values["force"] in valid_boolean_trues)
finally:
try:
os.remove(archive.name)
except Exception as e:
self._logger.warn("Could not remove temporary file {path} again: {message}".format(path=archive.name, message=str(e)))
##~~ EventHandlerPlugin
def on_event(self, event, payload):
from octoprint.events import Events
if event != Events.CONNECTIVITY_CHANGED or not payload or not payload.get("new", False):
return
self._fetch_all_data(async=True)
##~~ SimpleApiPlugin
def get_api_commands(self):
return {
"install": ["url"],
"uninstall": ["plugin"],
"enable": ["plugin"],
"disable": ["plugin"],
"refresh_repository": []
}
def on_api_get(self, request):
if not admin_permission.can():
return make_response("Insufficient rights", 403)
from octoprint.server import safe_mode
refresh_repository = request.values.get("refresh_repository", "false") in valid_boolean_trues
if refresh_repository:
self._repository_available = self._refresh_repository()
refresh_notices = request.values.get("refresh_notices", "false") in valid_boolean_trues
if refresh_notices:
self._notices_available = self._refresh_notices()
def view():
return jsonify(plugins=self._get_plugins(),
repository=dict(
available=self._repository_available,
plugins=self._repository_plugins
),
os=self._get_os(),
octoprint=self._get_octoprint_version_string(),
pip=dict(
available=self._pip_caller.available,
version=self._pip_caller.version_string,
install_dir=self._pip_caller.install_dir,
use_user=self._pip_caller.use_user,
virtual_env=self._pip_caller.virtual_env,
additional_args=self._settings.get(["pip_args"]),
python=sys.executable
),
safe_mode=safe_mode,
online=self._connectivity_checker.online)
def etag():
import hashlib
hash = hashlib.sha1()
hash.update(repr(self._get_plugins()))
hash.update(str(self._repository_available))
hash.update(repr(self._repository_plugins))
hash.update(str(self._notices_available))
hash.update(repr(self._notices))
hash.update(repr(safe_mode))
hash.update(repr(self._connectivity_checker.online))
return hash.hexdigest()
def condition():
return check_etag(etag())
return with_revalidation_checking(etag_factory=lambda *args, **kwargs: etag(),
condition=lambda *args, **kwargs: condition(),
unless=lambda: refresh_repository or refresh_notices)(view)()
def on_api_command(self, command, data):
if not admin_permission.can():
return make_response("Insufficient rights", 403)
if self._printer.is_printing() or self._printer.is_paused():
# do not update while a print job is running
return make_response("Printer is currently printing or paused", 409)
if command == "install":
url = data["url"]
plugin_name = data["plugin"] if "plugin" in data else None
return self.command_install(url=url,
force="force" in data and data["force"] in valid_boolean_trues,
dependency_links="dependency_links" in data
and data["dependency_links"] in valid_boolean_trues,
reinstall=plugin_name)
elif command == "uninstall":
plugin_name = data["plugin"]
if not plugin_name in self._plugin_manager.plugins:
return make_response("Unknown plugin: %s" % plugin_name, 404)
plugin = self._plugin_manager.plugins[plugin_name]
return self.command_uninstall(plugin)
elif command == "enable" or command == "disable":
plugin_name = data["plugin"]
if not plugin_name in self._plugin_manager.plugins:
return make_response("Unknown plugin: %s" % plugin_name, 404)
plugin = self._plugin_manager.plugins[plugin_name]
return self.command_toggle(plugin, command)
def command_install(self, url=None, path=None, force=False, reinstall=None, dependency_links=False):
if url is not None:
source = url
source_type = "url"
already_installed_check = lambda line: url in line
elif path is not None:
path = os.path.abspath(path)
path_url = "file://" + path
if os.sep != "/":
# windows gets special handling
path = path.replace(os.sep, "/").lower()
path_url = "file:///" + path
source = path
source_type = "path"
already_installed_check = lambda line: path_url in line.lower() # lower case in case of windows
else:
raise ValueError("Either URL or path must be provided")
self._logger.info("Installing plugin from {}".format(source))
pip_args = ["install", sarge.shell_quote(source)]
if dependency_links or self._settings.get_boolean(["dependency_links"]):
pip_args.append("--process-dependency-links")
all_plugins_before = self._plugin_manager.find_plugins(existing=dict())
already_installed_string = "Requirement already satisfied (use --upgrade to upgrade)"
success_string = "Successfully installed"
failure_string = "Could not install"
try:
returncode, stdout, stderr = self._call_pip(pip_args)
# pip's output for a package that is already installed looks something like any of these:
#
# Requirement already satisfied (use --upgrade to upgrade): OctoPrint-Plugin==1.0 from \
# https://example.com/foobar.zip in <lib>
# Requirement already satisfied (use --upgrade to upgrade): OctoPrint-Plugin in <lib>
# Requirement already satisfied (use --upgrade to upgrade): OctoPrint-Plugin==1.0 from \
# file:///tmp/foobar.zip in <lib>
# Requirement already satisfied (use --upgrade to upgrade): OctoPrint-Plugin==1.0 from \
# file:///C:/Temp/foobar.zip in <lib>
#
# If we detect any of these matching what we just tried to install, we'll need to trigger a second
# install with reinstall flags.
if not force and any(map(lambda x: x.strip().startswith(already_installed_string) and already_installed_check(x),
stdout)):
self._logger.info("Plugin to be installed from {} was already installed, forcing a reinstall".format(source))
self._log_message("Looks like the plugin was already installed. Forcing a reinstall.")
force = True
except:
self._logger.exception("Could not install plugin from %s" % url)
return make_response("Could not install plugin from URL, see the log for more details", 500)
else:
if force:
# We don't use --upgrade here because that will also happily update all our dependencies - we'd rather
# do that in a controlled manner
pip_args += ["--ignore-installed", "--force-reinstall", "--no-deps"]
try:
returncode, stdout, stderr = self._call_pip(pip_args)
except:
self._logger.exception("Could not install plugin from {}".format(source))
return make_response("Could not install plugin from source {}, see the log for more details"
.format(source), 500)
try:
result_line = filter(lambda x: x.startswith(success_string) or x.startswith(failure_string),
stdout)[-1]
except IndexError:
self._logger.error("Installing the plugin from {} failed, could not parse output from pip. "
"See plugin_pluginmanager_console.log for generated output".format(source))
result = dict(result=False,
source=source,
source_type=source_type,
reason="Could not parse output from pip, see plugin_pluginmanager_console.log "
"for generated output")
self._send_result_notification("install", result)
return jsonify(result)
# The final output of a pip install command looks something like this:
#
# Successfully installed OctoPrint-Plugin-1.0 Dependency-One-0.1 Dependency-Two-9.3
#
# or this:
#
# Successfully installed OctoPrint-Plugin Dependency-One Dependency-Two
# Cleaning up...
#
# So we'll need to fetch the "Successfully installed" line, strip the "Successfully" part, then split
# by whitespace and strip to get all installed packages.
#
# We then need to iterate over all known plugins and see if either the package name or the package name plus
# version number matches one of our installed packages. If it does, that's our installed plugin.
#
# Known issue: This might return the wrong plugin if more than one plugin was installed through this
# command (e.g. due to pulling in another plugin as dependency). It should be safe for now though to
# consider this a rare corner case. Once it becomes a real problem we'll just extend the plugin manager
# so that it can report on more than one installed plugin.
result_line = result_line.strip()
if not result_line.startswith(success_string):
self._logger.error("Installing the plugin from {} failed, pip did not report successful installation"
.format(source))
result = dict(result=False,
source=source,
source_type=source_type,
reason="Pip did not report successful installation")
self._send_result_notification("install", result)
return jsonify(result)
installed = map(lambda x: x.strip(), result_line[len(success_string):].split(" "))
all_plugins_after = self._plugin_manager.find_plugins(existing=dict(), ignore_uninstalled=False)
new_plugin = self._find_installed_plugin(installed, plugins=all_plugins_after)
if new_plugin is None:
self._logger.warn("The plugin was installed successfully, but couldn't be found afterwards to "
"initialize properly during runtime. Please restart OctoPrint.")
result = dict(result=True,
source=source,
source_type=source_type,
needs_restart=True,
needs_refresh=True,
needs_reconnect=True,
was_reinstalled=False,
plugin="unknown")
self._send_result_notification("install", result)
return jsonify(result)
self._plugin_manager.reload_plugins()
needs_restart = self._plugin_manager.is_restart_needing_plugin(new_plugin) \
or new_plugin.key in all_plugins_before \
or reinstall is not None
needs_refresh = new_plugin.implementation \
and isinstance(new_plugin.implementation, octoprint.plugin.ReloadNeedingPlugin)
needs_reconnect = self._plugin_manager.has_any_of_hooks(new_plugin, self._reconnect_hooks) and self._printer.is_operational()
is_reinstall = self._plugin_manager.is_plugin_marked(new_plugin.key, "uninstalled")
self._plugin_manager.mark_plugin(new_plugin.key,
uninstalled=False,
installed=not is_reinstall and needs_restart)
self._plugin_manager.log_all_plugins()
self._logger.info("The plugin was installed successfully: {}, version {}".format(new_plugin.name, new_plugin.version))
result = dict(result=True,
source=source,
source_type=source_type,
needs_restart=needs_restart,
needs_refresh=needs_refresh,
needs_reconnect=needs_reconnect,
was_reinstalled=new_plugin.key in all_plugins_before or reinstall is not None,
plugin=self._to_external_plugin(new_plugin))
self._send_result_notification("install", result)
return jsonify(result)
def command_uninstall(self, plugin):
if plugin.key == "pluginmanager":
return make_response("Can't uninstall Plugin Manager", 403)
if not plugin.managable:
return make_response("Plugin is not managable and hence cannot be uninstalled", 403)
if plugin.bundled:
return make_response("Bundled plugins cannot be uninstalled", 403)
if plugin.origin is None:
self._logger.warn(u"Trying to uninstall plugin {plugin} but origin is unknown".format(**locals()))
return make_response("Could not uninstall plugin, its origin is unknown")
if plugin.origin.type == "entry_point":
# plugin is installed through entry point, need to use pip to uninstall it
origin = plugin.origin[3]
if origin is None:
origin = plugin.origin[2]
pip_args = ["uninstall", "--yes", origin]
try:
self._call_pip(pip_args)
except:
self._logger.exception(u"Could not uninstall plugin via pip")
return make_response("Could not uninstall plugin via pip, see the log for more details", 500)
elif plugin.origin.type == "folder":
import os
import shutil
full_path = os.path.realpath(plugin.location)
if os.path.isdir(full_path):
# plugin is installed via a plugin folder, need to use rmtree to get rid of it
self._log_stdout(u"Deleting plugin from {folder}".format(folder=plugin.location))
shutil.rmtree(full_path)
elif os.path.isfile(full_path):
self._log_stdout(u"Deleting plugin from {file}".format(file=plugin.location))
os.remove(full_path)
if full_path.endswith(".py"):
pyc_file = "{full_path}c".format(**locals())
if os.path.isfile(pyc_file):
os.remove(pyc_file)
else:
self._logger.warn(u"Trying to uninstall plugin {plugin} but origin is unknown ({plugin.origin.type})".format(**locals()))
return make_response("Could not uninstall plugin, its origin is unknown")
needs_restart = self._plugin_manager.is_restart_needing_plugin(plugin)
needs_refresh = plugin.implementation and isinstance(plugin.implementation, octoprint.plugin.ReloadNeedingPlugin)
needs_reconnect = self._plugin_manager.has_any_of_hooks(plugin, self._reconnect_hooks) and self._printer.is_operational()
was_pending_install = self._plugin_manager.is_plugin_marked(plugin.key, "installed")
self._plugin_manager.mark_plugin(plugin.key,
uninstalled=not was_pending_install and needs_restart,
installed=False)
if not needs_restart:
try:
self._plugin_manager.disable_plugin(plugin.key, plugin=plugin)
except octoprint.plugin.core.PluginLifecycleException as e:
self._logger.exception(u"Problem disabling plugin {name}".format(name=plugin.key))
result = dict(result=False, uninstalled=True, disabled=False, unloaded=False, reason=e.reason)
self._send_result_notification("uninstall", result)
return jsonify(result)
try:
self._plugin_manager.unload_plugin(plugin.key)
except octoprint.plugin.core.PluginLifecycleException as e:
self._logger.exception(u"Problem unloading plugin {name}".format(name=plugin.key))
result = dict(result=False, uninstalled=True, disabled=True, unloaded=False, reason=e.reason)
self._send_result_notification("uninstall", result)
return jsonify(result)
self._plugin_manager.reload_plugins()
result = dict(result=True,
needs_restart=needs_restart,
needs_refresh=needs_refresh,
needs_reconnect=needs_reconnect,
plugin=self._to_external_plugin(plugin))
self._send_result_notification("uninstall", result)
return jsonify(result)
def command_toggle(self, plugin, command):
if plugin.key == "pluginmanager":
return make_response("Can't enable/disable Plugin Manager", 400)
needs_restart = self._plugin_manager.is_restart_needing_plugin(plugin)
needs_refresh = plugin.implementation and isinstance(plugin.implementation, octoprint.plugin.ReloadNeedingPlugin)
needs_reconnect = self._plugin_manager.has_any_of_hooks(plugin, self._reconnect_hooks) and self._printer.is_operational()
pending = ((command == "disable" and plugin.key in self._pending_enable) or (command == "enable" and plugin.key in self._pending_disable))
safe_mode_victim = getattr(plugin, "safe_mode_victim", False)
needs_restart_api = (needs_restart or safe_mode_victim) and not pending
needs_refresh_api = needs_refresh and not pending
needs_reconnect_api = needs_reconnect and not pending
try:
if command == "disable":
self._mark_plugin_disabled(plugin, needs_restart=needs_restart)
elif command == "enable":
self._mark_plugin_enabled(plugin, needs_restart=needs_restart)
except octoprint.plugin.core.PluginLifecycleException as e:
self._logger.exception(u"Problem toggling enabled state of {name}: {reason}".format(name=plugin.key, reason=e.reason))
result = dict(result=False, reason=e.reason)
except octoprint.plugin.core.PluginNeedsRestart:
result = dict(result=True,
needs_restart=True,
needs_refresh=True,
needs_reconnect=True,
plugin=self._to_external_plugin(plugin))
else:
result = dict(result=True,
needs_restart=needs_restart_api,
needs_refresh=needs_refresh_api,
needs_reconnect=needs_reconnect_api,
plugin=self._to_external_plugin(plugin))
self._send_result_notification(command, result)
return jsonify(result)
def _find_installed_plugin(self, packages, plugins=None):
if plugins is None:
plugins = self._plugin_manager.find_plugins(existing=dict(), ignore_uninstalled=False)
for key, plugin in plugins.items():
if plugin.origin is None or plugin.origin.type != "entry_point":
continue
package_name = plugin.origin.package_name
package_version = plugin.origin.package_version
versioned_package = "{package_name}-{package_version}".format(**locals())
if package_name in packages or versioned_package in packages:
# exact match, we are done here
return plugin
else:
# it might still be a version that got stripped by python's package resources, e.g. 1.4.5a0 => 1.4.5a
found = False
for inst in packages:
if inst.startswith(versioned_package):
found = True
break
if found:
return plugin
return None
def _send_result_notification(self, action, result):
notification = dict(type="result", action=action)
notification.update(result)
self._plugin_manager.send_plugin_message(self._identifier, notification)
def _call_pip(self, args):
if self._pip_caller is None or not self._pip_caller.available:
raise RuntimeError(u"No pip available, can't operate".format(**locals()))
if "--process-dependency-links" in args:
self._log_message(u"Installation needs to process external dependencies, that might make it take a bit longer than usual depending on the pip version")
additional_args = self._settings.get(["pip_args"])
if additional_args is not None:
inapplicable_arguments = self.__class__.PIP_INAPPLICABLE_ARGUMENTS.get(args[0], list())
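            # Illustrative: with pip_args configured as "--user --no-cache-dir", a pip
            # "uninstall" call has "--user" stripped here, per PIP_INAPPLICABLE_ARGUMENTS.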
for inapplicable_argument in inapplicable_arguments:
additional_args = re.sub("(^|\s)" + re.escape(inapplicable_argument) + "\\b", "", additional_args)
if additional_args:
args.append(additional_args)
return self._pip_caller.execute(*args)
def _log_message(self, *lines):
self._log(lines, prefix=u"*", stream="message")
def _log_call(self, *lines):
self._log(lines, prefix=u" ", stream="call")
def _log_stdout(self, *lines):
self._log(lines, prefix=u">", stream="stdout")
def _log_stderr(self, *lines):
self._log(lines, prefix=u"!", stream="stderr")
def _log(self, lines, prefix=None, stream=None, strip=True):
if strip:
lines = map(lambda x: x.strip(), lines)
self._plugin_manager.send_plugin_message(self._identifier, dict(type="loglines", loglines=[dict(line=line, stream=stream) for line in lines]))
for line in lines:
self._console_logger.debug(u"{prefix} {line}".format(**locals()))
def _mark_plugin_enabled(self, plugin, needs_restart=False):
disabled_list = list(self._settings.global_get(["plugins", "_disabled"]))
if plugin.key in disabled_list:
disabled_list.remove(plugin.key)
self._settings.global_set(["plugins", "_disabled"], disabled_list)
self._settings.save(force=True)
if not needs_restart and not getattr(plugin, "safe_mode_victim", False):
self._plugin_manager.enable_plugin(plugin.key)
else:
if plugin.key in self._pending_disable:
self._pending_disable.remove(plugin.key)
elif (not plugin.enabled and not getattr(plugin, "safe_mode_enabled", False)) and plugin.key not in self._pending_enable:
self._pending_enable.add(plugin.key)
def _mark_plugin_disabled(self, plugin, needs_restart=False):
disabled_list = list(self._settings.global_get(["plugins", "_disabled"]))
if not plugin.key in disabled_list:
disabled_list.append(plugin.key)
self._settings.global_set(["plugins", "_disabled"], disabled_list)
self._settings.save(force=True)
if not needs_restart and not getattr(plugin, "safe_mode_victim", False):
self._plugin_manager.disable_plugin(plugin.key)
else:
if plugin.key in self._pending_enable:
self._pending_enable.remove(plugin.key)
elif (plugin.enabled or getattr(plugin, "safe_mode_enabled", False)) and plugin.key not in self._pending_disable:
self._pending_disable.add(plugin.key)
def _fetch_all_data(self, async=False):
def run():
self._repository_available = self._fetch_repository_from_disk()
self._notices_available = self._fetch_notices_from_disk()
if async:
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
else:
run()
def _fetch_repository_from_disk(self):
repo_data = None
if os.path.isfile(self._repository_cache_path):
import time
mtime = os.path.getmtime(self._repository_cache_path)
if mtime + self._repository_cache_ttl >= time.time() > mtime:
try:
import json
with open(self._repository_cache_path) as f:
repo_data = json.load(f)
self._logger.info("Loaded plugin repository data from disk, was still valid")
except:
self._logger.exception("Error while loading repository data from {}".format(self._repository_cache_path))
return self._refresh_repository(repo_data=repo_data)
def _fetch_repository_from_url(self):
if not self._connectivity_checker.online:
self._logger.info("Looks like we are offline, can't fetch repository from network")
return None
repository_url = self._settings.get(["repository"])
try:
r = requests.get(repository_url, timeout=30)
r.raise_for_status()
self._logger.info("Loaded plugin repository data from {}".format(repository_url))
except Exception as e:
self._logger.exception("Could not fetch plugins from repository at {repository_url}: {message}".format(repository_url=repository_url, message=str(e)))
return None
repo_data = r.json()
try:
import json
with octoprint.util.atomic_write(self._repository_cache_path, "wb") as f:
json.dump(repo_data, f)
except Exception as e:
self._logger.exception("Error while saving repository data to {}: {}".format(self._repository_cache_path, str(e)))
return repo_data
def _refresh_repository(self, repo_data=None):
if repo_data is None:
repo_data = self._fetch_repository_from_url()
if repo_data is None:
return False
current_os = self._get_os()
octoprint_version = self._get_octoprint_version(base=True)
def map_repository_entry(entry):
result = copy.deepcopy(entry)
if not "follow_dependency_links" in result:
result["follow_dependency_links"] = False
result["is_compatible"] = dict(
octoprint=True,
os=True
)
if "compatibility" in entry:
if "octoprint" in entry["compatibility"] and entry["compatibility"]["octoprint"] is not None and isinstance(entry["compatibility"]["octoprint"], (list, tuple)) and len(entry["compatibility"]["octoprint"]):
result["is_compatible"]["octoprint"] = self._is_octoprint_compatible(octoprint_version, entry["compatibility"]["octoprint"])
if "os" in entry["compatibility"] and entry["compatibility"]["os"] is not None and isinstance(entry["compatibility"]["os"], (list, tuple)) and len(entry["compatibility"]["os"]):
result["is_compatible"]["os"] = self._is_os_compatible(current_os, entry["compatibility"]["os"])
return result
self._repository_plugins = map(map_repository_entry, repo_data)
return True
def _fetch_notices_from_disk(self):
notice_data = None
if os.path.isfile(self._notices_cache_path):
import time
mtime = os.path.getmtime(self._notices_cache_path)
if mtime + self._notices_cache_ttl >= time.time() > mtime:
try:
import json
with open(self._notices_cache_path) as f:
notice_data = json.load(f)
self._logger.info("Loaded notice data from disk, was still valid")
except:
self._logger.exception("Error while loading notices from {}".format(self._notices_cache_path))
return self._refresh_notices(notice_data=notice_data)
def _fetch_notices_from_url(self):
if not self._connectivity_checker.online:
self._logger.info("Looks like we are offline, can't fetch notices from network")
return None
notices_url = self._settings.get(["notices"])
try:
r = requests.get(notices_url, timeout=30)
r.raise_for_status()
self._logger.info("Loaded plugin notices data from {}".format(notices_url))
except Exception as e:
self._logger.exception("Could not fetch notices from {notices_url}: {message}".format(notices_url=notices_url, message=str(e)))
return None
notice_data = r.json()
try:
import json
with octoprint.util.atomic_write(self._notices_cache_path, "wb") as f:
json.dump(notice_data, f)
except Exception as e:
self._logger.exception("Error while saving notices to {}: {}".format(self._notices_cache_path, str(e)))
return notice_data
def _refresh_notices(self, notice_data=None):
if notice_data is None:
notice_data = self._fetch_notices_from_url()
if notice_data is None:
return False
notices = dict()
for notice in notice_data:
if not "plugin" in notice or not "text" in notice or not "date" in notice:
continue
key = notice["plugin"]
try:
parsed_date = dateutil.parser.parse(notice["date"])
notice["timestamp"] = parsed_date.timetuple()
except Exception as e:
self._logger.warn("Error while parsing date {!r} for plugin notice "
"of plugin {}, ignoring notice: {}".format(notice["date"], key, str(e)))
continue
if not key in notices:
notices[key] = []
notices[key].append(notice)
self._notices = notices
return True
def _is_octoprint_compatible(self, octoprint_version, compatibility_entries):
"""
Tests if the current ``octoprint_version`` is compatible to any of the provided ``compatibility_entries``.
"""
for octo_compat in compatibility_entries:
try:
if not any(octo_compat.startswith(c) for c in ("<", "<=", "!=", "==", ">=", ">", "~=", "===")):
octo_compat = ">={}".format(octo_compat)
s = next(pkg_resources.parse_requirements("OctoPrint" + octo_compat))
if octoprint_version in s:
break
except:
self._logger.exception("Something is wrong with this compatibility string for OctoPrint: {}".format(octo_compat))
else:
return False
return True
@staticmethod
def _is_os_compatible(current_os, compatibility_entries):
"""
Tests if the ``current_os`` or ``sys.platform`` are blacklisted or whitelisted in ``compatibility_entries``
"""
if len(compatibility_entries) == 0:
# shortcut - no compatibility info means we are compatible
return True
negative_entries = map(lambda x: x[1:], filter(lambda x: x.startswith("!"), compatibility_entries))
positive_entries = filter(lambda x: not x.startswith("!"), compatibility_entries)
negative_match = False
if negative_entries:
# check if we are blacklisted
negative_match = current_os in negative_entries or any(map(lambda x: sys.platform.startswith(x), negative_entries))
positive_match = True
if positive_entries:
# check if we are whitelisted
positive_match = current_os in positive_entries or any(map(lambda x: sys.platform.startswith(x), positive_entries))
return positive_match and not negative_match
@classmethod
def _get_os(cls):
for identifier, platforms in cls.OPERATING_SYSTEMS.items():
if (callable(platforms) and platforms(sys.platform)) or (isinstance(platforms, list) and sys.platform in platforms):
return identifier
else:
return "unmapped"
def _get_octoprint_version_string(self):
return VERSION
def _get_octoprint_version(self, base=False):
octoprint_version_string = self._get_octoprint_version_string()
if "-" in octoprint_version_string:
octoprint_version_string = octoprint_version_string[:octoprint_version_string.find("-")]
octoprint_version = pkg_resources.parse_version(octoprint_version_string)
# A leading v is common in github release tags and old setuptools doesn't remove it. While OctoPrint's
# versions should never contains such a prefix, we'll make sure to have stuff behave the same
# regardless of setuptools version anyhow.
if octoprint_version and isinstance(octoprint_version, tuple) and octoprint_version[0].lower() == "*v":
octoprint_version = octoprint_version[1:]
if base:
if isinstance(octoprint_version, tuple):
# old setuptools
base_version = []
for part in octoprint_version:
if part.startswith("*"):
break
base_version.append(part)
base_version.append("*final")
octoprint_version = tuple(base_version)
else:
# new setuptools
octoprint_version = pkg_resources.parse_version(octoprint_version.base_version)
return octoprint_version
@property
def _reconnect_hooks(self):
reconnect_hooks = self.__class__.RECONNECT_HOOKS
reconnect_hook_provider_hooks = self._plugin_manager.get_hooks("octoprint.plugin.pluginmanager.reconnect_hooks")
for name, hook in reconnect_hook_provider_hooks.items():
try:
result = hook()
if isinstance(result, (list, tuple)):
reconnect_hooks.extend(filter(lambda x: isinstance(x, basestring), result))
except:
self._logger.exception("Error while retrieving additional hooks for which a "
"reconnect is required from plugin {name}".format(**locals()))
return reconnect_hooks
def _get_plugins(self):
plugins = self._plugin_manager.plugins
hidden = self._settings.get(["hidden"])
result = []
for key, plugin in plugins.items():
if key in hidden:
continue
result.append(self._to_external_plugin(plugin))
return result
def _to_external_plugin(self, plugin):
return dict(
key=plugin.key,
name=plugin.name,
description=plugin.description,
disabling_discouraged=gettext(plugin.disabling_discouraged) if plugin.disabling_discouraged else False,
author=plugin.author,
version=plugin.version,
url=plugin.url,
license=plugin.license,
bundled=plugin.bundled,
managable=plugin.managable,
enabled=plugin.enabled,
safe_mode_victim=getattr(plugin, "safe_mode_victim", False),
safe_mode_enabled=getattr(plugin, "safe_mode_enabled", False),
pending_enable=(not plugin.enabled and not getattr(plugin, "safe_mode_enabled", False) and plugin.key in self._pending_enable),
pending_disable=((plugin.enabled or getattr(plugin, "safe_mode_enabled", False)) and plugin.key in self._pending_disable),
pending_install=(self._plugin_manager.is_plugin_marked(plugin.key, "installed")),
pending_uninstall=(self._plugin_manager.is_plugin_marked(plugin.key, "uninstalled")),
origin=plugin.origin.type,
notifications = self._get_notifications(plugin)
)
def _get_notifications(self, plugin):
key = plugin.key
if not plugin.enabled:
return
if key not in self._notices:
return
octoprint_version = self._get_octoprint_version(base=True)
plugin_notifications = self._notices.get(key, [])
def filter_relevant(notification):
return "text" in notification and "date" in notification and \
("versions" not in notification or plugin.version in notification["versions"]) and \
("octoversions" not in notification or self._is_octoprint_compatible(octoprint_version, notification["octoversions"]))
def map_notification(notification):
return self._to_external_notification(key, notification)
return filter(lambda x: x is not None,
map(map_notification,
filter(filter_relevant,
plugin_notifications)))
def _to_external_notification(self, key, notification):
return dict(key=key,
date=time.mktime(notification["timestamp"]),
text=notification["text"],
link=notification.get("link"),
versions=notification.get("versions", []),
important=notification.get("important", False))
__plugin_name__ = "Plugin Manager"
__plugin_author__ = "Gina Häußge"
__plugin_url__ = "http://docs.octoprint.org/en/master/bundledplugins/pluginmanager.html"
__plugin_description__ = "Allows installing and managing OctoPrint plugins"
__plugin_license__ = "AGPLv3"
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = PluginManagerPlugin()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.server.http.bodysize": __plugin_implementation__.increase_upload_bodysize,
"octoprint.ui.web.templatetypes": __plugin_implementation__.get_template_types
}
|
agpl-3.0
| -7,321,240,752,470,460,000
| 38.024486
| 209
| 0.68723
| false
| 3.528828
| false
| false
| false
|
opendatakosovo/data-centar
|
import-budzet.py
|
1
|
4132
|
import argparse
from importer.rashodi_manager import RashodiDataImporter
from importer.prihodi_manager import PrihodiDataImporter
rashodi_importer = RashodiDataImporter()
prihodi_importer = PrihodiDataImporter()
def main_importer(data, municipalities):
mun_list = municipalities.split(",")
data_source = data.split(",")
for mun in mun_list:
if mun in ["all", "prijepolje"]:
for data in data_source:
if data == "prihodi":
prihodi_importer.data_importer_of_municipality_prijepolje("prijepolje", "prihodi")
elif data == "rashodi":
rashodi_importer.data_importer_of_municipality_prijepolje()
if mun in ["all", "vranje"]:
for data in data_source:
if data == "prihodi":
prihodi_importer.data_importer_of_municipality_vranje("vranje", "prihodi")
elif data == "rashodi":
rashodi_importer.data_importer_of_municipality_vranje()
if mun in ["all", "loznica"]:
for data in data_source:
if data == "prihodi":
prihodi_importer.data_importer_of_municipality_loznica("loznitsa", "prihodi")
elif data == "rashodi":
rashodi_importer.data_importer_of_municipality_loznica()
if mun in ["all", "sombor"]:
for data in data_source:
if data == "prihodi":
prihodi_importer.data_importer_of_municipality_sombor("sombor", "prihodi")
elif data == "rashodi":
rashodi_importer.data_importer_of_municipality_sombor()
if mun in ["all", "valjevo"]:
for data in data_source:
if data == "prihodi":
prihodi_importer.data_importer_of_municipality_valjevo("valjevo", "prihodi")
elif data == "rashodi":
rashodi_importer.data_importer_of_municipality_valjevo()
if mun in ["all", "indjija"]:
for data in data_source:
if data == "prihodi":
prihodi_importer.data_importer_of_municipality_indjija("indjija", "prihodi")
elif data == "rashodi":
rashodi_importer.data_importer_of_municipality_indjija()
if mun in ["all", "cacak"]:
for data in data_source:
if data == "prihodi":
prihodi_importer.data_importer_of_municipality_cacak("chachak", "prihodi")
elif data == "rashodi":
rashodi_importer.data_importer_of_municipality_cacak()
if mun in ["all", "kraljevo"]:
for data in data_source:
if data == "prihodi":
prihodi_importer.data_importer_of_municipality_krajlevo("kraljevo", "prihodi")
elif data == "rashodi":
rashodi_importer.data_importer_of_municipality_krajlevo()
if mun in ["all", "zvezdara"]:
for data in data_source:
if data == "prihodi":
prihodi_importer.data_importer_of_municipality_zvezdara("zvezdara", "prihodi")
elif data == "rashodi":
rashodi_importer.data_importer_of_municipality_zvezdara()
if mun in ["all", "novi_beograd"]:
for data in data_source:
if data == "prihodi":
prihodi_importer.data_importer_of_municipality_novi_beograd("novi-beograd", "prihodi")
elif data == "rashodi":
rashodi_importer.data_importer_of_municipality_novi_beograd()
if __name__ == '__main__':
# Initialize arguments
parser = argparse.ArgumentParser()
parser.add_argument("--municipalities", help="The data source we want to import for municipality")
parser.add_argument("--data", help="The data source we want to import")
args = parser.parse_args()
# Read the arguments and run the function
municipalities_sr = args.municipalities
data_sr = args.data
main_importer(data_sr, municipalities_sr)
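# Example invocation (illustrative):
#   python import-budzet.py --municipalities "vranje,loznica" --data "prihodi,rashodi"
# or, to import both data sources for every supported municipality:
#   python import-budzet.py --municipalities all --data "prihodi,rashodi"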
|
gpl-2.0
| 7,470,330,547,350,416,000
| 42.505263
| 106
| 0.571152
| false
| 3.440466
| false
| false
| false
|
IdeaSolutionsOnline/ERP4R
|
core/objs/linha_entrega.py
|
1
|
4855
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = 'António Anacleto'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "António Anacleto"
__status__ = "Development"
__model_name__ = 'linha_entrega.LinhaEntrega'
import auth, base_models
from orm import *
from form import *
try:
from my_produto import Produto
except:
from produto import Produto
try:
from my_unidade import Unidade
except:
from unidade import Unidade
class LinhaEntrega(Model, View):
def __init__(self, **kargs):
Model.__init__(self, **kargs)
self.__name__ = 'linha_entrega'
self.__title__ = 'Linhas de Entrega'
self.__model_name__ = __model_name__
self.__list_edit_mode__ = 'inline'
self.__get_options__ = ['produto']
self.entrega = parent_field(view_order=1, name='Entrega', args='style:visibility="hidden"', model_name='entrega.Entrega', nolabel=True, onlist=False, column='numero')
self.ean = string_field(view_order=2, name='EAN', size=45, onchange='ean_onchange')
self.produto = choice_field(view_order=3, name='Produto', args='required tabIndex="-1"', size=60, onchange='produto_onchange', model='produto', column='nome', options="model.get_opts('Produto', '_sellable()')")
self.quantidade = decimal_field(view_order=4, name='Quantidade', size=20, sum=True, onchange='valores_onchange', default=to_decimal(1))
self.unidade = combo_field(view_order=5, name='Unidade', args='required tabIndex="-1"', size=40, onchange='produto_onchange', model='unidade', column='nome', options="model.get_opts('Unidade','()')")
self.valor_unitario = currency_field(view_order=6, name='Valor Unitário', args='tabIndex="-1"', size=20, sum=True, onchange='valores_onchange', default=to_decimal(1))
self.desconto = percent_field(view_order=7, name='Desconto', args='tabIndex="-1"', size=20, onchange='valores_onchange')
self.iva = percent_field(view_order=8, name='IVA', args='readonly="readonly" tabIndex="-1"', size=20, nolabel=True, search=False)
self.valor_total = currency_field(view_order=9, name='Valor Total', args='readonly="readonly" tabIndex="-1"', size=20, sum=True, default=to_decimal(1))
def get_opts(self, model, tipo):
return eval(model + '().get_options' + tipo)
def ean_onchange(self, record):
result = record.copy()
product = Produto(where='referencia = {ean}'.format(ean=record['ean'])).get()
if len(product) != 0:
product = product[0]
for key in ['quantidade', 'valor_unitario', 'valor_total']:
result[key] = to_decimal(result[key])
if result[key] <= to_decimal(0):
result[key] = to_decimal(1)
unidade = record['unidade']
if not record['unidade']:
unidade = product['unidade_medida_venda']
terminal = get_terminal(bottle.request.session['terminal'])
result['valor_unitario'] = to_decimal(Produto().get_sale_price(product['id'], terminal, result['quantidade'], unidade))
result['valor_total'] = to_decimal(result['quantidade']) * to_decimal(result['valor_unitario'])
result['iva'] = to_decimal(product['iva'])
result['unidade'] = unidade
result['produto'] = product['id']
else:
result = {}
return result
def valores_onchange(self, record):
result = record.copy()
for key in ['quantidade', 'valor_unitario', 'valor_total']:
result[key] = to_decimal(result[key])
if result[key] <= to_decimal(0):
result[key] = to_decimal(1)
result['valor_total'] = to_decimal(result['quantidade']) * to_decimal(result['valor_unitario'])
return result
def produto_onchange(self, record):
result = record.copy()
product = Produto().get(key=record['produto'])
if len(product) != 0:
product = product[0]
for key in ['quantidade', 'valor_unitario', 'valor_total']:
result[key]= to_decimal(result[key])
if result[key] <= to_decimal(0):
result[key] = to_decimal(1)
unidade = record['unidade']
if not record['unidade']:
unidade = product['unidade_medida_venda']
terminal = get_terminal(bottle.request.session['terminal'])
result['valor_unitario'] = to_decimal(Produto().get_sale_price(product['id'], terminal, result['quantidade'], unidade))
result['valor_total'] = result['quantidade'] * result['valor_unitario']
result['iva'] = to_decimal(product['iva'])
result['ean'] = product['referencia']
result['unidade'] = unidade
else:
result={}
return result
|
mit
| -3,195,471,058,506,067,000
| 48.510204
| 218
| 0.599959
| false
| 3.364771
| false
| false
| false
|
endlessm/chromium-browser
|
third_party/chromite/utils/attrs_freezer_unittest.py
|
1
|
2645
|
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test the attrs_freezer module."""
from __future__ import print_function
import six
from chromite.lib import cros_test_lib
from chromite.utils import attrs_freezer
class FrozenAttributesTest(cros_test_lib.TestCase):
"""Tests FrozenAttributesMixin functionality."""
class DummyClass(object):
"""Any class that does not override __setattr__."""
class SetattrClass(object):
"""Class that does override __setattr__."""
SETATTR_OFFSET = 10
def __setattr__(self, attr, value):
"""Adjust value here to later confirm that this code ran."""
object.__setattr__(self, attr, self.SETATTR_OFFSET + value)
def _TestBasics(self, cls):
# pylint: disable=attribute-defined-outside-init
def _Expected(val):
return getattr(cls, 'SETATTR_OFFSET', 0) + val
obj = cls()
obj.a = 1
obj.b = 2
self.assertEqual(_Expected(1), obj.a)
self.assertEqual(_Expected(2), obj.b)
obj.Freeze()
self.assertRaises(attrs_freezer.Error, setattr, obj, 'a', 3)
self.assertEqual(_Expected(1), obj.a)
self.assertRaises(attrs_freezer.Error, setattr, obj, 'c', 3)
self.assertFalse(hasattr(obj, 'c'))
def testFrozenByMetaclass(self):
"""Test attribute freezing with FrozenAttributesClass."""
@six.add_metaclass(attrs_freezer.Class)
class DummyByMeta(self.DummyClass):
"""Class that freezes DummyClass using metaclass construct."""
self._TestBasics(DummyByMeta)
@six.add_metaclass(attrs_freezer.Class)
class SetattrByMeta(self.SetattrClass):
"""Class that freezes SetattrClass using metaclass construct."""
self._TestBasics(SetattrByMeta)
def testFrozenByMixinFirst(self):
"""Test attribute freezing with Mixin first in hierarchy."""
class Dummy(attrs_freezer.Mixin, self.DummyClass):
"""Class that freezes DummyClass using mixin construct."""
self._TestBasics(Dummy)
class Setattr(attrs_freezer.Mixin, self.SetattrClass):
"""Class that freezes SetattrClass using mixin construct."""
self._TestBasics(Setattr)
def testFrozenByMixinLast(self):
"""Test attribute freezing with Mixin last in hierarchy."""
class Dummy(self.DummyClass, attrs_freezer.Mixin):
"""Class that freezes DummyClass using mixin construct."""
self._TestBasics(Dummy)
class Setattr(self.SetattrClass, attrs_freezer.Mixin):
"""Class that freezes SetattrClass using mixin construct."""
self._TestBasics(Setattr)
|
bsd-3-clause
| -4,560,031,592,534,890,000
| 30.86747
| 72
| 0.699811
| false
| 3.778571
| true
| false
| false
|
ArcherSys/ArcherSys
|
Lib/lzma.py
|
1
|
58253
|
"""Interface to the liblzma compression library.
This module provides a class for reading and writing compressed files,
classes for incremental (de)compression, and convenience functions for
one-shot (de)compression.
These classes and functions support both the XZ and legacy LZMA
container formats, as well as raw compressed data streams.
"""
__all__ = [
"CHECK_NONE", "CHECK_CRC32", "CHECK_CRC64", "CHECK_SHA256",
"CHECK_ID_MAX", "CHECK_UNKNOWN",
"FILTER_LZMA1", "FILTER_LZMA2", "FILTER_DELTA", "FILTER_X86", "FILTER_IA64",
"FILTER_ARM", "FILTER_ARMTHUMB", "FILTER_POWERPC", "FILTER_SPARC",
"FORMAT_AUTO", "FORMAT_XZ", "FORMAT_ALONE", "FORMAT_RAW",
"MF_HC3", "MF_HC4", "MF_BT2", "MF_BT3", "MF_BT4",
"MODE_FAST", "MODE_NORMAL", "PRESET_DEFAULT", "PRESET_EXTREME",
"LZMACompressor", "LZMADecompressor", "LZMAFile", "LZMAError",
"open", "compress", "decompress", "is_check_supported",
]
import builtins
import io
from _lzma import *
from _lzma import _encode_filter_properties, _decode_filter_properties
_MODE_CLOSED = 0
_MODE_READ = 1
_MODE_READ_EOF = 2
_MODE_WRITE = 3
_BUFFER_SIZE = 8192
class LZMAFile(io.BufferedIOBase):
"""A file object providing transparent LZMA (de)compression.
An LZMAFile can act as a wrapper for an existing file object, or
refer directly to a named file on disk.
Note that LZMAFile provides a *binary* file interface - data read
is returned as bytes, and data to be written must be given as bytes.
"""
def __init__(self, filename=None, mode="r", *,
format=None, check=-1, preset=None, filters=None):
"""Open an LZMA-compressed file in binary mode.
filename can be either an actual file name (given as a str or
bytes object), in which case the named file is opened, or it can
be an existing file object to read from or write to.
mode can be "r" for reading (default), "w" for (over)writing,
"x" for creating exclusively, or "a" for appending. These can
equivalently be given as "rb", "wb", "xb" and "ab" respectively.
format specifies the container format to use for the file.
If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the
default is FORMAT_XZ.
check specifies the integrity check to use. This argument can
only be used when opening a file for writing. For FORMAT_XZ,
the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not
support integrity checks - for these formats, check must be
omitted, or be CHECK_NONE.
When opening a file for reading, the *preset* argument is not
meaningful, and should be omitted. The *filters* argument should
also be omitted, except when format is FORMAT_RAW (in which case
it is required).
When opening a file for writing, the settings used by the
compressor can be specified either as a preset compression
level (with the *preset* argument), or in detail as a custom
filter chain (with the *filters* argument). For FORMAT_XZ and
FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset
level. For FORMAT_RAW, the caller must always specify a filter
chain; the raw compressor does not support preset compression
levels.
preset (if provided) should be an integer in the range 0-9,
optionally OR-ed with the constant PRESET_EXTREME.
filters (if provided) should be a sequence of dicts. Each dict
should have an entry for "id" indicating ID of the filter, plus
additional entries for options to the filter.
"""
self._fp = None
self._closefp = False
self._mode = _MODE_CLOSED
self._pos = 0
self._size = -1
if mode in ("r", "rb"):
if check != -1:
raise ValueError("Cannot specify an integrity check "
"when opening a file for reading")
if preset is not None:
raise ValueError("Cannot specify a preset compression "
"level when opening a file for reading")
if format is None:
format = FORMAT_AUTO
mode_code = _MODE_READ
# Save the args to pass to the LZMADecompressor initializer.
# If the file contains multiple compressed streams, each
# stream will need a separate decompressor object.
self._init_args = {"format":format, "filters":filters}
self._decompressor = LZMADecompressor(**self._init_args)
self._buffer = b""
self._buffer_offset = 0
elif mode in ("w", "wb", "a", "ab", "x", "xb"):
if format is None:
format = FORMAT_XZ
mode_code = _MODE_WRITE
self._compressor = LZMACompressor(format=format, check=check,
preset=preset, filters=filters)
else:
raise ValueError("Invalid mode: {!r}".format(mode))
if isinstance(filename, (str, bytes)):
if "b" not in mode:
mode += "b"
self._fp = builtins.open(filename, mode)
self._closefp = True
self._mode = mode_code
elif hasattr(filename, "read") or hasattr(filename, "write"):
self._fp = filename
self._mode = mode_code
else:
raise TypeError("filename must be a str or bytes object, or a file")
def close(self):
"""Flush and close the file.
May be called more than once without error. Once the file is
closed, any other operation on it will raise a ValueError.
"""
if self._mode == _MODE_CLOSED:
return
try:
if self._mode in (_MODE_READ, _MODE_READ_EOF):
self._decompressor = None
self._buffer = b""
elif self._mode == _MODE_WRITE:
self._fp.write(self._compressor.flush())
self._compressor = None
finally:
try:
if self._closefp:
self._fp.close()
finally:
self._fp = None
self._closefp = False
self._mode = _MODE_CLOSED
@property
def closed(self):
"""True if this file is closed."""
return self._mode == _MODE_CLOSED
def fileno(self):
"""Return the file descriptor for the underlying file."""
self._check_not_closed()
return self._fp.fileno()
def seekable(self):
"""Return whether the file supports seeking."""
return self.readable() and self._fp.seekable()
def readable(self):
"""Return whether the file was opened for reading."""
self._check_not_closed()
return self._mode in (_MODE_READ, _MODE_READ_EOF)
def writable(self):
"""Return whether the file was opened for writing."""
self._check_not_closed()
return self._mode == _MODE_WRITE
# Mode-checking helper functions.
def _check_not_closed(self):
if self.closed:
raise ValueError("I/O operation on closed file")
def _check_can_read(self):
if self._mode not in (_MODE_READ, _MODE_READ_EOF):
self._check_not_closed()
raise io.UnsupportedOperation("File not open for reading")
def _check_can_write(self):
if self._mode != _MODE_WRITE:
self._check_not_closed()
raise io.UnsupportedOperation("File not open for writing")
def _check_can_seek(self):
if self._mode not in (_MODE_READ, _MODE_READ_EOF):
self._check_not_closed()
raise io.UnsupportedOperation("Seeking is only supported "
"on files open for reading")
if not self._fp.seekable():
raise io.UnsupportedOperation("The underlying file object "
"does not support seeking")
# Fill the readahead buffer if it is empty. Returns False on EOF.
def _fill_buffer(self):
if self._mode == _MODE_READ_EOF:
return False
# Depending on the input data, our call to the decompressor may not
# return any data. In this case, try again after reading another block.
while self._buffer_offset == len(self._buffer):
rawblock = (self._decompressor.unused_data or
self._fp.read(_BUFFER_SIZE))
if not rawblock:
if self._decompressor.eof:
self._mode = _MODE_READ_EOF
self._size = self._pos
return False
else:
raise EOFError("Compressed file ended before the "
"end-of-stream marker was reached")
if self._decompressor.eof:
# Continue to next stream.
self._decompressor = LZMADecompressor(**self._init_args)
try:
self._buffer = self._decompressor.decompress(rawblock)
except LZMAError:
# Trailing data isn't a valid compressed stream; ignore it.
self._mode = _MODE_READ_EOF
self._size = self._pos
return False
else:
self._buffer = self._decompressor.decompress(rawblock)
self._buffer_offset = 0
return True
# Read data until EOF.
# If return_data is false, consume the data without returning it.
def _read_all(self, return_data=True):
# The loop assumes that _buffer_offset is 0. Ensure that this is true.
self._buffer = self._buffer[self._buffer_offset:]
self._buffer_offset = 0
blocks = []
while self._fill_buffer():
if return_data:
blocks.append(self._buffer)
self._pos += len(self._buffer)
self._buffer = b""
if return_data:
return b"".join(blocks)
# Read a block of up to n bytes.
# If return_data is false, consume the data without returning it.
def _read_block(self, n, return_data=True):
# If we have enough data buffered, return immediately.
end = self._buffer_offset + n
if end <= len(self._buffer):
data = self._buffer[self._buffer_offset : end]
self._buffer_offset = end
self._pos += len(data)
return data if return_data else None
# The loop assumes that _buffer_offset is 0. Ensure that this is true.
self._buffer = self._buffer[self._buffer_offset:]
self._buffer_offset = 0
blocks = []
while n > 0 and self._fill_buffer():
if n < len(self._buffer):
data = self._buffer[:n]
self._buffer_offset = n
else:
data = self._buffer
self._buffer = b""
if return_data:
blocks.append(data)
self._pos += len(data)
n -= len(data)
if return_data:
return b"".join(blocks)
def peek(self, size=-1):
"""Return buffered data without advancing the file position.
Always returns at least one byte of data, unless at EOF.
The exact number of bytes returned is unspecified.
"""
self._check_can_read()
if not self._fill_buffer():
return b""
return self._buffer[self._buffer_offset:]
def read(self, size=-1):
"""Read up to size uncompressed bytes from the file.
If size is negative or omitted, read until EOF is reached.
Returns b"" if the file is already at EOF.
"""
self._check_can_read()
if size == 0:
return b""
elif size < 0:
return self._read_all()
else:
return self._read_block(size)
def read1(self, size=-1):
"""Read up to size uncompressed bytes, while trying to avoid
making multiple reads from the underlying stream.
Returns b"" if the file is at EOF.
"""
# Usually, read1() calls _fp.read() at most once. However, sometimes
# this does not give enough data for the decompressor to make progress.
# In this case we make multiple reads, to avoid returning b"".
self._check_can_read()
if (size == 0 or
# Only call _fill_buffer() if the buffer is actually empty.
# This gives a significant speedup if *size* is small.
(self._buffer_offset == len(self._buffer) and not self._fill_buffer())):
return b""
if size > 0:
data = self._buffer[self._buffer_offset :
self._buffer_offset + size]
self._buffer_offset += len(data)
else:
data = self._buffer[self._buffer_offset:]
self._buffer = b""
self._buffer_offset = 0
self._pos += len(data)
return data
def readline(self, size=-1):
"""Read a line of uncompressed bytes from the file.
The terminating newline (if present) is retained. If size is
non-negative, no more than size bytes will be read (in which
case the line may be incomplete). Returns b'' if already at EOF.
"""
self._check_can_read()
# Shortcut for the common case - the whole line is in the buffer.
if size < 0:
end = self._buffer.find(b"\n", self._buffer_offset) + 1
if end > 0:
line = self._buffer[self._buffer_offset : end]
self._buffer_offset = end
self._pos += len(line)
return line
return io.BufferedIOBase.readline(self, size)
def write(self, data):
"""Write a bytes object to the file.
Returns the number of uncompressed bytes written, which is
always len(data). Note that due to buffering, the file on disk
may not reflect the data written until close() is called.
"""
self._check_can_write()
compressed = self._compressor.compress(data)
self._fp.write(compressed)
self._pos += len(data)
return len(data)
# Rewind the file to the beginning of the data stream.
def _rewind(self):
self._fp.seek(0, 0)
self._mode = _MODE_READ
self._pos = 0
self._decompressor = LZMADecompressor(**self._init_args)
self._buffer = b""
self._buffer_offset = 0
def seek(self, offset, whence=0):
"""Change the file position.
The new position is specified by offset, relative to the
position indicated by whence. Possible values for whence are:
0: start of stream (default): offset must not be negative
1: current stream position
2: end of stream; offset must not be positive
Returns the new file position.
        Note that seeking is emulated, so depending on the parameters,
this operation may be extremely slow.
"""
self._check_can_seek()
# Recalculate offset as an absolute file position.
if whence == 0:
pass
elif whence == 1:
offset = self._pos + offset
elif whence == 2:
# Seeking relative to EOF - we need to know the file's size.
if self._size < 0:
self._read_all(return_data=False)
offset = self._size + offset
else:
raise ValueError("Invalid value for whence: {}".format(whence))
# Make it so that offset is the number of bytes to skip forward.
if offset < self._pos:
self._rewind()
else:
offset -= self._pos
# Read and discard data until we reach the desired position.
self._read_block(offset, return_data=False)
return self._pos
def tell(self):
"""Return the current file position."""
self._check_not_closed()
return self._pos
def open(filename, mode="rb", *,
format=None, check=-1, preset=None, filters=None,
encoding=None, errors=None, newline=None):
"""Open an LZMA-compressed file in binary or text mode.
filename can be either an actual file name (given as a str or bytes
object), in which case the named file is opened, or it can be an
existing file object to read from or write to.
The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb",
"a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text
mode.
The format, check, preset and filters arguments specify the
compression settings, as for LZMACompressor, LZMADecompressor and
LZMAFile.
For binary mode, this function is equivalent to the LZMAFile
constructor: LZMAFile(filename, mode, ...). In this case, the
encoding, errors and newline arguments must not be provided.
For text mode, a LZMAFile object is created, and wrapped in an
io.TextIOWrapper instance with the specified encoding, error
handling behavior, and line ending(s).
"""
if "t" in mode:
if "b" in mode:
raise ValueError("Invalid mode: %r" % (mode,))
else:
if encoding is not None:
raise ValueError("Argument 'encoding' not supported in binary mode")
if errors is not None:
raise ValueError("Argument 'errors' not supported in binary mode")
if newline is not None:
raise ValueError("Argument 'newline' not supported in binary mode")
lz_mode = mode.replace("t", "")
binary_file = LZMAFile(filename, lz_mode, format=format, check=check,
preset=preset, filters=filters)
if "t" in mode:
return io.TextIOWrapper(binary_file, encoding, errors, newline)
else:
return binary_file
def compress(data, format=FORMAT_XZ, check=-1, preset=None, filters=None):
"""Compress a block of data.
Refer to LZMACompressor's docstring for a description of the
optional arguments *format*, *check*, *preset* and *filters*.
For incremental compression, use an LZMACompressor instead.
"""
comp = LZMACompressor(format, check, preset, filters)
return comp.compress(data) + comp.flush()
def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None):
"""Decompress a block of data.
Refer to LZMADecompressor's docstring for a description of the
optional arguments *format*, *check* and *filters*.
For incremental decompression, use an LZMADecompressor instead.
"""
results = []
while True:
decomp = LZMADecompressor(format, memlimit, filters)
try:
res = decomp.decompress(data)
except LZMAError:
if results:
break # Leftover data is not a valid LZMA/XZ stream; ignore it.
else:
raise # Error on the first iteration; bail out.
results.append(res)
if not decomp.eof:
raise LZMAError("Compressed data ended before the "
"end-of-stream marker was reached")
data = decomp.unused_data
if not data:
break
return b"".join(results)
|
mit
| -5,395,204,604,067,377,000
| 36.851202
| 84
| 0.584073
| false
| 4.235658
| false
| false
| false
|
primoz-k/parilis
|
config/wsgi.py
|
1
|
1723
|
"""
WSGI config for parilis project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
# application = get_wsgi_application()
if os.environ.get("DJANGO_SETTINGS_MODULE") == "config.settings.production":
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
application = get_wsgi_application()
application = Sentry(application)
else:
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
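# A minimal, illustrative middleware wrapper of the kind mentioned above; the
# "X-Served-By" header name is arbitrary and used purely for demonstration.
# def add_served_by_header(app):
#     def wrapped(environ, start_response):
#         def _start_response(status, headers, exc_info=None):
#             headers.append(("X-Served-By", "parilis"))
#             return start_response(status, headers, exc_info)
#         return app(environ, _start_response)
#     return wrapped
# application = add_served_by_header(application)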
|
bsd-3-clause
| -8,953,276,399,421,357,000
| 41.02439
| 79
| 0.785258
| false
| 4.243842
| false
| false
| false
|
gloryofrobots/opparse
|
parsers/ipy/lexicon.py
|
1
|
6194
|
from opparse.lexicon import Lexicon, token, keyword
class IpyLexicon(Lexicon):
# TOKEN TYPES
TT_INT = "TT_INT"
TT_STR = "TT_STR"
TT_NAME = "TT_NAME"
TT_FUN = "TT_FUN"
TT_FOR = "TT_FOR"
TT_WHILE = "TT_WHILE"
TT_BREAK = "TT_BREAK"
TT_CONTINUE = "TT_CONTINUE"
TT_CLASS = "TT_CLASS"
TT_DEF = "TT_DEF"
TT_IF = "TT_IF"
TT_ELIF = "TT_ELIF"
TT_ELSE = "TT_ELSE"
TT_IN = "TT_IN"
TT_AS = "TT_AS"
TT_IS = "TT_IS"
TT_IS_NOT = "TT_IS_NOT"
TT_NOT_IN = "TT_NOT_IN"
TT_AND = "TT_AND"
TT_NOT = "TT_AND"
TT_OR = "TT_OR"
TT_TRUE = "TT_TRUE"
TT_FALSE = "TT_FALSE"
TT_NONE = "TT_NONE"
TT_TRY = "TT_TRY"
TT_RAISE = "TT_RAISE"
TT_YIELD = "TT_YIELD"
TT_RETURN = "TT_RETURN"
TT_EXCEPT = "TT_EXCEPT"
TT_FINALLY = "TT_FINALLY"
TT_END = "TT_END"
TT_END_EXPR = "TT_END_EXPR"
TT_INDENT = "TT_INDENT"
TT_NEWLINE = "TT_NEWLINE"
TT_LAMBDA = "TT_LAMBDA"
TT_LCURLY = "TT_LCURLY"
TT_RCURLY = "TT_RCURLY"
TT_COMMA = "TT_COMMA"
TT_ASSIGN = "TT_ASSIGN"
TT_PLUS_ASSIGN = "TT_PLUS_ASSIGN"
TT_MINUS_ASSIGN = "TT_MINUS_ASSIGN"
TT_LPAREN = "TT_LPAREN"
TT_RPAREN = "TT_RPAREN"
TT_LSQUARE = "TT_LSQUARE"
TT_RSQUARE = "TT_RSQUARE"
TT_DOT = "TT_DOT"
TT_COLON = "TT_COLON"
TT_GT = "TT_GT"
TT_GE = "TT_GE"
TT_LE = "TT_LE"
TT_LT = "TT_LT"
TT_EQ = "TT_EQ"
TT_NE = "TT_NE"
TT_PLUS = "TT_PLUS"
TT_MINUS = "TT_MINUS"
TT_SLASH = "TT_SLASH"
TT_STAR = "TT_STAR"
TT_DOUBLE_STAR = "TT_DOUBLE_STAR"
TT_PERCENTS = "TT_PERCENTS"
TT_TILDE = "TT_TILDE"
TT_CARET = "TT_CARET"
TT_PIPE = "TT_PIPE"
TT_SHL = "TT_SHL"
TT_SHR = "TT_SHR"
TT_AMP = "TT_AMP"
TT_IMPORT = "TT_IMPORT"
TT_FROM = "TT_FROM"
# NODE_TYPES
NT_TRUE = "NT_TRUE"
NT_FALSE = "NT_FALSE"
NT_NONE = "NT_NONE"
NT_INT = "NT_INT"
NT_STR = "NT_STR"
NT_MULTI_STR = "NT_MULTI_STR"
NT_NAME = "NT_NAME"
NT_DICT = "NT_DICT"
NT_LIST = "NT_LIST"
NT_TUPLE = "NT_TUPLE"
NT_FUN = "NT_FUN"
NT_IF = "NT_IF"
NT_TRY = "NT_TRY"
NT_FOR = "NT_FOR"
NT_WHILE = "NT_WHILE"
NT_CONTINUE = "NT_CONTINUE"
NT_BREAK = "NT_BREAK"
NT_RAISE = "NT_RAISE"
NT_ASSIGN = "NT_ASSIGN"
NT_PLUS_ASSIGN = "NT_PLUS_ASSIGN"
NT_MINUS_ASSIGN = "NT_MINUS_ASSIGN"
NT_CALL = "NT_CALL"
NT_DOT = "NT_DOT"
NT_COMMA = "NT_COMMA"
NT_AS = "NT_AS"
NT_AND = "NT_AND"
NT_OR = "NT_OR"
NT_NOT = "NT_NOT"
NT_GT = "NT_GT"
NT_GE = "NT_GE"
NT_LE = "NT_LE"
NT_LT = "NT_LT"
NT_EQ = "NT_EQ"
NT_NE = "NT_NE"
NT_IN = "NT_IN"
NT_IS = "NT_IS"
NT_IS_NOT = "NT_IS_NOT"
NT_NOT_IN = "NT_NOT_IN"
NT_ADD = "NT_ADD"
NT_SUB = "NT_SUB"
NT_DIV = "NT_DIV"
NT_MUL = "NT_MUL"
NT_POW = "NT_POW"
NT_MOD = "NT_MOD"
NT_BXOR = "NT_BXOR"
NT_BNOT = "NT_BNOT"
NT_BOR = "NT_BOR"
NT_BAND = "NT_BAND"
NT_BSHL = "NT_BSHL"
NT_BSHR = "NT_BSHR"
NT_NEGATE = "NT_NEGATE"
NT_VARGS = "NT_VARGS"
NT_KVARGS = "NT_KVARGS"
NT_CLASS = "NT_CLASS"
NT_IMPORT = "NT_IMPORT"
NT_IMPORT_FROM = "NT_IMPORT_FROM"
NT_IMPORT_ALL = "NT_IMPORT_ALL"
RULES = [
(token('\n'), TT_NEWLINE),
(token(' '), -1),
(token('#[^\n]*'), -1),
(token('is[\s]+not'), TT_IS_NOT),
(token('not[\s]+in'), TT_NOT_IN),
(keyword('if'), TT_IF),
(keyword('elif'), TT_ELIF),
(keyword('else'), TT_ELSE),
(keyword('end'), TT_END),
(keyword('is'), TT_IS),
(keyword('and'), TT_AND),
(keyword('or'), TT_OR),
(keyword('not'), TT_NOT),
(keyword('True'), TT_TRUE),
(keyword('False'), TT_FALSE),
(keyword('None'), TT_NONE),
(keyword('raise'), TT_RAISE),
(keyword('return'), TT_RETURN),
(keyword('yield'), TT_YIELD),
(keyword('try'), TT_TRY),
(keyword('except'), TT_EXCEPT),
(keyword('finally'), TT_FINALLY),
(keyword('lambda'), TT_LAMBDA),
(keyword('fun'), TT_FUN),
(keyword('def'), TT_DEF),
(keyword('class'), TT_CLASS),
(keyword('while'), TT_WHILE),
(keyword('for'), TT_FOR),
(keyword('in'), TT_IN),
(keyword('break'), TT_BREAK),
(keyword('continue'), TT_CONTINUE),
(keyword('import'), TT_IMPORT),
(keyword('from'), TT_FROM),
(keyword('as'), TT_AS),
(token("[0-9]+"), TT_INT),
(token('"([^\\\"]+|\\.)*"'), TT_STR),
(token('[a-zA-Z_][0-9a-zA-Z_]*'), TT_NAME),
(token('\;'), TT_END_EXPR),
(token('\{'), TT_LCURLY),
(token('\}'), TT_RCURLY),
(token('\,'), TT_COMMA),
(token('\('), TT_LPAREN),
(token('\)'), TT_RPAREN),
(token('\['), TT_LSQUARE),
(token('\]'), TT_RSQUARE),
(token('\.'), TT_DOT),
(token(':'), TT_COLON),
(token('>>'), TT_SHR),
(token('<<'), TT_SHL),
(token('\^'), TT_CARET),
(token('\&'), TT_AMP),
(token('\~'), TT_TILDE),
(token('\|'), TT_PIPE),
(token('\+='), TT_PLUS_ASSIGN),
(token('\-='), TT_MINUS_ASSIGN),
(token('\*\*'), TT_DOUBLE_STAR),
(token('=='), TT_EQ),
(token('>='), TT_GE),
(token('>'), TT_GT),
(token('<'), TT_LT),
(token('<='), TT_LE),
(token('=='), TT_EQ),
(token('!='), TT_NE),
(token('\+'), TT_PLUS),
(token('\-'), TT_MINUS),
(token('\*'), TT_STAR),
(token('\/'), TT_SLASH),
(token('\%'), TT_PERCENTS),
(token('='), TT_ASSIGN),
]
TERM_BLOCK = [TT_END]
TERM_EXP = [TT_END_EXPR]
TERM_CONDITION = [TT_COLON]
TERM_FOR_CONDITION = [TT_IN]
TERM_IF_BODY = [TT_ELSE, TT_ELIF] + TERM_BLOCK
TERM_TRY = [TT_EXCEPT]
TERM_EXCEPT = [TT_FINALLY, TT_EXCEPT] + TERM_BLOCK
TERM_FUN_SIGNATURE = [TT_COLON]
TERM_FROM_IMPORTED = [TT_IMPORT]
LEVELS_IF = [TT_ELSE, TT_ELIF]
LEVELS_TRY = [TT_EXCEPT, TT_FINALLY]
LEVELS_FOR = [TT_ELSE]
ASSIGNMENT_TOKENS = [TT_ASSIGN, TT_PLUS_ASSIGN, TT_MINUS_ASSIGN]
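# The RULES table above is order-sensitive: compound patterns such as
# 'is[\s]+not' and 'not[\s]+in' must be tried before the bare keywords 'is',
# 'not' and 'in'. A minimal, illustrative first-match scan of a single phrase,
# using only the standard re module (opparse's own matching machinery is not
# assumed here):
if __name__ == "__main__":
    import re
    ordered_rules = [(r"is[\s]+not", "TT_IS_NOT"), (r"is", "TT_IS")]
    source = "is not None"
    for pattern, token_type in ordered_rules:
        if re.match(pattern, source):
            print(token_type)  # TT_IS_NOT; reversing the rule order would give TT_IS
            break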
|
gpl-3.0
| -1,211,293,951,085,114,400
| 24.285714
| 68
| 0.481757
| false
| 2.450158
| false
| false
| false
|
mucow24/roboviva
|
roboviva/latex.py
|
1
|
7662
|
# Roboviva - Better cue sheets for everyone
# Copyright (C) 2015 Mike Kocurek
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cue
import re
def _makeClimb(climb_type):
'''Very simple utility method -- provides a common way to specify climb types'''
return r"$\underset{\textbf{" + climb_type + r"}}{\text{\large \Mountain}}$"
def _instructionToLatex(instruction, modifier):
'''Maps a cue.Instruction the latex that should be used to render it'''
if instruction == cue.Instruction.CAT_1:
return _makeClimb("1")
elif instruction == cue.Instruction.CAT_2:
return _makeClimb("2")
elif instruction == cue.Instruction.CAT_3:
return _makeClimb("3")
elif instruction == cue.Instruction.CAT_4:
return _makeClimb("4")
elif instruction == cue.Instruction.CAT_5:
return _makeClimb("5")
elif instruction == cue.Instruction.CAT_HC:
return _makeClimb("HC")
elif instruction == cue.Instruction.SUMMIT:
return _makeClimb("End")
elif instruction == cue.Instruction.DANGER:
return r"\Large \danger "
elif instruction == cue.Instruction.FIRST_AID:
return r"\raisebox{-0.15em}{\Plus} "
else:
# all others can be rendered as-is, in bold:
return r"\textbf{" + _escape(modifier) + _escape(instruction) + "}"
def _escape(text):
r''' Escapes &, #, and other characters in 'text' so they don't break the
latex render.'''
ret = re.sub(r'\\([^\\]?)', r'\\textbackslash \1', text)
ret = ret.replace("_", r"\textunderscore ")
ret = ret.replace("$", "\$")
ret = ret.replace("#", "\#")
ret = ret.replace("&", "\&")
ret = ret.replace("|", r'$|$')
ret = ret.replace("<", r'$<$')
ret = ret.replace(">", r'$\Rightarrow$')
ret = ret.replace("%", r'\%')
ret = ret.replace('{', r'\{')
ret = ret.replace('}', r'\}')
return ret
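# Illustrative example of _escape on a hypothetical cue note (not from a real route):
#   _escape("50% grade & gravel #2")  ->  "50\% grade \& gravel \#2"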
def _format(text):
'''Looks for markdown-style *emphasis* and **strong emphasis** in the text,
turning it into \emph and \textbf, accordingly.'''
# Step 0: Escape any whitespace-delimited *'s and **'s:
text = re.sub(ur'\s\*\s', ur' \* ', text)
text = re.sub(ur'\s\*\*\s', ur' \*\* ', text)
# Do this in two passes. Each pass will replace **...** with \textbf{...},
# and *...* with \emph{...}, where "..." DOES NOT CONTAIN ANY NESTED **...**
# or *...* PATTERNS. We should do this to fixed point, but if people are
# seriously doing this:
# **Foo *bar **baz *foobar******
# Screw 'em :)
Num_Passes = 2
for p in xrange(Num_Passes):
text = re.sub(ur'(\*\*)(?!\s)((\\.|[^\\\*])*?[^\s\\])\1',
ur'\\textbf{\2}',
text)
text = re.sub(ur'\*(?!\s)((\\.|[^\\\*])*?[^\s\\*])\*',
ur'\emph{\1}',
text)
# Finally, un-escape any escaped *'s:
text = re.sub(ur'\\(\*|_)', ur'\1', text)
return text
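# Illustrative example of _format on a hypothetical cue description:
#   _format("Turn **left** onto *Main St*")  ->  "Turn \textbf{left} onto \emph{Main St}"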
def _entryColor(entry):
'''Figures out what color, if any, this entry should have. Returns a color
string, if appropriate, or 'None' if this entry doesn't need to be
colored.'''
# Figure out row color:
color = None
if entry.color == cue.Color.YELLOW:
color = ur'{yellow}'
elif entry.color == cue.Color.GRAY:
color = ur'[gray]{0.8}'
return color
def _entryToLatex(entry):
'''Converts a cue.Entry into a latex supertabular row string'''
color_str = ""
note_str = ""
for_str = ""
color = _entryColor(entry)
# Escape all user-provided strings:
esc_note = _escape(entry.note)
esc_description = _escape(entry.description)
if color:
color_str = ur'\rowcolor%s' % color
if entry.note:
# If the user left the description empty, but added a note, treat the note
  # as if it were the description. Otherwise, append the note as an actual
# note after the description.
if esc_description.strip() == "":
note_str = esc_note
else:
note_str = ur' \newline \textit{%s}' % esc_note
if entry.for_distance:
for_str = "%5.1f" % entry.for_distance
instruction_str = _instructionToLatex(entry.instruction, entry.modifier)
note_str = _format(note_str)
description_str = _format(esc_description)
return r"%s %s & %5.1f & %s%s & %s \\ \hline" % (color_str,
instruction_str,
entry.absolute_distance,
description_str,
note_str,
for_str)
def makeLatex(route):
''' Makes a full latex document from a cue.Route object
route - a Cue.Route object, fully initialized.
Returns the Latex output generated from 'route', as a string.
'''
ents = route.entries
route_id = _escape("%s" % route.id)
route_name = _escape("%s" % route.name)
ret = _makeHeader(route)
for ent in ents:
ret = ret + _entryToLatex(ent) + "\n"
ret = ret + LatexFooter
return ret
def _makeHeader(route):
'''
Generates the beginning of a Latex document, meaning everything from \documentclass to the beginning of the supertable.
route: a cue.Route object to use when filling in the header
'''
route_id = route.id
route_name = route.name
elevation_gain_ft = route.elevation_gain_ft
total_distance_mi = route.length_mi
header = unicode(r'''
\documentclass[11pt]{article}
\usepackage[left=0.20in,right=0.20in,top=0.7in,bottom=0.25in]{geometry}
\geometry{letterpaper}
\usepackage{colortbl}
\usepackage{supertabular}
\usepackage{amsmath}
\usepackage{helvet}
\usepackage{fourier}
\usepackage{bbding}
\usepackage[alpine]{ifsym}
\usepackage{fancyhdr}
\usepackage{lastpage}
\pagestyle{fancy}
\fancyhf{}''')
# Fill in left, right headers.
lhead = None
rhead = r"\emph{Route \#%d}" % route_id
# We stick the total distance + climb after the route title if it exists,
# otherwise we put it after the route #:
if elevation_gain_ft:
route_stats_esc = _escape("%.1f mi / %d ft" % (total_distance_mi, elevation_gain_ft))
else:
route_stats_esc= _escape("%.1f mi" % (total_distance_mi))
if route_name:
lhead = r"\emph{%s (%s)}" % (_escape(route_name), route_stats_esc)
else:
# Stick stats after the right header:
rhead += r" \emph{(%s)}" % route_stats_esc
if lhead:
header += unicode(r'''
\lhead{\small %s}''' % lhead)
if rhead:
header += unicode(r'''
\rhead{\small %s}''' % rhead)
header += unicode(r'''
\fancyfoot[C]{\footnotesize{\emph{Page~\thepage~of~\pageref{LastPage}}}}
\setlength{\footskip}{0.0in}
\setlength{\headsep}{0.2in}
\renewcommand{\familydefault}{\sfdefault}
\begin{document}
\renewcommand{\arraystretch}{1.15}
\twocolumn
\tablehead{
\hline
\rowcolor[gray]{0}
\textbf{\textcolor{white}{Go}} &
\textbf{\textcolor{white}{At}} &
\textbf{\textcolor{white}{On}} &
\textbf{\textcolor{white}{For}} \\
\hline
}
\tabletail{\hline}
\tablelasttail{\hline}
\begin{center}
\begin{supertabular}{|c|p{0.30in}|p{2.25in}|l|}
\hline
''')
return header
LatexFooter = unicode(r'''
\end{supertabular}
\end{center}
\end{document}
''')
|
agpl-3.0
| -8,903,511,516,372,346,000
| 30.792531
| 121
| 0.623466
| false
| 3.237009
| false
| false
| false
|
chrinide/PyFV
|
pyfv/portfolio/mean_variance.py
|
1
|
2360
|
# -*- coding: utf-8 -*-
'''
@author: Hung-Hsin Chen
@mail: chenhh@par.cse.nsysu.edu.tw
Markowitz mean variance model
'''
from __future__ import division
from coopr.pyomo import *
from time import time
from datetime import date
import numpy as np
import pandas as pd
import os
import time
from coopr.opt import SolverFactory
def MeanVariance(symbols, risk_ret, money=1e6, risk_weight=1, solver="cplex"):
'''
@riskyRet, shape: M*T
minimize risk_weight * risk - (1-risk_weight) * mean
'''
t = time.time()
sigma = np.cov(risk_ret)
mu = risk_ret.mean(axis=1)
model = ConcreteModel()
#Set
model.symbols = range(len(symbols))
#decision variables
model.W = Var(model.symbols, within=NonNegativeReals)
#constraint
def CapitalConstraint_rule(model):
allocation = sum(model.W[idx] for idx in model.symbols)
return allocation == money
    model.CapitalConstraint = Constraint(rule=CapitalConstraint_rule)
#objective
def minRiskObjective_rule(model):
profit = sum(model.W[idx]*mu[idx] for idx in model.symbols)
risk = 0
for idx in model.symbols:
for jdx in model.symbols:
risk += model.W[idx] * model.W[jdx] * sigma[idx, jdx]
return 1./2 * risk_weight * risk - (1. - risk_weight) * profit
    model.minRiskObjective = Objective(rule=minRiskObjective_rule, sense=minimize)
# Create a solver
opt = SolverFactory(solver)
if solver =="cplex":
opt.options["threads"] = 4
instance = model.create()
results = opt.solve(instance)
instance.load(results)
obj = results.Solution.Objective.__default_objective__['value']
display(instance)
print "MeanVariance elapsed %.3f secs"%(time.time()-t)
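# Sanity check of the risk term minimised above (illustrative numbers only):
# for weights w and covariance matrix sigma the portfolio variance is
# w' * sigma * w, e.g.
#   w = np.array([0.5, 0.5])
#   sigma_example = np.array([[0.04, 0.01], [0.01, 0.09]])
#   np.dot(w, np.dot(sigma_example, w))   # -> 0.0375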
def testMeanVariance():
FileDir = os.path.abspath(os.path.curdir)
PklBasicFeaturesDir = os.path.join(FileDir, '..', 'pkl', 'BasicFeatures')
symbols = ['2330', '2317', '6505']
n_period = 100
ROIs = np.empty((len(symbols), n_period))
for idx, symbol in enumerate(symbols):
df = pd.read_pickle(os.path.join(PklBasicFeaturesDir, '%s.pkl'%symbol))
roi = df['adjROI'][:n_period]
ROIs[idx] = roi
MeanVariance(symbols, ROIs, money=1e6, risk_weight=1, solver="cplex")
if __name__ == '__main__':
testMeanVariance()
|
gpl-2.0
| -3,923,712,547,247,650,300
| 26.137931
| 79
| 0.618644
| false
| 3.430233
| false
| false
| false
|
pjryan126/solid-start-careers
|
store/api/zillow/venv/lib/python2.7/site-packages/pandas/sandbox/qtpandas.py
|
1
|
4363
|
'''
Easy integration of DataFrame into pyqt framework
@author: Jev Kuznetsov
'''
# flake8: noqa
# GH9615
import warnings
warnings.warn("The pandas.sandbox.qtpandas module is deprecated and will be "
"removed in a future version. We refer users to the external package "
"here: https://github.com/datalyze-solutions/pandas-qt")
try:
from PyQt4.QtCore import QAbstractTableModel, Qt, QVariant, QModelIndex
from PyQt4.QtGui import (
QApplication, QDialog, QVBoxLayout, QTableView, QWidget)
except ImportError:
from PySide.QtCore import QAbstractTableModel, Qt, QModelIndex
from PySide.QtGui import (
QApplication, QDialog, QVBoxLayout, QTableView, QWidget)
QVariant = lambda value=None: value
from pandas import DataFrame, Index
class DataFrameModel(QAbstractTableModel):
''' data model for a DataFrame class '''
def __init__(self):
super(DataFrameModel, self).__init__()
self.df = DataFrame()
def setDataFrame(self, dataFrame):
self.df = dataFrame
def signalUpdate(self):
''' tell viewers to update their data (this is full update, not
efficient)'''
self.layoutChanged.emit()
#------------- table display functions -----------------
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
try:
return self.df.columns.tolist()[section]
except (IndexError, ):
return QVariant()
elif orientation == Qt.Vertical:
try:
# return self.df.index.tolist()
return self.df.index.tolist()[section]
except (IndexError, ):
return QVariant()
def data(self, index, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if not index.isValid():
return QVariant()
return QVariant(str(self.df.ix[index.row(), index.column()]))
def flags(self, index):
flags = super(DataFrameModel, self).flags(index)
flags |= Qt.ItemIsEditable
return flags
def setData(self, index, value, role):
row = self.df.index[index.row()]
col = self.df.columns[index.column()]
if hasattr(value, 'toPyObject'):
# PyQt4 gets a QVariant
value = value.toPyObject()
else:
            # PySide gets a unicode
dtype = self.df[col].dtype
if dtype != object:
value = None if value == '' else dtype.type(value)
self.df.set_value(row, col, value)
return True
def rowCount(self, index=QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QModelIndex()):
return self.df.shape[1]
class DataFrameWidget(QWidget):
''' a simple widget for using DataFrames in a gui '''
def __init__(self, dataFrame, parent=None):
super(DataFrameWidget, self).__init__(parent)
self.dataModel = DataFrameModel()
self.dataTable = QTableView()
self.dataTable.setModel(self.dataModel)
layout = QVBoxLayout()
layout.addWidget(self.dataTable)
self.setLayout(layout)
# Set DataFrame
self.setDataFrame(dataFrame)
def setDataFrame(self, dataFrame):
self.dataModel.setDataFrame(dataFrame)
self.dataModel.signalUpdate()
self.dataTable.resizeColumnsToContents()
#-----------------stand alone test code
def testDf():
''' creates test dataframe '''
data = {'int': [1, 2, 3], 'float': [1.5, 2.5, 3.5],
'string': ['a', 'b', 'c'], 'nan': [np.nan, np.nan, np.nan]}
return DataFrame(data, index=Index(['AAA', 'BBB', 'CCC']),
columns=['int', 'float', 'string', 'nan'])
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
df = testDf() # make up some data
widget = DataFrameWidget(df)
        # DataFrameWidget defines no resizeColumnsToContents of its own;
        # size the underlying table view directly.
        widget.dataTable.resizeColumnsToContents()
layout = QVBoxLayout()
layout.addWidget(widget)
self.setLayout(layout)
if __name__ == '__main__':
import sys
import numpy as np
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
|
gpl-2.0
| -1,997,314,913,183,287,800
| 29.089655
| 84
| 0.595691
| false
| 4.054833
| false
| false
| false
|
radiasoft/radtrack
|
radtrack/ui/rbcbt.py
|
1
|
5681
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'radtrack/ui/rbcbt.ui'
#
# Created: Thu Jun 16 05:40:41 2016
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_RBCBT(object):
def setupUi(self, RBCBT):
RBCBT.setObjectName(_fromUtf8("RBCBT"))
RBCBT.resize(644, 938)
self.verticalLayout_4 = QtGui.QVBoxLayout(RBCBT)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.label = QtGui.QLabel(RBCBT)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout_4.addWidget(self.label)
self.elementButtonLayout = QtGui.QHBoxLayout()
self.elementButtonLayout.setObjectName(_fromUtf8("elementButtonLayout"))
self.verticalLayout_4.addLayout(self.elementButtonLayout)
self.splitter = QtGui.QSplitter(RBCBT)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setChildrenCollapsible(False)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.layoutWidget = QtGui.QWidget(self.splitter)
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.elementListLayout = QtGui.QVBoxLayout(self.layoutWidget)
self.elementListLayout.setMargin(0)
self.elementListLayout.setObjectName(_fromUtf8("elementListLayout"))
self.elementListLabel = QtGui.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.elementListLabel.setFont(font)
self.elementListLabel.setObjectName(_fromUtf8("elementListLabel"))
self.elementListLayout.addWidget(self.elementListLabel)
self.treeWidget = dtreeWidget(self.layoutWidget)
self.treeWidget.setObjectName(_fromUtf8("treeWidget"))
self.elementListLayout.addWidget(self.treeWidget)
self.layoutWidget1 = QtGui.QWidget(self.splitter)
self.layoutWidget1.setObjectName(_fromUtf8("layoutWidget1"))
self.beamlineEditorLayout = QtGui.QVBoxLayout(self.layoutWidget1)
self.beamlineEditorLayout.setMargin(0)
self.beamlineEditorLayout.setObjectName(_fromUtf8("beamlineEditorLayout"))
self.beamlineEditorLabel = QtGui.QLabel(self.layoutWidget1)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.beamlineEditorLabel.setFont(font)
self.beamlineEditorLabel.setObjectName(_fromUtf8("beamlineEditorLabel"))
self.beamlineEditorLayout.addWidget(self.beamlineEditorLabel)
self.beamlineEditorLayout_2 = QtGui.QHBoxLayout()
self.beamlineEditorLayout_2.setObjectName(_fromUtf8("beamlineEditorLayout_2"))
self.workingBeamline = dlistWidget(self.layoutWidget1)
self.workingBeamline.setObjectName(_fromUtf8("workingBeamline"))
self.beamlineEditorLayout_2.addWidget(self.workingBeamline)
self.saveBeamlineButton = QtGui.QPushButton(self.layoutWidget1)
self.saveBeamlineButton.setObjectName(_fromUtf8("saveBeamlineButton"))
self.beamlineEditorLayout_2.addWidget(self.saveBeamlineButton)
self.clearBeamlineButton = QtGui.QPushButton(self.layoutWidget1)
self.clearBeamlineButton.setObjectName(_fromUtf8("clearBeamlineButton"))
self.beamlineEditorLayout_2.addWidget(self.clearBeamlineButton)
self.beamlineEditorLayout.addLayout(self.beamlineEditorLayout_2)
self.layoutWidget2 = QtGui.QWidget(self.splitter)
self.layoutWidget2.setObjectName(_fromUtf8("layoutWidget2"))
self.graphicsLayout = QtGui.QVBoxLayout(self.layoutWidget2)
self.graphicsLayout.setMargin(0)
self.graphicsLayout.setObjectName(_fromUtf8("graphicsLayout"))
self.graphicsLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.graphicsLabel.setFont(font)
self.graphicsLabel.setObjectName(_fromUtf8("graphicsLabel"))
self.graphicsLayout.addWidget(self.graphicsLabel)
self.graphicsView = beamGraphicsWindow(self.layoutWidget2)
self.graphicsView.setObjectName(_fromUtf8("graphicsView"))
self.graphicsLayout.addWidget(self.graphicsView)
self.verticalLayout_4.addWidget(self.splitter)
self.retranslateUi(RBCBT)
QtCore.QMetaObject.connectSlotsByName(RBCBT)
def retranslateUi(self, RBCBT):
RBCBT.setWindowTitle(_translate("RBCBT", "Widget", None))
self.label.setText(_translate("RBCBT", "New Beamline Elements", None))
self.elementListLabel.setText(_translate("RBCBT", "Beamline Element List", None))
self.beamlineEditorLabel.setText(_translate("RBCBT", "Beamline Editor - Drag elements here to create beamlines", None))
self.saveBeamlineButton.setText(_translate("RBCBT", "Save Beamline", None))
self.clearBeamlineButton.setText(_translate("RBCBT", "Clear Beamline", None))
self.graphicsLabel.setText(_translate("RBCBT", "Graphical Preview", None))
from cbt import beamGraphicsWindow, dlistWidget, dtreeWidget
|
apache-2.0
| -5,649,579,195,361,632,000
| 48.833333
| 127
| 0.71924
| false
| 3.867257
| false
| false
| false
|
gplib/gplib
|
gplib/apps/extauth/ldapauth.py
|
1
|
16002
|
# -*- coding: utf-8 -*-
# Este archivo es parte de GPLib - http://gplib.org/
#
# GPlib es software libre desarrollado en la Facultad de Filosofía y Letras de
# la Universidad de Buenos Aires y liberado bajo los términos de la licencia
# GPLIB FILO www.gplib.org/licencia bajo los términos de GPL de GNU. Usted
# puede redistribuirlo y/o modificarlo bajo los términos de la licencia GPLIB
# FILO de GNU General Public License como esta publicado en la Free Software
# Foundation, tanto en la versión 3 de la licencia, o cualquiera de las
# versiones futuras Gplib es distribuido con el objetivo de que sea útil, pero
# SIN NINGUNA GARANTÍA DE FUNCIONAMIENTO; ni siquiera la garantía implícita de
# que sirva para un propósito particular. Cuando implemente este sistema
# sugerimos el registro en www.gplib.org/registro, con el fin de fomentar una
# comunidad de usuarios de GPLib. Ver la GNU General Public License para más
# detalles.http://www.gnu.org/licenses/>
#
#
# Este arquivo é parte do GPLib http://gplib.org/
#
# GPLib é sofware livre desenviolvido na Faculdade de Filosofia e Letras da
# Universidade de Buenos Aires e liberado sob os termos da licença GPLib FILO
# www.gplib.org/licencia/ sob os termos de GPL de GNU. Você pode redistribuí-lo
# e/ou modificá-lo sob os termos da licença pública geral GNU como publicado na
# Free Software Foundation , tanto na versão 3 da licença ou quaisquer
# versões futuras. GPLib é distribuído com o objetivo de que seja útil, mas SEM
# QUALQUER GARANTIA DE PERFORMANCE; nem a garantia implícita de que servem a uma
# finalidade específica. Quando você implementar este sistema sugerimos o
# registro em www.gplib.org/registro/, a fim de promover uma comunidade de
# usuarios do GPLib. Veja a GNU General Public License para mais detalles.
# http://www.gnu.org/licenses/
#
#
# This file is part of GPLib - http://gplib.org/
#
# GPLib is free software developed by Facultad de Filosofia y Letras Universidad
# de Buenos Aires and distributed under the scope of GPLIB FILO
# www.gplib.org/license and the GPL Public License GNU. You can redistribute it
# and/or modify it under the terms of the GPLIB FILO GNU General Public License
# as published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# GPLib is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. After roll your own version of GPLIB you may register
# at www.gplib.org/register to buld a comunity of users and developers. See the
# GNU General Public License for more details.
from django.conf import settings
from django.contrib.auth.models import User
import ldap
import logging
from gplib.apps.couchauth.libs import create_couch_user
class LDAPBackend(object):
"""
Authenticate a user against LDAP.
Requires python-ldap to be installed.
Requires the following things to be in settings.py:
LDAP_DEBUG -- boolean
Uses logging module for debugging messages.
LDAP_SERVER_URI -- string, ldap uri.
default: 'ldap://localhost'
LDAP_SEARCHDN -- string of the LDAP dn to use for searching
default: 'dc=localhost'
LDAP_SCOPE -- one of: ldap.SCOPE_*, used for searching
see python-ldap docs for the search function
default = ldap.SCOPE_SUBTREE
LDAP_SEARCH_FILTER -- formated string, the filter to use for searching for a
user. Used as: filterstr = LDAP_SEARCH_FILTER % username
default = 'cn=%s'
LDAP_UPDATE_FIELDS -- boolean, do we sync the db with ldap on each auth
default = True
Required unless LDAP_FULL_NAME is set:
LDAP_FIRST_NAME -- string, LDAP attribute to get the given name from
LDAP_LAST_NAME -- string, LDAP attribute to get the last name from
Optional Settings:
LDAP_FULL_NAME -- string, LDAP attribute to get name from, splits on ' '
LDAP_GID -- string, LDAP attribute to get group name/number from
LDAP_SU_GIDS -- list of strings, group names/numbers that are superusers
LDAP_STAFF_GIDS -- list of strings, group names/numbers that are staff
LDAP_EMAIL -- string, LDAP attribute to get email from
LDAP_DEFAULT_EMAIL_SUFFIX -- string, appened to username if no email found
LDAP_OPTIONS -- hash, python-ldap global options and their values
{ldap.OPT_X_TLS_CACERTDIR: '/etc/ldap/ca/'}
LDAP_ACTIVE_FIELD -- list of strings, LDAP attribute to get active status
from
LDAP_ACTIVE -- list of strings, allowed for active from LDAP_ACTIVE_FIELD
You must pick a method for determining the DN of a user and set the needed
settings:
- You can set LDAP_BINDDN and LDAP_BIND_ATTRIBUTE like:
LDAP_BINDDN = 'ou=people,dc=example,dc=com'
LDAP_BIND_ATTRIBUTE = 'uid'
and the user DN would be:
'uid=%s,ou=people,dc=example,dc=com' % username
- Look for the DN on the directory, this is what will happen if you do
not define the LDAP_BINDDN setting. In that case you may need to
define LDAP_PREBINDDN and LDAP_PREBINDPW if your LDAP server does not
allow anonymous queries. The search will be performed with the
LDAP_SEARCH_FILTER setting.
- Override the _pre_bind() method, which receives the ldap object and
the username as it's parameters and should return the DN of the user.
By inheriting this class you can change:
- How the dn to bind with is produced by overriding _pre_bind()
- What type of user object to use by overriding: _get_user_by_name(),
_create_user_object(), and get_user()
"""
import ldap
from django.conf import settings
from django.contrib.auth.models import User
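    # A minimal Django settings fragment for this backend might look like the
    # following sketch (host names and attribute names are placeholders, not
    # GPLib defaults):
    #
    #   AUTHENTICATION_BACKENDS = ('gplib.apps.extauth.ldapauth.LDAPBackend',)
    #   LDAP_SERVER_URI = 'ldap://ldap.example.com'
    #   LDAP_SEARCHDN = 'ou=people,dc=example,dc=com'
    #   LDAP_SEARCH_FILTER = 'uid=%s'
    #   LDAP_FIRST_NAME = 'givenName'
    #   LDAP_LAST_NAME = 'sn'
    #   LDAP_EMAIL = 'mail'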
def __init__(self):
self.settings = {
'LDAP_SERVER_URI': 'ldap://localhost',
'LDAP_SEARCHDN': 'dc=localhost',
'LDAP_SCOPE': ldap.SCOPE_SUBTREE,
'LDAP_SEARCH_FILTER': 'cn=%s',
'LDAP_UPDATE_FIELDS': True,
'LDAP_PREBINDDN': None,
'LDAP_PREBINDPW': None,
'LDAP_BINDDN': None,
'LDAP_BIND_ATTRIBUTE': None,
'LDAP_FIRST_NAME': None,
'LDAP_LAST_NAME': None,
'LDAP_FULL_NAME': None,
'LDAP_GID': None,
'LDAP_SU_GIDS': None,
'LDAP_STAFF_GIDS': None,
'LDAP_ACTIVE_FIELD': None,
'LDAP_ACTIVE': None,
'LDAP_EMAIL': None,
'LDAP_DEFAULT_EMAIL_SUFFIX': None,
'LDAP_OPTIONS': None,
'LDAP_DEBUG': True,
}
# Load settings from settings.py, put them on self.settings
# overriding the defaults.
for var in self.settings.iterkeys():
if hasattr(settings, var):
self.settings[var] = settings.__getattr__(var)
def authenticate(self, username=None, password=None):
# Make sure we have a user and pass
        if not username or password is None:
            if self.settings['LDAP_DEBUG']:
logging.info('LDAPBackend.authenticate failed: username or password empty: %s %s' % (
username, password))
return None
if self.settings['LDAP_OPTIONS']:
for k in self.settings['LDAP_OPTIONS']:
                self.ldap.set_option(k, self.settings['LDAP_OPTIONS'][k])
l = self.ldap.initialize(self.settings['LDAP_SERVER_URI'])
try:
bind_string = self._pre_bind(l, username)
except:
return None
if not bind_string:
if self.settings['LDAP_DEBUG']:
logging.info('LDAPBackend.authenticate failed: _pre_bind return no bind_string (%s, %s)' % (
l, username))
return None
try:
# Try to bind as the provided user. We leave the bind until
# the end for other ldap.search_s call to work authenticated.
l.bind_s(bind_string, password)
except (self.ldap.INVALID_CREDENTIALS,
self.ldap.UNWILLING_TO_PERFORM), exc:
# Failed user/pass (or missing password)
if self.settings['LDAP_DEBUG']:
logging.info('LDAPBackend.authenticate failed: %s' % exc)
l.unbind_s()
return None
try:
user = self._get_user_by_name(username)
except User.DoesNotExist:
user = self._get_ldap_user(l, username)
if user is not None:
if self.settings['LDAP_UPDATE_FIELDS']:
self._update_user(l, user)
l.unbind_s()
if self.settings['LDAP_DEBUG']:
if user is None:
logging.info('LDAPBackend.authenticate failed: user is None')
else:
logging.info('LDAPBackend.authenticate ok: %s %s' % (user, user.__dict__))
create_couch_user(username, password)
return user
# Functions provided to override to customize to your LDAP configuration.
def _pre_bind(self, l, username):
"""
Function that returns the dn to bind against ldap with.
called as: self._pre_bind(ldapobject, username)
"""
if not self.settings['LDAP_BINDDN']:
# When the LDAP_BINDDN setting is blank we try to find the
# dn binding anonymously or using LDAP_PREBINDDN
if self.settings['LDAP_PREBINDDN']:
try:
l.simple_bind_s(self.settings['LDAP_PREBINDDN'],
self.settings['LDAP_PREBINDPW'])
except self.ldap.LDAPError, exc:
if self.settings['LDAP_DEBUG']:
logging.info('LDAPBackend _pre_bind: LDAPError : %s' % exc)
logging.info("LDAP_PREBINDDN: "+self.settings['LDAP_PREBINDDN']+" PW "+self.settings['LDAP_PREBINDPW'])
return None
# Now do the actual search
filter = self.settings['LDAP_SEARCH_FILTER'] % username
result = l.search_s(self.settings['LDAP_SEARCHDN'],
self.settings['LDAP_SCOPE'], filter, attrsonly=1)
if len(result) != 1:
if self.settings['LDAP_DEBUG']:
logging.info('LDAPBackend _pre_bind: not exactly one result: %s (%s %s %s)' % (
result, self.settings['LDAP_SEARCHDN'], self.settings['LDAP_SCOPE'], filter))
return None
return result[0][0]
else:
# LDAP_BINDDN is set so we use it as a template.
return "%s=%s,%s" % (self.settings['LDAP_BIND_ATTRIBUTE'], username,
self.settings['LDAP_BINDDN'])
def _get_user_by_name(self, username):
"""
Returns an object of contrib.auth.models.User that has a matching
username.
called as: self._get_user_by_name(username)
"""
return User.objects.get(username=username)
def _create_user_object(self, username, password):
"""
Creates and returns an object of contrib.auth.models.User.
called as: self._create_user_object(username, password)
"""
return User(username=username, password=password)
# Required for an authentication backend
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except:
return None
# End of functions to override
def _get_ldap_user(self, l, username):
"""
Helper method, makes a user object and call update_user to populate
"""
# Generate a random password string.
password = User.objects.make_random_password(10)
user = self._create_user_object(username, password)
return user
def _update_user(self, l, user):
"""
Helper method, populates a user object with various attributes from
LDAP.
"""
username = user.username
filter = self.settings['LDAP_SEARCH_FILTER'] % username
# Get results of search and make sure something was found.
# At this point this shouldn't fail.
hold = l.search_s(self.settings['LDAP_SEARCHDN'],
self.settings['LDAP_SCOPE'], filter)
if len(hold) < 1:
raise AssertionError('No results found with: %s' % (filter))
dn = hold[0][0]
attrs = hold[0][1]
firstn = self.settings['LDAP_FIRST_NAME'] or None
lastn = self.settings['LDAP_LAST_NAME'] or None
emailf = self.settings['LDAP_EMAIL'] or None
if firstn:
if firstn in attrs:
user.first_name = attrs[firstn][0]
else:
raise NameError('Missing attribute: %s in result for %s'
% (firstn, dn))
if lastn:
if lastn in attrs:
user.last_name = attrs[lastn][0]
else:
raise NameError('Missing attribute: %s in result for %s'
% (lastn, dn))
if not firstn and not lastn and self.settings['LDAP_FULL_NAME']:
fulln = self.settings['LDAP_FULL_NAME']
if fulln in attrs:
tmp = attrs[fulln][0]
user.first_name = tmp.split(' ')[0]
user.last_name = ' '.join(tmp.split(' ')[1:])
else:
raise NameError('Missing attribute: %s in result for %s'
% (fulln, dn))
if emailf and emailf in attrs:
user.email = attrs[emailf][0]
elif self.settings['LDAP_DEFAULT_EMAIL_SUFFIX']:
user.email = username + self.settings['LDAP_DEFAULT_EMAIL_SUFFIX']
# Check if we are mapping an ldap id to check if the user is staff or super
# Other wise the user is created but not give access
if ('LDAP_GID' in self.settings
and self.settings['LDAP_GID'] in attrs):
# Turn off access flags
user.is_superuser = False
user.is_staff = False
check_staff_flag = True
gids = set(attrs[self.settings['LDAP_GID']])
# Check to see if we are mapping any super users
            if self.settings['LDAP_SU_GIDS']:
su_gids = set(self.settings['LDAP_SU_GIDS'])
# If any of the su_gids exist in the gid_data then the user is super
if (len(gids-su_gids) < len(gids)):
user.is_superuser = True
user.is_staff = True
# No need to check if a staff user
check_staff_flag = False
# Check for staff user?
            if self.settings['LDAP_STAFF_GIDS'] and check_staff_flag:
# We are checking to see if the user is staff
staff_gids = set(self.settings['LDAP_STAFF_GIDS'])
if (len(gids-staff_gids) < len(gids)):
user.is_staff = True
# Check if we need to see if a user is active
if ('LDAP_ACTIVE_FIELD' in self.settings
and self.settings['LDAP_ACTIVE_FIELD']):
user.is_active = False
            if (self.settings['LDAP_ACTIVE_FIELD'] in attrs
                    and self.settings['LDAP_ACTIVE']):
                active_data = set(attrs[self.settings['LDAP_ACTIVE_FIELD']])
                active_flags = set(self.settings['LDAP_ACTIVE'])
# if any of the active flags exist in the active data then
# the user is active
if (len(active_data-active_flags) < len(active_data)):
user.is_active = True
else:
# LDAP_ACTIVE_FIELD not defined, all users are active
user.is_active = True
user.save()
|
gpl-3.0
| 2,061,814,885,940,155,600
| 42.407609
| 127
| 0.605922
| false
| 3.890404
| false
| false
| false
|
DimaWittmann/Regenschirm
|
regenschirm/settings.py
|
1
|
2379
|
"""
Django settings for regenschirm project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fvt7qozy)2mgo!^gxlln-sx#*-absdfoe0_gqtryvvs_lc6l#$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'regenschirm.urls'
WSGI_APPLICATION = 'regenschirm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'database',
'USER': 'wittmann',
'PASSWORD': 'Dima-1993',
'HOST': 'localhost',
'PORT': '',
}
}
import dj_database_url
DATABASES['default'] = dj_database_url.config()
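# dj_database_url.config() reads the DATABASE_URL environment variable and
# overrides the hard-coded DATABASES['default'] above, e.g. (illustrative
# value only):
#   DATABASE_URL=postgres://wittmann:secret@localhost:5432/database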
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
|
gpl-2.0
| -3,644,200,480,265,471,000
| 24.042105
| 71
| 0.711223
| false
| 3.263374
| false
| false
| false
|
nguyenkims/projecteuler-python
|
src/p85.py
|
1
|
1049
|
def f(m,n):
'''return the number of rectangles that a m x n contains'''
s=0
for a in range(1,m+1):
for b in range(1,n+1):
s+= (m-a+1)*(n-b+1)
return s
print f(1,1),f(2,4), f(3,3)
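# Closed form for reference (not used below): an m x n grid contains
# C(m+1, 2) * C(n+1, 2) = m*(m+1)*n*(n+1)/4 axis-aligned rectangles,
# so f(2, 4) = 3*10 = 30 and f(3, 3) = 6*6 = 36, matching the prints above.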
def g(m,n):
''' the same as f(m,n) except g(m,n) is calculated recursively'''
if m==0:
return 0
elif m == 1 :
return n * (n+1) /2
else:
return 2* g(m-1,n) - g(m-2,n) + n*(n+1)/2
print g(1,1), g(2,1), g(2,3), g(3,3)
limit = 2 * 10 **6
M=200
N=2000
L={} # L contains (m,n,f(m,n))
def fillL():
for m in range(0,M):
for n in range(0,N):
if m==0:
L[(m,n)]=0
elif m == 1 :
L[(m,n)] = (n * (n+1)) /2
else:
L[(m,n)] = 2* L[(m-1,n)] - L[(m-2,n)] + n*(n+1)/2
fillL()
print 'L is filled'
# print L[(3,3)], L[(2,3)], L[(100,100)], L[(20,200)] , L[(672,854)]
def main() :
    minimum = 10 ** 6
for m in range(1,M):
for n in range(1, N):
if m*n + n*(n+1) + m*(m+1)> 3*limit:
pass
else:
t = L[(m,n)]
# t= g(m,n)
                if abs(t - limit) < minimum:
                    minimum = abs(t - limit)
print m,n,t, m*n
main()
|
mit
| -4,020,211,150,527,830,000
| 20.428571
| 68
| 0.481411
| false
| 1.924771
| false
| false
| false
|
mindbody/API-Examples
|
SDKs/Python/swagger_client/models/add_client_response.py
|
1
|
3305
|
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.client import Client # noqa: F401,E501
class AddClientResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'client': 'Client'
}
attribute_map = {
'client': 'Client'
}
def __init__(self, client=None): # noqa: E501
"""AddClientResponse - a model defined in Swagger""" # noqa: E501
self._client = None
self.discriminator = None
if client is not None:
self.client = client
@property
def client(self):
"""Gets the client of this AddClientResponse. # noqa: E501
Contains information about the client. # noqa: E501
:return: The client of this AddClientResponse. # noqa: E501
:rtype: Client
"""
return self._client
@client.setter
def client(self, client):
"""Sets the client of this AddClientResponse.
Contains information about the client. # noqa: E501
:param client: The client of this AddClientResponse. # noqa: E501
:type: Client
"""
self._client = client
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AddClientResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AddClientResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
bsd-2-clause
| 4,975,355,453,291,282,000
| 26.773109
| 119
| 0.554614
| false
| 4.270026
| false
| false
| false
|
jabelone/Unearthed2017
|
tensorflowshit/helloworld.py
|
1
|
3595
|
'''
HelloWorld example using TensorFlow library.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import print_function
import tensorflow as tf
import csv
import time
def getNetGraph(X, h1size):
with tf.name_scope('hidden'):
weights = tf.Variable(tf.random_normal([tf.size(X), h1size]), name='weights')
biases = tf.Variable(tf.zeros([h1size], tf.float32), name='biases')
hidden1 = tf.nn.relu(tf.matmul(X, weights)) + biases
with tf.name_scope('output'):
weights = tf.Variable(tf.random_normal([h1size, 1]), name='weights')
# weights = tf.Print(weights, [weights])
bias = tf.Variable(0.00, tf.float32, name='bias')
output = tf.matmul(hidden1, weights) + bias
return output
def loss(X, target):
#abs loss
return tf.abs(X - target)
def pruneRow(row, columnIndexes, targetColIndex):
prunedRow = [0 if row[index] == 'NULL' else row[index] for index in columnIndexes]
return (prunedRow, row[targetColIndex])
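# Example of what pruneRow produces (illustrative row; 'NULL' cells become 0,
# other cells stay as strings):
#   pruneRow(['1.5', 'NULL', '7', '42'], [0, 1, 2], 3) -> (['1.5', 0, '7'], '42')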
featuresColNames = ['Casing Pressure',
'Gas Flow (Volume)',
'Motor Speed',
'Motor Torque',
'Pump Speed Actual',
'Tubing Flow Meter',
'Tubing Pressure',
'Water Flow Mag from Separator']
targetColName = 'Downhole Gauge Pressure'
with open('D:/unearthed/Bottom Hole Pressure and Fluid Level Challenge/Data/Well1B3mths.csv',
newline='') as csvFile:
csvReader = csv.reader(csvFile)
allColNames = next(csvReader)
featuresColIndexes = [allColNames.index(name) for name in featuresColNames]
targetColIndex = allColNames.index(targetColName)
print("feature column indexes", featuresColIndexes)
print("target column index", targetColIndex)
learning_rate = 0.00005
learning_iterations = 100
hiddenLayerSize = 8
# trainingSet = [pruneRow(next(csvReader), featuresColIndexes, targetColIndex)
# for i in range(100)]
trainX = [[1,2,3,4,5,6,7,8]]
target = [[30]]
tf.set_random_seed(time.time())
targetPlaceholder = tf.placeholder(tf.float32, shape=[1,1], name='phTarget')
inputPlaceholder = tf.placeholder(tf.float32, shape = [1,len(featuresColIndexes)], name='phIn')
netGraph = getNetGraph(inputPlaceholder, hiddenLayerSize)
lossVal = loss(netGraph, targetPlaceholder)
trainOp = tf.train.GradientDescentOptimizer(learning_rate).minimize(lossVal)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init, feed_dict={inputPlaceholder: trainX, targetPlaceholder: target})
testSet = [next(csvReader) for i in range(50)]
x = 0
for line in csvReader:
x = x + 1
if x > 5000: break
pruned = pruneRow(line, featuresColIndexes, targetColIndex)
# print("Train row " + str(i) + ":", pruned)
# for epoch in range(learning_iterations):
sess.run(trainOp, feed_dict={inputPlaceholder: [pruned[0]],
targetPlaceholder: [[pruned[1]]]})
# print(sess.run(lossVal, feed_dict={inputPlaceholder: [pruned[0]],
# targetPlaceholder: [[pruned[1]]]}))
for i in range(len(testSet)):
testRow = pruneRow(testSet[i], featuresColIndexes, targetColIndex)
print ("Test Row " + str(i) + ":", testRow[1])
print(sess.run(netGraph, feed_dict={inputPlaceholder: [testRow[0]]}))
sess.close()
|
gpl-3.0
| 6,357,136,693,624,156,000
| 32.598131
| 99
| 0.625035
| false
| 3.562934
| true
| false
| false
|
smattis/BET-1
|
examples/FEniCS/BET_multiple_serial_models_script.py
|
1
|
5328
|
#! /usr/bin/env python
# Copyright (C) 2014-2019 The BET Development Team
r"""
This example requires the following external packages not shipped
with BET:
(1) An installation of FEniCS that can be run using the same
python as used for installing BET. See http://fenicsproject.org/
for more information.
(2) A copy of Launcher. See https://github.com/TACC/launcher for
more information. The user needs to set certain environment
variables inside of lbModel.py for this to run. See lbModel.py
for more information.
This example generates samples for a KL expansion associated with
a covariance defined by ``cov`` in computeSaveKL.py on an L-shaped mesh
that defines the permeability field for a Poisson equation solved in
myModel.py.
The quantities of interest (QoI) are defined as two spatial
averages of the solution to the PDE.
The user defines the dimension of the parameter space (corresponding
to the number of KL terms) and the number of samples in this space.
"""
import numpy as np
import bet.calculateP.simpleFunP as simpleFunP
import bet.calculateP.calculateP as calculateP
import bet.postProcess.plotP as plotP
import bet.postProcess.plotDomains as plotD
import bet.sample as samp
import bet.sampling.basicSampling as bsam
from lbModel import lb_model
from myModel import my_model
from Compute_Save_KL import computeSaveKL
# Interface BET to the model.
sampler = bsam.sampler(lb_model)
# Define the number of KL terms to use to represent permeability field
num_KL_terms = 2
# Compute and save the KL expansion -- can comment out after running once
computeSaveKL(num_KL_terms)
# Initialize input parameter sample set object
input_samples = samp.sample_set(num_KL_terms)
# Set parameter domain
KL_term_min = -3.0
KL_term_max = 3.0
input_samples.set_domain(np.repeat([[KL_term_min, KL_term_max]],
num_KL_terms,
axis=0))
'''
Suggested changes for user:
Try with and without random sampling.
If using regular sampling, try different numbers of samples
per dimension (be careful in the dimension is not 2).
'''
# Generate samples on the parameter space
randomSampling = False
if randomSampling is True:
input_samples = sampler.random_sample_set(
'random', input_samples, num_samples=1E2)
else:
input_samples = sampler.regular_sample_set(
input_samples, num_samples_per_dim=[10, 10])
'''
A standard Monte Carlo (MC) assumption is that every Voronoi cell
has the same volume. If a regular grid of samples was used, then
the standard MC assumption is true.
'''
MC_assumption = True
# Estimate volumes of Voronoi cells associated with the parameter samples
if MC_assumption is False:
input_samples.estimate_volume(n_mc_points=1E5)
else:
input_samples.estimate_volume_mc()
# Create the discretization object using the input samples
my_discretization = sampler.compute_QoI_and_create_discretization(
input_samples, savefile='FEniCS_Example.txt.gz')
'''
Suggested changes for user:
Try different reference parameters.
'''
# Define the reference parameter
#param_ref = np.zeros((1,num_KL_terms))
param_ref = np.ones((1, num_KL_terms))
# Compute the reference QoI
Q_ref = my_model(param_ref)
# Create some plots of input and output discretizations
plotD.scatter_2D(input_samples, ref_sample=param_ref[0, :],
filename='FEniCS_ParameterSamples.eps')
if Q_ref.size == 2:
plotD.show_data_domain_2D(my_discretization, Q_ref=Q_ref[0, :],
file_extension="eps")
'''
Suggested changes for user:
Try different ways of discretizing the probability measure on D defined
as a uniform probability measure on a rectangle or interval depending
on choice of QoI_num in myModel.py.
'''
randomDataDiscretization = False
if randomDataDiscretization is False:
simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
data_set=my_discretization, Q_ref=Q_ref[0, :], rect_scale=0.1,
cells_per_dimension=3)
else:
simpleFunP.uniform_partition_uniform_distribution_rectangle_scaled(
data_set=my_discretization, Q_ref=Q_ref[0, :], rect_scale=0.1,
M=50, num_d_emulate=1E5)
# calculate probabilities
calculateP.prob(my_discretization)
########################################
# Post-process the results
########################################
# calculate 2d marginal probs
(bins, marginals2D) = plotP.calculate_2D_marginal_probs(input_samples,
nbins=20)
# smooth 2d marginals probs (optional)
marginals2D = plotP.smooth_marginals_2D(marginals2D, bins, sigma=0.5)
# plot 2d marginals probs
plotP.plot_2D_marginal_probs(marginals2D, bins, input_samples, filename="FEniCS",
lam_ref=param_ref[0, :], file_extension=".eps",
plot_surface=False)
# calculate 1d marginal probs
(bins, marginals1D) = plotP.calculate_1D_marginal_probs(input_samples,
nbins=20)
# smooth 1d marginal probs (optional)
marginals1D = plotP.smooth_marginals_1D(marginals1D, bins, sigma=0.5)
# plot 2d marginal probs
plotP.plot_1D_marginal_probs(marginals1D, bins, input_samples, filename="FEniCS",
lam_ref=param_ref[0, :], file_extension=".eps")
|
gpl-3.0
| 435,122,958,647,670,100
| 34.052632
| 81
| 0.70458
| false
| 3.480078
| false
| false
| false
|
fengbeihong/tempest_automate_ironic
|
tempest/services/baremetal/v1/json/baremetal_client.py
|
1
|
15425
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.baremetal import base
from tempest.common import waiters
class BaremetalClientJSON(base.BaremetalClient):
"""
Base Tempest REST client for Ironic API v1.
"""
version = '1'
uri_prefix = 'v1'
@base.handle_errors
def list_nodes(self, **kwargs):
"""List all existing nodes."""
return self._list_request('nodes', **kwargs)
@base.handle_errors
def list_chassis(self):
"""List all existing chassis."""
return self._list_request('chassis')
@base.handle_errors
def list_chassis_nodes(self, chassis_uuid):
"""List all nodes associated with a chassis."""
return self._list_request('/chassis/%s/nodes' % chassis_uuid)
@base.handle_errors
def list_ports(self, **kwargs):
"""List all existing ports."""
return self._list_request('ports', **kwargs)
@base.handle_errors
def list_node_ports(self, uuid):
"""List all ports associated with the node."""
return self._list_request('/nodes/%s/ports' % uuid)
@base.handle_errors
def list_nodestates(self, uuid):
"""List all existing states."""
return self._list_request('/nodes/%s/states' % uuid)
@base.handle_errors
def list_ports_detail(self, **kwargs):
"""Details list all existing ports."""
return self._list_request('/ports/detail', **kwargs)
@base.handle_errors
def list_drivers(self):
"""List all existing drivers."""
return self._list_request('drivers')
@base.handle_errors
def show_node(self, uuid):
"""
Gets a specific node.
:param uuid: Unique identifier of the node in UUID format.
:return: Serialized node as a dictionary.
"""
return self._show_request('nodes', uuid)
@base.handle_errors
def show_node_by_instance_uuid(self, instance_uuid):
"""
Gets a node associated with given instance uuid.
        :param instance_uuid: UUID of the instance associated with the node.
:return: Serialized node as a dictionary.
"""
uri = '/nodes/detail?instance_uuid=%s' % instance_uuid
return self._show_request('nodes',
uuid=None,
uri=uri)
@base.handle_errors
def show_chassis(self, uuid):
"""
Gets a specific chassis.
:param uuid: Unique identifier of the chassis in UUID format.
:return: Serialized chassis as a dictionary.
"""
return self._show_request('chassis', uuid)
@base.handle_errors
def show_port(self, uuid):
"""
Gets a specific port.
:param uuid: Unique identifier of the port in UUID format.
:return: Serialized port as a dictionary.
"""
return self._show_request('ports', uuid)
@base.handle_errors
def show_port_by_address(self, address):
"""
Gets a specific port by address.
:param address: MAC address of the port.
:return: Serialized port as a dictionary.
"""
uri = '/ports/detail?address=%s' % address
return self._show_request('ports', uuid=None, uri=uri)
def show_driver(self, driver_name):
"""
Gets a specific driver.
:param driver_name: Name of driver.
:return: Serialized driver as a dictionary.
"""
return self._show_request('drivers', driver_name)
@base.handle_errors
def create_node(self, chassis_id=None, **kwargs):
"""
Create a baremetal node with the specified parameters.
:param cpu_arch: CPU architecture of the node. Default: x86_64.
:param cpus: Number of CPUs. Default: 8.
:param local_gb: Disk size. Default: 1024.
:param memory_mb: Available RAM. Default: 4096.
:param driver: Driver name. Default: "fake"
:return: A tuple with the server response and the created node.
"""
node = {'chassis_uuid': chassis_id,
'properties': {'cpu_arch': kwargs.get('cpu_arch', 'x86_64'),
'cpus': kwargs.get('cpus', 8),
'local_gb': kwargs.get('local_gb', 1024),
'memory_mb': kwargs.get('memory_mb', 4096)},
'driver': kwargs.get('driver', 'fake')}
return self._create_request('nodes', node)
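    # For example (illustrative values), create_node('chassis-uuid', cpus=4,
    # driver='agent_ipmitool') sends the following body to the 'nodes' resource:
    #   {'chassis_uuid': 'chassis-uuid',
    #    'properties': {'cpu_arch': 'x86_64', 'cpus': 4,
    #                   'local_gb': 1024, 'memory_mb': 4096},
    #    'driver': 'agent_ipmitool'}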
@base.handle_errors
def create_chassis(self, **kwargs):
"""
Create a chassis with the specified parameters.
:param description: The description of the chassis.
Default: test-chassis
:return: A tuple with the server response and the created chassis.
"""
chassis = {'description': kwargs.get('description', 'test-chassis')}
return self._create_request('chassis', chassis)
@base.handle_errors
def create_port(self, node_id, **kwargs):
"""
Create a port with the specified parameters.
:param node_id: The ID of the node which owns the port.
:param address: MAC address of the port.
:param extra: Meta data of the port. Default: {'foo': 'bar'}.
:param uuid: UUID of the port.
:return: A tuple with the server response and the created port.
"""
port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
'uuid': kwargs['uuid']}
if node_id is not None:
port['node_uuid'] = node_id
if kwargs['address'] is not None:
port['address'] = kwargs['address']
return self._create_request('ports', port)
@base.handle_errors
def delete_node(self, uuid):
"""
Deletes a node having the specified UUID.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('nodes', uuid)
@base.handle_errors
def delete_chassis(self, uuid):
"""
Deletes a chassis having the specified UUID.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('chassis', uuid)
@base.handle_errors
def delete_port(self, uuid):
"""
Deletes a port having the specified UUID.
:param uuid: The unique identifier of the port.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('ports', uuid)
@base.handle_errors
def update_node(self, uuid, **kwargs):
"""
Update the specified node.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the updated node.
"""
node_attributes = ('properties/cpu_arch',
'properties/cpus',
'properties/local_gb',
'properties/memory_mb',
'driver',
'instance_uuid')
patch = self._make_patch(node_attributes, **kwargs)
return self._patch_request('nodes', uuid, patch)
@base.handle_errors
def update_chassis(self, uuid, **kwargs):
"""
Update the specified chassis.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the updated chassis.
"""
chassis_attributes = ('description',)
patch = self._make_patch(chassis_attributes, **kwargs)
return self._patch_request('chassis', uuid, patch)
@base.handle_errors
def update_port(self, uuid, patch):
"""
Update the specified port.
:param uuid: The unique identifier of the port.
:param patch: List of dicts representing json patches.
:return: A tuple with the server response and the updated port.
"""
return self._patch_request('ports', uuid, patch)
@base.handle_errors
def set_node_power_state(self, node_uuid, state):
"""
Set power state of the specified node.
:param node_uuid: The unique identifier of the node.
:state: desired state to set (on/off/reboot).
"""
target = {'target': state}
return self._put_request('nodes/%s/states/power' % node_uuid,
target)
@base.handle_errors
def validate_driver_interface(self, node_uuid):
"""
Get all driver interfaces of a specific node.
:param uuid: Unique identifier of the node in UUID format.
"""
uri = '{pref}/{res}/{uuid}/{postf}'.format(pref=self.uri_prefix,
res='nodes',
uuid=node_uuid,
postf='validate')
return self._show_request('nodes', node_uuid, uri=uri)
@base.handle_errors
def set_node_boot_device(self, node_uuid, boot_device, persistent=False):
"""
Set the boot device of the specified node.
:param node_uuid: The unique identifier of the node.
:param boot_device: The boot device name.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
"""
request = {'boot_device': boot_device, 'persistent': persistent}
resp, body = self._put_request('nodes/%s/management/boot_device' %
node_uuid, request)
self.expected_success(204, resp.status)
return body
@base.handle_errors
def get_node_boot_device(self, node_uuid):
"""
Get the current boot device of the specified node.
:param node_uuid: The unique identifier of the node.
"""
path = 'nodes/%s/management/boot_device' % node_uuid
resp, body = self._list_request(path)
self.expected_success(200, resp.status)
return body
@base.handle_errors
def get_node_supported_boot_devices(self, node_uuid):
"""
Get the supported boot devices of the specified node.
:param node_uuid: The unique identifier of the node.
"""
path = 'nodes/%s/management/boot_device/supported' % node_uuid
resp, body = self._list_request(path)
self.expected_success(200, resp.status)
return body
@base.handle_errors
def get_console(self, node_uuid):
"""
Get connection information about the console.
:param node_uuid: Unique identifier of the node in UUID format.
"""
resp, body = self._show_request('nodes/states/console', node_uuid)
self.expected_success(200, resp.status)
return resp, body
@base.handle_errors
def set_console_mode(self, node_uuid, enabled):
"""
Start and stop the node console.
:param node_uuid: Unique identifier of the node in UUID format.
:param enabled: Boolean value; whether to enable or disable the
console.
"""
enabled = {'enabled': enabled}
resp, body = self._put_request('nodes/%s/states/console' % node_uuid,
enabled)
self.expected_success(202, resp.status)
return resp, body
@base.handle_errors
def create_node_advanced(self, driver, properties, driver_info, **kwargs):
"""
Create a baremetal node with the specified parameters.
:param driver: Driver name. Default: "fake"
:param properties
:param driver_info
:return: A tuple with the server response and the created node.
"""
node = {
'driver': driver,
'properties': properties,
'driver_info': driver_info
}
extra = kwargs.get('extra', None)
if extra is not None:
node['extra'] = extra
return self._create_request('nodes', node)
@base.handle_errors
def create_port_advanced(self, node_id, **kwargs):
"""
Create a port with the specified parameters.
:param node_id: The ID of the node which owns the port.
:param address: MAC address of the port.
:param extra: Meta data of the port. Default: {'foo': 'bar'}.
:param uuid: UUID of the port.
:return: A tuple with the server response and the created port.
"""
port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
'uuid': kwargs['uuid']}
if node_id is not None:
port['node_uuid'] = node_id
if kwargs['address'] is not None:
port['address'] = kwargs['address']
return self._create_request('ports', port)
@base.handle_errors
def update_node_advanced(self, uuid, **kwargs):
"""
Update the specified node.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the updated node.
"""
node_attributes = ('properties/capabilities',
)
patch = self._make_patch(node_attributes, **kwargs)
return self._patch_request('nodes', uuid, patch)
@base.handle_errors
def set_node_provision_state(self, node_uuid, state, optional=None):
"""
Set power state of the specified node.
:param node_uuid: The unique identifier of the node.
:param state: desired state to set
(active/deleted/rebuild/inspect/provide/manage/abort).
:param optional: configdrive or clean steps
"""
resp, versions = self.get_api_description()
target = {'target': state}
if optional is not None:
target.update(optional)
headers = {
'X-OpenStack-Ironic-API-Version': versions['default_version']['version']
}
return self._put_request('nodes/%s/states/provision' % node_uuid,
target, headers, extra_headers=True)
@base.handle_errors
def set_node_raid_config(self, node_uuid, target_raid_config):
"""
Set raid configuration of the node
:param node_uuid: The unique identifier of the node.
:param target_raid_config: raid configuration.
"""
resp, versions = self.get_api_description()
headers = {
'X-OpenStack-Ironic-API-Version': versions['default_version']['version']
}
return self._put_request('nodes/%s/states/raid' % node_uuid,
target_raid_config, headers, extra_headers=True)
|
apache-2.0
| -6,374,396,648,853,870,000
| 31.610994
| 84
| 0.580097
| false
| 4.338959
| true
| false
| false
|
anchore/anchore-engine
|
tests/unit/anchore_engine/apis/test_oauth.py
|
1
|
6264
|
import pytest
import time
from anchore_engine.apis.oauth import merge_client_metadata
from anchore_engine.apis.oauth import (
setup_oauth_client,
OAuth2Client,
CLIENT_GRANT_KEY,
ANONYMOUS_CLIENT_ID,
)
@pytest.mark.parametrize(
"existing_metadata, meta_to_add, expected_output",
[
(
{"grant_types": []},
{"grant_types": ["password"]},
{"grant_types": ["password"]},
),
(
{"grant_types": ["password"]},
{"grant_types": ["password"]},
{"grant_types": ["password"]},
),
(
{"grant_types": ["password"]},
{"grant_types": []},
{"grant_types": ["password"]},
),
(
{"grant_types": ["password"]},
{"grant_types": ["password", "bearer"]},
{"grant_types": ["password", "bearer"]},
),
(
{"grant_types": ["password", "foobar"]},
{"grant_types": ["password", "bearer"]},
{"grant_types": ["password", "bearer", "foobar"]},
),
(
{},
{"grant_types": ["password"]},
{"grant_types": ["password"]},
),
(
{},
{"grant_types": []},
{"grant_types": []},
),
(
None,
{"grant_types": []},
{"grant_types": []},
),
(
None,
{"grant_types": ["password"]},
{"grant_types": ["password"]},
),
],
)
def test_merge_client_metadata(existing_metadata, meta_to_add, expected_output):
"""
Unit test for merging client metadata records for the OAuth2Client
:param existing_metadata:
:param meta_to_add:
:param expected_output:
:return:
"""
merged = merge_client_metadata(existing_metadata, meta_to_add)
check_metadata(merged, expected_output)
def check_metadata(candidate: dict, expected: dict):
for k, v in expected.items():
if type(v) == list:
assert sorted(candidate.get(k)) == sorted(v)
else:
assert (
candidate.get(k) == v
), "Key {} from candidate {} did not match expected {}".format(
k, candidate, v
)
def password_oauth2_client():
c = OAuth2Client()
c.client_id = ANONYMOUS_CLIENT_ID
c.user_id = None
c.client_secret = None
# These are no-ops effectively since the client isn't authenticated itself
c.client_id_issued_at = time.time() - 100
c.client_secret_expires_at = time.time() + 1000
c.set_client_metadata(
{
"token_endpoint_auth_method": "none", # This should be a function of the grant type input but all of our types are this currently
"client_name": ANONYMOUS_CLIENT_ID,
"grant_types": ["password"],
}
)
return c
def legacy_password_oauth2_client():
c = OAuth2Client()
c.client_id = ANONYMOUS_CLIENT_ID
c.user_id = None
c.client_secret = None
# These are no-ops effectively since the client isn't authenticated itself
c.client_id_issued_at = time.time() - 100
c.client_secret_expires_at = time.time() + 1000
c.set_client_metadata(
{
"grant_types": ["password"],
}
)
return c
def no_metadata_oauth2_client():
c = OAuth2Client()
c.client_id = ANONYMOUS_CLIENT_ID
c.user_id = None
c.client_secret = None
# These are no-ops effectively since the client isn't authenticated itself
c.client_id_issued_at = time.time() - 100
c.client_secret_expires_at = time.time() + 1000
return c
def empty_metadata_oauth2_client():
c = OAuth2Client()
c.client_id = ANONYMOUS_CLIENT_ID
c.user_id = None
c.client_secret = None
# These are no-ops effectively since the client isn't authenticated itself
c.client_id_issued_at = time.time() - 100
c.client_secret_expires_at = time.time() + 1000
c.set_client_metadata({})
return c
def authorization_oauth2_client():
c = OAuth2Client()
c.client_id = ANONYMOUS_CLIENT_ID
c.user_id = None
c.client_secret = None
c.client_id_issued_at = time.time() - 100
c.client_secret_expires_at = time.time() + 1000
c.set_client_metadata(
{
"token_endpoint_auth_method": "none", # This should be a function of the grant type input but all of our types are this currently
"client_name": ANONYMOUS_CLIENT_ID,
"grant_types": ["authorization"],
}
)
return c
def combined_oauth2_client():
c = OAuth2Client()
c.client_id = ANONYMOUS_CLIENT_ID
c.user_id = None
c.client_secret = None
c.client_id_issued_at = time.time() - 100
c.client_secret_expires_at = time.time() + 1000
c.set_client_metadata(
{
"token_endpoint_auth_method": "none", # This should be a function of the grant type input but all of our types are this currently
"client_name": ANONYMOUS_CLIENT_ID,
"grant_types": ["authorization", "password"],
}
)
return c
@pytest.mark.parametrize(
"found_client, add_client, expected_result",
[
(
password_oauth2_client(),
authorization_oauth2_client(),
combined_oauth2_client(),
),
(
legacy_password_oauth2_client(),
authorization_oauth2_client(),
combined_oauth2_client(),
),
(
no_metadata_oauth2_client(),
authorization_oauth2_client(),
authorization_oauth2_client(),
),
(
empty_metadata_oauth2_client(),
authorization_oauth2_client(),
authorization_oauth2_client(),
),
],
)
def test_setup_oauth_client(found_client, add_client, expected_result):
"""
:param found_client:
:param add_client:
:param expected_result:
:return:
"""
assert found_client.client_id == expected_result.client_id
result = setup_oauth_client(found_client, add_client)
assert result is not None
check_metadata(
result.client_metadata,
expected_result.client_metadata,
)
|
apache-2.0
| -2,403,788,572,274,175,000
| 27.472727
| 142
| 0.554598
| false
| 3.812538
| true
| false
| false
|
Sriee/epi
|
data_structures/backtracking/combination_sums.py
|
1
|
5229
|
def combination_sum(candidates, target):
"""
Leet code. Solution -> Accepted
    Run Time: 100 ms. Not optimal, but this gives a template for solving backtracking
    problems.
    Given an array without duplicates, find all combinations of candidates that sum to
    the target. Each element may be reused any number of times.
Examples:
nums: [2, 3, 5] target = 8
Output should be
[
[2, 2, 2, 2],
[2, 3, 3]
[3, 5]
]
:param candidates: Given array
:param target: target sum
    :return: list of combinations whose sum equals the target
"""
res = []
def dfs(candidates, target, index, path):
if target == 0:
res.append(path.copy())
return
for i in range(index, len(candidates)):
if target - candidates[i] < 0:
continue
path.append(candidates[i])
dfs(candidates, target - candidates[i], i, path)
path.pop()
dfs(candidates, target, 0, [])
return res
def combination_sum2(candidates, target):
"""
Leet code. Solution -> Accepted
Run Time: 52 ms. Optimal solution
    Given an array that may contain duplicates, find all unique combinations that sum to
    the target. Each candidate may be used at most once, and the output must not contain
    duplicate combinations.
Examples:
nums: [10, 1, 2, 7, 6, 1, 5] target = 8
Output should be
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
:param candidates: Given array
:param target: target sum
    :return: list of unique combinations whose sum equals the target
"""
    candidates.sort()  # sorting makes the duplicate skip and the early break below valid
    res = []
def dfs(target, idx, path):
if target == 0:
res.append(path)
return
for i in range(idx, len(candidates)):
if i > idx and candidates[i] == candidates[i - 1]:
continue
            if candidates[i] > target:  # candidates are sorted, so no later value fits either
break
dfs(target - candidates[i], i + 1, path + [candidates[i]])
dfs(target, 0, [])
return res
def combination_sum3(k, n):
"""
Leet code. Solution -> Accepted
    Run time: 36 ms. Optimal solution
Find all possible combinations of k numbers that add up to a number n, given that
only numbers from 1 to 9 can be used and each combination should be a unique set of numbers.
Examples:
k: 3 n: 7
Output should be
[
[1, 2, 4],
]
:param k: Length of combination
:param n: target sum
    :return: list of k-number combinations from 1-9 whose sum equals n
"""
res, candidates = [], [i for i in range(1, 10)]
def dfs(candidates, target, path):
if target == 0 and len(path) == k:
res.append(path)
return
for i in range(len(candidates)):
if target - candidates[i] >= 0 and len(path) + 1 <= k:
dfs(candidates[i + 1:], target - candidates[i], path + [candidates[i]])
else:
break
dfs(candidates, n, [])
return res
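# Side note (not part of the LeetCode submission above): since combination_sum3 only
# draws from the digits 1-9, each used at most once, the result can be cross-checked
# with a brute force over itertools.combinations. Illustrative sketch:
def combination_sum3_bruteforce(k, n):
    from itertools import combinations  # local import to keep this sketch self-contained
    return [list(c) for c in combinations(range(1, 10), k) if sum(c) == n]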
def combination_sum_4(nums, target):
"""
Leet Code. Time Limit Exceeded
Given an integer array with all positive numbers and no duplicates, find the number
of possible combinations that add up to a positive integer target.
Example:
nums: [1, 2, 3], target: 4
Output should be 7 and the combinations are
[1, 1, 1, 1]
[1, 1, 2]
[1, 2, 1]
[1, 3]
[2, 1, 1]
[2, 2]
[3, 1]
:param nums: Given array
:param target: target sum
    :return: total number of combinations that sum to the target
"""
output = 0
def combination_helper(nums, target):
nonlocal output
if target == 0:
output += 1
return
for i in range(len(nums)):
if target - nums[i] < 0:
continue
combination_helper(nums, target - nums[i])
combination_helper(nums, target)
return output
def combination_sum_4_optimized(nums, target):
"""
Leet Code. Solution -> Accepted
Given an integer array with all positive numbers and no duplicates, find the number
of possible combinations that add up to a positive integer target.
Example:
nums: [1, 2, 3], target: 4
Output should be 7 and the combinations are
[1, 1, 1, 1]
[1, 1, 2]
[1, 2, 1]
[1, 3]
[2, 1, 1]
[2, 2]
[3, 1]
:param nums: Given array
:param target: target sum
    :return: total number of combinations that sum to the target
"""
nums.sort()
mem = {}
def combination_helper(target):
if target in mem:
return mem[target]
count = 0
for i in nums:
if i > target:
break
elif i == target:
count += 1
break
else:
count += combination_helper(target - i)
mem[target] = count
return count
return combination_helper(target)
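# Alternative sketch (not the accepted memoized solution above): the same count can be
# computed bottom-up, where dp[t] is the number of ordered combinations summing to t.
def combination_sum_4_dp(nums, target):
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to make 0: choose nothing
    for t in range(1, target + 1):
        dp[t] = sum(dp[t - n] for n in nums if n <= t)
    return dp[target]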
print(combination_sum_4_optimized([1, 2, 3], 4))
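# Quick sanity checks (illustrative only; the expected outputs follow the docstring
# examples above, and the exact ordering of combinations may differ):
if __name__ == "__main__":
    print(combination_sum([2, 3, 5], 8))                 # [[2, 2, 2, 2], [2, 3, 3], [3, 5]]
    print(combination_sum2([10, 1, 2, 7, 6, 1, 5], 8))   # [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]]
    print(combination_sum3(3, 7))                        # [[1, 2, 4]]
    print(combination_sum_4([1, 2, 3], 4))               # 7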
|
gpl-3.0
| 4,889,357,968,295,173,000
| 23.665094
| 96
| 0.544081
| false
| 4.081967
| false
| false
| false
|