| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
samli6479/bigdata
|
stream-processing.py
|
1
|
2559
|
# 1. read from kafka, kafka broker, kafka topic
# 2. write back to kafka, kafka broker, new kafka topic
import sys
import atexit
import logging
import json
import time
from kafka import KafkaProducer
from kafka.errors import KafkaError, KafkaTimeoutError
from pyspark import SparkContext # how to talk to spark
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
logger_format = "%(asctime)-15s %(message)s"
logging.basicConfig(format=logger_format)
logger = logging.getLogger('stream-processing')
logger.setLevel(logging.INFO)
topic = ""
new_topic = ""
kafka_broker = ""
kafka_producer = ""
def shutdown_hook(producer):
    try:
        logger.info('flush pending messages to kafka')
        producer.flush(10)
        logger.info('finish flushing pending messages')
    except KafkaError as kafka_error:
        logger.warn('Failed to flush pending messages to kafka')
    finally:
        try:
            producer.close(10)
        except Exception as e:
            logger.warn('Failed to close kafka connection')


def process(timeobj, rdd):
    # - calculate the average
    num_of_records = rdd.count()
    if num_of_records == 0:
        return
    price_sum = rdd.map(lambda record: float(json.loads(record[1].decode('utf-8'))[0].get('LastTradePrice'))).reduce(lambda a, b: a + b)
    average = price_sum / num_of_records
    logger.info('Received %d records from Kafka, average price is %f' % (num_of_records, average))
    # - write back to kafka
    # {timestamp, average}
    data = json.dumps({
        'timestamp': time.time(),
        'average': average
    })
    kafka_producer.send(new_topic, value=data)


if __name__ == "__main__":
    # kafka broker, topic, new topic and application name
    if len(sys.argv) != 4:
        print('Usage: stream-processing [topic] [new topic] [kafka-broker]')
        exit(1)
    topic, new_topic, kafka_broker = sys.argv[1:]
    # - setup connection to spark cluster
    # local[x] - x number of cores
    sc = SparkContext("local[2]", "StockAveragePrice")
    sc.setLogLevel('ERROR')
    # StreamingContext(sc, x) - batch interval of x seconds
    ssc = StreamingContext(sc, 5)
    # - create a data stream from spark
    # we could add our own kafka consumer to process, but that is not recommended
    # due to the additional layer
    directKafkaStream = KafkaUtils.createDirectStream(ssc, [topic], {'metadata.broker.list': kafka_broker})
    # - for each RDD, do something
    # Action
    directKafkaStream.foreachRDD(process)
    # - instantiate kafka producer
    kafka_producer = KafkaProducer(bootstrap_servers=kafka_broker)
    # - setup proper shutdown hook
    # Action
    atexit.register(shutdown_hook, kafka_producer)
    ssc.start()
    ssc.awaitTermination()
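
A minimal verification sketch (not part of the dataset row above): it reads the averaged prices that stream-processing.py writes back to Kafka, using kafka-python. The topic and broker names are placeholders and should match the script's command-line arguments; it assumes the producer publishes UTF-8 JSON strings.

```python
import json
from kafka import KafkaConsumer

# Placeholder topic/broker; adjust to the arguments passed to stream-processing.py.
consumer = KafkaConsumer('average-stock-price',
                         bootstrap_servers='localhost:9092')
for message in consumer:
    payload = json.loads(message.value.decode('utf-8'))
    print(payload['timestamp'], payload['average'])
```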
|
apache-2.0
| 8,905,566,711,919,112,000
| 26.826087
| 131
| 0.729191
| false
| 3.293436
| false
| false
| false
|
t123/ReadingTool.Python
|
lib/stringutil.py
|
1
|
4798
|
import re, time, datetime
class StringUtil:
    @staticmethod
    def isEmpty(x):
        if x is None:
            return True
        x = x.strip()
        if len(x) == 0:
            return True
        return False

    @staticmethod
    def isTrue(x):
        if x is None:
            return False
        if isinstance(x, bool) and x == True:
            return True
        x = str(x).lower().strip()
        if x == "1" or x == "true" or x == "yes":
            return True
        return False


class FilterParser():
    def __init__(self, languageNames=[]):
        self.languageNames = [item.lower() for item in languageNames]
        self.tags = []
        self.normal = []
        self.special = []
        self.languages = []
        self.source = []
        self.current = ""
        self.isTag = False
        self.inQuote = False
        self.limit = 0
        self.createdSign = ""
        self.modifiedSign = ""
        self.created = None
        self.modified = None

    def parseSource(self, string):
        string = string.replace("source:", "")
        self.source.append(string)

    def parseTime(self, string):
        string = string.lower()
        string = string.replace("created:", "")
        string = string.replace("modified:", "")
        sign1 = string[0:1]
        sign2 = string[0:2]
        if sign2 == ">=" or sign2 == "<=":
            date = string[2:]
            sign = sign2
        elif sign1 == ">" or sign1 == "<" or sign1 == "=":
            date = string[1:]
            sign = sign1
        else:
            date = string[0:]
            sign = "="
        try:
            if date == "today":
                now = datetime.datetime.now()
                date = now.strftime("%Y-%m-%d")
            elif date == "yesterday":
                yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
                date = yesterday.strftime("%Y-%m-%d")
            date = time.strptime(date, "%Y-%m-%d")
            created = date
            if sign.startswith("<"):
                created = date + 60 * 60 * 24
            return (sign, time.mktime(created))
        except:
            pass
        return None

    def append(self):
        if not StringUtil.isEmpty(self.current):
            if self.isTag:
                self.tags.append(self.current.lower())
                self.current = ""
                self.isTag = False
                self.inQuote = False
            else:
                if self.current.lower() in self.languageNames:
                    self.languages.append(self.current)
                else:
                    if self.current.lower().startswith("limit:"):
                        try:
                            self.limit = int(self.current[6:])
                        except:
                            self.limit = 0
                    elif self.current.lower().startswith("created:"):
                        result = self.parseTime(self.current)
                        if result is not None:
                            self.createdSign = result[0]
                            self.created = result[1]
                    elif self.current.lower().startswith("modified:"):
                        result = self.parseTime(self.current)
                        if result is not None:
                            self.modifiedSign = result[0]
                            self.modified = result[1]
                    elif self.current.lower().startswith("source:"):
                        self.source.append(self.current[7:])
                    else:
                        self.normal.append(self.current)
        self.current = ""
        self.isTag = False
        self.inQuote = False

    def filter(self, text):
        if StringUtil.isEmpty(text):
            return
        text = text.strip()
        for char in text:
            if char == "#":
                self.isTag = True
                continue
            if char == "\"":
                if self.inQuote:
                    self.append()
                    self.inQuote = False
                else:
                    self.inQuote = True
                continue
            if char == " ":
                if self.inQuote:
                    self.current += char
                    continue
                self.append()
                continue
            self.current += char
        self.append()
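
A hypothetical usage sketch for the FilterParser above; the query string and language names are illustrative only and mirror the prefixes (`#`, `limit:`, `created:`) the parser recognizes.

```python
parser = FilterParser(languageNames=['English', 'German'])
parser.filter('#news "some phrase" English limit:20 created:>=2014-01-01')

print(parser.tags)       # ['news']
print(parser.normal)     # ['some phrase']
print(parser.languages)  # ['English']
print(parser.limit)      # 20
print(parser.createdSign, parser.created)  # '>=' and a unix timestamp
```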
|
agpl-3.0
| -973,658,565,560,457,300
| 28.9875
| 80
| 0.40892
| false
| 5.104255
| false
| false
| false
|
2027205T/tango_with_django
|
tango_with_django_project/rango/bing_search.py
|
1
|
2779
|
import json
import urllib, urllib2
import keys
# Add your BING_API_KEY
BING_API_KEY = keys.BING_API_KEY
def main():
    # The main function should ask a user for a query (from the command line)
    query = raw_input("Please enter a search query: ")
    # and then issue the query to the BING API via the run_query method
    results = run_query(query)
    # and print out the top ten results returned.
    print "Your results: ", results
    # Print out the rank, title and URL for each result.


def run_query(search_terms):
    # Specify the base URL.
    root_url = 'https://api.datamarket.azure.com/Bing/Search/'
    source = 'Web'
    # Specify how many results we wish to be returned per page.
    # Offset specifies where in the results list to start from.
    # With results_per_page = 10 and offset = 11, this would start from page 2.
    results_per_page = 10
    offset = 0
    # Wrap quotes around our query terms as required by the Bing API.
    # The query we will then use is stored within variable query.
    query = "'{0}'".format(search_terms)
    query = urllib.quote(query)
    # Construct the latter part of our request's URL.
    # Sets the format of the response to JSON and sets other properties.
    search_url = "{0}{1}?$format=json&$top={2}&$skip={3}&Query={4}".format(
        root_url,
        source,
        results_per_page,
        offset,
        query)
    # Setup authentication with the Bing servers.
    # The username MUST be a blank string, and put in your API key!
    username = ''
    # Create a 'password manager' which handles authentication for us.
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, search_url, username, BING_API_KEY)
    # Create our results list which we'll populate.
    results = []
    try:
        # Prepare for connecting to Bing's servers.
        handler = urllib2.HTTPBasicAuthHandler(password_mgr)
        opener = urllib2.build_opener(handler)
        urllib2.install_opener(opener)
        # Connect to the server and read the response generated.
        response = urllib2.urlopen(search_url).read()
        # Convert the string response to a Python dictionary object.
        json_response = json.loads(response)
        # Loop through each page returned, populating our results list.
        for result in json_response['d']['results']:
            results.append({
                'title': result['Title'],
                'link': result['Url'],
                'summary': result['Description']})
    # Catch a URLError exception - something went wrong when connecting!
    except urllib2.URLError, e:
        print "Error when querying the Bing API: ", e
    # Return the list of results to the calling function.
    return results


if __name__ == '__main__':
    main()
|
mit
| 7,681,770,095,719,579,000
| 31.694118
| 79
| 0.660669
| false
| 3.981375
| false
| false
| false
|
vahndi/scitwi
|
scitwi/trends/trend.py
|
1
|
1155
|
from datetime import datetime
from typing import List
from scitwi.places.location import Location
from scitwi.utils.strs import list_obj_string, obj_string
class Trend(object):

    def __init__(self, trend_dict: dict, as_of: datetime, created_at: datetime, locations: List[Location]):
        self.as_of = as_of
        self.created_at = created_at
        self.locations = locations
        self.name = trend_dict['name']
        self.promoted_content = trend_dict['promoted_content']
        self.query = trend_dict['query']
        self.tweet_volume = trend_dict['tweet_volume']
        self.url = trend_dict['url']

    def __str__(self):
        str_out = ''
        str_out += obj_string('Name', self.name)
        str_out += obj_string('Promoted Content', self.promoted_content)
        str_out += obj_string('Query', self.query)
        str_out += obj_string('Tweet Volume', self.tweet_volume)
        str_out += obj_string('Url', self.url)
        str_out += obj_string('As Of', self.as_of)  # was self.url, an apparent copy-paste slip
        str_out += obj_string('Created At', self.created_at)
        str_out += list_obj_string('Locations', self.locations)
        return str_out
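
A hypothetical construction sketch; the dictionary keys mirror exactly what `Trend.__init__` reads, and the values are made up.

```python
from datetime import datetime

trend = Trend(
    trend_dict={'name': '#python', 'promoted_content': None,
                'query': '%23python', 'tweet_volume': 12345,
                'url': 'http://twitter.com/search?q=%23python'},
    as_of=datetime.utcnow(),
    created_at=datetime.utcnow(),
    locations=[],
)
print(trend)
```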
|
mit
| -5,805,980,990,255,305,000
| 32
| 107
| 0.624242
| false
| 3.5
| false
| false
| false
|
akmiller01/di-quick-vis
|
qv/core/models.py
|
1
|
1829
|
from django.db import models
from redactor.fields import RedactorField
from jsonfield import JSONField
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.text import slugify
import datetime
from os.path import basename, splitext
class Dataset(models.Model):
    name = models.CharField(max_length=255, null=True, blank=True)
    slug = models.SlugField(unique=True, max_length=255, null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    file_field = models.FileField(upload_to=settings.MEDIA_ROOT+'/%Y/%m/%d')
    json = JSONField(null=True, blank=True)
    sheet = models.IntegerField(null=True, blank=True, default=0)
    starting_row = models.IntegerField(null=True, blank=True, default=0)
    xVar = models.CharField(max_length=255, null=True, blank=True, default='id')
    yVar = models.CharField(max_length=255, null=True, blank=True, default='value')
    timeVar = models.CharField(max_length=255, null=True, blank=True, default='year')

    class Meta:
        ordering = ['-created']

    def __unicode__(self):
        return u'%s' % self.name

    def get_absolute_url(self):
        return reverse('core.views.data', args=[self.slug])

    def save(self, *args, **kwargs):
        super(Dataset, self).save(*args, **kwargs)
        date = datetime.date.today()
        if self.name is None or self.name == "":
            self.name = splitext(basename(self.file_field.name))[0]
            self.slug = '%s-%i%i%i' % (
                slugify(self.name), date.year, date.month, date.day
            )
        elif self.slug is None or self.slug == "":
            self.slug = '%s-%i%i%i%i' % (
                slugify(self.name), date.year, date.month, date.day, self.id
            )
        super(Dataset, self).save(*args, **kwargs)
|
gpl-2.0
| -582,374,485,033,654,700
| 41.534884
| 85
| 0.648442
| false
| 3.537718
| false
| false
| false
|
EDITD/queue_util
|
queue_util/producer.py
|
1
|
2789
|
"""
Allow the ability to connect and publish to a queue.
"""
import logging
import time
import kombu
import six
class Producer(object):
    def __init__(self, dest_queue_name, rabbitmq_host, rabbitmq_port=None,
                 serializer=None, compression=None,
                 userid=None, password=None):
        connect_kwargs = {}
        if userid is not None:
            connect_kwargs['userid'] = userid
        if password is not None:
            connect_kwargs['password'] = password
        if rabbitmq_port is not None:
            connect_kwargs['port'] = rabbitmq_port

        broker = kombu.BrokerConnection(rabbitmq_host, **connect_kwargs)
        self.dest_queue = broker.SimpleQueue(
            dest_queue_name,
            serializer=serializer,
            compression=compression,
        )

    def put(self, item):
        """
        Put one item onto the queue.
        """
        self.dest_queue.put(item)

    def buffered_put(self, input_iter, batch_size, resume_threshold=0.1, delay_in_seconds=5.0):
        """
        Given an input iterator, keep adding batches of items to the
        destination queue.

        After each batch, wait for the queue size to drop to a certain level
        until putting in the next batch.
        (Wait until the queue size is batch_size * resume_threshold.)

        Note that it isn't exact, but it will attempt to ensure that the queue
        size never goes (much) beyond batch_size.
        """
        num_enqueued = 0
        while True:
            try:
                logging.debug('Starting batch (batch_size={0})'.format(batch_size))
                for i in range(batch_size):
                    self.put(six.next(input_iter))
                    num_enqueued += 1
                logging.debug('Batch done. {0} items enqueued so far'.format(num_enqueued))
            except StopIteration:
                # We're done!
                #
                logging.debug('Input exhausted. {0} items enqueued in total'.format(num_enqueued))
                break

            # After each batch, we need to pause briefly.
            # Otherwise get_num_messages won't include the messages that we
            # just enqueued.
            #
            time.sleep(delay_in_seconds)

            # Now that we have completed one batch, we need to wait.
            max_size = resume_threshold * batch_size
            num_messages = self.dest_queue.qsize()
            while num_messages >= max_size:
                logging.debug(
                    'Current queue size = {0}, waiting until size <= {1}'.format(
                        num_messages, max_size,
                    ),
                )
                time.sleep(delay_in_seconds)
                num_messages = self.dest_queue.qsize()
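
A hypothetical usage sketch for the Producer above; the host, queue name, and batch sizes are placeholders.

```python
producer = Producer('work_items', 'localhost', serializer='json')

items = ({'job_id': i} for i in range(10000))
# Enqueue in batches of 500, resuming once the queue drains to ~50 messages.
producer.buffered_put(items, batch_size=500, resume_threshold=0.1)
```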
|
mit
| -2,356,338,687,290,004,000
| 35.220779
| 98
| 0.556472
| false
| 4.399054
| false
| false
| false
|
wolverineav/horizon
|
openstack_dashboard/dashboards/project/networks/ports/tables.py
|
1
|
3608
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import template
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
def get_fixed_ips(port):
    template_name = 'project/networks/ports/_port_ips.html'
    context = {"ips": port.fixed_ips}
    return template.loader.render_to_string(template_name, context)


def get_attached(port):
    if port['device_owner']:
        return port['device_owner']
    elif port['device_id']:
        return _('Attached')
    else:
        return _('Detached')


class UpdatePort(policy.PolicyTargetMixin, tables.LinkAction):
    name = "update"
    verbose_name = _("Edit Port")
    url = "horizon:project:networks:editport"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("network", "update_port"),)

    def get_link_url(self, port):
        network_id = self.table.kwargs['network_id']
        return reverse(self.url, args=(network_id, port.id))


DISPLAY_CHOICES = (
    ("UP", pgettext_lazy("Admin state of a Port", u"UP")),
    ("DOWN", pgettext_lazy("Admin state of a Port", u"DOWN")),
)
STATUS_DISPLAY_CHOICES = (
    ("ACTIVE", pgettext_lazy("status of a network port", u"Active")),
    ("DOWN", pgettext_lazy("status of a network port", u"Down")),
    ("ERROR", pgettext_lazy("status of a network port", u"Error")),
    ("BUILD", pgettext_lazy("status of a network port", u"Build")),
)


class PortsTable(tables.DataTable):
    name = tables.Column("name_or_id",
                         verbose_name=_("Name"),
                         link="horizon:project:networks:ports:detail")
    fixed_ips = tables.Column(get_fixed_ips, verbose_name=_("Fixed IPs"))
    attached = tables.Column(get_attached, verbose_name=_("Attached Device"))
    status = tables.Column("status",
                           verbose_name=_("Status"),
                           display_choices=STATUS_DISPLAY_CHOICES)
    admin_state = tables.Column("admin_state",
                                verbose_name=_("Admin State"),
                                display_choices=DISPLAY_CHOICES)
    mac_state = tables.Column("mac_state", empty_value=api.neutron.OFF_STATE,
                              verbose_name=_("MAC Learning State"))

    def get_object_display(self, port):
        return port.id

    class Meta(object):
        name = "ports"
        verbose_name = _("Ports")
        table_actions = (tables.FilterAction,)
        row_actions = (UpdatePort,)
        hidden_title = False

    def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
        super(PortsTable, self).__init__(request, data=data,
                                         needs_form_wrapper=needs_form_wrapper,
                                         **kwargs)
        if not api.neutron.is_extension_supported(request, 'mac-learning'):
            del self.columns['mac_state']
|
apache-2.0
| -4,700,138,844,131,522,000
| 36.195876
| 79
| 0.627494
| false
| 4.067644
| false
| false
| false
|
muneebalam/scrapenhl
|
scrapenhl/scrape_game.py
|
1
|
22209
|
import scrapenhl_globals
import os.path
def get_url(season, game):
    """
    Returns the NHL API url to scrape.

    Parameters
    -----------
    season : int
        The season of the game. 2007-08 would be 2007.
    game : int
        The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
        The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.

    Returns
    --------
    str
        URL to scrape, http://statsapi.web.nhl.com/api/v1/game/[season]0[game]/feed/live
    """
    return 'http://statsapi.web.nhl.com/api/v1/game/{0:d}0{1:d}/feed/live'.format(season, game)


def get_shift_url(season, game):
    """
    Returns the NHL API shifts url to scrape.

    Parameters
    -----------
    season : int
        The season of the game. 2007-08 would be 2007.
    game : int
        The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
        The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.

    Returns
    --------
    str
        http://www.nhl.com/stats/rest/shiftcharts?cayenneExp=gameId=[season]0[game]
    """
    return 'http://www.nhl.com/stats/rest/shiftcharts?cayenneExp=gameId={0:d}0{1:d}'.format(season, game)


def get_json_save_filename(season, game):
    """
    Returns the algorithm-determined save file name of the json accessed online.

    Parameters
    -----------
    season : int
        The season of the game. 2007-08 would be 2007.
    game : int
        The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
        The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.

    Returns
    --------
    str
        file name, SAVE_FOLDER/Season/Game.zlib
    """
    return os.path.join(scrapenhl_globals.SAVE_FOLDER, season, '{0:d}.zlib'.format(game))


def get_shift_save_filename(season, game):
    """
    Returns the algorithm-determined save file name of the shift json accessed online.

    Parameters
    -----------
    season : int
        The season of the game. 2007-08 would be 2007.
    game : int
        The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
        The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.

    Returns
    --------
    str
        file name, SAVE_FOLDER/Season/Game_shifts.zlib
    """
    return os.path.join(scrapenhl_globals.SAVE_FOLDER, season, '{0:d}_shifts.zlib'.format(game))


def get_parsed_save_filename(season, game):
    """
    Returns the algorithm-determined save file name of the parsed pbp file.

    Parameters
    -----------
    season : int
        The season of the game. 2007-08 would be 2007.
    game : int
        The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
        The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.

    Returns
    --------
    str
        file name, SAVE_FOLDER/Season/Game_parsed.zlib
    """
    return os.path.join(scrapenhl_globals.SAVE_FOLDER, season, '{0:d}_parsed.hdf5'.format(game))


def get_parsed_shifts_save_filename(season, game):
    """
    Returns the algorithm-determined save file name of the parsed toi file.

    Parameters
    -----------
    season : int
        The season of the game. 2007-08 would be 2007.
    game : int
        The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
        The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.

    Returns
    --------
    str
        file name, SAVE_FOLDER/Season/Game_shifts_parsed.zlib
    """
    return os.path.join(scrapenhl_globals.SAVE_FOLDER, season, '{0:d}_shifts_parsed.hdf5'.format(game))


def scrape_game(season, game, force_overwrite=False):
    """
    Scrapes and saves game files in compressed (zlib) format

    Parameters
    -----------
    season : int
        The season of the game. 2007-08 would be 2007.
    game : int
        The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
        The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
    force_overwrite : bool
        If True, will overwrite previously raw html files. If False, will not scrape if files already found.

    Returns
    -------
    bool
        A boolean indicating whether the NHL API was queried.
    """
    query = False
    import os.path
    url = get_url(season, game)
    filename = get_json_save_filename(season, game)
    if force_overwrite or not os.path.exists(filename):
        import urllib.request
        try:
            query = True
            with urllib.request.urlopen(url) as reader:
                page = reader.read()
        except Exception as e:
            if game < 30111:
                print('Error reading pbp url for', season, game, e, e.args)
            page = bytes('', encoding='latin-1')
        if True:  # game < 30111:
            import zlib
            page2 = zlib.compress(page, level=9)
            w = open(filename, 'wb')
            w.write(page2)
            w.close()
    url = get_shift_url(season, game)
    filename = get_shift_save_filename(season, game)
    if force_overwrite or not os.path.exists(filename):
        import urllib.request
        try:
            query = True
            with urllib.request.urlopen(url) as reader:
                page = reader.read()
        except Exception as e:
            if game < 30111:
                print('Error reading shift url for', season, game, e, e.args)
            page = bytes('', encoding='latin-1')
        if True:  # game < 30111:
            import zlib
            page2 = zlib.compress(page, level=9)
            w = open(filename, 'wb')
            w.write(page2)
            w.close()
    return query


def parse_game(season, game, force_overwrite=False):
    """
    Reads this game's zlib file from disk and parses into a friendlier format, then saves again to disk in zlib.

    This method also updates the global player id and game log files, and writes any updates to disk.

    Parameters
    -----------
    season : int
        The season of the game. 2007-08 would be 2007.
    game : int
        The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
        The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
    force_overwrite : bool
        If True, will overwrite previously raw html files. If False, will not scrape if files already found.
    """
    import os.path
    import zlib
    import json
    import pandas as pd
    filename = get_parsed_save_filename(season, game)
    if ((force_overwrite or not os.path.exists(filename)) and os.path.exists(get_json_save_filename(season, game))):
        r = open(get_json_save_filename(season, game), 'rb')
        page = r.read()
        r.close()
        page = zlib.decompress(page)
        try:
            data = json.loads(page.decode('latin-1'))
            teamdata = data['liveData']['boxscore']['teams']
            update_team_ids_from_json(teamdata)
            update_player_ids_from_json(teamdata)
            update_quick_gamelog_from_json(data)
            events = read_events_from_json(data['liveData']['plays']['allPlays'])
            if events is not None:
                events.to_hdf(filename, key='Game{0:d}0{1:d}'.format(season, game), mode='w',
                              complevel=9, complib='zlib')
            #pbp_compressed = zlib.compress(bytes(events, encoding = 'latin-1'), level=9)
            #w = open(filename, 'wb')
            #w.write(pbp_compressed)
            #w.close()
        except json.JSONDecodeError:
            pass
    filename = get_parsed_shifts_save_filename(season, game)
    basic_gamelog = scrapenhl_globals.get_quick_gamelog_file()
    if ((force_overwrite or not os.path.exists(filename)) and os.path.exists(get_shift_save_filename(season, game))):
        r = open(get_shift_save_filename(season, game), 'rb')
        page = r.read()
        r.close()
        page = zlib.decompress(page)
        try:
            data = json.loads(page.decode('latin-1'))
            try:
                thisgamedata = basic_gamelog.query('Season == {0:d} & Game == {1:d}'.format(season, game))
                rname = thisgamedata['Away'].iloc[0]
                hname = thisgamedata['Home'].iloc[0]
            except Exception as e:
                hname = None
                rname = None
            shifts = read_shifts_from_json(data['data'], hname, rname)
            if shifts is not None:
                #shifts = ''
                #shifts_compressed = zlib.compress(shifts, level=9)
                #w = open(filename, 'wb')
                #w.write(shifts_compressed)
                #w.close()
                shifts.to_hdf(filename, key='Game{0:d}0{1:d}'.format(season, game), mode='w',
                              complevel=9, complib='zlib')
        except json.JSONDecodeError:
            pass


def read_shifts_from_json(data, homename=None, roadname=None):
    if len(data) == 0:
        return
    ids = ['' for i in range(len(data))]
    periods = [0 for i in range(len(data))]
    starts = ['0:00' for i in range(len(data))]
    ends = ['0:00' for i in range(len(data))]
    teams = ['' for i in range(len(data))]
    durations = [0 for i in range(len(data))]
    for i, dct in enumerate(data):
        ids[i] = dct['playerId']
        periods[i] = dct['period']
        starts[i] = dct['startTime']
        ends[i] = dct['endTime']
        durations[i] = dct['duration']
        teams[i] = dct['teamAbbrev']
    ### Seems like home players come first
    if homename is None:
        homename = teams[0]
        for i in range(len(teams) - 1, 0, -1):
            if not teams[i] == homename:
                roadname = teams[i]
                break
    startmin = [x[:x.index(':')] for x in starts]
    startsec = [x[x.index(':') + 1:] for x in starts]
    starttimes = [1200 * (p - 1) + 60 * int(m) + int(s) for p, m, s in zip(periods, startmin, startsec)]
    endmin = [x[:x.index(':')] for x in ends]
    endsec = [x[x.index(':') + 1:] for x in ends]
    ### There is an extra -1 in endtimes to avoid overlapping start/end
    endtimes = [1200 * (p - 1) + 60 * int(m) + int(s) - 1 for p, m, s in zip(periods, endmin, endsec)]
    durationtime = [e - s for s, e in zip(starttimes, endtimes)]
    import pandas as pd
    df = pd.DataFrame({'PlayerID': ids, 'Period': periods, 'Start': starttimes, 'End': endtimes,
                       'Team': teams, 'Duration': durationtime})
    df.loc[df.End < df.Start, 'End'] = df.End + 1200
    tempdf = df[['PlayerID', 'Start', 'End', 'Team', 'Duration']]
    tempdf = tempdf.assign(Time=tempdf.Start)
    #print(tempdf.head(20))
    toi = pd.DataFrame({'Time': [i for i in range(0, max(df.End) + 1)]})
    toidfs = []
    while len(tempdf.index) > 0:
        temptoi = toi.merge(tempdf, how='inner', on='Time')
        toidfs.append(temptoi)
        tempdf = tempdf.assign(Time=tempdf.Time + 1)
        tempdf = tempdf.query('Time <= End')
    tempdf = pd.concat(toidfs)
    tempdf = tempdf.sort_values(by='Time')
    ### Append team name to start of columns by team
    hdf = tempdf.query('Team == "' + homename + '"')
    hdf2 = hdf.groupby('Time').rank()
    hdf2 = hdf2.rename(columns={'PlayerID': 'rank'})
    hdf2.loc[:, 'rank'] = hdf2['rank'].apply(lambda x: int(x))
    hdf.loc[:, 'rank'] = homename + hdf2['rank'].astype('str')
    rdf = tempdf.query('Team == "' + roadname + '"')
    rdf2 = rdf.groupby('Time').rank()
    rdf2 = rdf2.rename(columns={'PlayerID': 'rank'})
    rdf2.loc[:, 'rank'] = rdf2['rank'].apply(lambda x: int(x))
    rdf.loc[:, 'rank'] = roadname + rdf2['rank'].astype('str')
    ### Occasionally bad entries make duplicates on time and rank. Take one with longer duration
    tokeep = hdf.sort_values(by='Duration', ascending=False)
    tokeep = tokeep.groupby(['Time', 'PlayerID']).first()
    tokeep.reset_index(inplace=True)
    hdf = hdf.merge(tokeep, how='inner', on=['Time', 'PlayerID', 'Start', 'End', 'Team', 'rank'])
    tokeep = rdf.sort_values(by='Duration', ascending=False)
    tokeep = tokeep.groupby(['Time', 'PlayerID']).first()
    tokeep.reset_index(inplace=True)
    rdf = rdf.merge(tokeep, how='inner', on=['Time', 'PlayerID', 'Start', 'End', 'Team', 'rank'])
    ### Remove values above 6--looking like there won't be many
    ### TODO: keep goalie if one is a goalie!
    hdf = hdf.pivot(index='Time', columns='rank', values='PlayerID').iloc[:, 0:6]
    hdf.reset_index(inplace=True)  # get time back as a column
    rdf = rdf.pivot(index='Time', columns='rank', values='PlayerID').iloc[:, 0:6]
    rdf.reset_index(inplace=True)
    toi = toi.merge(hdf, how='left', on='Time').merge(rdf, how='left', on='Time')
    return(toi)


def update_team_ids_from_json(teamdata):
    import urllib.request
    import json
    import pandas as pd
    hid = teamdata['home']['team']['id']
    team_ids = scrapenhl_globals.get_team_id_file()
    if hid not in team_ids.ID.values:
        url = 'https://statsapi.web.nhl.com{0:s}'.format(teamdata['home']['team']['link'])
        with urllib.request.urlopen(url) as reader:
            page = reader.read()
        teaminfo = json.loads(page.decode('latin-1'))
        hid = teaminfo['teams'][0]['id']
        habbrev = teaminfo['teams'][0]['abbreviation']
        hname = teaminfo['teams'][0]['name']
        df = pd.DataFrame({'ID': [hid], 'Abbreviation': [habbrev], 'Name': [hname]})
        team_ids = pd.concat([team_ids, df])
        scrapenhl_globals.write_team_id_file(team_ids)
    rid = teamdata['away']['team']['id']
    if rid not in team_ids.ID.values:
        url = 'https://statsapi.web.nhl.com{0:s}'.format(teamdata['away']['team']['link'])
        with urllib.request.urlopen(url) as reader:
            page = reader.read()
        teaminfo = json.loads(page.decode('latin-1'))
        rid = teaminfo['teams'][0]['id']
        rabbrev = teaminfo['teams'][0]['abbreviation']
        rname = teaminfo['teams'][0]['name']
        df = pd.DataFrame({'ID': [rid], 'Abbreviation': [rabbrev], 'Name': [rname]})
        team_ids = pd.concat([team_ids, df])
        scrapenhl_globals.write_team_id_file(team_ids)


def update_player_ids_from_json(teamdata):
    """
    Creates a data frame of player data from current game's json[liveData][boxscore] to update player ids.

    This method reads player ids, names, handedness, team, position, and number, and full joins to player ids.
    If there are any changes to player ids, the dataframe gets written to disk again.

    Parameters
    -----------
    teamdata : dict
        A json dict that is the result of api_page['liveData']['boxscore']['teams']
    """
    team_ids = scrapenhl_globals.get_team_id_file()
    rteam = team_ids.query('ID == ' + str(teamdata['away']['team']['id']))
    rabbrev = rteam['Abbreviation'].iloc[0]
    hteam = team_ids.query('ID == ' + str(teamdata['home']['team']['id']))
    habbrev = hteam['Abbreviation'].iloc[0]
    awayplayers = teamdata['away']['players']
    homeplayers = teamdata['home']['players']
    numplayers = len(awayplayers) + len(homeplayers)
    ids = ['' for i in range(numplayers)]
    names = ['' for i in range(numplayers)]
    teams = ['' for i in range(numplayers)]
    positions = ['' for i in range(numplayers)]
    nums = [-1 for i in range(numplayers)]
    handedness = ['' for i in range(numplayers)]
    for i, (pid, pdata) in enumerate(awayplayers.items()):
        idnum = pid[2:]
        name = pdata['person']['fullName']
        try:
            hand = pdata['person']['shootsCatches']
        except KeyError:
            hand = 'N/A'
        try:
            num = pdata['jerseyNumber']
            if num == '':
                raise KeyError
            else:
                num = int(num)
        except KeyError:
            num = -1
        pos = pdata['position']['code']
        ids[i] = idnum
        names[i] = name
        teams[i] = rabbrev
        positions[i] = pos
        nums[i] = num
        handedness[i] = hand
    for i, (pid, pdata) in enumerate(homeplayers.items()):
        idnum = pid[2:]
        name = pdata['person']['fullName']
        try:
            hand = pdata['person']['shootsCatches']
        except KeyError:
            hand = 'N/A'
        try:
            num = pdata['jerseyNumber']
            if num == '':
                raise KeyError
            else:
                num = int(num)
        except KeyError:
            num = -1
        pos = pdata['position']['code']
        ids[i + len(awayplayers)] = idnum
        names[i + len(awayplayers)] = name
        teams[i + len(awayplayers)] = habbrev
        positions[i + len(awayplayers)] = pos
        nums[i + len(awayplayers)] = num
        handedness[i + len(awayplayers)] = hand
    import pandas as pd
    gamedf = pd.DataFrame({'ID': ids,
                           'Name': names,
                           'Team': teams,
                           'Pos': positions,
                           '#': nums,
                           'Hand': handedness})
    gamedf['Count'] = 1
    player_ids = scrapenhl_globals.get_player_id_file()
    player_ids = pd.concat([player_ids, gamedf]) \
        .groupby(['ID', 'Name', 'Team', 'Pos', '#', 'Hand']).sum().reset_index()
    scrapenhl_globals.write_player_id_file(player_ids)


def update_quick_gamelog_from_json(data):
    """
    Creates a data frame of basic game data from current game's json to update global BASIC_GAMELOG.

    This method reads the season, game, date and time, venue, and team names, coaches, and scores, joining to
    BASIC_GAMELOG.
    If there are any changes to BASIC_GAMELOG, the dataframe gets written to disk again.

    Parameters
    -----------
    data : dict
        The full json dict from the api_page
    """
    season = int(str(data['gameData']['game']['pk'])[:4])
    game = int(str(data['gameData']['game']['pk'])[4:])
    datetime = data['gameData']['datetime']['dateTime']
    try:
        venue = data['gameData']['venue']['name']
    except KeyError:
        venue = 'N/A'
    team_ids = scrapenhl_globals.get_team_id_file()
    hname = team_ids.query('ID == ' + str(data['gameData']['teams']['home']['id']))
    hname = hname['Abbreviation'].iloc[0]
    rname = team_ids.query('ID == ' + str(data['gameData']['teams']['away']['id']))
    rname = rname['Abbreviation'].iloc[0]
    try:
        hcoach = data['liveData']['boxscore']['teams']['home']['coaches'][0]['person']['fullName']
    except IndexError:
        hcoach = 'N/A'
    try:
        rcoach = data['liveData']['boxscore']['teams']['away']['coaches'][0]['person']['fullName']
    except IndexError:
        rcoach = 'N/A'
    hscore = data['liveData']['boxscore']['teams']['home']['teamStats']['teamSkaterStats']['goals']
    rscore = data['liveData']['boxscore']['teams']['away']['teamStats']['teamSkaterStats']['goals']
    import pandas as pd
    gamedf = pd.DataFrame({'Season': [season], 'Game': [game], 'Datetime': [datetime], 'Venue': [venue],
                           'Home': [hname], 'HomeCoach': [hcoach], 'HomeScore': [hscore],
                           'Away': [rname], 'AwayCoach': [rcoach], 'AwayScore': [rscore]})
    basic_gamelog = scrapenhl_globals.get_quick_gamelog_file()
    basic_gamelog = pd.concat([basic_gamelog, gamedf]).drop_duplicates()
    scrapenhl_globals.write_quick_gamelog_file(basic_gamelog)


def read_events_from_json(pbp):
    """
    Parses the play-by-play list from the game json into a dataframe.

    Parameters
    -----------
    pbp : list
        The list of play dicts from api_page['liveData']['plays']['allPlays'].

    Returns
    --------
    pandas df
        Dataframe of the game's play by play data
    """
    import numpy as np
    import pandas as pd
    index = [i for i in range(len(pbp))]
    period = [-1 for i in range(len(pbp))]
    time = ['0:00' for i in range(len(pbp))]
    event = ['NA' for i in range(len(pbp))]
    team = [-1 for i in range(len(pbp))]
    p1 = [-1 for i in range(len(pbp))]
    p1role = ['' for i in range(len(pbp))]
    p2 = [-1 for i in range(len(pbp))]
    p2role = ['' for i in range(len(pbp))]
    xy = [(np.NaN, np.NaN) for i in range(len(pbp))]
    note = ['' for i in range(len(pbp))]
    for i in range(len(pbp)):
        period[i] = int(pbp[i]['about']['period'])
        time[i] = pbp[i]['about']['periodTime']
        event[i] = pbp[i]['result']['event']
        try:
            xy[i] = (float(pbp[i]['coordinates']['x']), float(pbp[i]['coordinates']['y']))
        except KeyError:
            pass
        try:
            team[i] = pbp[i]['team']['id']
        except KeyError:
            pass
        try:
            p1[i] = pbp[i]['players'][0]['player']['id']
            p1role[i] = pbp[i]['players'][0]['playerType']
        except KeyError:
            pass
        try:
            p2[i] = pbp[i]['players'][1]['player']['id']
            p2role[i] = pbp[i]['players'][1]['playerType']
        except KeyError:
            pass
        except IndexError:  # e.g. on a give or take
            pass
        try:
            note[i] = pbp[i]['result']['description']
        except KeyError:
            pass
        #print(period[i], time[i], event[i], xy[i], team[i], p1[i], p1role[i], p2[i], p2role[i])
    pbpdf = pd.DataFrame({'Index': index, 'Period': period, 'Time': time, 'Event': event,
                          'Team': team, 'Actor': p1, 'ActorRole': p1role, 'Recipient': p2, 'RecipientRole': p2role,
                          'XY': xy, 'Note': note})
    return pbpdf
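
A hypothetical driver sketch for the two entry points above; the season and game ids are illustrative only, and scrapenhl_globals.SAVE_FOLDER is assumed to be configured elsewhere in the package.

```python
# Illustrative only: scrape and parse a single regular-season game.
season, game = 2016, 20001          # placeholder ids
if scrape_game(season, game):
    print('Queried the NHL API for game', season, game)
parse_game(season, game)
```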
|
mit
| 3,533,887,664,172,274,000
| 36.706282
| 117
| 0.578639
| false
| 3.499685
| false
| false
| false
|
b3j0f/middleware
|
setup.py
|
1
|
3807
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2014 Jonathan Labéjof <jonathan.labejof@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""b3j0f.middleware building script."""
from setuptools import setup, find_packages
from os.path import abspath, dirname, join
from re import compile as re_compile, S as re_S
NAME = 'b3j0f.middleware' # library name
NAMEPATH = NAME.replace('.', '/')
BASEPATH = dirname(abspath(__file__))
# get long description from setup directory abspath
with open(join(BASEPATH, 'README.rst')) as f:
DESC = f.read()
# Get the version - do not use normal import because it does break coverage
# thanks to the python jira project
# (https://github.com/pycontribs/jira/blob/master/setup.py)
with open(join(BASEPATH, NAMEPATH, 'version.py')) as f:
_STREAM = f.read()
_REGEX = r'.*__version__ = \'(.*?)\''
VERSION = re_compile(_REGEX, re_S).match(_STREAM).group(1)
KEYWORDS = [
'utils', 'middleware', 'API', 'tools', 'dynamic', 'reflection', 'reflect',
'runtime', 'abstract', 'common'
]
DEPENDENCIES = []
with open(join(BASEPATH, 'requirements.txt')) as f:
DEPENDENCIES = list(line for line in f.readlines())
DESCRIPTION = 'Middleware utilities library'
URL = 'https://github.com/{0}'.format(NAMEPATH)
setup(
name=NAME,
version=VERSION,
packages=find_packages(exclude=['test.*', '*.test.*']),
author='b3j0f',
author_email='ib3j0f@gmail.com',
install_requires=DEPENDENCIES,
description=DESCRIPTION,
long_description=DESC,
include_package_data=True,
url=URL,
license='MIT License',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Natural Language :: French',
'Operating System :: OS Independent',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
test_suite='b3j0f',
keywords=KEYWORDS
)
|
mit
| -2,162,318,671,613,996,300
| 36.303922
| 79
| 0.6523
| false
| 4.03072
| false
| false
| false
|
openstack/zaqar
|
zaqar/tests/unit/transport/wsgi/v1/test_home.py
|
1
|
2242
|
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import falcon
from oslo_serialization import jsonutils
import six.moves.urllib.parse as urlparse
from zaqar.tests.unit.transport.wsgi import base
class TestHomeDocument(base.V1Base):

    config_file = 'wsgi_mongodb.conf'

    def test_json_response(self):
        body = self.simulate_get(self.url_prefix + '/')
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        content_type = self.srmock.headers_dict['Content-Type']
        self.assertEqual('application/json-home', content_type)
        try:
            jsonutils.loads(body[0])
        except ValueError:
            self.fail('Home document is not valid JSON')

    def test_href_template(self):
        body = self.simulate_get(self.url_prefix + '/')
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        resp = jsonutils.loads(body[0])
        queue_href_template = resp['resources']['rel/queue']['href-template']
        path_1 = 'https://zaqar.example.com' + self.url_prefix
        path_2 = 'https://zaqar.example.com' + self.url_prefix + '/'

        # Verify all the href template start with the correct version prefix
        for resource in list(resp['resources']):
            self.assertTrue(resp['resources'][resource]['href-template'].
                            startswith(self.url_prefix))

        url = urlparse.urljoin(path_1, queue_href_template)
        expected = ('https://zaqar.example.com' + self.url_prefix +
                    '/queues/foo')
        self.assertEqual(expected, url.format(queue_name='foo'))

        url = urlparse.urljoin(path_2, queue_href_template)
        self.assertEqual(expected, url.format(queue_name='foo'))
|
apache-2.0
| 6,833,967,138,894,138,000
| 38.333333
| 79
| 0.670384
| false
| 3.858864
| false
| false
| false
|
Incubaid/pyrakoon
|
pyrakoon/client/admin.py
|
1
|
1551
|
# This file is part of Pyrakoon, a distributed key-value store client.
#
# Copyright (C) 2013, 2014 Incubaid BVBA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Administrative client interface'''
from pyrakoon.client import utils
from pyrakoon.protocol import admin
class ClientMixin:  # pylint: disable=W0232,C1001
    '''Mixin providing client actions for node administration

    This can be mixed into any class implementing
    :class:`pyrakoon.client.AbstractClient`.
    '''

    # pylint: disable=C0111,R0201
    @utils.call(admin.OptimizeDB)  # pylint: disable=E1101
    def optimize_db(self):
        assert False

    @utils.call(admin.DefragDB)  # pylint: disable=E1101
    def defrag_db(self):
        assert False

    @utils.call(admin.DropMaster)  # pylint: disable=E1101
    def drop_master(self):
        assert False

    @utils.call(admin.CollapseTlogs)  # pylint: disable=E1101
    def collapse_tlogs(self):
        assert False

    @utils.call(admin.FlushStore)  # pylint: disable=E1101
    def flush_store(self):
        assert False
|
apache-2.0
| -7,529,183,197,661,053,000
| 31.3125
| 74
| 0.724049
| false
| 3.773723
| false
| false
| false
|
KeyWeeUsr/plyer
|
plyer/platforms/android/proximity.py
|
1
|
2058
|
from jnius import autoclass
from jnius import cast
from jnius import java_method
from jnius import PythonJavaClass
from plyer.platforms.android import activity
from plyer.facades import Proximity
ActivityInfo = autoclass('android.content.pm.ActivityInfo')
Context = autoclass('android.content.Context')
Sensor = autoclass('android.hardware.Sensor')
SensorManager = autoclass('android.hardware.SensorManager')
class ProximitySensorListener(PythonJavaClass):
    __javainterfaces__ = ['android/hardware/SensorEventListener']

    def __init__(self):
        super(ProximitySensorListener, self).__init__()
        service = activity.getSystemService(Context.SENSOR_SERVICE)
        self.SensorManager = cast('android.hardware.SensorManager', service)
        self.sensor = self.SensorManager.getDefaultSensor(
            Sensor.TYPE_PROXIMITY)
        self.value = None

    def enable(self):
        self.SensorManager.registerListener(
            self, self.sensor,
            SensorManager.SENSOR_DELAY_NORMAL
        )

    def disable(self):
        self.SensorManager.unregisterListener(self, self.sensor)

    @java_method('(Landroid/hardware/SensorEvent;)V')
    def onSensorChanged(self, event):
        self.value = event.values[0]

    @java_method('(Landroid/hardware/Sensor;I)V')
    def onAccuracyChanged(self, sensor, accuracy):
        pass


class AndroidProximity(Proximity):
    listener = None

    def _enable(self, **kwargs):
        if not self.listener:
            self.listener = ProximitySensorListener()
            self.listener.enable()

    def _disable(self, **kwargs):
        if self.listener:
            self.listener.disable()
            delattr(self, 'listener')

    def _get_proximity(self):
        if self.listener:
            value = self.listener.value
            # value is 0.0 when the proximity sensor is covered. Otherwise it
            # is 5.0, because smartphones use optical proximity sensors.
            return value < 5.0


def instance():
    return AndroidProximity()
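
A hedged usage sketch through the plyer facade (Android only); it assumes the Proximity facade exposes enable()/disable() and a proximity property, which is the usual way this backend is reached.

```python
from plyer import proximity

proximity.enable()
# ... once the listener has received at least one sensor event:
print('Object near the screen?', proximity.proximity)
proximity.disable()
```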
|
mit
| 839,760,729,264,712,200
| 28.826087
| 76
| 0.672012
| false
| 4.003891
| false
| false
| false
|
EvilCult/moviecatcher
|
View/PlayerView.py
|
1
|
6852
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tkinter
import urllib.request, urllib.error, urllib.parse
import ssl
import io
import PIL.Image
import PIL.ImageTk
import tkinter.messagebox
import time
import webbrowser
from selenium import webdriver
from Lib import Tools
class GUI :
    def __init__ (self, master) :
        self.master = master
        self.authDownload = ''
        self.watchLinkStat = {'err': 0, 'msg': ''}
        self.downLinkStat = {'err': 0, 'msg': ''}
        self.Tools = Tools.Tools()

    def showDlLink (self, link) :
        window = tkinter.Toplevel()
        window.title('下载链接')
        window.resizable(width = 'false', height = 'false')
        if self.Tools.isWin() :
            window.iconbitmap(self.Tools.getRes('biticon.ico'))
        topZone = tkinter.Frame(window, bd = 0, bg="#444")
        topZone.pack(expand = True, fill = 'both')
        textZone = tkinter.Text(topZone, height = 8, width = 50, bd = 10, bg="#444", fg = '#ddd', highlightthickness = 0, selectbackground = '#116cd6')
        textZone.grid(row = 0, column = 0, sticky = '')
        textZone.insert('insert', link)
        dlBtn = tkinter.Button(topZone, text = '下载', width = 10, fg = '#222', highlightbackground = '#444', command = lambda url = link : webbrowser.open_new(url))
        dlBtn.grid(row = 1, column = 0, pady = 5)

    def showWatchLink (self) :
        if self.watchLinkStat['err'] == 0 :
            if self.watchLinkStat['msg'] == '' :
                self.timer = self.master.after(50, self.showWatchLink)
            else :
                webbrowser.open_new(self.watchLinkStat['msg'])
        elif self.watchLinkStat['err'] == 1 :
            tkinter.messagebox.showinfo('Error', '云端未能完成该任务,请等待云端下载完成or换个资源试试!')
        elif self.watchLinkStat['err'] == 2 :
            tkinter.messagebox.showinfo('Notice', '磁力链接目前不支持在线观看,待后续版本更新。\r\n暂时请手动下载或上传链接至百度云!')
        elif self.watchLinkStat['err'] == 3 :
            self.showAuthCode(self.watchLinkStat['msg'])

    def showCloudLink (self) :
        if self.downLinkStat['err'] == 0 :
            if self.downLinkStat['msg'] == '' :
                self.timer = self.master.after(50, self.showCloudLink)
            else :
                window = tkinter.Toplevel()
                window.title('离线下载链接')
                window.resizable(width = 'false', height = 'false')
                if self.Tools.isWin() :
                    window.iconbitmap(self.Tools.getRes('biticon.ico'))
                topZone = tkinter.Frame(window, bd = 0, bg="#444")
                topZone.pack(expand = True, fill = 'both')
                textZone = tkinter.Text(topZone, height = 8, width = 50, bd = 10, bg="#444", fg = '#ddd', highlightthickness = 0, selectbackground = '#116cd6')
                textZone.grid(row = 0, column = 0, sticky = '')
                textZone.insert('insert', self.downLinkStat['msg'])
                dlBtn = tkinter.Button(topZone, text = '下载', width = 10, fg = '#222', highlightbackground = '#444', command = lambda url = self.downLinkStat['msg'] : webbrowser.open_new(url))
                dlBtn.grid(row = 1, column = 0, pady = 5)
        elif self.downLinkStat['err'] == 1 :
            tkinter.messagebox.showinfo('Error', '云端未能完成该任务,请等待云端下载完成or换个资源试试!')
        elif self.downLinkStat['err'] == 2 :
            tkinter.messagebox.showinfo('Notice', '磁力链接目前不支持离线下载,待后续版本更新。\r\n暂时请手动下载或上传链接至百度云!')
        elif self.downLinkStat['err'] == 3 :
            self.showAuthCode(self.downLinkStat['msg'])

    def showAuthCode (self, imgUrl) :
        self.authWindow = tkinter.Toplevel()
        self.authWindow.title('验证码')
        self.authWindow.resizable(width = 'false', height = 'false')
        if self.Tools.isWin() :
            self.authWindow.iconbitmap(self.Tools.getRes('biticon.ico'))
        self.authWindow.config(background='#444')
        winTop = tkinter.Frame(self.authWindow, bd = 10, bg = '#444')
        winTop.grid(row = 0, column = 0, sticky = '')
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        image = urllib.request.urlopen(imgUrl, context = ctx).read()
        imgData = io.BytesIO(image)
        pilImage = PIL.Image.open(imgData)
        tkImg = PIL.ImageTk.PhotoImage(pilImage)
        label = tkinter.Label(winTop, image = tkImg, bd = 0, bg = '#111', relief = 'solid')
        label.img = tkImg
        label.grid(row = 0, column = 0, sticky = '', pady = 5)
        self.authKeyInput = tkinter.Entry(winTop, width = 20, bd = 0, bg = "#222", fg = "#ddd", highlightthickness = 1, highlightcolor="#111", highlightbackground = '#111', justify='center')
        self.authKeyInput.grid(row = 1, column = 0, pady = 5)
        self.authKeyInput.insert('end', '')
        btn = tkinter.Button(winTop, text = '确认', width = 10, fg = '#222', highlightbackground = '#444', command = self.__getAuthInput)
        btn.grid(row = 2, column = 0, pady = 5)

    def showLoginWindow (self, callback = '') :
        loginUrl = 'https://pan.baidu.com/'
        if self.Tools.isWin() :
            chromeDriver = self.Tools.getRes('chromedriver.exe')
        else :
            chromeDriver = self.Tools.getRes('chromedriver')
        # try:
        self.browser = webdriver.Chrome(executable_path = chromeDriver)
        self.browser.get(loginUrl)
        self.browser.maximize_window()
        self.slave = tkinter.Toplevel()
        self.slave.title('Login')
        self.slave.resizable(width = 'false', height = 'false')
        if self.Tools.isWin() :
            self.slave.iconbitmap(self.Tools.getRes('biticon.ico'))
        mainFrame = tkinter.Frame(self.slave, bd = 0, bg="#444")
        mainFrame.pack(expand = True, fill = 'both', ipadx = '10')
        msgLabel = tkinter.Label(mainFrame, text="请于页面中登陆百度云账号\r\n登陆成功后点击下方「获取cookies」按钮", fg = '#ddd', bg="#444", anchor = 'center')
        msgLabel.grid(row = 0, column = 1, pady = 5)
        loginBtn = tkinter.Button(mainFrame, text = '获取cookies', width = 20, fg = '#222', highlightbackground = '#444', command = lambda cb = callback : self.__getLoginInput(cb))
        loginBtn.grid(row = 4, column = 1, pady = 5)
        mainFrame.grid_columnconfigure(0, weight=1)
        mainFrame.grid_columnconfigure(2, weight=1)
        # except Exception as e:
        #     tkMessageBox.showinfo('Notice', '为保障密码安全:登陆功能将完全在Chrome浏览器中进行。\r\n所以需要Chrome支持。\r\n请先安装Google Chrome浏览器。')

    def __getLoginInput (self, callback = '') :
        time.sleep(5)
        if self.browser.title == '百度网盘-全部文件' :
            cookies = self.browser.get_cookies()
            cookieStr = ''
            for x in cookies :
                cookieStr += x['name'] + '=' + x['value'] + '; '
            result = {'stat': 1, 'msg': '获取成功'}
        else :
            result = {'stat': 2, 'msg': '获取失败'}
        self.browser.quit()
        if result['stat'] == 1 :
            self.slave.destroy()
            tkinter.messagebox.showinfo('Success', '登陆成功')
            callback(cookieStr)
        else :
            tkinter.messagebox.showinfo('Error', result['msg'])

    def __getAuthInput (self) :
        authKey = self.authKeyInput.get()
        self.authDownload(authKey)
        self.authWindow.destroy()
|
mit
| -659,800,920,965,595,600
| 36.093023
| 184
| 0.666771
| false
| 2.580906
| false
| false
| false
|
miti0/mosquito
|
utils/postman.py
|
1
|
2056
|
import smtplib
import configargparse
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from premailer import transform
class Postman:
    """
    Simple email/postman module

    ! Currently supported only for gmail
    """
    arg_parser = configargparse.get_argument_parser()
    arg_parser.add('--mail_username', help='Email username (supported only gmail)')
    arg_parser.add("--mail_password", help='Email password (supported only gmail)')
    arg_parser.add("--mail_recipients", help='Email recipients')

    def __init__(self):
        self.args = self.arg_parser.parse_known_args()[0]
        self.username = self.args.mail_username
        self.password = self.args.mail_password
        self.recipients = self.args.mail_recipients

    def send_mail(self, subject, body):
        """
        Send email to configured account with given subject and body
        """
        mail_from = self.username
        # mail_to = self.recipients if type(self.recipients) is list else [self.recipients]
        mail_to = self.recipients
        msg = MIMEMultipart('alternative')
        msg['Subject'] = subject
        msg['From'] = mail_from
        msg['To'] = mail_to
        # body = self.html_style() + body
        # msg.attach(MIMEText(body, 'html'))
        body = transform(body)
        # body = '<html> <h1 style="font-weight:bolder; border:1px solid black">Peter</h1> <p style="color:red">Hej</p> </html>'
        msg.attach(MIMEText(body, 'html'))
        mail = smtplib.SMTP("smtp.gmail.com", 587)
        mail.ehlo()
        mail.starttls()
        mail.login(self.username, self.password)
        mail.sendmail(mail_from, mail_to, msg.as_string())
        mail.close()
        print('mail successfully sent')

    @staticmethod
    def html_style():
        """
        Email css styles
        """
        style = '''
        <style>
            #headings {
                font-size:26px !important;
                line-height:32px !important;
            }
        </style>
        '''
        return style
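
A hypothetical usage sketch; the Gmail credentials and recipients are expected to arrive through the configargparse options registered above (--mail_username, --mail_password, --mail_recipients), and the subject/body values are made up.

```python
postman = Postman()
postman.send_mail(
    subject='Mosquito trade report',
    body='<h1 id="headings">Daily summary</h1><p>No open positions.</p>',
)
```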
|
gpl-3.0
| 7,558,057,738,645,854,000
| 31.634921
| 127
| 0.601654
| false
| 3.938697
| false
| false
| false
|
weidenba/recovery_sort
|
helper/meta.py
|
1
|
1540
|
from common_helper_files import get_binary_from_file
from hashlib import sha256
import os
import time
import logging
import magic
import sys
def generate_uid(file_path):
    file_data = get_binary_from_file(file_path)
    if file_data == b'' or type(file_data) is not bytes:
        return "0_0"
    file_hash = sha256(file_data).hexdigest()
    file_size = get_file_size(file_path)
    return "{}_{}".format(file_hash, file_size)


def get_modification_date(file_path):
    '''
    Return a string of the modification date: YYYY-MM-DD
    '''
    try:
        mod_date = os.path.getmtime(file_path)
        mod_date = time.localtime(mod_date)
        return time.strftime('%Y-%m-%d', mod_date)
    except Exception as e:
        logging.error('Could not get timestamp: {} {}'.format(sys.exc_info()[0].__name__, e))
        return '0'


def get_file_size(file_path):
    '''
    Returns size of a file in bytes
    '''
    try:
        return os.path.getsize(file_path)
    except Exception as e:
        logging.error('Could not get file size: {} {}'.format(sys.exc_info()[0].__name__, e))
        return 0


def get_file_name(file_path):
    '''
    Returns the file name
    '''
    file_name = file_path.split('/')[-1:][0]
    return file_name


def get_file_mime(file_path):
    '''
    Returns the mime_type of a file
    '''
    try:
        return magic.from_file(file_path, mime=True)
    except Exception as e:
        logging.error('Could not get file type: {} {}'.format(sys.exc_info()[0].__name__, e))
        return 'unknown'
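
A minimal usage sketch for the helpers above; the path is a placeholder.

```python
path = '/tmp/example.pdf'            # placeholder file
print(generate_uid(path))            # '<sha256 hex>_<size in bytes>'
print(get_modification_date(path))   # e.g. '2017-06-01'
print(get_file_name(path))           # 'example.pdf'
print(get_file_mime(path))           # e.g. 'application/pdf'
```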
|
gpl-3.0
| 581,053,022,200,500,400
| 25.101695
| 93
| 0.611688
| false
| 3.40708
| false
| false
| false
|
geosolutions-it/geonode
|
geonode/security/models.py
|
1
|
19572
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import logging
import traceback
import operator
from functools import reduce
from django.db.models import Q
from django.conf import settings
from django.db import transaction
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from geonode.groups.conf import settings as groups_settings
from guardian.shortcuts import (
    assign_perm,
    get_anonymous_user,
    get_groups_with_perms,
    get_perms
)

from geonode.groups.models import GroupProfile

from .permissions import (
    ADMIN_PERMISSIONS,
    LAYER_ADMIN_PERMISSIONS,
    VIEW_PERMISSIONS,
)

from .utils import (
    get_users_with_perms,
    set_owner_permissions,
    remove_object_permissions,
    purge_geofence_layer_rules,
    sync_geofence_with_guardian,
    get_user_obj_perms_model
)

logger = logging.getLogger("geonode.security.models")


class PermissionLevelError(Exception):
    pass


class PermissionLevelMixin(object):
    """
    Mixin for adding "Permission Level" methods
    to a model class -- eg role systems where a
    user has exactly one assigned role with respect to
    an object representing an "access level"
    """

    def get_all_level_info(self):
        resource = self.get_self_resource()
        users = get_users_with_perms(resource)
        groups = get_groups_with_perms(
            resource,
            attach_perms=True)
        if groups:
            for group in groups:
                try:
                    group_profile = GroupProfile.objects.get(slug=group.name)
                    managers = group_profile.get_managers()
                    if managers:
                        for manager in managers:
                            if manager not in users and not manager.is_superuser and \
                                    manager != resource.owner:
                                for perm in ADMIN_PERMISSIONS + VIEW_PERMISSIONS:
                                    assign_perm(perm, manager, resource)
                                users[manager] = ADMIN_PERMISSIONS + VIEW_PERMISSIONS
                except GroupProfile.DoesNotExist:
                    tb = traceback.format_exc()
                    logger.debug(tb)
        if resource.group:
            try:
                group_profile = GroupProfile.objects.get(slug=resource.group.name)
                managers = group_profile.get_managers()
                if managers:
                    for manager in managers:
                        if manager not in users and not manager.is_superuser and \
                                manager != resource.owner:
                            for perm in ADMIN_PERMISSIONS + VIEW_PERMISSIONS:
                                assign_perm(perm, manager, resource)
                            users[manager] = ADMIN_PERMISSIONS + VIEW_PERMISSIONS
            except GroupProfile.DoesNotExist:
                tb = traceback.format_exc()
                logger.debug(tb)
        info = {
            'users': users,
            'groups': groups}
        try:
            if hasattr(self, "layer"):
                info_layer = {
                    'users': get_users_with_perms(
                        self.layer),
                    'groups': get_groups_with_perms(
                        self.layer,
                        attach_perms=True)}
                for user in info_layer['users']:
                    if user in info['users']:
                        info['users'][user] = info['users'][user] + info_layer['users'][user]
                    else:
                        info['users'][user] = info_layer['users'][user]
                for group in info_layer['groups']:
                    if group in info['groups']:
                        info['groups'][group] = list(dict.fromkeys(info['groups'][group] + info_layer['groups'][group]))
                    else:
                        info['groups'][group] = info_layer['groups'][group]
        except Exception:
            tb = traceback.format_exc()
            logger.debug(tb)
        return info

    def get_self_resource(self):
        try:
            if hasattr(self, "resourcebase_ptr_id"):
                return self.resourcebase_ptr
        except ObjectDoesNotExist:
            pass
        return self

    @transaction.atomic
    def set_default_permissions(self, owner=None):
        """
        Remove all the permissions except for the owner and assign the
        view permission to the anonymous group
        """
        remove_object_permissions(self)

        # default permissions for anonymous users
        def skip_registered_members_common_group(user_group):
            if groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_NAME:
                _members_group_name = groups_settings.REGISTERED_MEMBERS_GROUP_NAME
                if (settings.RESOURCE_PUBLISHING or settings.ADMIN_MODERATE_UPLOADS) and \
                        _members_group_name == user_group.name:
                    return True
            return False

        anonymous_group, created = Group.objects.get_or_create(name='anonymous')

        # default permissions for owner
        _owner = owner or self.owner
        user_groups = Group.objects.filter(
            name__in=_owner.groupmember_set.all().values_list("group__slug", flat=True))
        obj_group_managers = []
        if user_groups:
            for _user_group in user_groups:
                if not skip_registered_members_common_group(Group.objects.get(name=_user_group)):
                    try:
                        _group_profile = GroupProfile.objects.get(slug=_user_group)
                        managers = _group_profile.get_managers()
                        if managers:
                            for manager in managers:
                                if manager not in obj_group_managers and not manager.is_superuser:
                                    obj_group_managers.append(manager)
                    except GroupProfile.DoesNotExist:
                        tb = traceback.format_exc()
                        logger.debug(tb)

        if not anonymous_group:
            raise Exception("Could not acquire 'anonymous' Group.")

        # default permissions for resource owner
        set_owner_permissions(self, members=obj_group_managers)

        # Anonymous
        anonymous_can_view = settings.DEFAULT_ANONYMOUS_VIEW_PERMISSION
        if anonymous_can_view:
            assign_perm('view_resourcebase',
anonymous_group, self.get_self_resource())
else:
for user_group in user_groups:
if not skip_registered_members_common_group(user_group):
assign_perm('view_resourcebase',
user_group, self.get_self_resource())
anonymous_can_download = settings.DEFAULT_ANONYMOUS_DOWNLOAD_PERMISSION
if anonymous_can_download:
assign_perm('download_resourcebase',
anonymous_group, self.get_self_resource())
else:
for user_group in user_groups:
if not skip_registered_members_common_group(user_group):
assign_perm('download_resourcebase',
user_group, self.get_self_resource())
if self.__class__.__name__ == 'Layer':
# only for layer owner
assign_perm('change_layer_data', _owner, self)
assign_perm('change_layer_style', _owner, self)
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
purge_geofence_layer_rules(self.get_self_resource())
# Owner & Managers
perms = [
"view_resourcebase",
"change_layer_data",
"change_layer_style",
"change_resourcebase",
"change_resourcebase_permissions",
"download_resourcebase"]
sync_geofence_with_guardian(self.layer, perms, user=_owner)
for _group_manager in obj_group_managers:
sync_geofence_with_guardian(self.layer, perms, user=_group_manager)
for user_group in user_groups:
if not skip_registered_members_common_group(user_group):
sync_geofence_with_guardian(self.layer, perms, group=user_group)
# Anonymous
perms = ["view_resourcebase"]
if anonymous_can_view:
sync_geofence_with_guardian(self.layer, perms, user=None, group=None)
perms = ["download_resourcebase"]
if anonymous_can_download:
sync_geofence_with_guardian(self.layer, perms, user=None, group=None)
@transaction.atomic
def set_permissions(self, perm_spec, created=False):
"""
        Sets an object's permission levels based on the perm_spec JSON.
the mapping looks like:
{
'users': {
'AnonymousUser': ['view'],
<username>: ['perm1','perm2','perm3'],
<username2>: ['perm1','perm2','perm3']
...
}
            'groups': {
                <groupname>: ['perm1','perm2','perm3'],
                <groupname2>: ['perm1','perm2','perm3'],
                ...
            }
}
"""
remove_object_permissions(self)
# default permissions for resource owner
set_owner_permissions(self)
# Anonymous User group
if 'users' in perm_spec and "AnonymousUser" in perm_spec['users']:
anonymous_group = Group.objects.get(name='anonymous')
for perm in perm_spec['users']['AnonymousUser']:
if self.polymorphic_ctype.name == 'layer' and perm in ('change_layer_data', 'change_layer_style',
'add_layer', 'change_layer', 'delete_layer',):
assign_perm(perm, anonymous_group, self.layer)
else:
assign_perm(perm, anonymous_group, self.get_self_resource())
# Owner
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
if not created:
purge_geofence_layer_rules(self.get_self_resource())
perms = [
"view_resourcebase",
"change_layer_data",
"change_layer_style",
"change_resourcebase",
"change_resourcebase_permissions",
"download_resourcebase"]
sync_geofence_with_guardian(self.layer, perms, user=self.owner)
# All the other users
if 'users' in perm_spec and len(perm_spec['users']) > 0:
for user, perms in perm_spec['users'].items():
_user = get_user_model().objects.get(username=user)
if _user != self.owner and user != "AnonymousUser":
for perm in perms:
if self.polymorphic_ctype.name == 'layer' and perm in (
'change_layer_data', 'change_layer_style',
'add_layer', 'change_layer', 'delete_layer',):
assign_perm(perm, _user, self.layer)
else:
assign_perm(perm, _user, self.get_self_resource())
# Set the GeoFence Rules
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
group_perms = None
if 'groups' in perm_spec and len(perm_spec['groups']) > 0:
group_perms = perm_spec['groups']
sync_geofence_with_guardian(self.layer, perms, user=_user, group_perms=group_perms)
# All the other groups
if 'groups' in perm_spec and len(perm_spec['groups']) > 0:
for group, perms in perm_spec['groups'].items():
_group = Group.objects.get(name=group)
for perm in perms:
if self.polymorphic_ctype.name == 'layer' and perm in (
'change_layer_data', 'change_layer_style',
'add_layer', 'change_layer', 'delete_layer',):
assign_perm(perm, _group, self.layer)
else:
assign_perm(perm, _group, self.get_self_resource())
# Set the GeoFence Rules
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
if _group and _group.name and _group.name == 'anonymous':
_group = None
sync_geofence_with_guardian(self.layer, perms, group=_group)
# AnonymousUser
if 'users' in perm_spec and len(perm_spec['users']) > 0:
if "AnonymousUser" in perm_spec['users']:
_user = get_anonymous_user()
perms = perm_spec['users']["AnonymousUser"]
for perm in perms:
if self.polymorphic_ctype.name == 'layer' and perm in (
'change_layer_data', 'change_layer_style',
'add_layer', 'change_layer', 'delete_layer',):
assign_perm(perm, _user, self.layer)
else:
assign_perm(perm, _user, self.get_self_resource())
# Set the GeoFence Rules (user = None)
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
sync_geofence_with_guardian(self.layer, perms)
@transaction.atomic
def set_workflow_perms(self, approved=False, published=False):
"""
                     | N/PUBLISHED | PUBLISHED
        --------------------------------------------
          N/APPROVED | GM/OWR      | -
          APPROVED   | registered  | all
        --------------------------------------------
"""
anonymous_group = Group.objects.get(name='anonymous')
if approved:
if groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_NAME:
_members_group_name = groups_settings.REGISTERED_MEMBERS_GROUP_NAME
_members_group_group = Group.objects.get(name=_members_group_name)
for perm in VIEW_PERMISSIONS:
assign_perm(perm,
_members_group_group, self.get_self_resource())
# Set the GeoFence Rules (user = None)
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
sync_geofence_with_guardian(self.layer, VIEW_PERMISSIONS, group=_members_group_group)
else:
for perm in VIEW_PERMISSIONS:
assign_perm(perm,
anonymous_group, self.get_self_resource())
# Set the GeoFence Rules (user = None)
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
sync_geofence_with_guardian(self.layer, VIEW_PERMISSIONS)
if published:
for perm in VIEW_PERMISSIONS:
assign_perm(perm,
anonymous_group, self.get_self_resource())
# Set the GeoFence Rules (user = None)
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
sync_geofence_with_guardian(self.layer, VIEW_PERMISSIONS)
def get_user_perms(self, user):
"""
Returns a list of permissions a user has on a given resource
"""
# To avoid circular import
from geonode.base.models import Configuration
config = Configuration.load()
ctype = ContentType.objects.get_for_model(self)
PERMISSIONS_TO_FETCH = VIEW_PERMISSIONS + ADMIN_PERMISSIONS + LAYER_ADMIN_PERMISSIONS
resource_perms = Permission.objects.filter(
codename__in=PERMISSIONS_TO_FETCH,
content_type_id=ctype.id
).values_list('codename', flat=True)
# Don't filter for admin users
if not (user.is_superuser or user.is_staff):
user_model = get_user_obj_perms_model(self)
user_resource_perms = user_model.objects.filter(
object_pk=self.pk,
content_type_id=ctype.id,
user__username=str(user),
permission__codename__in=resource_perms
)
# get user's implicit perms for anyone flag
implicit_perms = get_perms(user, self)
resource_perms = user_resource_perms.union(
user_model.objects.filter(permission__codename__in=implicit_perms)
).values_list('permission__codename', flat=True)
# filter out permissions for edit, change or publish if readonly mode is active
perm_prefixes = ['change', 'delete', 'publish']
if config.read_only:
clauses = (Q(codename__contains=prefix) for prefix in perm_prefixes)
query = reduce(operator.or_, clauses)
if (user.is_superuser or user.is_staff):
resource_perms = resource_perms.exclude(query)
else:
perm_objects = Permission.objects.filter(codename__in=resource_perms)
resource_perms = perm_objects.exclude(query).values_list('codename', flat=True)
return resource_perms
def user_can(self, user, permission):
"""
        Checks if a user has a given permission on the resource
"""
resource = self.get_self_resource()
user_perms = self.get_user_perms(user).union(resource.get_user_perms(user))
if permission not in user_perms:
# TODO cater for permissions with syntax base.permission_codename
# eg 'base.change_resourcebase'
return False
return True
|
gpl-3.0
| 5,339,928,737,404,739,000
| 42.785235
| 120
| 0.540773
| false
| 4.561175
| false
| false
| false
|
paulthulstrup/moose
|
modules/thermopower_diffusion/thermopower_geometry.py
|
1
|
8670
|
import os, subprocess, re, sys
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Modify the mesh size for all runs; we set:
# - T_fridge = 0.005
# - T_hot = 0.3
def writeMooseInput(mesh_n):
Values = {
'mesh_name': mesh_n
}
    # First part is reading the text file with the input template
Lines = [line.rstrip('\n') for line in open('./input_file_geovar.txt')]
    # Dict mapping {line number: name of the value to substitute}
Lines_to_change = {
'1': "mesh_name",
}
filename = "./thermopower_diffusion.i"
os.remove(filename)
content = ''
for i in range(len(Lines)):
l = Lines[i]
key = str(i)
if key in Lines_to_change:
l += Values[Lines_to_change[key]] + "'"
content += l
content += '\n'
with open(filename, 'w+') as f2:
f2.write(content + os.linesep)
# Run the Moose simulation
def runMoose():
run_cmd = "sh ./run_sim_thermopower.sh"
subprocess.call(run_cmd, shell=True)
# Cleans the variable to return an array of floats
def clean_var(var):
temp = re.sub('', '', var[0])
mylist = temp.split(',')
res = []
for i in range(len(mylist)):
s = mylist[i]
res.append(re.sub('[\s+]', '', s))
res = [float(i) for i in res]
return res
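# Example (illustrative): clean_var([' 0.1, 0.25,\n 0.3 ']) -> [0.1, 0.25, 0.3]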
# Set up environment variable
# meshes = ['advanced_L_2.msh', 'advanced_L_4.msh', 'advanced_L_6.msh', 'advanced_L_9.msh',
# 'advanced_L_10.msh', 'advanced_L_11.msh', 'advanced_L_13.msh', 'advanced_L_20.msh',
# 'advanced_L_30.msh', 'advanced_L_40.msh', 'advanced_L_100.msh']
# meshes_length = [2, 4, 6, 9, 10, 11, 13, 20, 30, 40, 100]
meshes = ['rectangle2.msh', 'rectangle2-5.msh', 'rectangle3.msh', 'rectangle3-5.msh', 'rectangle4.msh', 'rectangle6.msh',
'rectangle8.msh', 'rectangle10.msh']
meshes_length = [2, 2.5, 3, 3.5, 4, 6, 8, 10]
result1 = []
result2 = []
result3 = []
result4 = []
result5 = []
for i in range(len(meshes)):
mesh = meshes[i]
writeMooseInput(mesh)
runMoose()
    # Loads the data from the ncdump output
f = open("out.txt", 'r')
data = f.read()
x = re.findall(r'coordx =(.*?);', data, re.DOTALL)
x_node = clean_var(x)
y = re.findall(r'coordy =(.*?);', data, re.DOTALL)
y_node = clean_var(y)
nodes = np.array(zip(x_node, y_node))
T = re.findall(r'vals_nod_var1 =(.*?);', data, re.DOTALL)
val_T = np.sqrt(clean_var(T))
# Interpolation (Linear or Cubic)
# Need to define the domain properly on which we interpolate
from scipy.interpolate import griddata
if meshes_length[i] == 2:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):100j,
min(y_node):max(y_node):100j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 50])
result2.append(grid_T1[30, 50])
result3.append(grid_T1[50, 50])
result4.append(grid_T1[70, 50])
result5.append(grid_T1[90, 50])
if meshes_length[i] == 2.5:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):125j,
min(y_node):max(y_node):125j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 62])
result2.append(grid_T1[30, 62])
result3.append(grid_T1[50, 62])
result4.append(grid_T1[70, 62])
result5.append(grid_T1[90, 62])
elif meshes_length[i] == 3:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):150j,
min(y_node):max(y_node):150j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 75])
result2.append(grid_T1[30, 75])
result3.append(grid_T1[50, 75])
result4.append(grid_T1[70, 75])
result5.append(grid_T1[90, 75])
elif meshes_length[i] == 3.5:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):175j,
min(y_node):max(y_node):175j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 87])
result2.append(grid_T1[30, 87])
result3.append(grid_T1[50, 87])
result4.append(grid_T1[70, 87])
result5.append(grid_T1[90, 87])
elif meshes_length[i] == 4:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):200j,
min(y_node):max(y_node):200j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 100])
result2.append(grid_T1[30, 100])
result3.append(grid_T1[50, 100])
result4.append(grid_T1[70, 100])
result5.append(grid_T1[90, 100])
elif meshes_length[i] == 6:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):300j,
min(y_node):max(y_node):300j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 150])
result2.append(grid_T1[30, 150])
result3.append(grid_T1[50, 150])
result4.append(grid_T1[70, 150])
result5.append(grid_T1[90, 150])
elif meshes_length[i] == 8:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):400j,
min(y_node):max(y_node):400j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 200])
result2.append(grid_T1[30, 200])
result3.append(grid_T1[50, 200])
result4.append(grid_T1[70, 200])
result5.append(grid_T1[90, 200])
elif meshes_length[i] == 9:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):450j,
min(y_node):max(y_node):450j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[33, 225])
elif meshes_length[i] == 10:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):500j,
min(y_node):max(y_node):500j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 250])
result2.append(grid_T1[30, 250])
result3.append(grid_T1[50, 250])
result4.append(grid_T1[70, 250])
result5.append(grid_T1[90, 250])
elif meshes_length[i] == 11:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):550j,
min(y_node):max(y_node):550j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[33, 275])
elif meshes_length[i] == 13:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):650j,
min(y_node):max(y_node):650j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[33, 325])
elif meshes_length[i] == 20:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):1000j,
min(y_node):max(y_node):1000j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[12, 500])
elif meshes_length[i] == 30:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):1500j,
min(y_node):max(y_node):1500j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[12, 750])
elif meshes_length[i] == 40:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):2000j,
min(y_node):max(y_node):2000j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[12, 1000])
elif meshes_length[i] == 100:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):5000j,
min(y_node):max(y_node):5000j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[12, 2500])
print result5
|
lgpl-2.1
| -392,985,355,593,617,200
| 36.695652
| 121
| 0.57451
| false
| 2.823185
| false
| false
| false
|
bzhou26/leetcode_sol
|
p20_Valid_Parentheses.py
|
1
|
1041
|
'''
- Leetcode problem: 20
- Difficulty: Easy
- Brief problem description:
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
An input string is valid if:
Open brackets must be closed by the same type of brackets.
Open brackets must be closed in the correct order.
Note that an empty string is also considered valid.
Example 1:
Input: "()"
Output: true
Example 2:
Input: "()[]{}"
Output: true
Example 3:
Input: "(]"
Output: false
Example 4:
Input: "([)]"
Output: false
Example 5:
Input: "{[]}"
Output: true
- Solution Summary:
- Used Resources:
--- Bo Zhou
'''
class Solution:
def isValid(self, s: str) -> bool:
pStack = []
for c in s:
if c == "{":
pStack.append("}")
elif c == "[":
pStack.append("]")
elif c == "(":
pStack.append(")")
elif len(pStack) == 0 or pStack.pop() != c:
return False
return len(pStack) == 0
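# Example usage (illustrative):
#   Solution().isValid("()[]{}")  # -> True
#   Solution().isValid("([)]")    # -> False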
|
mit
| -6,117,799,411,657,766,000
| 16.965517
| 118
| 0.558117
| false
| 3.516892
| false
| false
| false
|
ryanGT/sympy
|
sympy/polys/wrappers.py
|
1
|
2095
|
from polynomial import Poly
def LexPoly(*args):
"""Returns a polynomial with lexicographic order of terms. """
return Poly(*args, **{ 'order' : 'lex' })
from algorithms import poly_div, poly_pdiv, poly_groebner, poly_lcm, poly_gcd, \
poly_half_gcdex, poly_gcdex, poly_sqf, poly_resultant, poly_subresultants, \
poly_decompose, poly_quo, poly_rem, poly_pquo, poly_prem
from rootfinding import poly_root_factors, poly_sturm
def _conv_args(n, args):
symbols = args[n:]
if len(symbols) == 1 and isinstance(symbols[0], (tuple, list)):
return args[:n] + tuple(symbols[0])
else:
return args
def _map_basic(f, n, *args, **kwargs):
result = f(*_conv_args(n, args), **kwargs)
if isinstance(result, (list, tuple, set)):
return result.__class__(g.as_basic() for g in result)
else:
return result.as_basic()
_funcs = {
'quo' : 2,
'rem' : 2,
'pdiv' : 2,
'pquo' : 2,
'prem' : 2,
'groebner' : 1,
'lcm' : 2,
'gcd' : 2,
'gcdex' : 2,
'half_gcdex' : 2,
'resultant' : 2,
'sqf' : 1,
'decompose' : 1,
'root_factors' : 1,
'sturm' : 1,
}
_func_def = \
"""
def %s(*args, **kwargs):
return _map_basic(poly_%s, %d, *args, **kwargs)
%s.__doc__ = poly_%s.__doc__
"""
for _func, _n in _funcs.iteritems():
exec _func_def % (_func, _func, _n, _func, _func)
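# For instance (illustrative), the wrapper generated for 'lcm' above is
# equivalent to:
#
#   def lcm(*args, **kwargs):
#       return _map_basic(poly_lcm, 2, *args, **kwargs)
#   lcm.__doc__ = poly_lcm.__doc__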
def div(*args, **kwargs):
q, r = poly_div(*_conv_args(2, args), **kwargs)
if type(q) is not list:
q = q.as_basic()
else:
q = [ p.as_basic() for p in q ]
return q, r.as_basic()
div.__doc__ = poly_div.__doc__
def subresultants(*args, **kwargs):
result = poly_subresultants(*_conv_args(2, args), **kwargs)
if type(result) is tuple:
res, R = result
else:
res, R = None, result
R = [ r.as_basic() for r in R ]
if res is None:
return R
else:
return res.as_basic(), R
subresultants.__doc__ = poly_subresultants.__doc__
|
bsd-3-clause
| 5,509,448,400,583,074,000
| 23.08046
| 80
| 0.528401
| false
| 2.921897
| false
| false
| false
|
openstack/python-designateclient
|
designateclient/v2/cli/service_statuses.py
|
1
|
2982
|
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from osc_lib.command import command
from designateclient import utils
from designateclient.v2.cli import common
from designateclient.v2 import utils as v2_utils
LOG = logging.getLogger(__name__)
def _format_status(status):
status.pop("links", None)
# Remove unneeded fields for display output formatting
for k in ("capabilities", "stats"):
status[k] = "\n".join(status[k]) if status[k] else "-"
return status
class ListServiceStatusesCommand(command.Lister):
"""List service statuses"""
columns = ['id', 'hostname', 'service_name', 'status', 'stats',
'capabilities']
def get_parser(self, prog_name):
parser = super(ListServiceStatusesCommand, self).get_parser(prog_name)
parser.add_argument("--hostname", help="Hostname", required=False)
parser.add_argument("--service_name", help="Service Name",
required=False)
parser.add_argument("--status", help="Status", required=False)
common.add_all_common_options(parser)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.dns
common.set_all_common_headers(client, parsed_args)
cols = self.columns
criterion = {}
for i in ["hostname", "service_name", "status"]:
v = getattr(parsed_args, i)
if v is not None:
criterion[i] = v
data = v2_utils.get_all(client.service_statuses.list,
criterion=criterion)
for i, s in enumerate(data):
data[i] = _format_status(s)
return cols, (utils.get_item_properties(s, cols) for s in data)
class ShowServiceStatusCommand(command.ShowOne):
"""Show service status details"""
def get_parser(self, prog_name):
parser = super(ShowServiceStatusCommand, self).get_parser(prog_name)
parser.add_argument('id', help="Service Status ID")
common.add_all_common_options(parser)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.dns
common.set_all_common_headers(client, parsed_args)
data = client.service_statuses.get(parsed_args.id)
_format_status(data)
return zip(*sorted(data.items()))
|
apache-2.0
| -2,124,062,861,226,373,000
| 31.064516
| 78
| 0.65996
| false
| 3.934037
| false
| false
| false
|
google/earthengine-api
|
python/ee/_cloud_api_utils.py
|
1
|
26796
|
#!/usr/bin/env python
"""Earth Engine helper functions for working with the Cloud API.
Many of the functions defined here are for mapping legacy calls in ee.data into
their new Cloud API equivalents. This generally requires remapping call
parameters and result values.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import calendar
import copy
import datetime
import re
import warnings
from . import ee_exception
from google_auth_httplib2 import AuthorizedHttp
from google_auth_httplib2 import Request
from googleapiclient import discovery
from googleapiclient import http
from googleapiclient import model
# We use the urllib3-aware shim if it's available.
# It is not available by default if the package is installed via the conda-forge
# channel.
# pylint: disable=g-bad-import-order,g-import-not-at-top
try:
import httplib2shim as httplib2
except ImportError:
import httplib2
import six
# pylint: enable=g-bad-import-order,g-import-not-at-top
# The Cloud API version.
VERSION = 'v1alpha'
PROJECT_ID_PATTERN = (r'^(?:\w+(?:[\w\-]+\.[\w\-]+)*?\.\w+\:)?'
r'[a-z][-a-z0-9]{4,28}[a-z0-9]$')
ASSET_NAME_PATTERN = (r'^projects/((?:\w+(?:[\w\-]+\.[\w\-]+)*?\.\w+\:)?'
r'[a-z][a-z0-9\-]{4,28}[a-z0-9])/assets/(.*)$')
ASSET_ROOT_PATTERN = (r'^projects/((?:\w+(?:[\w\-]+\.[\w\-]+)*?\.\w+\:)?'
r'[a-z][a-z0-9\-]{4,28}[a-z0-9])/assets/?$')
# The default user project to use when making Cloud API calls.
_cloud_api_user_project = None
def _wrap_request(headers_supplier, response_inspector):
"""Builds a callable that wraps an API request.
Args:
headers_supplier: If not None, this will be called for each request and the
resulting dict incorporated into that request's HTTP headers.
response_inspector: If not None, this will be called with an
httplib2.Response containing the HTTP response and body content.
The call happens no matter what the HTTP response status was.
Returns:
Something that can be called in place of the http.HttpRequest constructor
to build an HttpRequest.
"""
if headers_supplier is None and response_inspector is None:
return http.HttpRequest
# pylint: disable=invalid-name
def builder(http_transport,
postproc,
uri,
method='GET',
body=None,
headers=None,
methodId=None,
resumable=None):
"""Builds an HttpRequest, adding headers and response inspection."""
additional_headers = headers_supplier()
if additional_headers:
headers = headers.copy() if headers else {}
headers.update(additional_headers)
request = http.HttpRequest(
http_transport,
postproc,
uri,
method=method,
body=body,
headers=headers,
methodId=methodId,
resumable=resumable)
if response_inspector:
request.add_response_callback(response_inspector)
return request
return builder
def set_cloud_api_user_project(cloud_api_user_project):
global _cloud_api_user_project
_cloud_api_user_project = cloud_api_user_project
def build_cloud_resource(api_base_url,
api_key=None,
credentials=None,
timeout=None,
headers_supplier=None,
response_inspector=None,
http_transport=None,
raw=False):
"""Builds an Earth Engine Cloud API resource.
Args:
api_base_url: The base URL of the cloud endpoints.
api_key: An API key that's enabled for use with the Earth Engine Cloud API.
credentials: OAuth2 credentials to use when authenticating to the API.
timeout: How long a timeout to set on requests, in seconds.
headers_supplier: A callable that will return a set of headers to be applied
to a request. Will be called once for each request.
response_inspector: A callable that will be invoked with the raw
httplib2.Response responses.
http_transport: An optional custom http_transport to use.
raw: Whether or not to return raw bytes when making method requests.
Returns:
A resource object to use to call the Cloud API.
"""
discovery_service_url = (
'{}/$discovery/rest?version={}&prettyPrint=false'
.format(api_base_url, VERSION))
if http_transport is None:
http_transport = httplib2.Http(timeout=timeout)
if credentials is not None:
http_transport = AuthorizedHttp(credentials, http=http_transport)
request_builder = _wrap_request(headers_supplier, response_inspector)
# Discovery uses json by default.
if raw:
alt_model = model.RawModel()
else:
alt_model = None
def build(**kwargs):
return discovery.build(
'earthengine',
VERSION,
discoveryServiceUrl=discovery_service_url,
developerKey=api_key,
http=http_transport,
requestBuilder=request_builder,
model=alt_model,
cache_discovery=False,
**kwargs) # pytype: disable=wrong-keyword-args
try:
# google-api-python-client made static_discovery the default in version 2,
# but it's not backward-compatible. There's no reliable way to check the
# package version, either.
resource = build(static_discovery=False)
except TypeError:
resource = build()
resource._baseUrl = api_base_url
return resource
def build_cloud_resource_from_document(discovery_document,
http_transport=None,
headers_supplier=None,
response_inspector=None):
"""Builds an Earth Engine Cloud API resource from a description of the API.
This version is intended for use in tests.
Args:
discovery_document: The description of the API.
http_transport: An HTTP transport object to use for the call.
headers_supplier: A callable that will return a set of headers to be applied
to a request. Will be called once for each request.
response_inspector: A callable that will be invoked with the raw
httplib2.Response responses.
Returns:
A resource object to use to call the Cloud API.
"""
request_builder = _wrap_request(headers_supplier, response_inspector)
return discovery.build_from_document(
discovery_document,
http=http_transport,
requestBuilder=request_builder)
def _convert_dict(to_convert,
conversions,
defaults=None,
key_warnings=False,
retain_keys=False):
"""Applies a set of conversion rules to a dict.
Args:
to_convert: A dictionary of key/value pairs to convert.
conversions: A dictionary giving the mapping from key names in "to_convert"
to how those keys and their values will be handled. Key/value pairs in
"to_convert" will be modified in a way that depends on how the key
appears in "conversions". If "to_convert" contains a key/value mapping
of "k"->"v", then:
- If "conversions" contains "k"->"X" then the result will contain
"X"->"v".
- If "conversions" contains "k"->None then the result will not contain an
entry for "k".
- If "conversions" contains "k"->("X", f) then the result will contain
"X"->f("v")
- If "conversions" does not contain an entry for "k" then the result
will not contain an entry for "k" unless retain_keys is true;
if key_warnings is True then a warning will be printed.
- If two or more distinct input keys are converted to the same output key,
one of the resulting values will appear in the result, the others
will be dropped, and a warning will be printed.
defaults: Values to insert in the result if the result of conversion does
not contain these keys.
key_warnings: Whether to print warnings for input keys that are not mapped
to anything in the output.
retain_keys: Whether or not to retain the state of dict. If false, any keys
that don't show up in the conversions dict will be dropped from result.
Returns:
The "to_convert" dict with keys renamed, values converted, and defaults
added.
"""
result = {}
for key, value in six.iteritems(to_convert):
if key in conversions:
conversion = conversions[key]
if conversion is not None:
if isinstance(conversion, tuple):
key = conversion[0]
value = conversion[1](value)
else:
key = conversion
if key in result:
warnings.warn(
'Multiple request parameters converted to {}'.format(key))
result[key] = value
elif retain_keys:
result[key] = value
elif key_warnings:
warnings.warn('Unrecognized key {} ignored'.format(key))
if defaults:
for default_key, default_value in six.iteritems(defaults):
if default_key not in result:
result[default_key] = default_value
return result
def _convert_value(value, conversions, default):
"""Converts a value using a set of value mappings.
Args:
value: The value to convert.
conversions: A dict giving the desired output for each of a set of possible
input values.
default: The value to return if the input value is not one of the ones
listed in "conversions".
Returns:
The converted value.
"""
return conversions.get(value, default)
def _convert_msec_to_timestamp(time_msec):
"""Converts a time value to a google.protobuf.Timestamp's string form.
Args:
time_msec: A time in msec since the Unix epoch.
Returns:
A string formatted like '2003-09-07T19:30:12.345Z', which is the expected
form of google.protobuf.Timestamp values.
"""
return datetime.datetime.utcfromtimestamp(
time_msec / 1000.0).isoformat() + 'Z'
def _convert_timestamp_to_msec(timestamp):
"""Converts a google.protobuf.Timestamp's string form to a time in msec.
Args:
timestamp: A string formatted like '2003-09-07T19:30:12.345Z', which is the
expected form of google.protobuf.Timestamp values.
Returns:
A time in msec since the Unix epoch.
"""
# The fractional second part is optional. Sigh.
if '.' in timestamp:
parsed_timestamp = datetime.datetime.strptime(
timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
parsed_timestamp = datetime.datetime.strptime(
timestamp, '%Y-%m-%dT%H:%M:%SZ')
return (calendar.timegm(parsed_timestamp.utctimetuple()) * 1000 +
int(parsed_timestamp.microsecond / 1000))
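# Round-trip example (illustrative):
#   _convert_msec_to_timestamp(1062963012000)  -> '2003-09-07T19:30:12Z'
#   _convert_timestamp_to_msec('2003-09-07T19:30:12.345Z') -> 1062963012345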
def _convert_bounding_box_to_geo_json(bbox):
"""Converts a lng/lat bounding box to a GeoJSON string."""
lng_min = bbox[0]
lat_min = bbox[1]
lng_max = bbox[2]
lat_max = bbox[3]
return ('{{"type":"Polygon","coordinates":'
'[[[{0},{1}],[{2},{1}],[{2},{3}],[{0},{3}],[{0},{1}]]]}}'.format(
lng_min, lat_min, lng_max, lat_max))
def convert_get_list_params_to_list_assets_params(params):
"""Converts a getList params dict to something usable with listAssets."""
return _convert_dict(
params, {
'id': ('parent', convert_asset_id_to_asset_name),
'num': 'pageSize'
}, key_warnings=True)
def convert_list_assets_result_to_get_list_result(result):
"""Converts a listAssets result to something getList can return."""
if 'assets' not in result:
return []
return [_convert_asset_for_get_list_result(i) for i in result['assets']]
def convert_get_list_params_to_list_images_params(params):
"""Converts a getList params dict to something usable with listImages."""
params = _convert_dict(
params, {
'id': ('parent', convert_asset_id_to_asset_name),
'num': 'pageSize',
'starttime': ('startTime', _convert_msec_to_timestamp),
'endtime': ('endTime', _convert_msec_to_timestamp),
'bbox': ('region', _convert_bounding_box_to_geo_json),
'region': 'region',
'filter': 'filter'
},
key_warnings=True)
# getList returns minimal information; we can filter unneeded stuff out
# server-side.
params['view'] = 'BASIC'
return params
def is_asset_root(asset_name):
return bool(re.match(ASSET_ROOT_PATTERN, asset_name))
def convert_list_images_result_to_get_list_result(result):
"""Converts a listImages result to something getList can return."""
if 'images' not in result:
return []
return [_convert_image_for_get_list_result(i) for i in result['images']]
def _convert_asset_for_get_list_result(asset):
"""Converts an EarthEngineAsset to the format returned by getList."""
result = _convert_dict(
asset, {
'name': 'id',
'type': ('type', _convert_asset_type_for_get_list_result)
},
defaults={'type': 'Unknown'})
return result
def _convert_image_for_get_list_result(asset):
"""Converts an Image to the format returned by getList."""
result = _convert_dict(
asset, {
'name': 'id',
},
defaults={'type': 'Image'})
return result
def _convert_asset_type_for_get_list_result(asset_type):
"""Converts an EarthEngineAsset.Type to the format returned by getList."""
return _convert_value(
asset_type, {
'IMAGE': 'Image',
'IMAGE_COLLECTION': 'ImageCollection',
'TABLE': 'Table',
'FOLDER': 'Folder'
}, 'Unknown')
def convert_asset_type_for_create_asset(asset_type):
"""Converts a createAsset asset type to an EarthEngineAsset.Type."""
return _convert_value(
asset_type, {
'Image': 'IMAGE',
'ImageCollection': 'IMAGE_COLLECTION',
'Table': 'TABLE',
'Folder': 'FOLDER'
}, asset_type)
def convert_asset_id_to_asset_name(asset_id):
"""Converts an internal asset ID to a Cloud API asset name.
If asset_id already matches the format 'projects/*/assets/**', it is returned
as-is.
Args:
asset_id: The asset ID to convert.
Returns:
An asset name string in the format 'projects/*/assets/**'.
"""
if re.match(ASSET_NAME_PATTERN, asset_id) or is_asset_root(asset_id):
return asset_id
elif asset_id.split('/')[0] in ['users', 'projects']:
return 'projects/earthengine-legacy/assets/{}'.format(asset_id)
else:
return 'projects/earthengine-public/assets/{}'.format(asset_id)
def split_asset_name(asset_name):
"""Splits an asset name into the parent and ID parts.
Args:
asset_name: The asset ID to split, in the form 'projects/*/assets/**'.
Returns:
The parent ('projects/*') and ID ('**') parts of the name.
"""
projects, parent, _, remainder = asset_name.split('/', 3)
return projects + '/' + parent, remainder
def convert_operation_name_to_task_id(operation_name):
"""Converts an Operation name to a task ID."""
found = re.search(r'^.*operations/(.*)$', operation_name)
return found.group(1) if found else operation_name
def convert_task_id_to_operation_name(task_id):
"""Converts a task ID to an Operation name."""
return 'projects/{}/operations/{}'.format(_cloud_api_user_project, task_id)
def convert_params_to_image_manifest(params):
"""Converts params to an ImageManifest for ingestion."""
return _convert_dict(
params, {
'id': ('name', convert_asset_id_to_asset_name),
'tilesets': ('tilesets', convert_tilesets_to_one_platform_tilesets)
},
retain_keys=True)
def convert_params_to_table_manifest(params):
"""Converts params to a TableManifest for ingestion."""
return _convert_dict(
params, {
'id': ('name', convert_asset_id_to_asset_name),
'sources': ('sources', convert_sources_to_one_platform_sources),
},
retain_keys=True)
def convert_tilesets_to_one_platform_tilesets(tilesets):
"""Converts a tileset to a one platform representation of a tileset."""
converted_tilesets = []
for tileset in tilesets:
converted_tileset = _convert_dict(
tileset,
{'sources': ('sources', convert_sources_to_one_platform_sources)},
retain_keys=True)
converted_tilesets.append(converted_tileset)
return converted_tilesets
def convert_sources_to_one_platform_sources(sources):
"""Converts the sources to one platform representation of sources."""
converted_sources = []
for source in sources:
converted_source = copy.deepcopy(source)
if 'primaryPath' in converted_source:
file_sources = [converted_source['primaryPath']]
if 'additionalPaths' in converted_source:
file_sources += converted_source['additionalPaths']
del converted_source['additionalPaths']
del converted_source['primaryPath']
converted_source['uris'] = file_sources
if 'maxError' in converted_source:
converted_source['maxErrorMeters'] = converted_source['maxError']
del converted_source['maxError']
converted_sources.append(converted_source)
return converted_sources
def encode_number_as_cloud_value(number):
# Numeric values in constantValue-style nodes end up stored in doubles. If the
# input is an integer that loses precision as a double, use the int64 slot
# ("integerValue") in ValueNode.
if (isinstance(number, six.integer_types) and float(number) != number):
return {'integerValue': str(number)}
else:
return {'constantValue': number}
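# Examples (illustrative):
#   encode_number_as_cloud_value(3)         -> {'constantValue': 3}
#   encode_number_as_cloud_value(2**60 + 1) -> {'integerValue': '1152921504606846977'}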
def convert_algorithms(algorithms):
"""Converts a ListAlgorithmsResult to the internal format.
The internal code expects a dict mapping each algorithm's name to a dict
containing:
- description: string
- returns: string
- arguments: list of dicts, each containing
- name: argument name
- type: argument type
- description: argument description (optional)
- optional: bool (optional)
- default: default value (optional)
- hidden: bool (optional)
- preview: bool (optional)
- deprecated: string containing deprecation reason (optional)
Args:
algorithms: A ListAlgorithmResult.
Returns:
A version of that algorithms list that can be interpreted by
apifunction.initialize().
"""
return dict(
_convert_algorithm(algorithm) for algorithm in algorithms['algorithms'])
def _convert_algorithm(algorithm):
"""Converts an Algorithm to the internal format."""
# Strip leading 'algorithms/' from the name.
algorithm_name = algorithm['name'][11:]
converted_algorithm = _convert_dict(
algorithm, {
'description': 'description',
'returnType': 'returns',
'arguments': ('args', _convert_algorithm_arguments),
'hidden': 'hidden',
'preview': 'preview'
},
defaults={
'description': '',
'returns': '',
'args': []
})
if algorithm.get('deprecated'):
converted_algorithm['deprecated'] = algorithm.get('deprecationReason', '')
return algorithm_name, converted_algorithm
def _convert_algorithm_arguments(args):
return [_convert_algorithm_argument(arg) for arg in args]
def _convert_algorithm_argument(arg):
return _convert_dict(
arg, {
'argumentName': 'name',
'type': 'type',
'description': 'description',
'optional': 'optional',
'defaultValue': 'default'
},
defaults={
'description': '',
'type': ''
})
def convert_to_image_file_format(format_str):
"""Converts a legacy file format string to an ImageFileFormat enum value.
Args:
format_str: A string describing an image file format that was passed to
one of the functions in ee.data that takes image file formats.
Returns:
A best guess at the corresponding ImageFileFormat enum name.
"""
if format_str is None:
return 'AUTO_JPEG_PNG'
format_str = format_str.upper()
if format_str == 'JPG':
return 'JPEG'
elif format_str == 'AUTO':
return 'AUTO_JPEG_PNG'
elif format_str == 'GEOTIFF':
return 'GEO_TIFF'
elif format_str == 'TFRECORD':
return 'TF_RECORD_IMAGE'
else:
# It's probably "JPEG" or "PNG", but might be some other supported format.
# Let the server validate it.
return format_str
def convert_to_table_file_format(format_str):
"""Converts a legacy file format string to a TableFileFormat enum value.
Args:
format_str: A string describing a table file format that was passed to
one of the functions in ee.data that takes table file formats.
Returns:
A best guess at the corresponding TableFileFormat enum name.
"""
format_str = format_str.upper()
if format_str == 'GEOJSON':
return 'GEO_JSON'
elif format_str == 'TFRECORD':
return 'TF_RECORD_TABLE'
else:
# It's probably "CSV" or "KML" or one of the others.
# Let the server validate it.
return format_str
def convert_to_band_list(bands):
"""Converts a band list, possibly as CSV, to a real list of bands.
Args:
bands: A list of strings containing band names, or a string containing
a comma-separated list of band names, or None.
Returns:
A list of band names.
"""
if bands is None:
return []
elif isinstance(bands, six.string_types):
return bands.split(',')
elif isinstance(bands, list):
return bands
else:
raise ee_exception.EEException('Invalid band list ' + bands)
def convert_to_visualization_options(params):
"""Extracts a VisualizationOptions from a param dict.
Args:
params: See ee.data.getMapId() for the description of the keys and values
that might appear here.
Returns:
A VisualizationOptions proto, in dict form.
"""
result = {}
if 'palette' in params:
palette = params['palette']
if isinstance(palette, six.string_types):
palette = palette.split(',')
result['paletteColors'] = palette
value_range = len(palette) - 1
else:
value_range = 255
ranges = []
if 'gain' in params or 'bias' in params:
if 'min' in params or 'max' in params:
raise ee_exception.EEException(
'Gain and bias can\'t be specified together with min and max')
# The Cloud API doesn't support gain/bias, only min/max. Extract and
# convert.
gains = _convert_csv_numbers_to_list(params.get('gain'))
biases = _convert_csv_numbers_to_list(params.get('bias'))
if not gains:
gains = [1.0] * len(biases)
elif not biases:
biases = [0.0] * len(gains)
elif len(gains) != len(biases):
raise ee_exception.EEException('Length of gain and bias must match.')
for gain, bias in zip(gains, biases):
# The transformation equations are
# x -> x * gain + bias
# x -> range * (x - min) / (max - min)
# Solving for (min, max) given (gain, bias) gives:
range_min = -bias / gain
range_max = value_range / gain + range_min
ranges.append({'min': range_min, 'max': range_max})
elif 'min' in params or 'max' in params:
mins = _convert_csv_numbers_to_list(params.get('min'))
maxes = _convert_csv_numbers_to_list(params.get('max'))
if not mins:
mins = [0.0] * len(maxes)
elif not maxes:
maxes = [1.0] * len(mins)
elif len(mins) != len(maxes):
raise ee_exception.EEException('Length of min and max must match.')
for range_min, range_max in zip(mins, maxes):
ranges.append({'min': range_min, 'max': range_max})
if ranges:
result['ranges'] = ranges
gammas = _convert_csv_numbers_to_list(params.get('gamma'))
if len(gammas) > 1:
raise ee_exception.EEException('Only one gamma value is supported.')
elif gammas:
result['gamma'] = {'value': gammas[0]}
return result
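# Worked example (illustrative): a four-colour palette gives value_range == 3,
# so gain=2 with bias=-1 solves to min = 0.5 and max = 2.0:
#   convert_to_visualization_options(
#       {'palette': 'red,green,blue,white', 'gain': '2', 'bias': '-1'})
#   -> {'paletteColors': ['red', 'green', 'blue', 'white'],
#       'ranges': [{'min': 0.5, 'max': 2.0}]}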
def _convert_csv_numbers_to_list(value):
"""Converts a string containing CSV numbers to a list."""
if not value:
return []
return [float(x) for x in value.split(',')]
def convert_operation_to_task(operation):
"""Converts an Operation to a legacy Task."""
result = _convert_dict(
operation['metadata'], {
'createTime': ('creation_timestamp_ms', _convert_timestamp_to_msec),
'updateTime': ('update_timestamp_ms', _convert_timestamp_to_msec),
'startTime': ('start_timestamp_ms', _convert_timestamp_to_msec),
'attempt': 'attempt',
'state': ('state', _convert_operation_state_to_task_state),
'description': 'description',
'type': 'task_type',
'destinationUris': 'destination_uris',
})
if operation.get('done'):
if 'error' in operation:
result['error_message'] = operation['error']['message']
result['id'] = convert_operation_name_to_task_id(operation['name'])
result['name'] = operation['name']
return result
def _convert_operation_state_to_task_state(state):
"""Converts a state string from an Operation to the Task equivalent."""
return _convert_value(
state, {
'PENDING': 'READY',
'RUNNING': 'RUNNING',
'CANCELLING': 'CANCEL_REQUESTED',
'SUCCEEDED': 'COMPLETED',
'CANCELLED': 'CANCELLED',
'FAILED': 'FAILED'
}, 'UNKNOWN')
def convert_iam_policy_to_acl(policy):
"""Converts an IAM Policy proto to the legacy ACL format."""
bindings = {
binding['role']: binding.get('members', [])
for binding in policy.get('bindings', [])
}
owners = bindings.get('roles/owner', [])
readers = bindings.get('roles/viewer', [])
writers = bindings.get('roles/editor', [])
if 'allUsers' in readers:
all_users_can_read = True
readers.remove('allUsers')
else:
all_users_can_read = False
result = {'owners': owners, 'readers': readers, 'writers': writers}
if all_users_can_read:
result['all_users_can_read'] = True
return result
def convert_acl_to_iam_policy(acl):
"""Converts the legacy ACL format to an IAM Policy proto."""
owners = acl.get('owners', [])
readers = acl.get('readers', [])
if acl.get('all_users_can_read', False):
readers.append('allUsers')
writers = acl.get('writers', [])
bindings = []
if owners:
bindings.append({'role': 'roles/owner', 'members': owners})
if readers:
bindings.append({'role': 'roles/viewer', 'members': readers})
if writers:
bindings.append({'role': 'roles/editor', 'members': writers})
return {'bindings': bindings}
def convert_to_grid_dimensions(dimensions):
"""Converts an input value to GridDimensions.
Args:
dimensions: May specify a single number to indicate a square shape,
or a tuple of two dimensions to indicate (width,height).
Returns:
A GridDimensions as a dict.
"""
if isinstance(dimensions, six.integer_types):
return {'width': dimensions, 'height': dimensions}
elif len(dimensions) == 1:
return {'width': dimensions[0], 'height': dimensions[0]}
else:
return {'width': dimensions[0], 'height': dimensions[1]}
|
apache-2.0
| -7,180,240,639,461,749,000
| 32.328358
| 80
| 0.652
| false
| 3.83677
| false
| false
| false
|
victorfsf/RecRecife
|
recmap/admin.py
|
1
|
2190
|
# -*- encoding: utf-8 -*-
from django.contrib import admin
from recmap.models import Endereco, Horario, Coleta, Setor, ColetaHorario, Feedback
class EnderecoAdmin(admin.ModelAdmin):
fieldsets = (
(u'Nome da Rua', {'fields': ('nome_bruto', 'nome_min', 'nome')}),
(u'Bairro / Geolocalização', {'fields': ('bairro', 'latitude', 'longitude')}),
)
list_display = ('nome', 'bairro', 'latitude', 'longitude', 'nome_bruto')
search_fields = ('nome', 'bairro', 'latitude', 'longitude', 'nome_bruto', 'nome_min')
class HorarioAdmin(admin.ModelAdmin):
fieldsets = (
(u'Horário', {'fields': ('intervalo', 'turno')}),
)
list_display = ('intervalo', 'turno',)
search_fields = ('intervalo', 'turno',)
class ColetaAdmin(admin.ModelAdmin):
fieldsets = (
(u'Informações da coleta', {'fields': ('endereco', 'setor', 'rota')}),
)
list_display = ('endereco', 'setor', 'rota',)
search_fields = ('endereco__nome', 'endereco__bairro', 'setor__nome_setor', 'setor__frequencia', 'rota',)
class ColetaHorarioAdmin(admin.ModelAdmin):
fieldsets = (
(u'Informações', {'fields': ('coleta', 'horario',)}),
)
list_display = ('coleta', 'horario',)
search_fields = ('coleta__endereco__nome', 'coleta__endereco__bairro', 'horario__turno', 'horario__intervalo')
class SetorAdmin(admin.ModelAdmin):
fieldsets = (
(u'Informações', {'fields': ('nome_setor', 'frequencia',)}),
)
list_display = ('nome_setor', 'frequencia',)
search_fields = ('nome_setor', 'frequencia',)
class FeedbackAdmin(admin.ModelAdmin):
fieldsets = (
(u'Informações', {'fields': ('enviado_por', 'email', 'situacao', 'descricao','endereco', )}),
)
list_display = ('endereco', 'enviado_por', 'email', 'situacao', 'descricao',)
search_fields = ('endereco__nome', 'nome', 'email', 'situacao', 'descricao',)
admin.site.register(Endereco, EnderecoAdmin)
admin.site.register(Horario, HorarioAdmin)
admin.site.register(Coleta, ColetaAdmin)
admin.site.register(Setor, SetorAdmin)
admin.site.register(ColetaHorario, ColetaHorarioAdmin)
admin.site.register(Feedback, FeedbackAdmin)
|
gpl-2.0
| -719,037,859,188,247,000
| 28.863014
| 114
| 0.635613
| false
| 2.840939
| false
| false
| false
|
blitzmann/Pyfa
|
gui/builtinAdditionPanes/droneView.py
|
1
|
8775
|
# =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
# noinspection PyPackageRequirements
import wx
import gui.globalEvents as GE
import gui.mainFrame
from gui.builtinMarketBrowser.events import ItemSelected, ITEM_SELECTED
from gui.display import Display
from gui.builtinViewColumns.state import State
from gui.contextMenu import ContextMenu
from gui.utils.staticHelpers import DragDropHelper
from service.fit import Fit
from service.market import Market
import gui.fitCommands as cmd
class DroneViewDrop(wx.DropTarget):
def __init__(self, dropFn, *args, **kwargs):
super(DroneViewDrop, self).__init__(*args, **kwargs)
self.dropFn = dropFn
# this is really transferring an EVE itemID
self.dropData = wx.TextDataObject()
self.SetDataObject(self.dropData)
def OnData(self, x, y, t):
if self.GetData():
dragged_data = DragDropHelper.data
data = dragged_data.split(':')
self.dropFn(x, y, data)
return t
class DroneView(Display):
DEFAULT_COLS = [
"State",
# "Base Icon",
"Base Name",
# "prop:droneDps,droneBandwidth",
"Max Range",
"Miscellanea",
"attr:maxVelocity",
"Price",
]
def __init__(self, parent):
Display.__init__(self, parent, style=wx.LC_SINGLE_SEL | wx.BORDER_NONE)
self.lastFitId = None
self.hoveredRow = None
self.hoveredColumn = None
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.mainFrame.Bind(GE.FIT_CHANGED, self.fitChanged)
self.mainFrame.Bind(ITEM_SELECTED, self.addItem)
self.Bind(wx.EVT_LEFT_DCLICK, self.removeItem)
self.Bind(wx.EVT_LEFT_DOWN, self.click)
self.Bind(wx.EVT_KEY_UP, self.kbEvent)
self.Bind(wx.EVT_MOTION, self.OnMouseMove)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
self.Bind(wx.EVT_CONTEXT_MENU, self.spawnMenu)
self.Bind(wx.EVT_LIST_BEGIN_DRAG, self.startDrag)
self.SetDropTarget(DroneViewDrop(self.handleDragDrop))
def OnLeaveWindow(self, event):
self.SetToolTip(None)
self.hoveredRow = None
self.hoveredColumn = None
event.Skip()
def OnMouseMove(self, event):
row, _, col = self.HitTestSubItem(event.Position)
if row != self.hoveredRow or col != self.hoveredColumn:
if self.ToolTip is not None:
self.SetToolTip(None)
else:
self.hoveredRow = row
self.hoveredColumn = col
if row != -1 and col != -1 and col < len(self.DEFAULT_COLS):
mod = self.drones[self.GetItemData(row)]
if self.DEFAULT_COLS[col] == "Miscellanea":
tooltip = self.activeColumns[col].getToolTip(mod)
if tooltip is not None:
self.SetToolTip(tooltip)
else:
self.SetToolTip(None)
else:
self.SetToolTip(None)
else:
self.SetToolTip(None)
event.Skip()
def kbEvent(self, event):
keycode = event.GetKeyCode()
if keycode == wx.WXK_DELETE or keycode == wx.WXK_NUMPAD_DELETE:
row = self.GetFirstSelected()
if row != -1:
drone = self.drones[self.GetItemData(row)]
self.removeDrone(drone)
event.Skip()
def startDrag(self, event):
row = event.GetIndex()
if row != -1:
data = wx.TextDataObject()
dataStr = "drone:" + str(row)
data.SetText(dataStr)
dropSource = wx.DropSource(self)
dropSource.SetData(data)
DragDropHelper.data = dataStr
dropSource.DoDragDrop()
def handleDragDrop(self, x, y, data):
"""
Handles dragging of items from various pyfa displays which support it
data is list with two indices:
data[0] is hard-coded str of originating source
data[1] is typeID or index of data we want to manipulate
"""
if data[0] == "drone": # we want to merge drones
pass
# remove merge functionality, if people complain in the next while, can add it back
# srcRow = int(data[1])
# dstRow, _ = self.HitTest((x, y))
# if srcRow != -1 and dstRow != -1:
# self._merge(srcRow, dstRow)
elif data[0] == "market":
wx.PostEvent(self.mainFrame, ItemSelected(itemID=int(data[1])))
def _merge(self, src, dst):
sFit = Fit.getInstance()
fitID = self.mainFrame.getActiveFit()
if sFit.mergeDrones(fitID, self.drones[src], self.drones[dst]):
wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=fitID))
DRONE_ORDER = ('Light Scout Drones', 'Medium Scout Drones',
'Heavy Attack Drones', 'Sentry Drones', 'Combat Utility Drones',
'Electronic Warfare Drones', 'Logistic Drones', 'Mining Drones', 'Salvage Drones')
def droneKey(self, drone):
sMkt = Market.getInstance()
groupName = sMkt.getMarketGroupByItem(drone.item).name
return (self.DRONE_ORDER.index(groupName),
drone.item.name)
def fitChanged(self, event):
sFit = Fit.getInstance()
fit = sFit.getFit(event.fitID)
self.Parent.Parent.DisablePage(self, not fit or fit.isStructure)
# Clear list and get out if current fitId is None
if event.fitID is None and self.lastFitId is not None:
self.DeleteAllItems()
self.lastFitId = None
event.Skip()
return
self.original = fit.drones if fit is not None else None
self.drones = stuff = fit.drones[:] if fit is not None else None
if stuff is not None:
stuff.sort(key=self.droneKey)
if event.fitID != self.lastFitId:
self.lastFitId = event.fitID
item = self.GetNextItem(-1, wx.LIST_NEXT_ALL, wx.LIST_STATE_DONTCARE)
if item != -1:
self.EnsureVisible(item)
self.deselectItems()
self.update(stuff)
event.Skip()
def addItem(self, event):
sFit = Fit.getInstance()
fitID = self.mainFrame.getActiveFit()
fit = sFit.getFit(fitID)
if not fit or fit.isStructure:
event.Skip()
return
if self.mainFrame.command.Submit(cmd.GuiAddDroneCommand(fitID, event.itemID)):
self.mainFrame.additionsPane.select("Drones")
event.Skip()
def removeItem(self, event):
row, _ = self.HitTest(event.Position)
if row != -1:
col = self.getColumn(event.Position)
if col != self.getColIndex(State):
drone = self.drones[self.GetItemData(row)]
self.removeDrone(drone)
def removeDrone(self, drone):
fitID = self.mainFrame.getActiveFit()
self.mainFrame.command.Submit(cmd.GuiRemoveDroneCommand(fitID, self.original.index(drone)))
def click(self, event):
event.Skip()
row, _ = self.HitTest(event.Position)
if row != -1:
col = self.getColumn(event.Position)
if col == self.getColIndex(State):
fitID = self.mainFrame.getActiveFit()
drone = self.drones[row]
self.mainFrame.command.Submit(cmd.GuiToggleDroneCommand(fitID, self.original.index(drone)))
def spawnMenu(self, event):
sel = self.GetFirstSelected()
if sel != -1:
drone = self.drones[sel]
sMkt = Market.getInstance()
sourceContext = "droneItem"
itemContext = sMkt.getCategoryByItem(drone.item).name
menu = ContextMenu.getMenu((drone,), (sourceContext, itemContext))
self.PopupMenu(menu)
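# Editor's note: the uncalled helper below is an illustrative sketch, not part of
# the original pyfa source. It shows the drag payload format exchanged above:
# startDrag() emits "drone:<row>", the market tree emits "market:<typeID>", and
# the drop target is assumed to split that string before handleDragDrop() runs.
def _example_parse_drag_payload(payload):
    """e.g. "drone:3" -> ("drone", 3), "market:1234" -> ("market", 1234)."""
    source, _, value = payload.partition(":")
    return source, int(value)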
|
gpl-3.0
| 3,243,596,477,556,555,000
| 33.960159
| 107
| 0.587692
| false
| 3.780698
| false
| false
| false
|
codelv/enaml-native
|
src/enamlnative/android/android_toast.py
|
1
|
5004
|
"""
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on Sept 18, 2017
@author: jrm
"""
from atom.api import Typed, Bool, set_default
from .bridge import JavaBridgeObject, JavaMethod, JavaStaticMethod
from enamlnative.widgets.toast import ProxyToast
from .android_toolkit_object import AndroidToolkitObject
class Toast(JavaBridgeObject):
#: Show the view for the specified duration.
__nativeclass__ = set_default('android.widget.Toast')
__signature__ = set_default(('android.content.Context',))
makeText = JavaStaticMethod('android.content.Context',
'java.lang.CharSequence', 'int',
returns='android.widget.Toast')
show = JavaMethod()
cancel = JavaMethod()
setDuration = JavaMethod('int')
setGravity = JavaMethod('int', 'int', 'int')
setText = JavaMethod('java.lang.CharSequence')
setView = JavaMethod('android.view.View')
class AndroidToast(AndroidToolkitObject, ProxyToast):
""" An Android implementation of an Enaml ProxyToast.
"""
#: A reference to the widget created by the proxy.
toast = Typed(Toast)
#: Made toast
#: Android doesn't let us simply update the text of an existing toast
    #: unless it was created with "makeText"
made_toast = Bool()
# -------------------------------------------------------------------------
# Initialization API
# -------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying widget.
        A toast is not a subclass of View, so we don't assign it to `widget`,
        or children would try to use it as their parent (which crashes).
"""
d = self.declaration
if d.text:
Toast.makeText(self.get_context(),
d.text, 1).then(self.on_make_toast)
self.made_toast = True
else:
self.toast = Toast(self.get_context())
def init_widget(self):
""" Our widget may not exist yet so we have to diverge from the normal
way of doing initialization. See `update_widget`
"""
if not self.toast:
return
super(AndroidToast, self).init_widget()
d = self.declaration
if not self.made_toast:
#: Set it to LONG
self.toast.setDuration(1)
if d.gravity:
self.set_gravity(d.gravity)
if d.show:
self.set_show(d.show)
def init_layout(self):
""" If a view is given show it """
super(AndroidToast, self).init_layout()
if not self.made_toast:
for view in self.child_widgets():
self.toast.setView(view)
break
def child_added(self, child):
""" Overwrite the view """
view = child.widget
if view is not None:
self.toast.setView(view)
def on_make_toast(self, ref):
""" Using Toast.makeToast returns async so we have to initialize it
later.
"""
d = self.declaration
self.toast = Toast(__id__=ref)
self.init_widget()
def _refresh_show(self, dt):
""" While the toast.show is true, keep calling .show() until the
duration `dt` expires.
Parameters
------------
dt: int
Time left to keep showing
"""
d = self.declaration
if dt <= 0:
#: Done, hide
d.show = False
elif d.show:
#: If user didn't cancel it, keep it alive
self.toast.show()
t = min(1000, dt)
app = self.get_context()
app.timed_call(t, self._refresh_show, dt-t)
# -------------------------------------------------------------------------
# ProxyToast API
# -------------------------------------------------------------------------
def set_text(self, text):
#: Only possible if a custom view is not used
if self.made_toast:
self.toast.setText(text)
def set_duration(self, duration):
""" Android for whatever stupid reason doesn't let you set the time
it only allows 1-long or 0-short. So we have to repeatedly call show
until the duration expires, hence this method does nothing see
`set_show`.
"""
pass
def set_show(self, show):
if show:
d = self.declaration
self.toast.show()
#: Get app
app = self.get_context()
t = min(1000, d.duration)
app.timed_call(t, self._refresh_show, d.duration-t)
else:
self.toast.cancel()
def set_layout(self, layout):
pass
def set_gravity(self, gravity):
d = self.declaration
self.toast.setGravity(gravity, int(d.x), int(d.y))
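# Editor's note: illustrative, uncalled sketch (not part of the original module)
# showing how set_show()/_refresh_show() above emulate an arbitrary duration by
# re-showing the toast in steps of at most 1000 ms.
def _example_show_steps(duration_ms):
    """Return the timed_call intervals used for a hypothetical duration,
    e.g. _example_show_steps(2500) -> [1000, 1000, 500]."""
    steps = []
    remaining = duration_ms
    while remaining > 0:
        t = min(1000, remaining)
        steps.append(t)
        remaining -= t
    return steps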
|
mit
| -5,463,170,937,494,007,000
| 30.28125
| 79
| 0.540568
| false
| 4.205042
| false
| false
| false
|
demonchild2112/travis-test
|
grr/server/grr_response_server/databases/mem_events.py
|
1
|
2164
|
#!/usr/bin/env python
"""The in memory database methods for event handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
class InMemoryDBEventMixin(object):
"""InMemoryDB mixin for event handling."""
@utils.Synchronized
def ReadAPIAuditEntries(self,
username=None,
router_method_names=None,
min_timestamp=None,
max_timestamp=None):
"""Returns audit entries stored in the database."""
results = []
for entry in self.api_audit_entries:
if username is not None and entry.username != username:
continue
if (router_method_names and
entry.router_method_name not in router_method_names):
continue
if min_timestamp is not None and entry.timestamp < min_timestamp:
continue
if max_timestamp is not None and entry.timestamp > max_timestamp:
continue
results.append(entry)
return sorted(results, key=lambda entry: entry.timestamp)
@utils.Synchronized
def CountAPIAuditEntriesByUserAndDay(self,
min_timestamp=None,
max_timestamp=None):
"""Returns audit entry counts grouped by user and calendar day."""
results = collections.Counter()
for entry in self.api_audit_entries:
if min_timestamp is not None and entry.timestamp < min_timestamp:
continue
if max_timestamp is not None and entry.timestamp > max_timestamp:
continue
# Truncate DateTime by removing the time-part to allow grouping by date.
day = rdfvalue.RDFDatetime.FromDate(entry.timestamp.AsDatetime().date())
results[(entry.username, day)] += 1
return dict(results)
@utils.Synchronized
def WriteAPIAuditEntry(self, entry):
"""Writes an audit entry to the database."""
copy = entry.Copy()
copy.timestamp = rdfvalue.RDFDatetime.Now()
self.api_audit_entries.append(copy)
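# Editor's note: illustrative, uncalled sketch (not part of the original module).
# It mirrors the grouping in CountAPIAuditEntriesByUserAndDay above using plain
# datetime objects instead of RDFDatetime, to show how truncating the timestamp
# to a calendar day yields per-user, per-day counts.
def _example_count_by_user_and_day(entries):
    """entries: iterable of (username, datetime.datetime) pairs."""
    counts = collections.Counter()
    for username, timestamp in entries:
        day = timestamp.date()  # drop the time part to group by calendar day
        counts[(username, day)] += 1
    return dict(counts)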
|
apache-2.0
| -6,057,583,941,528,205,000
| 31.298507
| 78
| 0.649261
| false
| 4.489627
| false
| false
| false
|
thinkopensolutions/server-tools
|
users_ldap_populate/models/users_ldap.py
|
1
|
2682
|
# -*- coding: utf-8 -*-
# © 2012 Therp BV (<http://therp.nl>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/gpl.html).
import re
from odoo import models, api, _
from odoo.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
try:
from ldap.filter import filter_format
except ImportError:
_logger.debug('Can not `from ldap.filter import filter_format`.')
class CompanyLDAP(models.Model):
_inherit = 'res.company.ldap'
@api.multi
def action_populate(self):
"""
Prepopulate the user table from one or more LDAP resources.
Obviously, the option to create users must be toggled in
the LDAP configuration.
Return the number of users created (as far as we can tell).
"""
users_pool = self.env['res.users']
users_no_before = users_pool.search_count([])
logger = logging.getLogger('orm.ldap')
logger.debug("action_populate called on res.company.ldap ids %s",
self.ids)
for conf in self.get_ldap_dicts():
if not conf['create_user']:
continue
attribute_match = re.search(
r'([a-zA-Z_]+)=\%s', conf['ldap_filter'])
if attribute_match:
login_attr = attribute_match.group(1)
else:
raise UserError(
_("No login attribute found: "
"Could not extract login attribute from filter %s") %
conf['ldap_filter'])
ldap_filter = filter_format(conf['ldap_filter'] % '*', ())
for result in self.query(conf, ldap_filter.encode('utf-8')):
self.get_or_create_user(conf, result[1][login_attr][0], result)
users_no_after = users_pool.search_count([])
users_created = users_no_after - users_no_before
logger.debug("%d users created", users_created)
return users_created
@api.multi
def populate_wizard(self):
"""
GUI wrapper for the populate method that reports back
the number of users created.
"""
if not self:
return
wizard_obj = self.env['res.company.ldap.populate_wizard']
res_id = wizard_obj.create({'ldap_id': self.id}).id
return {
'name': wizard_obj._description,
'view_type': 'form',
'view_mode': 'form',
'res_model': wizard_obj._name,
'domain': [],
'context': self.env.context,
'type': 'ir.actions.act_window',
'target': 'new',
'res_id': res_id,
'nodestroy': True,
}
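# Editor's note: illustrative, uncalled sketch (not part of the original module)
# showing how action_populate() above extracts the login attribute from an LDAP
# filter. For a hypothetical filter "(uid=%s)" the regex captures "uid".
def _example_login_attribute(ldap_filter):
    match = re.search(r'([a-zA-Z_]+)=\%s', ldap_filter)
    return match.group(1) if match else None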
|
agpl-3.0
| -8,825,716,694,864,436,000
| 32.098765
| 79
| 0.556509
| false
| 3.995529
| false
| false
| false
|
kimus/django-blocks
|
blocks/migrations/0006_auto__chg_field_menu_slug.py
|
1
|
6647
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Menu.slug'
db.alter_column(u'blocks_menu', 'slug', self.gf('blocks.fields.SlugURLField')(max_length=200, null=True))
def backwards(self, orm):
# Changing field 'Menu.slug'
db.alter_column(u'blocks_menu', 'slug', self.gf('blocks.fields.SlugURLField')(default='', max_length=200))
models = {
u'blocks.menu': {
'Meta': {'object_name': 'Menu'},
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('blocks.fields.OrderField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['blocks.Menu']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sites.Site']", 'db_index': 'True', 'symmetrical': 'False'}),
'slug': ('blocks.fields.SlugURLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'})
},
u'blocks.menutranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'MenuTranslation', 'db_table': "u'blocks_menu_translation'"},
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['blocks.Menu']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
u'blocks.page': {
'Meta': {'ordering': "['url', 'order']", 'object_name': 'Page'},
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_relative': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'menu': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('blocks.fields.OrderField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sites.Site']", 'db_index': 'True', 'symmetrical': 'False'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
u'blocks.pagetranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'PageTranslation', 'db_table': "u'blocks_page_translation'"},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['blocks.Page']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
u'blocks.promotable': {
'Meta': {'object_name': 'Promotable'},
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'promoted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sites.Site']", 'db_index': 'True', 'symmetrical': 'False'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'blocks.template': {
'Meta': {'object_name': 'Template'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['blocks']
|
mit
| 4,874,328,548,890,864,000
| 70.483871
| 154
| 0.546863
| false
| 3.579429
| false
| false
| false
|
spirali/nukecon
|
src/base/structure.py
|
1
|
10222
|
import logging
import os.path
from base import paths
from base import utils
import xml.etree.ElementTree as xml
import itertools
import copy
GAMMA_LIMITS = [ 30, 90, 150, 210, 270, 330, 9999 ]
GAMMA_NAMES = [ "sp", "+sc", "+ac", "ap", "-ac", "-sc", "sp" ]
DIRECTION_LIMITS = [ 45, 135, 225, 315 ]
DIRECTION_NAMES = [ "North", "East", "South", "West" ]
class Result:
def __init__(self):
self.gamma = None
self.p = None
self.tm = None
self.synanti = None
self.mixed_results = 1
@property
def dir_index(self):
for i, limit in enumerate(DIRECTION_LIMITS):
if self.p < limit:
return i
return 0
@property
def gamma_index(self):
for i, limit in enumerate(GAMMA_LIMITS):
if self.gamma < limit:
return i
else:
raise Exception("Invalid value")
@property
def dir_name(self):
return DIRECTION_NAMES[self.dir_index]
@property
def gamma_name(self):
return GAMMA_NAMES[self.gamma_index]
def to_element(self):
e = xml.Element("result")
e.set("gamma", str(self.gamma))
e.set("p", str(self.p))
e.set("tm", str(self.tm))
e.set("synanti", str(self.synanti))
return e
@classmethod
def from_element(cls, e):
result = cls()
result.gamma = float(e.get("gamma"))
result.p = float(e.get("p"))
result.tm = float(e.get("tm"))
result.synanti = float(e.get("synanti"))
return result
class Chain:
def __init__(self, id):
self.id = id
self.ec_numbers = []
self.compound = None
self.results = []
def add_result(self, result):
self.results.append(result)
@property
def ec_numbers_str(self):
return ", ".join(self.ec_numbers)
def to_element(self):
e = xml.Element("chain")
e.set("id", self.id)
e.set("compound", self.compound)
for ec_no in self.ec_numbers:
e2 = xml.Element("ec-number")
e2.text = str(ec_no)
e.append(e2)
for result in self.results:
e.append(result.to_element())
return e
@classmethod
def from_element(cls, element):
chain = cls(element.get("id"))
chain.ec_numbers = [ e.text for e in element.findall("ec-number") ]
chain.compound = element.get("compound")
chain.results = [ Result.from_element(e) for e in element.findall("result") ]
return chain
def avg_results(results):
r = Result()
l = len(results)
r.mixed_results = l
r.gamma = (sum(s.gamma for s in results) % 360.0) / l
r.tm = (sum(s.tm for s in results) % 360.0) / l
r.p = (sum(s.p for s in results) % 360.0) / l
return r
def angle_diff(a, b):
d = abs(a - b)
if d > 180.0:
return d - 180.0
else:
return d
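# Editor's note: illustrative, uncalled sketch (not part of the original module)
# showing how the limit tables above bin an angle: the first limit the value
# falls below selects the name, and P angles of 315 degrees or more wrap back to
# "North" (index 0), exactly as in Result.dir_index.
def _example_direction_name(p):
    for i, limit in enumerate(DIRECTION_LIMITS):
        if p < limit:
            return DIRECTION_NAMES[i]
    return DIRECTION_NAMES[0]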
def join_chains(chains, angle_limit):
def key(v):
return v[1].p
results = []
for c in chains:
results.extend((c, r) for r in c.results)
if not results:
return results
results.sort(key=key)
for n in xrange(1, len(results) + 1):
best_angle = 360.0
best_partition = None
for partition in utils.make_partitions(results, n):
angle = 0
for s in partition:
a = sum(angle_diff(s[i-1][1].p, s[i][1].p) for i in xrange(1, len(s)))
if a > angle:
angle = a
if angle < best_angle:
best_angle = angle
best_partition = partition
if best_angle <= angle_limit:
break
result = []
for s in best_partition:
chains = list(set(c for c, r in s))
chains.sort(key=lambda c: c.id)
chain = Chain(",".join(c.id for c in chains))
chain.results = [ avg_results([r for c, r, in s]) ]
chain.ec_numbers = chains[0].ec_numbers
chain.compound = chains[0].compound
result.append(chain)
return result
class Structure:
def __init__(self, id):
self.id = id
self.downloaded = False
self.resolution = None
self.exp_technique = None
self.title = None
self.chains = []
@property
def filename(self):
return os.path.join(paths.DATA,
self.id[:2].lower(),
"pdb{0}.ent".format(self.id.lower()))
def get_chain(self, id):
for chain in self.chains:
if chain.id == id:
return chain
def join_chains(self, angle_limit):
s = copy.copy(self)
if self.chains:
s.chains = join_chains(self.chains, angle_limit)
return s
def to_element(self):
e = xml.Element("structure")
e.set("id", str(self.id))
if self.resolution is not None:
e.set("resolution", str(self.resolution))
e.set("exp-technique", self.exp_technique)
e.set("title", self.title)
for chain in self.chains:
e.append(chain.to_element())
return e
def fill_download_info(self):
self.downloaded = os.path.isfile(self.filename)
def strip_empty_chains(self):
s = copy.copy(self)
s.chains = [ chain for chain in self.chains if chain.results ]
return s
@classmethod
def from_datarow(cls, row):
id, chains = row
id, chain_id, title, compound, resolution, exp_technique, ec_no \
= chains[0]
s = cls(id)
try:
s.resolution = float(resolution)
except ValueError:
s.resolution = None
s.exp_technique = exp_technique
s.title = title
for c in chains:
id, chain_id, t, c, resolution, exp_technique, ec_no = c
assert t == title
chain = Chain(chain_id)
chain.compound = c
if ec_no:
chain.ec_numbers = ec_no.split("#")
s.chains.append(chain)
return s
@classmethod
def from_element(cls, element):
s = cls(element.get("id"))
resolution = element.get("resolution", None)
if resolution is not None:
s.resolution = float(resolution)
s.exp_technique = element.get("exp-technique")
s.title = element.get("title", None)
s.chains = [ Chain.from_element(e) for e in element.findall("chain") ]
return s
class StructureList:
def __init__(self, datarows=None, xmlfile=None, structures=None):
if structures is None:
structures = []
self.structures = structures
if datarows is not None:
for row in datarows:
self.structures.append(Structure.from_datarow(row))
if xmlfile is not None:
try:
tree = xml.parse(xmlfile)
except Exception:
logging.debug("File with structures not found")
return
for e in tree.getroot():
self.structures.append(Structure.from_element(e))
def get_missing(self, slist):
my = set(s.id for s in self.structures)
other = set(s.id for s in slist.structures)
diff = other - my
result = []
for s in slist.structures:
if s.id in diff:
result.append(s)
return StructureList(structures=result)
def add(self, slist):
self.structures.extend(slist.structures)
def save(self, filename):
root = xml.Element("structures")
for s in self.structures:
root.append(s.to_element())
tree = xml.ElementTree(root)
tree.write(filename)
def get_ids(self):
return [ s.id for s in self.structures]
def compare(self, other):
my_ids = frozenset(self.get_ids())
other_ids = frozenset(other.get_ids())
return len(my_ids - other_ids), len(other_ids - my_ids)
def make_resolution_stats(self):
resolution_stats = [ 0, 0, 0, 0, 0 ]
for s in self.structures:
if s.resolution is None:
resolution_stats[0] += 1
elif s.resolution <= 1.0:
resolution_stats[1] += 1
elif s.resolution <= 2.0:
resolution_stats[2] += 1
elif s.resolution <= 3.0:
resolution_stats[3] += 1
else:
resolution_stats[4] += 1
return resolution_stats
def filter(self, max_resolution=None):
structures = self.structures
if max_resolution is not None:
structures = (s for s in structures
if s.resolution and
s.resolution <= max_resolution)
return StructureList(structures=list(structures))
def filter_downloaded(self):
structures = [ s for s in self.structures if s.downloaded ]
return StructureList(structures=structures)
def filter_not_downloaded(self):
structures = [ s for s in self.structures if not s.downloaded ]
return StructureList(structures=structures)
def fill_download_info(self):
for s in self.structures:
s.fill_download_info()
def filter_with_results(self):
structures = [ s for s in self.structures
if any(c.results for c in s.chains) ]
return StructureList(structures=structures)
def join_chains(self, angle_limit):
structures = [ s.join_chains(angle_limit) for s in self.structures ]
return StructureList(structures=structures)
def strip_empty_chains(self):
return StructureList(
structures=[ s.strip_empty_chains() for s in self.structures ])
@property
def chains(self):
return itertools.chain.from_iterable(s.chains for s in self.structures)
@property
def results(self):
return itertools.chain.from_iterable(c.results for c in self.chains)
def make_table(self):
return []
def __iter__(self):
return iter(self.structures)
def __len__(self):
return len(self.structures)
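# Editor's note: illustrative, uncalled sketch (not part of the original module)
# showing how the StructureList filters above are meant to be chained; the
# "structures.xml" path and the 30-degree angle limit are hypothetical.
def _example_pipeline():
    slist = StructureList(xmlfile="structures.xml")
    slist.fill_download_info()
    usable = slist.filter(max_resolution=2.0).filter_downloaded()
    return usable.filter_with_results().join_chains(angle_limit=30.0)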
|
bsd-3-clause
| 5,950,072,718,312,121,000
| 27.794366
| 86
| 0.553414
| false
| 3.780325
| false
| false
| false
|
rahulunair/nova
|
nova/conductor/tasks/live_migrate.py
|
1
|
27649
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_messaging as messaging
import six
from nova import availability_zones
from nova.compute import power_state
from nova.compute import utils as compute_utils
from nova.conductor.tasks import base
from nova.conductor.tasks import migrate
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import neutron
from nova import objects
from nova.objects import fields as obj_fields
from nova.objects import migrate_data as migrate_data_obj
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
def supports_vif_related_pci_allocations(context, host):
"""Checks if the compute host service is new enough to support
VIF related PCI allocation during live migration
:param context: The user request context.
:param host: The nova-compute host to check.
:returns: True if the compute host is new enough to support vif related
PCI allocations
"""
svc = objects.Service.get_by_host_and_binary(context, host, 'nova-compute')
return svc.version >= 36
class LiveMigrationTask(base.TaskBase):
def __init__(self, context, instance, destination,
block_migration, disk_over_commit, migration, compute_rpcapi,
servicegroup_api, query_client, report_client,
request_spec=None):
super(LiveMigrationTask, self).__init__(context, instance)
self.destination = destination
self.block_migration = block_migration
self.disk_over_commit = disk_over_commit
self.migration = migration
self.source = instance.host
self.migrate_data = None
self.limits = None
self.compute_rpcapi = compute_rpcapi
self.servicegroup_api = servicegroup_api
self.query_client = query_client
self.report_client = report_client
self.request_spec = request_spec
self._source_cn = None
self._held_allocations = None
self.network_api = neutron.API()
def _execute(self):
self._check_instance_is_active()
self._check_instance_has_no_numa()
self._check_host_is_up(self.source)
self._source_cn, self._held_allocations = (
# NOTE(danms): This may raise various exceptions, which will
# propagate to the API and cause a 500. This is what we
# want, as it would indicate internal data structure corruption
# (such as missing migrations, compute nodes, etc).
migrate.replace_allocation_with_migration(self.context,
self.instance,
self.migration))
if not self.destination:
# Either no host was specified in the API request and the user
# wants the scheduler to pick a destination host, or a host was
            # specified but the user is not forcing it, so they want the scheduler
# filters to run on the specified host, like a scheduler hint.
self.destination, dest_node, self.limits = self._find_destination()
else:
# This is the case that the user specified the 'force' flag when
# live migrating with a specific destination host so the scheduler
# is bypassed. There are still some minimal checks performed here
# though.
source_node, dest_node = self._check_requested_destination()
# Now that we're semi-confident in the force specified host, we
# need to copy the source compute node allocations in Placement
# to the destination compute node. Normally select_destinations()
# in the scheduler would do this for us, but when forcing the
# target host we don't call the scheduler.
# TODO(mriedem): Call select_destinations() with a
# skip_filters=True flag so the scheduler does the work of claiming
# resources on the destination in Placement but still bypass the
# scheduler filters, which honors the 'force' flag in the API.
# This raises NoValidHost which will be handled in
# ComputeTaskManager.
# NOTE(gibi): consumer_generation = None as we expect that the
# source host allocation is held by the migration therefore the
# instance is a new, empty consumer for the dest allocation. If
# this assumption fails then placement will return consumer
# generation conflict and this call raise a AllocationUpdateFailed
# exception. We let that propagate here to abort the migration.
scheduler_utils.claim_resources_on_destination(
self.context, self.report_client,
self.instance, source_node, dest_node,
source_allocations=self._held_allocations,
consumer_generation=None)
# dest_node is a ComputeNode object, so we need to get the actual
# node name off it to set in the Migration object below.
dest_node = dest_node.hypervisor_hostname
self.instance.availability_zone = (
availability_zones.get_host_availability_zone(
self.context, self.destination))
self.migration.source_node = self.instance.node
self.migration.dest_node = dest_node
self.migration.dest_compute = self.destination
self.migration.save()
# TODO(johngarbutt) need to move complexity out of compute manager
# TODO(johngarbutt) disk_over_commit?
return self.compute_rpcapi.live_migration(self.context,
host=self.source,
instance=self.instance,
dest=self.destination,
block_migration=self.block_migration,
migration=self.migration,
migrate_data=self.migrate_data)
def rollback(self, ex):
# TODO(johngarbutt) need to implement the clean up operation
# but this will make sense only once we pull in the compute
# calls, since this class currently makes no state changes,
# except to call the compute method, that has no matching
# rollback call right now.
if self._held_allocations:
migrate.revert_allocation_for_migration(self.context,
self._source_cn,
self.instance,
self.migration)
def _check_instance_is_active(self):
if self.instance.power_state not in (power_state.RUNNING,
power_state.PAUSED):
raise exception.InstanceInvalidState(
instance_uuid=self.instance.uuid,
attr='power_state',
state=power_state.STATE_MAP[self.instance.power_state],
method='live migrate')
def _check_instance_has_no_numa(self):
"""Prevent live migrations of instances with NUMA topologies.
TODO(artom) Remove this check in compute RPC 6.0.
"""
if not self.instance.numa_topology:
return
# Only KVM (libvirt) supports NUMA topologies with CPU pinning;
# HyperV's vNUMA feature doesn't allow specific pinning
hypervisor_type = objects.ComputeNode.get_by_host_and_nodename(
self.context, self.source, self.instance.node).hypervisor_type
# KVM is not a hypervisor, so when using a virt_type of "kvm" the
# hypervisor_type will still be "QEMU".
if hypervisor_type.lower() != obj_fields.HVType.QEMU:
return
# We're fully upgraded to a version that supports NUMA live
# migration, carry on.
if objects.Service.get_minimum_version(
self.context, 'nova-compute') >= 40:
return
if CONF.workarounds.enable_numa_live_migration:
LOG.warning(
'Instance has an associated NUMA topology, cell contains '
'compute nodes older than train, but the '
'enable_numa_live_migration workaround is enabled. Live '
'migration will not be NUMA-aware. The instance NUMA '
'topology, including related attributes such as CPU pinning, '
'huge page and emulator thread pinning information, will not '
'be recalculated. See bug #1289064 for more information.',
instance=self.instance)
else:
raise exception.MigrationPreCheckError(
reason='Instance has an associated NUMA topology, cell '
'contains compute nodes older than train, and the '
'enable_numa_live_migration workaround is disabled. '
'Refusing to perform the live migration, as the '
'instance NUMA topology, including related attributes '
'such as CPU pinning, huge page and emulator thread '
'pinning information, cannot be recalculated. See '
'bug #1289064 for more information.')
def _check_can_migrate_pci(self, src_host, dest_host):
"""Checks that an instance can migrate with PCI requests.
        At the moment this is supported only if:
1. Instance contains VIF related PCI requests.
2. Neutron supports multiple port binding extension.
3. Src and Dest host support VIF related PCI allocations.
"""
if self.instance.pci_requests is None or not len(
self.instance.pci_requests.requests):
return
for pci_request in self.instance.pci_requests.requests:
if pci_request.source != objects.InstancePCIRequest.NEUTRON_PORT:
# allow only VIF related PCI requests in live migration.
raise exception.MigrationPreCheckError(
reason= "non-VIF related PCI requests for instance "
"are not allowed for live migration.")
# All PCI requests are VIF related, now check neutron,
# source and destination compute nodes.
if not self.network_api.supports_port_binding_extension(
self.context):
raise exception.MigrationPreCheckError(
reason="Cannot live migrate VIF with related PCI, Neutron "
"does not support required port binding extension.")
if not (supports_vif_related_pci_allocations(self.context,
src_host) and
supports_vif_related_pci_allocations(self.context,
dest_host)):
raise exception.MigrationPreCheckError(
reason="Cannot live migrate VIF with related PCI, "
"source and destination nodes do not support "
"the operation.")
def _check_host_is_up(self, host):
service = objects.Service.get_by_compute_host(self.context, host)
if not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host)
def _check_requested_destination(self):
"""Performs basic pre-live migration checks for the forced host.
:returns: tuple of (source ComputeNode, destination ComputeNode)
"""
self._check_destination_is_not_source()
self._check_host_is_up(self.destination)
self._check_destination_has_enough_memory()
source_node, dest_node = self._check_compatible_with_source_hypervisor(
self.destination)
# NOTE(gibi): This code path is used when the live migration is forced
# to a target host and skipping the scheduler. Such operation is
# rejected for servers with nested resource allocations since
# I7cbd5d9fb875ebf72995362e0b6693492ce32051. So here we can safely
# assume that the provider mapping is empty.
self._call_livem_checks_on_host(self.destination, {})
# Make sure the forced destination host is in the same cell that the
# instance currently lives in.
# NOTE(mriedem): This can go away if/when the forced destination host
# case calls select_destinations.
source_cell_mapping = self._get_source_cell_mapping()
dest_cell_mapping = self._get_destination_cell_mapping()
if source_cell_mapping.uuid != dest_cell_mapping.uuid:
raise exception.MigrationPreCheckError(
reason=(_('Unable to force live migrate instance %s '
'across cells.') % self.instance.uuid))
return source_node, dest_node
def _check_destination_is_not_source(self):
if self.destination == self.source:
raise exception.UnableToMigrateToSelf(
instance_id=self.instance.uuid, host=self.destination)
def _check_destination_has_enough_memory(self):
compute = self._get_compute_info(self.destination)
free_ram_mb = compute.free_ram_mb
total_ram_mb = compute.memory_mb
mem_inst = self.instance.memory_mb
# NOTE(sbauza): Now the ComputeNode object reports an allocation ratio
# that can be provided by the compute_node if new or by the controller
ram_ratio = compute.ram_allocation_ratio
# NOTE(sbauza): Mimic the RAMFilter logic in order to have the same
# ram validation
avail = total_ram_mb * ram_ratio - (total_ram_mb - free_ram_mb)
if not mem_inst or avail <= mem_inst:
instance_uuid = self.instance.uuid
dest = self.destination
reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
"Lack of memory(host:%(avail)s <= "
"instance:%(mem_inst)s)")
raise exception.MigrationPreCheckError(reason=reason % dict(
instance_uuid=instance_uuid, dest=dest, avail=avail,
mem_inst=mem_inst))
def _get_compute_info(self, host):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
self.context, host)
def _check_compatible_with_source_hypervisor(self, destination):
source_info = self._get_compute_info(self.source)
destination_info = self._get_compute_info(destination)
source_type = source_info.hypervisor_type
destination_type = destination_info.hypervisor_type
if source_type != destination_type:
raise exception.InvalidHypervisorType()
source_version = source_info.hypervisor_version
destination_version = destination_info.hypervisor_version
if source_version > destination_version:
raise exception.DestinationHypervisorTooOld()
return source_info, destination_info
def _call_livem_checks_on_host(self, destination, provider_mapping):
self._check_can_migrate_pci(self.source, destination)
try:
self.migrate_data = self.compute_rpcapi.\
check_can_live_migrate_destination(self.context, self.instance,
destination, self.block_migration, self.disk_over_commit,
self.migration, self.limits)
except messaging.MessagingTimeout:
msg = _("Timeout while checking if we can live migrate to host: "
"%s") % destination
raise exception.MigrationPreCheckError(msg)
# Check to see that neutron supports the binding-extended API.
if self.network_api.supports_port_binding_extension(self.context):
if 'vifs' not in self.migrate_data:
# migrate data vifs were not constructed in dest compute
# during check_can_live_migrate_destination, construct a
# skeleton to be updated after port binding.
# TODO(adrianc): This can be removed once we move to U release
self.migrate_data.vifs = migrate_data_obj.VIFMigrateData.\
create_skeleton_migrate_vifs(
self.instance.get_network_info())
bindings = self._bind_ports_on_destination(
destination, provider_mapping)
self._update_migrate_vifs_from_bindings(self.migrate_data.vifs,
bindings)
@staticmethod
def _get_port_profile_from_provider_mapping(port_id, provider_mappings):
if port_id in provider_mappings:
# NOTE(gibi): In the resource provider mapping there can be
# more than one RP fulfilling a request group. But resource
# requests of a Neutron port is always mapped to a
# numbered request group that is always fulfilled by one
# resource provider. So we only pass that single RP UUID
# here.
return {'allocation': provider_mappings[port_id][0]}
else:
return {}
def _bind_ports_on_destination(self, destination, provider_mappings):
LOG.debug('Start binding ports on destination host: %s', destination,
instance=self.instance)
# Bind ports on the destination host; returns a dict, keyed by
# port ID, of a new destination host port binding dict per port
# that was bound. This information is then stuffed into the
# migrate_data.
try:
# NOTE(adrianc): migrate_data.vifs was partially filled
# by destination compute if compute is new enough.
# if that is the case, it may have updated the required port
# profile for the destination node (e.g new PCI address if SR-IOV)
# perform port binding against the requested profile
ports_profile = {}
for mig_vif in self.migrate_data.vifs:
profile = mig_vif.profile if 'profile_json' in mig_vif else {}
# NOTE(gibi): provider_mappings also contribute to the
# binding profile of the ports if the port has resource
# request. So we need to merge the profile information from
# both sources.
profile.update(
self._get_port_profile_from_provider_mapping(
mig_vif.port_id, provider_mappings))
if profile:
ports_profile[mig_vif.port_id] = profile
bindings = self.network_api.bind_ports_to_host(
context=self.context, instance=self.instance, host=destination,
vnic_types=None, port_profiles=ports_profile)
except exception.PortBindingFailed as e:
# Port binding failed for that host, try another one.
raise exception.MigrationPreCheckError(
reason=e.format_message())
return bindings
def _update_migrate_vifs_from_bindings(self, migrate_vifs, bindings):
for migrate_vif in migrate_vifs:
for attr_name, attr_val in bindings[migrate_vif.port_id].items():
setattr(migrate_vif, attr_name, attr_val)
def _get_source_cell_mapping(self):
"""Returns the CellMapping for the cell in which the instance lives
:returns: nova.objects.CellMapping record for the cell where
the instance currently lives.
:raises: MigrationPreCheckError - in case a mapping is not found
"""
try:
return objects.InstanceMapping.get_by_instance_uuid(
self.context, self.instance.uuid).cell_mapping
except exception.InstanceMappingNotFound:
raise exception.MigrationPreCheckError(
reason=(_('Unable to determine in which cell '
'instance %s lives.') % self.instance.uuid))
def _get_destination_cell_mapping(self):
"""Returns the CellMapping for the destination host
:returns: nova.objects.CellMapping record for the cell where
the destination host is mapped.
:raises: MigrationPreCheckError - in case a mapping is not found
"""
try:
return objects.HostMapping.get_by_host(
self.context, self.destination).cell_mapping
except exception.HostMappingNotFound:
raise exception.MigrationPreCheckError(
reason=(_('Unable to determine in which cell '
'destination host %s lives.') % self.destination))
def _get_request_spec_for_select_destinations(self, attempted_hosts=None):
"""Builds a RequestSpec that can be passed to select_destinations
Used when calling the scheduler to pick a destination host for live
migrating the instance.
:param attempted_hosts: List of host names to ignore in the scheduler.
This is generally at least seeded with the source host.
:returns: nova.objects.RequestSpec object
"""
request_spec = self.request_spec
# NOTE(sbauza): Force_hosts/nodes needs to be reset
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
port_res_req = (
self.network_api.get_requested_resource_for_instance(
self.context, self.instance.uuid))
# NOTE(gibi): When cyborg or other module wants to handle
# similar non-nova resources then here we have to collect
# all the external resource requests in a single list and
# add them to the RequestSpec.
request_spec.requested_resources = port_res_req
scheduler_utils.setup_instance_group(self.context, request_spec)
# We currently only support live migrating to hosts in the same
# cell that the instance lives in, so we need to tell the scheduler
# to limit the applicable hosts based on cell.
cell_mapping = self._get_source_cell_mapping()
LOG.debug('Requesting cell %(cell)s while live migrating',
{'cell': cell_mapping.identity},
instance=self.instance)
if ('requested_destination' in request_spec and
request_spec.requested_destination):
request_spec.requested_destination.cell = cell_mapping
else:
request_spec.requested_destination = objects.Destination(
cell=cell_mapping)
request_spec.ensure_project_and_user_id(self.instance)
request_spec.ensure_network_metadata(self.instance)
compute_utils.heal_reqspec_is_bfv(
self.context, request_spec, self.instance)
return request_spec
def _find_destination(self):
# TODO(johngarbutt) this retry loop should be shared
attempted_hosts = [self.source]
request_spec = self._get_request_spec_for_select_destinations(
attempted_hosts)
host = None
while host is None:
self._check_not_over_max_retries(attempted_hosts)
request_spec.ignore_hosts = attempted_hosts
try:
selection_lists = self.query_client.select_destinations(
self.context, request_spec, [self.instance.uuid],
return_objects=True, return_alternates=False)
# We only need the first item in the first list, as there is
# only one instance, and we don't care about any alternates.
selection = selection_lists[0][0]
host = selection.service_host
except messaging.RemoteError as ex:
                # TODO(ShaoHe Feng) There may be multiple schedulers, and the
                # scheduling algorithm is R-R, so we can let another scheduler try.
                # Note(ShaoHe Feng) There are several types of RemoteError, such as
                # NoSuchMethod and UnsupportedVersion; we can distinguish them by
                # ex.exc_type.
raise exception.MigrationSchedulerRPCError(
reason=six.text_type(ex))
scheduler_utils.fill_provider_mapping(request_spec, selection)
provider_mapping = request_spec.get_request_group_mapping()
if provider_mapping:
# NOTE(gibi): this call might update the pci_requests of the
# instance based on the destination host if so then such change
# will be persisted when post_live_migration_at_destination
# runs.
compute_utils.\
update_pci_request_spec_with_allocated_interface_name(
self.context, self.report_client, self.instance,
provider_mapping)
try:
self._check_compatible_with_source_hypervisor(host)
self._call_livem_checks_on_host(host, provider_mapping)
except (exception.Invalid, exception.MigrationPreCheckError) as e:
LOG.debug("Skipping host: %(host)s because: %(e)s",
{"host": host, "e": e})
attempted_hosts.append(host)
# The scheduler would have created allocations against the
# selected destination host in Placement, so we need to remove
# those before moving on.
self._remove_host_allocations(selection.compute_node_uuid)
host = None
# TODO(artom) We should probably just return the whole selection object
# at this point.
return (selection.service_host, selection.nodename, selection.limits)
def _remove_host_allocations(self, compute_node_uuid):
"""Removes instance allocations against the given node from Placement
:param compute_node_uuid: UUID of ComputeNode resource provider
"""
# Now remove the allocations for our instance against that node.
# Note that this does not remove allocations against any other node
# or shared resource provider, it's just undoing what the scheduler
# allocated for the given (destination) node.
self.report_client.remove_provider_tree_from_instance_allocation(
self.context, self.instance.uuid, compute_node_uuid)
def _check_not_over_max_retries(self, attempted_hosts):
if CONF.migrate_max_retries == -1:
return
retries = len(attempted_hosts) - 1
if retries > CONF.migrate_max_retries:
if self.migration:
self.migration.status = 'failed'
self.migration.save()
msg = (_('Exceeded max scheduling retries %(max_retries)d for '
'instance %(instance_uuid)s during live migration')
% {'max_retries': retries,
'instance_uuid': self.instance.uuid})
raise exception.MaxRetriesExceeded(reason=msg)
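# Editor's note: illustrative, uncalled sketch (not part of the original module).
# It reproduces the headroom formula used in _check_destination_has_enough_memory
# above with hypothetical numbers: a 16384 MB host with 4096 MB free and a 1.5
# allocation ratio yields 16384 * 1.5 - (16384 - 4096) = 12288 MB available.
def _example_available_ram_mb(total_ram_mb, free_ram_mb, ram_allocation_ratio):
    return total_ram_mb * ram_allocation_ratio - (total_ram_mb - free_ram_mb)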
|
apache-2.0
| -4,334,092,726,895,962,600
| 48.110124
| 79
| 0.619914
| false
| 4.579165
| false
| false
| false
|
bgribble/mfp
|
mfp/test/test-dsp.py
|
1
|
1777
|
from unittest import TestCase
from mfp.mfp_app import MFPApp
from mfp.patch import Patch
from mfp.scope import NaiveScope
def setup():
MFPApp().setup()
def mkproc(case, init_type, init_args=None):
return MFPApp().create(init_type, init_args, case.patch, None, init_type)
class DSPObjectTests (TestCase):
def setUp(self):
self.patch = Patch('default', '', None, NaiveScope(), 'default')
def tearDown(self):
import time
time.sleep(0.500)
def test_create(self):
'''test_create: [dsp] can make a DSP object'''
o = mkproc(self, "osc~", "500")
def test_read(self):
'''test_read: [dsp] can read back a creation parameter'''
o = mkproc(self, "osc~", "500")
print("test_read: objid = ", o, o.dsp_obj)
f = o.dsp_obj.getparam("_sig_1")
print(f)
assert f == 500
def test_connect_disconnect(self):
'''test_connect_disconnect: [dsp] make/break connections'''
print("============= Creating in~")
inp = mkproc(self, "in~", "0")
print("============= Creating out~")
outp = mkproc(self, "out~", "0")
print("============= Created objects")
inp.connect(0, outp, 0)
print("============= Called connect")
inp.disconnect(0, outp, 0)
print("============== disconnected")
def test_delete(self):
'''test_destroy: [dsp] destroy dsp object'''
print("Creating")
inp = mkproc(self, "in~", "0")
outp = mkproc(self, "out~", "0")
print("connecting")
inp.connect(0, outp, 0)
print("deleting")
outp.delete()
inp.delete()
print("done")
def teardown():
MFPApp().finish()
print("test-dsp.py: MFPApp finish done")
|
gpl-2.0
| 1,709,242,076,048,338,700
| 27.206349
| 77
| 0.546427
| false
| 3.417308
| true
| false
| false
|
project-icp/bee-pollinator-app
|
src/icp/icp/celery.py
|
1
|
3824
|
from __future__ import absolute_import
import os
import rollbar
import logging
from celery import Celery
from celery._state import connect_on_app_finalize
from celery.signals import task_failure
from django.conf import settings
@connect_on_app_finalize
def add_unlock_chord_task_shim(app):
"""
Override native unlock_chord to support configurable max_retries.
Original code taken from https://goo.gl/3mX0ie
This task is used by result backends without native chord support.
It joins chords by creating a task chain polling the header for completion.
"""
from celery.canvas import maybe_signature
from celery.exceptions import ChordError
from celery.result import allow_join_result, result_from_tuple
logger = logging.getLogger(__name__)
MAX_RETRIES = settings.CELERY_CHORD_UNLOCK_MAX_RETRIES
@app.task(name='celery.chord_unlock', shared=False, default_retry_delay=1,
ignore_result=True, lazy=False, bind=True,
max_retries=MAX_RETRIES)
def unlock_chord(self, group_id, callback, interval=None,
max_retries=MAX_RETRIES, result=None,
Result=app.AsyncResult, GroupResult=app.GroupResult,
result_from_tuple=result_from_tuple, **kwargs):
if interval is None:
interval = self.default_retry_delay
# check if the task group is ready, and if so apply the callback.
callback = maybe_signature(callback, app)
deps = GroupResult(
group_id,
[result_from_tuple(r, app=app) for r in result],
app=app,
)
j = deps.join_native if deps.supports_native_join else deps.join
try:
ready = deps.ready()
except Exception as exc:
raise self.retry(
exc=exc, countdown=interval, max_retries=max_retries)
else:
if not ready:
raise self.retry(countdown=interval, max_retries=max_retries)
callback = maybe_signature(callback, app=app)
try:
with allow_join_result():
ret = j(timeout=3.0, propagate=True)
except Exception as exc:
try:
culprit = next(deps._failed_join_report())
reason = 'Dependency {0.id} raised {1!r}'.format(
culprit, exc,
)
except StopIteration:
reason = repr(exc)
logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
app.backend.chord_error_from_stack(callback,
ChordError(reason))
else:
try:
callback.delay(ret)
except Exception as exc:
logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
app.backend.chord_error_from_stack(
callback,
exc=ChordError('Callback error: {0!r}'.format(exc)),
)
return unlock_chord
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'icp.settings.production')
app = Celery('icp')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
rollbar_settings = getattr(settings, 'ROLLBAR', {})
if rollbar_settings:
rollbar.init(rollbar_settings.get('access_token'),
rollbar_settings.get('environment'))
@task_failure.connect
def handle_task_failure(**kw):
if rollbar_settings:
rollbar.report_exc_info(extra_data=kw)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
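# Editor's note (illustrative only, not part of the original module): the
# unlock_chord override above is exercised whenever a chord runs on a result
# backend without native chord support, e.g. with hypothetical tasks:
#
#     from celery import chord
#     chord(add.s(i, i) for i in range(10))(summarize.s())
#
# where `add` and `summarize` are assumed @app.task functions; the polling task
# then retries up to CELERY_CHORD_UNLOCK_MAX_RETRIES times before giving up.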
|
apache-2.0
| 5,018,780,426,498,828,000
| 33.45045
| 79
| 0.613494
| false
| 3.983333
| false
| false
| false
|
h4ng3r/radare2
|
sys/meson.py
|
1
|
10237
|
"""Meson build for radare2"""
import argparse
import glob
import logging
import os
import re
import shutil
import subprocess
import sys
BUILDDIR = 'build'
BACKENDS = ['ninja', 'vs2015', 'vs2017']
PATH_FMT = {}
MESON = None
ROOT = None
log = None
def set_global_variables():
"""[R_API] Set global variables"""
global log
global ROOT
global MESON
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
logging.basicConfig(format='[%(name)s][%(levelname)s]: %(message)s',
level=logging.DEBUG)
log = logging.getLogger('r2-meson')
with open(os.path.join(ROOT, 'configure.acr')) as f:
f.readline()
version = f.readline().split()[1].rstrip()
if os.name == 'nt':
meson = os.path.join(os.path.dirname(sys.executable), 'Scripts', 'meson.py')
MESON = [sys.executable, meson]
else:
MESON = ['meson']
PATH_FMT['ROOT'] = ROOT
PATH_FMT['R2_VERSION'] = version
log.debug('Root: %s', ROOT)
log.debug('Meson: %s', MESON)
log.debug('Version: %s', version)
def meson(root, build, prefix=None, backend=None,
release=False, shared=False, *, options=[]):
"""[R_API] Invoke meson"""
command = MESON + [root, build]
if prefix:
command.append('--prefix={}'.format(prefix))
if backend:
command.append('--backend={}'.format(backend))
if release:
command.append('--buildtype=release')
if shared:
command.append('--default-library=shared')
else:
command.append('--default-library=static')
if options:
command.extend(options)
log.debug('Invoking meson: %s', command)
ret = subprocess.call(command)
if ret != 0:
log.error('Meson error. Exiting.')
sys.exit(1)
def ninja(folder, *targets):
"""[R_API] Invoke ninja"""
command = ['ninja', '-C', folder]
if targets:
command.extend(targets)
log.debug('Invoking ninja: %s', command)
ret = subprocess.call(command)
if ret != 0:
log.error('Ninja error. Exiting.')
sys.exit(1)
def msbuild(project, *params):
"""[R_API] Invoke MSbuild"""
command = ['msbuild', project]
if params:
command.extend(params)
log.info('Invoking MSbuild: %s', command)
ret = subprocess.call(command)
if ret != 0:
log.error('MSbuild error. Exiting.')
sys.exit(1)
def copytree(src, dst, exclude=()):
src = src.format(**PATH_FMT)
dst = dst.format(**PATH_FMT)
log.debug('copytree "%s" -> "%s"', src, dst)
shutil.copytree(src, dst, ignore=shutil.ignore_patterns(*exclude) if exclude else None)
def move(src, dst):
src = src.format(**PATH_FMT)
dst = dst.format(**PATH_FMT)
term = os.path.sep if os.path.isdir(dst) else ''
log.debug('move "%s" -> "%s%s"', src, dst, term)
for file in glob.iglob(src):
shutil.move(file, dst)
def copy(src, dst):
src = src.format(**PATH_FMT)
dst = dst.format(**PATH_FMT)
term = os.path.sep if os.path.isdir(dst) else ''
log.debug('copy "%s" -> "%s%s"', src, dst, term)
for file in glob.iglob(src, recursive='**' in src):
shutil.copy2(file, dst)
def makedirs(path):
path = path.format(**PATH_FMT)
log.debug('makedirs "%s"', path)
os.makedirs(path)
def xp_compat(builddir):
log.info('Running XP compat script')
with open(os.path.join(builddir, 'REGEN.vcxproj'), 'r') as f:
version = re.search('<PlatformToolset>(.*)</PlatformToolset>', f.read()).group(1)
if version.endswith('_xp'):
log.info('Skipping %s', builddir)
return
log.debug('Translating from %s to %s_xp', version, version)
newversion = version+'_xp'
for f in glob.iglob(os.path.join(builddir, '**', '*.vcxproj'), recursive=True):
with open(f, 'r') as proj:
c = proj.read()
c = c.replace(version, newversion)
with open(f, 'w') as proj:
proj.write(c)
log.debug("%s .. OK", f)
def vs_dedup(builddir):
"""Remove duplicated dependency entries from vs project"""
start = '<AdditionalDependencies>'
end = ';%(AdditionalDependencies)'
for f in glob.iglob(os.path.join(builddir, '**', '*.vcxproj'), recursive=True):
with open(f) as proj:
data = proj.read()
idx = data.find(start)
if idx < 0:
continue
idx += len(start)
idx2 = data.find(end, idx)
if idx2 < 0:
continue
libs = set(data[idx:idx2].split(';'))
with open(f, 'w') as proj:
proj.write(data[:idx])
proj.write(';'.join(sorted(libs)))
proj.write(data[idx2:])
log.debug('%s processed', f)
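# Editor's note: illustrative, uncalled sketch (not part of the original script)
# showing the string surgery vs_dedup() performs above on a single
# <AdditionalDependencies> block, using a made-up project fragment.
def _example_dedup_fragment():
    data = ('<AdditionalDependencies>a.lib;b.lib;a.lib'
            ';%(AdditionalDependencies)</AdditionalDependencies>')
    start = '<AdditionalDependencies>'
    end = ';%(AdditionalDependencies)'
    idx = data.find(start) + len(start)
    idx2 = data.find(end, idx)
    libs = set(data[idx:idx2].split(';'))
    return data[:idx] + ';'.join(sorted(libs)) + data[idx2:]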
def win_dist(args):
"""Create r2 distribution for Windows"""
builddir = os.path.join(ROOT, args.dir)
PATH_FMT['DIST'] = args.install
PATH_FMT['BUILDDIR'] = builddir
makedirs(r'{DIST}')
copy(r'{BUILDDIR}\binr\*\*.exe', r'{DIST}')
copy(r'{BUILDDIR}\libr\*\*.dll', r'{DIST}')
makedirs(r'{DIST}\lib')
if args.shared:
copy(r'{BUILDDIR}\libr\*\*.lib', r'{DIST}\lib')
else:
copy(r'{BUILDDIR}\libr\*\*.a', r'{DIST}\lib')
copy(r'{BUILDDIR}\shlr\libr_shlr.a', r'{DIST}\lib')
win_dist_libr2()
def win_dist_libr2(**path_fmt):
"""[R_API] Add libr2 data/www/include/doc to dist directory"""
PATH_FMT.update(path_fmt)
copytree(r'{ROOT}\shlr\www', r'{DIST}\www')
copytree(r'{ROOT}\libr\magic\d\default', r'{DIST}\share\radare2\{R2_VERSION}\magic')
makedirs(r'{DIST}\share\radare2\{R2_VERSION}\syscall')
copy(r'{BUILDDIR}\libr\syscall\d\*.sdb', r'{DIST}\share\radare2\{R2_VERSION}\syscall')
makedirs(r'{DIST}\share\radare2\{R2_VERSION}\fcnsign')
copy(r'{BUILDDIR}\libr\anal\d\*.sdb', r'{DIST}\share\radare2\{R2_VERSION}\fcnsign')
makedirs(r'{DIST}\share\radare2\{R2_VERSION}\opcodes')
copy(r'{BUILDDIR}\libr\asm\d\*.sdb', r'{DIST}\share\radare2\{R2_VERSION}\opcodes')
makedirs(r'{DIST}\include\libr\sdb')
makedirs(r'{DIST}\include\libr\r_util')
copy(r'{ROOT}\libr\include\*.h', r'{DIST}\include\libr')
copy(r'{BUILDDIR}\r_version.h', r'{DIST}\include\libr')
copy(r'{BUILDDIR}\r_userconf.h', r'{DIST}\include\libr')
copy(r'{ROOT}\libr\include\sdb\*.h', r'{DIST}\include\libr\sdb')
copy(r'{ROOT}\libr\include\r_util\*.h', r'{DIST}\include\libr\r_util')
makedirs(r'{DIST}\share\doc\radare2')
copy(r'{ROOT}\doc\fortunes.*', r'{DIST}\share\doc\radare2')
copytree(r'{ROOT}\libr\bin\d', r'{DIST}\share\radare2\{R2_VERSION}\format',
exclude=('Makefile', 'meson.build', 'dll'))
makedirs(r'{DIST}\share\radare2\{R2_VERSION}\format\dll')
copy(r'{BUILDDIR}\libr\bin\d\*.sdb', r'{DIST}\share\radare2\{R2_VERSION}\format\dll')
copytree(r'{ROOT}\libr\cons\d', r'{DIST}\share\radare2\{R2_VERSION}\cons',
exclude=('Makefile', 'meson.build'))
makedirs(r'{DIST}\share\radare2\{R2_VERSION}\hud')
copy(r'{ROOT}\doc\hud', r'{DIST}\share\radare2\{R2_VERSION}\hud\main')
def build(args):
""" Build radare2 """
log.info('Building radare2')
r2_builddir = os.path.join(ROOT, args.dir)
options = ['-D%s' % x for x in args.options]
if not os.path.exists(r2_builddir):
meson(ROOT, r2_builddir, prefix=args.prefix, backend=args.backend,
release=args.release, shared=args.shared, options=options)
if args.backend != 'ninja':
vs_dedup(r2_builddir)
if args.xp:
xp_compat(r2_builddir)
if not args.project:
project = os.path.join(r2_builddir, 'radare2.sln')
msbuild(project, '/m')
else:
ninja(r2_builddir)
def install(args):
""" Install radare2 """
if os.name == 'nt':
win_dist(args)
return
log.warning('Install not implemented yet for this platform.')
# TODO
#if os.name == 'posix':
# os.system('DESTDIR="{destdir}" ninja -C {build} install'
# .format(destdir=destdir, build=args.dir))
def main():
# Create logger and get applications paths
set_global_variables()
# Create parser
parser = argparse.ArgumentParser(description='Mesonbuild scripts for radare2')
parser.add_argument('--project', action='store_true',
help='Create a visual studio project and do not build.')
parser.add_argument('--release', action='store_true',
help='Set the build as Release (remove debug info)')
parser.add_argument('--backend', choices=BACKENDS, default='ninja',
help='Choose build backend (default: %(default)s)')
parser.add_argument('--shared', action='store_true',
help='Link dynamically (shared library) rather than statically')
parser.add_argument('--prefix', default=None,
help='Set project installation prefix')
parser.add_argument('--dir', default=BUILDDIR, required=False,
help='Destination build directory (default: %(default)s)')
parser.add_argument('--xp', action='store_true',
help='Adds support for Windows XP')
if os.name == 'nt':
parser.add_argument('--install', help='Installation directory')
else:
parser.add_argument('--install', action='store_true',
help='Install radare2 after building')
parser.add_argument('--options', nargs='*', default=[])
args = parser.parse_args()
# Check arguments
if args.project and args.backend == 'ninja':
log.error('--project is not compatible with --backend ninja')
sys.exit(1)
if args.xp and args.backend == 'ninja':
log.error('--xp is not compatible with --backend ninja')
sys.exit(1)
if os.name == 'nt' and args.install and os.path.exists(args.install):
log.error('%s already exists', args.install)
sys.exit(1)
if os.name == 'nt' and not args.prefix:
args.prefix = os.path.join(ROOT, args.dir, 'priv_install_dir')
for o in args.options:
if not '=' in o:
log.error('Invalid option: %s', o)
sys.exit(1)
# Build it!
log.debug('Arguments: %s', args)
build(args)
if args.install:
install(args)
if __name__ == '__main__':
main()
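# Illustrative invocations (a sketch, not part of the original script), based on
# the options defined in main() above; the script name and the 'vs2017' backend
# value are assumptions:
#
#   python meson.py --release --shared --install dist
#   python meson.py --backend vs2017 --project --xp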
|
lgpl-3.0
| -6,741,204,642,511,179,000
| 34.058219
| 91
| 0.601348
| false
| 3.240582
| false
| false
| false
|
IdanMann/SnapshotGenerator
|
snapgen.py
|
1
|
5427
|
from PIL import Image
from resources import common
import settings
class SnapshotGenerator:
def __init__(self, base_available_snapshot_image, skeleton, bid_image=None,
base_unavailable_snapshot_image=None):
# Initialize objects
self.elements_skeleton = BaseElementsSkeleton(skeleton=skeleton)
self.image_template = BaseImageTemplate(base_available_snapshot_image=base_available_snapshot_image,
base_unavailable_snapshot_image=base_unavailable_snapshot_image)
self.bid_image_template = BaseBidImageTemplate(bid_image=bid_image)
# Validate integrity
self.image_template.verify()
self.elements_skeleton.verify(self.image_template.get_available_image_size(),
self.image_template.get_unavailable_image_size())
self.bid_image_template.verify(self.image_template.get_available_image_size()[0],
self.image_template.get_available_image_size()[1])
def add_bid(self, bid_data):
# Extend base_available_snapshot with a slot
raise NotImplementedError
def set_title(self):
raise NotImplementedError
class BaseImageTemplate:
# Image Template, receives the images used to generate the snapshot and an ElementsSkeleton object
def __init__(self, base_available_snapshot_image, base_unavailable_snapshot_image=None):
try:
self.base_available_snapshot_image = Image.open(base_available_snapshot_image).convert('RGBA')
self.base_unavailable_snapshot_image = Image.open(base_unavailable_snapshot_image)\
if base_unavailable_snapshot_image else self.base_available_snapshot_image
except Exception as e:
# Failed to open base image files
raise Exception(e)
        # PIL's Image.size is a property, not a method
        self.base_available_max_x, self.base_available_max_y = self.base_available_snapshot_image.size
        self.base_unavailable_max_x, self.base_unavailable_max_y = self.base_unavailable_snapshot_image.size
def verify(self):
        # Ensure the images passed in have valid dimensions
# check that both templates are of consistent dimensions
assert self.base_available_max_x == self.base_unavailable_max_x, \
"X dimensions for the base images are not equal"
assert self.base_available_max_y == self.base_unavailable_max_y, \
"Y dimensions for the base images are not equal"
def get_available_image_size(self):
        return self.base_available_snapshot_image.size
def get_unavailable_image_size(self):
        return self.base_unavailable_snapshot_image.size
def _extend_edge(self):
# This method can be used to extend the base image size to allow big elements to fit in
raise NotImplementedError
class BaseElementsSkeleton:
# Basic snapshot elements meta data
def __init__(self, skeleton):
self.meta_data = skeleton.get(common.META_DATA)
self.field_mapping = skeleton.get(common.MAPPING)
assert isinstance(self.meta_data, dict),\
"Could not load meta data using the key: {meta_data}".format(meta_data=common.META_DATA)
assert isinstance(self.field_mapping, dict),\
"Could not load mapping using the key: {mapping}".format(mapping=common.MAPPING)
# Title
title_key = self.field_mapping.get("title")
title_font = self.meta_data.get("title_font", settings.DEFAULT_FONT)
title_color = common.create_rgba_color_tuple(self.meta_data.get("title_color", settings.DEFAULT_COLOR_STRING))
self.title_x = self.meta_data.get("title_x_position", 0)
self.title_y = self.meta_data.get("title_y_position", 0)
# Bid
self.first_bid_x, self.first_bid_y = (0, 0)
def verify(self, base_available_xy=(0, 0), base_unavailable_xy=(0, 0)):
# check that title is not out of bounds
assert self.title_x >= 0, "Title's X dimension must be 0 or higher"
assert self.title_y >= 0, "Title's Y dimension must be 0 or higher"
assert self.title_x <= base_available_xy[0] and self.title_x <= base_unavailable_xy[0],\
"Title's X position is out of the image boundaries"
assert self.title_y <= base_available_xy[1] and self.title_y <= base_unavailable_xy[1],\
"Title's Y position is out of the image boundaries"
# check that the first bid is not out of bounds
assert self.first_bid_x >= 0, "First bid's X dimension must be 0 or higher"
assert self.first_bid_y >= 0, "First bid's Y dimension must be 0 or higher"
class BaseBidImageTemplate:
# Base bid object with all parameters to create a bid
def __init__(self, bid_image):
assert bid_image, "Could not find a bid image to use"
try:
self.bid_image = Image.open(bid_image)
except Exception as e:
raise Exception(e)
        self.bid_max_x, self.bid_max_y = self.bid_image.size
def verify(self, base_available_max_x, base_available_max_y):
# check that the first bid is not out of bounds
assert self.bid_max_x <= base_available_max_x, \
"X dimensions for the bid image are bigger than the base image"
assert self.bid_max_y <= base_available_max_y, \
"Y dimensions for the bid image are bigger than the base image"
|
mit
| -1,809,330,954,875,185,000
| 46.191304
| 118
| 0.657638
| false
| 3.915584
| false
| false
| false
|
distributed-system-analysis/pbench
|
lib/pbench/server/api/resources/query_apis/controllers_list.py
|
1
|
6159
|
from flask import jsonify
from logging import Logger
from typing import Any, AnyStr, Dict
from pbench.server import PbenchServerConfig
from pbench.server.api.resources.query_apis import (
ElasticBase,
Schema,
Parameter,
ParamType,
PostprocessError,
)
class ControllersList(ElasticBase):
"""
Get the names of controllers within a date range.
"""
def __init__(self, config: PbenchServerConfig, logger: Logger):
super().__init__(
config,
logger,
Schema(
Parameter("user", ParamType.USER, required=False),
Parameter("start", ParamType.DATE, required=True),
Parameter("end", ParamType.DATE, required=True),
),
)
def assemble(self, json_data: Dict[AnyStr, Any]) -> Dict[AnyStr, Any]:
"""
Construct a search for Pbench controller names which have registered
datasets within a specified date range and which are either owned
by a specified username, or have been made publicly accessible.
{
"user": "username",
"start": "start-time",
"end": "end-time"
}
json_data: JSON dictionary of type-normalized parameters
user: specifies the owner of the data to be searched; it need not
necessarily be the user represented by the session token
header, assuming the session user is authorized to view "user"s
data. If "user": None is specified, then only public datasets
will be returned.
TODO: When we have authorization infrastructure, we'll need to
check that "session user" has rights to view "user" data. We might
also default a missing "user" JSON field with the authorization
token's user. This would require a different mechanism to signal
"return public data"; for example, we could specify either
"access": "public", "access": "private", or "access": "all" to
include both private and public data.
"start" and "end" are datetime objects representing a set of Elasticsearch
run document indices in which to search.
"""
user = json_data.get("user")
start = json_data.get("start")
end = json_data.get("end")
# We need to pass string dates as part of the Elasticsearch query; we
# use the unconverted strings passed by the caller rather than the
# adjusted and normalized datetime objects for this.
start_arg = f"{start:%Y-%m}"
end_arg = f"{end:%Y-%m}"
self.logger.info(
"Discover controllers for user {}, prefix {}: ({} - {})",
user,
self.prefix,
start,
end,
)
uri_fragment = self._gen_month_range("run", start, end)
return {
"path": f"/{uri_fragment}/_search",
"kwargs": {
"json": {
"query": {
"bool": {
"filter": [
{"term": self._get_user_term(user)},
{
"range": {
"@timestamp": {"gte": start_arg, "lte": end_arg}
}
},
]
}
},
"size": 0, # Don't return "hits", only aggregations
"aggs": {
"controllers": {
"terms": {
"field": "run.controller",
"order": [{"runs": "desc"}],
},
"aggs": {"runs": {"max": {"field": "run.start"}}},
}
},
},
"params": {"ignore_unavailable": "true"},
},
}
def postprocess(self, es_json: Dict[AnyStr, Any]) -> Dict[AnyStr, Any]:
"""
Returns a summary of the returned Elasticsearch query results, showing
the Pbench controller name, the number of runs using that controller
name, and the start timestamp of the latest run both in binary and
string form:
[
{
"key": "alphaville.example.com",
"controller": "alphaville.example.com",
"results": 2,
"last_modified_value": 1598473155810.0,
"last_modified_string": "2020-08-26T20:19:15.810Z"
}
]
"""
controllers = []
# If there are no matches for the user, controller name,
# and time range, return the empty list rather than failing.
# Note that we can't check the length of ["hits"]["hits"]
# because we've told Elasticsearch to return only aggregations,
# not source documents.
try:
count = es_json["hits"]["total"]["value"]
if int(count) == 0:
self.logger.warning("No data returned by Elasticsearch")
return jsonify(controllers)
except KeyError as e:
raise PostprocessError(
f"Can't find Elasticsearch match data {e} in {es_json!r}"
)
except ValueError as e:
raise PostprocessError(f"Elasticsearch hit count {count!r} value: {e}")
buckets = es_json["aggregations"]["controllers"]["buckets"]
self.logger.info("{} controllers found", len(buckets))
for controller in buckets:
c = {}
c["key"] = controller["key"]
c["controller"] = controller["key"]
c["results"] = controller["doc_count"]
c["last_modified_value"] = controller["runs"]["value"]
c["last_modified_string"] = controller["runs"]["value_as_string"]
controllers.append(c)
# construct response object
return jsonify(controllers)
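# Illustrative sketch (not part of the original module): the shape of the
# Elasticsearch response that postprocess() consumes, reduced to the fields it
# actually reads; the values echo the docstring example above.
#
# {
#     "hits": {"total": {"value": 2}},
#     "aggregations": {
#         "controllers": {
#             "buckets": [
#                 {
#                     "key": "alphaville.example.com",
#                     "doc_count": 2,
#                     "runs": {
#                         "value": 1598473155810.0,
#                         "value_as_string": "2020-08-26T20:19:15.810Z"
#                     }
#                 }
#             ]
#         }
#     }
# }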
|
gpl-3.0
| -8,051,723,155,547,731,000
| 38.480769
| 88
| 0.508686
| false
| 4.838178
| false
| false
| false
|
dg321123/cache
|
response_filter.py
|
1
|
1814
|
import json
# This assumes that only list responses are split across pages. I don't like it, but
# it gets me started quickly, punting the question about handling response formats to
# the future.
def coalesce_response(response, n):
collection = []
for page in response:
list_response = json.loads(page)
if isinstance(list_response, list):
collection += list_response
else:
collection = list_response
return collection
# Method to return the top 'n' responses
def top_response_filter(response, n):
collection = coalesce_response(response, n)
return collection[:n]
# Method to return the bottom 'n' responses
def bottom_response_filter(response, n):
collection = coalesce_response(response, n)
return collection[-1 * n:]
# This method can be extended to incorporate other filter types, say average or sum of top n elements.
def response_filter(response, filter_type, count):
if filter_type == 'top':
filter_method = top_response_filter
elif filter_type == 'bottom':
filter_method = bottom_response_filter
else:
filter_method = coalesce_response
return filter_method(response, count)
# Split the path into 3 parts -
# 1. key = key into the cache
# 2. filter_type = kind of filter to apply on the response from the cache
# 3. count = limit the number of response elements
# In the future, you can add other filters such as mean, median, etc.
def path_to_parts(path):
parts = path.split('/')
key = ''
filter_type = ''
count = 0
for part in parts:
if part == 'top' or part == 'bottom':
filter_type = part
elif part.isdigit():
count = int(part)
else:
key += '/' + part
return [key, filter_type, count]
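# Minimal usage sketch (not part of the original module): two JSON "pages" as
# they might come back from the cache, run through the helpers above.
if __name__ == '__main__':
    pages = ['[1, 2, 3]', '[4, 5]']
    print(path_to_parts('api/scores/top/3'))    # ['/api/scores', 'top', 3]
    print(response_filter(pages, 'top', 3))     # [1, 2, 3]
    print(response_filter(pages, 'bottom', 2))  # [4, 5]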
|
gpl-2.0
| -4,825,386,514,783,277,000
| 28.754098
| 102
| 0.651599
| false
| 4.013274
| false
| false
| false
|
42cc/apiclient-kava
|
setup.py
|
1
|
1143
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import join, dirname
from setuptools import setup, find_packages
def get_version(fname='kavahq/__init__.py'):
with open(fname) as f:
for line in f:
if line.startswith('__version__'):
return eval(line.split('=')[-1])
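# e.g. with kavahq/__init__.py containing the line: __version__ = '1.2.3'
# get_version() evaluates the right-hand side and returns '1.2.3'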
setup(
name='kavahq-api',
version=get_version(),
packages=find_packages(),
requires=['python (>= 2.7)', ],
install_requires=['requests'],
tests_require=['mock', 'unittest2', 'nose', 'coverage'],
description='wrapper over kavahq.com API',
long_description=open(join(dirname(__file__), 'README.rst')).read(),
author='42 Coffee Cups',
author_email='contact@42cc.co',
url='https://github.com/42cc/apiclient-kava',
download_url='https://github.com/42cc/apiclient-kava/archive/master.zip',
license='GPL v2 License',
keywords=['kavahq', 'api'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python',
],
)
|
gpl-2.0
| 6,429,455,022,025,177,000
| 31.657143
| 77
| 0.616798
| false
| 3.723127
| false
| false
| false
|
ric2b/Vivaldi-browser
|
chromium/tools/binary_size/diagnose_bloat.py
|
1
|
34013
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tool for finding the cause of binary size bloat.
See //tools/binary_size/README.md for example usage.
Note: this tool will perform gclient sync/git checkout on your local repo.
"""
from __future__ import print_function
import atexit
import argparse
import collections
from contextlib import contextmanager
import distutils.spawn
import json
import logging
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tempfile
import zipfile
_COMMIT_COUNT_WARN_THRESHOLD = 15
_ALLOWED_CONSECUTIVE_FAILURES = 2
_SRC_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
_DEFAULT_ARCHIVE_DIR = os.path.join(_SRC_ROOT, 'out', 'binary-size-results')
_DEFAULT_OUT_DIR = os.path.join(_SRC_ROOT, 'out', 'binary-size-build')
_BINARY_SIZE_DIR = os.path.join(_SRC_ROOT, 'tools', 'binary_size')
_RESOURCE_SIZES_PATH = os.path.join(
_SRC_ROOT, 'build', 'android', 'resource_sizes.py')
_LLVM_TOOLS_DIR = os.path.join(
_SRC_ROOT, 'third_party', 'llvm-build', 'Release+Asserts', 'bin')
_DOWNLOAD_OBJDUMP_PATH = os.path.join(
_SRC_ROOT, 'tools', 'clang', 'scripts', 'download_objdump.py')
_GN_PATH = os.path.join(_SRC_ROOT, 'third_party', 'depot_tools', 'gn')
_NINJA_PATH = os.path.join(_SRC_ROOT, 'third_party', 'depot_tools', 'ninja')
_DiffResult = collections.namedtuple('DiffResult', ['name', 'value', 'units'])
class BaseDiff(object):
"""Base class capturing binary size diffs."""
def __init__(self, name):
self.name = name
self.banner = '\n' + '*' * 30 + name + '*' * 30
def AppendResults(self, logfiles):
"""Print and write diff results to an open |logfile|."""
full, short = logfiles
_WriteToFile(full, self.banner)
_WriteToFile(short, self.banner)
for s in self.Summary():
_WriteToFile(short, s)
_WriteToFile(short, '')
for s in self.DetailedResults():
full.write(s + '\n')
@property
def summary_stat(self):
"""Returns a tuple of (name, value, units) for the most important metric."""
raise NotImplementedError()
def Summary(self):
"""A short description that summarizes the source of binary size bloat."""
raise NotImplementedError()
def DetailedResults(self):
"""An iterable description of the cause of binary size bloat."""
raise NotImplementedError()
def ProduceDiff(self, before_dir, after_dir):
"""Prepare a binary size diff with ready to print results."""
raise NotImplementedError()
def RunDiff(self, logfiles, before_dir, after_dir):
logging.info('Creating: %s', self.name)
self.ProduceDiff(before_dir, after_dir)
self.AppendResults(logfiles)
class NativeDiff(BaseDiff):
# E.g.: Section Sizes (Total=1.2 kb (1222 bytes)):
_RE_SUMMARY_STAT = re.compile(
r'Section Sizes \(Total=(?P<value>-?[0-9\.]+) ?(?P<units>\w+)')
_SUMMARY_STAT_NAME = 'Native Library Delta'
def __init__(self, size_name, supersize_path):
self._size_name = size_name
self._supersize_path = supersize_path
self._diff = []
super(NativeDiff, self).__init__('Native Diff')
@property
def summary_stat(self):
m = NativeDiff._RE_SUMMARY_STAT.search(self._diff)
if m:
return _DiffResult(
NativeDiff._SUMMARY_STAT_NAME, m.group('value'), m.group('units'))
raise Exception('Could not extract total from:\n' + self._diff)
def DetailedResults(self):
return self._diff.splitlines()
def Summary(self):
return self.DetailedResults()[:100]
def ProduceDiff(self, before_dir, after_dir):
before_size = os.path.join(before_dir, self._size_name)
after_size = os.path.join(after_dir, self._size_name)
cmd = [self._supersize_path, 'diff', before_size, after_size]
self._diff = _RunCmd(cmd)[0].replace('{', '{{').replace('}', '}}')
class ResourceSizesDiff(BaseDiff):
# Ordered by output appearance.
_SUMMARY_SECTIONS = (
'Specifics', 'InstallSize', 'InstallBreakdown', 'Dex')
# Sections where it makes sense to sum subsections into a section total.
_AGGREGATE_SECTIONS = (
'InstallBreakdown', 'Breakdown', 'MainLibInfo', 'Uncompressed')
def __init__(self, apk_name, filename='results-chart.json'):
self._apk_name = apk_name
self._diff = None # Set by |ProduceDiff()|
self._filename = filename
super(ResourceSizesDiff, self).__init__('Resource Sizes Diff')
@property
def summary_stat(self):
for section_name, results in self._diff.iteritems():
for subsection_name, value, units in results:
if 'normalized' in subsection_name:
full_name = '{} {}'.format(section_name, subsection_name)
return _DiffResult(full_name, value, units)
raise Exception('Could not find "normalized" in: ' + repr(self._diff))
def DetailedResults(self):
return self._ResultLines()
def Summary(self):
footer_lines = [
'',
'For an explanation of these metrics, see:',
('https://chromium.googlesource.com/chromium/src/+/master/docs/speed/'
'binary_size/metrics.md#Metrics-for-Android')]
return self._ResultLines(
include_sections=ResourceSizesDiff._SUMMARY_SECTIONS) + footer_lines
def ProduceDiff(self, before_dir, after_dir):
before = self._LoadResults(before_dir)
after = self._LoadResults(after_dir)
self._diff = collections.defaultdict(list)
for section, section_dict in after.iteritems():
for subsection, v in section_dict.iteritems():
# Ignore entries when resource_sizes.py chartjson format has changed.
if (section not in before or
subsection not in before[section] or
v['units'] != before[section][subsection]['units']):
logging.warning(
'Found differing dict structures for resource_sizes.py, '
'skipping %s %s', section, subsection)
else:
self._diff[section].append(_DiffResult(
subsection,
v['value'] - before[section][subsection]['value'],
v['units']))
def _ResultLines(self, include_sections=None):
"""Generates diff lines for the specified sections (defaults to all)."""
section_lines = collections.defaultdict(list)
for section_name, section_results in self._diff.iteritems():
if not include_sections or section_name in include_sections:
subsection_lines = []
section_sum = 0
units = ''
for name, value, units in section_results:
# Omit subsections with no changes for summaries.
if value == 0 and include_sections:
continue
section_sum += value
subsection_lines.append('{:>+14,} {} {}'.format(value, units, name))
section_header = section_name
if section_name in ResourceSizesDiff._AGGREGATE_SECTIONS:
section_header += ' ({:+,} {})'.format(section_sum, units)
section_header += ':'
# Omit sections with empty subsections.
if subsection_lines:
section_lines[section_name].append(section_header)
section_lines[section_name].extend(subsection_lines)
if not section_lines:
return ['Empty ' + self.name]
ret = []
for k in include_sections or sorted(section_lines):
ret.extend(section_lines[k])
return ret
def _LoadResults(self, archive_dir):
chartjson_file = os.path.join(archive_dir, self._filename)
with open(chartjson_file) as f:
chartjson = json.load(f)
charts = chartjson['charts']
# Older versions of resource_sizes.py prefixed the apk onto section names.
ret = {}
for section, section_dict in charts.iteritems():
section_no_target = re.sub(r'^.*_', '', section)
ret[section_no_target] = section_dict
return ret
class _BuildHelper(object):
"""Helper class for generating and building targets."""
def __init__(self, args):
self.clean = args.clean
self.enable_chrome_android_internal = args.enable_chrome_android_internal
self.extra_gn_args_str = args.gn_args
self.apply_patch = args.extra_rev
self.max_jobs = args.max_jobs
self.max_load_average = args.max_load_average
self.output_directory = args.output_directory
self.target = args.target
self.target_os = args.target_os
self.use_goma = args.use_goma
self._SetDefaults()
self.is_bundle = 'minimal' in self.target
@property
def abs_apk_path(self):
return os.path.join(self.output_directory, self.apk_path)
@property
def abs_mapping_path(self):
return os.path.join(self.output_directory, self.mapping_path)
@property
def apk_name(self):
# my_great_apk -> MyGreat.apk
apk_name = ''.join(s.title() for s in self.target.split('_')[:-1]) + '.apk'
if self.is_bundle:
# my_great_minimal_apks -> MyGreatMinimal.apk -> MyGreat.minimal.apks
apk_name = apk_name.replace('Minimal.apk', '.minimal.apks')
return apk_name.replace('Webview', 'WebView')
@property
def apk_path(self):
return os.path.join('apks', self.apk_name)
@property
def mapping_path(self):
if self.is_bundle:
return self.apk_path.replace('.minimal.apks', '.aab') + '.mapping'
else:
return self.apk_path + '.mapping'
@property
def main_lib_path(self):
# TODO(agrieve): Could maybe extract from .apk or GN?
if self.IsLinux():
return 'chrome'
if 'monochrome' in self.target or 'trichrome' in self.target:
ret = 'lib.unstripped/libmonochrome.so'
elif 'webview' in self.target:
ret = 'lib.unstripped/libwebviewchromium.so'
else:
ret = 'lib.unstripped/libchrome.so'
return ret
@property
def abs_main_lib_path(self):
return os.path.join(self.output_directory, self.main_lib_path)
@property
def map_file_path(self):
return self.main_lib_path + '.map.gz'
@property
def size_name(self):
if self.IsLinux():
return os.path.basename(self.main_lib_path) + '.size'
return self.apk_name + '.size'
def _SetDefaults(self):
has_goma_dir = os.path.exists(os.path.join(os.path.expanduser('~'), 'goma'))
self.use_goma = self.use_goma and has_goma_dir
self.max_load_average = (self.max_load_average or
str(multiprocessing.cpu_count()))
has_internal = os.path.exists(
os.path.join(os.path.dirname(_SRC_ROOT), 'src-internal'))
if has_internal:
self.extra_gn_args_str = (
'is_chrome_branded=true ' + self.extra_gn_args_str)
else:
self.extra_gn_args_str = (
'ffmpeg_branding="Chrome" proprietary_codecs=true' +
self.extra_gn_args_str)
if self.IsLinux():
self.extra_gn_args_str = (
'is_cfi=false generate_linker_map=true ' + self.extra_gn_args_str)
self.extra_gn_args_str = ' ' + self.extra_gn_args_str.strip()
if not self.max_jobs:
if self.use_goma:
self.max_jobs = '10000'
elif has_internal:
self.max_jobs = '500'
else:
self.max_jobs = '50'
if not self.target:
if self.IsLinux():
self.target = 'chrome'
elif self.enable_chrome_android_internal:
self.target = 'monochrome_minimal_apks'
else:
self.target = 'monochrome_public_minimal_apks'
def _GenGnCmd(self):
gn_args = 'is_official_build=true'
gn_args += ' android_channel="stable"'
# Variables often become unused when experimenting with macros to reduce
# size, so don't fail on warnings.
gn_args += ' treat_warnings_as_errors=false'
# Speed things up a bit by skipping lint & errorprone.
gn_args += ' disable_android_lint=true'
gn_args += ' use_errorprone_java_compiler=false'
gn_args += ' use_goma=%s' % str(self.use_goma).lower()
gn_args += ' target_os="%s"' % self.target_os
if self.IsAndroid():
gn_args += (' enable_chrome_android_internal=%s' %
str(self.enable_chrome_android_internal).lower())
gn_args += self.extra_gn_args_str
return [_GN_PATH, 'gen', self.output_directory, '--args=%s' % gn_args]
def _GenNinjaCmd(self):
cmd = [_NINJA_PATH, '-C', self.output_directory]
cmd += ['-j', self.max_jobs] if self.max_jobs else []
cmd += ['-l', self.max_load_average] if self.max_load_average else []
cmd += [self.target]
return cmd
def Run(self):
"""Run GN gen/ninja build and return the process returncode."""
logging.info('Building %s within %s (this might take a while).',
self.target, os.path.relpath(self.output_directory))
if self.clean:
_RunCmd([_GN_PATH, 'clean', self.output_directory])
retcode = _RunCmd(
self._GenGnCmd(), verbose=True, exit_on_failure=False)[1]
if retcode:
return retcode
return _RunCmd(
self._GenNinjaCmd(), verbose=True, exit_on_failure=False)[1]
def IsAndroid(self):
return self.target_os == 'android'
def IsLinux(self):
return self.target_os == 'linux'
class _BuildArchive(object):
"""Class for managing a directory with build results and build metadata."""
def __init__(self, rev, base_archive_dir, build, subrepo, slow_options,
save_unstripped):
self.build = build
self.dir = os.path.join(base_archive_dir, rev)
metadata_path = os.path.join(self.dir, 'metadata.txt')
self.rev = rev
self.metadata = _Metadata([self], build, metadata_path, subrepo)
self._slow_options = slow_options
self._save_unstripped = save_unstripped
def ArchiveBuildResults(self, supersize_path, tool_prefix=None):
"""Save build artifacts necessary for diffing."""
logging.info('Saving build results to: %s', self.dir)
_EnsureDirsExist(self.dir)
if self.build.IsAndroid():
self._ArchiveFile(self.build.abs_apk_path)
self._ArchiveFile(self.build.abs_mapping_path)
self._ArchiveResourceSizes()
self._ArchiveSizeFile(supersize_path, tool_prefix)
if self._save_unstripped:
self._ArchiveFile(self.build.abs_main_lib_path)
self.metadata.Write()
assert self.Exists()
def Exists(self):
ret = self.metadata.Exists() and os.path.exists(self.archived_size_path)
if self._save_unstripped:
ret = ret and os.path.exists(self.archived_unstripped_path)
return ret
@property
def archived_unstripped_path(self):
return os.path.join(self.dir, os.path.basename(self.build.main_lib_path))
@property
def archived_size_path(self):
return os.path.join(self.dir, self.build.size_name)
def _ArchiveResourceSizes(self):
cmd = [
_RESOURCE_SIZES_PATH, self.build.abs_apk_path, '--output-dir', self.dir,
'--chartjson', '--chromium-output-dir', self.build.output_directory
]
if self._slow_options:
cmd += ['--estimate-patch-size', '--dump-static-initializers']
_RunCmd(cmd)
def _ArchiveFile(self, filename):
if not os.path.exists(filename):
_Die('missing expected file: %s', filename)
shutil.copy(filename, self.dir)
def _ArchiveSizeFile(self, supersize_path, tool_prefix):
existing_size_file = self.build.abs_apk_path + '.size'
if os.path.exists(existing_size_file):
logging.info('Found existing .size file')
shutil.copy(existing_size_file, self.archived_size_path)
else:
supersize_cmd = [
supersize_path, 'archive', self.archived_size_path, '--elf-file',
self.build.abs_main_lib_path, '--output-directory',
self.build.output_directory
]
if tool_prefix:
supersize_cmd += ['--tool-prefix', tool_prefix]
if self.build.IsAndroid():
supersize_cmd += ['-f', self.build.abs_apk_path]
logging.info('Creating .size file')
_RunCmd(supersize_cmd)
class _DiffArchiveManager(object):
"""Class for maintaining BuildArchives and their related diff artifacts."""
def __init__(self, revs, archive_dir, diffs, build, subrepo, slow_options,
save_unstripped):
self.archive_dir = archive_dir
self.build = build
self.build_archives = [
_BuildArchive(rev, archive_dir, build, subrepo, slow_options,
save_unstripped)
for rev in revs
]
self.diffs = diffs
self.subrepo = subrepo
self._summary_stats = []
def MaybeDiff(self, before_id, after_id):
"""Perform diffs given two build archives."""
before = self.build_archives[before_id]
after = self.build_archives[after_id]
diff_path, short_diff_path = self._DiffFilePaths(before, after)
if not self._CanDiff(before, after):
logging.info(
'Skipping diff for %s due to missing build archives.', diff_path)
return
metadata_path = self._DiffMetadataPath(before, after)
metadata = _Metadata(
[before, after], self.build, metadata_path, self.subrepo)
if metadata.Exists():
logging.info(
'Skipping diff for %s and %s. Matching diff already exists: %s',
before.rev, after.rev, diff_path)
else:
with open(diff_path, 'w') as diff_file, \
open(short_diff_path, 'w') as summary_file:
for d in self.diffs:
d.RunDiff((diff_file, summary_file), before.dir, after.dir)
metadata.Write()
self._AddDiffSummaryStat(before, after)
if os.path.exists(short_diff_path):
_PrintFile(short_diff_path)
logging.info('See detailed diff results here: %s',
os.path.relpath(diff_path))
def GenerateHtmlReport(self, before_id, after_id):
"""Generate HTML report given two build archives."""
before = self.build_archives[before_id]
after = self.build_archives[after_id]
diff_path = self._DiffDir(before, after)
if not self._CanDiff(before, after):
logging.info(
'Skipping HTML report for %s due to missing build archives.',
diff_path)
return
supersize_path = os.path.join(_BINARY_SIZE_DIR, 'supersize')
report_path = os.path.join(diff_path, 'diff.ndjson')
supersize_cmd = [supersize_path, 'html_report', '--diff-with',
before.archived_size_path,
after.archived_size_path,
report_path]
logging.info('Creating HTML report')
_RunCmd(supersize_cmd)
logging.info('View using a local server via: %s start_server %s',
os.path.relpath(supersize_path),
os.path.relpath(report_path))
def Summarize(self):
path = os.path.join(self.archive_dir, 'last_diff_summary.txt')
if self._summary_stats:
with open(path, 'w') as f:
stats = sorted(
self._summary_stats, key=lambda x: x[0].value, reverse=True)
_WriteToFile(f, '\nDiff Summary')
for s, before, after in stats:
_WriteToFile(f, '{:>+10} {} {} for range: {}..{}',
s.value, s.units, s.name, before, after)
# Print cached file if all builds were cached.
num_archives = len(self.build_archives)
if os.path.exists(path) and num_archives > 1:
_PrintFile(path)
if num_archives <= 2:
if not all(a.Exists() for a in self.build_archives):
return
supersize_path = os.path.join(_BINARY_SIZE_DIR, 'supersize')
size2 = ''
if num_archives == 2:
size2 = os.path.relpath(self.build_archives[-1].archived_size_path)
logging.info('Enter supersize console via: %s console %s %s',
os.path.relpath(supersize_path),
os.path.relpath(self.build_archives[0].archived_size_path), size2)
def _AddDiffSummaryStat(self, before, after):
stat = None
if self.build.IsAndroid():
summary_diff_type = ResourceSizesDiff
else:
summary_diff_type = NativeDiff
for d in self.diffs:
if isinstance(d, summary_diff_type):
stat = d.summary_stat
if stat:
self._summary_stats.append((stat, before.rev, after.rev))
def _CanDiff(self, before, after):
return before.Exists() and after.Exists()
def _DiffFilePaths(self, before, after):
ret = os.path.join(self._DiffDir(before, after), 'diff_results')
return ret + '.txt', ret + '.short.txt'
def _DiffMetadataPath(self, before, after):
return os.path.join(self._DiffDir(before, after), 'metadata.txt')
def _DiffDir(self, before, after):
archive_range = '%s..%s' % (before.rev, after.rev)
diff_path = os.path.join(self.archive_dir, 'diffs', archive_range)
_EnsureDirsExist(diff_path)
return diff_path
class _Metadata(object):
def __init__(self, archives, build, path, subrepo):
self.data = {
'revs': [a.rev for a in archives],
'apply_patch': build.apply_patch,
'archive_dirs': [a.dir for a in archives],
'target': build.target,
'target_os': build.target_os,
'subrepo': subrepo,
'path': path,
'gn_args': {
'extra_gn_args_str': build.extra_gn_args_str,
'enable_chrome_android_internal': build.enable_chrome_android_internal,
}
}
def Exists(self):
path = self.data['path']
if os.path.exists(path):
with open(path, 'r') as f:
return self.data == json.load(f)
return False
def Write(self):
with open(self.data['path'], 'w') as f:
json.dump(self.data, f)
def _EnsureDirsExist(path):
if not os.path.exists(path):
os.makedirs(path)
def _RunCmd(cmd, verbose=False, exit_on_failure=True):
"""Convenience function for running commands.
Args:
cmd: the command to run.
verbose: if this is True, then the stdout and stderr of the process will be
printed. If it's false, the stdout will be returned.
exit_on_failure: die if an error occurs when this is True.
Returns:
Tuple of (process stdout, process returncode).
"""
assert not (verbose and exit_on_failure)
cmd_str = ' '.join(c for c in cmd)
logging.debug('Running: %s', cmd_str)
proc_stdout = proc_stderr = subprocess.PIPE
if verbose:
proc_stdout, proc_stderr = sys.stdout, subprocess.STDOUT
proc = subprocess.Popen(cmd, stdout=proc_stdout, stderr=proc_stderr)
stdout, stderr = proc.communicate()
if proc.returncode and exit_on_failure:
_Die('command failed: %s\nstderr:\n%s', cmd_str, stderr)
stdout = stdout.strip() if stdout else ''
return stdout, proc.returncode
def _GitCmd(args, subrepo):
return _RunCmd(['git', '-C', subrepo] + args)[0]
def _GclientSyncCmd(rev, subrepo):
cwd = os.getcwd()
os.chdir(subrepo)
_, retcode = _RunCmd(['gclient', 'sync', '-r', 'src@' + rev],
verbose=True, exit_on_failure=False)
os.chdir(cwd)
return retcode
def _SyncAndBuild(archive, build, subrepo, no_gclient, extra_rev):
"""Sync, build and return non 0 if any commands failed."""
# Simply do a checkout if subrepo is used.
if _CurrentGitHash(subrepo) == archive.rev:
if subrepo != _SRC_ROOT:
logging.info('Skipping git checkout since already at desired rev')
else:
logging.info('Skipping gclient sync since already at desired rev')
elif subrepo != _SRC_ROOT or no_gclient:
_GitCmd(['checkout', archive.rev], subrepo)
else:
# Move to a detached state since gclient sync doesn't work with local
# commits on a branch.
_GitCmd(['checkout', '--detach'], subrepo)
logging.info('Syncing to %s', archive.rev)
ret = _GclientSyncCmd(archive.rev, subrepo)
if ret:
return ret
with _ApplyPatch(extra_rev, subrepo):
return build.Run()
@contextmanager
def _ApplyPatch(rev, subrepo):
if not rev:
yield
else:
restore_func = _GenRestoreFunc(subrepo)
try:
_GitCmd(['cherry-pick', rev, '--strategy-option', 'theirs'], subrepo)
yield
finally:
restore_func()
def _GenerateRevList(rev, reference_rev, all_in_range, subrepo, step):
"""Normalize and optionally generate a list of commits in the given range.
Returns:
A list of revisions ordered from oldest to newest.
"""
rev_seq = '%s^..%s' % (reference_rev, rev)
stdout = _GitCmd(['rev-list', rev_seq], subrepo)
all_revs = stdout.splitlines()[::-1]
if all_in_range or len(all_revs) < 2 or step:
revs = all_revs
if step:
revs = revs[::step]
else:
revs = [all_revs[0], all_revs[-1]]
num_revs = len(revs)
if num_revs >= _COMMIT_COUNT_WARN_THRESHOLD:
_VerifyUserAccepts(
'You\'ve provided a commit range that contains %d commits.' % num_revs)
logging.info('Processing %d commits', num_revs)
return revs
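# Illustrative behaviour of _GenerateRevList (a sketch, not part of the original
# script), assuming `git rev-list reference_rev^..rev` covers commits A..E
# (oldest first after the [::-1] reversal):
#   default            -> [A, E]           (endpoints only)
#   all_in_range=True  -> [A, B, C, D, E]
#   step=2             -> [A, C, E]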
def _ValidateRevs(rev, reference_rev, subrepo, extra_rev):
def git_fatal(args, message):
devnull = open(os.devnull, 'wb')
retcode = subprocess.call(
['git', '-C', subrepo] + args, stdout=devnull, stderr=subprocess.STDOUT)
if retcode:
_Die(message)
no_obj_message = ('%s either doesn\'t exist or your local repo is out of '
'date, try "git fetch origin master"')
git_fatal(['cat-file', '-e', rev], no_obj_message % rev)
git_fatal(['cat-file', '-e', reference_rev], no_obj_message % reference_rev)
if extra_rev:
git_fatal(['cat-file', '-e', extra_rev], no_obj_message % extra_rev)
git_fatal(['merge-base', '--is-ancestor', reference_rev, rev],
'reference-rev is newer than rev')
def _VerifyUserAccepts(message):
print(message + ' Do you want to proceed? [y/n]')
if raw_input('> ').lower() != 'y':
sys.exit()
def _EnsureDirectoryClean(subrepo):
logging.info('Checking source directory')
stdout = _GitCmd(['status', '--porcelain'], subrepo)
# Ignore untracked files.
if stdout and stdout[:2] != '??':
logging.error('Failure: please ensure working directory is clean.')
sys.exit()
def _Die(s, *args):
logging.error('Failure: ' + s, *args)
sys.exit(1)
def _WriteToFile(logfile, s, *args, **kwargs):
if isinstance(s, basestring):
data = s.format(*args, **kwargs) + '\n'
else:
data = '\n'.join(s) + '\n'
logfile.write(data)
def _PrintFile(path):
with open(path) as f:
sys.stdout.write(f.read())
@contextmanager
def _TmpCopyBinarySizeDir():
"""Recursively copy files to a temp dir and yield temp paths."""
# Needs to be at same level of nesting as the real //tools/binary_size
# since supersize uses this to find d3 in //third_party.
tmp_dir = tempfile.mkdtemp(dir=_SRC_ROOT)
try:
bs_dir = os.path.join(tmp_dir, 'binary_size')
shutil.copytree(_BINARY_SIZE_DIR, bs_dir)
# We also copy the tools supersize needs, but only if they exist.
tool_prefix = None
if os.path.exists(_DOWNLOAD_OBJDUMP_PATH):
if not os.path.exists(os.path.join(_LLVM_TOOLS_DIR, 'llvm-readelf')):
_RunCmd([_DOWNLOAD_OBJDUMP_PATH])
tools_dir = os.path.join(bs_dir, 'bintools')
tool_prefix = os.path.join(tools_dir, 'llvm-')
shutil.copytree(_LLVM_TOOLS_DIR, tools_dir)
yield (os.path.join(bs_dir, 'supersize'), tool_prefix)
finally:
shutil.rmtree(tmp_dir)
def _CurrentGitHash(subrepo):
return _GitCmd(['rev-parse', 'HEAD'], subrepo)
def _GenRestoreFunc(subrepo):
branch = _GitCmd(['rev-parse', '--abbrev-ref', 'HEAD'], subrepo)
# Happens when the repo didn't start on a named branch.
if branch == 'HEAD':
branch = _GitCmd(['rev-parse', 'HEAD'], subrepo)
def _RestoreFunc():
logging.warning('Restoring original git checkout')
_GitCmd(['checkout', branch], subrepo)
return _RestoreFunc
def _SetRestoreFunc(subrepo):
atexit.register(_GenRestoreFunc(subrepo))
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('rev',
help='Find binary size bloat for this commit.')
parser.add_argument('--archive-directory',
default=_DEFAULT_ARCHIVE_DIR,
help='Where results are stored.')
parser.add_argument('--reference-rev',
help='Older rev to diff against. If not supplied, '
'the previous commit to rev will be used.')
parser.add_argument('--all',
action='store_true',
help='Build/download all revs from --reference-rev to '
'rev and diff the contiguous revisions.')
parser.add_argument('--include-slow-options',
action='store_true',
help='Run some extra steps that take longer to complete. '
'This includes apk-patch-size estimation and '
'static-initializer counting.')
parser.add_argument('--single',
action='store_true',
help='Sets --reference-rev=rev.')
parser.add_argument('--unstripped',
action='store_true',
help='Save the unstripped native library when archiving.')
parser.add_argument(
'--subrepo',
help='Specify a subrepo directory to use. Implies '
'--no-gclient. All git commands will be executed '
'from the subrepo directory.')
parser.add_argument('--no-gclient',
action='store_true',
help='Do not perform gclient sync steps.')
parser.add_argument('--apply-patch', dest='extra_rev',
help='A local commit to cherry-pick before each build. '
'This can leave your repo in a broken state if '
'the cherry-pick fails.')
parser.add_argument('--step', type=int,
help='Assumes --all and only builds/downloads every '
'--step\'th revision.')
parser.add_argument('-v',
'--verbose',
action='store_true',
help='Show commands executed, extra debugging output'
', and Ninja/GN output.')
build_group = parser.add_argument_group('build arguments')
build_group.add_argument('-j',
dest='max_jobs',
help='Run N jobs in parallel.')
build_group.add_argument('-l',
dest='max_load_average',
help='Do not start new jobs if the load average is '
'greater than N.')
build_group.add_argument('--no-goma',
action='store_false',
dest='use_goma',
default=True,
help='Do not use goma when building with ninja.')
build_group.add_argument('--clean',
action='store_true',
help='Do a clean build for each revision.')
build_group.add_argument('--gn-args',
default='',
help='Extra GN args to set.')
build_group.add_argument('--target-os',
default='android',
choices=['android', 'linux'],
help='target_os gn arg. Default: android.')
build_group.add_argument('--output-directory',
default=_DEFAULT_OUT_DIR,
help='ninja output directory. '
'Default: %s.' % _DEFAULT_OUT_DIR)
build_group.add_argument('--enable-chrome-android-internal',
action='store_true',
help='Allow downstream targets to be built.')
build_group.add_argument('--target',
help='GN target to build. Linux default: chrome. '
'Android default: monochrome_public_minimal_apks or '
'monochrome_minimal_apks (depending on '
'--enable-chrome-android-internal).')
if len(sys.argv) == 1:
parser.print_help()
return 1
args = parser.parse_args()
log_level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(level=log_level,
format='%(levelname).1s %(relativeCreated)6d %(message)s')
build = _BuildHelper(args)
subrepo = args.subrepo or _SRC_ROOT
_EnsureDirectoryClean(subrepo)
_SetRestoreFunc(subrepo)
if build.IsLinux():
_VerifyUserAccepts('Linux diffs have known deficiencies (crbug/717550).')
reference_rev = args.reference_rev or args.rev + '^'
if args.single:
reference_rev = args.rev
_ValidateRevs(args.rev, reference_rev, subrepo, args.extra_rev)
revs = _GenerateRevList(args.rev, reference_rev, args.all, subrepo, args.step)
with _TmpCopyBinarySizeDir() as paths:
supersize_path, tool_prefix = paths
diffs = [NativeDiff(build.size_name, supersize_path)]
if build.IsAndroid():
diffs += [
ResourceSizesDiff(build.apk_name)
]
diff_mngr = _DiffArchiveManager(revs, args.archive_directory, diffs, build,
subrepo, args.include_slow_options,
args.unstripped)
consecutive_failures = 0
i = 0
for i, archive in enumerate(diff_mngr.build_archives):
if archive.Exists():
logging.info('Found matching metadata for %s, skipping build step.',
archive.rev)
else:
build_failure = _SyncAndBuild(archive, build, subrepo, args.no_gclient,
args.extra_rev)
if build_failure:
logging.info(
'Build failed for %s, diffs using this rev will be skipped.',
archive.rev)
consecutive_failures += 1
if len(diff_mngr.build_archives) <= 2:
_Die('Stopping due to build failure.')
elif consecutive_failures > _ALLOWED_CONSECUTIVE_FAILURES:
_Die('%d builds failed in a row, last failure was %s.',
consecutive_failures, archive.rev)
else:
archive.ArchiveBuildResults(supersize_path, tool_prefix)
consecutive_failures = 0
if i != 0:
diff_mngr.MaybeDiff(i - 1, i)
diff_mngr.GenerateHtmlReport(0, i)
diff_mngr.Summarize()
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
| -672,655,338,578,976,400
| 34.992593
| 80
| 0.62582
| false
| 3.601927
| false
| false
| false
|
MarkMolina/moneypenny-bot
|
bittrex_playground.py
|
1
|
24571
|
import StringIO
import json
import logging
import random
import urllib
import urllib2
import time
import math
import re
import requests
# import requests_toolbelt.adapters.appengine
# Use the App Engine Requests adapter. This makes sure that Requests uses
# URLFetch.
# requests_toolbelt.adapters.appengine.monkeypatch()
# sending images
# try:
# from PIL import Image
# except:
# pass
# import multipart
#
# # standard app engineimports
# from google.appengine.api import urlfetch
# from google.appengine.ext import deferred
# from google.appengine.ext import ndb
# from google.appengine.api.taskqueue import TaskRetryOptions
# import webapp2
TOKEN = '363749995:AAEMaasMVLSPqSuSr1MiEFcgQH_Yn88hlbg'
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
#urlfetch.set_default_fetch_deadline(60)
ALERTS = set()
#
# def deffered_track_pair_price(pair, current_price, target_price, chat_id, message_id):
# alert_key = (pair, target_price)
# logging.info("Checking price alert..{} if {}".format(pair, target_price))
# kraken = KrakenExchange()
# ticker = kraken.getTicker(pair=ASSETPAIRS[pair])
# askPrice = float(ticker['Ask Price'][0])
# bidPrice = float(ticker['Bid Price'][0])
# live_price = (askPrice + bidPrice) / 2
# target_price = float(target_price)
# if current_price < target_price and live_price >= target_price:
# ALERTS.remove(alert_key)
# reply_message(
# chat_id=chat_id,
# message_id=message_id,
# msg="{} just hit {}!".format(
# pair, live_price
# )
# )
# elif current_price > target_price and live_price <= target_price:
# ALERTS.remove(alert_key)
# reply_message(
# chat_id=chat_id,
# message_id=message_id,
# msg="{} just hit {}!".format(
# pair, live_price
# )
# )
# else:
# raise Exception("Alert not hit, fail task so it is retried")
#
#
# def track_pair_price(pair, current_price, target_price, chat_id, message_id):
# ALERTS.add(
# (pair, target_price)
# )
#
# deferred.defer(
# deffered_track_pair_price,
# pair, current_price, target_price, chat_id, message_id,
# _retry_options=TaskRetryOptions(
# min_backoff_seconds=60,
# task_age_limit=86400
# ) # 1 day
# )
#
#
# # ================================
#
# class EnableStatus(ndb.Model):
# # key name: str(chat_id)
# enabled = ndb.BooleanProperty(indexed=False, default=False)
#
#
# # ================================
#
# def setEnabled(chat_id, yes):
# es = EnableStatus.get_or_insert(str(chat_id))
# es.enabled = yes
# es.put()
#
# def getEnabled(chat_id):
# es = EnableStatus.get_by_id(str(chat_id))
# if es:
# return es.enabled
# return False
#
#
# # ================================
#
# class MeHandler(webapp2.RequestHandler):
# def get(self):
# urlfetch.set_default_fetch_deadline(60)
# self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
#
#
# class GetUpdatesHandler(webapp2.RequestHandler):
# def get(self):
# urlfetch.set_default_fetch_deadline(60)
# self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
#
#
# class SetWebhookHandler(webapp2.RequestHandler):
# def get(self):
# urlfetch.set_default_fetch_deadline(60)
# url = self.request.get('url')
# if url:
# self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
#
#
# def reply_message(chat_id, message_id, msg=None, img=None):
# if msg:
# resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({
# 'chat_id': str(chat_id),
# 'text': msg.encode('utf-8'),
# 'disable_web_page_preview': 'true',
# 'reply_to_message_id': str(message_id),
# 'parse_mode': 'Markdown'
# })).read()
# elif img:
# resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
# ('chat_id', str(chat_id)),
# ('reply_to_message_id', str(message_id)),
# ], [
# ('photo', 'image.jpg', img),
# ])
# else:
# logging.error('no msg or img specified')
# resp = None
#
# logging.info('send response:')
# logging.info(resp)
class WebhookHandler(webapp2.RequestHandler):
def post(self):
urlfetch.set_default_fetch_deadline(60)
body = json.loads(self.request.body)
logging.info('request body:')
logging.info(body)
self.response.write(json.dumps(body))
update_id = body['update_id']
try:
message = body['message']
except:
message = body['edited_message']
message_id = message.get('message_id')
date = message.get('date')
text = message.get('text')
fr = message.get('from')
chat = message['chat']
chat_id = chat['id']
def reply(msg=None, img=None):
reply_message(msg=msg, img=img, chat_id=chat_id, message_id=message_id)
if not text:
logging.info('no text')
return
if text.startswith('/'):
            # Chain the substitutions so every btc -> xbt rewrite survives
            text_kraken = re.sub(r'(\/btc)', '/xbt', text)
            text_kraken = re.sub(r'(btc$)', 'xbt', text_kraken)
            text_kraken = re.sub(r'(btc\s+)', 'xbt ', text_kraken)
if text == '/start':
reply('Bot enabled')
setEnabled(chat_id, True)
if text == '/alerts':
reply(
"*Alerts*\n{}".format(
"\n".join([
"{}: {}".format(pair, price)
for pair, price in ALERTS
])
)
)
elif text == '/stop':
reply('Bot disabled')
setEnabled(chat_id, False)
elif text == '/rules':
reply('1. You do not talk about WHALE HUNTERS \n2. You DO NOT talk about WHALE HUNTERS \n3. Master level of TA skills required \n3.141592 Bring pie \n4. Inactive members will be banned')
elif text == '/image':
img = Image.new('RGB', (512, 512))
base = random.randint(0, 16777216)
pixels = [base+i*j for i in range(512) for j in range(512)]
img.putdata(pixels)
output = StringIO.StringIO()
img.save(output, 'JPEG')
reply(img=output.getvalue())
elif text == '/help' or text == '/options':
r = '/rules : show rules\n/image : generate an image\n/time(s) : get server time\n/assets : list of assets\n/pairs : list of all pairs (long)\n/<asset> : show this assets pairs\n/<assetpair> : show assetpairs price\n/alerts : show alerts'
reply(r)
elif text == '/time' or text == '/times':
time = KrakenExchange().getServerTime()['rfc1123']
r = 'Kraken server time: {}'.format(time)
reply(r)
elif text == '/assets':
r = 'Reply with /<asset> to get its pairs\n{}'.format(', '.join(ASSETS))
reply(r)
elif text == '/pairs':
assets = ASSETPAIRS.keys()
assets.sort()
r = 'Reply with /<assetpair> to get bid/ask prices\n{}'.format(', '.join(assets))
reply(r)
elif text[1:].upper() in ASSETS:
pairs = []
for pair in ASSETPAIRS:
if pair[:3] == text[1:].upper()[:3]:
pairs.append(pair)
r = 'Reply with /<assetpair> to get bid/ask prices\n{}'.format(', '.join(pairs))
reply(r)
elif text_kraken.split(' ')[0][1:].upper() in ASSETPAIRS.keys():
pair = text_kraken.split(' ')[0][1:].upper()
kraken = KrakenExchange()
ticker = kraken.getTicker(pair=ASSETPAIRS[pair])
askPrice = float(ticker['Ask Price'][0])
bidPrice = float(ticker['Bid Price'][0])
price = (askPrice + bidPrice) / 2
highPrice = float(ticker['High'][0])
lowPrice = float(ticker['Low'][0])
# time = kraken.serverTime['rfc1123']
r = ""
if len(text_kraken.split(' ')) > 1:
if text_kraken.split(' ')[1] == 'fib':
l_one = highPrice
l_two = highPrice - ((highPrice - lowPrice) * 0.236)
l_three = highPrice - ((highPrice - lowPrice) * 0.382)
l_four = highPrice - ((highPrice - lowPrice) * 0.5)
l_five = highPrice - ((highPrice - lowPrice) * 0.618)
l_six = highPrice - ((highPrice - lowPrice) * 0.786)
l_seven = lowPrice
l_eight = highPrice - ((highPrice - lowPrice) * 1.272)
l_nine = highPrice - ((highPrice - lowPrice) * 1.618)
r = '*{0}* 24h fib levels\n\n*0%*: {1}\n*23.6%*: {2}\n*38.2%*: {3}\n*50%*: {4}\n*61.8%*: {5}\n*78.6%*: {6}\n*100%*: {7}\n\n*127.2%*: {8}\n*161.8%*: {9}\n'.format(pair, l_one, l_two, l_three, l_four, l_five, l_six, l_seven, l_eight, l_nine)
if text_kraken.split(' ')[1] == 'book':
order_book = kraken.getOrderBook(pair=ASSETPAIRS[pair])
book = order_book[ASSETPAIRS[pair]]
r = "*OrderBook* {0} \n*Asks*\n{1}\n\n*Bids*\n{2}".format(
pair,
"\n".join(
["{} {}".format(ask[0], ask[1]) for ask in book['asks'][:10]]
),
"\n".join(
["{} {}".format(bid[0], bid[1]) for bid in book['bids'][:10]]
),
)
if text_kraken.split(' ')[1] == 'alert':
try:
target_price = text_kraken.split(' ')[2]
track_pair_price(pair, price, target_price, chat_id, message_id)
r = 'You want me to keep an eye on your {}? I will let you know if it rises or drops to {}'.format(
pair, target_price
)
logging.info(r)
except IndexError:
r = 'Tell me what price you want an alert for, doofus!'
else:
r = '*{}* \n*Price:* {} \n*---* \n*High:* {} \n*Low:* {}'.format(pair, price, highPrice, lowPrice)
# r += '\n\n_updated: {}_'.format(time)
reply(r)
elif text.split(' ')[0][1:].upper() in BITT_ASSETPAIRS:
# TODO: insert bittrex methods here
pair = text.split(' ')[0][1:]
bittrex = BittrexExchange()
ticker = bittrex.getTicker(pair=pair)
askPrice = float(ticker['Ask Price'])
bidPrice = float(ticker['Bid Price'])
price = (askPrice + bidPrice) / 2
highPrice = float(ticker['High'])
lowPrice = float(ticker['Low'])
r = ""
if len(text.split(' ')) > 1:
if text.split(' ')[1] == 'fib':
l_one = highPrice
l_two = highPrice - ((highPrice - lowPrice) * 0.236)
l_three = highPrice - ((highPrice - lowPrice) * 0.382)
l_four = highPrice - ((highPrice - lowPrice) * 0.5)
l_five = highPrice - ((highPrice - lowPrice) * 0.618)
l_six = highPrice - ((highPrice - lowPrice) * 0.786)
l_seven = lowPrice
l_eight = highPrice - ((highPrice - lowPrice) * 1.272)
l_nine = highPrice - ((highPrice - lowPrice) * 1.618)
r = '*{0}* 24h fib levels\n\n*0%*: {1}\n*23.6%*: {2}\n*38.2%*: {3}\n*50%*: {4}\n*61.8%*: {5}\n*78.6%*: {6}\n*100%*: {7}\n\n*127.2%*: {8}\n*161.8%*: {9}\n'.format(pair, l_one, l_two, l_three, l_four, l_five, l_six, l_seven, l_eight, l_nine)
else:
r = '*{}* \n*Price:* {} \n*---* \n*High:* {} \n*Low:* {}'.format(pair, price, highPrice, lowPrice)
reply(r)
elif len(text) == 4 or len(text) == 7:
reply('This asset(pair) is not recognized. Pick one from the /assets list, stupid.')
else:
reply('You know, this sort of behaviour could qualify as sexual harassment.')
# bot text reply's
elif 'beach' in text:
reply('dont forget to bring a towel')
# elif ('sell' in text or 'dropping' in text or 'dumping' in text) and random.choice([True, False]):
# reply('weak hands!')
# elif 'what time' in text:
# reply('look at the corner of your screen!')
# elif 'moon' in text:
# reply('http://www.louwmanexclusive.com/nl/brands/lamborghini/')
# elif 'bitch' in text:
# reply('dont talk to me like that!')
# elif 'penny' in text:
# reply('Dont talk behind my back!')
else:
if getEnabled(chat_id):
reply('I got your message! (but I do not know how to answer)')
else:
logging.info('not enabled for chat_id {}'.format(chat_id))
# ===== Kraken Exchange methods & classes ======
PUBLIC_URLS = {
'time': 'https://api.kraken.com/0/public/Time',
'assets': 'https://api.kraken.com/0/public/Assets',
'assetPairs': 'https://api.kraken.com/0/public/AssetPairs',
'ticker': 'https://api.kraken.com/0/public/Ticker',
'ohlc': 'https://api.kraken.com/0/public/OHLC',
'orderBook': 'https://api.kraken.com/0/public/Depth',
'recentTrades': 'https://api.kraken.com/0/public/Trades',
'spread': 'https://api.kraken.com/0/public/Spread',
}
TICKER_MAPPING = {
'a': 'Ask Price',
'b': 'Bid Price',
'c': 'Last Trade',
'v': 'Volume',
'p': 'Volume weighted avg',
't': '# Trades',
'l': 'Low',
'h': 'High',
'o': 'Opening Price',
}
ASSETS = ['DASH', 'EOS', 'ETC', 'ETH', 'GNO', 'ICN', 'LTC', 'MLN', 'REP', 'USDT',
'XBT', 'XDG', 'XLM', 'XMR', 'XRP', 'ZEC', 'BCH']
ASSETPAIRS = {
'DASHEUR': 'DASHEUR',
'DASHUSD': 'DASHUSD',
'DASHXBT': 'DASHXBT',
'EOSETH': 'EOSETH',
'EOSEUR': 'EOSEUR',
'EOSUSD': 'EOSUSD',
'EOSXBT': 'EOSXBT',
'ETCETH': 'XETCXETH',
'ETCEUR': 'XETCZEUR',
'ETCUSD': 'XETCZUSD',
'ETCXBT': 'XETCXXBT',
'ETHCAD': 'XETHZCAD',
'ETHEUR': 'XETHZEUR',
'ETHGBP': 'XETHZGBP',
'ETHJPY': 'XETHZJPY',
'ETHUSD': 'XETHZUSD',
'ETHXBT': 'XETHXXBT',
'GNOETH': 'GNOETH',
'GNOEUR': 'GNOEUR',
'GNOUSD': 'GNOUSD',
'GNOXBT': 'GNOXBT',
'ICNETH': 'XICNXETH',
'ICNXBT': 'XICNXXBT',
'LTCEUR': 'XLTCZEUR',
'LTCUSD': 'XLTCZUSD',
'LTCXBT': 'XLTCXXBT',
'MLNETH': 'XMLNXETH',
'MLNXBT': 'XMLNXXBT',
'REPETH': 'XREPXETH',
'REPEUR': 'XREPZEUR',
'REPUSD': 'XREPZUSD',
'REPXBT': 'XREPXXBT',
'USDTUSD': 'USDTZUSD',
'XBTCAD': 'XXBTZCAD',
'XBTEUR': 'XXBTZEUR',
'XBTGBP': 'XXBTZGBP',
'XBTJPY': 'XXBTZJPY',
'XBTUSD': 'XXBTZUSD',
'XDGXBT': 'XXDGXXBT',
'XLMEUR': 'XXLMZEUR',
'XLMUSD': 'XXLMZUSD',
'XLMXBT': 'XXLMXXBT',
'XMREUR': 'XXMRZEUR',
'XMRUSD': 'XXMRZUSD',
'XMRXBT': 'XXMRXXBT',
'XRPCAD': 'XXRPZCAD',
'XRPEUR': 'XXRPZEUR',
'XRPJPY': 'XXRPZJPY',
'XRPUSD': 'XXRPZUSD',
'XRPXBT': 'XXRPXXBT',
'ZECEUR': 'XZECZEUR',
'ZECUSD': 'XZECZUSD',
'ZECXBT': 'XZECXXBT',
'BCHEUR': 'BCHEUR',
'BCHUSD': 'BCHUSD',
'BCHXBT': 'BCHXBT',
}
MAXREQUESTS = 15
def _query(url, header):
r = requests.post(url, data=header)
if r.status_code == 200:
return json.loads(r.text)['result']
# Return a ValueError instance on failure so the existing
# `type(r) == ValueError` checks in the callers actually trigger.
return ValueError('request to {} failed with HTTP status {}'.format(url, r.status_code))
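# Usage sketch (assumes the Kraken public API is reachable):
# _query(PUBLIC_URLS['time'], None) -> {'unixtime': ..., 'rfc1123': ...}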
class KrakenExchange(object):
"""
Holds all methods for fetching Assets, Assetpairs and current Ticker
values from the Kraken Exchange.
Time Skew can be displayed by requesting server time.
"""
def __init__(self):
super(KrakenExchange, self).__init__()
def query_public(self, type, header=None):
return _query(PUBLIC_URLS[type], header)
def getServerTime(self):
serverTime = self.query_public('time')
if type(serverTime) == ValueError:
return serverTime.message
self.serverTime = serverTime
return self.serverTime
def getServerSkew(self):
self.serverSkew = time.time() - self.getServerTime()['unixtime']
return self.serverSkew
def getOrderBook(self, pair):
header = dict(
pair=pair,
count=10,
)
r = self.query_public('orderBook', header)
return r
def getTicker(self, pair):
header = {'pair': pair} if pair else None
r = self.query_public('ticker', header)
if type(r) == ValueError:
return r.message
self.ticker = {}
ticker = r[pair]
for t in ticker.keys():
self.ticker[TICKER_MAPPING[t]] = ticker[t]
return self.ticker
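# Minimal usage sketch (kept as a comment because it performs live HTTP calls):
# kraken = KrakenExchange()
# kraken.getServerSkew() # seconds between the local clock and Kraken server time
# kraken.getTicker(ASSETPAIRS['XBTUSD']) # {'Ask Price': [...], 'Low': [...], ...}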
# ===== Bittrex Exchange methods & classes ======
BITT_PUBLIC_URLS = {
# hold open markets, assets and pairs.
'markets': 'https://bittrex.com/api/v1.1/public/getmarkets',
'currencies': 'https://bittrex.com/api/v1.1/public/getcurrencies ',
# Just the current price and bid ask.
'ticker': 'https://bittrex.com/api/v1.1/public/getticker',
# > 1 market 24h summary, current high-low etc
'summary': 'https://bittrex.com/api/v1.1/public/getmarketsummary',
# > 1 market 24h summary, current high-low etc
'summaries': 'https://bittrex.com/api/v1.1/public/getmarketsummaries',
'orderBook': 'https://bittrex.com/api/v1.1/public/getorderbook',
'history': 'https://bittrex.com/api/v1.1/public/getmarkethistory'
}
BITT_TICKER_MAPPING = {
'MarketName': 'Pair',
'High': 'High',
'Low': 'Low',
'Volume': 'Volume',
'Last': 'Last',
'BaseVolume': 'Base Volume',
'Bid': 'Bid Price',
'Ask': 'Ask Price',
'OpenBuyOrders': '# Buy Orders',
'OpenSellOrders': '# Sell Orders'
}
BITT_ASSETPAIRS = [
u'BTC-LTC',
u'BTC-DOGE',
u'BTC-VTC',
u'BTC-PPC',
u'BTC-FTC',
u'BTC-RDD',
u'BTC-NXT',
u'BTC-DASH',
u'BTC-POT',
u'BTC-BLK',
u'BTC-EMC2',
u'BTC-XMY',
u'BTC-AUR',
u'BTC-EFL',
u'BTC-GLD',
u'BTC-SLR',
u'BTC-PTC',
u'BTC-GRS',
u'BTC-NLG',
u'BTC-RBY',
u'BTC-XWC',
u'BTC-MONA',
u'BTC-THC',
u'BTC-ENRG',
u'BTC-ERC',
u'BTC-NAUT',
u'BTC-VRC',
u'BTC-CURE',
u'BTC-XBB',
u'BTC-XMR',
u'BTC-CLOAK',
u'BTC-START',
u'BTC-KORE',
u'BTC-XDN',
u'BTC-TRUST',
u'BTC-NAV',
u'BTC-XST',
u'BTC-BTCD',
u'BTC-VIA',
u'BTC-UNO',
u'BTC-PINK',
u'BTC-IOC',
u'BTC-CANN',
u'BTC-SYS',
u'BTC-NEOS',
u'BTC-DGB',
u'BTC-BURST',
u'BTC-EXCL',
u'BTC-SWIFT',
u'BTC-DOPE',
u'BTC-BLOCK',
u'BTC-ABY',
u'BTC-BYC',
u'BTC-XMG',
u'BTC-BLITZ',
u'BTC-BAY',
u'BTC-BTS',
u'BTC-FAIR',
u'BTC-SPR',
u'BTC-VTR',
u'BTC-XRP',
u'BTC-GAME',
u'BTC-COVAL',
u'BTC-NXS',
u'BTC-XCP',
u'BTC-BITB',
u'BTC-GEO',
u'BTC-FLDC',
u'BTC-GRC',
u'BTC-FLO',
u'BTC-NBT',
u'BTC-MUE',
u'BTC-XEM',
u'BTC-CLAM',
u'BTC-DMD',
u'BTC-GAM',
u'BTC-SPHR',
u'BTC-OK',
u'BTC-SNRG',
u'BTC-PKB',
u'BTC-CPC',
u'BTC-AEON',
u'BTC-ETH',
u'BTC-GCR',
u'BTC-TX',
u'BTC-BCY',
u'BTC-EXP',
u'BTC-INFX',
u'BTC-OMNI',
u'BTC-AMP',
u'BTC-AGRS',
u'BTC-XLM',
u'BTC-BTA',
u'USDT-BTC',
u'BITCNY-BTC',
u'BTC-CLUB',
u'BTC-VOX',
u'BTC-EMC',
u'BTC-FCT',
u'BTC-MAID',
u'BTC-EGC',
u'BTC-SLS',
u'BTC-RADS',
u'BTC-DCR',
u'BTC-SAFEX',
u'BTC-BSD',
u'BTC-XVG',
u'BTC-PIVX',
u'BTC-XVC',
u'BTC-MEME',
u'BTC-STEEM',
u'BTC-2GIVE',
u'BTC-LSK',
u'BTC-PDC',
u'BTC-BRK',
u'BTC-DGD',
u'ETH-DGD',
u'BTC-WAVES',
u'BTC-RISE',
u'BTC-LBC',
u'BTC-SBD',
u'BTC-BRX',
u'BTC-DRACO',
u'BTC-ETC',
u'ETH-ETC',
u'BTC-STRAT',
u'BTC-UNB',
u'BTC-SYNX',
u'BTC-TRIG',
u'BTC-EBST',
u'BTC-VRM',
u'BTC-SEQ',
u'BTC-XAUR',
u'BTC-SNGLS',
u'BTC-REP',
u'BTC-SHIFT',
u'BTC-ARDR',
u'BTC-XZC',
u'BTC-NEO',
u'BTC-ZEC',
u'BTC-ZCL',
u'BTC-IOP',
u'BTC-DAR',
u'BTC-GOLOS',
u'BTC-HKG',
u'BTC-UBQ',
u'BTC-KMD',
u'BTC-GBG',
u'BTC-SIB',
u'BTC-ION',
u'BTC-LMC',
u'BTC-QWARK',
u'BTC-CRW',
u'BTC-SWT',
u'BTC-TIME',
u'BTC-MLN',
u'BTC-ARK',
u'BTC-DYN',
u'BTC-TKS',
u'BTC-MUSIC',
u'BTC-DTB',
u'BTC-INCNT',
u'BTC-GBYTE',
u'BTC-GNT',
u'BTC-NXC',
u'BTC-EDG',
u'BTC-LGD',
u'BTC-TRST',
u'ETH-GNT',
u'ETH-REP',
u'USDT-ETH',
u'ETH-WINGS',
u'BTC-WINGS',
u'BTC-RLC',
u'BTC-GNO',
u'BTC-GUP',
u'BTC-LUN',
u'ETH-GUP',
u'ETH-RLC',
u'ETH-LUN',
u'ETH-SNGLS',
u'ETH-GNO',
u'BTC-APX',
u'BTC-TKN',
u'ETH-TKN',
u'BTC-HMQ',
u'ETH-HMQ',
u'BTC-ANT',
u'ETH-TRST',
u'ETH-ANT',
u'BTC-SC',
u'ETH-BAT',
u'BTC-BAT',
u'BTC-ZEN',
u'BTC-1ST',
u'BTC-QRL',
u'ETH-1ST',
u'ETH-QRL',
u'BTC-CRB',
u'ETH-CRB',
u'ETH-LGD',
u'BTC-PTOY',
u'ETH-PTOY',
u'BTC-MYST',
u'ETH-MYST',
u'BTC-CFI',
u'ETH-CFI',
u'BTC-BNT',
u'ETH-BNT',
u'BTC-NMR',
u'ETH-NMR',
u'ETH-TIME',
u'ETH-LTC',
u'ETH-XRP',
u'BTC-SNT',
u'ETH-SNT',
u'BTC-DCT',
u'BTC-XEL',
u'BTC-MCO',
u'ETH-MCO',
u'BTC-ADT',
u'ETH-ADT',
u'BTC-FUN',
u'ETH-FUN',
u'BTC-PAY',
u'ETH-PAY',
u'BTC-MTL',
u'ETH-MTL',
u'BTC-STORJ',
u'ETH-STORJ',
u'BTC-ADX',
u'ETH-ADX',
u'ETH-DASH',
u'ETH-SC',
u'ETH-ZEC',
u'USDT-ZEC',
u'USDT-LTC',
u'USDT-ETC',
u'USDT-XRP',
u'BTC-OMG',
u'ETH-OMG',
u'BTC-CVC',
u'ETH-CVC',
u'BTC-PART',
u'BTC-QTUM',
u'ETH-QTUM',
u'ETH-XMR',
u'ETH-XEM',
u'ETH-XLM',
u'ETH-NEO',
u'USDT-XMR',
u'USDT-DASH',
u'ETH-BCC',
u'USDT-BCC',
u'BTC-BCC',
u'USDT-NEO',
u'ETH-WAVES',
u'ETH-STRAT',
u'ETH-DGB',
u'ETH-FCT',
u'ETH-BTS']
# TODO: retrieve all pairs from the `getmarket` data. Pairs will have "-"
# which will be handy for separation.
class BittrexExchange(object):
"""
Holds all methods for fetching Assets, Assetpairs, current Ticker,
24h market summaries, order book and market history
values from the Bittrex Exchange.
"""
def __init__(self):
super(BittrexExchange, self).__init__()
def query_public(self, type, header=None):
return _query(BITT_PUBLIC_URLS[type], header)
def getTicker(self, pair):
header = {'market': pair} if pair else None
r = self.query_public('summary', header)
if type(r) == ValueError:
return r.message
self.ticker = {}
ticker = r[0]
# print(ticker)
for t in ticker.keys():
if t in BITT_TICKER_MAPPING.keys():
self.ticker[BITT_TICKER_MAPPING[t]] = ticker[t]
return self.ticker
def getmarkets(self, type, header=None):
header = None
r = self.query_public('markets', header)
self.markets = []
markets = r
for i, cont in enumerate(markets):
self.markets.append(markets[i]["MarketName"])
return self.markets
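# Minimal usage sketch (kept as a comment because it performs live HTTP calls):
# bittrex = BittrexExchange()
# bittrex.getTicker('BTC-ETH') # {'Pair': 'BTC-ETH', 'High': ..., 'Low': ..., ...}
# bittrex.getmarkets(None) # ['BTC-LTC', 'BTC-DOGE', ...]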
|
mit
| -872,781,368,876,894,800
| 29.148466
| 263
| 0.503602
| false
| 2.935954
| false
| false
| false
|
AntonelliLab/seqcap_processor
|
bin/aTRAM-master/tests/lib/test_core_atram.py
|
1
|
5511
|
"""Testing functions in core_atram."""
# pylint: disable=too-many-arguments,unused-variable
from os.path import join
from unittest.mock import patch, MagicMock, call
import tempfile
import lib.core_atram as core_atram
from lib.assemblers.base import BaseAssembler
def set_up():
"""Build a generic assembler."""
cxn = 'cxn'
args = {
'query': ['query_file_1', 'query_file_2'],
'blast_db': ['blast_db_1', 'blast_db_2'],
'iterations': 1,
'log_file': 'log_file_1',
'log_level': 'info',
'temp_dir': 'temp_dir_1'}
assembler = BaseAssembler(args, cxn)
return args, cxn, assembler
@patch('lib.core_atram.write_query_seq')
def test_split_queries_01(write_query_seq):
"""Test split queries where there are no fasta files to split."""
args, cxn, _ = set_up()
args['query_split'] = []
queries = core_atram.split_queries(args)
write_query_seq.assert_not_called()
assert args['query'] == queries
@patch('lib.core_atram.write_query_seq')
def test_split_queries_02(write_query_seq):
"""Test split queries where there are fasta files to split."""
args, cxn, assembler = set_up()
args['query_split'] = ['tests/data/split_queries1.txt']
args['protein'] = True
with tempfile.TemporaryDirectory(prefix='test_') as temp_dir:
args['temp_dir'] = temp_dir
queries = core_atram.split_queries(args)
split_files = [
join(temp_dir, 'queries', 'split_queries1_seq1_1_1.fasta'),
join(temp_dir, 'queries', 'split_queries1_seq2_2_2_2.fasta'),
join(temp_dir, 'queries', 'split_queries1_seq3_3.fasta'),
join(temp_dir, 'queries', 'split_queries1_seq1_1_4.fasta')]
calls = [
call(split_files[0], 'seq1/1', 'A' * 10),
call(split_files[1], 'seq2:2/2', 'C' * 20),
call(split_files[2], 'seq3', 'G' * 30),
call(split_files[3], 'seq1+1', 'T' * 10)]
write_query_seq.assert_has_calls(calls)
assert split_files == queries
def test_write_query_seq_01():
"""It writes a sequence to a fasta file."""
args, cxn, assembler = set_up()
with tempfile.TemporaryDirectory(prefix='test_') as temp_dir:
path = join(temp_dir, 'test_query.fasta')
core_atram.write_query_seq(
path,
'my sequence name',
'aaaacccgggtt')
with open(path) as test_file:
expect = (
'>my sequence name\n'
'aaaacccgggtt\n')
assert expect == test_file.read()
@patch('lib.db_atram.create_sra_blast_hits_table')
@patch('lib.db_atram.create_contig_blast_hits_table')
@patch('lib.db_atram.create_assembled_contigs_table')
def test_clean_database_01(
create_assembled_contigs_table,
create_contig_blast_hits_table,
create_sra_blast_hits_table):
"""It runs the clean_database function."""
args, cxn, assembler = set_up()
dbh = 'my_db'
core_atram.clean_database(dbh)
create_assembled_contigs_table.assert_called_once_with(dbh)
create_contig_blast_hits_table.assert_called_once_with(dbh)
create_sra_blast_hits_table.assert_called_once_with(dbh)
@patch('lib.core_atram.blast_query_against_all_shards')
@patch('lib.core_atram.create_query_from_contigs')
@patch('lib.core_atram.filter_contigs')
def test_assembly_loop_iteration_01(
filter_contigs,
create_query_from_contigs,
blast_query_against_all_shards):
"""It iterates over the assembly processes."""
args, _, assembler = set_up()
temp_dir = 'my_temp_dir'
assembler.blast_only = False
assembler.state['query_file'] = args['query'][0]
assembler.state['blast_db'] = args['blast_db'][0]
assembler.state['iter_dir'] = 'my_iter_dir'
assembler.init_iteration = MagicMock()
assembler.count_blast_hits = MagicMock(return_value=1)
assembler.write_input_files = MagicMock()
assembler.run = MagicMock()
assembler.nothing_assembled = MagicMock(return_value=False)
assembler.assembled_contigs_count = MagicMock(return_value=11)
assembler.no_new_contigs = MagicMock(return_value=False)
core_atram.assembly_loop_iteration(args, assembler)
blast_query_against_all_shards.assert_called_once_with(assembler)
assert assembler.count_blast_hits.call_count == 1
assembler.no_new_contigs.assert_called_once_with(11)
create_query_from_contigs.assert_called_once()
filter_contigs.assert_called_once()
@patch('lib.blast.all_shard_paths')
def test_shard_fraction_01(all_shard_paths):
"""It gets the shards we are using when there is no split."""
args, cxn, assembler = set_up()
returns = ['1st', '2nd', '3rd', '4th']
assembler.state['blast_db'] = args['blast_db'][0]
assembler.args['fraction'] = 1.0
all_shard_paths.return_value = returns
shards = core_atram.shard_fraction(assembler)
assert returns == shards
all_shard_paths.assert_called_once_with(args['blast_db'][0])
@patch('lib.blast.all_shard_paths')
def test_shard_fraction_02(all_shard_paths):
"""It gets the shards we are using when there is a split."""
args, cxn, assembler = set_up()
assembler.args['fraction'] = 0.5
assembler.state['blast_db'] = args['blast_db'][0]
returns = ['1st', '2nd', '3rd', '4th']
all_shard_paths.return_value = returns
shards = core_atram.shard_fraction(assembler)
assert ['1st', '2nd'] == shards
all_shard_paths.assert_called_once_with(args['blast_db'][0])
|
mit
| 3,588,765,181,514,383,000
| 32
| 69
| 0.648521
| false
| 3.059967
| true
| false
| false
|
alivecor/tensorflow
|
tensorflow/python/ops/array_ops.py
|
1
|
82629
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for manipulating tensors.
See the @{$python/array_ops} guide.
@@string_to_number
@@to_double
@@to_float
@@to_bfloat16
@@to_int32
@@to_int64
@@cast
@@bitcast
@@saturate_cast
@@broadcast_dynamic_shape
@@broadcast_static_shape
@@shape
@@shape_n
@@size
@@rank
@@reshape
@@squeeze
@@expand_dims
@@meshgrid
@@slice
@@strided_slice
@@split
@@tile
@@pad
@@concat
@@stack
@@parallel_stack
@@unstack
@@reverse_sequence
@@reverse
@@reverse_v2
@@transpose
@@extract_image_patches
@@space_to_batch_nd
@@space_to_batch
@@required_space_to_batch_paddings
@@batch_to_space_nd
@@batch_to_space
@@space_to_depth
@@depth_to_space
@@gather
@@gather_nd
@@unique_with_counts
@@scatter_nd
@@dynamic_partition
@@dynamic_stitch
@@boolean_mask
@@one_hot
@@sequence_mask
@@dequantize
@@quantize_v2
@@quantized_concat
@@setdiff1d
@@fake_quant_with_min_max_args
@@fake_quant_with_min_max_args_gradient
@@fake_quant_with_min_max_vars
@@fake_quant_with_min_max_vars_gradient
@@fake_quant_with_min_max_vars_per_channel
@@fake_quant_with_min_max_vars_per_channel_gradient
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.util import deprecation
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_baseslice = slice
def identity(input, name=None): # pylint: disable=redefined-builtin
r"""Return a tensor with the same shape and contents as input.
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if context.in_graph_mode():
return gen_array_ops.identity(input, name=name)
else:
if context.context().device_name != input.device:
return input._copy() # pylint: disable=protected-access
return input
# pylint: disable=redefined-builtin,protected-access
def expand_dims(input, axis=None, name=None, dim=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0)) # [1, 2]
tf.shape(tf.expand_dims(t, 1)) # [2, 1]
tf.shape(tf.expand_dims(t, -1)) # [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to
expand the shape of `input`. Must be in the range
`[-rank(input) - 1, rank(input)]`.
name: The name of the output `Tensor`.
dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
Raises:
ValueError: if both `dim` and `axis` are specified.
"""
# TODO(aselle): Remove argument dim
if dim is not None:
if axis is not None:
raise ValueError("can't specify both 'dim' and 'axis'")
axis = dim
return gen_array_ops._expand_dims(input, axis, name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated(
"2016-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
return gen_array_ops._list_diff(x, y, out_idx, name)
listdiff.__doc__ = gen_array_ops._list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable,protected-access
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
return gen_array_ops._list_diff(x, y, index_dtype, name)
setdiff1d.__doc__ = gen_array_ops._list_diff.__doc__
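# Example: setdiff1d([1, 2, 3, 4], [2, 4]) returns the values [1, 3] together
# with their indices [0, 2] in the first argument.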
# pylint: enable=protected-access
def broadcast_dynamic_shape(shape_x, shape_y):
# pylint: disable=protected-access
"""Returns the broadcasted dynamic shape between `shape_x` and `shape_y`.
Args:
shape_x: A rank 1 integer `Tensor`, representing the shape of x.
shape_y: A rank 1 integer `Tensor`, representing the shape of y.
Returns:
A rank 1 integer `Tensor` representing the broadcasted shape.
"""
return gen_array_ops._broadcast_args(shape_x, shape_y)
# pylint: enable=protected-access
def broadcast_static_shape(shape_x, shape_y):
"""Returns the broadcasted static shape between `shape_x` and `shape_y`.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
return common_shapes.broadcast_shape(shape_x, shape_y)
def shape(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t) # [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`.
"""
return shape_internal(input, name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_math_ops.cast(input.dense_shape, out_type)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.as_list(), out_type, name=name)
return gen_array_ops.shape(input, name=name, out_type=out_type)
def size(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor.
This operation returns an integer representing the number of elements in
`input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.size(t) # 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`. Defaults to tf.int32.
"""
return size_internal(input, name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin,protected-access
"""Returns the size of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_math_ops._prod(
gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.num_elements(), out_type, name=name)
return gen_array_ops.size(input, name=name, out_type=out_type)
def rank(input, name=None):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Returns a 0-D `int32` `Tensor` representing the rank of `input`.
For example:
```python
# shape of tensor 't' is [2, 2, 3]
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.rank(t) # 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The
rank of a tensor is the number of indices required to uniquely select each
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
@compatibility(numpy)
Equivalent to np.ndim
@end_compatibility
"""
return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_array_ops.size(input.dense_shape, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.ndims is not None:
return constant(input_shape.ndims, dtypes.int32, name=name)
return gen_array_ops.rank(input, name=name)
def _one_like_dtype(other):
if isinstance(other, ops.Tensor):
return constant(1, other.dtype)
else:
return np.ones_like(other).dtype.type(1)
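# Example: _one_like_dtype(np.int64(3)) returns np.int64(1), while a Tensor
# argument yields a scalar constant of the same dtype; this keeps the strides
# built in _SliceHelper consistent with the dtype of the begin/end entries.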
def _SliceHelper(tensor, slice_spec, var=None):
"""Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
The notation is similar to NumPy with the restriction that
currently only support basic indexing. That means that
using a tensor as input is not currently allowed
Some useful examples:
```python
# strip leading and trailing 2 elements
foo = tf.constant([1,2,3,4,5,6])
print(foo[2:-2].eval()) # [3,4]
# skip every row and reverse every column
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[::2,::-1].eval()) # [[3,2,1], [9,8,7]]
# Insert another dimension
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],
[[7],[8],[9]]]
# Ellipses (3 equivalent operations)
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis, ...].eval()) # [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis].eval()) # [[[1,2,3], [4,5,6], [7,8,9]]]
```
Notes:
- `tf.newaxis` is `None` as in NumPy.
- An implicit ellipsis is placed at the end of the `slice_spec`
- NumPy advanced indexing is currently not supported.
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
var: In the case of variable slice assignment, the Variable
object to slice (i.e. tensor is the read-only view of this
variable).
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
begin, end, strides = [], [], []
index = 0
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
for s in slice_spec:
if isinstance(s, _baseslice):
# python doesn't always use None when constructing ranges
# for example a[:] gives slice(None,sys.maxsize,None)
# whereas a[::1] gives slice(None,None,None)
if s.start is not None and s.start is not sys.maxsize:
begin.append(s.start)
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None and s.stop != sys.maxsize:
end.append(s.stop)
else:
end.append(0)
end_mask |= (1 << index)
if s.step is not None:
strides.append(s.step)
else:
# Use a 1 of the same dtype as begin.
strides.append(_one_like_dtype(begin[-1]))
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
begin.append(s)
end.append(s + 1)
strides.append(_one_like_dtype(s))
shrink_axis_mask |= (1 << index)
index += 1
# stack possibly involves no tensors, so we must use op_scope to select the correct graph.
with ops.name_scope(None, "strided_slice",
[tensor] + begin + end + strides) as name:
if begin:
packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
stack(strides))
else:
var_empty = constant([], dtype=dtypes.int32)
packed_begin = packed_end = packed_strides = var_empty
return strided_slice(
tensor,
packed_begin,
packed_end,
packed_strides,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
var=var,
name=name)
# pylint: disable=undefined-variable,protected-access
def slice(input_, begin, size, name=None):
# pylint: disable=redefined-builtin
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input`. In other
words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
want to slice from.
Note that @{tf.Tensor.__getitem__} is typically a more pythonic way to
perform slices, as it allows you to write `foo[3:7, :-2]` instead of
`tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]
tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],
# [[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
def strided_slice(input_,
begin,
end,
strides=None,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
var=None,
name=None):
"""Extracts a strided slice of a tensor (generalized python array indexing).
**Most users will want to use @{tf.Tensor.__getitem__} and
@{tf.Variable.__getitem__}.** That allows NumPy style slicing syntax (i.e.
`tensor[..., 3:4:-1, tf.newaxis, 3]`).
This op is the low-level interface that is used to implement operators.
Those interfaces are much friendlier, and highly recommended.
To a first order, this operation extracts a slice of size `end - begin`
from a tensor `input`
starting at the location specified by `begin`. The slice continues by adding
`stride` to the `begin` index until all dimensions are not less than `end`.
Note that components of stride can be negative, which causes a reverse
slice.
This operation can be thought of as an encoding of a numpy style sliced
range. Given a python slice input[<spec0>, <spec1>, ..., <specn>]
this function will be called as follows.
`begin`, `end`, and `strides` will be all length n. n is in general
not the same dimensionality as `input`.
For the ith spec,
`begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask`,
and `shrink_axis_mask` will have the ith bit corresponding to
the ith spec.
If the ith bit of `begin_mask` is non-zero, `begin[i]` is ignored and
the fullest possible range in that dimension is used instead.
`end_mask` works analogously, except with the end range.
`foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
`foo[::-1]` reverses a tensor with shape 8.
If the ith bit of `ellipsis_mask` is non-zero, as many unspecified dimensions
as needed will be inserted between other dimensions. Only one
non-zero bit is allowed in `ellipsis_mask`.
For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
equivalent to `foo[3:5,:,:,4:5]` and
`foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
If the ith bit of `new_axis_mask` is one, then `begin`,
`end`, and `stride` are ignored and a new length 1 dimension is
added at this point in the output tensor.
For example `foo[3:5,4]` on a 10x8 tensor produces a shape 2 tensor
whereas `foo[3:5,4:5]` produces a shape 2x1 tensor with shrink_mask
being 1<<1 == 2.
If the ith bit of `shrink_axis_mask` is one, then `begin`,
`end[i]`, and `stride[i]` are used to do a slice in the appropriate
dimension, but the output tensor will be reduced in dimensionality
by one. This is only valid if the ith entry of slice[i]==1.
NOTE: `begin` and `end` are zero-indexed.
`strides` entries must be non-zero.
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]
tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],
# [3, 3, 3]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
end: An `int32` or `int64` `Tensor`.
strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
new_axis_mask: An `int32` mask.
shrink_axis_mask: An `int32` mask.
var: The variable corresponding to `input_` or None
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
if strides is None:
strides = ones_like(begin)
op = gen_array_ops.strided_slice(
input=input_,
begin=begin,
end=end,
strides=strides,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
parent_name = name
def assign(val, name=None):
"""Closure that holds all the arguments to create an assignment."""
if var is None:
raise ValueError("Sliced assignment is only supported for variables")
if name is None:
name = parent_name + "_assign"
return var._strided_slice_assign(
begin=begin,
end=end,
strides=strides,
value=val,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
if context.in_graph_mode():
# TODO(apassos) In eager mode assignment will be done by overriding
# __setitem__ instead.
op.assign = assign
return op
def _SliceHelperVar(var, slice_spec):
"""Creates a slice helper object given a variable.
This allows creating a sub-tensor from part of the current contents
of a variable. See @{tf.Tensor$`Tensor.__getitem__`}
for detailed examples of slicing.
This function in addition also allows assignment to a sliced range.
This is similar to `__setitem__` functionality in Python. However,
the syntax is different so that the user can capture the assignment
operation for grouping or passing to `sess.run()`.
For example,
```python
import tensorflow as tf
A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(A[:2, :2])) # => [[1,2], [4,5]]
op = A[:2,:2].assign(22. * tf.ones((2, 2)))
print(sess.run(op)) # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
```
Note that assignments currently do not support NumPy broadcasting
semantics.
Args:
var: An `ops.Variable` object.
slice_spec: The arguments to `Tensor.__getitem__`.
Returns:
The appropriate slice of "tensor", based on "slice_spec", as an operator.
The operator also has an `assign()` method
that can be used to generate an assignment operator.
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
return _SliceHelper(var._AsTensor(), slice_spec, var)
ops.Tensor._override_operator("__getitem__", _SliceHelper)
def parallel_stack(values, name="parallel_stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.
Requires that the shape of inputs be known at graph construction time.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the first dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
tensor will have the shape `(N, A, B, C)`.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]
```
The difference between `stack` and `parallel_stack` is that `stack` requires
all the inputs be computed before the operation will begin but doesn't require
that the input shapes be known during graph construction.
`parallel_stack` will copy pieces of the input into the output as they become
available, in some situations this can provide a performance benefit.
Unlike `stack`, `parallel_stack` does NOT support backpropagation.
This is the opposite of unstack. The numpy equivalent is
tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])
Args:
values: A list of `Tensor` objects with the same shape and type.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
"""
with ops.name_scope(name):
value_t = ops.convert_to_tensor(values[0])
value_shape = ops.convert_to_tensor(value_t).get_shape()
output_shape = tensor_shape.TensorShape([len(values)])
output_shape = output_shape.concatenate(value_shape)
# expand_dims converts concat to stack.
return gen_array_ops._parallel_concat(
[expand_dims(value, 0) for value in values], shape=output_shape)
def stack(values, axis=0, name="stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the `axis` dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]] (Pack along first dim.)
tf.stack([x, y, z], axis=1) # [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of unstack. The numpy equivalent is
```python
tf.stack([x, y, z]) = np.asarray([x, y, z])
```
Args:
values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to stack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
"""
if axis == 0:
try:
# If the input is a constant list, it can be converted to a constant op
return ops.convert_to_tensor(values, name=name)
except (TypeError, ValueError):
pass # Input list contains non-constant tensors
value_shape = ops.convert_to_tensor(values[0], name=name).get_shape()
if value_shape.ndims is not None:
expanded_num_dims = value_shape.ndims + 1
if axis < -expanded_num_dims or axis >= expanded_num_dims:
raise ValueError("axis = %d not in [%d, %d)" % (axis, -expanded_num_dims,
expanded_num_dims))
return gen_array_ops._pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
"""Converts the given list or tuple to a tensor by packing.
Args:
list_or_tuple: A (possibly nested) list or tuple containing a tensor.
dtype: The element type of the returned tensor.
name: A name for the returned tensor.
Returns:
A `tf.Tensor` with value equivalent to `list_or_tuple`.
"""
must_pack = False
converted_elems = []
with ops.name_scope(name) as scope:
for i, elem in enumerate(list_or_tuple):
if ops.is_dense_tensor_like(elem):
if dtype is not None and elem.dtype.base_dtype != dtype:
raise TypeError("Cannot convert a list containing a tensor of dtype "
"%s to %s (Tensor is: %r)" % (elem.dtype, dtype,
elem))
converted_elems.append(elem)
must_pack = True
elif isinstance(elem, (list, tuple)):
converted_elem = _autopacking_helper(elem, dtype, str(i))
if ops.is_dense_tensor_like(converted_elem):
must_pack = True
converted_elems.append(converted_elem)
else:
converted_elems.append(elem)
if must_pack:
elems_as_tensors = []
for i, elem in enumerate(converted_elems):
if ops.is_dense_tensor_like(elem):
elems_as_tensors.append(elem)
else:
# NOTE(mrry): This is inefficient, but it enables us to
# handle the case where the list arguments are other
# convertible-to-tensor types, such as numpy arrays.
elems_as_tensors.append(
constant_op.constant(elem, dtype=dtype, name=str(i)))
return gen_array_ops._pack(elems_as_tensors, name=scope)
else:
return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
"""Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be
converted to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
"""
for elem in list_or_tuple:
if ops.is_dense_tensor_like(elem):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
"""Tensor conversion function that automatically packs arguments."""
if as_ref:
return NotImplemented
inferred_dtype = _get_dtype_from_nested_lists(v)
if inferred_dtype is None:
# We did not find any tensor-like objects in the nested lists, so defer to
# other conversion functions.
return NotImplemented
if dtype is not None and dtype != inferred_dtype:
return NotImplemented
return _autopacking_helper(v, inferred_dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function((list, tuple),
_autopacking_conversion_function, 99)
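# Example: with this conversion registered, a mixed list such as
# [tf.constant(1.0), 2.0] passed where a Tensor is expected is packed into a
# rank-1 float32 Tensor (the plain float is converted via constant_op.constant).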
def unstack(value, num=None, axis=0, name="unstack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice
`value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
(Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice
`value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
Etc.
This is the opposite of stack. The numpy equivalent is
tf.unstack(x, n) = list(x)
Args:
value: A rank `R > 0` `Tensor` to be unstacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred
if `None` (the default).
axis: An `int`. The axis to unstack along. Defaults to the first
dimension. Negative values wrap around, so the valid range is `[-R, R)`.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unstacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
ValueError: If `axis` is out of the range [-R, R).
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops._unpack(value, num=num, axis=axis, name=name)
def concat(values, axis, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `axis`. If
`values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Raxis, ...Dn]
where
Raxis = sum(Daxis(i))
That is, the data from the input tensors is joined along the `axis`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `axis` must be equal.
For example:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) # [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) # [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) # [4, 3]
tf.shape(tf.concat([t3, t4], 1)) # [2, 6]
```
Note: If you are concatenating along a new axis consider using stack.
E.g.
```python
tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
```
can be rewritten as
```python
tf.stack(tensors, axis=axis)
```
Args:
values: A list of `Tensor` objects or a single `Tensor`.
axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be
in the range `[-rank(values), rank(values))`.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that axis is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(
axis, name="concat_dim",
dtype=dtypes.int32).get_shape().assert_is_compatible_with(
tensor_shape.scalar())
return identity(values[0], name=scope)
return gen_array_ops._concat_v2(values=values, axis=axis, name=name)
def boolean_mask(tensor, mask, name="boolean_mask"):
"""Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
def _apply_mask_1d(reshaped_tensor, mask):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where(mask), squeeze_dims=[1])
return gather(reshaped_tensor, indices)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"Number of mask dimensions must be specified, even if some dimensions"
" are None. E.g. shape=[None] is ok, but shape=None is not.")
shape_tensor[:ndims_mask].assert_is_compatible_with(shape_mask)
leading_size = gen_math_ops._prod(shape(tensor)[:ndims_mask], [0])
tensor = reshape(tensor,
concat([[leading_size],
shape(tensor)[ndims_mask:]], 0))
first_dim = shape_tensor[:ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape([first_dim])
.concatenate(shape_tensor[ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask)
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices # [12, 26, 37, 45]
tf.shape(a.values) # [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse_mask(a, [12, 45])
b.indices # [26, 37]
tf.shape(b.values) # [2, 10]
```
Args:
a: An `IndexedSlices` instance.
mask_indices: Indices of elements to mask.
name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = setdiff1d(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
"""Splits a tensor into sub tensors.
If `num_or_size_splits` is an integer type, `num_split`, then splits `value`
along dimension `axis` into `num_split` smaller tensors.
Requires that `num_split` evenly divides `value.shape[axis]`.
If `num_or_size_splits` is not an integer type, it is presumed to be a Tensor
`size_splits`, then splits `value` into `len(size_splits)` pieces. The shape
of the `i`-th piece has the same size as the `value` except along dimension
`axis` where the size is `size_splits[i]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
tf.shape(split0) # [5, 4]
tf.shape(split1) # [5, 15]
tf.shape(split2) # [5, 11]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
tf.shape(split0) # [5, 10]
```
Args:
value: The `Tensor` to split.
num_or_size_splits: Either a 0-D integer `Tensor` indicating the number of
splits along split_dim or a 1-D integer `Tensor` containing
the sizes of each output tensor along split_dim. If a scalar then it must
evenly divide `value.shape[axis]`; otherwise the sum of sizes along the
split dimension must match that of the `value`.
axis: A 0-D `int32` `Tensor`. The dimension along which to split.
Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of `size_splits`.
name: A name for the operation (optional).
Returns:
if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`
objects; if `num_or_size_splits` is a 1-D Tensor returns
`num_or_size_splits.get_shape[0]` `Tensor` objects resulting from splitting
`value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
"""
size_splits = ops.convert_to_tensor(num_or_size_splits)
if size_splits.get_shape().ndims == 0 and size_splits.dtype.is_integer:
return gen_array_ops._split(
split_dim=axis, num_split=num_or_size_splits, value=value, name=name)
else:
if num is None:
size_splits_shape = size_splits.get_shape()
num = size_splits_shape.dims[0]
if num._value is None:
raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
return gen_array_ops._split_v(
value=value,
size_splits=size_splits,
split_dim=axis,
num_split=num,
name=name)
def transpose(a, perm=None, name="transpose"):
"""Transposes `a`. Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors.
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
if perm is None:
rank = gen_array_ops.rank(a)
perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
ret = gen_array_ops.transpose(a, perm, name=name)
# NOTE(mrry): Setting the shape explicitly because
# reverse is not handled by the shape function.
if context.in_graph_mode():
input_shape = ret.op.inputs[0].get_shape().dims
if input_shape is not None:
ret.set_shape(input_shape[::-1])
else:
ret = gen_array_ops.transpose(a, perm, name=name)
return ret
# pylint: disable=invalid-name
def matrix_transpose(a, name="matrix_transpose"):
"""Transposes last two dimensions of tensor `a`.
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.matrix_transpose(x) # [[1, 4],
# [2, 5],
# [3, 6]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.matrix_transpose(x) is shape [1, 2, 4, 3]
```
Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
This is done with minimal cost, and is preferable to using this function. E.g.
```python
# Good! Transpose is taken at minimal additional cost.
tf.matmul(matrix, b, transpose_b=True)
# Inefficient!
tf.matmul(matrix, tf.matrix_transpose(b))
```
Args:
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
Returns:
A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
"""
with ops.name_scope(name, values=[a]):
a = ops.convert_to_tensor(a, name="a")
# If we know the number of dimensions (statically), we can do two things:
# 1. Check that `a` is a (batch) matrix.
# 2. Use a python list for perm. This preserves static shape information
# and avoids extra computations.
a_shape = a.get_shape()
ndims = a_shape.ndims
if ndims is not None:
if ndims < 2:
raise ValueError(
"Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
"%s" % a_shape)
perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
else:
a_rank = rank(a)
perm = concat((gen_math_ops._range(0, a_rank - 2, 1),
[a_rank - 1, a_rank - 2]), 0)
return transpose(a, perm=perm)
# pylint: enable=invalid-name
def zeros(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
For example:
```python
tf.zeros([3, 4], tf.int32) # [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "zeros", [shape]) as name:
if dtype == dtypes.bool:
zero = False
elif dtype == dtypes.string:
zero = ""
else:
zero = 0
try:
shape = tensor_shape.as_shape(shape)
output = constant(zero, shape=shape, dtype=dtype, name=name)
except (TypeError, ValueError):
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
output = fill(shape, constant(zero, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to zero.
"""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
# For now, variant types must be created via zeros_like; as we need to
# pass the input variant object to the proper zeros callback.
if tensor.shape.is_fully_defined() and tensor.dtype != dtypes.variant:
# We can produce a zeros tensor independent of the value of 'tensor',
# since the shape is known statically.
return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)
if dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
else:
return gen_array_ops._zeros_like(tensor, name=name)
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, `complex128` or
`bool`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to 1.
"""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape_internal(tensor, optimize=optimize)
if dtype is None:
dtype = tensor.dtype
ret = ones(ones_shape, dtype=dtype, name=name)
if context.in_graph_mode():
ret.set_shape(tensor.get_shape())
return ret
def ones(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to 1.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to 1.
For example:
```python
tf.ones([2, 3], tf.int32) # [[1, 1, 1], [1, 1, 1]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "ones", [shape]) as name:
one = True if dtype == dtypes.bool else 1
try:
shape = tensor_shape.as_shape(shape)
output = constant(one, shape=shape, dtype=dtype, name=name)
except (TypeError, ValueError):
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
output = fill(shape, constant(one, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
return gen_array_ops._placeholder(dtype=dtype, shape=shape, name=name)
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
"""Returns a tuple of (Tensor or None, rank or None)."""
if shape is None:
return (None, None)
rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)
if not isinstance(shape, ops.Tensor) and None in shape:
return (None, rank)
return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.sparse_placeholder(tf.float32)
y = tf.sparse_reduce_sum(x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
print(sess.run(y, feed_dict={
x: tf.SparseTensorValue(indices, values, shape)})) # Will succeed.
print(sess.run(y, feed_dict={
x: (indices, values, shape)})) # Will succeed.
sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
sp_value = sp.eval(session=sess)
print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.
```
Args:
dtype: The type of `values` elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a sparse tensor of any shape.
name: A name for prefixing the operations (optional).
Returns:
A `SparseTensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
shape_name = (name + "/shape") if name is not None else None
shape, rank = _normalize_sparse_shape(shape, shape_name)
if shape is None:
shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
return sparse_tensor.SparseTensor(
values=placeholder(
dtype,
shape=[None],
name=(name + "/values") if name is not None else None),
indices=placeholder(
dtypes.int64, shape=[None, rank],
name=(name + "/indices") if name is not None else None),
dense_shape=shape)
# pylint: enable=redefined-outer-name
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0): # pylint: disable=invalid-name
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `tensor`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
name: A name for the operation (optional).
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
# Convert lower/mixed case to upper for NumPy compatibility
# NumPy uses all lower-case modes.
mode = mode.upper()
if mode == "CONSTANT":
# TODO(rjryan): Once the forward compatibility period (3 weeks) has passed,
# remove the "Pad" fallback here.
if constant_values != 0:
result = gen_array_ops._pad_v2(
tensor, paddings, constant_values, name=name)
else:
result = gen_array_ops._pad(tensor, paddings, name=name)
elif mode == "REFLECT":
result = gen_array_ops._mirror_pad(
tensor, paddings, mode="REFLECT", name=name)
elif mode == "SYMMETRIC":
result = gen_array_ops._mirror_pad(
tensor, paddings, mode="SYMMETRIC", name=name)
else:
raise ValueError("Unknown padding mode: %s" % mode)
# Restore shape information where possible.
if context.in_graph_mode():
paddings_constant = tensor_util.constant_value(
result.op.inputs[1], partial=True)
input_shape = result.op.inputs[0].shape
if (input_shape.ndims is not None and not result.shape.is_fully_defined()
and paddings_constant is not None):
new_shape = []
for padding, dim in zip(paddings_constant, input_shape.as_list()):
if padding is None or dim is None or any(p is None for p in padding):
new_shape.append(None)
else:
new_shape.append(sum(padding) + dim)
result.set_shape(new_shape)
return result
def meshgrid(*args, **kwargs):
"""Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
of N-D coordinate arrays for evaluating expressions on an N-D grid.
Notes:
`meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
When the `indexing` argument is set to 'xy' (the default), the broadcasting
instructions for the first two dimensions are swapped.
Examples:
Calling `X, Y = meshgrid(x, y)` with the tensors
```python
x = [1, 2, 3]
y = [4, 5, 6]
X, Y = tf.meshgrid(x, y)
# X = [[1, 2, 3],
# [1, 2, 3],
# [1, 2, 3]]
# Y = [[4, 4, 4],
# [5, 5, 5],
# [6, 6, 6]]
```
Args:
*args: `Tensor`s with rank 1.
indexing: Either 'xy' or 'ij' (optional, default: 'xy').
name: A name for the operation (optional).
Returns:
outputs: A list of N `Tensor`s with rank N.
"""
indexing = kwargs.pop("indexing", "xy")
name = kwargs.pop("name", "meshgrid")
if kwargs:
key = list(kwargs.keys())[0]
raise TypeError("'{}' is an invalid keyword argument "
"for this function".format(key))
if indexing not in ("xy", "ij"):
raise ValueError("indexing parameter must be either 'xy' or 'ij'")
with ops.name_scope(name, "meshgrid", args) as name:
ndim = len(args)
s0 = (1,) * ndim
# Prepare reshape by inserting dimensions with size 1 where needed
output = []
for i, x in enumerate(args):
output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
# Create parameters for broadcasting each tensor to the full size
shapes = [size(x) for x in args]
output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
if indexing == "xy" and ndim > 1:
output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
shapes[0], shapes[1] = shapes[1], shapes[0]
# TODO: improve performance with a broadcast
mult_fact = ones(shapes, output_dtype)
return [x * mult_fact for x in output]
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
unknown = None  # A size or stride of None means the value is statically unknown.
use_full_range = None  # A begin/end of None in the spec means "use the full range".
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
stride = spec.step
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
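# Editor's note: a minimal illustrative sketch (not used by the library) of the
# size computation above. The slice and Dimension values below are hypothetical.
def _strided_dim_size_example():
  """Returns the computed size of a 1:10:3 slice over a length-12 dimension.

  The slice visits indices 1, 4 and 7, so the expected result is 3, matching
  len(range(12)[1:10:3]).
  """
  return _compute_size_of_strided_dim(
      shrink=False, spec=slice(1, 10, 3), size=tensor_shape.Dimension(12))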
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim // multiple)
return [tensor_shape.TensorShape(output_dims)]
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"]
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
length of `truth.`
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(hypothesis, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(truth, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops._edit_distance(
hypothesis.indices,
hypothesis.values,
hypothesis.dense_shape,
truth.indices,
truth.values,
truth.dense_shape,
normalize=normalize,
name=name)
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxArgs op."""
return fake_quant_with_min_max_args_gradient(
grad,
op.inputs[0],
min=op.get_attr("min"),
max=op.get_attr("max"),
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVars op."""
return fake_quant_with_min_max_vars_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
return fake_quant_with_min_max_vars_per_channel_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
def required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings=None,
name=None):
"""Calculate padding required to make block_shape divide input_shape.
This function can be used to calculate a suitable paddings argument for use
with space_to_batch_nd and batch_to_space_nd.
Args:
input_shape: int32 Tensor of shape [N].
block_shape: int32 Tensor of shape [N].
base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum
amount of padding to use. All elements must be >= 0. If not specified,
defaults to 0.
name: string. Optional name prefix.
Returns:
(paddings, crops), where:
`paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
satisfying:
paddings[i, 0] = base_paddings[i, 0].
0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
crops[i, 0] = 0
crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
Raises: ValueError if called with incompatible shapes.
"""
with ops.name_scope(name, "required_space_to_batch_paddings",
[input_shape, block_shape]):
input_shape = ops.convert_to_tensor(
input_shape, dtype=dtypes.int32, name="input_shape")
block_shape = ops.convert_to_tensor(
block_shape, dtype=dtypes.int32, name="block_shape")
block_shape.get_shape().assert_is_fully_defined()
block_shape.get_shape().assert_has_rank(1)
num_block_dims = block_shape.get_shape()[0].value
if num_block_dims == 0:
return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
input_shape.get_shape().assert_is_compatible_with([num_block_dims])
if base_paddings is not None:
base_paddings = ops.convert_to_tensor(
base_paddings, dtype=dtypes.int32, name="base_paddings")
base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
else:
base_paddings = zeros([num_block_dims, 2], dtypes.int32)
const_block_shape = tensor_util.constant_value(block_shape)
const_input_shape = tensor_util.constant_value(input_shape)
const_base_paddings = tensor_util.constant_value(base_paddings)
if (const_block_shape is not None and const_input_shape is not None and
const_base_paddings is not None):
block_shape = const_block_shape
input_shape = const_input_shape
base_paddings = const_base_paddings
# Use same expression for both constant and non-constant case.
pad_start = base_paddings[:, 0]
orig_pad_end = base_paddings[:, 1]
full_input_shape = input_shape + pad_start + orig_pad_end
pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
pad_end = orig_pad_end + pad_end_extra
result_paddings = stack(
[[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
name="paddings")
result_crops = stack(
[[0, pad_end_extra[i]] for i in range(num_block_dims)], name="crops")
return result_paddings, result_crops
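# Editor's note: a small graph-mode usage sketch (hypothetical values, not part
# of the API). With input_shape=[7], block_shape=[3] and no base paddings,
# 7 + 0 + 2 = 9 is divisible by 3, so the expected results are paddings of
# [[0, 2]] and crops of [[0, 2]].
def _required_space_to_batch_paddings_example():
  """Builds the (paddings, crops) tensors for a length-7 dim with block 3."""
  return required_space_to_batch_paddings(input_shape=[7], block_shape=[3])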
def space_to_batch(input, paddings, block_size, name=None): # pylint: disable=redefined-builtin
result = space_to_batch_nd(
input,
paddings=paddings,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
space_to_batch.__doc__ = gen_array_ops._space_to_batch.__doc__
def batch_to_space(input, crops, block_size, name=None): # pylint: disable=redefined-builtin
result = batch_to_space_nd(
input,
crops=crops,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
batch_to_space.__doc__ = gen_array_ops._batch_to_space.__doc__
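# Editor's note: a hypothetical round-trip sketch (not part of the API).
# Applying space_to_batch and then batch_to_space with the same block_size and
# zero paddings/crops should recover a tensor of the original shape.
def _space_to_batch_round_trip_example():
  """Round-trips a [1, 4, 4, 1] tensor of ones through both ops."""
  x = ones([1, 4, 4, 1], dtypes.float32)
  y = space_to_batch(x, paddings=[[0, 0], [0, 0]], block_size=2)  # [4, 2, 2, 1]
  return batch_to_space(y, crops=[[0, 0], [0, 0]], block_size=2)  # [1, 4, 4, 1]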
def one_hot(indices,
depth,
on_value=None,
off_value=None,
axis=None,
dtype=None,
name=None):
"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
`on_value` and `off_value` must have matching data types. If `dtype` is also
provided, they must be the same data type as specified by `dtype`.
If `on_value` is not provided, it will default to the value `1` with type
`dtype`.
If `off_value` is not provided, it will default to the value `0` with type
`dtype`.
If the input `indices` is rank `N`, the output will have rank `N+1`. The
new axis is created at dimension `axis` (default: the new axis is appended
at the end).
If `indices` is a scalar, the output shape will be a vector of length `depth`.
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
If `dtype` is not provided, it will attempt to assume the data type of
`on_value` or `off_value`, if one or both are passed in. If none of
`on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
value `tf.float32`.
Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
For example:
```python
indices = [0, 1, 2]
depth = 3
tf.one_hot(indices, depth) # output: [3 x 3]
# [[1., 0., 0.],
# [0., 1., 0.],
# [0., 0., 1.]]
indices = [0, 2, -1, 1]
depth = 3
tf.one_hot(indices, depth,
on_value=5.0, off_value=0.0,
axis=-1) # output: [4 x 3]
# [[5.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 5.0], # one_hot(2)
# [0.0, 0.0, 0.0], # one_hot(-1)
# [0.0, 5.0, 0.0]] # one_hot(1)
indices = [[0, 2], [1, -1]]
depth = 3
tf.one_hot(indices, depth,
on_value=1.0, off_value=0.0,
axis=-1) # output: [2 x 2 x 3]
# [[[1.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 1.0]], # one_hot(2)
# [[0.0, 1.0, 0.0], # one_hot(1)
# [0.0, 0.0, 0.0]]] # one_hot(-1)
```
Args:
indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
off_value: A scalar defining the value to fill in output when `indices[j]
!= i`. (default: 0)
axis: The axis to fill (default: -1, a new inner-most axis).
dtype: The data type of the output tensor.
Returns:
output: The one-hot tensor.
Raises:
TypeError: If dtype of either `on_value` or `off_value` doesn't match `dtype`
TypeError: If dtype of `on_value` and `off_value` don't match one another
"""
with ops.name_scope(name, "one_hot",
[indices, depth, on_value, off_value, axis,
dtype]) as name:
on_exists = on_value is not None
off_exists = off_value is not None
on_dtype = ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists \
else None
off_dtype = ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists\
else None
if on_exists or off_exists:
if dtype is not None:
# Ensure provided on_value and/or off_value match dtype
if (on_exists and on_dtype != dtype):
raise TypeError("dtype {0} of on_value does not match " \
"dtype parameter {1}".format(on_dtype, dtype))
if (off_exists and off_dtype != dtype):
raise TypeError("dtype {0} of off_value does not match " \
"dtype parameter {1}".format(off_dtype, dtype))
else:
# dtype not provided: automatically assign it
dtype = on_dtype if on_exists else off_dtype
elif dtype is None:
# None of on_value, off_value, or dtype provided. Default dtype to float32
dtype = dtypes.float32
if not on_exists:
# on_value not provided: assign to value 1 of type dtype
on_value = ops.convert_to_tensor(1, dtype, name="on_value")
on_dtype = dtype
if not off_exists:
# off_value not provided: assign to value 0 of type dtype
off_value = ops.convert_to_tensor(0, dtype, name="off_value")
off_dtype = dtype
if on_dtype != off_dtype:
raise TypeError("dtype {0} of on_value does not match " \
"dtype {1} of off_value".format(on_dtype, off_dtype))
return gen_array_ops._one_hot(indices, depth, on_value, off_value, axis,
name)
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
"""Return a mask tensor representing the first N positions of each row.
Example:
```python
tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],
# [True, True, True, False, False],
# [True, True, False, False, False]]
```
Args:
lengths: 1D integer tensor, all its values <= maxlen.
maxlen: scalar integer tensor, maximum length of each row. Default: use
maximum over lengths.
dtype: output type of the resulting tensor.
name: name of the op.
Returns:
A 2D mask tensor, as shown in the example above, cast to specified dtype.
Raises:
ValueError: if the arguments have invalid rank.
"""
with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
lengths = ops.convert_to_tensor(lengths)
if lengths.get_shape().ndims != 1:
raise ValueError("lengths must be 1D for sequence_mask. Got shape %s" %
lengths.get_shape())
if maxlen is None:
maxlen = gen_math_ops._max(lengths, [0])
else:
maxlen = ops.convert_to_tensor(maxlen)
if maxlen.get_shape().ndims != 0:
raise ValueError("maxlen must be scalar for sequence_mask")
# The basic idea is to compare a range row vector of size maxlen:
# [0, 1, 2, 3, 4]
# to length as a matrix with 1 column: [[1], [3], [2]].
# Because of broadcasting on both arguments this comparison results
# in a matrix of size (len(lengths), maxlen)
row_vector = gen_math_ops._range(
constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
# Since maxlen >= max(lengths), it is safe to use maxlen's dtype as the
# authoritative cast type: whenever maxlen fits into tf.int32, so do the lengths.
matrix = gen_math_ops.cast(expand_dims(lengths, 1), maxlen.dtype)
result = row_vector < matrix
if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
return result
else:
return gen_math_ops.cast(result, dtype)
def squeeze(input, axis=None, name=None, squeeze_dims=None):
# pylint: disable=redefined-builtin
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`.
If specified, only squeezes the dimensions listed. The dimension
index starts at 0. It is an error to squeeze a dimension that is not 1.
Must be in the range `[-rank(input), rank(input))`.
name: A name for the operation (optional).
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: When both `squeeze_dims` and `axis` are specified.
"""
if squeeze_dims is not None:
if axis is not None:
raise ValueError("Cannot specify both 'squeeze_dims' and 'axis'")
axis = squeeze_dims
if np.isscalar(axis):
axis = [axis]
return gen_array_ops._squeeze(input, axis, name)
def where(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind, the shape of the output tensor can vary depending on
how many true values there are in the input. Indices are output in row-major
order.
If both `x` and `y` are non-None, they must have the same shape.
The `condition` tensor must be a scalar if `x` and `y` are scalar.
If `x` and `y` are vectors of higher rank, then `condition` must be either a
vector with size matching the first dimension of `x`, or must have the same
shape as `x`.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
If `condition` is a vector and `x` and `y` are higher rank matrices, then it
chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
has the same shape as `x` and `y`, then it chooses which element to copy from
`x` and `y`.
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which may have the same shape as `condition`. If `condition` is
rank 1, `x` may have higher rank, but its first dimension must match the
size of `condition`.
y: A `tensor` with the same shape and type as `x`.
name: A name of the operation (optional)
Returns:
A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
Otherwise, a `Tensor` with shape `(num_true, dim_size(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
return gen_array_ops.where(input=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops._select(condition=condition, t=x, e=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
def reverse(tensor, axis, name=None):
return gen_array_ops.reverse_v2(tensor, axis, name)
reverse.__doc__ = gen_array_ops.reverse_v2.__doc__
# pylint: disable=redefined-builtin
def reverse_sequence(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None,
seq_dim=None,
batch_dim=None):
seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
"seq_dim", seq_dim)
batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
"batch_dim", batch_dim)
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
# pylint: enable=redefined-builtin
reverse_sequence.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
"seq_dim", "seq_axis")
def gather(params, indices, validate_indices=None, name=None, axis=0):
# TODO(rjryan): Remove "Gather" creation in favor of GatherV2 once the
# 3-week forward compatibility period has passed.
if axis == 0:
return gen_array_ops.gather(
params, indices, validate_indices=validate_indices, name=name)
else:
return gen_array_ops.gather_v2(params, indices, axis, name=name)
gather.__doc__ = gen_array_ops.gather_v2.__doc__
|
apache-2.0
| -9,138,453,467,312,795,000
| 32.864344
| 105
| 0.631098
| false
| 3.414986
| false
| false
| false
|
lidavidm/mathics-heroku
|
venv/lib/python2.7/site-packages/sympy/functions/combinatorial/numbers.py
|
1
|
40953
|
"""
This module implements some special functions that commonly appear in
combinatorial contexts (e.g. in power series); in particular,
sequences of rational numbers such as Bernoulli and Fibonacci numbers.
Factorials, binomial coefficients and related functions are located in
the separate 'factorials' module.
"""
from sympy.core.function import Function, expand_mul
from sympy.core import S, Symbol, Rational, oo, Integer, C, Add, Dummy
from sympy.core.compatibility import as_int, SYMPY_INTS
from sympy.core.cache import cacheit
from sympy.functions.combinatorial.factorials import factorial
from sympy.mpmath import bernfrac
from sympy.mpmath.libmp import ifib as _ifib
def _product(a, b):
p = 1
for k in xrange(a, b + 1):
p *= k
return p
from sympy.utilities.memoization import recurrence_memo
# Dummy symbol used for computing polynomial sequences
_sym = Symbol('x')
_symbols = Function('x')
#----------------------------------------------------------------------------#
# #
# Fibonacci numbers #
# #
#----------------------------------------------------------------------------#
class fibonacci(Function):
"""
Fibonacci numbers / Fibonacci polynomials
The Fibonacci numbers are the integer sequence defined by the
initial terms F_0 = 0, F_1 = 1 and the two-term recurrence
relation F_n = F_{n-1} + F_{n-2}.
The Fibonacci polynomials are defined by F_1(x) = 1,
F_2(x) = x, and F_n(x) = x*F_{n-1}(x) + F_{n-2}(x) for n > 2.
For all positive integers n, F_n(1) = F_n.
* fibonacci(n) gives the nth Fibonacci number, F_n
* fibonacci(n, x) gives the nth Fibonacci polynomial in x, F_n(x)
Examples
========
>>> from sympy import fibonacci, Symbol
>>> [fibonacci(x) for x in range(11)]
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
>>> fibonacci(5, Symbol('t'))
t**4 + 3*t**2 + 1
References
==========
.. [1] http://en.wikipedia.org/wiki/Fibonacci_number
.. [2] http://mathworld.wolfram.com/FibonacciNumber.html
See Also
========
bell, bernoulli, catalan, euler, harmonic, lucas
"""
@staticmethod
def _fib(n):
return _ifib(n)
@staticmethod
@recurrence_memo([None, S.One, _sym])
def _fibpoly(n, prev):
return (prev[-2] + _sym*prev[-1]).expand()
@classmethod
def eval(cls, n, sym=None):
if n.is_Integer:
n = int(n)
if n < 0:
return S.NegativeOne**(n + 1) * fibonacci(-n)
if sym is None:
return Integer(cls._fib(n))
else:
if n < 1:
raise ValueError("Fibonacci polynomials are defined "
"only for positive integer indices.")
return cls._fibpoly(n).subs(_sym, sym)
class lucas(Function):
"""
Lucas numbers
Lucas numbers satisfy a recurrence relation similar to that of
the Fibonacci sequence, in which each term is the sum of the
preceding two. They are generated by choosing the initial
values L_0 = 2 and L_1 = 1.
* lucas(n) gives the nth Lucas number
Examples
========
>>> from sympy import lucas
>>> [lucas(x) for x in range(11)]
[2, 1, 3, 4, 7, 11, 18, 29, 47, 76, 123]
References
==========
.. [1] http://en.wikipedia.org/wiki/Lucas_number
.. [2] http://mathworld.wolfram.com/LucasNumber.html
See Also
========
bell, bernoulli, catalan, euler, fibonacci, harmonic
"""
@classmethod
def eval(cls, n):
if n.is_Integer:
return fibonacci(n + 1) + fibonacci(n - 1)
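# Editor's note: an illustrative sketch (not used by sympy itself) checking
# that the values produced by eval above satisfy the two-term recurrence
# quoted in the docstring, L_0 = 2, L_1 = 1, L_n = L_{n-1} + L_{n-2}.
def _lucas_recurrence_sketch(n=10):
    """Return True if lucas satisfies its defining recurrence up to n."""
    return (lucas(0) == 2 and lucas(1) == 1 and
            all(lucas(k) == lucas(k - 1) + lucas(k - 2)
                for k in range(2, n + 1)))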
#----------------------------------------------------------------------------#
# #
# Bernoulli numbers #
# #
#----------------------------------------------------------------------------#
class bernoulli(Function):
r"""
Bernoulli numbers / Bernoulli polynomials
The Bernoulli numbers are a sequence of rational numbers
defined by B_0 = 1 and the recursive relation (n > 0)::
n
___
\ / n + 1 \
0 = ) | | * B .
/___ \ k / k
k = 0
They are also commonly defined by their exponential generating
function, which is x/(exp(x) - 1). For odd indices > 1, the
Bernoulli numbers are zero.
The Bernoulli polynomials satisfy the analogous formula::
n
___
\ / n \ n-k
B (x) = ) | | * B * x .
n /___ \ k / k
k = 0
Bernoulli numbers and Bernoulli polynomials are related as
B_n(0) = B_n.
We compute Bernoulli numbers using Ramanujan's formula::
/ n + 3 \
B = (A(n) - S(n)) / | |
n \ n /
where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6
when n = 4 (mod 6), and::
[n/6]
___
\ / n + 3 \
S(n) = ) | | * B
/___ \ n - 6*k / n-6*k
k = 1
This formula is similar to the sum given in the definition, but
cuts 2/3 of the terms. For Bernoulli polynomials, we use the
formula in the definition.
* bernoulli(n) gives the nth Bernoulli number, B_n
* bernoulli(n, x) gives the nth Bernoulli polynomial in x, B_n(x)
Examples
========
>>> from sympy import bernoulli
>>> [bernoulli(n) for n in range(11)]
[1, -1/2, 1/6, 0, -1/30, 0, 1/42, 0, -1/30, 0, 5/66]
>>> bernoulli(1000001)
0
References
==========
.. [1] http://en.wikipedia.org/wiki/Bernoulli_number
.. [2] http://en.wikipedia.org/wiki/Bernoulli_polynomial
.. [3] http://mathworld.wolfram.com/BernoulliNumber.html
.. [4] http://mathworld.wolfram.com/BernoulliPolynomial.html
See Also
========
bell, catalan, euler, fibonacci, harmonic, lucas
"""
# Calculates B_n for positive even n
@staticmethod
def _calc_bernoulli(n):
s = 0
a = int(C.binomial(n + 3, n - 6))
for j in xrange(1, n//6 + 1):
s += a * bernoulli(n - 6*j)
# Avoid computing each binomial coefficient from scratch
a *= _product(n - 6 - 6*j + 1, n - 6*j)
a //= _product(6*j + 4, 6*j + 9)
if n % 6 == 4:
s = -Rational(n + 3, 6) - s
else:
s = Rational(n + 3, 3) - s
return s / C.binomial(n + 3, n)
# We implement a specialized memoization scheme to handle each
# case modulo 6 separately
_cache = {0: S.One, 2: Rational(1, 6), 4: Rational(-1, 30)}
_highest = {0: 0, 2: 2, 4: 4}
@classmethod
def eval(cls, n, sym=None):
if n.is_Number:
if n.is_Integer and n.is_nonnegative:
if n is S.Zero:
return S.One
elif n is S.One:
if sym is None:
return -S.Half
else:
return sym - S.Half
# Bernoulli numbers
elif sym is None:
if n.is_odd:
return S.Zero
n = int(n)
# Use mpmath for enormous Bernoulli numbers
if n > 500:
p, q = bernfrac(n)
return Rational(int(p), int(q))
case = n % 6
highest_cached = cls._highest[case]
if n <= highest_cached:
return cls._cache[n]
# To avoid excessive recursion when, say, bernoulli(1000) is
# requested, calculate and cache the entire sequence ... B_988,
# B_994, B_1000 in increasing order
for i in xrange(highest_cached + 6, n + 6, 6):
b = cls._calc_bernoulli(i)
cls._cache[i] = b
cls._highest[case] = i
return b
# Bernoulli polynomials
else:
n, result = int(n), []
for k in xrange(n + 1):
result.append(C.binomial(n, k)*cls(k)*sym**(n - k))
return Add(*result)
else:
raise ValueError("Bernoulli numbers are defined only"
" for nonnegative integer indices.")
#----------------------------------------------------------------------------#
# #
# Bell numbers #
# #
#----------------------------------------------------------------------------#
class bell(Function):
r"""
Bell numbers / Bell polynomials
The Bell numbers satisfy `B_0 = 1` and
.. math:: B_n = \sum_{k=0}^{n-1} \binom{n-1}{k} B_k.
They are also given by:
.. math:: B_n = \frac{1}{e} \sum_{k=0}^{\infty} \frac{k^n}{k!}.
The Bell polynomials are given by `B_0(x) = 1` and
.. math:: B_n(x) = x \sum_{k=1}^{n-1} \binom{n-1}{k-1} B_{k-1}(x).
The Bell polynomials of the second kind (sometimes called "partial" or
incomplete Bell polynomials) are defined as
.. math:: B_{n,k}(x_1, x_2,\dotsc x_{n-k+1}) =
\sum_{j_1+j_2+j_3+\dotsb=k \atop j_1+2j_2+3j_3+\dotsb=n}
\frac{n!}{j_1!j_2!\dotsb j_{n-k+1}!}
\left(\frac{x_1}{1!} \right)^{j_1}
\left(\frac{x_2}{2!} \right)^{j_2} \dotsb
\left(\frac{x_{n-k+1}}{(n-k+1)!} \right) ^{j_{n-k+1}}.
* bell(n) gives the `n^{th}` Bell number, `B_n`.
* bell(n, x) gives the `n^{th}` Bell polynomial, `B_n(x)`.
* bell(n, k, (x1, x2, ...)) gives Bell polynomials of the second kind,
`B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})`.
Notes
=====
Not to be confused with Bernoulli numbers and Bernoulli polynomials,
which use the same notation.
Examples
========
>>> from sympy import bell, Symbol, symbols
>>> [bell(n) for n in range(11)]
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975]
>>> bell(30)
846749014511809332450147
>>> bell(4, Symbol('t'))
t**4 + 6*t**3 + 7*t**2 + t
>>> bell(6, 2, symbols('x:6')[1:])
6*x1*x5 + 15*x2*x4 + 10*x3**2
References
==========
.. [1] http://en.wikipedia.org/wiki/Bell_number
.. [2] http://mathworld.wolfram.com/BellNumber.html
.. [3] http://mathworld.wolfram.com/BellPolynomial.html
See Also
========
bernoulli, catalan, euler, fibonacci, harmonic, lucas
"""
@staticmethod
@recurrence_memo([1, 1])
def _bell(n, prev):
s = 1
a = 1
for k in xrange(1, n):
a = a * (n - k) // k
s += a * prev[k]
return s
@staticmethod
@recurrence_memo([S.One, _sym])
def _bell_poly(n, prev):
s = 1
a = 1
for k in xrange(2, n + 1):
a = a * (n - k + 1) // (k - 1)
s += a * prev[k - 1]
return expand_mul(_sym * s)
@staticmethod
def _bell_incomplete_poly(n, k, symbols):
r"""
The second kind of Bell polynomials (incomplete Bell polynomials).
Calculated by recurrence formula:
.. math:: B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1}) =
\sum_{m=1}^{n-k+1}
x_m \binom{n-1}{m-1} B_{n-m,k-1}(x_1, x_2, \dotsc, x_{n-m-k})
where
B_{0,0} = 1;
B_{n,0} = 0; for n>=1
B_{0,k} = 0; for k>=1
"""
if (n == 0) and (k == 0):
return S.One
elif (n == 0) or (k == 0):
return S.Zero
s = S.Zero
a = S.One
for m in xrange(1, n - k + 2):
s += a * bell._bell_incomplete_poly(
n - m, k - 1, symbols) * symbols[m - 1]
a = a * (n - m) / m
return expand_mul(s)
@classmethod
def eval(cls, n, k_sym=None, symbols=None):
if n.is_Integer and n.is_nonnegative:
if k_sym is None:
return Integer(cls._bell(int(n)))
elif symbols is None:
return cls._bell_poly(int(n)).subs(_sym, k_sym)
else:
r = cls._bell_incomplete_poly(int(n), int(k_sym), symbols)
return r
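# Editor's note: an illustrative sketch (not part of sympy) of the relation
# between the Bell polynomials and Bell numbers computed above: B_n(1) = B_n.
def _bell_poly_at_one_sketch(n=8):
    """Return True if bell(k, 1) == bell(k) for k = 0..n."""
    return all(bell(k, 1) == bell(k) for k in range(n + 1))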
#----------------------------------------------------------------------------#
# #
# Harmonic numbers #
# #
#----------------------------------------------------------------------------#
class harmonic(Function):
r"""
Harmonic numbers
The nth harmonic number is given by `\operatorname{H}_{n} =
1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n}`.
More generally:
.. math:: \operatorname{H}_{n,m} = \sum_{k=1}^{n} \frac{1}{k^m}
As `n \rightarrow \infty`, `\operatorname{H}_{n,m} \rightarrow \zeta(m)`,
the Riemann zeta function.
* ``harmonic(n)`` gives the nth harmonic number, `\operatorname{H}_n`
* ``harmonic(n, m)`` gives the nth generalized harmonic number
of order `m`, `\operatorname{H}_{n,m}`, where
``harmonic(n) == harmonic(n, 1)``
Examples
========
>>> from sympy import harmonic, oo
>>> [harmonic(n) for n in range(6)]
[0, 1, 3/2, 11/6, 25/12, 137/60]
>>> [harmonic(n, 2) for n in range(6)]
[0, 1, 5/4, 49/36, 205/144, 5269/3600]
>>> harmonic(oo, 2)
pi**2/6
>>> from sympy import Symbol, Sum
>>> n = Symbol("n")
>>> harmonic(n).rewrite(Sum)
Sum(1/_k, (_k, 1, n))
We can rewrite harmonic numbers in terms of polygamma functions:
>>> from sympy import digamma, polygamma
>>> m = Symbol("m")
>>> harmonic(n).rewrite(digamma)
polygamma(0, n + 1) + EulerGamma
>>> harmonic(n).rewrite(polygamma)
polygamma(0, n + 1) + EulerGamma
>>> harmonic(n,3).rewrite(polygamma)
polygamma(2, n + 1)/2 - polygamma(2, 1)/2
>>> harmonic(n,m).rewrite(polygamma)
(-1)**m*(polygamma(m - 1, 1) - polygamma(m - 1, n + 1))/factorial(m - 1)
Integer offsets in the argument can be pulled out:
>>> from sympy import expand_func
>>> expand_func(harmonic(n+4))
harmonic(n) + 1/(n + 4) + 1/(n + 3) + 1/(n + 2) + 1/(n + 1)
>>> expand_func(harmonic(n-4))
harmonic(n) - 1/(n - 1) - 1/(n - 2) - 1/(n - 3) - 1/n
Some limits can be computed as well:
>>> from sympy import limit, oo
>>> limit(harmonic(n), n, oo)
oo
>>> limit(harmonic(n, 2), n, oo)
pi**2/6
>>> limit(harmonic(n, 3), n, oo)
-polygamma(2, 1)/2
>>> limit(harmonic(m, n), m, oo)
zeta(n)
References
==========
.. [1] http://en.wikipedia.org/wiki/Harmonic_number
.. [2] http://functions.wolfram.com/GammaBetaErf/HarmonicNumber/
.. [3] http://functions.wolfram.com/GammaBetaErf/HarmonicNumber2/
See Also
========
bell, bernoulli, catalan, euler, fibonacci, lucas
"""
# Generate one memoized Harmonic number-generating function for each
# order and store it in a dictionary
_functions = {}
nargs = (1, 2)
@classmethod
def eval(cls, n, m=None):
if m is None:
m = S.One
if n == oo:
return C.zeta(m)
if n.is_Integer and n.is_nonnegative and m.is_Integer:
if n == 0:
return S.Zero
if not m in cls._functions:
@recurrence_memo([0])
def f(n, prev):
return prev[-1] + S.One / n**m
cls._functions[m] = f
return cls._functions[m](int(n))
def _eval_rewrite_as_polygamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return S.NegativeOne**m/factorial(m - 1) * (polygamma(m - 1, 1) - polygamma(m - 1, n + 1))
def _eval_rewrite_as_digamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma)
def _eval_rewrite_as_trigamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma)
def _eval_rewrite_as_Sum(self, n, m=None):
k = C.Dummy("k", integer=True)
if m is None:
m = S.One
return C.Sum(k**(-m), (k, 1, n))
def _eval_expand_func(self, **hints):
n = self.args[0]
m = self.args[1] if len(self.args) == 2 else 1
if m == S.One:
if n.is_Add:
off = n.args[0]
nnew = n - off
if off.is_Integer and off.is_positive:
result = [S.One/(nnew + i) for i in xrange(off, 0, -1)] + [harmonic(nnew)]
return Add(*result)
elif off.is_Integer and off.is_negative:
result = [-S.One/(nnew + i) for i in xrange(0, off, -1)] + [harmonic(nnew)]
return Add(*result)
return self
def _eval_rewrite_as_tractable(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma).rewrite("tractable", deep=True)
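# Editor's note: an illustrative sketch (not used by sympy) of the basic
# recurrence behind the memoized generator above: H_{n,m} = H_{n-1,m} + 1/n**m.
def _harmonic_recurrence_sketch(n=6, m=2):
    """Return True if harmonic(k, m) == harmonic(k - 1, m) + 1/k**m for k <= n."""
    return all(harmonic(k, m) == harmonic(k - 1, m) + Rational(1, k**m)
               for k in range(1, n + 1))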
#----------------------------------------------------------------------------#
# #
# Euler numbers #
# #
#----------------------------------------------------------------------------#
class euler(Function):
r"""
Euler numbers
The euler numbers are given by::
2*n+1 k
___ ___ j 2*n+1
\ \ / k \ (-1) * (k-2*j)
E = I ) ) | | --------------------
2n /___ /___ \ j / k k
k = 1 j = 0 2 * I * k
E = 0
2n+1
* euler(n) gives the n-th Euler number, E_n
Examples
========
>>> from sympy import Symbol, euler
>>> [euler(n) for n in range(10)]
[1, 0, -1, 0, 5, 0, -61, 0, 1385, 0]
>>> n = Symbol("n")
>>> euler(n+2*n)
euler(3*n)
References
==========
.. [1] http://en.wikipedia.org/wiki/Euler_numbers
.. [2] http://mathworld.wolfram.com/EulerNumber.html
.. [3] http://en.wikipedia.org/wiki/Alternating_permutation
.. [4] http://mathworld.wolfram.com/AlternatingPermutation.html
See Also
========
bell, bernoulli, catalan, fibonacci, harmonic, lucas
"""
nargs = 1
@classmethod
def eval(cls, m, evaluate=True):
if not evaluate:
return
if m.is_odd:
return S.Zero
if m.is_Integer and m.is_nonnegative:
from sympy.mpmath import mp
m = m._to_mpmath(mp.prec)
res = mp.eulernum(m, exact=True)
return Integer(res)
def _eval_rewrite_as_Sum(self, arg):
if arg.is_even:
k = C.Dummy("k", integer=True)
j = C.Dummy("j", integer=True)
n = self.args[0] / 2
Em = (S.ImaginaryUnit * C.Sum( C.Sum( C.binomial(k, j) * ((-1)**j * (k - 2*j)**(2*n + 1)) /
(2**k*S.ImaginaryUnit**k * k), (j, 0, k)), (k, 1, 2*n + 1)))
return Em
def _eval_evalf(self, prec):
m = self.args[0]
if m.is_Integer and m.is_nonnegative:
from sympy.mpmath import mp
from sympy import Expr
m = m._to_mpmath(prec)
oprec = mp.prec
mp.prec = prec
res = mp.eulernum(m)
mp.prec = oprec
return Expr._from_mpmath(res, prec)
#----------------------------------------------------------------------------#
# #
# Catalan numbers #
# #
#----------------------------------------------------------------------------#
class catalan(Function):
r"""
Catalan numbers
The n-th catalan number is given by::
1 / 2*n \
C = ----- | |
n n + 1 \ n /
* catalan(n) gives the n-th Catalan number, C_n
Examples
========
>>> from sympy import (Symbol, binomial, gamma, hyper, polygamma,
... catalan, diff, combsimp, Rational, I)
>>> [ catalan(i) for i in range(1,10) ]
[1, 2, 5, 14, 42, 132, 429, 1430, 4862]
>>> n = Symbol("n", integer=True)
>>> catalan(n)
catalan(n)
Catalan numbers can be transformed into several other, identical
expressions involving other mathematical functions
>>> catalan(n).rewrite(binomial)
binomial(2*n, n)/(n + 1)
>>> catalan(n).rewrite(gamma)
4**n*gamma(n + 1/2)/(sqrt(pi)*gamma(n + 2))
>>> catalan(n).rewrite(hyper)
hyper((-n + 1, -n), (2,), 1)
For some non-integer values of n we can get closed form
expressions by rewriting in terms of gamma functions:
>>> catalan(Rational(1,2)).rewrite(gamma)
8/(3*pi)
We can differentiate the Catalan numbers C(n) interpreted as a
continuous real function in n:
>>> diff(catalan(n), n)
(polygamma(0, n + 1/2) - polygamma(0, n + 2) + log(4))*catalan(n)
As a more advanced example consider the following ratio
between consecutive numbers:
>>> combsimp((catalan(n + 1)/catalan(n)).rewrite(binomial))
2*(2*n + 1)/(n + 2)
The Catalan numbers can be generalized to complex numbers:
>>> catalan(I).rewrite(gamma)
4**I*gamma(1/2 + I)/(sqrt(pi)*gamma(2 + I))
and evaluated with arbitrary precision:
>>> catalan(I).evalf(20)
0.39764993382373624267 - 0.020884341620842555705*I
References
==========
.. [1] http://en.wikipedia.org/wiki/Catalan_number
.. [2] http://mathworld.wolfram.com/CatalanNumber.html
.. [3] http://functions.wolfram.com/GammaBetaErf/CatalanNumber/
.. [4] http://geometer.org/mathcircles/catalan.pdf
See Also
========
bell, bernoulli, euler, fibonacci, harmonic, lucas
sympy.functions.combinatorial.factorials.binomial
"""
@classmethod
def eval(cls, n, evaluate=True):
if n.is_Integer and n.is_nonnegative:
return 4**n*C.gamma(n + S.Half)/(C.gamma(S.Half)*C.gamma(n + 2))
def fdiff(self, argindex=1):
n = self.args[0]
return catalan(n)*(C.polygamma(0, n + Rational(1, 2)) - C.polygamma(0, n + 2) + C.log(4))
def _eval_rewrite_as_binomial(self, n):
return C.binomial(2*n, n)/(n + 1)
def _eval_rewrite_as_gamma(self, n):
# The gamma function allows to generalize Catalan numbers to complex n
return 4**n*C.gamma(n + S.Half)/(C.gamma(S.Half)*C.gamma(n + 2))
def _eval_rewrite_as_hyper(self, n):
return C.hyper([1 - n, -n], [2], 1)
def _eval_evalf(self, prec):
return self.rewrite(C.gamma).evalf(prec)
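# Editor's note: an illustrative sketch (not part of sympy) of the consecutive
# ratio quoted in the docstring, C(n+1)/C(n) = 2*(2*n+1)/(n+2), checked on the
# explicit integer values produced by eval above.
def _catalan_ratio_sketch(n=10):
    """Return True if catalan(k + 1)*(k + 2) == 2*(2*k + 1)*catalan(k) for k <= n."""
    return all(catalan(k + 1)*(k + 2) == 2*(2*k + 1)*catalan(k)
               for k in range(n + 1))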
#######################################################################
###
### Functions for enumerating partitions, permutations and combinations
###
#######################################################################
class _MultisetHistogram(tuple):
pass
_N = -1
_ITEMS = -2
_M = slice(None, _ITEMS)
def _multiset_histogram(n):
"""Return tuple used in permutation and combination counting. Input
is a dictionary giving items with counts as values or a sequence of
items (which need not be sorted).
The data is stored in a class deriving from tuple so it is easily
recognized and so it can be converted easily to a list.
"""
if type(n) is dict: # item: count
if not all(isinstance(v, int) and v >= 0 for v in n.values()):
raise ValueError
tot = sum(n.values())
items = sum(1 for k in n if n[k] > 0)
return _MultisetHistogram([n[k] for k in n if n[k] > 0] + [items, tot])
else:
n = list(n)
s = set(n)
if len(s) == len(n):
n = [1]*len(n)
n.extend([len(n), len(n)])
return _MultisetHistogram(n)
m = dict(zip(s, range(len(s))))
d = dict(zip(range(len(s)), [0]*len(s)))
for i in n:
d[m[i]] += 1
return _multiset_histogram(d)
def nP(n, k=None, replacement=False):
"""Return the number of permutations of ``n`` items taken ``k`` at a time.
Possible values for ``n``::
integer - set of length ``n``
sequence - converted to a multiset internally
multiset - {element: multiplicity}
If ``k`` is None then the total of all permutations of length 0
through the number of items represented by ``n`` will be returned.
If ``replacement`` is True then a given item can appear more than once
in the ``k`` items. (For example, for 'ab' permutations of 2 would
include 'aa', 'ab', 'ba' and 'bb'.) The multiplicity of elements in
``n`` is ignored when ``replacement`` is True but the total number
of elements is considered since no element can appear more times than
the number of elements in ``n``.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nP
>>> from sympy.utilities.iterables import multiset_permutations, multiset
>>> nP(3, 2)
6
>>> nP('abc', 2) == nP(multiset('abc'), 2) == 6
True
>>> nP('aab', 2)
3
>>> nP([1, 2, 2], 2)
3
>>> [nP(3, i) for i in range(4)]
[1, 3, 6, 6]
>>> nP(3) == sum(_)
True
When ``replacement`` is True, each item can have multiplicity
equal to the length represented by ``n``:
>>> nP('aabc', replacement=True)
121
>>> [len(list(multiset_permutations('aaaabbbbcccc', i))) for i in range(5)]
[1, 3, 9, 27, 81]
>>> sum(_)
121
References
==========
.. [1] http://en.wikipedia.org/wiki/Permutation
See Also
========
sympy.utilities.iterables.multiset_permutations
"""
try:
n = as_int(n)
except ValueError:
return Integer(_nP(_multiset_histogram(n), k, replacement))
return Integer(_nP(n, k, replacement))
@cacheit
def _nP(n, k=None, replacement=False):
from sympy.functions.combinatorial.factorials import factorial
from sympy.core.mul import prod
if k == 0:
return 1
if isinstance(n, SYMPY_INTS): # n different items
# assert n >= 0
if k is None:
return sum(_nP(n, i, replacement) for i in range(n + 1))
elif replacement:
return n**k
elif k > n:
return 0
elif k == n:
return factorial(k)
elif k == 1:
return n
else:
# assert k >= 0
return _product(n - k + 1, n)
elif isinstance(n, _MultisetHistogram):
if k is None:
return sum(_nP(n, i, replacement) for i in range(n[_N] + 1))
elif replacement:
return n[_ITEMS]**k
elif k == n[_N]:
return factorial(k)/prod([factorial(i) for i in n[_M] if i > 1])
elif k > n[_N]:
return 0
elif k == 1:
return n[_ITEMS]
else:
# assert k >= 0
tot = 0
n = list(n)
for i in range(len(n[_M])):
if not n[i]:
continue
n[_N] -= 1
if n[i] == 1:
n[i] = 0
n[_ITEMS] -= 1
tot += _nP(_MultisetHistogram(n), k - 1)
n[_ITEMS] += 1
n[i] = 1
else:
n[i] -= 1
tot += _nP(_MultisetHistogram(n), k - 1)
n[i] += 1
n[_N] += 1
return tot
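# Editor's note: an illustrative brute-force cross-check (not used by sympy) of
# the multiset branch above. The sample word and size below are hypothetical.
def _nP_enumeration_sketch(word='aabbc', k=3):
    """Return True if nP(word, k) matches an explicit enumeration of the
    distinct length-k permutations of the multiset."""
    from sympy.utilities.iterables import multiset_permutations
    return nP(word, k) == len(list(multiset_permutations(word, k)))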
@cacheit
def _AOP_product(n):
"""for n = (m1, m2, .., mk) return the coefficients of the polynomial,
prod(sum(x**i for i in range(nj + 1)) for nj in n); i.e. the coefficients
of the product of AOPs (all-one polynomials) of order given in n. The
resulting coefficient corresponding to x**r is the number of r-length
combinations of sum(n) elements with multiplicities given in n.
The coefficients are given as a default dictionary (so if a query is made
for a key that is not present, 0 will be returned).
Examples
========
>>> from sympy.functions.combinatorial.numbers import _AOP_product
>>> from sympy.abc import x
>>> n = (2, 2, 3) # e.g. aabbccc
>>> prod = ((x**2 + x + 1)*(x**2 + x + 1)*(x**3 + x**2 + x + 1)).expand()
>>> c = _AOP_product(n); dict(c)
{0: 1, 1: 3, 2: 6, 3: 8, 4: 8, 5: 6, 6: 3, 7: 1}
>>> [c[i] for i in range(8)] == [prod.coeff(x, i) for i in range(8)]
True
The generating poly used here is the same as that listed in
http://tinyurl.com/cep849r, but in a refactored form.
"""
from collections import defaultdict
n = list(n)
ord = sum(n)
need = (ord + 2)//2
rv = [1]*(n.pop() + 1)
rv.extend([0]*(need - len(rv)))
rv = rv[:need]
while n:
ni = n.pop()
N = ni + 1
was = rv[:]
for i in range(1, min(N, len(rv))):
rv[i] += rv[i - 1]
for i in range(N, need):
rv[i] += rv[i - 1] - was[i - N]
rev = list(reversed(rv))
if ord % 2:
rv = rv + rev
else:
rv[-1:] = rev
d = defaultdict(int)
for i in range(len(rv)):
d[i] = rv[i]
return d
def nC(n, k=None, replacement=False):
"""Return the number of combinations of ``n`` items taken ``k`` at a time.
Possible values for ``n``::
integer - set of length ``n``
sequence - converted to a multiset internally
multiset - {element: multiplicity}
If ``k`` is None then the total of all combinations of length 0
through the number of items represented in ``n`` will be returned.
If ``replacement`` is True then a given item can appear more than once
in the ``k`` items. (For example, for 'ab' sets of 2 would include 'aa',
'ab', and 'bb'.) The multiplicity of elements in ``n`` is ignored when
``replacement`` is True but the total number of elements is considered
since no element can appear more times than the number of elements in
``n``.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nC
>>> from sympy.utilities.iterables import multiset_combinations
>>> nC(3, 2)
3
>>> nC('abc', 2)
3
>>> nC('aab', 2)
2
When ``replacement`` is True, each item can have multiplicity
equal to the length represented by ``n``:
>>> nC('aabc', replacement=True)
35
>>> [len(list(multiset_combinations('aaaabbbbcccc', i))) for i in range(5)]
[1, 3, 6, 10, 15]
>>> sum(_)
35
If there are ``k`` items with multiplicities ``m_1, m_2, ..., m_k``
then the total of all combinations of length 0 through ``k`` is the
product, ``(m_1 + 1)*(m_2 + 1)*...*(m_k + 1)``. When the multiplicity
of each item is 1 (i.e., k unique items) then there are 2**k
combinations. For example, if there are 4 unique items, the total number
of combinations is 16:
>>> sum(nC(4, i) for i in range(5))
16
References
==========
.. [1] http://en.wikipedia.org/wiki/Combination
.. [2] http://tinyurl.com/cep849r
See Also
========
sympy.utilities.iterables.multiset_combinations
"""
from sympy.functions.combinatorial.factorials import binomial
from sympy.core.mul import prod
if isinstance(n, SYMPY_INTS):
if k is None:
if not replacement:
return 2**n
return sum(nC(n, i, replacement) for i in range(n + 1))
assert k >= 0
if replacement:
return binomial(n + k - 1, k)
return binomial(n, k)
if isinstance(n, _MultisetHistogram):
N = n[_N]
if k is None:
if not replacement:
return prod(m + 1 for m in n[_M])
return sum(nC(n, i, replacement) for i in range(N + 1))
elif replacement:
return nC(n[_ITEMS], k, replacement)
# assert k >= 0
elif k in (1, N - 1):
return n[_ITEMS]
elif k in (0, N):
return 1
return _AOP_product(tuple(n[_M]))[k]
else:
return nC(_multiset_histogram(n), k, replacement)
@cacheit
def _stirling1(n, k):
if n == k == 0:
return S.One
if 0 in (n, k):
return S.Zero
n1 = n - 1
# some special values
if n == k:
return S.One
elif k == 1:
return factorial(n1)
elif k == n1:
return C.binomial(n, 2)
elif k == n - 2:
return (3*n - 1)*C.binomial(n, 3)/4
elif k == n - 3:
return C.binomial(n, 2)*C.binomial(n, 4)
# general recurrence
return n1*_stirling1(n1, k) + _stirling1(n1, k - 1)
@cacheit
def _stirling2(n, k):
if n == k == 0:
return S.One
if 0 in (n, k):
return S.Zero
n1 = n - 1
# some special values
if k == n1:
return C.binomial(n, 2)
elif k == 2:
return 2**n1 - 1
# general recurrence
return k*_stirling2(n1, k) + _stirling2(n1, k - 1)
def stirling(n, k, d=None, kind=2, signed=False):
"""Return Stirling number S(n, k) of the first or second (default) kind.
The sum of all Stirling numbers of the second kind for k = 1
through n is bell(n). The recurrence relationship for these numbers
is::
{0} {n} {0} {n + 1} {n} { n }
{ } = 1; { } = { } = 0; { } = j*{ } + { }
{0} {0} {k} { k } {k} {k - 1}
where ``j`` is::
``n`` for Stirling numbers of the first kind
``-n`` for signed Stirling numbers of the first kind
``k`` for Stirling numbers of the second kind
The first kind of Stirling number counts the number of permutations of
``n`` distinct items that have ``k`` cycles; the second kind counts the
ways in which ``n`` distinct items can be partitioned into ``k`` parts.
If ``d`` is given, the "reduced Stirling number of the second kind" is
returned: ``S^{d}(n, k) = S(n - d + 1, k - d + 1)`` with ``n >= k >= d``.
(This counts the ways to partition ``n`` consecutive integers into
``k`` groups with no pairwise difference less than ``d``. See example
below.)
To obtain the signed Stirling numbers of the first kind, use keyword
``signed=True``. Using this keyword automatically sets ``kind`` to 1.
Examples
========
>>> from sympy.functions.combinatorial.numbers import stirling, bell
>>> from sympy.combinatorics import Permutation
>>> from sympy.utilities.iterables import multiset_partitions, permutations
First kind (unsigned by default):
>>> [stirling(6, i, kind=1) for i in range(7)]
[0, 120, 274, 225, 85, 15, 1]
>>> perms = list(permutations(range(4)))
>>> [sum(Permutation(p).cycles == i for p in perms) for i in range(5)]
[0, 6, 11, 6, 1]
>>> [stirling(4, i, kind=1) for i in range(5)]
[0, 6, 11, 6, 1]
First kind (signed):
>>> [stirling(4, i, signed=True) for i in range(5)]
[0, -6, 11, -6, 1]
Second kind:
>>> [stirling(10, i) for i in range(12)]
[0, 1, 511, 9330, 34105, 42525, 22827, 5880, 750, 45, 1, 0]
>>> sum(_) == bell(10)
True
>>> len(list(multiset_partitions(range(4), 2))) == stirling(4, 2)
True
Reduced second kind:
>>> from sympy import subsets, oo
>>> def delta(p):
... if len(p) == 1:
... return oo
... return min(abs(i[0] - i[1]) for i in subsets(p, 2))
>>> parts = multiset_partitions(range(5), 3)
>>> d = 2
>>> sum(1 for p in parts if all(delta(i) >= d for i in p))
7
>>> stirling(5, 3, 2)
7
References
==========
.. [1] http://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind
.. [2] http://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
See Also
========
sympy.utilities.iterables.multiset_partitions
"""
# TODO: make this a class like bell()
n = as_int(n)
k = as_int(k)
if n < 0:
raise ValueError('n must be nonnegative')
if k > n:
return S.Zero
if d:
# assert k >= d
# kind is ignored -- only kind=2 is supported
return _stirling2(n - d + 1, k - d + 1)
elif signed:
# kind is ignored -- only kind=1 is supported
return (-1)**(n - k)*_stirling1(n, k)
if kind == 1:
return _stirling1(n, k)
elif kind == 2:
return _stirling2(n, k)
else:
raise ValueError('kind must be 1 or 2, not %s' % kind)
@cacheit
def _nT(n, k):
"""Return the partitions of ``n`` items into ``k`` parts. This
is used by ``nT`` for the case when ``n`` is an integer."""
if k == 0:
return 1 if k == n else 0
return sum(_nT(n - k, j) for j in range(min(k, n - k) + 1))
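# Example (added comment): _nT(5, 2) == 2, matching the two partitions of five
# identical items into exactly two parts, 4+1 and 3+2; nT() below relies on
# this helper for its integer-``n`` case.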
def nT(n, k=None):
"""Return the number of ``k``-sized partitions of ``n`` items.
Possible values for ``n``::
integer - ``n`` identical items
sequence - converted to a multiset internally
multiset - {element: multiplicity}
Note: the convention for ``nT`` is different from that of ``nC`` and ``nP`` in that
here an integer indicates ``n`` *identical* items instead of a set of
length ``n``; this is in keeping with the ``partitions`` function which
treats its integer-``n`` input like a list of ``n`` 1s. One can use
``range(n)`` for ``n`` to indicate ``n`` distinct items.
If ``k`` is None then the total number of ways to partition the elements
represented in ``n`` will be returned.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nT
Partitions of the given multiset:
>>> [nT('aabbc', i) for i in range(1, 7)]
[1, 8, 11, 5, 1, 0]
>>> nT('aabbc') == sum(_)
True
(TODO The following can be activated with >>> when
taocp_multiset_permutation is in place.)
>> [nT("mississippi", i) for i in range(1, 12)]
[1, 74, 609, 1521, 1768, 1224, 579, 197, 50, 9, 1]
Partitions when all items are identical:
>>> [nT(5, i) for i in range(1, 6)]
[1, 2, 2, 1, 1]
>>> nT('1'*5) == sum(_)
True
When all items are different:
>>> [nT(range(5), i) for i in range(1, 6)]
[1, 15, 25, 10, 1]
>>> nT(range(5)) == sum(_)
True
References
==========
.. [1] http://undergraduate.csse.uwa.edu.au/units/CITS7209/partition.pdf
See Also
========
sympy.utilities.iterables.partitions
sympy.utilities.iterables.multiset_partitions
"""
from sympy.utilities.iterables import multiset_partitions
if isinstance(n, SYMPY_INTS):
# assert n >= 0
# all the same
if k is None:
return sum(_nT(n, k) for k in range(1, n + 1))
return _nT(n, k)
if not isinstance(n, _MultisetHistogram):
try:
# if n contains hashable items there is some
# quick handling that can be done
u = len(set(n))
if u == 1:
return nT(len(n), k)
elif u == len(n):
n = range(u)
raise TypeError
except TypeError:
n = _multiset_histogram(n)
N = n[_N]
if k is None and N == 1:
return 1
if k in (1, N):
return 1
if k == 2 or N == 2 and k is None:
m, r = divmod(N, 2)
rv = sum(nC(n, i) for i in range(1, m + 1))
if not r:
rv -= nC(n, m)//2
if k is None:
rv += 1 # for k == 1
return rv
if N == n[_ITEMS]:
# all distinct
if k is None:
return bell(N)
return stirling(N, k)
if k is None:
return sum(nT(n, k) for k in range(1, N + 1))
tot = 0
for p in multiset_partitions(
[i for i, j in enumerate(n[_M]) for ii in range(j)]):
tot += len(p) == k
return tot
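# Convention check (added comments; values are consistent with the code above):
# an integer argument means identical items for nT but a set for nC/nP, so
#   nT(3) == 3            # partitions of three identical items: 3, 2+1, 1+1+1
#   nC(3) == 8            # subsets of a 3-element set, i.e. 2**3
#   nT(range(3)) == 5     # bell(3): partitions of three distinct items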
|
gpl-3.0
| -7,633,730,731,319,739,000
| 29.53915
| 103
| 0.497644
| false
| 3.420732
| false
| false
| false
|
semente/django-hashtags
|
hashtags/views.py
|
1
|
3967
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 Guilherme Gondim and contributors
#
# This file is part of Django Hashtags.
#
# Django Hashtags is free software under terms of the GNU Lesser
# General Public License version 3 (LGPLv3) as published by the Free
# Software Foundation. See the file README for copying conditions.
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, InvalidPage
from django.http import Http404, HttpResponse
from django.template import loader, RequestContext
from django.views.generic import list_detail
from hashtags.models import Hashtag, HashtaggedItem
def hashtag_index(request, *args, **kwargs):
"""
A thin wrapper around ``django.views.generic.list_detail.object_list``.
You don't need to provide the ``queryset`` if you don't want to.
The ``template_object_name`` by default is ``'hashtag'``. This means that the
context variable ``object_list`` will be renamed to ``hashtag_list``.
**Template name**:
If ``template_name`` isn't specified, this view will use the template
``hashtags/hashtag_index.html`` by default.
"""
if 'queryset' not in kwargs:
kwargs['queryset'] = Hashtag.objects.all()
if 'template_name' not in kwargs:
kwargs['template_name'] = 'hashtags/hashtag_index.html'
if 'template_object_name' not in kwargs:
kwargs['template_object_name'] = 'hashtag'
return list_detail.object_list(request, *args, **kwargs)
def hashtagged_item_list(request, hashtag, paginate_by=None, page=None,
allow_empty=True, template_loader=loader,
template_name="hashtags/hashtagged_item_list.html",
extra_context={}, context_processors=None,
template_object_name='hashtagged_item_list',
mimetype=None):
"""
A page representing a list of objects hastagged with ``hashtag``.
Works like ``django.views.generic.list_detail.object_list``.
Templates: ``hashtags/hashtagged_item_list.html``
Context:
hashtag
The hashtag object in question
hashtagged_item_list
The list of objects hashtagged with ``hashtag``
paginator
An instance of ``django.core.paginator.Paginator``
page_obj
An instance of ``django.core.paginator.Page``
"""
try:
hashtag = Hashtag.objects.get(name=hashtag)
except ObjectDoesNotExist:
raise Http404("Hashtag %s doesn't exist." % hashtag)
queryset = HashtaggedItem.objects.filter(hashtag=hashtag)
if paginate_by:
paginator = Paginator(queryset, paginate_by,
allow_empty_first_page=allow_empty)
if not page:
page = request.GET.get('page', 1)
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
# Page is not 'last', nor can it be converted to an int.
raise Http404
try:
page_obj = paginator.page(page_number)
except InvalidPage:
raise Http404
c = RequestContext(request, {
'hashtag': hashtag,
template_object_name: queryset,
'paginator': paginator,
'page_obj': page_obj,
}, context_processors)
else:
c = RequestContext(request, {
'hashtag': hashtag,
template_object_name: queryset,
'paginator': None,
'page_obj': None,
}, context_processors)
if not allow_empty and len(queryset) == 0:
raise Http404
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
t = template_loader.get_template(template_name)
return HttpResponse(t.render(c), mimetype=mimetype)
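# Hedged usage sketch (added for illustration; the URL regexes and names are
# placeholders, not part of Django Hashtags itself). With the Django 1.x-era
# URLconf style this app targets, the two views could be wired up like:
#
#   from django.conf.urls.defaults import patterns, url
#
#   urlpatterns = patterns('hashtags.views',
#       url(r'^$', 'hashtag_index', name='hashtag_index'),
#       url(r'^(?P<hashtag>\w+)/$', 'hashtagged_item_list',
#           name='hashtagged_item_list'),
#   )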
|
lgpl-3.0
| -5,713,242,525,883,983,000
| 37.144231
| 80
| 0.617595
| false
| 4.171399
| false
| false
| false
|
cheesechoi/Triton
|
cheese/test/cheese_getModelcheck.jle.jg.py
|
1
|
5145
|
from triton import *
import smt2lib
"""
Address 0x400547 progress
[+] Address <cmp argv[1][0] 0x41>
{'SymVar_0': "0x50, 'P'"}
{'SymVar_0': "0x60, '`'"}
{'SymVar_0': "0x5a, 'Z'"}
{'SymVar_0': "0x4a, 'J'"}
{'SymVar_0': "0x42, 'B'"}
{'SymVar_0': "0x62, 'b'"}
{'SymVar_0': "0x6a, 'j'"}
{'SymVar_0': "0x68, 'h'"}
{'SymVar_0': "0x69, 'i'"}
{'SymVar_0': "0x49, 'I'"}
[+] Address <cmp argv[1][0] 0x59>
{'SymVar_0': "0x50, 'P'"}
{'SymVar_0': "0x59, 'Y'"}
{'SymVar_0': "0x58, 'X'"}
{'SymVar_0': "0x48, 'H'"}
{'SymVar_0': "0x44, 'D'"}
{'SymVar_0': "0x4c, 'L'"}
{'SymVar_0': "0x54, 'T'"}
{'SymVar_0': "0x49, 'I'"}
{'SymVar_0': "0x4d, 'M'"}
{'SymVar_0': "0x4f, 'O'"}
nope!
"""
expr = str()
listExpr = list()
def sbefore(instruction):
concretizeAllMem()
concretizeAllReg()
return
def cafter(instruction):
# evaluateAST Test
if 0x400551 == instruction.getAddress(): # jle
bad = list()
regs = getRegs()
for reg, data in regs.items():
#print getRegName(reg)
if 'rip' != getRegName(reg):
continue
cvalue = data['concreteValue']
seid = data['symbolicExpr']
#print "seid %d"%seid
if seid == IDREF.MISC.UNSET:
#print "unset %d"%IDREF.MISC.UNSET
continue
#print "IDREF.MISC.UNSET %d"%IDREF.MISC.UNSET
#print "test:%s %s"%(getRegName(reg), data)
#print getSymExpr(seid)
print getSymExpr(seid).getAst()
expr = getFullExpression(getSymExpr(seid).getAst())
print "excute evalueateAST(expr) --> evalueateAST(%s)"%expr
svalue = evaluateAST(expr)
print svalue
if cvalue != svalue:
bad.append({
'reg':getRegName(reg),
'svalue': svalue,
'cvalue': cvalue,
'expr':getSymExpr(seid).getAst()
})
if len(instruction.getSymbolicExpressions()) == 0:
print "[??] %#x: %s"%(instruction.getAddress(), instruction.getDisassembly())
return
if not bad:
print "[OK] %#x: %s"%(instruction.getAddress(), instruction.getDisassembly())
else:
print "### [KO] ### %#x: %s"%(instruction.getAddress(), instruction.getDisassembly())
for w in bad:
print " Register : %s"%(w['reg'])
print " Symbolic Value : %016x"%(w['svalue'])
print " Concrete Value : %016x"%(w['cvalue'])
print " Expression : %s"%(w['expr'])
return
# 0x0000000000400547 <+26>: movzx eax,BYTE PTR [rax]
if 0x400547 == instruction.getAddress():# == 0x400547:
print "Address 0x400547 progress"
raxId = getRegSymbolicID(IDREF.REG.RAX)
print getSymExpr(raxId)
#convertExprToSymVar(raxId, 8) #only 8bit
# 0x000000000040054d <+32>: cmp BYTE PTR [rbp-0x1],0x41
if instruction.getAddress() == 0x40054d:
print '[+] Address <cmp argv[1][0] 0x41>'
# WE DONT WANT JUMP
# 0x0000000000400551 <+36>: jle 0x40056a <main+61>
# jump if less or equal . ZF = 1 or SF <> OF.
# ZF = 0 and SF == OF
zfId = getRegSymbolicID(IDREF.FLAG.ZF)
zfExpr = getFullExpression(getSymExpr(zfId).getAst())
sfId = getRegSymbolicID(IDREF.FLAG.SF)
sfExpr = getFullExpression(getSymExpr(sfId).getAst())
ofId = getRegSymbolicID(IDREF.FLAG.OF)
ofExpr = getFullExpression(getSymExpr(ofId).getAst())
listExpr.append(smt2lib.smtAssert(smt2lib.equal(zfExpr, smt2lib.bvfalse())))
listExpr.append(smt2lib.smtAssert(smt2lib.equal(sfExpr, ofExpr)))
exprComp = smt2lib.compound(listExpr)
models = getModels(exprComp, 10)
for model in models:
print {k: "0x%x, '%c'" % (v, v) for k, v in model.items()}
raw_input()
#0x0000000000400553 <+38>: cmp BYTE PTR [rbp-0x1],0x59
if instruction.getAddress() == 0x400553:
print '[+] Address <cmp argv[1][0] 0x59>'
# WE DONT WANT JUMP, TOO.
# 0x0000000000400557 <+42>: jg 0x40056a <main+61>
# jmp if greater. ZF = 0 and SF = OF
# ZF = 1 or SF <> OF
zfId = getRegSymbolicID(IDREF.FLAG.ZF)
zfExpr = getFullExpression(getSymExpr(zfId).getAst())
sfId = getRegSymbolicID(IDREF.FLAG.SF)
sfExpr = getFullExpression(getSymExpr(sfId).getAst())
ofId = getRegSymbolicID(IDREF.FLAG.OF)
ofExpr = getFullExpression(getSymExpr(ofId).getAst())
exprJgNotJump = smt2lib.equal(smt2lib.bvor(smt2lib.bvxor(sfExpr,ofExpr), zfExpr), smt2lib.bvtrue())
listExpr.append( smt2lib.smtAssert(exprJgNotJump) )
exprComp = smt2lib.compound(listExpr)
models = getModels(exprComp, 10)
for model in models:
print {k: "0x%x, '%c'" % (v, v) for k, v in model.items()}
raw_input()
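# Added illustration (comments only, not part of the original script): the two
# branch conditions constrained above, stated in plain terms, are
#   jle taken  <=>  ZF == 1 or SF != OF   (so "not taken" needs ZF == 0 and SF == OF)
#   jg  taken  <=>  ZF == 0 and SF == OF  (so "not taken" needs ZF == 1 or SF != OF)
# The jg "not taken" encoding used above, ((SF xor OF) or ZF) == 1, can be
# checked with a throwaway helper such as:
#
#   def jg_not_taken(zf, sf, of):
#       return ((sf ^ of) | zf) == 1
#
#   assert jg_not_taken(1, 0, 0) and jg_not_taken(0, 1, 0)
#   assert not jg_not_taken(0, 0, 0) and not jg_not_taken(0, 1, 1)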
if __name__ == '__main__':
startAnalysisFromSymbol('main')
addCallback(cafter, IDREF.CALLBACK.AFTER)
runProgram()
|
lgpl-3.0
| 5,614,911,014,887,900,000
| 31.563291
| 100
| 0.554325
| false
| 2.913364
| false
| false
| false
|
iulian787/spack
|
lib/spack/spack/test/config.py
|
2
|
33282
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import collections
import getpass
import tempfile
from six import StringIO
from llnl.util.filesystem import touch, mkdirp
import pytest
import spack.paths
import spack.config
import spack.main
import spack.schema.compilers
import spack.schema.config
import spack.schema.env
import spack.schema.packages
import spack.schema.mirrors
import spack.schema.repos
import spack.util.spack_yaml as syaml
import spack.util.path as spack_path
# sample config data
config_low = {
'config': {
'install_tree': {'root': 'install_tree_path'},
'build_stage': ['path1', 'path2', 'path3']}}
config_override_all = {
'config:': {
'install_tree:': {'root': 'override_all'}}}
config_override_key = {
'config': {
'install_tree:': {'root': 'override_key'}}}
config_merge_list = {
'config': {
'build_stage': ['patha', 'pathb']}}
config_override_list = {
'config': {
'build_stage:': ['pathd', 'pathe']}}
config_merge_dict = {
'config': {
'info': {
'a': 3,
'b': 4}}}
config_override_dict = {
'config': {
'info:': {
'a': 7,
'c': 9}}}
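# Note added for illustration: in the sample data above, a trailing ':' on a
# key (e.g. 'install_tree:', 'build_stage:', 'info:') marks a full override of
# that entry rather than a merge; the *_override_* tests below, and the
# 'config::section' / 'config:section:' paths passed to spack.config.override(),
# rely on this same convention.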
@pytest.fixture()
def write_config_file(tmpdir):
"""Returns a function that writes a config file."""
def _write(config, data, scope):
config_yaml = tmpdir.join(scope, config + '.yaml')
config_yaml.ensure()
with config_yaml.open('w') as f:
syaml.dump_config(data, f)
return _write
def check_compiler_config(comps, *compiler_names):
"""Check that named compilers in comps match Spack's config."""
config = spack.config.get('compilers')
compiler_list = ['cc', 'cxx', 'f77', 'fc']
flag_list = ['cflags', 'cxxflags', 'fflags', 'cppflags',
'ldflags', 'ldlibs']
param_list = ['modules', 'paths', 'spec', 'operating_system']
for compiler in config:
conf = compiler['compiler']
if conf['spec'] in compiler_names:
comp = next((c['compiler'] for c in comps if
c['compiler']['spec'] == conf['spec']), None)
if not comp:
raise ValueError('Bad config spec')
for p in param_list:
assert conf[p] == comp[p]
for f in flag_list:
expected = comp.get('flags', {}).get(f, None)
actual = conf.get('flags', {}).get(f, None)
assert expected == actual
for c in compiler_list:
expected = comp['paths'][c]
actual = conf['paths'][c]
assert expected == actual
#
# Some sample compiler config data and tests.
#
a_comps = {
'compilers': [
{'compiler': {
'paths': {
"cc": "/gcc473",
"cxx": "/g++473",
"f77": None,
"fc": None
},
'modules': None,
'spec': 'gcc@4.7.3',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "/gcc450",
"cxx": "/g++450",
"f77": 'gfortran',
"fc": 'gfortran'
},
'modules': None,
'spec': 'gcc@4.5.0',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "/gcc422",
"cxx": "/g++422",
"f77": 'gfortran',
"fc": 'gfortran'
},
'flags': {
"cppflags": "-O0 -fpic",
"fflags": "-f77",
},
'modules': None,
'spec': 'gcc@4.2.2',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "<overwritten>",
"cxx": "<overwritten>",
"f77": '<overwritten>',
"fc": '<overwritten>'},
'modules': None,
'spec': 'clang@3.3',
'operating_system': 'CNL10'
}}
]
}
b_comps = {
'compilers': [
{'compiler': {
'paths': {
"cc": "/icc100",
"cxx": "/icp100",
"f77": None,
"fc": None
},
'modules': None,
'spec': 'icc@10.0',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "/icc111",
"cxx": "/icp111",
"f77": 'ifort',
"fc": 'ifort'
},
'modules': None,
'spec': 'icc@11.1',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "/icc123",
"cxx": "/icp123",
"f77": 'ifort',
"fc": 'ifort'
},
'flags': {
"cppflags": "-O3",
"fflags": "-f77rtl",
},
'modules': None,
'spec': 'icc@12.3',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "<overwritten>",
"cxx": "<overwritten>",
"f77": '<overwritten>',
"fc": '<overwritten>'},
'modules': None,
'spec': 'clang@3.3',
'operating_system': 'CNL10'
}}
]
}
@pytest.fixture()
def compiler_specs():
"""Returns a couple of compiler specs needed for the tests"""
a = [ac['compiler']['spec'] for ac in a_comps['compilers']]
b = [bc['compiler']['spec'] for bc in b_comps['compilers']]
CompilerSpecs = collections.namedtuple('CompilerSpecs', ['a', 'b'])
return CompilerSpecs(a=a, b=b)
def test_write_key_in_memory(mock_low_high_config, compiler_specs):
# Write b_comps "on top of" a_comps.
spack.config.set('compilers', a_comps['compilers'], scope='low')
spack.config.set('compilers', b_comps['compilers'], scope='high')
# Make sure the config looks how we expect.
check_compiler_config(a_comps['compilers'], *compiler_specs.a)
check_compiler_config(b_comps['compilers'], *compiler_specs.b)
def test_write_key_to_disk(mock_low_high_config, compiler_specs):
# Write b_comps "on top of" a_comps.
spack.config.set('compilers', a_comps['compilers'], scope='low')
spack.config.set('compilers', b_comps['compilers'], scope='high')
# Clear caches so we're forced to read from disk.
spack.config.config.clear_caches()
# Same check again, to ensure consistency.
check_compiler_config(a_comps['compilers'], *compiler_specs.a)
check_compiler_config(b_comps['compilers'], *compiler_specs.b)
def test_write_to_same_priority_file(mock_low_high_config, compiler_specs):
# Write b_comps in the same file as a_comps.
spack.config.set('compilers', a_comps['compilers'], scope='low')
spack.config.set('compilers', b_comps['compilers'], scope='low')
# Clear caches so we're forced to read from disk.
spack.config.config.clear_caches()
# Same check again, to ensure consistency.
check_compiler_config(a_comps['compilers'], *compiler_specs.a)
check_compiler_config(b_comps['compilers'], *compiler_specs.b)
#
# Sample repo data and tests
#
repos_low = {'repos': ["/some/path"]}
repos_high = {'repos': ["/some/other/path"]}
# repos
def test_write_list_in_memory(mock_low_high_config):
spack.config.set('repos', repos_low['repos'], scope='low')
spack.config.set('repos', repos_high['repos'], scope='high')
config = spack.config.get('repos')
assert config == repos_high['repos'] + repos_low['repos']
def test_substitute_config_variables(mock_low_high_config):
prefix = spack.paths.prefix.lstrip('/')
assert os.path.join(
'/foo/bar/baz', prefix
) == spack_path.canonicalize_path('/foo/bar/baz/$spack')
assert os.path.join(
spack.paths.prefix, 'foo/bar/baz'
) == spack_path.canonicalize_path('$spack/foo/bar/baz/')
assert os.path.join(
'/foo/bar/baz', prefix, 'foo/bar/baz'
) == spack_path.canonicalize_path('/foo/bar/baz/$spack/foo/bar/baz/')
assert os.path.join(
'/foo/bar/baz', prefix
) == spack_path.canonicalize_path('/foo/bar/baz/${spack}')
assert os.path.join(
spack.paths.prefix, 'foo/bar/baz'
) == spack_path.canonicalize_path('${spack}/foo/bar/baz/')
assert os.path.join(
'/foo/bar/baz', prefix, 'foo/bar/baz'
) == spack_path.canonicalize_path('/foo/bar/baz/${spack}/foo/bar/baz/')
assert os.path.join(
'/foo/bar/baz', prefix, 'foo/bar/baz'
) != spack_path.canonicalize_path('/foo/bar/baz/${spack/foo/bar/baz/')
packages_merge_low = {
'packages': {
'foo': {
'variants': ['+v1']
},
'bar': {
'variants': ['+v2']
}
}
}
packages_merge_high = {
'packages': {
'foo': {
'version': ['a']
},
'bar': {
'version': ['b'],
'variants': ['+v3']
},
'baz': {
'version': ['c']
}
}
}
@pytest.mark.regression('7924')
def test_merge_with_defaults(mock_low_high_config, write_config_file):
"""This ensures that specified preferences merge with defaults as
expected. Originally all defaults were initialized with the
exact same object, which led to aliasing problems. Therefore
the test configs used here leave 'version' blank for multiple
packages in 'packages_merge_low'.
"""
write_config_file('packages', packages_merge_low, 'low')
write_config_file('packages', packages_merge_high, 'high')
cfg = spack.config.get('packages')
assert cfg['foo']['version'] == ['a']
assert cfg['bar']['version'] == ['b']
assert cfg['baz']['version'] == ['c']
def test_substitute_user(mock_low_high_config):
user = getpass.getuser()
assert '/foo/bar/' + user + '/baz' == spack_path.canonicalize_path(
'/foo/bar/$user/baz'
)
def test_substitute_tempdir(mock_low_high_config):
tempdir = tempfile.gettempdir()
assert tempdir == spack_path.canonicalize_path('$tempdir')
assert tempdir + '/foo/bar/baz' == spack_path.canonicalize_path(
'$tempdir/foo/bar/baz'
)
PAD_STRING = spack.util.path.SPACK_PATH_PADDING_CHARS
MAX_PATH_LEN = spack.util.path.get_system_path_max()
MAX_PADDED_LEN = MAX_PATH_LEN - spack.util.path.SPACK_MAX_INSTALL_PATH_LENGTH
reps = [PAD_STRING for _ in range((MAX_PADDED_LEN // len(PAD_STRING) + 1) + 2)]
full_padded_string = os.path.join(
'/path', os.path.sep.join(reps))[:MAX_PADDED_LEN]
@pytest.mark.parametrize('config_settings,expected', [
([], [None, None, None]),
([['config:install_tree:root', '/path']], ['/path', None, None]),
([['config:install_tree', '/path']], ['/path', None, None]),
([['config:install_tree:projections', {'all': '{name}'}]],
[None, None, {'all': '{name}'}]),
([['config:install_path_scheme', '{name}']],
[None, None, {'all': '{name}'}]),
([['config:install_tree:root', '/path'],
['config:install_tree:padded_length', 11]],
[os.path.join('/path', PAD_STRING[:5]), '/path', None]),
([['config:install_tree:root', '/path/$padding:11']],
[os.path.join('/path', PAD_STRING[:5]), '/path', None]),
([['config:install_tree', '/path/${padding:11}']],
[os.path.join('/path', PAD_STRING[:5]), '/path', None]),
([['config:install_tree:padded_length', False]], [None, None, None]),
([['config:install_tree:padded_length', True],
['config:install_tree:root', '/path']],
[full_padded_string, '/path', None]),
([['config:install_tree:', '/path$padding']],
[full_padded_string, '/path', None]),
([['config:install_tree:', '/path/${padding}']],
[full_padded_string, '/path', None]),
])
def test_parse_install_tree(config_settings, expected, mutable_config):
expected_root = expected[0] or spack.store.default_install_tree_root
expected_unpadded_root = expected[1] or expected_root
expected_proj = expected[2] or spack.directory_layout.default_projections
# config settings is a list of 2-element lists, [path, value]
# where path is a config path and value is the value to set at that path
# these can be "splatted" in as the arguments to config.set
for config_setting in config_settings:
mutable_config.set(*config_setting)
config_dict = mutable_config.get('config')
root, unpadded_root, projections = spack.store.parse_install_tree(
config_dict)
assert root == expected_root
assert unpadded_root == expected_unpadded_root
assert projections == expected_proj
def test_read_config(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
assert spack.config.get('config') == config_low['config']
def test_read_config_override_all(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
write_config_file('config', config_override_all, 'high')
assert spack.config.get('config') == {
'install_tree': {
'root': 'override_all'
}
}
def test_read_config_override_key(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
write_config_file('config', config_override_key, 'high')
assert spack.config.get('config') == {
'install_tree': {
'root': 'override_key'
},
'build_stage': ['path1', 'path2', 'path3']
}
def test_read_config_merge_list(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
write_config_file('config', config_merge_list, 'high')
assert spack.config.get('config') == {
'install_tree': {
'root': 'install_tree_path'
},
'build_stage': ['patha', 'pathb', 'path1', 'path2', 'path3']
}
def test_read_config_override_list(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
write_config_file('config', config_override_list, 'high')
assert spack.config.get('config') == {
'install_tree': {
'root': 'install_tree_path'
},
'build_stage': config_override_list['config']['build_stage:']
}
def test_ordereddict_merge_order():
""""Test that source keys come before dest keys in merge_yaml results."""
source = syaml.syaml_dict([
("k1", "v1"),
("k2", "v2"),
("k3", "v3"),
])
dest = syaml.syaml_dict([
("k4", "v4"),
("k3", "WRONG"),
("k5", "v5"),
])
result = spack.config.merge_yaml(dest, source)
assert "WRONG" not in result.values()
expected_keys = ["k1", "k2", "k3", "k4", "k5"]
expected_items = [
("k1", "v1"), ("k2", "v2"), ("k3", "v3"), ("k4", "v4"), ("k5", "v5")
]
assert expected_keys == list(result.keys())
assert expected_items == list(result.items())
def test_list_merge_order():
""""Test that source lists are prepended to dest."""
source = ["a", "b", "c"]
dest = ["d", "e", "f"]
result = spack.config.merge_yaml(dest, source)
assert ["a", "b", "c", "d", "e", "f"] == result
def test_internal_config_update(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
before = mock_low_high_config.get('config')
assert before['install_tree']['root'] == 'install_tree_path'
# add an internal configuration scope
scope = spack.config.InternalConfigScope('command_line')
assert 'InternalConfigScope' in repr(scope)
mock_low_high_config.push_scope(scope)
command_config = mock_low_high_config.get('config', scope='command_line')
command_config['install_tree'] = {'root': 'foo/bar'}
mock_low_high_config.set('config', command_config, scope='command_line')
after = mock_low_high_config.get('config')
assert after['install_tree']['root'] == 'foo/bar'
def test_internal_config_filename(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
mock_low_high_config.push_scope(
spack.config.InternalConfigScope('command_line'))
with pytest.raises(NotImplementedError):
mock_low_high_config.get_config_filename('command_line', 'config')
def test_mark_internal():
data = {
'config': {
'bool': False,
'int': 6,
'numbers': [1, 2, 3],
'string': 'foo',
'dict': {
'more_numbers': [1, 2, 3],
'another_string': 'foo',
'another_int': 7,
}
}
}
marked = spack.config._mark_internal(data, 'x')
# marked version should be equal to the original
assert data == marked
def assert_marked(obj):
if type(obj) is bool:
return # can't subclass bool, so can't mark it
assert hasattr(obj, '_start_mark') and obj._start_mark.name == 'x'
assert hasattr(obj, '_end_mark') and obj._end_mark.name == 'x'
# everything in the marked version should have marks
checks = (marked.keys(), marked.values(),
marked['config'].keys(), marked['config'].values(),
marked['config']['numbers'],
marked['config']['dict'].keys(),
marked['config']['dict'].values(),
marked['config']['dict']['more_numbers'])
for seq in checks:
for obj in seq:
assert_marked(obj)
def test_internal_config_from_data():
config = spack.config.Configuration()
# add an internal config initialized from an inline dict
config.push_scope(spack.config.InternalConfigScope('_builtin', {
'config': {
'verify_ssl': False,
'build_jobs': 6,
}
}))
assert config.get('config:verify_ssl', scope='_builtin') is False
assert config.get('config:build_jobs', scope='_builtin') == 6
assert config.get('config:verify_ssl') is False
assert config.get('config:build_jobs') == 6
# push one on top and see what happens.
config.push_scope(spack.config.InternalConfigScope('higher', {
'config': {
'checksum': True,
'verify_ssl': True,
}
}))
assert config.get('config:verify_ssl', scope='_builtin') is False
assert config.get('config:build_jobs', scope='_builtin') == 6
assert config.get('config:verify_ssl', scope='higher') is True
assert config.get('config:build_jobs', scope='higher') is None
assert config.get('config:verify_ssl') is True
assert config.get('config:build_jobs') == 6
assert config.get('config:checksum') is True
assert config.get('config:checksum', scope='_builtin') is None
assert config.get('config:checksum', scope='higher') is True
def test_keys_are_ordered():
"""Test that keys in Spack YAML files retain their order from the file."""
expected_order = (
'bin',
'man',
'share/man',
'share/aclocal',
'lib',
'lib64',
'include',
'lib/pkgconfig',
'lib64/pkgconfig',
'share/pkgconfig',
''
)
config_scope = spack.config.ConfigScope(
'modules',
os.path.join(spack.paths.test_path, 'data', 'config')
)
data = config_scope.get_section('modules')
prefix_inspections = data['modules']['prefix_inspections']
for actual, expected in zip(prefix_inspections, expected_order):
assert actual == expected
def test_config_format_error(mutable_config):
"""This is raised when we try to write a bad configuration."""
with pytest.raises(spack.config.ConfigFormatError):
spack.config.set('compilers', {'bad': 'data'}, scope='site')
def get_config_error(filename, schema, yaml_string):
"""Parse a YAML string and return the resulting ConfigFormatError.
Fail if there is no ConfigFormatError
"""
with open(filename, 'w') as f:
f.write(yaml_string)
# parse and return error, or fail.
try:
spack.config.read_config_file(filename, schema)
except spack.config.ConfigFormatError as e:
return e
else:
pytest.fail('ConfigFormatError was not raised!')
def test_config_parse_dict_in_list(tmpdir):
with tmpdir.as_cwd():
e = get_config_error(
'repos.yaml', spack.schema.repos.schema, """\
repos:
- https://foobar.com/foo
- https://foobar.com/bar
- error:
- abcdef
- https://foobar.com/baz
""")
assert "repos.yaml:4" in str(e)
def test_config_parse_str_not_bool(tmpdir):
with tmpdir.as_cwd():
e = get_config_error(
'config.yaml', spack.schema.config.schema, """\
config:
verify_ssl: False
checksum: foobar
dirty: True
""")
assert "config.yaml:3" in str(e)
def test_config_parse_list_in_dict(tmpdir):
with tmpdir.as_cwd():
e = get_config_error(
'mirrors.yaml', spack.schema.mirrors.schema, """\
mirrors:
foo: http://foobar.com/baz
bar: http://barbaz.com/foo
baz: http://bazfoo.com/bar
travis: [1, 2, 3]
""")
assert "mirrors.yaml:5" in str(e)
def test_bad_config_section(mock_low_high_config):
"""Test that getting or setting a bad section gives an error."""
with pytest.raises(spack.config.ConfigSectionError):
spack.config.set('foobar', 'foobar')
with pytest.raises(spack.config.ConfigSectionError):
spack.config.get('foobar')
@pytest.mark.skipif(os.getuid() == 0, reason='user is root')
def test_bad_command_line_scopes(tmpdir, mock_low_high_config):
cfg = spack.config.Configuration()
with tmpdir.as_cwd():
with pytest.raises(spack.config.ConfigError):
spack.config._add_command_line_scopes(cfg, ['bad_path'])
touch('unreadable_file')
with pytest.raises(spack.config.ConfigError):
spack.config._add_command_line_scopes(cfg, ['unreadable_file'])
mkdirp('unreadable_dir')
with pytest.raises(spack.config.ConfigError):
try:
os.chmod('unreadable_dir', 0)
spack.config._add_command_line_scopes(cfg, ['unreadable_dir'])
finally:
os.chmod('unreadable_dir', 0o700) # so tmpdir can be removed
def test_add_command_line_scopes(tmpdir, mutable_config):
config_yaml = str(tmpdir.join('config.yaml'))
with open(config_yaml, 'w') as f:
f.write("""\
config:
verify_ssl: False
dirty: False
""")
spack.config._add_command_line_scopes(mutable_config, [str(tmpdir)])
def test_nested_override():
"""Ensure proper scope naming of nested overrides."""
base_name = spack.config.overrides_base_name
def _check_scopes(num_expected, debug_values):
scope_names = [s.name for s in spack.config.config.scopes.values() if
s.name.startswith(base_name)]
for i in range(num_expected):
name = '{0}{1}'.format(base_name, i)
assert name in scope_names
data = spack.config.config.get_config('config', name)
assert data['debug'] == debug_values[i]
# Check results from single and nested override
with spack.config.override('config:debug', True):
with spack.config.override('config:debug', False):
_check_scopes(2, [True, False])
_check_scopes(1, [True])
def test_alternate_override(monkeypatch):
"""Ensure proper scope naming of override when conflict present."""
base_name = spack.config.overrides_base_name
def _matching_scopes(regexpr):
return [spack.config.InternalConfigScope('{0}1'.format(base_name))]
# Check that the alternate naming works
monkeypatch.setattr(spack.config.config, 'matching_scopes',
_matching_scopes)
with spack.config.override('config:debug', False):
name = '{0}2'.format(base_name)
scope_names = [s.name for s in spack.config.config.scopes.values() if
s.name.startswith(base_name)]
assert name in scope_names
data = spack.config.config.get_config('config', name)
assert data['debug'] is False
def test_immutable_scope(tmpdir):
config_yaml = str(tmpdir.join('config.yaml'))
with open(config_yaml, 'w') as f:
f.write("""\
config:
install_tree:
root: dummy_tree_value
""")
scope = spack.config.ImmutableConfigScope('test', str(tmpdir))
data = scope.get_section('config')
assert data['config']['install_tree'] == {'root': 'dummy_tree_value'}
with pytest.raises(spack.config.ConfigError):
scope._write_section('config')
def test_single_file_scope(tmpdir, config):
env_yaml = str(tmpdir.join("env.yaml"))
with open(env_yaml, 'w') as f:
f.write("""\
env:
config:
verify_ssl: False
dirty: False
packages:
libelf:
compiler: [ 'gcc@4.5.3' ]
repos:
- /x/y/z
""")
scope = spack.config.SingleFileScope(
'env', env_yaml, spack.schema.env.schema, ['env'])
with spack.config.override(scope):
# from the single-file config
assert spack.config.get('config:verify_ssl') is False
assert spack.config.get('config:dirty') is False
assert spack.config.get('packages:libelf:compiler') == ['gcc@4.5.3']
# from the lower config scopes
assert spack.config.get('config:checksum') is True
assert spack.config.get('config:checksum') is True
assert spack.config.get('packages:externalmodule:buildable') is False
assert spack.config.get('repos') == [
'/x/y/z', '$spack/var/spack/repos/builtin']
def test_single_file_scope_section_override(tmpdir, config):
"""Check that individual config sections can be overridden in an
environment config. The config here primarily differs in that the
``packages`` section is intended to override all other scopes (using the
"::" syntax).
"""
env_yaml = str(tmpdir.join("env.yaml"))
with open(env_yaml, 'w') as f:
f.write("""\
env:
config:
verify_ssl: False
packages::
libelf:
compiler: [ 'gcc@4.5.3' ]
repos:
- /x/y/z
""")
scope = spack.config.SingleFileScope(
'env', env_yaml, spack.schema.env.schema, ['env'])
with spack.config.override(scope):
# from the single-file config
assert spack.config.get('config:verify_ssl') is False
assert spack.config.get('packages:libelf:compiler') == ['gcc@4.5.3']
# from the lower config scopes
assert spack.config.get('config:checksum') is True
assert not spack.config.get('packages:externalmodule')
assert spack.config.get('repos') == [
'/x/y/z', '$spack/var/spack/repos/builtin']
def test_write_empty_single_file_scope(tmpdir):
env_schema = spack.schema.env.schema
scope = spack.config.SingleFileScope(
'test', str(tmpdir.ensure('config.yaml')), env_schema, ['spack'])
scope._write_section('config')
# confirm we can write empty config
assert not scope.get_section('config')
def check_schema(name, file_contents):
"""Check a Spack YAML schema against some data"""
f = StringIO(file_contents)
data = syaml.load_config(f)
spack.config.validate(data, name)
def test_good_env_yaml(tmpdir):
check_schema(spack.schema.env.schema, """\
spack:
config:
verify_ssl: False
dirty: False
repos:
- ~/my/repo/location
mirrors:
remote: /foo/bar/baz
compilers:
- compiler:
spec: cce@2.1
operating_system: cnl
modules: []
paths:
cc: /path/to/cc
cxx: /path/to/cxx
fc: /path/to/fc
f77: /path/to/f77
""")
def test_bad_env_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.env.schema, """\
env:
foobar:
verify_ssl: False
dirty: False
""")
def test_bad_config_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.config.schema, """\
config:
verify_ssl: False
module_roots:
fmod: /some/fake/location
""")
def test_bad_mirrors_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.mirrors.schema, """\
mirrors:
local: True
""")
def test_bad_repos_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.repos.schema, """\
repos:
True
""")
def test_bad_compilers_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.compilers.schema, """\
compilers:
key_instead_of_list: 'value'
""")
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.compilers.schema, """\
compilers:
- shmompiler:
environment: /bad/value
""")
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.compilers.schema, """\
compilers:
- compiler:
fenfironfent: /bad/value
""")
@pytest.mark.regression('13045')
def test_dotkit_in_config_does_not_raise(
mock_low_high_config, write_config_file, capsys
):
write_config_file('config',
{'config': {'module_roots': {'dotkit': '/some/path'}}},
'high')
spack.main.print_setup_info('sh')
captured = capsys.readouterr()
# Check that we set the variables we expect and that
# we throw a deprecation warning without raising
assert '_sp_sys_type' in captured[0] # stdout
assert 'Warning' in captured[1] # stderr
def test_internal_config_section_override(mock_low_high_config,
write_config_file):
write_config_file('config', config_merge_list, 'low')
wanted_list = config_override_list['config']['build_stage:']
mock_low_high_config.push_scope(spack.config.InternalConfigScope
('high', {
'config:': {
'build_stage': wanted_list
}
}))
assert mock_low_high_config.get('config:build_stage') == wanted_list
def test_internal_config_dict_override(mock_low_high_config,
write_config_file):
write_config_file('config', config_merge_dict, 'low')
wanted_dict = config_override_dict['config']['info:']
mock_low_high_config.push_scope(spack.config.InternalConfigScope
('high', config_override_dict))
assert mock_low_high_config.get('config:info') == wanted_dict
def test_internal_config_list_override(mock_low_high_config,
write_config_file):
write_config_file('config', config_merge_list, 'low')
wanted_list = config_override_list['config']['build_stage:']
mock_low_high_config.push_scope(spack.config.InternalConfigScope
('high', config_override_list))
assert mock_low_high_config.get('config:build_stage') == wanted_list
def test_set_section_override(mock_low_high_config, write_config_file):
write_config_file('config', config_merge_list, 'low')
wanted_list = config_override_list['config']['build_stage:']
with spack.config.override('config::build_stage', wanted_list):
assert mock_low_high_config.get('config:build_stage') == wanted_list
assert config_merge_list['config']['build_stage'] == \
mock_low_high_config.get('config:build_stage')
def test_set_list_override(mock_low_high_config, write_config_file):
write_config_file('config', config_merge_list, 'low')
wanted_list = config_override_list['config']['build_stage:']
with spack.config.override('config:build_stage:', wanted_list):
assert wanted_list == mock_low_high_config.get('config:build_stage')
assert config_merge_list['config']['build_stage'] == \
mock_low_high_config.get('config:build_stage')
def test_set_dict_override(mock_low_high_config, write_config_file):
write_config_file('config', config_merge_dict, 'low')
wanted_dict = config_override_dict['config']['info:']
with spack.config.override('config:info:', wanted_dict):
assert wanted_dict == mock_low_high_config.get('config:info')
assert config_merge_dict['config']['info'] == \
mock_low_high_config.get('config:info')
def test_set_bad_path(config):
with pytest.raises(syaml.SpackYAMLError, match='Illegal leading'):
with spack.config.override(':bad:path', ''):
pass
def test_bad_path_double_override(config):
with pytest.raises(syaml.SpackYAMLError,
match='Meaningless second override'):
with spack.config.override('bad::double:override::directive', ''):
pass
|
lgpl-2.1
| -8,205,310,399,040,983,000
| 30.787966
| 79
| 0.584881
| false
| 3.534247
| true
| false
| false
|
tkzeng/molecular-design-toolkit
|
moldesign/geom/monitor.py
|
1
|
3798
|
# Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import moldesign as mdt
from . import toplevel
from . import constraints, grads, coords, setcoord
class Monitor(object):
def __init__(self, *atoms):
if len(atoms) != self.NUM_ATOMS:
raise ValueError('%s requires %d atoms, but %d passed' %
(type(self), self.NUM_ATOMS, len(atoms)))
self.atoms = atoms
@property
def value(self):
return self.GETTER(*self.atoms)
@value.setter
def value(self, val):
args = self.atoms + (val,)
self.SETTER(*args)
def gradient(self):
return grads._atom_grad_to_mol_grad(self.atoms, self.GRAD(*self.atoms))
@mdt.utils.kwargs_from(constraints.GeometryConstraint)
def constrain(self, **kwargs):
""" Constrain this coordinate.
This will add a new item to the parent molecule's constraint list.
Args:
**kwargs (dict): kwargs for constraints.GeometryConstraint
Returns:
constraints.GeometryConstraint: the constraint object
"""
c = self.CONSTRAINT(*self.atoms, **kwargs)
mol = self.atoms[0].molecule
for atom in mol.atoms[1:]:
if atom.molecule is not mol:
raise ValueError("Can't create constraint; atoms are not part of the same Molecule")
mol.constraints.append(c)
mol._reset_methods()
return c
def __call__(self, obj):
""" Calculate this value for the given trajectory
Args:
obj (mdt.Molecule or mdt.Trajectory): molecule or trajectory to measure
Returns:
moldesign.units.Quantity: this coordinate's value (for a molecule), or a list of values
(for a trajectory)
Note:
Atoms are identified by their index only; the atoms defined in the Monitor must have
the same indices as those in the passed object
"""
return self.GETTER(*(obj.atoms[a.index] for a in self.atoms))
def __str__(self):
return '%s: %s' % (type(self).__name__, self.value)
def __repr__(self):
return '<%s for atoms %s: %s>' % (type(self).__name__,
','.join(str(atom.index) for atom in self.atoms),
self.value)
@toplevel
class DistanceMonitor(Monitor):
NUM_ATOMS = 2
GETTER = staticmethod(coords.distance)
SETTER = staticmethod(setcoord.set_distance)
GRAD = staticmethod(grads.distance_gradient)
CONSTRAINT = constraints.DistanceConstraint
@toplevel
class AngleMonitor(Monitor):
NUM_ATOMS = 3
GETTER = staticmethod(coords.angle)
SETTER = staticmethod(setcoord.set_angle)
GRAD = staticmethod(grads.angle_gradient)
CONSTRAINT = constraints.AngleConstraint
@toplevel
class DihedralMonitor(Monitor):
def __init__(self, *atoms):
if len(atoms) in (1, 2):
atoms = coords._infer_dihedral(*atoms)
super(DihedralMonitor, self).__init__(*atoms)
NUM_ATOMS = 4
GETTER = staticmethod(coords.dihedral)
SETTER = staticmethod(setcoord.set_dihedral)
GRAD = staticmethod(grads.dihedral_gradient)
CONSTRAINT = constraints.DihedralConstraint
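# Hedged usage sketch (added comments; `mol` is an assumed, pre-existing
# mdt.Molecule and the unit shorthand `u` is an assumption, not defined here):
#
#   import moldesign.units as u
#   d = DistanceMonitor(mol.atoms[0], mol.atoms[1])
#   d.value                      # current distance between the two atoms
#   d.value = 1.5 * u.angstrom   # SETTER moves the atoms to the new distance
#   constraint = d.constrain()   # appends a DistanceConstraint to mol.constraints
#   d(trajectory)                # evaluates the same coordinate over a trajectory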
|
apache-2.0
| -4,536,990,009,628,288,500
| 32.026087
| 100
| 0.633491
| false
| 4.132753
| false
| false
| false
|
INM-6/Python-Module-of-the-Week
|
session01_Decorators/test_printtime_cm.py
|
1
|
1210
|
#!/usr/bin/env python3
import time, re, io, sys
import contextlib
def test_we_can_import_module():
import printtime_cm
def test_context_manager_exists():
import printtime_cm
printtime_cm.printtime_cm
def test_context_manager_can_be_used():
import printtime_cm
with printtime_cm.printtime_cm():
pass
def test_sleep_1():
import printtime_cm
tmp = io.StringIO()
with contextlib.redirect_stdout(tmp):
with printtime_cm.printtime_cm():
time.sleep(1)
out = tmp.getvalue()
re.match(r'calculations took 1\..*s', out, re.IGNORECASE)
def test_sleep_nested():
import printtime_cm
tmp = io.StringIO()
tmp2 = io.StringIO()
with contextlib.redirect_stdout(tmp):
with printtime_cm.printtime_cm():
with contextlib.redirect_stdout(tmp2):
with printtime_cm.printtime_cm():
time.sleep(1)
time.sleep(1)
out = tmp.getvalue()
out2 = tmp.getvalue()
re.match(r'calculations took 2\..*s', out, re.IGNORECASE)
re.match(r'calculations took 1\..*s', out2, re.IGNORECASE)
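# Hedged sketch (added comments): one printtime_cm implementation that would
# satisfy the expectations above -- the wording is inferred from the regexes,
# not taken from the exercise solution:
#
#   import time
#   from contextlib import contextmanager
#
#   @contextmanager
#   def printtime_cm():
#       start = time.time()
#       try:
#           yield
#       finally:
#           print('calculations took {:.2f}s'.format(time.time() - start))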
if __name__ == '__main__':
import pytest
pytest.main([__file__] + sys.argv[1:])
|
mit
| 7,859,297,349,880,794,000
| 24.208333
| 62
| 0.620661
| false
| 3.361111
| true
| false
| false
|
mozilla/peekaboo
|
peekaboo/main/tests/test_views.py
|
1
|
6973
|
# -*- coding: utf-8 -*-
import os
import datetime
import json
from nose.tools import eq_, ok_
from django.test import TestCase, Client
from django.conf import settings
from django.contrib.auth.models import User
from funfactory.urlresolvers import reverse, split_path
from peekaboo.main.models import Location, Visitor
class LocalizingClient(Client):
"""Client which prepends a locale so test requests can get through
LocaleURLMiddleware without resulting in a locale-prefix-adding 301.
Otherwise, we'd have to hard-code locales into our tests everywhere or
{mock out reverse() and make LocaleURLMiddleware not fire}.
"""
def request(self, **request):
"""Make a request, but prepend a locale if there isn't one already."""
# Fall back to defaults as in the superclass's implementation:
path = request.get('PATH_INFO', self.defaults.get('PATH_INFO', '/'))
locale, shortened = split_path(path)
if not locale:
request['PATH_INFO'] = '/%s/%s' % (settings.LANGUAGE_CODE,
shortened)
return super(LocalizingClient, self).request(**request)
class BaseTestCase(TestCase):
client_class = LocalizingClient
def _login(self, is_staff=True, is_superuser=False):
user, __ = User.objects.get_or_create(
username='shannon',
email='shannon@mozilla.com',
)
if is_superuser:
is_staff = True
user.is_staff = is_staff
user.is_superuser = is_superuser
user.set_password('secret')
user.save()
assert self.client.login(username='shannon', password='secret')
return user
class TestViews(BaseTestCase):
def test_contribute_json(self):
response = self.client.get('/contribute.json')
eq_(response.status_code, 200)
# Should be valid JSON, but it's a streaming content because
# it comes from django.views.static.serve
ok_(json.loads(''.join(response.streaming_content)))
eq_(response['Content-Type'], 'application/json')
def test_log_entries(self):
location = Location.objects.create(
name='Mountain View',
slug='mv',
timezone='US/Pacific',
)
url = reverse('main:log_entries', args=('mv',))
response = self.client.get(url)
eq_(response.status_code, 302)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(data['created'], [])
eq_(data['latest'], None)
# add an entry
visitor1 = Visitor.objects.create(
location=location,
first_name='Bill',
last_name='Gates',
job_title='Boss',
)
response = self.client.get(url)
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(len(data['created']), 1)
eq_(data['created'][0]['name'], 'Bill Gates')
eq_(data['created'][0]['job_title'], 'Boss')
eq_(data['created'][0]['id'], visitor1.pk)
ok_(isinstance(data['latest'], int))
# this number should be a Unix timestamp (seconds since the epoch)
latest_timestamp = data['latest']
latest = datetime.datetime.utcfromtimestamp(latest_timestamp)
# this won't contain a timezone but the hour and minute should
# be the same as the `visitor1`
eq_(
visitor1.created.strftime('%H:%M'),
latest.strftime('%H:%M')
)
# include this and nothing new should come
response = self.client.get(url, {
'latest': str(latest_timestamp),
})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(data['created'], [])
eq_(data['modified'], [])
eq_(data['latest'], None)
# let's add another, newer
visitor2 = Visitor.objects.create(
location=location,
first_name='Paul',
last_name='Allen',
)
visitor2.created += datetime.timedelta(seconds=1)
visitor2.save()
response = self.client.get(url, {
'latest': str(latest_timestamp),
})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(len(data['created']), 1)
eq_(data['created'][0]['name'], 'Paul Allen')
eq_(data['created'][0]['id'], visitor2.pk)
new_latest_timestamp = data['latest']
# this won't contain a timezone but the hour and minute should
# be the same as the `visitor1`
eq_(latest_timestamp + 1, new_latest_timestamp)
# ask one more time and nothing new should come back
previous_latest = data['latest']
response = self.client.get(url, {
'latest': previous_latest,
})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(len(data['created']), 0)
eq_(len(data['modified']), 0)
# let's modify the first visitor
visitor1.job_title = 'Philantropist'
visitor1.modified += datetime.timedelta(seconds=10)
visitor1.save()
response = self.client.get(url, {
'latest': previous_latest,
})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(len(data['modified']), 1)
previous_latest_timestamp = new_latest_timestamp
new_latest_timestamp = data['latest']
eq_(
previous_latest_timestamp + 10 - 1,
new_latest_timestamp
)
response = self.client.get(url, {
'latest': str(new_latest_timestamp),
})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(data['created'], [])
eq_(data['modified'], [])
eq_(data['latest'], None)
def test_eventbrite_upload(self):
url = reverse('main:csv_upload')
response = self.client.get(url)
eq_(response.status_code, 302)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
location = Location.objects.create(
name='Berlin',
slug='berlin',
timezone='Europe/Berlin',
)
_here = os.path.dirname(__file__)
response = self.client.post(url, {
'file': open(os.path.join(_here, 'sample-eventbrite.csv')),
'format': 'eventbrite',
'location': location.id,
'date': '2015-06-16 13:00:00', # Europe summer time, is +2h
})
visitors = Visitor.objects.filter(location=location)
first_names = [x.first_name for x in visitors.order_by('first_name')]
eq_(first_names, [u'Nicolai Froehlich', u'Södan'])
first_created = [x.created for x in visitors][0]
eq_(first_created.strftime('%H:%M %Z'), '11:00 UTC')
|
mpl-2.0
| 3,321,931,372,646,653,000
| 32.681159
| 78
| 0.576736
| false
| 3.954623
| true
| false
| false
|
pombredanne/datanommer
|
datanommer.commands/setup.py
|
1
|
1930
|
# This file is a part of datanommer, a message sink for fedmsg.
# Copyright (C) 2014, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
import sys
f = open('README.rst')
long_description = f.read().strip()
long_description = long_description.split('split here', 1)[1]
f.close()
version = '0.4.6'
setup(
name='datanommer.commands',
version=version,
description="Console comands for datanommer",
long_description=long_description,
author='Ralph Bean',
author_email='rbean@redhat.com',
url='http://github.com/fedora-infra/datanommer',
license='GPLv3+',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages=['datanommer'],
include_package_data=True,
zip_safe=False,
install_requires=[
"datanommer.models",
"fedmsg",
],
entry_points={
'console_scripts': (
'datanommer-create-db=datanommer.commands:create',
'datanommer-dump=datanommer.commands:dump',
'datanommer-stats=datanommer.commands:stats',
'datanommer-latest=datanommer.commands:latest',
),
},
tests_require=[
"nose",
"mock",
"fedmsg_meta_fedora_infrastructure",
"freezegun",
],
test_suite='nose.collector',
)
|
gpl-3.0
| -8,626,820,665,828,198,000
| 32.275862
| 79
| 0.678238
| false
| 3.614232
| false
| false
| false
|
dkdfirefly/speaker_project
|
code/separateLeadStereo/separateLeadStereoParam.py
|
1
|
41756
|
#!/usr/bin/python
# copyright (C) 2011 Jean-Louis Durrieu
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import SIMM
#import scikits.audiolab
import scipy
#if np.double(scipy.__version__[:3]) < 0.8:
# raise ImportError('Version of scipy is %s, to read wavfile, one needs >= 0.8' %(scipy.__version__))
import scipy.io.wavfile as wav
import os
import sys
from tracking import viterbiTrackingArray
# SOME USEFUL, INSTRUMENTAL, FUNCTIONS
def db(val):
"""
db(positiveValue)
Returns the decibel value of the input positiveValue
"""
return 10 * np.log10(val)
def ISDistortion(X,Y):
"""
value = ISDistortion(X, Y)
Returns the value of the Itakura-Saito (IS) divergence between
matrix X and matrix Y. X and Y should be two NumPy arrays with
same dimension.
"""
return sum((-np.log(X / Y) + (X / Y) - 1))
# DEFINING SOME WINDOW FUNCTIONS
def sinebell(lengthWindow):
"""
window = sinebell(lengthWindow)
Computes a "sinebell" window function of length L=lengthWindow
The formula is:
window(t) = sin(pi * t / L), t = 0..L-1
"""
window = np.sin((np.pi * (np.arange(lengthWindow))) \
/ (1.0 * lengthWindow))
return window
def hann(args):
"""
window = hann(args)
Computes a Hann window, with NumPy's function hanning(args).
"""
return np.hanning(args)
# FUNCTIONS FOR TIME-FREQUENCY REPRESENTATION
def stft(data, window=sinebell(2048), hopsize=256.0, nfft=2048.0, \
fs=44100.0):
"""
X, F, N = stft(data, window=sinebell(2048), hopsize=256.0,
nfft=2048.0, fs=44100)
Computes the short time Fourier transform (STFT) of data.
Inputs:
data : one-dimensional time-series to be
analyzed
window=sinebell(2048) : analysis window
hopsize=256.0 : hopsize for the analysis
nfft=2048.0 : number of points for the Fourier
computation (the user has to provide an
even number)
fs=44100.0 : sampling rate of the signal
Outputs:
X : STFT of data
F : values of frequencies at each Fourier
bins
N : central time at the middle of each
analysis window
"""
# window defines the size of the analysis windows
lengthWindow = window.size
# !!! adding zeros to the beginning of data, such that the first
# window is centered on the first sample of data
data = np.concatenate((np.zeros(lengthWindow / 2.0),data))
lengthData = data.size
# adding one window for the last frame (same reason as for the
# first frame)
numberFrames = np.ceil((lengthData - lengthWindow) / hopsize \
+ 1) + 1
newLengthData = (numberFrames - 1) * hopsize + lengthWindow
# zero-padding data such that it holds an exact number of frames
data = np.concatenate((data, np.zeros([newLengthData - lengthData])))
# the output STFT has nfft/2+1 rows. Note that nfft has to be an
# even number (and a power of 2 for the fft to be fast)
numberFrequencies = nfft / 2.0 + 1
STFT = np.zeros([numberFrequencies, numberFrames], dtype=complex)
for n in np.arange(numberFrames):
beginFrame = n * hopsize
endFrame = beginFrame + lengthWindow
frameToProcess = window * data[beginFrame:endFrame]
STFT[:,n] = np.fft.rfft(frameToProcess, nfft);
F = np.arange(numberFrequencies) / nfft * fs
N = np.arange(numberFrames) * hopsize / fs
return STFT, F, N
def istft(X, window=sinebell(2048), hopsize=256.0, nfft=2048.0):
"""
data = istft(X, window=sinebell(2048), hopsize=256.0, nfft=2048.0)
Computes an inverse of the short time Fourier transform (STFT),
here, the overlap-add procedure is implemented.
Inputs:
X : STFT of the signal, to be "inverted"
window=sinebell(2048) : synthesis window
(should be the "complementary" window
for the analysis window)
hopsize=256.0 : hopsize for the analysis
nfft=2048.0 : number of points for the Fourier
computation
(the user has to provide an even number)
Outputs:
data : time series corresponding to the given
STFT the first half-window is removed,
complying with the STFT computation
given in the function 'stft'
"""
lengthWindow = np.array(window.size)
numberFrequencies, numberFrames = np.array(X.shape)
lengthData = hopsize * (numberFrames - 1) + lengthWindow
data = np.zeros(lengthData)
for n in np.arange(numberFrames):
beginFrame = n * hopsize
endFrame = beginFrame + lengthWindow
frameTMP = np.fft.irfft(X[:,n], nfft)
frameTMP = frameTMP[:lengthWindow]
data[beginFrame:endFrame] = data[beginFrame:endFrame] \
+ window * frameTMP
# remove the extra bit before data that was - supposedly - added
# in the stft computation:
data = data[(lengthWindow / 2.0):]
return data
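# Added usage sketch (comments only; 'mixture.wav' is a placeholder and the
# signal is assumed to be mono):
#
#   fs, data = wav.read('mixture.wav')   # scipy.io.wavfile, imported above as wav
#   X, F, N = stft(data, window=sinebell(2048), hopsize=256.0,
#                  nfft=2048.0, fs=np.double(fs))
#   resynth = istft(X, window=sinebell(2048), hopsize=256.0, nfft=2048.0)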
# DEFINING THE FUNCTIONS TO CREATE THE 'BASIS' WF0
def generate_WF0_chirped(minF0, maxF0, Fs, Nfft=2048, stepNotes=4, \
lengthWindow=2048, Ot=0.5, perF0=2, \
depthChirpInSemiTone=0.5, loadWF0=True,
analysisWindow='hanning'):
"""
F0Table, WF0 = generate_WF0_chirped(minF0, maxF0, Fs, Nfft=2048,
stepNotes=4, lengthWindow=2048,
Ot=0.5, perF0=2,
depthChirpInSemiTone=0.5)
Generates a 'basis' matrix for the source part WF0, using the
source model KLGLOTT88, with the following I/O arguments:
Inputs:
minF0 the minimum value for the fundamental
frequency (F0)
maxF0 the maximum value for F0
Fs the desired sampling rate
Nfft the number of bins to compute the Fourier
transform
stepNotes the number of F0 per semitone
lengthWindow the size of the window for the Fourier
transform
Ot the glottal opening coefficient for
KLGLOTT88
perF0 the number of chirps considered per F0
value
depthChirpInSemiTone the maximum value, in semitone, of the
allowed chirp per F0
Outputs:
F0Table the vector containing the values of the fundamental
frequencies in Hertz (Hz) corresponding to the
harmonic combs in WF0, i.e. the columns of WF0
WF0 the basis matrix, where each column is a harmonic comb
generated by KLGLOTT88 (with a sinusoidal model, then
transformed into the spectral domain)
"""
# generating a filename to keep data:
filename = str('').join(['wf0_',
'_minF0-', str(minF0),
'_maxF0-', str(maxF0),
'_Fs-', str(Fs),
'_Nfft-', str(Nfft),
'_stepNotes-', str(stepNotes),
'_Ot-', str(Ot),
'_perF0-', str(perF0),
'_depthChirp-', str(depthChirpInSemiTone),
'_analysisWindow-', analysisWindow,
'.npz'])
if os.path.isfile(filename) and loadWF0:
struc = np.load(filename)
return struc['F0Table'], struc['WF0']
# converting to double arrays:
minF0=np.double(minF0)
maxF0=np.double(maxF0)
Fs=np.double(Fs)
stepNotes=np.double(stepNotes)
# computing the F0 table:
numberOfF0 = np.ceil(12.0 * stepNotes * np.log2(maxF0 / minF0)) + 1
F0Table=minF0 * (2 ** (np.arange(numberOfF0,dtype=np.double) \
/ (12 * stepNotes)))
numberElementsInWF0 = numberOfF0 * perF0
# computing the desired WF0 matrix
WF0 = np.zeros([Nfft, numberElementsInWF0],dtype=np.double)
for fundamentalFrequency in np.arange(numberOfF0):
odgd, odgdSpec = \
generate_ODGD_spec(F0Table[fundamentalFrequency], Fs, \
Ot=Ot, lengthOdgd=lengthWindow, \
Nfft=Nfft, t0=0.0,\
analysisWindowType=analysisWindow) # 20100924 trying with hann window
WF0[:,fundamentalFrequency * perF0] = np.abs(odgdSpec) ** 2
for chirpNumber in np.arange(perF0 - 1):
F2 = F0Table[fundamentalFrequency] \
* (2 ** ((chirpNumber + 1.0) * depthChirpInSemiTone \
/ (12.0 * (perF0 - 1.0))))
# F0 is the mean of F1 and F2.
F1 = 2.0 * F0Table[fundamentalFrequency] - F2
odgd, odgdSpec = \
generate_ODGD_spec_chirped(F1, F2, Fs, \
Ot=Ot, \
lengthOdgd=lengthWindow, \
Nfft=Nfft, t0=0.0)
WF0[:,fundamentalFrequency * perF0 + chirpNumber + 1] = \
np.abs(odgdSpec) ** 2
np.savez(filename, F0Table=F0Table, WF0=WF0)
return F0Table, WF0
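# Illustrative call sketch (not part of the original script), mirroring the
# parameters that main() below passes to generate_WF0_chirped(). F0Table holds
# the fundamental frequencies in Hz, and WF0 has shape
# (Nfft, F0Table.size * perF0) before main() trims it to F rows. loadWF0=False
# forces recomputation instead of reading a cached .npz file (the result is
# still saved to disk by the function).
#
#   F0Table, WF0 = generate_WF0_chirped(minF0=100, maxF0=800, Fs=44100.0,
#                                       Nfft=2048, stepNotes=20,
#                                       lengthWindow=2048, Ot=0.25,
#                                       perF0=1, depthChirpInSemiTone=.15,
#                                       loadWF0=False,
#                                       analysisWindow='sinebell')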
def generate_ODGD_spec(F0, Fs, lengthOdgd=2048, Nfft=2048, Ot=0.5, \
t0=0.0, analysisWindowType='sinebell'):
"""
generateODGDspec:
generates a waveform ODGD and the corresponding spectrum,
using as analysis window the -optional- window given as
argument.
"""
# converting input to double:
F0 = np.double(F0)
Fs = np.double(Fs)
Ot = np.double(Ot)
t0 = np.double(t0)
# compute analysis window of given type:
if analysisWindowType=='sinebell':
analysisWindow = sinebell(lengthOdgd)
else:
        if analysisWindowType=='hanning' or \
           analysisWindowType=='hann':
analysisWindow = hann(lengthOdgd)
# maximum number of partials in the spectral comb:
partialMax = np.floor((Fs / 2) / F0)
# Frequency numbers of the partials:
frequency_numbers = np.arange(1,partialMax + 1)
# intermediate value
temp_array = 1j * 2.0 * np.pi * frequency_numbers * Ot
# compute the amplitudes for each of the frequency peaks:
amplitudes = F0 * 27 / 4 \
* (np.exp(-temp_array) \
+ (2 * (1 + 2 * np.exp(-temp_array)) / temp_array) \
- (6 * (1 - np.exp(-temp_array)) \
/ (temp_array ** 2))) \
/ temp_array
# Time stamps for the time domain ODGD
timeStamps = np.arange(lengthOdgd) / Fs + t0 / F0
# Time domain odgd:
odgd = np.exp(np.outer(2.0 * 1j * np.pi * F0 * frequency_numbers, \
timeStamps)) \
* np.outer(amplitudes, np.ones(lengthOdgd))
odgd = np.sum(odgd, axis=0)
# spectrum:
odgdSpectrum = np.fft.fft(np.real(odgd * analysisWindow), n=Nfft)
return odgd, odgdSpectrum
def generate_ODGD_spec_chirped(F1, F2, Fs, lengthOdgd=2048, Nfft=2048, \
Ot=0.5, t0=0.0, \
analysisWindowType='sinebell'):
"""
generateODGDspecChirped:
generates a waveform ODGD and the corresponding spectrum,
using as analysis window the -optional- window given as
argument.
"""
# converting input to double:
F1 = np.double(F1)
F2 = np.double(F2)
F0 = np.double(F1 + F2) / 2.0
Fs = np.double(Fs)
Ot = np.double(Ot)
t0 = np.double(t0)
# compute analysis window of given type:
if analysisWindowType == 'sinebell':
analysisWindow = sinebell(lengthOdgd)
else:
if analysisWindowType == 'hanning' or \
analysisWindowType == 'hann':
analysisWindow = hann(lengthOdgd)
# maximum number of partials in the spectral comb:
    partialMax = np.floor((Fs / 2) / np.maximum(F1, F2))
# Frequency numbers of the partials:
frequency_numbers = np.arange(1,partialMax + 1)
# intermediate value
temp_array = 1j * 2.0 * np.pi * frequency_numbers * Ot
# compute the amplitudes for each of the frequency peaks:
amplitudes = F0 * 27 / 4 * \
(np.exp(-temp_array) \
+ (2 * (1 + 2 * np.exp(-temp_array)) / temp_array) \
- (6 * (1 - np.exp(-temp_array)) \
/ (temp_array ** 2))) \
/ temp_array
# Time stamps for the time domain ODGD
timeStamps = np.arange(lengthOdgd) / Fs + t0 / F0
# Time domain odgd:
odgd = np.exp(2.0 * 1j * np.pi \
* (np.outer(F1 * frequency_numbers,timeStamps) \
+ np.outer((F2 - F1) \
* frequency_numbers,timeStamps ** 2) \
/ (2 * lengthOdgd / Fs))) \
* np.outer(amplitudes,np.ones(lengthOdgd))
odgd = np.sum(odgd,axis=0)
# spectrum:
    odgdSpectrum = np.fft.fft(np.real(odgd * analysisWindow), n=Nfft)
return odgd, odgdSpectrum
def generateHannBasis(numberFrequencyBins, sizeOfFourier, Fs, \
frequencyScale='linear', numberOfBasis=20, \
overlap=.75):
isScaleRecognized = False
if frequencyScale == 'linear':
# number of windows generated:
numberOfWindowsForUnit = np.ceil(1.0 / (1.0 - overlap))
# recomputing the overlap to exactly fit the entire
# number of windows:
overlap = 1.0 - 1.0 / np.double(numberOfWindowsForUnit)
# length of the sine window - that is also to say: bandwidth
# of the sine window:
lengthSineWindow = np.ceil(numberFrequencyBins \
/ ((1.0 - overlap) \
* (numberOfBasis - 1) + 1 \
- 2.0 * overlap))
# even window length, for convenience:
lengthSineWindow = 2.0 * np.floor(lengthSineWindow / 2.0)
# for later compatibility with other frequency scales:
mappingFrequency = np.arange(numberFrequencyBins)
# size of the "big" window
sizeBigWindow = 2.0 * numberFrequencyBins
# centers for each window
## the first window is centered at, in number of window:
firstWindowCenter = -numberOfWindowsForUnit + 1
## and the last is at
lastWindowCenter = numberOfBasis - numberOfWindowsForUnit + 1
## center positions in number of frequency bins
sineCenters = np.round(\
np.arange(firstWindowCenter, lastWindowCenter) \
* (1 - overlap) * np.double(lengthSineWindow) \
+ lengthSineWindow / 2.0)
# For future purpose: to use different frequency scales
isScaleRecognized = True
# For frequency scale in logarithm (such as ERB scales)
if frequencyScale == 'log':
isScaleRecognized = False
# checking whether the required scale is recognized
if not(isScaleRecognized):
print "The desired feature for frequencyScale is not recognized yet..."
return 0
# the shape of one window:
prototypeSineWindow = hann(lengthSineWindow)
# adding zeroes on both sides, such that we do not need to check
# for boundaries
bigWindow = np.zeros([sizeBigWindow * 2, 1])
bigWindow[(sizeBigWindow - lengthSineWindow / 2.0):\
(sizeBigWindow + lengthSineWindow / 2.0)] \
= np.vstack(prototypeSineWindow)
WGAMMA = np.zeros([numberFrequencyBins, numberOfBasis])
for p in np.arange(numberOfBasis):
WGAMMA[:, p] = np.hstack(bigWindow[np.int32(mappingFrequency \
- sineCenters[p] \
+ sizeBigWindow)])
return WGAMMA
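# Illustrative note (not part of the original script): generateHannBasis()
# returns one overlapping Hann-shaped elementary filter per column, so for the
# linear frequency scale the result has shape (numberFrequencyBins, numberOfBasis).
# The parameter values below are examples only.
#
#   WGAMMA = generateHannBasis(numberFrequencyBins=1025, sizeOfFourier=2048,
#                              Fs=44100.0, frequencyScale='linear',
#                              numberOfBasis=30, overlap=.75)
#   # WGAMMA.shape == (1025, 30)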
# MAIN FUNCTION, FOR DEFAULT BEHAVIOUR IF THE SCRIPT IS "LAUNCHED"
def main():
import optparse
usage = "usage: %prog [options] inputAudioFile"
parser = optparse.OptionParser(usage)
# Name of the output files:
parser.add_option("-v", "--vocal-output-file",
dest="voc_output_file", type="string",
help="name of the audio output file for the estimated\n"\
"solo (vocal) part",
default="estimated_solo.wav")
parser.add_option("-m", "--music-output-file",
dest="mus_output_file", type="string",
help="name of the audio output file for the estimated\n"\
"music part",
default="estimated_music.wav")
parser.add_option("-p", "--pitch-output-file",
dest="pitch_output_file", type="string",
help="name of the output file for the estimated pitches",
default="pitches.txt")
# Some more optional options:
parser.add_option("-d", "--with-display", dest="displayEvolution",
action="store_true",help="display the figures",
default=False)
parser.add_option("-q", "--quiet", dest="verbose",
action="store_false",
help="use to quiet all output verbose",
default=True)
parser.add_option("--nb-iterations", dest="nbiter",
help="number of iterations", type="int",
default=100)
parser.add_option("--window-size", dest="windowSize", type="float",
default=0.04644,help="size of analysis windows, in s.")
parser.add_option("--Fourier-size", dest="fourierSize", type="int",
default=2048,
help="size of Fourier transforms, "\
"in samples.")
parser.add_option("--hopsize", dest="hopsize", type="float",
default=0.0058,
help="size of the hop between analysis windows, in s.")
parser.add_option("--nb-accElements", dest="R", type="float",
default=40.0,
help="number of elements for the accompaniment.")
parser.add_option("--with-melody", dest="melody", type="string",
default=None,
help="provide the melody in a file named MELODY, "\
"with at each line: <time (s)><F0 (Hz)>.")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments, use option -h for help.")
displayEvolution = options.displayEvolution
if displayEvolution:
import matplotlib.pyplot as plt
import imageMatlab
## plt.rc('text', usetex=True)
plt.rc('image',cmap='jet') ## gray_r
plt.ion()
# Compulsory option: name of the input file:
inputAudioFile = args[0]
fs, data = wav.read(inputAudioFile)
# data = np.double(data) / 32768.0 # makes data vary from -1 to 1
scaleData = 1.2 * data.max() # to rescale the data.
dataType = data.dtype
data = np.double(data) / scaleData # makes data vary from -1 to 1
tmp = np.zeros((data.size, 2))
tmp[:,0] = data
tmp[:,1] = data
data = tmp
    if data.shape[0] == data.size: # data is mono (single channel)
print "The audio file is not stereo. Try separateLead.py instead."
raise ValueError("number of dimensions of the input not 2")
if data.shape[1] != 2:
print "The data is multichannel, but not stereo... \n"
print "Unfortunately this program does not scale well. Data is \n"
print "reduced to its 2 first channels.\n"
data = data[:,0:2]
# Processing the options:
windowSizeInSamples = np.round(options.windowSize * fs)
hopsize = np.round(options.hopsize * fs)
NFT = options.fourierSize
niter = options.nbiter
R = options.R
if options.verbose:
print "Some parameter settings:"
print " Size of analysis windows: ", windowSizeInSamples
print " Hopsize: ", hopsize
print " Size of Fourier transforms: ", NFT
print " Number of iterations to be done: ", niter
print " Number of elements in WM: ", R
XR, F, N = stft(data[:,0], fs=fs, hopsize=hopsize,
window=sinebell(windowSizeInSamples), nfft=NFT)
XL, F, N = stft(data[:,1], fs=fs, hopsize=hopsize,
window=sinebell(windowSizeInSamples), nfft=NFT)
# SX is the power spectrogram:
## SXR = np.maximum(np.abs(XR) ** 2, 10 ** -8)
## SXL = np.maximum(np.abs(XL) ** 2, 10 ** -8)
SXR = np.abs(XR) ** 2
SXL = np.abs(XL) ** 2
del data, F, N
# TODO: also process these as options:
eps = 10 ** -9
minF0 = 100
maxF0 = 800
Fs = fs
F, N = SXR.shape
stepNotes = 20 # this is the number of F0s within one semitone
# until 17/09/2010 : stepNotes = 20
# 17/09/2010 : trying stepNotes = 8, checking for less artefacts
K = 10 # number of spectral shapes for the filter part
# R = 40 # number of spectral shapes for the accompaniment
P = 30 # number of elements in dictionary of smooth filters
chirpPerF0 = 1 # number of chirped spectral shapes between each F0
# this feature should be further studied before
# we find a good way of doing that.
# Create the harmonic combs, for each F0 between minF0 and maxF0:
F0Table, WF0 = \
generate_WF0_chirped(minF0, maxF0, Fs, Nfft=NFT, \
stepNotes=stepNotes, \
lengthWindow=windowSizeInSamples, Ot=0.25, \
perF0=chirpPerF0, \
depthChirpInSemiTone=.15, loadWF0=True,\
analysisWindow='sinebell')
WF0 = WF0[0:F, :] # ensure same size as SX
NF0 = F0Table.size # number of harmonic combs
# Normalization:
WF0 = WF0 / np.outer(np.ones(F), np.amax(WF0, axis=0))
# Create the dictionary of smooth filters, for the filter part of
    # the lead instrument:
WGAMMA = generateHannBasis(F, NFT, Fs=fs, frequencyScale='linear', \
numberOfBasis=P, overlap=.75)
if displayEvolution:
plt.figure(1);plt.clf()
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel(r'Frame number $n$', fontsize=16)
plt.ylabel(r'Leading source number $u$', fontsize=16)
plt.ion()
# plt.show()
## the following seems superfluous if mpl's backend is macosx...
## raw_input("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"\
## "!! Press Return to resume the program. !!\n"\
## "!! Be sure that the figure has been !!\n"\
## "!! already displayed, so that the !!\n"\
## "!! evolution of HF0 will be visible. !!\n"\
## "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
if options.melody is None:
## section to estimate the melody, on monophonic algo:
SX = np.maximum(np.abs((XR + XL) / 2.0) ** 2, 10 ** -8)
# First round of parameter estimation:
HGAMMA, HPHI, HF0, HM, WM, recoError1 = SIMM.SIMM(
# the data to be fitted to:
SX,
# the basis matrices for the spectral combs
WF0,
# and for the elementary filters:
WGAMMA,
# number of desired filters, accompaniment spectra:
numberOfFilters=K, numberOfAccompanimentSpectralShapes=R,
# putting only 2 elements in accompaniment for a start...
# if any, initial amplitude matrices for
HGAMMA0=None, HPHI0=None,
HF00=None,
WM0=None, HM0=None,
# Some more optional arguments, to control the "convergence"
# of the algo
numberOfIterations=niter, updateRulePower=1.,
stepNotes=stepNotes,
lambdaHF0 = 0.0 / (1.0 * SX.max()), alphaHF0=0.9,
verbose=options.verbose, displayEvolution=displayEvolution)
if displayEvolution:
h2 = plt.figure(2);plt.clf();
imageMatlab.imageM(20 * np.log10(HF0))
matMax = (20 * np.log10(HF0)).max()
matMed = np.median(20 * np.log10(HF0))
plt.clim([matMed - 100, matMax])
# Viterbi decoding to estimate the predominant fundamental
# frequency line
scale = 1.0
transitions = np.exp(-np.floor(np.arange(0,NF0) / stepNotes) * scale)
cutoffnote = 2 * 5 * stepNotes
transitions[cutoffnote:] = transitions[cutoffnote - 1]
transitionMatrixF0 = np.zeros([NF0 + 1, NF0 + 1]) # toeplitz matrix
b = np.arange(NF0)
transitionMatrixF0[0:NF0, 0:NF0] = \
transitions[\
np.array(np.abs(np.outer(np.ones(NF0), b) \
- np.outer(b, np.ones(NF0))), dtype=int)]
pf_0 = transitions[cutoffnote - 1] * 10 ** (-90)
p0_0 = transitions[cutoffnote - 1] * 10 ** (-100)
p0_f = transitions[cutoffnote - 1] * 10 ** (-80)
transitionMatrixF0[0:NF0, NF0] = pf_0
transitionMatrixF0[NF0, 0:NF0] = p0_f
transitionMatrixF0[NF0, NF0] = p0_0
sumTransitionMatrixF0 = np.sum(transitionMatrixF0, axis=1)
transitionMatrixF0 = transitionMatrixF0 \
/ np.outer(sumTransitionMatrixF0, \
np.ones(NF0 + 1))
priorProbabilities = 1 / (NF0 + 1.0) * np.ones([NF0 + 1])
logHF0 = np.zeros([NF0 + 1, N])
normHF0 = np.amax(HF0, axis=0)
barHF0 = np.array(HF0)
logHF0[0:NF0, :] = np.log(barHF0)
logHF0[0:NF0, normHF0==0] = np.amin(logHF0[logHF0>-np.Inf])
logHF0[NF0, :] = np.maximum(np.amin(logHF0[logHF0>-np.Inf]),-100)
indexBestPath = viterbiTrackingArray(\
logHF0, np.log(priorProbabilities),
np.log(transitionMatrixF0), verbose=options.verbose)
if displayEvolution:
h2.hold(True)
plt.plot(indexBestPath, '-b')
h2.hold(False)
plt.axis('tight')
## raw_input("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"\
## "!! Press Return to resume the program !!\n"\
## "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
del logHF0
# detection of silences:
HF00 = np.zeros([NF0 * chirpPerF0, N])
scopeAllowedHF0 = 2.0 / 1.0
dim1index = np.array(\
np.maximum(\
np.minimum(\
np.outer(chirpPerF0 * indexBestPath,
np.ones(chirpPerF0 \
* (2 \
* np.floor(stepNotes / scopeAllowedHF0) \
+ 1))) \
+ np.outer(np.ones(N),
np.arange(-chirpPerF0 \
* np.floor(stepNotes / scopeAllowedHF0),
chirpPerF0 \
* (np.floor(stepNotes / scopeAllowedHF0) \
+ 1))),
chirpPerF0 * NF0 - 1),
0),
dtype=int).reshape(1, N * chirpPerF0 \
* (2 * np.floor(stepNotes / scopeAllowedHF0) \
+ 1))
dim2index = np.outer(np.arange(N),
np.ones(chirpPerF0 \
* (2 * np.floor(stepNotes \
/ scopeAllowedHF0) + 1), \
dtype=int)\
).reshape(1, N * chirpPerF0 \
* (2 * np.floor(stepNotes \
/ scopeAllowedHF0) \
+ 1))
HF00[dim1index, dim2index] = HF0[dim1index, dim2index]# HF0.max()
HF00[:, indexBestPath == (NF0 - 1)] = 0.0
HF00[:, indexBestPath == 0] = 0.0
thres_energy = 0.000584
SF0 = np.maximum(np.dot(WF0, HF00), eps)
SPHI = np.maximum(np.dot(WGAMMA, np.dot(HGAMMA, HPHI)), eps)
SM = np.maximum(np.dot(WM, HM), eps)
hatSX = np.maximum(SPHI * SF0 + SM, eps)
energyMel = np.sum(np.abs((SPHI * SF0)/hatSX * \
(XR+XL) * 0.5) \
** 2, axis=0)
energyMelSorted = np.sort(energyMel)
energyMelCumul = np.cumsum(energyMelSorted)
energyMelCumulNorm = energyMelCumul / max(energyMelCumul[-1], eps)
# normalized to the maximum of energy:
# expressed in 0.01 times the percentage
ind_999 = np.nonzero(energyMelCumulNorm>thres_energy)[0][0]
if ind_999 is None:
ind_999 = N
melNotPresent = (energyMel <= energyMelCumulNorm[ind_999])
indexBestPath[melNotPresent] = 0
else:
## take the provided melody line:
# load melody from file:
melodyFromFile = np.loadtxt(options.melody)
sizeProvidedMel = melodyFromFile.shape
if len(sizeProvidedMel) == 1:
print "The melody should be provided as <Time (s)><F0 (Hz)>."
raise ValueError("Bad melody format")
melTimeStamps = melodyFromFile[:,0] # + 1024 / np.double(Fs)
melFreqHz = melodyFromFile[:,1]
if minF0 > melFreqHz[melFreqHz>40.0].min() or maxF0 < melFreqHz.max():
minF0 = melFreqHz[melFreqHz>40.0].min() *.97
maxF0 = np.maximum(melFreqHz.max()*1.03, 2*minF0 * 1.03)
print "Recomputing the source basis for "
print "minF0 = ", minF0, "Hz and maxF0 = ", maxF0, "Hz."
# Create the harmonic combs, for each F0 between minF0 and maxF0:
F0Table, WF0 = \
generate_WF0_chirped(minF0, maxF0, Fs, Nfft=NFT, \
stepNotes=stepNotes, \
lengthWindow=windowSizeInSamples,
Ot=0.25, \
perF0=chirpPerF0, \
depthChirpInSemiTone=.15)
WF0 = WF0[0:F, :] # ensure same size as SX
NF0 = F0Table.size # number of harmonic combs
# Normalization:
WF0 = WF0 / np.outer(np.ones(F), np.amax(WF0, axis=0))
sigTimeStamps = np.arange(N) * hopsize / np.double(Fs)
distMatTimeStamps = np.abs(np.outer(np.ones(sizeProvidedMel[0]),
sigTimeStamps) -
np.outer(melTimeStamps, np.ones(N)))
minDistTimeStamps = distMatTimeStamps.argmin(axis=0)
f0BestPath = melFreqHz[minDistTimeStamps]
distMatF0 = np.abs(np.outer(np.ones(NF0), f0BestPath) -
np.outer(F0Table, np.ones(N)))
indexBestPath = distMatF0.argmin(axis=0)
# setting silences to 0, with tolerance = 1/2 window length
indexBestPath[distMatTimeStamps[minDistTimeStamps,range(N)] >= \
0.5 * options.windowSize] = 0
indexBestPath[f0BestPath<=0] = 0
freqMelody = F0Table[np.array(indexBestPath,dtype=int)]
freqMelody[indexBestPath==0] = - freqMelody[indexBestPath==0]
np.savetxt(options.pitch_output_file,
np.array([np.arange(N) * hopsize / np.double(Fs),
freqMelody]).T)
# Second round of parameter estimation, with specific
# initial HF00:
HF00 = np.zeros([NF0 * chirpPerF0, N])
scopeAllowedHF0 = 2.0 / 1.0
# indexes for HF00:
# TODO: reprogram this with a 'where'?...
dim1index = np.array(\
np.maximum(\
np.minimum(\
np.outer(chirpPerF0 * indexBestPath,
np.ones(chirpPerF0 \
* (2 \
* np.floor(stepNotes / scopeAllowedHF0) \
+ 1))) \
+ np.outer(np.ones(N),
np.arange(-chirpPerF0 \
* np.floor(stepNotes / scopeAllowedHF0),
chirpPerF0 \
* (np.floor(stepNotes / scopeAllowedHF0) \
+ 1))),
chirpPerF0 * NF0 - 1),
0),
dtype=int)
dim1index = dim1index[indexBestPath!=0,:]
## dim1index = dim1index.reshape(1, N * chirpPerF0 \
## * (2 * np.floor(stepNotes / scopeAllowedHF0) \
## + 1))
dim1index = dim1index.reshape(1,dim1index.size)
dim2index = np.outer(np.arange(N),
np.ones(chirpPerF0 \
* (2 * np.floor(stepNotes \
/ scopeAllowedHF0) + 1), \
dtype=int)\
)
dim2index = dim2index[indexBestPath!=0,:]
dim2index = dim2index.reshape(1,dim2index.size)
## dim2index.reshape(1, N * chirpPerF0 \
## * (2 * np.floor(stepNotes \
## / scopeAllowedHF0) \
## + 1))
HF00[dim1index, dim2index] = 1 # HF0.max()
HF00[:, indexBestPath == (NF0 - 1)] = 0.0
HF00[:, indexBestPath == 0] = 0.0
WF0effective = WF0
HF00effective = HF00
if options.melody is None:
del HF0, HGAMMA, HPHI, HM, WM, HF00, SX
alphaR, alphaL, HGAMMA, HPHI, HF0, \
betaR, betaL, HM, WM, recoError2 = SIMM.Stereo_SIMM(
# the data to be fitted to:
SXR, SXL,
# the basis matrices for the spectral combs
WF0effective,
# and for the elementary filters:
WGAMMA,
# number of desired filters, accompaniment spectra:
numberOfFilters=K, numberOfAccompanimentSpectralShapes=R,
# if any, initial amplitude matrices for
HGAMMA0=None, HPHI0=None,
HF00=HF00effective,
WM0=None, HM0=None,
# Some more optional arguments, to control the "convergence"
# of the algo
numberOfIterations=niter, updateRulePower=1.0,
stepNotes=stepNotes,
lambdaHF0 = 0.0 / (1.0 * SXR.max()), alphaHF0=0.9,
verbose=options.verbose, displayEvolution=displayEvolution)
WPHI = np.dot(WGAMMA, HGAMMA)
SPHI = np.dot(WPHI, HPHI)
SF0 = np.dot(WF0effective, HF0)
hatSXR = (alphaR**2) * SF0 * SPHI + np.dot(np.dot(WM, betaR**2),HM)
hatSXL = (alphaL**2) * SF0 * SPHI + np.dot(np.dot(WM, betaL**2),HM)
hatVR = (alphaR**2) * SPHI * SF0 / hatSXR * XR
vestR = istft(hatVR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
hatVR = (alphaL**2) * SPHI * SF0 / hatSXL * XL
vestL = istft(hatVR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
#scikits.audiolab.wavwrite(np.array([vestR,vestL]).T, \
# options.voc_output_file, fs)
vestR = np.array(np.round(vestR*scaleData), dtype=dataType)
vestL = np.array(np.round(vestL*scaleData), dtype=dataType)
wav.write(options.voc_output_file, fs, \
np.array([vestR,vestL]).T)
#wav.write(options.voc_output_file, fs, \
# np.int16(32768.0 * np.array([vestR,vestL]).T))
hatMR = (np.dot(np.dot(WM,betaR ** 2),HM)) / hatSXR * XR
mestR = istft(hatMR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
hatMR = (np.dot(np.dot(WM,betaL ** 2),HM)) / hatSXL * XL
mestL = istft(hatMR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
#scikits.audiolab.wavwrite(np.array([mestR,mestL]).T, \
# options.mus_output_file, fs)
mestR = np.array(np.round(mestR*scaleData), dtype=dataType)
mestL = np.array(np.round(mestL*scaleData), dtype=dataType)
wav.write(options.mus_output_file, fs, \
np.array([mestR,mestL]).T)
#wav.write(options.mus_output_file, fs, \
# np.int16(32768.0 * np.array([mestR,mestL]).T))
del hatMR, mestL, vestL, vestR, mestR, hatVR, hatSXR, hatSXL, SPHI, SF0
# adding the unvoiced part in the source basis:
WUF0 = np.hstack([WF0, np.ones([WF0.shape[0], 1])])
HUF0 = np.vstack([HF0, np.ones([1, HF0.shape[1]])])
## HUF0[-1,:] = HF0.sum(axis=0) # should we do this?
alphaR, alphaL, HGAMMA, HPHI, HF0, \
betaR, betaL, HM, WM, recoError3 = SIMM.Stereo_SIMM(
# the data to be fitted to:
SXR, SXL,
# the basis matrices for the spectral combs
WUF0,
# and for the elementary filters:
WGAMMA,
# number of desired filters, accompaniment spectra:
numberOfFilters=K, numberOfAccompanimentSpectralShapes=R,
# if any, initial amplitude matrices for
HGAMMA0=HGAMMA, HPHI0=HPHI,
HF00=HUF0,
WM0=None,#WM,
HM0=None,#HM,
# Some more optional arguments, to control the "convergence"
# of the algo
numberOfIterations=niter, updateRulePower=1.0,
stepNotes=stepNotes,
lambdaHF0 = 0.0 / (1.0 * SXR.max()), alphaHF0=0.9,
verbose=options.verbose, displayEvolution=displayEvolution,
updateHGAMMA=False)
WPHI = np.dot(WGAMMA, HGAMMA)
SPHI = np.dot(WPHI, HPHI)
SF0 = np.dot(WUF0, HF0)
hatSXR = (alphaR**2) * SF0 * SPHI + np.dot(np.dot(WM, betaR**2),HM)
hatSXL = (alphaL**2) * SF0 * SPHI + np.dot(np.dot(WM, betaL**2),HM)
hatVR = (alphaR**2) * SPHI * SF0 / hatSXR * XR
vestR = istft(hatVR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
hatVR = (alphaL**2) * SPHI * SF0 / hatSXL * XL
vestL = istft(hatVR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
outputFileName = options.voc_output_file[:-4] + '_VUIMM.wav'
# scikits.audiolab.wavwrite(np.array([vestR,vestL]).T, outputFileName, fs)
vestR = np.array(np.round(vestR*scaleData), dtype=dataType)
vestL = np.array(np.round(vestL*scaleData), dtype=dataType)
wav.write(outputFileName, fs, \
np.array([vestR,vestL]).T)
hatMR = (np.dot(np.dot(WM,betaR ** 2),HM)) / hatSXR * XR
mestR = istft(hatMR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
hatMR = (np.dot(np.dot(WM,betaL ** 2),HM)) / hatSXL * XL
mestL = istft(hatMR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
outputFileName = options.mus_output_file[:-4] + '_VUIMM.wav'
#scikits.audiolab.wavwrite(np.array([mestR,mestL]).T, outputFileName, fs)
mestR = np.array(np.round(mestR*scaleData), dtype=dataType)
mestL = np.array(np.round(mestL*scaleData), dtype=dataType)
wav.write(outputFileName, fs, \
np.array([mestR,mestL]).T)
if displayEvolution:
plt.close('all')
## raw_input("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"\
## "!! Press Return to end the program... !!\n"\
## "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print "Done!"
if __name__ == '__main__':
main()
|
mit
| 3,770,763,837,554,159,600
| 40.138916
| 104
| 0.531349
| false
| 3.613361
| false
| false
| false
|
mmolero/pcloudpy
|
pcloudpy/core/filters/OrientedNormalEstimation.py
|
1
|
3575
|
"""
Class that defines an oriented normal estimation method: PCA (eigen decomposition) plane fitting for the normals, plus a minimum spanning tree to propagate a consistent orientation
"""
__all__ = ["OrientedNormalsEstimation"]
import numpy as np
from scipy.linalg import eigh
from sklearn.neighbors import NearestNeighbors
import networkx as nx
from pcloudpy.core.filters.base import FilterBase
from ..io.converters import numpy_from_polydata, copy_polydata_add_normals
class OrientedNormalsEstimation(FilterBase):
"""
    OrientedNormalsEstimation filter estimates the normals of a point cloud using the PCA eigen method to fit local planes
Parameters
----------
number_neighbors: int
number of neighbors to be considered in the normals estimation
Attributes
----------
input_: vtkPolyData
Input Data to be filtered
output_: vtkPolyData
Output Data
"""
def __init__(self, number_neighbors = 10):
self.number_neighbors = number_neighbors
def update(self):
array_with_color = numpy_from_polydata(self.input_)
normals = np.empty_like(array_with_color[:,0:3])
coord = array_with_color[:,0:3]
neigh = NearestNeighbors(self.number_neighbors)
neigh.fit(coord)
for i in range(0,len(coord)):
#Determine the neighbours of point
d = neigh.kneighbors(coord[i])
            #Add coordinates of the neighbours; don't include the center point. Determine the coordinates from the indices of the neighbours.
y = np.zeros((self.number_neighbors-1,3))
y = coord[d[1][0][1:self.number_neighbors],0:3]
            #Estimate the local surface normal from the neighbourhood
            #and assign it to point i
normals[i,0:3] = self.get_normals(y)
        #Get the point with the highest z value; this will be used as the starting point for the depth-first search
z_max_point = np.where(coord[:,2]== np.max(coord[:,2]))
z_max_point = int(z_max_point[0])
        if normals[z_max_point,2] < 0: # i.e. the normal doesn't point outwards
normals[z_max_point,:]=-normals[z_max_point,:]
#Create a graph
G = nx.Graph()
        #Add all points and their neighbours to the graph; the edge weight is 1 - |dot product of the neighbouring normals|
for i in range(0,len(coord)):
d = neigh.kneighbors(coord[i,:3])
for c in range(1,self.number_neighbors):
p1 = d[1][0][0]
p2 = d[1][0][c]
n1 = normals[d[1][0][0],:]
n2 = normals[d[1][0][c],:]
dot = np.dot(n1,n2)
G.add_edge(p1,p2,weight =1-np.abs(dot))
T = nx.minimum_spanning_tree(G)
x=[]
for i in nx.dfs_edges(T,z_max_point):
x+=i
inds = np.where(np.diff(x))[0]
out = np.split(x,inds[np.diff(inds)==1][1::2]+1)
for j in range(0,len(out)):
for i in range(0,len(out[j])-1):
n1 = normals[out[j][i],:]
n2 = normals[out[j][i+1],:]
if np.dot(n2,n1)<0:
normals[out[j][i+1],:]=-normals[out[j][i+1],:]
self.output_ = copy_polydata_add_normals(self.input_, normals)
def get_normals(self, XYZ):
#The below code uses the PCA Eigen method to fit plane.
#Get the covariance matrix
average = np.sum(XYZ, axis=0)/XYZ.shape[0]
b = np.transpose(XYZ - average)
cov = np.cov(b)
#Get eigen val and vec
e_val,e_vect = eigh(cov, overwrite_a=True, overwrite_b=True)
norm = e_vect[:,0]
return norm
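# Illustrative usage sketch (not part of the original filter): running the
# filter on an existing vtkPolyData point cloud. It relies only on the input_ /
# output_ attributes documented in the class docstring above; how the
# vtkPolyData is obtained (reader, another filter, ...) is up to the caller.
def example_oriented_normals(poly_data, number_neighbors=10):
    """Estimate consistently oriented normals for poly_data and return the output polydata."""
    estimator = OrientedNormalsEstimation(number_neighbors=number_neighbors)
    estimator.input_ = poly_data
    estimator.update()
    return estimator.output_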
|
bsd-3-clause
| -1,820,266,291,135,787,500
| 28.8
| 133
| 0.582098
| false
| 3.518701
| false
| false
| false
|
iotile/coretools
|
transport_plugins/bled112/iotile_transport_bled112/broadcast_v2_dedupe.py
|
1
|
3901
|
"""This module is used to identify and filter out broadcast v2 broadcasts, which leads to significant
performance increases.
"""
import time
import struct
import collections
from typing import Dict
from iotile.cloud.utilities import device_id_to_slug
def packet_is_broadcast_v2(packet: bytearray) -> bool:
"""Simple/efficient check for whether a given packet from the bled112 is an IOTile Broadcast v2 packet."""
#Broadcast packets consist of 32 bytes for data, 10 for BLE packet header and 4 for bled112 bgapi header
if len(packet) != 46:
return False
#This identifies the bgapi packet as an event
if not (packet[0] == 0x80 and packet[2] == 6 and packet[3] == 0):
return False
#This identifies the event as a broadcast v2 packet
if not (packet[18] == 0x1b and packet[19] == 0x16 and packet[20] == 0xdd and packet[21] == 0xfd):
return False
return True
class BroadcastV2DeduperCollection:
"""Main interface into the Broadcast v2 deduplication code.
This contains a dictionary, keyed on the broadcast sender's encoded UUID, and with the values being
a small class that stores the last received packet from that UUID and the last time the packet
    was forwarded. That class (BroadcastV2Deduper) will report whether the packet is new and should be allowed through.
Args:
pass_packets_every(float, seconds): For each encoded_uuid address, at least one packet will be allowed through
every "pass_packets_every" seconds
"""
MAX_DEDUPERS = 500
def __init__(self, pass_packets_every: float = 5):
self._pass_packets_every = pass_packets_every
self.dedupers = collections.OrderedDict() #type: collections.OrderedDict[bytes, BroadcastV2Deduper]
def allow_packet(self, packet: bytearray) -> bool:
"""Run a packet through the broadcast_v2 deduper.
Returns False if the packet should be dropped
"""
if not packet_is_broadcast_v2(packet):
return True
encoded_uuid = bytes(packet[22:26])
stream = bytes(packet[36:38])
uuid_and_stream = (encoded_uuid, stream)
data = bytes(packet[22:])
deduper = self.dedupers.get(uuid_and_stream)
if deduper is None:
deduper = BroadcastV2Deduper(uuid_and_stream, self._pass_packets_every)
if len(self.dedupers) == self.MAX_DEDUPERS:
self.evict_oldest_deduper()
self.dedupers[uuid_and_stream] = deduper
return deduper.allow_packet(data)
def evict_oldest_deduper(self):
"""Find and remove the oldest deduper
This function will likely be called rarely, if at all
"""
self.dedupers.popitem(last=False)
class BroadcastV2Deduper():
"""Individual deduplicator for an specific UUID and stream."""
def __init__(self, uuid_and_stream: tuple, pass_packets_every: float = 5):
self.encoded_uuid = uuid_and_stream[0]
self._pass_packets_every = pass_packets_every
self.last_allowed_packet = 0 #type: float
self.last_data = bytes()
self._slug = ""
def get_slug(self):
"""For debugging, unpack the UUID into a slug so it can be printed. Only do this if needed though."""
if self._slug:
return self._slug
uuid = struct.unpack("<L", self.encoded_uuid)
self._slug = device_id_to_slug("%04X" % uuid)
return self._slug
def allow_packet(self, broadcast_data: bytes)-> bool:
"""Check if the packet is allowed. If so, save it and return True. Otherwise return False."""
if (time.monotonic() > self.last_allowed_packet + self._pass_packets_every or
self.last_data != broadcast_data):
self.last_data = broadcast_data
self.last_allowed_packet = time.monotonic()
return True
return False
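# Illustrative usage sketch (not part of the original module): a scanner loop
# would typically own a single BroadcastV2DeduperCollection and drop duplicate
# broadcast v2 advertisements before any further parsing. raw_packets is a
# placeholder for whatever iterable yields raw bled112 packets (bytearray).
def filter_broadcast_v2_packets(raw_packets, pass_packets_every: float = 5.0):
    """Yield only the packets that the deduper allows through."""
    deduper = BroadcastV2DeduperCollection(pass_packets_every)
    for packet in raw_packets:
        if deduper.allow_packet(packet):
            yield packet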
|
gpl-3.0
| -8,435,621,857,235,197,000
| 37.623762
| 118
| 0.655473
| false
| 3.862376
| false
| false
| false
|
topseer/django
|
dJangoAdmin/dJangoAdmin/urls.py
|
1
|
1593
|
"""
locallibrary URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
# Use include() to add URLS from the catalog application
from django.conf.urls import include
urlpatterns += [
url(r'^catalog/', include('catalog.urls')),
]
urlpatterns += [
url(r'^polls/', include('polls.urls')),
]
#Add Django site authentication urls (for login, logout, password management)
urlpatterns += [
url(r'^accounts/', include('django.contrib.auth.urls')),
url(r'^catalog/accounts/', include('django.contrib.auth.urls')),
url(r'^catalog/dashboard/accounts/', include('django.contrib.auth.urls')),
]
# Use static() to add url mapping to serve static files during development (only)
from django.conf import settings
from django.conf.urls.static import static
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
mit
| -268,994,568,428,312,130
| 31.229167
| 81
| 0.689893
| false
| 3.620455
| false
| false
| false
|
runt18/nupic
|
src/nupic/support/exceptions.py
|
1
|
2930
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import traceback
class TimeoutError(Exception):
""" The requested operation timed out """
pass
class NupicJobFailException(Exception):
""" This exception signals that the Nupic job (e.g., Hypersearch, Production,
etc.) should be aborted due to the given error.
"""
def __init__(self, errorCode, msg):
"""
Parameters:
---------------------------------------------------------------------
errorCode: An error code from the support.errorcodes.ErrorCodes
enumeration
msg: Error message string
"""
self.__errorCode = errorCode
self.__msg = msg
    super(NupicJobFailException, self).__init__(errorCode, msg)
return
def getWorkerCompletionMessage(self):
""" Generates a worker completion message that is suitable for the
worker_completion_message field in jobs table
Parameters:
---------------------------------------------------------------------
retval: The worker completion message appropriate for the
"worker_completion_message" field in jobs table
"""
msg = "{0!s}: {1!s}\n{2!s}".format(self.__errorCode, self.__msg, traceback.format_exc())
return msg
@classmethod
def mapCurrentException(cls, e, errorCode, msg):
""" Raises NupicJobFailException by mapping from another exception that
is being handled in the caller's scope and preserves the current exception's
traceback.
Parameters:
---------------------------------------------------------------------
e: The source exception
errorCode: An error code from the support.errorcodes.ErrorCodes
enumeration
msg: Error message string
"""
traceback = sys.exc_info()[2]
assert traceback is not None
newMsg = "{0!s}: {1!r}".format(msg, e)
e = NupicJobFailException(errorCode=errorCode, msg=newMsg)
raise e, None, traceback
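# Illustrative usage sketch (not part of the original module): mapping a caught
# exception onto NupicJobFailException while preserving the original traceback.
# ErrorCodes.someError and runWorkerStep() are placeholders, not real names.
#
#   try:
#     runWorkerStep()
#   except Exception as e:
#     NupicJobFailException.mapCurrentException(
#         e, errorCode=ErrorCodes.someError, msg="worker step failed")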
|
agpl-3.0
| -2,779,699,714,728,841,700
| 31.197802
| 92
| 0.601024
| false
| 4.787582
| false
| false
| false
|
monkpit/pyfocas
|
FanucImplementation/Fwlib32_h.py
|
1
|
13167
|
# -*- coding: utf-8 -*-
""" Fwlib32_h.py
This file contains ctypes structures to match the data structures
found in the library header Fwlib32.h.
All classes contain `_pack_ = 4`; this comes from Fwlib32.h:
#pragma pack(push,4)
Don't unit test these because it would basically be running tests against
the ctypes module itself and not any of our own code.
Further documentation can be found in the FOCAS documentation.
Look up the documentation of the Equivalent data type.
For example, for documentation on "AlarmStatus", look up "ODBALM".
"""
import ctypes
"""Constants"""
MAX_AXIS = 32
"""int: The maximum number of axes a control will return"""
ALL_AXES = -1
"""int: A constant value to request that a function return all axes at once"""
DATAIO_ALARM_MASK = (0x1 << 2) | (0x1 << 7)
SERVO_ALARM_MASK = 0x1 << 6
MACRO_ALARM_MASK = 0x1 << 8
OVERHEAT_ALARM_MASK = 0x1 << 5
OVERTRAVEL_ALARM_MASK = 0x1 << 4
SPINDLE_ALARM_MASK = 0x1 << 9
"""bit masks to determine alarm status
take an alarm data and AND it with the mask
If the result is True the alarm is active
If it's False it's cleared.
For example, see: DriverImplementations.alarmStringBuilder
"""
class AlarmStatus(ctypes.Structure):
"""
Equivalent of ODBALM
"""
_pack_ = 4
_fields_ = [("dummy", ctypes.c_short * 2),
("data", ctypes.c_short), ]
ODBALM = AlarmStatus
class LoadElement(ctypes.Structure):
"""
Equivalent of LOADELM
"""
_pack_ = 4
_fields_ = [("data", ctypes.c_long),
("decimal", ctypes.c_short),
("unit", ctypes.c_short),
("name", ctypes.c_char),
("suffix1", ctypes.c_char),
("suffix2", ctypes.c_char),
("reserve", ctypes.c_char), ]
LOADELM = LoadElement
class ServoLoad(ctypes.Structure):
"""
Equivalent of ODBSVLOAD
"""
_pack_ = 4
_fields_ = [("load", LoadElement)]
ODBSVLOAD = ServoLoad
class SpindleLoad(ctypes.Structure):
"""
Equivalent of ODBSPLOAD
"""
_pack_ = 4
_fields_ = [("load", LoadElement),
("speed", LoadElement), ]
ODBSPLOAD = SpindleLoad
class StatInfo(ctypes.Structure):
_pack_ = 4
_fields_ = [("hdck", ctypes.c_short),
("tmmode", ctypes.c_short),
("auto", ctypes.c_short),
("run", ctypes.c_short),
("motion", ctypes.c_short),
("mstb", ctypes.c_short),
("estop", ctypes.c_short),
("alarm", ctypes.c_short),
("edit", ctypes.c_short), ]
@property
def __dict__(self):
# unreadable
return dict((f, getattr(self, f)) for f, _ in self._fields_)
class ModalAux(ctypes.Structure):
_pack_ = 4
_fields_ = [("aux_data", ctypes.c_long),
("flag1", ctypes.c_char),
("flag2", ctypes.c_char), ]
class ModalAuxUnion(ctypes.Union):
_pack_ = 4
_fields_ = [("g_data", ctypes.c_char),
("g_rdata", ctypes.c_char * 35),
("g_1shot", ctypes.c_char * 4),
("aux", ModalAux),
("raux1", ModalAux * 27),
("raux2", ModalAux * MAX_AXIS), ]
class ModalData(ctypes.Structure):
"""
Equivalent of ODBMDL
"""
_pack_ = 4
_fields_ = [("datano", ctypes.c_short),
("type", ctypes.c_short),
("modal", ModalAuxUnion), ]
ODBMDL = ModalData
class ExecutingProgram(ctypes.Structure):
"""
Equivalent of ODBEXEPRG
"""
_pack_ = 4
_fields_ = [("name", ctypes.c_char * 36),
("oNumber", ctypes.c_long), ]
ODBEXEPRG = ExecutingProgram
class AxisName(ctypes.Structure):
"""
Equivalent of ODBAXISNAME
"""
_pack_ = 4
_fields_ = [("name", ctypes.c_char),
("suffix", ctypes.c_char)]
ODBAXISNAME = AxisName
class AxisData(ctypes.Structure):
"""
Equivalent of ODBAXDT
"""
_pack_ = 4
_fields_ = [("axisName", ctypes.c_char * 4),
("position", ctypes.c_long),
("decimalPosition", ctypes.c_short),
("unit", ctypes.c_short),
("flag", ctypes.c_short),
("_reserved", ctypes.c_short), ]
ODBAXDT = AxisData
class AlarmRecord(ctypes.Structure):
_pack_ = 4
_fields_ = [("recordType", ctypes.c_short),
("alarmGroup", ctypes.c_short),
("alarmNumber", ctypes.c_short),
("axis", ctypes.c_byte),
("_AlarmRecord_dummy", ctypes.c_byte)]
class MDIRecord(ctypes.Structure):
_pack_ = 4
_fields_ = [("recordType", ctypes.c_short),
("keycode", ctypes.c_byte),
("powerFlag", ctypes.c_byte),
("_MDIRecord_dummy", ctypes.c_char * 4), ]
class SignalRecord(ctypes.Structure):
_pack_ = 4
_fields_ = [("recordType", ctypes.c_short),
("signalName", ctypes.c_byte),
("oldSignal", ctypes.c_byte),
("newSignal", ctypes.c_byte),
("_SignalRecord_dummy", ctypes.c_byte),
("signalNumber", ctypes.c_short), ]
class DateOrPower(ctypes.Structure):
_pack_ = 4
_fields_ = [("recordType", ctypes.c_short),
("year", ctypes.c_byte),
("month", ctypes.c_byte),
("day", ctypes.c_byte),
("powerFlag", ctypes.c_byte),
("_DateOrPower_dummy", ctypes.c_byte * 2)]
class OperationHistoryDataUnion(ctypes.Union):
"""
Union for operation history data
"""
_pack_ = 4
_fields_ = [("alarm", AlarmRecord),
("mdi", MDIRecord),
("signal", SignalRecord),
("dateOrPower", DateOrPower), ]
class OperationHistory(ctypes.Structure):
"""
Equivalent of ODBHIS
"""
_pack_ = 4
_fields_ = [("startNumber", ctypes.c_ushort),
("_ODBHIS_type", ctypes.c_short),
("endNumber", ctypes.c_ushort),
("data", OperationHistoryDataUnion * 10)]
ODBHIS = OperationHistory
class ProgramDirectory2(ctypes.Structure):
"""
Equivalent of PRGDIR2
"""
_pack_ = 4
_fields_ = [("number", ctypes.c_short),
("length", ctypes.c_long),
("comment", ctypes.c_char * 51),
("_ProgramDirectory2_dummy", ctypes.c_char), ]
PRGDIR2 = ProgramDirectory2
class PanelSignals150(ctypes.Structure):
"""
Equivalent of IODBSGNL with less data
"""
_pack_ = 4
_fields_ = [("_PanelSignals150_dummy", ctypes.c_short), # dummy
("type", ctypes.c_short), # data select flag
("mode", ctypes.c_short), # mode signal
("manualFeedAxis", ctypes.c_short), # Manual handle feed axis selection signal
("manualFeedDistance", ctypes.c_short), # Manual handle feed travel distance selection signal
("rapidOverride", ctypes.c_short), # rapid traverse override signal
("jogOverride", ctypes.c_short), # manual feedrate override signal
("feedOverride", ctypes.c_short), # feedrate override signal
("spindleOverride", ctypes.c_short), # (not used)
("blockDelete", ctypes.c_short), # optional block skip signal
("singleBlock", ctypes.c_short), # single block signal
("machineLock", ctypes.c_short), # machine lock signal
("dryRun", ctypes.c_short), # dry run signal
("memoryProtection", ctypes.c_short), # memory protection signal
("feedHold", ctypes.c_short), # automatic operation halt signal
("manualRapid", ctypes.c_short), # (not used)
("_PanelSignals150_dummy2", ctypes.c_short * 2), ] # dummy
class PanelSignals160(ctypes.Structure):
"""
Equivalent of IODBSGNL
"""
_pack_ = 4
_fields_ = [("_PanelSignals160_dummy", ctypes.c_short), # dummy
("type", ctypes.c_short), # data select flag
("mode", ctypes.c_short), # mode signal
("manualFeedAxis", ctypes.c_short), # Manual handle feed axis selection signal
("manualFeedDistance", ctypes.c_short), # Manual handle feed travel distance selection signal
("rapidOverride", ctypes.c_short), # rapid traverse override signal
("jogOverride", ctypes.c_short), # manual feedrate override signal
("feedOverride", ctypes.c_short), # feedrate override signal
("spindleOverride", ctypes.c_short), # (not used)
("blockDelete", ctypes.c_short), # optional block skip signal
("singleBlock", ctypes.c_short), # single block signal
("machineLock", ctypes.c_short), # machine lock signal
("dryRun", ctypes.c_short), # dry run signal
("memoryProtection", ctypes.c_short), # memory protection signal
("feedHold", ctypes.c_short),] # automatic operation halt signal
IODBSGNL = PanelSignals160
class PMCData(ctypes.Structure):
"""
Actual PMC values that were read
Used to replace anonymous struct in IODBPMC called "u"
"""
_pack_ = 1
_fields_ = [("cdata", ctypes.c_byte * 5),
("idata", ctypes.c_short * 5),
("ldata", ctypes.c_byte * 5), ]
@property
def pmcValue(self):
if self.cdata[0] < 0:
self.cdata[0] = -self.cdata[0] - 1
return self.cdata[0]
class PMC(ctypes.Structure):
"""
A data structure to hold values read from PMC addresses
Equivalent of IODBPMC
"""
_pack_ = 4
_fields_ = [("addressType", ctypes.c_short),
("dataType", ctypes.c_short),
("startAddress", ctypes.c_short),
("endAddress", ctypes.c_short),
("data", PMCData), ]
IODBPMC = PMC
class FAxis(ctypes.Structure):
_pack_ = 4
_fields_ = [("_absolute", ctypes.c_long * MAX_AXIS),
("_machine", ctypes.c_long * MAX_AXIS),
("_relative", ctypes.c_long * MAX_AXIS),
("_distance", ctypes.c_long * MAX_AXIS), ]
@property
def __dict__(self):
# unreadable
return dict((f, [x for x in getattr(self, f)])
for (f, _) in self._fields_)
# return {"absolute": self.absolute,
# "machine": self.machine,
# "relative": self.relative,
# "distance": self.distance}
class OAxis(ctypes.Structure):
_pack_ = 4
_fields_ = [("absolute", ctypes.c_long),
("machine", ctypes.c_long),
("relative", ctypes.c_long),
("distance", ctypes.c_long), ]
@property
def __dict__(self):
# unreadable
return dict((f, getattr(self, f)) for f, _ in self._fields_)
class PositionUnion(ctypes.Union):
"""
Alias for the anonymous union "pos" defined in some fwlib32 structures
"""
_pack_ = 4
_fields_ = [("_faxis", FAxis),
("_oaxis", OAxis), ]
@property
def __dict__(self):
# unreadable
return dict([("faxis", self._faxis.__dict__),
("oaxis", self._oaxis.__dict__)])
class DynamicResult(ctypes.Structure):
"""
Alias for ODBDY2 because what does that even mean
"""
_pack_ = 4
_fields_ = [("_DynamicResult_dummy", ctypes.c_short),
("axis", ctypes.c_short),
("alarm", ctypes.c_long),
("programNumber", ctypes.c_long),
("mainProgramNumber", ctypes.c_long),
("sequenceNumber", ctypes.c_long),
("actualFeed", ctypes.c_long),
("actualSpindleSpeed", ctypes.c_long),
("position", PositionUnion), ]
@property
def __dict__(self):
# unreadable
return dict((f, getattr(self, f)) for f, _ in self._fields_)
ODBDY2 = DynamicResult
class IDBPMMGTI(ctypes.Structure):
"""
Equivalent of IDBPMMGTI in FOCAS documentation
"""
_pack_ = 4
_fields_ = [("top", ctypes.c_long),
("num", ctypes.c_long), ]
class ODBPMMGET(ctypes.Structure):
"""
Equivalent of ODBPMMGET in FOCAS documentation
"""
_pack_ = 4
_fields_ = [("position", ctypes.c_long),
("actualFeed", ctypes.c_long),
("data", ctypes.c_long * 20),
("number", ctypes.c_long * 20),
("axis", ctypes.c_short * 20),
("type", ctypes.c_short * 20),
("alarmAxis", ctypes.c_char * 40),
("alarmNumber", ctypes.c_ushort * 40),
("channel", ctypes.c_long),
("group", ctypes.c_long), ]
class ProgramData(ctypes.Structure):
"""
Equivalent of ODBPRO
"""
_pack_ = 4
_fields_ = [("dummy", ctypes.c_short * 2),
("program", ctypes.c_long),
("mainProgram", ctypes.c_long)]
ODBPRO = ProgramData
|
mit
| 8,969,923,093,926,644,000
| 28.925
| 111
| 0.540214
| false
| 3.701715
| false
| false
| false
|
Fe-Nik-S/Examples
|
python/patterns/behavioral/iterator.py
|
1
|
1040
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
#
# ---------------------------------------------------------------------
# Copyright (C) 2017-2018 The --- Project
# See LICENSE for details
# ---------------------------------------------------------------------
class Fibonacci(object):
def __init__(self, count_to):
self._count_to = count_to
def __iter__(self):
self._current = 0
self._next = 1
return self
def __next__(self):
result = self._current
if self._current > self._count_to:
raise StopIteration
self._current, self._next = self._next, self._current + self._next
return result
if __name__ == "__main__":
count_to = 100
print("Fibonacci sequence values up to {}:".format(count_to))
    fib_iterator = Fibonacci(count_to)
for _ in fib_iterator:
print(_, end=" ")
### OUTPUT ###
# Fibonacci sequence values up to 100:
# 0 1 1 2 3 5 8 13 21 34 55 89
|
mit
| -1,968,337,526,883,203,800
| 26.157895
| 74
| 0.443798
| false
| 4.015564
| false
| false
| false
|
phpnick/RegPy
|
tm.py
|
1
|
1210
|
"""
FoSAPy - TM module
Author: Niklas Rieken
"""
import time
class TM():
""" M = (Q, Sigma, Gamma, delta, q_0, q_f, B) """
Q = []
Sigma = []
Gamma = []
delta = {}
q_0 = None
q_f = None
B = None
def __init__(self, Q, Sigma, Gamma, delta, q_0, q_f, B='B'):
""" Constructor """
self.Q = Q
self.Sigma = Sigma
self.Gamma = Gamma
self.delta = delta
self.q_0 = q_0
self.q_f = q_f
self.B = B
def __repr__(self):
""" To string method """
return "M = (\n\tQ = {0},\n\tSigma = {1},\n\tGamma = {2},\n\tdelta = {3},\n\tq_0 = {4},\n\tq_f = {5},\n\tB = {6}\n)".format(self.Q, self.Sigma, self.Gamma, self.delta, self.q_0, self.q_f, self.B)
def simulate(self, w):
""" Runs w on M """
q = self.q_0
u = ''
v = w
print("{0} {1} {2}".format(u, q, v))
time.sleep(2)
while q != self.q_f:
if len(v) == 0:
v = 'B'
p = self.delta[q, v[0]][0]
v = self.delta[q, v[0]][1] + v[1:]
if self.delta[q, v[0]][2] == 'L':
if len(u) == 0:
u = 'B'
v = u[-1] + v
u = u[:-1]
elif self.delta[q, v[0]][2] == 'R':
if len(v) == 0:
v = 'B'
u = u + v[0]
v = v[1:]
else:
pass
q = p
print("{0} {1} {2}".format(u, q, v))
time.sleep(2)
|
mit
| 822,664,330,862,086,500
| 18.836066
| 197
| 0.465289
| false
| 2.033613
| false
| false
| false
|
Forage/Gramps
|
gramps/gen/datehandler/_date_cs.py
|
1
|
8857
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
"""
Czech-specific classes for parsing and displaying dates.
"""
from __future__ import unicode_literals
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from ..lib.date import Date
from ._dateparser import DateParser
from ._datedisplay import DateDisplay
from ._datehandler import register_datehandler
#-------------------------------------------------------------------------
#
# Czech parser
#
#-------------------------------------------------------------------------
class DateParserCZ(DateParser):
"""
Converts a text string into a Date object
"""
month_to_int = DateParser.month_to_int
month_to_int["leden"] = 1
month_to_int["ledna"] = 1
month_to_int["lednu"] = 1
month_to_int["led"] = 1
month_to_int["I"] = 1
month_to_int["i"] = 1
month_to_int["únor"] = 2
month_to_int["února"] = 2
month_to_int["únoru"] = 2
month_to_int["ún"] = 2
month_to_int["II"] = 2
month_to_int["ii"] = 2
month_to_int["březen"] = 3
month_to_int["března"] = 3
month_to_int["březnu"] = 3
month_to_int["bře"] = 3
month_to_int["III"] = 3
month_to_int["iii"] = 3
month_to_int["duben"] = 4
month_to_int["dubna"] = 4
month_to_int["dubnu"] = 4
month_to_int["dub"] = 4
month_to_int["IV"] = 4
month_to_int["iv"] = 4
month_to_int["květen"] = 5
month_to_int["května"] = 5
month_to_int["květnu"] = 5
month_to_int["V"] = 5
month_to_int["v"] = 5
month_to_int["červen"] = 6
month_to_int["června"] = 6
month_to_int["červnu"] = 6
month_to_int["čer"] = 6
month_to_int["vi"] = 6
month_to_int["červenec"] = 7
month_to_int["července"] = 7
month_to_int["červenci"] = 7
month_to_int["čvc"] = 7
month_to_int["VII"] = 7
month_to_int["vii"] = 7
month_to_int["srpen"] = 8
month_to_int["srpna"] = 8
month_to_int["srpnu"] = 8
month_to_int["srp"] = 8
month_to_int["VIII"] = 8
month_to_int["viii"] = 8
month_to_int["září"] = 9
month_to_int["zář"] = 9
month_to_int["IX"] = 9
month_to_int["ix"] = 9
month_to_int["říjen"] = 10
month_to_int["října"] = 10
month_to_int["říjnu"] = 10
month_to_int["říj"] = 10
month_to_int["X"] = 10
month_to_int["x"] = 10
month_to_int["listopad"] = 11
month_to_int["listopadu"] = 11
month_to_int["lis"] = 11
month_to_int["XI"] = 11
month_to_int["xi"] = 11
month_to_int["prosinec"] = 12
month_to_int["prosince"] = 12
month_to_int["prosinci"] = 12
month_to_int["pro"] = 12
month_to_int["XII"] = 12
month_to_int["xii"] = 12
modifier_to_int = {
'před' : Date.MOD_BEFORE,
'do' : Date.MOD_BEFORE,
'po' : Date.MOD_AFTER,
'asi' : Date.MOD_ABOUT,
'kolem' : Date.MOD_ABOUT,
'přibl.' : Date.MOD_ABOUT,
}
calendar_to_int = {
'gregoriánský' : Date.CAL_GREGORIAN,
'greg.' : Date.CAL_GREGORIAN,
'g' : Date.CAL_GREGORIAN,
'juliánský' : Date.CAL_JULIAN,
'jul.' : Date.CAL_JULIAN,
'j' : Date.CAL_JULIAN,
'hebrejský' : Date.CAL_HEBREW,
'hebr.' : Date.CAL_HEBREW,
'h' : Date.CAL_HEBREW,
'islámský' : Date.CAL_ISLAMIC,
'isl.' : Date.CAL_ISLAMIC,
'i' : Date.CAL_ISLAMIC,
'francouzský republikánský' : Date.CAL_FRENCH,
'fr.' : Date.CAL_FRENCH,
'perský' : Date.CAL_PERSIAN,
'per.' : Date.CAL_PERSIAN,
'p' : Date.CAL_PERSIAN,
'švédský' : Date.CAL_SWEDISH,
'sve.' : Date.CAL_SWEDISH,
's' : Date.CAL_SWEDISH,
}
quality_to_int = {
'odhadované' : Date.QUAL_ESTIMATED,
'odh.' : Date.QUAL_ESTIMATED,
'vypočtené' : Date.QUAL_CALCULATED,
'vyp.' : Date.QUAL_CALCULATED,
}
def init_strings(self):
DateParser.init_strings(self)
self._span = re.compile(
"(od)\s+(?P<start>.+)\s+(do)\s+(?P<stop>.+)",
re.IGNORECASE)
self._range = re.compile(
"(mezi)\s+(?P<start>.+)\s+(a)\s+(?P<stop>.+)",
re.IGNORECASE)
#-------------------------------------------------------------------------
#
# Czech display
#
#-------------------------------------------------------------------------
class DateDisplayCZ(DateDisplay):
"""
Czech language date display class.
"""
long_months = ( "", "leden", "únor", "březen", "duben", "květen",
"červen", "červenec", "srpen", "září", "říjen",
"listopad", "prosinec" )
short_months = ( "", "led", "úno", "bře", "dub", "kvě", "čer",
"čvc", "srp", "zář", "říj", "lis", "pro" )
calendar = (
"", "juliánský", "hebrejský",
"francouzský republikánský", "perský", "islámský",
"švédský"
)
_mod_str = ("", "před ", "po ", "kolem ", "", "", "")
_qual_str = ("", "přibližně ", "vypočteno ")
bce = ["před naším letopočtem", "před Kristem",
"př. n. l.", "př. Kr."] + DateParser.bce
formats = (
"ISO (rrrr-mm-dd)",
"numerický",
"měsíc den, Rok",
"měs den, Rok",
"den. měsíc rok",
"den. měs rok"
)
def display(self, date):
"""
Return a text string representing the date.
"""
mod = date.get_modifier()
cal = date.get_calendar()
qual = date.get_quality()
start = date.get_start_date()
newyear = date.get_new_year()
qual_str = self._qual_str[qual]
if mod == Date.MOD_TEXTONLY:
return date.get_text()
elif start == Date.EMPTY:
return ""
elif mod == Date.MOD_NONE:
date_decl_string = self.display_cal[cal](start)
date_decl_string = date_decl_string.replace("den ", "dna ")
date_decl_string = date_decl_string.replace("or ", "ora ")
date_decl_string = date_decl_string.replace("en ", "na ")
date_decl_string = date_decl_string.replace("ad ", "adu ")
date_decl_string = date_decl_string.replace("ec ", "ce ")
return date_decl_string
elif mod == Date.MOD_SPAN:
dat1 = self.display_cal[cal](start)
dat2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, 'od', dat1,
'do', dat2, scal)
elif mod == Date.MOD_RANGE:
dat1 = self.display_cal[cal](start)
dat2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, 'mezi',
dat1, 'a', dat2, scal)
else:
text = self.display_cal[date.get_calendar()](start)
scal = self.format_extras(cal, newyear)
return "%s%s%s%s" % (qual_str, self._mod_str[mod],
text, scal)
#-------------------------------------------------------------------------
#
# Register classes
#
#-------------------------------------------------------------------------
register_datehandler(("cs", "CS", "cs_CZ", "Czech"), DateParserCZ, DateDisplayCZ)
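# Editor's note (not part of the upstream module, added for orientation): with the
# tables above, DateDisplayCZ.display() renders a plain date through display_cal plus
# the declension fix-ups, an approximate date as e.g. "kolem <date>" via _mod_str,
# a span as "od <date1> do <date2>" and a range as "mezi <date1> a <date2>"; the exact
# day/month spelling depends on which entry of `formats` is selected.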
|
gpl-2.0
| 3,832,893,781,411,460,000
| 31.42963
| 81
| 0.473961
| false
| 3.108271
| false
| false
| false
|
hobson/pug
|
docs/source/conf.py
|
1
|
11525
|
# -*- coding: utf-8 -*-
#
# PUG documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 11 17:46:58 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pug'
copyright = u'2015, PDX Python User Group'
author = u'PDX Python User Group'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.21'
# The full version, including alpha/beta/rc tags.
release = '0.0.21'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pugdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PUG.tex', u'PUG Documentation',
u'PDX Python User Group', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pug', u'PUG Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PUG', u'PUG Documentation',
author, 'PUG', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
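# Editor's sketch (not generated by sphinx-quickstart): recent Sphinx versions prefer
# named intersphinx entries of the form {name: (target, inventory)}, for example
#
#   intersphinx_mapping = {
#       'python': ('https://docs.python.org/3', None),  # None = fetch objects.inv from the target
#   }
#
# The unnamed form above still resolves, but named entries also allow explicit
# cross-references such as :py:class:`python:dict`.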
|
mit
| 2,219,194,266,160,377,900
| 30.40327
| 80
| 0.706291
| false
| 3.656409
| true
| false
| false
|
CrawlScript/Tensorflow-AutoEncoder
|
tutorial_iris.py
|
1
|
3636
|
# -*- coding: utf-8 -*-
from mpl_toolkits.mplot3d import Axes3D
from autoencoder import AutoEncoder, DataIterator
import codecs
from random import shuffle
from matplotlib import pyplot as plt
import numpy as np
class IrisDataSet(object):
def get_label_id(self, label):
if label in self.label_id_dict:
return self.label_id_dict[label]
self.label_id_dict[label] = self.next_label_id
self.next_label_id += 1
return self.next_label_id - 1
def __init__(self):
self.next_label_id = 0
self.label_id_dict = {}
with codecs.open("tutorial_datasets/iris/iris.data", "r", "utf-8") as f:
str_datas = [line.strip() for line in f]
str_datas = [line.split(",") for line in str_datas if len(line) > 0]
shuffle(str_datas)
self.datas = [[float(d) for d in row_data[0:-1]] for row_data in str_datas]
# normalize datas
self.datas = np.array(self.datas, dtype = np.float32)
self.datas = self.datas/self.datas.max(0)
self.datas = self.datas * 2 - 1
self.labels = [self.get_label_id(row_data[-1]) for row_data in str_datas]
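# Editor's sketch (not part of the original tutorial): the normalisation in
# IrisDataSet.__init__ divides every column by its maximum and then maps the result
# into (-1, 1] for the strictly positive iris measurements; e.g. a sepal length of
# 5.1 with a column maximum of 7.9 becomes 5.1 / 7.9 * 2 - 1, about 0.29.
# A minimal stand-alone illustration with made-up numbers:
_norm_demo = np.array([[5.1, 3.5], [7.9, 4.4]], dtype=np.float32)
_norm_demo = _norm_demo / _norm_demo.max(0) * 2 - 1  # the column maxima map to exactly 1.0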
iris_dataset = IrisDataSet()
# train data
datas = iris_dataset.datas
labels = iris_dataset.labels
# data wrapper
iterator = DataIterator(datas)
fine_tuning_iterator = DataIterator(datas, labels = labels)
# train autoencoder
# assume the input dimension is input_d
# the network is like input_d -> 4 -> 2 -> 4 -> input_d
autoencoder = AutoEncoder()
# train autoencoder without fine-tuning
print "\ntrain autoencoder without fine-tuning ==========\n"
autoencoder.fit([4, 2], iterator, stacked = True, learning_rate = 0.02, max_epoch = 5000, tied = True, activation = "tanh")
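# Editor's note: read together with the comment block above, [4, 2] appears to give the
# encoder layer sizes (input_d -> 4 -> 2), tied = True presumably reuses the transposed
# encoder weights for the decoder half (2 -> 4 -> input_d), and stacked = True suggests
# layer-wise pre-training; AutoEncoder is this repository's own module, so the exact
# semantics live in autoencoder.py.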
# encode data (without fine-tuning)
encoded_datas = autoencoder.encode(datas)
print "encoder (without fine-tuning) ================"
print encoded_datas
# train autoencoder with fine-tuning
print "\ntrain autoencoder with fine-tuning ==========\n"
autoencoder.fine_tune(fine_tuning_iterator, supervised = True, learning_rate = 0.02, max_epoch = 10000, tied = True)
#autoencoder.fine_tune(fine_tuning_iterator, supervised = False, learning_rate = 0.02, max_epoch = 6000)
# encode data (with fine-tuning)
tuned_encoded_datas = autoencoder.encode(datas)
print "encoder (with fine-tuning)================"
print tuned_encoded_datas
# predict data (based on fine-tuning)
predicted_datas = autoencoder.predict(datas)
print "predicted ================"
print predicted_datas
predicted_labels = predicted_datas.argmax(1)
eval_array = (predicted_labels == labels)
correct_count = len(np.where(eval_array == True)[0])
error_count = len(np.where(eval_array == False)[0])
correct_rate = float(correct_count)/(correct_count + error_count)
error_rate = float(error_count)/(correct_count + error_count)
print "correct: {}({})\terror: {}({})".format(correct_count, "%.2f" % correct_rate, error_count, "%.2f" % error_rate)
autoencoder.close()
#visualize encoded datas
colors = ["red", "green", "blue"]
label_colors = [colors[label_id] for label_id in labels]
fig_3d = plt.figure("original iris data")
plot_3d = fig_3d.add_subplot(111, projection='3d')
plot_3d.scatter(datas[:,0], datas[:,1], datas[:, 2], color = label_colors)
fig_2d = plt.figure("encoded iris data (without fine-tuning)")
plot_2d = fig_2d.add_subplot(111)
plot_2d.scatter(encoded_datas[:,0], encoded_datas[:,1], color = label_colors)
fig_tuned_2d = plt.figure("encoded iris data (with fine-tuning)")
plot_tuned_2d = fig_tuned_2d.add_subplot(111)
plot_tuned_2d.scatter(tuned_encoded_datas[:,0], tuned_encoded_datas[:,1], color = label_colors)
plt.show()
|
gpl-3.0
| -1,081,317,834,420,715,400
| 37.273684
| 123
| 0.681793
| false
| 3.14261
| false
| false
| false
|
platformio/platformio
|
platformio/clients/account.py
|
1
|
9820
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from platformio import __accounts_api__, app
from platformio.clients.rest import RESTClient
from platformio.exception import PlatformioException
class AccountError(PlatformioException):
MESSAGE = "{0}"
class AccountNotAuthorized(AccountError):
MESSAGE = "You are not authorized! Please log in to PIO Account."
class AccountAlreadyAuthorized(AccountError):
MESSAGE = "You are already authorized with {0} account."
class AccountClient(RESTClient): # pylint:disable=too-many-public-methods
SUMMARY_CACHE_TTL = 60 * 60 * 24 * 7
def __init__(self):
super(AccountClient, self).__init__(base_url=__accounts_api__)
@staticmethod
def get_refresh_token():
try:
return app.get_state_item("account").get("auth").get("refresh_token")
except: # pylint:disable=bare-except
raise AccountNotAuthorized()
@staticmethod
def delete_local_session():
app.delete_state_item("account")
@staticmethod
def delete_local_state(key):
account = app.get_state_item("account")
if not account or key not in account:
return
del account[key]
app.set_state_item("account", account)
def send_auth_request(self, *args, **kwargs):
headers = kwargs.get("headers", {})
if "Authorization" not in headers:
token = self.fetch_authentication_token()
headers["Authorization"] = "Bearer %s" % token
kwargs["headers"] = headers
return self.send_request(*args, **kwargs)
def login(self, username, password):
try:
self.fetch_authentication_token()
except: # pylint:disable=bare-except
pass
else:
raise AccountAlreadyAuthorized(
app.get_state_item("account", {}).get("email", "")
)
result = self.send_request(
"post", "/v1/login", data={"username": username, "password": password},
)
app.set_state_item("account", result)
return result
def login_with_code(self, client_id, code, redirect_uri):
try:
self.fetch_authentication_token()
except: # pylint:disable=bare-except
pass
else:
raise AccountAlreadyAuthorized(
app.get_state_item("account", {}).get("email", "")
)
result = self.send_request(
"post",
"/v1/login/code",
data={"client_id": client_id, "code": code, "redirect_uri": redirect_uri},
)
app.set_state_item("account", result)
return result
def logout(self):
refresh_token = self.get_refresh_token()
self.delete_local_session()
try:
self.send_request(
"post", "/v1/logout", data={"refresh_token": refresh_token},
)
except AccountError:
pass
return True
def change_password(self, old_password, new_password):
return self.send_auth_request(
"post",
"/v1/password",
data={"old_password": old_password, "new_password": new_password},
)
def registration(
self, username, email, password, firstname, lastname
): # pylint:disable=too-many-arguments
try:
self.fetch_authentication_token()
except: # pylint:disable=bare-except
pass
else:
raise AccountAlreadyAuthorized(
app.get_state_item("account", {}).get("email", "")
)
return self.send_request(
"post",
"/v1/registration",
data={
"username": username,
"email": email,
"password": password,
"firstname": firstname,
"lastname": lastname,
},
)
def auth_token(self, password, regenerate):
return self.send_auth_request(
"post",
"/v1/token",
data={"password": password, "regenerate": 1 if regenerate else 0},
).get("auth_token")
def forgot_password(self, username):
return self.send_request("post", "/v1/forgot", data={"username": username},)
def get_profile(self):
return self.send_auth_request("get", "/v1/profile",)
def update_profile(self, profile, current_password):
profile["current_password"] = current_password
self.delete_local_state("summary")
response = self.send_auth_request("put", "/v1/profile", data=profile,)
return response
def get_account_info(self, offline=False):
account = app.get_state_item("account") or {}
if (
account.get("summary")
and account["summary"].get("expire_at", 0) > time.time()
):
return account["summary"]
if offline and account.get("email"):
return {
"profile": {
"email": account.get("email"),
"username": account.get("username"),
}
}
result = self.send_auth_request("get", "/v1/summary",)
account["summary"] = dict(
profile=result.get("profile"),
packages=result.get("packages"),
subscriptions=result.get("subscriptions"),
user_id=result.get("user_id"),
expire_at=int(time.time()) + self.SUMMARY_CACHE_TTL,
)
app.set_state_item("account", account)
return result
def destroy_account(self):
return self.send_auth_request("delete", "/v1/account")
def create_org(self, orgname, email, displayname):
return self.send_auth_request(
"post",
"/v1/orgs",
data={"orgname": orgname, "email": email, "displayname": displayname},
)
def get_org(self, orgname):
return self.send_auth_request("get", "/v1/orgs/%s" % orgname)
def list_orgs(self):
return self.send_auth_request("get", "/v1/orgs",)
def update_org(self, orgname, data):
return self.send_auth_request(
"put", "/v1/orgs/%s" % orgname, data={k: v for k, v in data.items() if v}
)
def destroy_org(self, orgname):
return self.send_auth_request("delete", "/v1/orgs/%s" % orgname,)
def add_org_owner(self, orgname, username):
return self.send_auth_request(
"post", "/v1/orgs/%s/owners" % orgname, data={"username": username},
)
def list_org_owners(self, orgname):
return self.send_auth_request("get", "/v1/orgs/%s/owners" % orgname,)
def remove_org_owner(self, orgname, username):
return self.send_auth_request(
"delete", "/v1/orgs/%s/owners" % orgname, data={"username": username},
)
def create_team(self, orgname, teamname, description):
return self.send_auth_request(
"post",
"/v1/orgs/%s/teams" % orgname,
data={"name": teamname, "description": description},
)
def destroy_team(self, orgname, teamname):
return self.send_auth_request(
"delete", "/v1/orgs/%s/teams/%s" % (orgname, teamname),
)
def get_team(self, orgname, teamname):
return self.send_auth_request(
"get", "/v1/orgs/%s/teams/%s" % (orgname, teamname),
)
def list_teams(self, orgname):
return self.send_auth_request("get", "/v1/orgs/%s/teams" % orgname,)
def update_team(self, orgname, teamname, data):
return self.send_auth_request(
"put",
"/v1/orgs/%s/teams/%s" % (orgname, teamname),
data={k: v for k, v in data.items() if v},
)
def add_team_member(self, orgname, teamname, username):
return self.send_auth_request(
"post",
"/v1/orgs/%s/teams/%s/members" % (orgname, teamname),
data={"username": username},
)
def remove_team_member(self, orgname, teamname, username):
return self.send_auth_request(
"delete",
"/v1/orgs/%s/teams/%s/members" % (orgname, teamname),
data={"username": username},
)
def fetch_authentication_token(self):
if os.environ.get("PLATFORMIO_AUTH_TOKEN"):
return os.environ.get("PLATFORMIO_AUTH_TOKEN")
auth = app.get_state_item("account", {}).get("auth", {})
if auth.get("access_token") and auth.get("access_token_expire"):
if auth.get("access_token_expire") > time.time():
return auth.get("access_token")
if auth.get("refresh_token"):
try:
result = self.send_request(
"post",
"/v1/login",
headers={
"Authorization": "Bearer %s" % auth.get("refresh_token")
},
)
app.set_state_item("account", result)
return result.get("auth").get("access_token")
except AccountError:
self.delete_local_session()
raise AccountNotAuthorized()
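# Editor's sketch of the typical call flow (comments only, so importing this module
# stays side-effect free); the method names are the ones defined above, the argument
# values are illustrative:
#
#   client = AccountClient()
#   client.login("someuser", "secret")            # stores the session via app.set_state_item
#   info = client.get_account_info(offline=False) # cached for SUMMARY_CACHE_TTL seconds
#   client.logout()                               # revokes the refresh token, drops the local session
#
# Subsequent authorized calls go through send_auth_request(), which lets
# fetch_authentication_token() transparently refresh an expired access token.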
|
apache-2.0
| -9,000,951,945,673,739,000
| 32.862069
| 86
| 0.563238
| false
| 4.01636
| false
| false
| false
|
KDNT/p2pool-worldcoin-old
|
p2pool/data.py
|
1
|
55789
|
from __future__ import division
import hashlib
import os
import random
import sys
import time
from twisted.python import log
import p2pool
from p2pool.bitcoin import data as bitcoin_data, script, sha256
from p2pool.util import math, forest, pack
# hashlink
hash_link_type = pack.ComposedType([
('state', pack.FixedStrType(32)),
('extra_data', pack.FixedStrType(0)), # bit of a hack, but since the donation script is at the end, const_ending is long enough to always make this empty
('length', pack.VarIntType()),
])
def prefix_to_hash_link(prefix, const_ending=''):
assert prefix.endswith(const_ending), (prefix, const_ending)
x = sha256.sha256(prefix)
return dict(state=x.state, extra_data=x.buf[:max(0, len(x.buf)-len(const_ending))], length=x.length//8)
def check_hash_link(hash_link, data, const_ending=''):
extra_length = hash_link['length'] % (512//8)
assert len(hash_link['extra_data']) == max(0, extra_length - len(const_ending))
extra = (hash_link['extra_data'] + const_ending)[len(hash_link['extra_data']) + len(const_ending) - extra_length:]
assert len(extra) == extra_length
return pack.IntType(256).unpack(hashlib.sha256(sha256.sha256(data, (hash_link['state'], extra, 8*hash_link['length'])).digest()).digest())
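# Editor's note on the hash_link pair above: prefix_to_hash_link() hashes the fixed
# prefix of the generation transaction once and keeps only the SHA-256 midstate
# ('state'), the not-yet-compressed tail bytes ('extra_data') and the byte count
# ('length'); check_hash_link() resumes SHA-256 from that midstate with the remaining
# data and applies the second SHA-256 pass, so a share can commit to the gentx hash
# without shipping the whole prefix. As the in-line comment in hash_link_type notes,
# the donation script at the end keeps 'extra_data' empty in practice.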
# shares
share_type = pack.ComposedType([
('type', pack.VarIntType()),
('contents', pack.VarStrType()),
])
def load_share(share, net, peer_addr):
assert peer_addr is None or isinstance(peer_addr, tuple)
if share['type'] < Share.VERSION:
from p2pool import p2p
raise p2p.PeerMisbehavingError('sent an obsolete share')
elif share['type'] == Share.VERSION:
return Share(net, peer_addr, Share.share_type.unpack(share['contents']))
elif share['type'] == NewShare.VERSION:
return NewShare(net, peer_addr, NewShare.share_type.unpack(share['contents']))
else:
raise ValueError('unknown share type: %r' % (share['type'],))
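# Editor's note: load_share() dispatches purely on the version number -- anything below
# Share.VERSION (9) is rejected as obsolete, 9 unpacks with the legacy Share class, and
# NewShare.VERSION (13) unpacks with NewShare, which adds the absheight/abswork fields
# and widens last_txout_nonce from 32 to 64 bits; any other value is an error.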
DONATION_SCRIPT = '4104ffd03de44a6e11b9917f3a29f9443283d9871c9d743ef30d5eddcd37094b64d1b3d8090496b53256786bf5c82932ec23c3b74d9f05a6f95a8b5529352656664bac'.decode('hex')
class NewShare(object):
VERSION = 13
VOTING_VERSION = 13
SUCCESSOR = None
small_block_header_type = pack.ComposedType([
('version', pack.VarIntType()),
('previous_block', pack.PossiblyNoneType(0, pack.IntType(256))),
('timestamp', pack.IntType(32)),
('bits', bitcoin_data.FloatingIntegerType()),
('nonce', pack.IntType(32)),
])
share_info_type = pack.ComposedType([
('share_data', pack.ComposedType([
('previous_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
('coinbase', pack.VarStrType()),
('nonce', pack.IntType(32)),
('pubkey_hash', pack.IntType(160)),
('subsidy', pack.IntType(64)),
('donation', pack.IntType(16)),
('stale_info', pack.EnumType(pack.IntType(8), dict((k, {0: None, 253: 'orphan', 254: 'doa'}.get(k, 'unk%i' % (k,))) for k in xrange(256)))),
('desired_version', pack.VarIntType()),
])),
('new_transaction_hashes', pack.ListType(pack.IntType(256))),
('transaction_hash_refs', pack.ListType(pack.VarIntType(), 2)), # pairs of share_count, tx_count
('far_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
('max_bits', bitcoin_data.FloatingIntegerType()),
('bits', bitcoin_data.FloatingIntegerType()),
('timestamp', pack.IntType(32)),
('absheight', pack.IntType(32)),
('abswork', pack.IntType(128)),
])
share_type = pack.ComposedType([
('min_header', small_block_header_type),
('share_info', share_info_type),
('ref_merkle_link', pack.ComposedType([
('branch', pack.ListType(pack.IntType(256))),
('index', pack.IntType(0)),
])),
('last_txout_nonce', pack.IntType(64)),
('hash_link', hash_link_type),
('merkle_link', pack.ComposedType([
('branch', pack.ListType(pack.IntType(256))),
('index', pack.IntType(0)), # it will always be 0
])),
])
ref_type = pack.ComposedType([
('identifier', pack.FixedStrType(64//8)),
('share_info', share_info_type),
])
gentx_before_refhash = pack.VarStrType().pack(DONATION_SCRIPT) + pack.IntType(64).pack(0) + pack.VarStrType().pack('\x6a\x28' + pack.IntType(256).pack(0) + pack.IntType(64).pack(0))[:3]
@classmethod
def generate_transaction(cls, tracker, share_data, block_target, desired_timestamp, desired_target, ref_merkle_link, desired_other_transaction_hashes_and_fees, net, known_txs=None, last_txout_nonce=0, base_subsidy=None):
previous_share = tracker.items[share_data['previous_share_hash']] if share_data['previous_share_hash'] is not None else None
height, last = tracker.get_height_and_last(share_data['previous_share_hash'])
assert height >= net.REAL_CHAIN_LENGTH or last is None
if height < net.TARGET_LOOKBEHIND:
pre_target3 = net.MAX_TARGET
else:
attempts_per_second = get_pool_attempts_per_second(tracker, share_data['previous_share_hash'], net.TARGET_LOOKBEHIND, min_work=True, integer=True)
pre_target = 2**256//(net.NEW_SHARE_PERIOD*attempts_per_second) - 1 if attempts_per_second else 2**256-1
pre_target2 = math.clip(pre_target, (previous_share.max_target*9//10, previous_share.max_target*11//10))
pre_target3 = math.clip(pre_target2, (net.MIN_TARGET, net.MAX_TARGET))
max_bits = bitcoin_data.FloatingInteger.from_target_upper_bound(pre_target3)
bits = bitcoin_data.FloatingInteger.from_target_upper_bound(math.clip(desired_target, (pre_target3//30, pre_target3)))
new_transaction_hashes = []
new_transaction_size = 0
transaction_hash_refs = []
other_transaction_hashes = []
past_shares = list(tracker.get_chain(share_data['previous_share_hash'], min(height, 100)))
tx_hash_to_this = {}
for i, share in enumerate(past_shares):
for j, tx_hash in enumerate(share.new_transaction_hashes):
if tx_hash not in tx_hash_to_this:
tx_hash_to_this[tx_hash] = [1+i, j] # share_count, tx_count
for tx_hash, fee in desired_other_transaction_hashes_and_fees:
if tx_hash in tx_hash_to_this:
this = tx_hash_to_this[tx_hash]
else:
if known_txs is not None:
this_size = bitcoin_data.tx_type.packed_size(known_txs[tx_hash])
if new_transaction_size + this_size > 50000: # only allow 50 kB of new txns/share
break
new_transaction_size += this_size
new_transaction_hashes.append(tx_hash)
this = [0, len(new_transaction_hashes)-1]
transaction_hash_refs.extend(this)
other_transaction_hashes.append(tx_hash)
included_transactions = set(other_transaction_hashes)
removed_fees = [fee for tx_hash, fee in desired_other_transaction_hashes_and_fees if tx_hash not in included_transactions]
definite_fees = sum(0 if fee is None else fee for tx_hash, fee in desired_other_transaction_hashes_and_fees if tx_hash in included_transactions)
if None not in removed_fees:
share_data = dict(share_data, subsidy=share_data['subsidy'] - sum(removed_fees))
else:
assert base_subsidy is not None
share_data = dict(share_data, subsidy=base_subsidy + definite_fees)
weights, total_weight, donation_weight = tracker.get_cumulative_weights(previous_share.share_data['previous_share_hash'] if previous_share is not None else None,
min(height, net.REAL_CHAIN_LENGTH-1),
65535*net.NEW_SPREAD*bitcoin_data.target_to_average_attempts(block_target),
)
assert total_weight == sum(weights.itervalues()) + donation_weight, (total_weight, sum(weights.itervalues()) + donation_weight)
amounts = dict((script, share_data['subsidy']*(199*weight)//(200*total_weight)) for script, weight in weights.iteritems()) # 99.5% goes according to weights prior to this share
this_script = bitcoin_data.pubkey_hash_to_script2(share_data['pubkey_hash'])
amounts[this_script] = amounts.get(this_script, 0) + share_data['subsidy']//200 # 0.5% goes to block finder
amounts[DONATION_SCRIPT] = amounts.get(DONATION_SCRIPT, 0) + share_data['subsidy'] - sum(amounts.itervalues()) # all that's left over is the donation weight and some extra satoshis due to rounding
if sum(amounts.itervalues()) != share_data['subsidy'] or any(x < 0 for x in amounts.itervalues()):
raise ValueError()
dests = sorted(amounts.iterkeys(), key=lambda script: (script == DONATION_SCRIPT, amounts[script], script))[-4000:] # block length limit, unlikely to ever be hit
share_info = dict(
share_data=share_data,
far_share_hash=None if last is None and height < 99 else tracker.get_nth_parent_hash(share_data['previous_share_hash'], 99),
max_bits=max_bits,
bits=bits,
timestamp=math.clip(desired_timestamp, (
(previous_share.timestamp + net.NEW_SHARE_PERIOD) - (net.NEW_SHARE_PERIOD - 1), # = previous_share.timestamp + 1
(previous_share.timestamp + net.NEW_SHARE_PERIOD) + (net.NEW_SHARE_PERIOD - 1),
)) if previous_share is not None else desired_timestamp,
new_transaction_hashes=new_transaction_hashes,
transaction_hash_refs=transaction_hash_refs,
absheight=((previous_share.absheight if previous_share is not None else 0) + 1) % 2**32,
abswork=((previous_share.abswork if previous_share is not None else 0) + bitcoin_data.target_to_average_attempts(bits.target)) % 2**128,
)
gentx = dict(
version=1,
tx_ins=[dict(
previous_output=None,
sequence=None,
script=share_data['coinbase'],
)],
tx_outs=[dict(value=amounts[script], script=script) for script in dests if amounts[script] or script == DONATION_SCRIPT] + [dict(
value=0,
script='\x6a\x28' + cls.get_ref_hash(net, share_info, ref_merkle_link) + pack.IntType(64).pack(last_txout_nonce),
)],
lock_time=0,
)
def get_share(header, last_txout_nonce=last_txout_nonce):
min_header = dict(header); del min_header['merkle_root']
share = cls(net, None, dict(
min_header=min_header,
share_info=share_info,
ref_merkle_link=dict(branch=[], index=0),
last_txout_nonce=(last_txout_nonce%2**32*2**32)|(last_txout_nonce>>32), # XXX
hash_link=prefix_to_hash_link(bitcoin_data.tx_type.pack(gentx)[:-32-8-4], cls.gentx_before_refhash),
merkle_link=bitcoin_data.calculate_merkle_link([None] + other_transaction_hashes, 0),
))
assert share.header == header # checks merkle_root
return share
return share_info, gentx, other_transaction_hashes, get_share
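# Editor's worked example for the payout split computed above (illustrative numbers):
# with a subsidy of 200000 units and a script holding 10% of total_weight, the weight
# loop pays it 200000*199*10//(200*100) = 19900 units (99.5% of the subsidy is shared
# by weight), the share's own pubkey_hash additionally receives 200000//200 = 1000
# units as the 0.5% finder bonus, and the donation weight plus any rounding dust ends
# up in the DONATION_SCRIPT output.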
@classmethod
def get_ref_hash(cls, net, share_info, ref_merkle_link):
return pack.IntType(256).pack(bitcoin_data.check_merkle_link(bitcoin_data.hash256(cls.ref_type.pack(dict(
identifier=net.IDENTIFIER,
share_info=share_info,
))), ref_merkle_link))
__slots__ = 'net peer_addr contents min_header share_info hash_link merkle_link hash share_data max_target target timestamp previous_hash new_script desired_version gentx_hash header pow_hash header_hash new_transaction_hashes time_seen absheight abswork'.split(' ')
def __init__(self, net, peer_addr, contents):
self.net = net
self.peer_addr = peer_addr
self.contents = contents
self.min_header = contents['min_header']
self.share_info = contents['share_info']
self.hash_link = contents['hash_link']
self.merkle_link = contents['merkle_link']
if not (2 <= len(self.share_info['share_data']['coinbase']) <= 100):
raise ValueError('''bad coinbase size! %i bytes''' % (len(self.share_info['share_data']['coinbase']),))
if len(self.merkle_link['branch']) > 16:
raise ValueError('merkle branch too long!')
assert not self.hash_link['extra_data'], repr(self.hash_link['extra_data'])
self.share_data = self.share_info['share_data']
self.max_target = self.share_info['max_bits'].target
self.target = self.share_info['bits'].target
self.timestamp = self.share_info['timestamp']
self.previous_hash = self.share_data['previous_share_hash']
self.new_script = bitcoin_data.pubkey_hash_to_script2(self.share_data['pubkey_hash'])
self.desired_version = self.share_data['desired_version']
self.absheight = self.share_info['absheight']
self.abswork = self.share_info['abswork']
n = set()
for share_count, tx_count in self.iter_transaction_hash_refs():
assert share_count < 110
if share_count == 0:
n.add(tx_count)
assert n == set(range(len(self.share_info['new_transaction_hashes'])))
self.gentx_hash = check_hash_link(
self.hash_link,
self.get_ref_hash(net, self.share_info, contents['ref_merkle_link']) + pack.IntType(64).pack(self.contents['last_txout_nonce']) + pack.IntType(32).pack(0),
self.gentx_before_refhash,
)
merkle_root = bitcoin_data.check_merkle_link(self.gentx_hash, self.merkle_link)
self.header = dict(self.min_header, merkle_root=merkle_root)
self.pow_hash = net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(self.header))
self.hash = self.header_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(self.header))
if self.target > net.MAX_TARGET:
from p2pool import p2p
raise p2p.PeerMisbehavingError('share target invalid')
if self.pow_hash > self.target:
from p2pool import p2p
raise p2p.PeerMisbehavingError('share PoW invalid')
self.new_transaction_hashes = self.share_info['new_transaction_hashes']
# XXX eww
self.time_seen = time.time()
def __repr__(self):
return 'Share' + repr((self.net, self.peer_addr, self.contents))
def as_share(self):
return dict(type=self.VERSION, contents=self.share_type.pack(self.contents))
def iter_transaction_hash_refs(self):
return zip(self.share_info['transaction_hash_refs'][::2], self.share_info['transaction_hash_refs'][1::2])
def check(self, tracker):
from p2pool import p2p
if self.share_data['previous_share_hash'] is not None:
previous_share = tracker.items[self.share_data['previous_share_hash']]
if type(self) is type(previous_share):
pass
elif type(self) is type(previous_share).SUCCESSOR:
if tracker.get_height(previous_share.hash) < self.net.CHAIN_LENGTH:
from p2pool import p2p
raise p2p.PeerMisbehavingError('switch without enough history')
# switch only valid if 85% of hashes in [self.net.CHAIN_LENGTH*9//10, self.net.CHAIN_LENGTH] for new version
counts = get_desired_version_counts(tracker,
tracker.get_nth_parent_hash(previous_share.hash, self.net.CHAIN_LENGTH*9//10), self.net.CHAIN_LENGTH//10)
if counts.get(self.VERSION, 0) < sum(counts.itervalues())*85//100:
raise p2p.PeerMisbehavingError('switch without enough hash power upgraded')
else:
raise p2p.PeerMisbehavingError('''%s can't follow %s''' % (type(self).__name__, type(previous_share).__name__))
other_tx_hashes = [tracker.items[tracker.get_nth_parent_hash(self.hash, share_count)].share_info['new_transaction_hashes'][tx_count] for share_count, tx_count in self.iter_transaction_hash_refs()]
share_info, gentx, other_tx_hashes2, get_share = self.generate_transaction(tracker, self.share_info['share_data'], self.header['bits'].target, self.share_info['timestamp'], self.share_info['bits'].target, self.contents['ref_merkle_link'], [(h, None) for h in other_tx_hashes], self.net, last_txout_nonce=self.contents['last_txout_nonce'])
assert other_tx_hashes2 == other_tx_hashes
if share_info != self.share_info:
raise ValueError('share_info invalid')
if bitcoin_data.hash256(bitcoin_data.tx_type.pack(gentx)) != self.gentx_hash:
raise ValueError('''gentx doesn't match hash_link''')
if bitcoin_data.calculate_merkle_link([None] + other_tx_hashes, 0) != self.merkle_link:
raise ValueError('merkle_link and other_tx_hashes do not match')
return gentx # only used by as_block
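# Editor's note on the version-switch rule enforced in check() above: a share of this
# class may follow a share of the previous class only when that class names it as
# SUCCESSOR, the predecessor already has at least CHAIN_LENGTH of history, and -- per
# the in-line comment -- at least 85% of the desired_version votes in the
# CHAIN_LENGTH//10 shares sampled CHAIN_LENGTH*9//10 back are already at the new
# VERSION; otherwise the peer is treated as misbehaving.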
def get_other_tx_hashes(self, tracker):
parents_needed = max(share_count for share_count, tx_count in self.iter_transaction_hash_refs()) if self.share_info['transaction_hash_refs'] else 0
parents = tracker.get_height(self.hash) - 1
if parents < parents_needed:
return None
last_shares = list(tracker.get_chain(self.hash, parents_needed + 1))
return [last_shares[share_count].share_info['new_transaction_hashes'][tx_count] for share_count, tx_count in self.iter_transaction_hash_refs()]
def _get_other_txs(self, tracker, known_txs):
other_tx_hashes = self.get_other_tx_hashes(tracker)
if other_tx_hashes is None:
return None # not all parents present
if not all(tx_hash in known_txs for tx_hash in other_tx_hashes):
return None # not all txs present
return [known_txs[tx_hash] for tx_hash in other_tx_hashes]
def should_punish_reason(self, previous_block, bits, tracker, known_txs):
if (self.header['previous_block'], self.header['bits']) != (previous_block, bits) and self.header_hash != previous_block and self.peer_addr is not None:
return True, 'Block-stale detected! height(%x) < height(%x) or %08x != %08x' % (self.header['previous_block'], previous_block, self.header['bits'].bits, bits.bits)
if self.pow_hash <= self.header['bits'].target:
return -1, 'block solution'
other_txs = self._get_other_txs(tracker, known_txs)
if other_txs is None:
pass
else:
all_txs_size = sum(bitcoin_data.tx_type.packed_size(tx) for tx in other_txs)
if all_txs_size > 1000000:
return True, 'txs over block size limit'
new_txs_size = sum(bitcoin_data.tx_type.packed_size(known_txs[tx_hash]) for tx_hash in self.share_info['new_transaction_hashes'])
if new_txs_size > 50000:
return True, 'new txs over limit'
return False, None
def as_block(self, tracker, known_txs):
other_txs = self._get_other_txs(tracker, known_txs)
if other_txs is None:
return None # not all txs present
return dict(header=self.header, txs=[self.check(tracker)] + other_txs)
class Share(object):
VERSION = 9
VOTING_VERSION = 11
SUCCESSOR = NewShare
absheight = abswork = 0
small_block_header_type = pack.ComposedType([
('version', pack.VarIntType()),
('previous_block', pack.PossiblyNoneType(0, pack.IntType(256))),
('timestamp', pack.IntType(32)),
('bits', bitcoin_data.FloatingIntegerType()),
('nonce', pack.IntType(32)),
])
share_info_type = pack.ComposedType([
('share_data', pack.ComposedType([
('previous_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
('coinbase', pack.VarStrType()),
('nonce', pack.IntType(32)),
('pubkey_hash', pack.IntType(160)),
('subsidy', pack.IntType(64)),
('donation', pack.IntType(16)),
('stale_info', pack.EnumType(pack.IntType(8), dict((k, {0: None, 253: 'orphan', 254: 'doa'}.get(k, 'unk%i' % (k,))) for k in xrange(256)))),
('desired_version', pack.VarIntType()),
])),
('new_transaction_hashes', pack.ListType(pack.IntType(256))),
('transaction_hash_refs', pack.ListType(pack.VarIntType(), 2)), # pairs of share_count, tx_count
('far_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
('max_bits', bitcoin_data.FloatingIntegerType()),
('bits', bitcoin_data.FloatingIntegerType()),
('timestamp', pack.IntType(32)),
])
share_type = pack.ComposedType([
('min_header', small_block_header_type),
('share_info', share_info_type),
('ref_merkle_link', pack.ComposedType([
('branch', pack.ListType(pack.IntType(256))),
('index', pack.IntType(0)),
])),
('last_txout_nonce', pack.IntType(32)),
('hash_link', hash_link_type),
('merkle_link', pack.ComposedType([
('branch', pack.ListType(pack.IntType(256))),
('index', pack.IntType(0)), # it will always be 0
])),
])
ref_type = pack.ComposedType([
('identifier', pack.FixedStrType(64//8)),
('share_info', share_info_type),
])
gentx_before_refhash = pack.VarStrType().pack(DONATION_SCRIPT) + pack.IntType(64).pack(0) + pack.VarStrType().pack('\x24' + pack.IntType(256).pack(0) + pack.IntType(32).pack(0))[:2]
@classmethod
def generate_transaction(cls, tracker, share_data, block_target, desired_timestamp, desired_target, ref_merkle_link, desired_other_transaction_hashes_and_fees, net, known_txs=None, last_txout_nonce=0, base_subsidy=None):
previous_share = tracker.items[share_data['previous_share_hash']] if share_data['previous_share_hash'] is not None else None
height, last = tracker.get_height_and_last(share_data['previous_share_hash'])
assert height >= net.REAL_CHAIN_LENGTH or last is None
if height < net.TARGET_LOOKBEHIND:
pre_target3 = net.MAX_TARGET
else:
attempts_per_second = get_pool_attempts_per_second(tracker, share_data['previous_share_hash'], net.TARGET_LOOKBEHIND, min_work=True, integer=True)
pre_target = 2**256//(net.SHARE_PERIOD*attempts_per_second) - 1 if attempts_per_second else 2**256-1
pre_target2 = math.clip(pre_target, (previous_share.max_target*9//10, previous_share.max_target*11//10))
pre_target3 = math.clip(pre_target2, (net.MIN_TARGET, net.MAX_TARGET))
max_bits = bitcoin_data.FloatingInteger.from_target_upper_bound(pre_target3)
bits = bitcoin_data.FloatingInteger.from_target_upper_bound(math.clip(desired_target, (pre_target3//10, pre_target3)))
new_transaction_hashes = []
new_transaction_size = 0
transaction_hash_refs = []
other_transaction_hashes = []
past_shares = list(tracker.get_chain(share_data['previous_share_hash'], min(height, 100)))
tx_hash_to_this = {}
for i, share in enumerate(past_shares):
for j, tx_hash in enumerate(share.new_transaction_hashes):
if tx_hash not in tx_hash_to_this:
tx_hash_to_this[tx_hash] = [1+i, j] # share_count, tx_count
for tx_hash, fee in desired_other_transaction_hashes_and_fees:
if tx_hash in tx_hash_to_this:
this = tx_hash_to_this[tx_hash]
else:
if known_txs is not None:
this_size = bitcoin_data.tx_type.packed_size(known_txs[tx_hash])
if new_transaction_size + this_size > 50000: # only allow 50 kB of new txns/share
break
new_transaction_size += this_size
new_transaction_hashes.append(tx_hash)
this = [0, len(new_transaction_hashes)-1]
transaction_hash_refs.extend(this)
other_transaction_hashes.append(tx_hash)
included_transactions = set(other_transaction_hashes)
removed_fees = [fee for tx_hash, fee in desired_other_transaction_hashes_and_fees if tx_hash not in included_transactions]
definite_fees = sum(0 if fee is None else fee for tx_hash, fee in desired_other_transaction_hashes_and_fees if tx_hash in included_transactions)
if None not in removed_fees:
share_data = dict(share_data, subsidy=share_data['subsidy'] - sum(removed_fees))
else:
assert base_subsidy is not None
share_data = dict(share_data, subsidy=base_subsidy + definite_fees)
weights, total_weight, donation_weight = tracker.get_cumulative_weights(share_data['previous_share_hash'],
min(height, net.REAL_CHAIN_LENGTH),
65535*net.SPREAD*bitcoin_data.target_to_average_attempts(block_target),
)
assert total_weight == sum(weights.itervalues()) + donation_weight, (total_weight, sum(weights.itervalues()) + donation_weight)
amounts = dict((script, share_data['subsidy']*(199*weight)//(200*total_weight)) for script, weight in weights.iteritems()) # 99.5% goes according to weights prior to this share
this_script = bitcoin_data.pubkey_hash_to_script2(share_data['pubkey_hash'])
amounts[this_script] = amounts.get(this_script, 0) + share_data['subsidy']//200 # 0.5% goes to block finder
amounts[DONATION_SCRIPT] = amounts.get(DONATION_SCRIPT, 0) + share_data['subsidy'] - sum(amounts.itervalues()) # all that's left over is the donation weight and some extra satoshis due to rounding
if sum(amounts.itervalues()) != share_data['subsidy'] or any(x < 0 for x in amounts.itervalues()):
raise ValueError()
dests = sorted(amounts.iterkeys(), key=lambda script: (script == DONATION_SCRIPT, amounts[script], script))[-4000:] # block length limit, unlikely to ever be hit
share_info = dict(
share_data=share_data,
far_share_hash=None if last is None and height < 99 else tracker.get_nth_parent_hash(share_data['previous_share_hash'], 99),
max_bits=max_bits,
bits=bits,
timestamp=math.clip(desired_timestamp, (
(previous_share.timestamp + net.SHARE_PERIOD) - (net.SHARE_PERIOD - 1), # = previous_share.timestamp + 1
(previous_share.timestamp + net.SHARE_PERIOD) + (net.SHARE_PERIOD - 1),
)) if previous_share is not None else desired_timestamp,
new_transaction_hashes=new_transaction_hashes,
transaction_hash_refs=transaction_hash_refs,
)
gentx = dict(
version=1,
tx_ins=[dict(
previous_output=None,
sequence=None,
script=share_data['coinbase'],
)],
tx_outs=[dict(value=amounts[script], script=script) for script in dests if amounts[script] or script == DONATION_SCRIPT] + [dict(
value=0,
script='\x24' + cls.get_ref_hash(net, share_info, ref_merkle_link) + pack.IntType(32).pack(last_txout_nonce),
)],
lock_time=0,
)
def get_share(header, last_txout_nonce=last_txout_nonce):
min_header = dict(header); del min_header['merkle_root']
share = cls(net, None, dict(
min_header=min_header,
share_info=share_info,
ref_merkle_link=dict(branch=[], index=0),
last_txout_nonce=last_txout_nonce,
hash_link=prefix_to_hash_link(bitcoin_data.tx_type.pack(gentx)[:-32-4-4], cls.gentx_before_refhash),
merkle_link=bitcoin_data.calculate_merkle_link([None] + other_transaction_hashes, 0),
))
assert share.header == header # checks merkle_root
return share
return share_info, gentx, other_transaction_hashes, get_share
@classmethod
def get_ref_hash(cls, net, share_info, ref_merkle_link):
return pack.IntType(256).pack(bitcoin_data.check_merkle_link(bitcoin_data.hash256(cls.ref_type.pack(dict(
identifier=net.IDENTIFIER,
share_info=share_info,
))), ref_merkle_link))
__slots__ = 'net peer_addr contents min_header share_info hash_link merkle_link hash share_data max_target target timestamp previous_hash new_script desired_version gentx_hash header pow_hash header_hash new_transaction_hashes time_seen'.split(' ')
def __init__(self, net, peer_addr, contents):
self.net = net
self.peer_addr = peer_addr
self.contents = contents
self.min_header = contents['min_header']
self.share_info = contents['share_info']
self.hash_link = contents['hash_link']
self.merkle_link = contents['merkle_link']
if not (2 <= len(self.share_info['share_data']['coinbase']) <= 100):
raise ValueError('''bad coinbase size! %i bytes''' % (len(self.share_info['share_data']['coinbase']),))
if len(self.merkle_link['branch']) > 16:
raise ValueError('merkle branch too long!')
assert not self.hash_link['extra_data'], repr(self.hash_link['extra_data'])
self.share_data = self.share_info['share_data']
self.max_target = self.share_info['max_bits'].target
self.target = self.share_info['bits'].target
self.timestamp = self.share_info['timestamp']
self.previous_hash = self.share_data['previous_share_hash']
self.new_script = bitcoin_data.pubkey_hash_to_script2(self.share_data['pubkey_hash'])
self.desired_version = self.share_data['desired_version']
n = set()
for share_count, tx_count in self.iter_transaction_hash_refs():
assert share_count < 110
if share_count == 0:
n.add(tx_count)
assert n == set(range(len(self.share_info['new_transaction_hashes'])))
self.gentx_hash = check_hash_link(
self.hash_link,
self.get_ref_hash(net, self.share_info, contents['ref_merkle_link']) + pack.IntType(32).pack(self.contents['last_txout_nonce']) + pack.IntType(32).pack(0),
self.gentx_before_refhash,
)
merkle_root = bitcoin_data.check_merkle_link(self.gentx_hash, self.merkle_link)
self.header = dict(self.min_header, merkle_root=merkle_root)
self.pow_hash = net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(self.header))
self.hash = self.header_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(self.header))
if self.target > net.MAX_TARGET:
from p2pool import p2p
raise p2p.PeerMisbehavingError('share target invalid')
if self.pow_hash > self.target:
from p2pool import p2p
raise p2p.PeerMisbehavingError('share PoW invalid')
self.new_transaction_hashes = self.share_info['new_transaction_hashes']
# XXX eww
self.time_seen = time.time()
def __repr__(self):
return 'Share' + repr((self.net, self.peer_addr, self.contents))
def as_share(self):
return dict(type=self.VERSION, contents=self.share_type.pack(self.contents))
def iter_transaction_hash_refs(self):
return zip(self.share_info['transaction_hash_refs'][::2], self.share_info['transaction_hash_refs'][1::2])
def check(self, tracker):
from p2pool import p2p
if self.share_data['previous_share_hash'] is not None:
previous_share = tracker.items[self.share_data['previous_share_hash']]
if type(self) is type(previous_share):
pass
elif type(self) is type(previous_share).SUCCESSOR:
if tracker.get_height(previous_share.hash) < self.net.CHAIN_LENGTH:
from p2pool import p2p
raise p2p.PeerMisbehavingError('switch without enough history')
# switch only valid if 85% of hashes in [self.net.CHAIN_LENGTH*9//10, self.net.CHAIN_LENGTH] for new version
counts = get_desired_version_counts(tracker,
tracker.get_nth_parent_hash(previous_share.hash, self.net.CHAIN_LENGTH*9//10), self.net.CHAIN_LENGTH//10)
if counts.get(self.VERSION, 0) < sum(counts.itervalues())*85//100:
raise p2p.PeerMisbehavingError('switch without enough hash power upgraded')
else:
raise p2p.PeerMisbehavingError('''%s can't follow %s''' % (type(self).__name__, type(previous_share).__name__))
other_tx_hashes = [tracker.items[tracker.get_nth_parent_hash(self.hash, share_count)].share_info['new_transaction_hashes'][tx_count] for share_count, tx_count in self.iter_transaction_hash_refs()]
share_info, gentx, other_tx_hashes2, get_share = self.generate_transaction(tracker, self.share_info['share_data'], self.header['bits'].target, self.share_info['timestamp'], self.share_info['bits'].target, self.contents['ref_merkle_link'], [(h, None) for h in other_tx_hashes], self.net, last_txout_nonce=self.contents['last_txout_nonce'])
assert other_tx_hashes2 == other_tx_hashes
if share_info != self.share_info:
raise ValueError('share_info invalid')
if bitcoin_data.hash256(bitcoin_data.tx_type.pack(gentx)) != self.gentx_hash:
raise ValueError('''gentx doesn't match hash_link''')
if bitcoin_data.calculate_merkle_link([None] + other_tx_hashes, 0) != self.merkle_link:
raise ValueError('merkle_link and other_tx_hashes do not match')
return gentx # only used by as_block
def get_other_tx_hashes(self, tracker):
parents_needed = max(share_count for share_count, tx_count in self.iter_transaction_hash_refs()) if self.share_info['transaction_hash_refs'] else 0
parents = tracker.get_height(self.hash) - 1
if parents < parents_needed:
return None
last_shares = list(tracker.get_chain(self.hash, parents_needed + 1))
return [last_shares[share_count].share_info['new_transaction_hashes'][tx_count] for share_count, tx_count in self.iter_transaction_hash_refs()]
def _get_other_txs(self, tracker, known_txs):
other_tx_hashes = self.get_other_tx_hashes(tracker)
if other_tx_hashes is None:
return None # not all parents present
if not all(tx_hash in known_txs for tx_hash in other_tx_hashes):
return None # not all txs present
return [known_txs[tx_hash] for tx_hash in other_tx_hashes]
def should_punish_reason(self, previous_block, bits, tracker, known_txs):
if (self.header['previous_block'], self.header['bits']) != (previous_block, bits) and self.header_hash != previous_block and self.peer_addr is not None:
return True, 'Block-stale detected! %x < %x' % (self.header['previous_block'], previous_block)
if self.pow_hash <= self.header['bits'].target:
return -1, 'block solution'
other_txs = self._get_other_txs(tracker, known_txs)
if other_txs is None:
if self.time_seen != 0: # ignore if loaded from ShareStore
return True, 'not all txs present'
else:
all_txs_size = sum(bitcoin_data.tx_type.packed_size(tx) for tx in other_txs)
if all_txs_size > 1000000:
return True, 'txs over block size limit'
new_txs_size = sum(bitcoin_data.tx_type.packed_size(known_txs[tx_hash]) for tx_hash in self.share_info['new_transaction_hashes'])
if new_txs_size > 50000:
return True, 'new txs over limit'
return False, None
def as_block(self, tracker, known_txs):
other_txs = self._get_other_txs(tracker, known_txs)
if other_txs is None:
return None # not all txs present
return dict(header=self.header, txs=[self.check(tracker)] + other_txs)
class WeightsSkipList(forest.TrackerSkipList):
# share_count, weights, total_weight
def get_delta(self, element):
from p2pool.bitcoin import data as bitcoin_data
share = self.tracker.items[element]
att = bitcoin_data.target_to_average_attempts(share.target)
return 1, {share.new_script: att*(65535-share.share_data['donation'])}, att*65535, att*share.share_data['donation']
def combine_deltas(self, (share_count1, weights1, total_weight1, total_donation_weight1), (share_count2, weights2, total_weight2, total_donation_weight2)):
return share_count1 + share_count2, math.add_dicts(weights1, weights2), total_weight1 + total_weight2, total_donation_weight1 + total_donation_weight2
def initial_solution(self, start, (max_shares, desired_weight)):
assert desired_weight % 65535 == 0, divmod(desired_weight, 65535)
return 0, None, 0, 0
def apply_delta(self, (share_count1, weights_list, total_weight1, total_donation_weight1), (share_count2, weights2, total_weight2, total_donation_weight2), (max_shares, desired_weight)):
if total_weight1 + total_weight2 > desired_weight and share_count2 == 1:
assert (desired_weight - total_weight1) % 65535 == 0
script, = weights2.iterkeys()
new_weights = {script: (desired_weight - total_weight1)//65535*weights2[script]//(total_weight2//65535)}
return share_count1 + share_count2, (weights_list, new_weights), desired_weight, total_donation_weight1 + (desired_weight - total_weight1)//65535*total_donation_weight2//(total_weight2//65535)
return share_count1 + share_count2, (weights_list, weights2), total_weight1 + total_weight2, total_donation_weight1 + total_donation_weight2
def judge(self, (share_count, weights_list, total_weight, total_donation_weight), (max_shares, desired_weight)):
if share_count > max_shares or total_weight > desired_weight:
return 1
elif share_count == max_shares or total_weight == desired_weight:
return 0
else:
return -1
def finalize(self, (share_count, weights_list, total_weight, total_donation_weight), (max_shares, desired_weight)):
assert share_count <= max_shares and total_weight <= desired_weight
assert share_count == max_shares or total_weight == desired_weight
return math.add_dicts(*math.flatten_linked_list(weights_list)), total_weight, total_donation_weight
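# Editor's note: WeightsSkipList backs get_cumulative_weights(), used by
# generate_transaction() above for the pay-per-last-N-work style payout -- each share
# contributes att*(65535-donation) to its payout script and att*donation to the
# donation bucket (att = average attempts for its target), and the skip list lets the
# tracker sum those contributions over "up to max_shares shares or desired_weight
# work" without walking the whole chain; apply_delta() scales the final share pro
# rata when the weight target would otherwise be overshot.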
class OkayTracker(forest.Tracker):
def __init__(self, net):
forest.Tracker.__init__(self, delta_type=forest.get_attributedelta_type(dict(forest.AttributeDelta.attrs,
work=lambda share: bitcoin_data.target_to_average_attempts(share.target),
min_work=lambda share: bitcoin_data.target_to_average_attempts(share.max_target),
)))
self.net = net
self.verified = forest.SubsetTracker(delta_type=forest.get_attributedelta_type(dict(forest.AttributeDelta.attrs,
work=lambda share: bitcoin_data.target_to_average_attempts(share.target),
)), subset_of=self)
self.get_cumulative_weights = WeightsSkipList(self)
def attempt_verify(self, share):
if share.hash in self.verified.items:
return True
height, last = self.get_height_and_last(share.hash)
if height < self.net.CHAIN_LENGTH + 1 and last is not None:
raise AssertionError()
try:
share.check(self)
except:
log.err(None, 'Share check failed:')
return False
else:
self.verified.add(share)
return True
def think(self, block_rel_height_func, previous_block, bits, known_txs):
desired = set()
# O(len(self.heads))
# make 'unverified heads' set?
# for each overall head, attempt verification
# if it fails, attempt on parent, and repeat
# if no successful verification because of lack of parents, request parent
bads = set()
for head in set(self.heads) - set(self.verified.heads):
head_height, last = self.get_height_and_last(head)
for share in self.get_chain(head, head_height if last is None else min(5, max(0, head_height - self.net.CHAIN_LENGTH))):
if self.attempt_verify(share):
break
if share.hash in self.heads:
bads.add(share.hash)
else:
if last is not None:
desired.add((
self.items[random.choice(list(self.reverse[last]))].peer_addr,
last,
max(x.timestamp for x in self.get_chain(head, min(head_height, 5))),
min(x.target for x in self.get_chain(head, min(head_height, 5))),
))
for bad in bads:
assert bad not in self.verified.items
assert bad in self.heads
if p2pool.DEBUG:
print "BAD", bad
self.remove(bad)
# try to get at least CHAIN_LENGTH height for each verified head, requesting parents if needed
for head in list(self.verified.heads):
head_height, last_hash = self.verified.get_height_and_last(head)
last_height, last_last_hash = self.get_height_and_last(last_hash)
# XXX review boundary conditions
want = max(self.net.CHAIN_LENGTH - head_height, 0)
can = max(last_height - 1 - self.net.CHAIN_LENGTH, 0) if last_last_hash is not None else last_height
get = min(want, can)
#print 'Z', head_height, last_hash is None, last_height, last_last_hash is None, want, can, get
for share in self.get_chain(last_hash, get):
if not self.attempt_verify(share):
break
if head_height < self.net.CHAIN_LENGTH and last_last_hash is not None:
desired.add((
self.items[random.choice(list(self.verified.reverse[last_hash]))].peer_addr,
last_last_hash,
max(x.timestamp for x in self.get_chain(head, min(head_height, 5))),
min(x.target for x in self.get_chain(head, min(head_height, 5))),
))
# decide best tree
decorated_tails = sorted((self.score(max(self.verified.tails[tail_hash], key=self.verified.get_work), block_rel_height_func), tail_hash) for tail_hash in self.verified.tails)
if p2pool.DEBUG:
print len(decorated_tails), 'tails:'
for score, tail_hash in decorated_tails:
print format_hash(tail_hash), score
best_tail_score, best_tail = decorated_tails[-1] if decorated_tails else (None, None)
# decide best verified head
decorated_heads = sorted(((
self.verified.get_work(self.verified.get_nth_parent_hash(h, min(5, self.verified.get_height(h)))),
#self.items[h].peer_addr is None,
-self.items[h].should_punish_reason(previous_block, bits, self, known_txs)[0],
-self.items[h].time_seen,
), h) for h in self.verified.tails.get(best_tail, []))
if p2pool.DEBUG:
print len(decorated_heads), 'heads. Top 10:'
for score, head_hash in decorated_heads[-10:]:
print ' ', format_hash(head_hash), format_hash(self.items[head_hash].previous_hash), score
best_head_score, best = decorated_heads[-1] if decorated_heads else (None, None)
if best is not None:
best_share = self.items[best]
punish, punish_reason = best_share.should_punish_reason(previous_block, bits, self, known_txs)
if punish > 0:
print 'Punishing share for %r! Jumping from %s to %s!' % (punish_reason, format_hash(best), format_hash(best_share.previous_hash))
best = best_share.previous_hash
timestamp_cutoff = min(int(time.time()), best_share.timestamp) - 3600
target_cutoff = int(2**256//(self.net.SHARE_PERIOD*best_tail_score[1] + 1) * 2 + .5) if best_tail_score[1] is not None else 2**256-1
else:
timestamp_cutoff = int(time.time()) - 24*60*60
target_cutoff = 2**256-1
if p2pool.DEBUG:
print 'Desire %i shares. Cutoff: %s old diff>%.2f' % (len(desired), math.format_dt(time.time() - timestamp_cutoff), bitcoin_data.target_to_difficulty(target_cutoff))
for peer_addr, hash, ts, targ in desired:
print ' ', None if peer_addr is None else '%s:%i' % peer_addr, format_hash(hash), math.format_dt(time.time() - ts), bitcoin_data.target_to_difficulty(targ), ts >= timestamp_cutoff, targ <= target_cutoff
return best, [(peer_addr, hash) for peer_addr, hash, ts, targ in desired if ts >= timestamp_cutoff], decorated_heads
def score(self, share_hash, block_rel_height_func):
# returns approximate lower bound on chain's hashrate in the last self.net.CHAIN_LENGTH*15//16*self.net.SHARE_PERIOD time
head_height = self.verified.get_height(share_hash)
if head_height < self.net.CHAIN_LENGTH:
return head_height, None
end_point = self.verified.get_nth_parent_hash(share_hash, self.net.CHAIN_LENGTH*15//16)
block_height = max(block_rel_height_func(share.header['previous_block']) for share in
self.verified.get_chain(end_point, self.net.CHAIN_LENGTH//16))
return self.net.CHAIN_LENGTH, self.verified.get_delta(share_hash, end_point).work/((0 - block_height + 1)*self.net.PARENT.BLOCK_PERIOD)
def get_pool_attempts_per_second(tracker, previous_share_hash, dist, min_work=False, integer=False):
assert dist >= 2
near = tracker.items[previous_share_hash]
far = tracker.items[tracker.get_nth_parent_hash(previous_share_hash, dist - 1)]
attempts = tracker.get_delta(near.hash, far.hash).work if not min_work else tracker.get_delta(near.hash, far.hash).min_work
time = near.timestamp - far.timestamp
if time <= 0:
time = 1
if integer:
return attempts//time
return attempts/time
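# Illustrative note (not part of the original p2pool module): the value computed above
# is simply the accumulated expected work divided by the elapsed time between the two
# shares. The helper below mirrors that arithmetic with invented numbers and is never
# called by the surrounding code.
def _example_pool_rate(attempts=3e12, elapsed_seconds=600):
    # 3e12 expected hashes spread over 600 seconds ~= 5e9 hashes/second for the pool
    return attempts / elapsed_seconds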
def get_average_stale_prop(tracker, share_hash, lookbehind):
stales = sum(1 for share in tracker.get_chain(share_hash, lookbehind) if share.share_data['stale_info'] is not None)
return stales/(lookbehind + stales)
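# Worked example for the ratio above (numbers invented): with lookbehind = 100 chain
# shares of which 10 carry stale_info, the estimate is 10/(100 + 10) ~= 0.09. The
# stales are added to the denominator because they are counted on top of the
# lookbehind shares that were actually walked.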
def get_stale_counts(tracker, share_hash, lookbehind, rates=False):
res = {}
for share in tracker.get_chain(share_hash, lookbehind - 1):
res['good'] = res.get('good', 0) + bitcoin_data.target_to_average_attempts(share.target)
s = share.share_data['stale_info']
if s is not None:
res[s] = res.get(s, 0) + bitcoin_data.target_to_average_attempts(share.target)
if rates:
dt = tracker.items[share_hash].timestamp - tracker.items[tracker.get_nth_parent_hash(share_hash, lookbehind - 1)].timestamp
res = dict((k, v/dt) for k, v in res.iteritems())
return res
def get_user_stale_props(tracker, share_hash, lookbehind):
res = {}
for share in tracker.get_chain(share_hash, lookbehind - 1):
stale, total = res.get(share.share_data['pubkey_hash'], (0, 0))
total += 1
if share.share_data['stale_info'] is not None:
stale += 1
total += 1
res[share.share_data['pubkey_hash']] = stale, total
return dict((pubkey_hash, stale/total) for pubkey_hash, (stale, total) in res.iteritems())
def get_expected_payouts(tracker, best_share_hash, block_target, subsidy, net):
weights, total_weight, donation_weight = tracker.get_cumulative_weights(best_share_hash, min(tracker.get_height(best_share_hash), net.REAL_CHAIN_LENGTH), 65535*net.SPREAD*bitcoin_data.target_to_average_attempts(block_target))
res = dict((script, subsidy*weight//total_weight) for script, weight in weights.iteritems())
res[DONATION_SCRIPT] = res.get(DONATION_SCRIPT, 0) + subsidy - sum(res.itervalues())
return res
def get_desired_version_counts(tracker, best_share_hash, dist):
res = {}
for share in tracker.get_chain(best_share_hash, dist):
res[share.desired_version] = res.get(share.desired_version, 0) + bitcoin_data.target_to_average_attempts(share.target)
return res
def get_warnings(tracker, best_share, net, bitcoind_warning, bitcoind_work_value):
res = []
desired_version_counts = get_desired_version_counts(tracker, best_share,
min(net.CHAIN_LENGTH, 60*60//net.SHARE_PERIOD, tracker.get_height(best_share)))
majority_desired_version = max(desired_version_counts, key=lambda k: desired_version_counts[k])
if majority_desired_version > (Share.SUCCESSOR if Share.SUCCESSOR is not None else Share).VOTING_VERSION and desired_version_counts[majority_desired_version] > sum(desired_version_counts.itervalues())/2:
res.append('A MAJORITY OF SHARES CONTAIN A VOTE FOR AN UNSUPPORTED SHARE IMPLEMENTATION! (v%i with %i%% support)\n'
'An upgrade is likely necessary. Check http://p2pool.forre.st/ for more information.' % (
majority_desired_version, 100*desired_version_counts[majority_desired_version]/sum(desired_version_counts.itervalues())))
if bitcoind_warning is not None:
if 'This is a pre-release test build' not in bitcoind_warning:
res.append('(from bitcoind) %s' % (bitcoind_warning,))
if time.time() > bitcoind_work_value['last_update'] + 60:
res.append('''LOST CONTACT WITH BITCOIND for %s! Check that it isn't frozen or dead!''' % (math.format_dt(time.time() - bitcoind_work_value['last_update']),))
return res
def format_hash(x):
if x is None:
return 'xxxxxxxx'
return '%08x' % (x % 2**32)
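# Worked example (value invented): format_hash(0x1234567890abcdef) -> '90abcdef',
# since only the low 32 bits are kept and rendered as 8 hex digits.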
class ShareStore(object):
def __init__(self, prefix, net):
self.filename = prefix
self.dirname = os.path.dirname(os.path.abspath(prefix))
self.filename = os.path.basename(os.path.abspath(prefix))
self.net = net
self.known = None # will be filename -> set of share hashes, set of verified hashes
self.known_desired = None
def get_shares(self):
if self.known is not None:
raise AssertionError()
known = {}
filenames, next = self.get_filenames_and_next()
for filename in filenames:
share_hashes, verified_hashes = known.setdefault(filename, (set(), set()))
with open(filename, 'rb') as f:
for line in f:
try:
type_id_str, data_hex = line.strip().split(' ')
type_id = int(type_id_str)
if type_id == 0:
pass
elif type_id == 1:
pass
elif type_id == 2:
verified_hash = int(data_hex, 16)
yield 'verified_hash', verified_hash
verified_hashes.add(verified_hash)
elif type_id == 5:
raw_share = share_type.unpack(data_hex.decode('hex'))
if raw_share['type'] in [0, 1, 2, 3, 4, 5, 6, 7, 8]:
continue
share = load_share(raw_share, self.net, None)
yield 'share', share
share_hashes.add(share.hash)
else:
raise NotImplementedError("share type %i" % (type_id,))
except Exception:
log.err(None, "HARMLESS error while reading saved shares, continuing where left off:")
self.known = known
self.known_desired = dict((k, (set(a), set(b))) for k, (a, b) in known.iteritems())
def _add_line(self, line):
filenames, next = self.get_filenames_and_next()
if filenames and os.path.getsize(filenames[-1]) < 10e6:
filename = filenames[-1]
else:
filename = next
with open(filename, 'ab') as f:
f.write(line + '\n')
return filename
def add_share(self, share):
for filename, (share_hashes, verified_hashes) in self.known.iteritems():
if share.hash in share_hashes:
break
else:
filename = self._add_line("%i %s" % (5, share_type.pack(share.as_share()).encode('hex')))
share_hashes, verified_hashes = self.known.setdefault(filename, (set(), set()))
share_hashes.add(share.hash)
share_hashes, verified_hashes = self.known_desired.setdefault(filename, (set(), set()))
share_hashes.add(share.hash)
def add_verified_hash(self, share_hash):
for filename, (share_hashes, verified_hashes) in self.known.iteritems():
if share_hash in verified_hashes:
break
else:
filename = self._add_line("%i %x" % (2, share_hash))
share_hashes, verified_hashes = self.known.setdefault(filename, (set(), set()))
verified_hashes.add(share_hash)
share_hashes, verified_hashes = self.known_desired.setdefault(filename, (set(), set()))
verified_hashes.add(share_hash)
def get_filenames_and_next(self):
suffixes = sorted(int(x[len(self.filename):]) for x in os.listdir(self.dirname) if x.startswith(self.filename) and x[len(self.filename):].isdigit())
return [os.path.join(self.dirname, self.filename + str(suffix)) for suffix in suffixes], os.path.join(self.dirname, self.filename + (str(suffixes[-1] + 1) if suffixes else str(0)))
def forget_share(self, share_hash):
for filename, (share_hashes, verified_hashes) in self.known_desired.iteritems():
if share_hash in share_hashes:
share_hashes.remove(share_hash)
self.check_remove()
def forget_verified_share(self, share_hash):
for filename, (share_hashes, verified_hashes) in self.known_desired.iteritems():
if share_hash in verified_hashes:
verified_hashes.remove(share_hash)
self.check_remove()
def check_remove(self):
to_remove = set()
for filename, (share_hashes, verified_hashes) in self.known_desired.iteritems():
#print filename, len(share_hashes) + len(verified_hashes)
if not share_hashes and not verified_hashes:
to_remove.add(filename)
for filename in to_remove:
self.known.pop(filename)
self.known_desired.pop(filename)
os.remove(filename)
print "REMOVED", filename
|
gpl-3.0
| 4,844,746,086,630,213,000
| 52.386603
| 346
| 0.613365
| false
| 3.655898
| false
| false
| false
|
andresmrm/rss2email
|
rss2email/__init__.py
|
1
|
1661
|
# Copyright (C) 2012-2013 W. Trevor King <wking@tremily.us>
#
# This file is part of rss2email.
#
# rss2email is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) version 3 of
# the License.
#
# rss2email is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# rss2email. If not, see <http://www.gnu.org/licenses/>.
"""rss2email: get RSS feeds emailed to you
"""
import logging as _logging
import sys as _sys
__version__ = '3.6'
__url__ = 'https://github.com/wking/rss2email'
__author__ = 'W. Trevor King'
__email__ = 'rss2email@tremily.us'
__copyright__ = '(C) 2004 Aaron Swartz. GNU GPL 2 or 3.'
__contributors__ = [
'Aaron Swartz (original author)',
'Brian Lalor',
'Dean Jackson',
'Eelis van der Weegen',
'Erik Hetzner',
'Etienne Millon',
'George Saunders',
'Joey Hess',
'Lindsey Smith (lindsey@allthingsrss.com)',
'Marcel Ackermann (http://www.DreamFlasher.de)',
"Martin 'Joey' Schulze",
'Matej Cepl',
'W. Trevor King',
]
LOG = _logging.getLogger('rss2email')
LOG.addHandler(_logging.StreamHandler())
LOG.setLevel(_logging.ERROR)
if _sys.version_info < (3, 2):
raise ImportError(
"rss2email requires Python 3.2, but you're using:\n{}".format(
_sys.version))
|
gpl-2.0
| -7,075,809,744,930,004,000
| 30.339623
| 79
| 0.684527
| false
| 3.139887
| false
| false
| false
|
carborgar/metropol
|
metropol_abogados/views/ExpedientViews.py
|
1
|
3817
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import permission_required
from django.views import generic
from django.db.models import Q
from metropol_abogados.services import ExpedientService
from metropol_abogados.models import Expedient
from metropol_abogados.forms import ExpedientForm, ExpedientListFilterForm
def get_redirect(request, expedient_id):
msg = "Expediente %s correctamente" % ("guardado" if not expedient_id else "editado")
messages.success(request, msg)
if expedient_id:
return HttpResponseRedirect(reverse('expedient-details', args=(expedient_id,)))
else:
return HttpResponseRedirect(reverse('expedient-list'))
@permission_required('auth.management_metropol')
def edit(request, expedient_id=None):
if request.method == 'POST':
form = ExpedientForm(request.POST)
if form.is_valid():
ExpedientService.save_from_form(form)
return get_redirect(request, expedient_id)
else:
initial_data = {'expedient_num': Expedient.objects.latest().id + 1}
if expedient_id:
expedient = get_object_or_404(Expedient, id=expedient_id)
initial_data = ExpedientService.build_initial_data(expedient)
form = ExpedientForm(initial=initial_data)
return render_to_response("expedient/edit.html", {'form': form}, context_instance=RequestContext(request))
@permission_required('auth.management_metropol')
def expedient_list(request):
form = ExpedientListFilterForm(request.GET)
expedients = ExpedientService.find_all()
if form.is_valid():
search_term = form.cleaned_data['keyword'] or None
selected_branch_id = form.cleaned_data['branch'] or None
selected_headquarters_id = form.cleaned_data['headquarters'] or None
selected_state = form.cleaned_data['state'] or None
selected_customers = form.cleaned_data['customers'] or None
if search_term:
expedients = expedients.filter(Q(id__icontains=search_term) | Q(description__icontains=search_term))
# Remember -> -1 equals "without" and None is "all"
if selected_branch_id:
if selected_branch_id == '-1':
# Filter expedients without branch
expedients = expedients.filter(phase__isnull=True)
else:
expedients = expedients.filter(phase__law_branch__id=selected_branch_id)
if selected_headquarters_id:
if selected_headquarters_id == '-1':
# Filter expedients without headquarters
expedients = expedients.filter(headquarters__isnull=True)
else:
expedients = expedients.filter(headquarters__id=selected_headquarters_id)
if selected_state:
expedients = expedients.filter(state=selected_state)
if selected_customers:
expedients = expedients.filter(expperrol__person__in=selected_customers, expperrol__role__text_help__iexact='CLIENTE').distinct()
return render_to_response("expedient/list.html", {"expedients": expedients, 'filter_form': form}, context_instance=RequestContext(request))
class DetailsView(generic.DetailView):
model = Expedient
template_name = 'expedient/details.html'
@permission_required('auth.management_metropol')
def delete(request, expedient_id):
expedient = get_object_or_404(Expedient, id=expedient_id)
expedient.delete()
messages.success(request, "Se ha borrado el expediente correctamente.")
return HttpResponseRedirect(reverse('expedient-list'))
|
mit
| -1,160,492,237,735,441,200
| 39.178947
| 143
| 0.700812
| false
| 3.631779
| false
| false
| false
|
ibamacsr/indicar-process
|
indicarprocess/tmsapi/views.py
|
2
|
1134
|
from rest_framework.generics import ListAPIView, RetrieveAPIView
from catalogo.models import CatalogoLandsat, CatalogoRapidEye
from .serializers import LandsatSerializer, RapidEyeSerializer
class LandsatListAPI(ListAPIView):
serializer_class = LandsatSerializer
def get_queryset(self):
bbox = self.request.query_params.get('extent', None)
if bbox:
return CatalogoLandsat.objects.filter(geom__intersects=bbox).order_by('data')
else:
return []
class RapidEyeListAPI(ListAPIView):
serializer_class = RapidEyeSerializer
def get_queryset(self):
bbox = self.request.query_params.get('extent', None)
if bbox:
return CatalogoRapidEye.objects.filter(geom__intersects=bbox).order_by('data')
else:
return []
class LandsatDetailView(RetrieveAPIView):
queryset = CatalogoLandsat.objects.all()
serializer_class = LandsatSerializer
lookup_field = 'image'
class RapidEyeDetailView(RetrieveAPIView):
queryset = CatalogoRapidEye.objects.all()
serializer_class = RapidEyeSerializer
lookup_field = 'image'
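# Usage sketch (illustrative only; the route path is an assumption since urls.py is
# not part of this file). The list views above filter on the `extent` query parameter,
# which is handed to geom__intersects, so a GEOS-parsable geometry such as WKT is
# expected; without `extent` they return an empty list. Never called by this module.
def _example_extent_request(client):
    # `client` stands for something like rest_framework.test.APIClient
    wkt = 'POLYGON((-60 -10, -59 -10, -59 -9, -60 -9, -60 -10))'
    return client.get('/api/landsat/', {'extent': wkt})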
|
agpl-3.0
| 7,511,570,201,475,237,000
| 28.076923
| 90
| 0.710758
| false
| 3.792642
| false
| false
| false
|
pauloacmelo/papelex_winthor
|
9813_ui_examples.py
|
1
|
3575
|
# coding: utf-8
from base import *
from PySide import QtGui, QtCore
import requests
import json
import urllib2
class Routine9812(WinthorRoutine):
def __init__(self, *args):
# super(WinthorRoutine, self).__init__('TESTE')
print(args)
super(Routine9812, self).__init__(args[4] or 9812, u'Cálculo de Frete', *args)
self.initUI()
def initUI(self):
super(Routine9812, self).initUI()
        # self.form = QtGui.QFormLayout(self)
textInput = QtGui.QLineEdit(self)
self.mainwindow.addWidget(textInput)
combo = QtGui.QComboBox(self)
self.mainwindow.addWidget(combo)
combo.addItem(u'Opção 1', combo)
combo.addItem(u'Opção 2', combo)
but = QtGui.QPushButton('TEST', self)
but.clicked.connect(self.buttonAction)
self.mainwindow.addWidget(but)
table_view = QtGui.QTableView(self)
header = [u'Transportadora', u'Preço', u'Cubagem', u'Prazo']
data = [
['1, 1', '1, 2', '1, 3'],
['2, 1', '2, 2', '2, 3'],
['3, 1', '3, 2', '3, 3'],]
table_view.setModel(QTableModel(self, data, header))
self.mainwindow.addWidget(table_view)
def buttonAction(self):
print self.db.query('select CODPROD from PCPEDI where NUMPED = %s' % 224010951)
    def quote_order_shipping(self, order_id):
        # NOTE: left incomplete in the original -- quotation() also needs the
        # destination zip code and product list for the given order
        self.quotation()
# destination_zip_code example: '20756-200'
# products example: [{"weight": 2.1,"cost_of_goods": 101.23,"width": 13,"height": 10,"length": 10,"quantity": 1,"sku_id": "1","description": "descrição do item","can_group": "true"}]
    def quotation(self, destination_zip_code, products):
data = {
"origin_zip_code": "21010-410",
"destination_zip_code": destination_zip_code,
"products": products,
"quoting_mode": "DYNAMIC_BOX_ALL_ITEMS",
"additional_information": {
"free_shipping": False,
"extra_cost_absolute": 0,
"extra_cost_percentage": 0,
"lead_time_business_days": 0,
"sales_channel": "hotsite",
"tax_id": "22337462000127",
"client_type": "gold",
"payment_type": "",
"is_state_tax_payer": False,
"delivery_method_ids": []
},
"identification": {
"session": "04e5bdf7ed15e571c0265c18333b6fdf1434658753",
"page_name": "carrinho",
"ip": "000.000.000.000",
"url": "http://www.intelipost.com.br/checkout/cart/"
}
}
req = urllib2.Request('https://api.intelipost.com.br/api/v1/quote_by_product', json.dumps(data))
req.add_header('Content-Type', 'application/json')
req.add_header('api_key', '36a3fa0d4108231864a60988a15272b9fd692c3320206ceb3e85e61688e11d79')
res = urllib2.urlopen(req)
return json.loads(res.read())
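    # Sketch of how the quote endpoint above might be exercised (all values invented;
    # this helper only assembles the arguments and is never called by the original
    # code, so no network request is made here):
    def _example_quotation_args(self):
        products = [{"weight": 2.1, "cost_of_goods": 101.23, "width": 13, "height": 10,
                     "length": 10, "quantity": 1, "sku_id": "1",
                     "description": "example item", "can_group": True}]
        return ('20756-200', products)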
class ErrorMessage(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
QtGui.QMessageBox.critical(self,
"Erro!",
"Utilize a rotina a partir do menu.")
self.close()
# Expected call: routine.exe USER DB_PASS DB_ALIAS DB_USER ROUTINE_NUMBER
def main(args):
app = QtGui.QApplication([])
if len(args) != 6:
print('Erro! Número de parâmetros diferente do esperado.')
print('Esperado: 6. Encontrado: %s' % len(args))
ErrorMessage()
return
args = args[1:]
ex = Routine9812(*args)
sys.exit(app.exec_())
if __name__ == '__main__':
main(sys.argv)
|
mit
| 4,274,113,163,035,480,000
| 35.010101
| 186
| 0.580084
| false
| 3.264652
| false
| false
| false
|
rht/zulip
|
zerver/lib/redis_utils.py
|
1
|
2543
|
from django.conf import settings
from typing import Any, Dict, Optional
from zerver.lib.utils import generate_random_token
import re
import redis
import ujson
# Redis accepts keys up to 512MB in size, but there's no reason for us to use such a size,
# so we want to stay limited to 1024 characters.
MAX_KEY_LENGTH = 1024
class ZulipRedisError(Exception):
pass
class ZulipRedisKeyTooLongError(ZulipRedisError):
pass
class ZulipRedisKeyOfWrongFormatError(ZulipRedisError):
pass
def get_redis_client() -> redis.StrictRedis:
return redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT,
password=settings.REDIS_PASSWORD, db=0)
def put_dict_in_redis(redis_client: redis.StrictRedis, key_format: str,
data_to_store: Dict[str, Any],
expiration_seconds: int,
token_length: int=64) -> str:
key_length = len(key_format) - len('{token}') + token_length
if key_length > MAX_KEY_LENGTH:
error_msg = "Requested key too long in put_dict_in_redis. Key format: %s, token length: %s"
raise ZulipRedisKeyTooLongError(error_msg % (key_format, token_length))
token = generate_random_token(token_length)
key = key_format.format(token=token)
with redis_client.pipeline() as pipeline:
pipeline.set(key, ujson.dumps(data_to_store))
pipeline.expire(key, expiration_seconds)
pipeline.execute()
return key
def get_dict_from_redis(redis_client: redis.StrictRedis, key_format: str, key: str
) -> Optional[Dict[str, Any]]:
# This function requires inputting the intended key_format to validate
    # that the key fits it, as an additional security measure. This protects
# against bugs where a caller requests a key based on user input and doesn't
# validate it - which could potentially allow users to poke around arbitrary redis keys.
if len(key) > MAX_KEY_LENGTH:
error_msg = "Requested key too long in get_dict_from_redis: %s"
raise ZulipRedisKeyTooLongError(error_msg % (key,))
validate_key_fits_format(key, key_format)
data = redis_client.get(key)
if data is None:
return None
return ujson.loads(data)
def validate_key_fits_format(key: str, key_format: str) -> None:
assert "{token}" in key_format
regex = key_format.format(token=r"[a-z0-9]+")
if not re.fullmatch(regex, key):
raise ZulipRedisKeyOfWrongFormatError("%s does not match format %s" % (key, key_format))
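# Usage sketch (not part of the original module; the key format string is invented,
# and this helper is never called here). It shows the intended round trip through the
# two functions defined above.
def _example_redis_round_trip():
    redis_client = get_redis_client()
    key = put_dict_in_redis(redis_client, "example_demo_{token}",
                            data_to_store={'answer': 42}, expiration_seconds=60)
    # reading it back requires the same key format, which is validated above
    return get_dict_from_redis(redis_client, "example_demo_{token}", key)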
|
apache-2.0
| -6,855,087,069,317,244,000
| 38.734375
| 99
| 0.681085
| false
| 3.622507
| false
| false
| false
|
kratzer/bsm
|
bsm.py
|
1
|
11076
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Kai Kratzer, Stuttgart, Germany; all rights
# reserved unless otherwise stated.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
#
# Sound playing machine using pygame
# Further information in the "README" and "COPYING" files.
#
# Dependencies: apt-get install python-pygame
#
# directory listing
import glob
# system
import os
import sys
# parsing
import re
# random numbers
import random
# pygame (main window, sounds, events, timer)
import pygame
# calculations
import math
# pygame local variables
from pygame.locals import *
# Screen settings
width=1366
height=768
fullscreen = False
# Soundfile extensions
# not every format is supported; see the pygame documentation
sndfile_extensions = ['wav']
# Keybindings for the sounds (e.g. if no mouse/touch is available)
keybindings = { \
K_a: 'alkohol', \
K_b: 'bang', \
K_e: 'bier', \
K_q: 'dead', \
K_d: 'dynamit', \
K_f: 'fehlschuss', \
K_h: 'freude', \
K_g: 'gatling', \
K_s: 'general_store', \
K_i: 'indianer', \
K_n: 'kein_bang', \
K_k: 'kinnhaken', \
K_x: 'knapp', \
K_p: 'postkutsche', \
K_a: 'angry', \
K_w: 'shot_sheriff', \
K_r: 'talk', \
K_t: 'treffer', \
K_v: 'verwirrung', \
}
# timelimit for player's move. This is invoked, if "timelimit" button is pressed
# speech announces 30, 20, 10, 5, 4, 3, 2, 1 seconds till end
player_timelimit = 30
# walk through subdirectories, collect sounds
def read_dir():
bangdict = {}
print "Reading directories..."
for dname, dnames, fnames in os.walk('.'):
dname = re.sub('.*/','',dname)
if dname != '.' and dname != 'ambiente' and dname != 'speech':
soundfiles = []
for ext in sndfile_extensions:
soundfiles += glob.glob(dname + '/' + '*.' + ext)
if len(soundfiles) > 0:
bangdict[dname] = soundfiles
print "done."
return bangdict
# Choose random sound from folder
def random_sound(tkey):
rndn = random.randint(0,len(bangsounds[tkey])-1)
return bangsounds[tkey][rndn]
# Queue sound to player
def queue_sound(tsnd):
print "Playing", tsnd
sound = pygame.mixer.Sound(tsnd)
sound.play()
# transform 2D index to linear
def get_linear_index(x,y):
return x + y*nfieldx
# get y coordinate of linear index
def get_index_y(li):
return li / nfieldx
# get x coordinate of linear index
def get_index_x(li):
return li % nfieldx
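# Quick worked example for the index helpers above (grid width assumed to be 4 purely
# for illustration; never called by the program). Field (x=2, y=1) maps to linear
# index 2 + 1*4 = 6, and 6 maps back to (2, 1).
def _example_index_roundtrip(demo_nfieldx=4):
    li = 2 + 1 * demo_nfieldx                       # get_linear_index(2, 1)
    return li % demo_nfieldx, li // demo_nfieldx    # -> (2, 1)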
# get field coordinates by mouse cursor position
def get_field(xm, ym):
for xn in range(len(xborders)-1):
if xm > xborders[xn] and xm <= xborders[xn+1]:
break
for yn in range(len(yborders)-1):
if ym >= yborders[yn] and ym <= yborders[yn+1]:
break
return xn, yn
# get button name by mouse coordinates
def get_button(xm, ym):
xn, yn = get_field(xm, ym)
return bangbuttons[get_linear_index(xn,yn)]
# draw a small (white) exit corner in the bottom right field
def draw_exitcorner():
pygame.draw.rect(window, cwhite, (width-exitcorner_size,height-exitcorner_size,width,height))
def buttoncaption(buttonname):
return re.sub('_',' ',buttonname.capitalize())
# INIT SOUNDS
# dictionary of sounds and buttonnames
bangsounds = read_dir()
# alphabetically sorted buttons in array
bangbuttons = sorted(bangsounds, key=lambda key: bangsounds[key])
# add custom buttons, e.g. for timelimit, stoptimelimit and stopsound
bangbuttons += ['timelimit', 'stoptime', 'stopsound','nextplayer']
nbuttons = len(bangbuttons)
# GAME WINDOW
pygame.init()
pygame.mixer.init()
pygame.font.init()
# fps clock
fpsClock = pygame.time.Clock()
# linewidth and 0.5*linewidth
lw = 4
lwh = int(round(0.5*lw))
# create window handler
if fullscreen:
window = pygame.display.set_mode((width, height), pygame.FULLSCREEN)
else:
window = pygame.display.set_mode((width, height), DOUBLEBUF | HWSURFACE)
pygame.display.set_caption('Bang!soundmachine')
# set colors
cwhite = pygame.Color(255,255,255)
cblack = pygame.Color(0,0,0)
cred = pygame.Color(255,0,0)
cblue = pygame.Color(0,190,255)
cgreen = pygame.Color(0,255,150)
cyellow = pygame.Color(255,255,0)
# set color for buttons
colorbuttons = {\
'bang': cred, 'gatling': cred, 'kinnhaken': cred, \
'fehlschuss': cgreen, 'treffer': cgreen, \
'postkutsche': cyellow, 'general_store': cyellow, \
'kein_bang': cblue\
}
# size for the exit corner
exitcorner_size = 30
# initial window drawings
window.fill(cblack)
pygame.draw.line(window, cwhite, (0,0),(0,height),lw)
pygame.draw.line(window, cwhite, (0,0),(width,0),lw)
pygame.draw.line(window, cwhite, (0,height-lw+1),(width,height-lw+1),lw)
pygame.draw.line(window, cwhite, (width-lw+1,0),(width-lw+1,height),lw)
draw_exitcorner()
awidth = width - 2*lw
aheight = height - 2*lw
surface = (awidth) * (aheight)
ratio = float(awidth) / float(aheight)
fieldsurface = float(surface) / float(nbuttons)
# get field size with a certain edge ratio
fieldy = math.sqrt(fieldsurface / ratio)
fieldx = fieldy * ratio
fieldy = fieldy
testsurface = fieldx * fieldy
# higher number of fields in every direction
nfieldx = int(round(0.5+float(awidth)/fieldx))
nfieldy = int(round(0.5+float(aheight)/fieldy))
# try to avoid empty columns or rows
if (nfieldx - 1) * nfieldy >= nbuttons:
nfieldx -= 1
if (nfieldy - 1) * nfieldx >= nbuttons:
nfieldy -= 1
xborders = [0]
yborders = [0]
# draw borders of fields
if nfieldx > 0:
dx = int(awidth / nfieldx)
xoff = dx
for i in range(nfieldx-1):
xborders.append(xoff)
pygame.draw.line(window, cwhite, (xoff-lwh,0),(xoff-lwh,height),lw)
xoff += dx
if nfieldy > 0:
dy = int(aheight / nfieldy)
yoff = dy
for i in range(nfieldy-1):
yborders.append(yoff)
pygame.draw.line(window, cwhite, (0,yoff-lwh),(width,yoff-lwh),lw)
yoff += dy
xborders.append(width)
yborders.append(height)
# get maximum font size by testing if every button string fits into the fields
fontsize = 100
in_progress = True
print "Determining maximum possible font size..."
while in_progress:
tfont = pygame.font.SysFont("Arial", fontsize)
xtmp, ytmp = tfont.size(buttoncaption(bangbuttons[-1]))
xvals = [xtmp]
yvals = [ytmp]
for i in range(nbuttons-1):
xtmp, ytmp = tfont.size(buttoncaption(bangbuttons[i]))
xvals.append(xtmp)
yvals.append(ytmp)
if max(xvals) >= dx or max(yvals) >= dy:
fontsize -= 1
else:
in_progress = False
print "Done."
# Set buttons
for i in range(nbuttons):
buttonname = bangbuttons[i]
if buttonname in colorbuttons:
tcolor = colorbuttons[buttonname]
else:
tcolor = cwhite
ttext = tfont.render(buttoncaption(buttonname), True, tcolor)
trect = ttext.get_rect()
rx, ry = trect.bottomright
# midpoint rectangle
mrx = 0.5 * rx
mry = 0.5 * ry
ix = get_index_x(i)
iy = get_index_y(i)
xta = xborders[ix]
xtb = xborders[ix+1]
yta = yborders[iy]
ytb = yborders[iy+1]
# midpoint field
mx = 0.5 * (xta + xtb)
my = 0.5 * (yta + ytb)
# move button text start corner to the correct coordinates
window.blit(ttext,(int(mx-mrx),int(my-mry)))
# display the drawings
pygame.display.update()
# Startup sound
queue_sound('speech/hellouser.wav')
# frames per second
fps = 10
# frame counter
counter = 0
# second counter
seconds = 0
# timelimit starting value for user move
timelimit = False
#last_ifx = 0
#last_ify = 0
# MAIN LOOP
while True:
# loop over events
for event in pygame.event.get():
# check for quit request
if event.type == QUIT:
pygame.quit()
sys.exit()
# key pressed
elif event.type == KEYDOWN:
# check if in keybindings
if event.key in keybindings:
tbutton = keybindings[event.key]
psnd = random_sound(tbutton)
queue_sound(psnd)
# fade out sounds if escape is pressed
elif event.key == K_ESCAPE:
pygame.mixer.fadeout(2000)
# track mouse motion (fields could e.g. be highlighted)
elif event.type == MOUSEMOTION:
xm, ym = event.pos
#ifx, ify = get_field(xm, ym)
#if ifx != last_ifx or ify != last_ify:
# last_ifx = ifx
# last_ify = ify
# print ifx, ify
# Mouse button is pressed
elif event.type == MOUSEBUTTONDOWN:
xm, ym = event.pos
# hit exit corner, quit!
if xm > width - exitcorner_size and ym > height - exitcorner_size:
pygame.event.post(pygame.event.Event(QUIT))
else:
# try to play sound, otherwise fade out (e.g. if button without function is pressed)
try:
cbutton = get_button(xm, ym)
if cbutton == 'stopsound':
pygame.mixer.fadeout(1000)
# start player timer
elif cbutton == 'timelimit':
seconds = 0
timelimit = True
elif cbutton == 'stoptime':
timelimit = False
elif cbutton == 'nextplayer':
queue_sound('speech/end_of_line.wav')
else:
queue_sound(random_sound(cbutton))
except Exception as e:
pygame.mixer.fadeout(2000)
pygame.display.update()
# increment fps counter
counter += 1
# if we have reached the number of fps, 1s has passed.
if counter >= fps:
# check for player timelimit
if timelimit:
# remaining seconds
seconds_left = player_timelimit - seconds
# play sounds
if seconds_left > 0 and seconds_left <= 5:
queue_sound('speech/' + str(seconds_left) + '_seconds.wav')
elif seconds_left == 30:
queue_sound('speech/30_seconds.wav')
elif seconds_left == 20:
queue_sound('speech/20_seconds.wav')
elif seconds_left == 10:
queue_sound('speech/10_seconds.wav')
elif seconds_left == 0:
timelimit = False
queue_sound('speech/ba_endline.wav')
counter = 0
seconds += 1
# let the clock tick
fpsClock.tick(fps)
|
gpl-3.0
| -6,629,903,377,394,719,000
| 27.183206
| 100
| 0.624503
| false
| 3.227273
| false
| false
| false
|
ds-hwang/chromium-crosswalk
|
mojo/public/tools/manifest/manifest_collator.py
|
2
|
1537
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" A collator for Mojo Application Manifests """
import argparse
import json
import shutil
import sys
import urlparse
def ParseJSONFile(filename):
with open(filename) as json_file:
try:
return json.load(json_file)
except ValueError:
print "%s is not a valid JSON document" % filename
return None
def main():
parser = argparse.ArgumentParser(
description="Collate Mojo application manifests.")
parser.add_argument("--parent")
parser.add_argument("--output")
parser.add_argument("--application-name")
args, children = parser.parse_known_args()
parent = ParseJSONFile(args.parent)
if parent == None:
return 1
parsed = urlparse.urlparse(parent['url'])
if args.application_name != parsed.hostname:
raise ValueError("Application name '%s' specified in build file does not " \
"match application name '%s' specified in manifest." %
(args.application_name, parsed.hostname))
applications = []
for child in children:
application = ParseJSONFile(child)
if application == None:
return 1
applications.append(application)
if len(applications) > 0:
parent['applications'] = applications
with open(args.output, 'w') as output_file:
json.dump(parent, output_file)
return 0
if __name__ == "__main__":
sys.exit(main())
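# Example invocation (file names are hypothetical; in practice the build system
# generates this command). The parent manifest's url hostname must match
# --application-name, as checked in main() above:
#   python manifest_collator.py --parent=foo_manifest.json --output=collated.json \
#       --application-name=foo child_a_manifest.json child_b_manifest.json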
|
bsd-3-clause
| 8,324,561,153,217,035,000
| 26.446429
| 80
| 0.681848
| false
| 4.076923
| false
| false
| false
|
rahul003/mxnet
|
tests/python/unittest/test_sparse_ndarray.py
|
1
|
38575
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pickle as pkl
from mxnet.ndarray import NDArray
from mxnet.test_utils import *
from common import setup_module, with_seed, random_seed, teardown
from mxnet.base import mx_real_t
from numpy.testing import assert_allclose
import numpy.random as rnd
import numpy as np
from common import assertRaises
from mxnet.ndarray.sparse import RowSparseNDArray, CSRNDArray
def sparse_nd_ones(shape, stype):
return mx.nd.ones(shape).tostype(stype)
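# Small sketch of the two storage types exercised below (shapes and values invented;
# not used by any test). A 'csr' NDArray is built from (data, indices, indptr) and a
# 'row_sparse' NDArray from (data, row indices), mirroring the creation calls that the
# tests in this file perform.
def _example_sparse_creation():
    # csr: row i holds data[indptr[i]:indptr[i+1]] at the listed column indices
    csr = mx.nd.sparse.csr_matrix(([1, 2, 3], [0, 2, 1], [0, 1, 2, 3]), shape=(3, 3))
    # row_sparse: only the listed rows are stored; all other rows are implicitly zero
    rsp = mx.nd.sparse.row_sparse_array(([[1, 1], [2, 2]], [0, 2]), shape=(4, 2))
    return csr, rsp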
@with_seed()
def test_sparse_nd_elemwise_add():
def check_sparse_nd_elemwise_binary(shapes, stypes, f, g):
# generate inputs
nds = []
for i, stype in enumerate(stypes):
if stype == 'row_sparse':
nd, _ = rand_sparse_ndarray(shapes[i], stype)
elif stype == 'default':
nd = mx.nd.array(random_arrays(shapes[i]), dtype = np.float32)
else:
assert(False)
nds.append(nd)
# check result
test = f(nds[0], nds[1])
assert_almost_equal(test.asnumpy(), g(nds[0].asnumpy(), nds[1].asnumpy()))
num_repeats = 3
g = lambda x,y: x + y
op = mx.nd.elemwise_add
for i in range(num_repeats):
shape = [rand_shape_2d()] * 2
check_sparse_nd_elemwise_binary(shape, ['default'] * 2, op, g)
check_sparse_nd_elemwise_binary(shape, ['row_sparse', 'row_sparse'], op, g)
@with_seed()
def test_sparse_nd_copy():
def check_sparse_nd_copy(from_stype, to_stype, shape):
from_nd = rand_ndarray(shape, from_stype)
# copy to ctx
to_ctx = from_nd.copyto(default_context())
# copy to stype
to_nd = rand_ndarray(shape, to_stype)
to_nd = from_nd.copyto(to_nd)
assert np.sum(np.abs(from_nd.asnumpy() != to_ctx.asnumpy())) == 0.0
assert np.sum(np.abs(from_nd.asnumpy() != to_nd.asnumpy())) == 0.0
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
stypes = ['row_sparse', 'csr']
for stype in stypes:
check_sparse_nd_copy(stype, 'default', shape)
check_sparse_nd_copy('default', stype, shape)
check_sparse_nd_copy('row_sparse', 'row_sparse', shape_3d)
check_sparse_nd_copy('row_sparse', 'default', shape_3d)
check_sparse_nd_copy('default', 'row_sparse', shape_3d)
@with_seed()
def test_sparse_nd_basic():
def check_sparse_nd_basic_rsp():
storage_type = 'row_sparse'
shape = rand_shape_2d()
nd, (v, idx) = rand_sparse_ndarray(shape, storage_type)
assert(nd._num_aux == 1)
assert(nd.indices.dtype == np.int64)
assert(nd.stype == 'row_sparse')
check_sparse_nd_basic_rsp()
@with_seed()
def test_sparse_nd_setitem():
def check_sparse_nd_setitem(stype, shape, dst):
x = mx.nd.zeros(shape=shape, stype=stype)
x[:] = dst
dst_nd = mx.nd.array(dst) if isinstance(dst, (np.ndarray, np.generic)) else dst
assert np.all(x.asnumpy() == dst_nd.asnumpy() if isinstance(dst_nd, NDArray) else dst)
shape = rand_shape_2d()
for stype in ['row_sparse', 'csr']:
# ndarray assignment
check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, 'default'))
check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, stype))
# numpy assignment
check_sparse_nd_setitem(stype, shape, np.ones(shape))
# scalar assigned to row_sparse NDArray
check_sparse_nd_setitem('row_sparse', shape, 2)
@with_seed()
def test_sparse_nd_slice():
shape = (rnd.randint(2, 10), rnd.randint(2, 10))
stype = 'csr'
A, _ = rand_sparse_ndarray(shape, stype)
A2 = A.asnumpy()
start = rnd.randint(0, shape[0] - 1)
end = rnd.randint(start + 1, shape[0])
assert same(A[start:end].asnumpy(), A2[start:end])
assert same(A[start - shape[0]:end].asnumpy(), A2[start:end])
assert same(A[start:].asnumpy(), A2[start:])
assert same(A[:end].asnumpy(), A2[:end])
ind = rnd.randint(-shape[0], shape[0] - 1)
assert same(A[ind].asnumpy(), A2[ind][np.newaxis, :])
start_col = rnd.randint(0, shape[1] - 1)
end_col = rnd.randint(start_col + 1, shape[1])
result = mx.nd.slice(A, begin=(start, start_col), end=(end, end_col))
result_dense = mx.nd.slice(mx.nd.array(A2), begin=(start, start_col), end=(end, end_col))
assert same(result_dense.asnumpy(), result.asnumpy())
A = mx.nd.sparse.zeros('csr', shape)
A2 = A.asnumpy()
assert same(A[start:end].asnumpy(), A2[start:end])
result = mx.nd.slice(A, begin=(start, start_col), end=(end, end_col))
result_dense = mx.nd.slice(mx.nd.array(A2), begin=(start, start_col), end=(end, end_col))
assert same(result_dense.asnumpy(), result.asnumpy())
def check_slice_nd_csr_fallback(shape):
stype = 'csr'
A, _ = rand_sparse_ndarray(shape, stype)
A2 = A.asnumpy()
start = rnd.randint(0, shape[0] - 1)
end = rnd.randint(start + 1, shape[0])
# non-trivial step should fallback to dense slice op
result = mx.nd.sparse.slice(A, begin=(start,), end=(end + 1,), step=(2,))
result_dense = mx.nd.slice(mx.nd.array(A2), begin=(start,), end=(end + 1,), step=(2,))
assert same(result_dense.asnumpy(), result.asnumpy())
shape = (rnd.randint(2, 10), rnd.randint(1, 10))
check_slice_nd_csr_fallback(shape)
@with_seed()
def test_sparse_nd_concat():
def check_concat(arrays):
ret = np.concatenate([arr.asnumpy() for arr in arrays], axis=0)
same(mx.nd.concat(*arrays, dim=0).asnumpy(), ret)
nds = []
zero_nds = []
ncols = rnd.randint(2, 10)
for i in range(3):
shape = (rnd.randint(2, 10), ncols)
A, _ = rand_sparse_ndarray(shape, 'csr')
nds.append(A)
zero_nds.append(mx.nd.zeros(shape).tostype('csr'))
check_concat(nds)
check_concat(zero_nds)
@with_seed()
def test_sparse_nd_equal():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = x == y
assert (z.asnumpy() == np.zeros(shape)).all()
z = 0 == x
assert (z.asnumpy() == np.ones(shape)).all()
@with_seed()
def test_sparse_nd_not_equal():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = x != y
assert (z.asnumpy() == np.ones(shape)).all()
z = 0 != x
assert (z.asnumpy() == np.zeros(shape)).all()
@with_seed()
def test_sparse_nd_greater():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = x > y
assert (z.asnumpy() == np.zeros(shape)).all()
z = y > 0
assert (z.asnumpy() == np.ones(shape)).all()
z = 0 > y
assert (z.asnumpy() == np.zeros(shape)).all()
@with_seed()
def test_sparse_nd_greater_equal():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = x >= y
assert (z.asnumpy() == np.zeros(shape)).all()
z = y >= 0
assert (z.asnumpy() == np.ones(shape)).all()
z = 0 >= y
assert (z.asnumpy() == np.zeros(shape)).all()
z = y >= 1
assert (z.asnumpy() == np.ones(shape)).all()
@with_seed()
def test_sparse_nd_lesser():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = y < x
assert (z.asnumpy() == np.zeros(shape)).all()
z = 0 < y
assert (z.asnumpy() == np.ones(shape)).all()
z = y < 0
assert (z.asnumpy() == np.zeros(shape)).all()
@with_seed()
def test_sparse_nd_lesser_equal():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = y <= x
assert (z.asnumpy() == np.zeros(shape)).all()
z = 0 <= y
assert (z.asnumpy() == np.ones(shape)).all()
z = y <= 0
assert (z.asnumpy() == np.zeros(shape)).all()
z = 1 <= y
assert (z.asnumpy() == np.ones(shape)).all()
@with_seed()
def test_sparse_nd_binary():
N = 3
def check_binary(fn, stype):
for _ in range(N):
ndim = 2
oshape = np.random.randint(1, 6, size=(ndim,))
bdim = 2
lshape = list(oshape)
# one for broadcast op, another for elemwise op
rshape = list(oshape[ndim-bdim:])
for i in range(bdim):
sep = np.random.uniform(0, 1)
if sep < 0.33:
lshape[ndim-i-1] = 1
elif sep < 0.66:
rshape[bdim-i-1] = 1
lhs = np.random.uniform(0, 1, size=lshape)
rhs = np.random.uniform(0, 1, size=rshape)
lhs_nd = mx.nd.array(lhs).tostype(stype)
rhs_nd = mx.nd.array(rhs).tostype(stype)
assert_allclose(fn(lhs, rhs), fn(lhs_nd, rhs_nd).asnumpy(), rtol=1e-4, atol=1e-4)
assert_allclose(fn(lhs, lhs), fn(lhs_nd, lhs_nd).asnumpy(), rtol=1e-4, atol=1e-4)
stypes = ['row_sparse', 'csr']
for stype in stypes:
check_binary(lambda x, y: x + y, stype)
check_binary(lambda x, y: x - y, stype)
check_binary(lambda x, y: x * y, stype)
check_binary(lambda x, y: x / y, stype)
check_binary(lambda x, y: x ** y, stype)
check_binary(lambda x, y: x > y, stype)
check_binary(lambda x, y: x < y, stype)
check_binary(lambda x, y: x >= y, stype)
check_binary(lambda x, y: x <= y, stype)
check_binary(lambda x, y: x == y, stype)
@with_seed()
def test_sparse_nd_binary_scalar_op():
N = 3
def check(fn, stype, out_stype=None):
for _ in range(N):
ndim = 2
shape = np.random.randint(1, 6, size=(ndim,))
npy = np.random.normal(0, 1, size=shape)
nd = mx.nd.array(npy).tostype(stype)
if out_stype is not None:
assert(nd.stype == out_stype)
assert_allclose(fn(npy), fn(nd).asnumpy(), rtol=1e-4, atol=1e-4)
stypes = ['row_sparse', 'csr']
for stype in stypes:
check(lambda x: 1 + x, stype)
check(lambda x: 1 - x, stype)
check(lambda x: 1 * x, stype)
check(lambda x: 1 / x, stype)
check(lambda x: 2 ** x, stype)
check(lambda x: 1 > x, stype)
check(lambda x: 0.5 > x, stype)
check(lambda x: 0.5 < x, stype)
check(lambda x: 0.5 >= x, stype)
check(lambda x: 0.5 <= x, stype)
check(lambda x: 0.5 == x, stype)
check(lambda x: x / 2, stype, out_stype=stype)
check(lambda x: x + 0, stype, out_stype=stype)
check(lambda x: x - 0, stype, out_stype=stype)
@with_seed()
def test_sparse_nd_binary_iop():
N = 3
def check_binary(fn, stype):
for _ in range(N):
ndim = 2
oshape = np.random.randint(1, 6, size=(ndim,))
lshape = list(oshape)
rshape = list(oshape)
lhs = np.random.uniform(0, 1, size=lshape)
rhs = np.random.uniform(0, 1, size=rshape)
lhs_nd = mx.nd.array(lhs).tostype(stype)
rhs_nd = mx.nd.array(rhs).tostype(stype)
assert_allclose(fn(lhs, rhs),
fn(lhs_nd, rhs_nd).asnumpy(),
rtol=1e-4, atol=1e-4)
def inplace_add(x, y):
x += y
return x
def inplace_mul(x, y):
x *= y
return x
stypes = ['csr', 'row_sparse']
fns = [inplace_add, inplace_mul]
for stype in stypes:
for fn in fns:
check_binary(fn, stype)
@with_seed()
def test_sparse_nd_negate():
def check_sparse_nd_negate(shape, stype):
npy = np.random.uniform(-10, 10, rand_shape_2d())
arr = mx.nd.array(npy).tostype(stype)
assert_almost_equal(npy, arr.asnumpy())
assert_almost_equal(-npy, (-arr).asnumpy())
        # a final check to make sure the negation (-) is not implemented
        # as an in-place operation, so the contents of arr do not change after
        # we compute (-arr)
assert_almost_equal(npy, arr.asnumpy())
shape = rand_shape_2d()
stypes = ['csr', 'row_sparse']
for stype in stypes:
check_sparse_nd_negate(shape, stype)
@with_seed()
def test_sparse_nd_broadcast():
sample_num = 1000
# TODO(haibin) test with more than 2 dimensions
def test_broadcast_to(stype):
for _ in range(sample_num):
ndim = 2
target_shape = np.random.randint(1, 11, size=ndim)
shape = target_shape.copy()
axis_flags = np.random.randint(0, 2, size=ndim)
for (axis, flag) in enumerate(axis_flags):
if flag:
shape[axis] = 1
dat = np.random.rand(*shape) - 0.5
numpy_ret = dat
ndarray = mx.nd.array(dat).tostype(stype)
ndarray_ret = ndarray.broadcast_to(shape=target_shape)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == target_shape).all()
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-8
def test_broadcast_like(stype):
for _ in range(sample_num):
ndim = 2
target_shape = np.random.randint(1, 11, size=ndim)
target = mx.nd.ones(shape=tuple(target_shape))
shape = target_shape.copy()
axis_flags = np.random.randint(0, 2, size=ndim)
for (axis, flag) in enumerate(axis_flags):
if flag:
shape[axis] = 1
dat = np.random.rand(*shape) - 0.5
numpy_ret = dat
ndarray = mx.nd.array(dat).tostype(stype)
ndarray_ret = ndarray.broadcast_like(target)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == target_shape).all()
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-8
stypes = ['csr', 'row_sparse']
for stype in stypes:
test_broadcast_to(stype)
test_broadcast_like(stype)
@with_seed()
def test_sparse_nd_transpose():
npy = np.random.uniform(-10, 10, rand_shape_2d())
stypes = ['csr', 'row_sparse']
for stype in stypes:
nd = mx.nd.array(npy).tostype(stype)
assert_almost_equal(npy.T, (nd.T).asnumpy())
@with_seed()
def test_sparse_nd_storage_fallback():
def check_output_fallback(shape):
ones = mx.nd.ones(shape)
out = mx.nd.zeros(shape=shape, stype='csr')
mx.nd.broadcast_add(ones, ones * 2, out=out)
assert(np.sum(out.asnumpy() - 3) == 0)
def check_input_fallback(shape):
ones = mx.nd.ones(shape)
out = mx.nd.broadcast_add(ones.tostype('csr'), ones.tostype('row_sparse'))
assert(np.sum(out.asnumpy() - 2) == 0)
def check_fallback_with_temp_resource(shape):
ones = mx.nd.ones(shape)
out = mx.nd.sum(ones)
assert(out.asscalar() == np.prod(shape))
shape = rand_shape_2d()
check_output_fallback(shape)
check_input_fallback(shape)
check_fallback_with_temp_resource(shape)
@with_seed()
def test_sparse_nd_random():
""" test sparse random operator on cpu """
# gpu random operator doesn't use fixed seed
    if default_context().device_type == 'gpu':
return
shape = (100, 100)
fns = [mx.nd.random.uniform, mx.nd.random.normal, mx.nd.random.gamma]
for fn in fns:
rsp_out = mx.nd.zeros(shape=shape, stype='row_sparse')
dns_out = mx.nd.zeros(shape=shape, stype='default')
with random_seed(0):
fn(shape=shape, out=dns_out)
with random_seed(0):
fn(shape=shape, out=rsp_out)
assert_almost_equal(dns_out.asnumpy(), rsp_out.asnumpy())
@with_seed()
def test_sparse_nd_astype():
stypes = ['row_sparse', 'csr']
for stype in stypes:
x = mx.nd.zeros(shape=rand_shape_2d(), stype=stype, dtype='float32')
y = x.astype('int32')
assert(y.dtype == np.int32), y.dtype
@with_seed()
def test_sparse_nd_astype_copy():
stypes = ['row_sparse', 'csr']
for stype in stypes:
x = mx.nd.zeros(shape=rand_shape_2d(), stype=stype, dtype='int32')
y = x.astype('float32')
assert (y.dtype == np.float32)
# Test that a new ndarray has been allocated
assert (id(x) != id(y))
y = x.astype('float32', copy=False)
assert (y.dtype == np.float32)
# Test that a new ndarray has been allocated
assert (id(x) != id(y))
y = x.astype('int32')
assert (y.dtype == np.int32)
# Test that a new ndarray has been allocated
# even though they have same dtype
assert (id(x) != id(y))
# Test that a new ndarray has not been allocated
y = x.astype('int32', copy=False)
assert (id(x) == id(y))
# Test the string version 'int32'
# has the same behaviour as the np.int32
y = x.astype(np.int32, copy=False)
assert (id(x) == id(y))
@with_seed(0)
def test_sparse_nd_pickle():
repeat = 1
dim0 = 40
dim1 = 40
stypes = ['row_sparse', 'csr']
densities = [0, 0.5]
stype_dict = {'row_sparse': RowSparseNDArray, 'csr': CSRNDArray}
for _ in range(repeat):
shape = rand_shape_2d(dim0, dim1)
for stype in stypes:
for density in densities:
a, _ = rand_sparse_ndarray(shape, stype, density)
assert isinstance(a, stype_dict[stype])
data = pkl.dumps(a)
b = pkl.loads(data)
assert isinstance(b, stype_dict[stype])
assert same(a.asnumpy(), b.asnumpy())
@with_seed(0)
def test_sparse_nd_save_load():
repeat = 1
stypes = ['default', 'row_sparse', 'csr']
stype_dict = {'default': NDArray, 'row_sparse': RowSparseNDArray, 'csr': CSRNDArray}
num_data = 20
densities = [0, 0.5]
fname = 'tmp_list.bin'
for _ in range(repeat):
data_list1 = []
for i in range(num_data):
stype = stypes[np.random.randint(0, len(stypes))]
shape = rand_shape_2d(dim0=40, dim1=40)
density = densities[np.random.randint(0, len(densities))]
data_list1.append(rand_ndarray(shape, stype, density))
assert isinstance(data_list1[-1], stype_dict[stype])
mx.nd.save(fname, data_list1)
data_list2 = mx.nd.load(fname)
assert len(data_list1) == len(data_list2)
for x, y in zip(data_list1, data_list2):
assert same(x.asnumpy(), y.asnumpy())
data_map1 = {'ndarray xx %s' % i: x for i, x in enumerate(data_list1)}
mx.nd.save(fname, data_map1)
data_map2 = mx.nd.load(fname)
assert len(data_map1) == len(data_map2)
for k, x in data_map1.items():
y = data_map2[k]
assert same(x.asnumpy(), y.asnumpy())
os.remove(fname)
@with_seed()
def test_sparse_nd_unsupported():
nd = mx.nd.zeros((2,2), stype='row_sparse')
fn_slice = lambda x: x._slice(None, None)
fn_at = lambda x: x._at(None)
fn_reshape = lambda x: x.reshape(None)
fns = [fn_slice, fn_at, fn_reshape]
for fn in fns:
try:
fn(nd)
assert(False)
except:
pass
@with_seed()
def test_create_csr():
def check_create_csr_from_nd(shape, density, dtype):
matrix = rand_ndarray(shape, 'csr', density)
# create data array with provided dtype and ctx
data = mx.nd.array(matrix.data.asnumpy(), dtype=dtype)
indptr = matrix.indptr
indices = matrix.indices
csr_created = mx.nd.sparse.csr_matrix((data, indices, indptr), shape=shape)
assert csr_created.stype == 'csr'
assert same(csr_created.data.asnumpy(), data.asnumpy())
assert same(csr_created.indptr.asnumpy(), indptr.asnumpy())
assert same(csr_created.indices.asnumpy(), indices.asnumpy())
# verify csr matrix dtype and ctx is consistent from the ones provided
assert csr_created.dtype == dtype, (csr_created, dtype)
assert csr_created.data.dtype == dtype, (csr_created.data.dtype, dtype)
assert csr_created.context == Context.default_ctx, (csr_created.context, Context.default_ctx)
csr_copy = mx.nd.array(csr_created)
assert(same(csr_copy.asnumpy(), csr_created.asnumpy()))
def check_create_csr_from_coo(shape, density, dtype):
matrix = rand_ndarray(shape, 'csr', density)
sp_csr = matrix.asscipy()
sp_coo = sp_csr.tocoo()
csr_created = mx.nd.sparse.csr_matrix((sp_coo.data, (sp_coo.row, sp_coo.col)), shape=shape, dtype=dtype)
assert csr_created.stype == 'csr'
assert same(csr_created.data.asnumpy(), sp_csr.data)
assert same(csr_created.indptr.asnumpy(), sp_csr.indptr)
assert same(csr_created.indices.asnumpy(), sp_csr.indices)
csr_copy = mx.nd.array(csr_created)
assert(same(csr_copy.asnumpy(), csr_created.asnumpy()))
# verify csr matrix dtype and ctx is consistent
assert csr_created.dtype == dtype, (csr_created.dtype, dtype)
assert csr_created.data.dtype == dtype, (csr_created.data.dtype, dtype)
assert csr_created.context == Context.default_ctx, (csr_created.context, Context.default_ctx)
def check_create_csr_from_scipy(shape, density, f):
def assert_csr_almost_equal(nd, sp):
assert_almost_equal(nd.data.asnumpy(), sp.data)
assert_almost_equal(nd.indptr.asnumpy(), sp.indptr)
assert_almost_equal(nd.indices.asnumpy(), sp.indices)
sp_csr = nd.asscipy()
assert_almost_equal(sp_csr.data, sp.data)
assert_almost_equal(sp_csr.indptr, sp.indptr)
assert_almost_equal(sp_csr.indices, sp.indices)
assert(sp.dtype == sp_csr.dtype), (sp.dtype, sp_csr.dtype)
try:
import scipy.sparse as spsp
# random canonical csr
csr_sp = spsp.rand(shape[0], shape[1], density, format="csr")
csr_nd = f(csr_sp)
assert_csr_almost_equal(csr_nd, csr_sp)
# non-canonical csr which contains duplicates and unsorted indices
indptr = np.array([0, 2, 3, 7])
indices = np.array([0, 2, 2, 0, 1, 2, 1])
data = np.array([1, 2, 3, 4, 5, 6, 1])
non_canonical_csr = spsp.csr_matrix((data, indices, indptr), shape=(3, 3), dtype=csr_nd.dtype)
canonical_csr_nd = f(non_canonical_csr, dtype=csr_nd.dtype)
canonical_csr_sp = non_canonical_csr.copy()
canonical_csr_sp.sum_duplicates()
canonical_csr_sp.sort_indices()
assert_csr_almost_equal(canonical_csr_nd, canonical_csr_sp)
except ImportError:
print("Could not import scipy.sparse. Skipping unit tests for scipy csr creation")
dim0 = 20
dim1 = 20
densities = [0, 0.5]
dtype = np.float64
for density in densities:
shape = rand_shape_2d(dim0, dim1)
check_create_csr_from_nd(shape, density, dtype)
check_create_csr_from_coo(shape, density, dtype)
check_create_csr_from_scipy(shape, density, mx.nd.sparse.array)
check_create_csr_from_scipy(shape, density, mx.nd.array)
@with_seed()
def test_create_row_sparse():
dim0 = 50
dim1 = 50
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape)
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
# add this test since we added np.int32 and np.int64 to integer_types
if len(shape) == 2:
for np_int_type in (np.int32, np.int64):
shape = list(shape)
shape = [np_int_type(x) for x in shape]
arg1 = tuple(shape)
mx.nd.sparse.row_sparse_array(arg1, tuple(shape))
shape[0] += 1
assert_exception(mx.nd.sparse.row_sparse_array, ValueError, arg1, tuple(shape))
@with_seed()
def test_create_sparse_nd_infer_shape():
def check_create_csr_infer_shape(shape, density, dtype):
try:
matrix = rand_ndarray(shape, 'csr', density=density)
data = matrix.data
indptr = matrix.indptr
indices = matrix.indices
nd = mx.nd.sparse.csr_matrix((data, indices, indptr), dtype=dtype)
num_rows, num_cols = nd.shape
assert(num_rows == len(indptr) - 1)
assert(indices.shape[0] > 0), indices
assert(np.sum((num_cols <= indices).asnumpy()) == 0)
assert(nd.dtype == dtype), (nd.dtype, dtype)
# cannot infer on invalid shape
except ValueError:
pass
def check_create_rsp_infer_shape(shape, density, dtype):
try:
array = rand_ndarray(shape, 'row_sparse', density=density)
data = array.data
indices = array.indices
nd = mx.nd.sparse.row_sparse_array((data, indices), dtype=dtype)
inferred_shape = nd.shape
assert(inferred_shape[1:] == data.shape[1:])
assert(indices.ndim > 0)
assert(nd.dtype == dtype)
if indices.shape[0] > 0:
assert(np.sum((inferred_shape[0] <= indices).asnumpy()) == 0)
# cannot infer on invalid shape
except ValueError:
pass
dtype = np.int32
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
densities = [0, 0.5, 1]
for density in densities:
check_create_csr_infer_shape(shape, density, dtype)
check_create_rsp_infer_shape(shape, density, dtype)
check_create_rsp_infer_shape(shape_3d, density, dtype)
@with_seed()
def test_create_sparse_nd_from_dense():
def check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx):
arr = f(dense_arr, dtype=dtype, ctx=ctx)
assert(same(arr.asnumpy(), np.ones(shape)))
assert(arr.dtype == dtype)
assert(arr.context == ctx)
# verify the default dtype inferred from dense arr
arr2 = f(dense_arr)
assert(arr2.dtype == default_dtype)
assert(arr2.context == Context.default_ctx)
shape = rand_shape_2d()
dtype = np.int32
src_dtype = np.float64
ctx = mx.cpu(1)
dense_arrs = [mx.nd.ones(shape, dtype=src_dtype), np.ones(shape, dtype=src_dtype), \
np.ones(shape, dtype=src_dtype).tolist()]
for f in [mx.nd.sparse.csr_matrix, mx.nd.sparse.row_sparse_array]:
for dense_arr in dense_arrs:
default_dtype = dense_arr.dtype if isinstance(dense_arr, (NDArray, np.ndarray)) \
else np.float32
check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx)
@with_seed()
def test_create_sparse_nd_from_sparse():
def check_create_from_sp(shape, f, sp_arr, dtype, src_dtype, ctx):
arr = f(sp_arr, dtype=dtype, ctx=ctx)
assert(same(arr.asnumpy(), np.ones(shape)))
assert(arr.dtype == dtype)
assert(arr.context == ctx)
# verify the default dtype inferred from the source sparse arr
arr2 = f(sp_arr)
assert(arr2.dtype == src_dtype)
assert(arr2.context == Context.default_ctx)
shape = rand_shape_2d()
src_dtype = np.float64
dtype = np.int32
ctx = mx.cpu(1)
ones = mx.nd.ones(shape, dtype=src_dtype)
csr_arrs = [ones.tostype('csr')]
rsp_arrs = [ones.tostype('row_sparse')]
try:
import scipy.sparse as spsp
csr_sp = spsp.csr_matrix(np.ones(shape, dtype=src_dtype))
csr_arrs.append(csr_sp)
except ImportError:
print("Could not import scipy.sparse. Skipping unit tests for scipy csr creation")
f_csr = mx.nd.sparse.csr_matrix
f_rsp = mx.nd.sparse.row_sparse_array
for sp_arr in csr_arrs:
check_create_from_sp(shape, f_csr, sp_arr, dtype, src_dtype, ctx)
for sp_arr in rsp_arrs:
check_create_from_sp(shape, f_rsp, sp_arr, dtype, src_dtype, ctx)
@with_seed()
def test_create_sparse_nd_empty():
def check_empty(shape, stype):
arr = mx.nd.empty(shape, stype=stype)
assert(arr.stype == stype)
assert same(arr.asnumpy(), np.zeros(shape))
def check_csr_empty(shape, dtype, ctx):
arr = mx.nd.sparse.csr_matrix(shape, dtype=dtype, ctx=ctx)
assert(arr.stype == 'csr')
assert(arr.dtype == dtype)
assert(arr.context == ctx)
assert same(arr.asnumpy(), np.zeros(shape))
# check the default value for dtype and ctx
arr = mx.nd.sparse.csr_matrix(shape)
assert(arr.dtype == np.float32)
assert(arr.context == Context.default_ctx)
def check_rsp_empty(shape, dtype, ctx):
arr = mx.nd.sparse.row_sparse_array(shape, dtype=dtype, ctx=ctx)
assert(arr.stype == 'row_sparse')
assert(arr.dtype == dtype)
assert(arr.context == ctx)
assert same(arr.asnumpy(), np.zeros(shape))
# check the default value for dtype and ctx
arr = mx.nd.sparse.row_sparse_array(shape)
assert(arr.dtype == np.float32)
assert(arr.context == Context.default_ctx)
stypes = ['csr', 'row_sparse']
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
dtype = np.int32
ctx = mx.cpu(1)
for stype in stypes:
check_empty(shape, stype)
check_csr_empty(shape, dtype, ctx)
check_rsp_empty(shape, dtype, ctx)
check_rsp_empty(shape_3d, dtype, ctx)
@with_seed()
def test_synthetic_dataset_generator():
def test_powerlaw_generator(csr_arr, final_row=1):
"""Test power law distribution
Total Elements: 320000, Number of non-zero elements: 3200
Every row has 2 * the non-zero elements of the previous row.
Also since (2047 < 3200 < 4095) this will be true till the 10th row"""
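# Illustrative note (not from the original test): the check below assumes the
# power-law generator fills each row's columns contiguously starting at column 0,
# so indices[indptr[r] - 1] + 1 (the last stored column index of a row, plus one)
# equals that row's non-zero count, giving rows with 1, 2, 4, 8, ... entries.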
indices = csr_arr.indices.asnumpy()
indptr = csr_arr.indptr.asnumpy()
for row in range(1, final_row + 1):
nextrow = row + 1
current_row_nnz = indices[indptr[row] - 1] + 1
next_row_nnz = indices[indptr[nextrow] - 1] + 1
assert next_row_nnz == 2 * current_row_nnz
# Test if density is preserved
csr_arr_cols, _ = rand_sparse_ndarray(shape=(32, 10000), stype="csr",
density=0.01, distribution="powerlaw")
csr_arr_small, _ = rand_sparse_ndarray(shape=(5, 5), stype="csr",
density=0.5, distribution="powerlaw")
csr_arr_big, _ = rand_sparse_ndarray(shape=(32, 1000000), stype="csr",
density=0.4, distribution="powerlaw")
csr_arr_square, _ = rand_sparse_ndarray(shape=(1600, 1600), stype="csr",
density=0.5, distribution="powerlaw")
assert len(csr_arr_cols.data) == 3200
test_powerlaw_generator(csr_arr_cols, final_row=9)
test_powerlaw_generator(csr_arr_small, final_row=1)
test_powerlaw_generator(csr_arr_big, final_row=4)
test_powerlaw_generator(csr_arr_square, final_row=6)
@with_seed()
def test_sparse_nd_fluent():
def check_fluent_regular(stype, func, kwargs, shape=(5, 17), equal_nan=False):
with mx.name.NameManager():
data = mx.nd.random_uniform(shape=shape, ctx=default_context()).tostype(stype)
regular = getattr(mx.ndarray, func)(data, **kwargs)
fluent = getattr(data, func)(**kwargs)
if isinstance(regular, list):
for r, f in zip(regular, fluent):
assert almost_equal(r.asnumpy(), f.asnumpy(), equal_nan=equal_nan)
else:
assert almost_equal(regular.asnumpy(), fluent.asnumpy(), equal_nan=equal_nan)
all_funcs = ['zeros_like', 'square', 'round', 'rint', 'fix', 'floor', 'ceil', 'trunc',
'abs', 'sign', 'sin', 'degrees', 'radians', 'expm1']
for func in all_funcs:
check_fluent_regular('csr', func, {})
check_fluent_regular('row_sparse', func, {})
all_funcs = ['arcsin', 'arctan', 'tan', 'sinh', 'tanh',
'arcsinh', 'arctanh', 'log1p', 'sqrt', 'relu']
for func in all_funcs:
check_fluent_regular('csr', func, {}, equal_nan=True)
check_fluent_regular('row_sparse', func, {}, equal_nan=True)
check_fluent_regular('csr', 'slice', {'begin': (2, 5), 'end': (4, 7)}, shape=(5, 17))
check_fluent_regular('row_sparse', 'clip', {'a_min': -0.25, 'a_max': 0.75})
for func in ['sum', 'mean', 'norm']:
check_fluent_regular('csr', func, {'axis': 0})
@with_seed()
def test_sparse_nd_exception():
""" test that an invalid sparse operator will throw an exception """
a = mx.nd.ones((2,2))
assertRaises(mx.base.MXNetError, mx.nd.sparse.retain, a, invalid_arg="garbage_value")
assertRaises(ValueError, mx.nd.sparse.csr_matrix, a, shape=(3,2))
assertRaises(ValueError, mx.nd.sparse.csr_matrix, (2,2), shape=(3,2))
assertRaises(ValueError, mx.nd.sparse.row_sparse_array, (2,2), shape=(3,2))
assertRaises(ValueError, mx.nd.sparse.zeros, "invalid_stype", (2,2))
@with_seed()
def test_sparse_nd_check_format():
""" test check_format for sparse ndarray """
shape = rand_shape_2d()
stypes = ["csr", "row_sparse"]
for stype in stypes:
arr, _ = rand_sparse_ndarray(shape, stype)
arr.check_format()
arr = mx.nd.sparse.zeros(stype, shape)
arr.check_format()
# CSR format index pointer array should be less than the number of rows
shape = (3, 4)
data_list = [7, 8, 9]
indices_list = [0, 2, 1]
indptr_list = [0, 5, 2, 3]
a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# CSR format indices should be in ascending order per row
indices_list = [2, 1, 1]
indptr_list = [0, 2, 2, 3]
a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# CSR format indptr should end with value equal with size of indices
indices_list = [1, 2, 1]
indptr_list = [0, 2, 2, 4]
a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# CSR format indices should not be negative
indices_list = [0, 2, 1]
indptr_list = [0, -2, 2, 3]
a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# Row Sparse format indices should be less than the number of rows
shape = (3, 2)
data_list = [[1, 2], [3, 4]]
indices_list = [1, 4]
a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# Row Sparse format indices should be in ascending order
indices_list = [1, 0]
a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# Row Sparse format indices should not be negative
indices_list = [1, -2]
a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
@with_seed()
def test_sparse_nd_norm():
def check_sparse_nd_norm(stype, shape, density, **kwargs):
data, _ = rand_sparse_ndarray(shape, stype, density)
norm = data.norm(**kwargs)
expected_norm = data.tostype('default').norm(**kwargs)
assert_almost_equal(norm.asnumpy(), expected_norm.asnumpy())
shape = (5, 5)
stypes = ['row_sparse', 'csr']
densities = [0, 0.5, 1]
for stype in stypes:
for density in densities:
check_sparse_nd_norm(stype, shape, density, axis=None, keepdims=False, ord=2)
# test fallback
check_sparse_nd_norm(stype, shape, density, axis=0, keepdims=False, ord=2)
check_sparse_nd_norm(stype, shape, density, axis=None, keepdims=True, ord=2)
@with_seed()
def test_sparse_fc():
def check_sparse_fc(batch_size, dim_in, dim_out, stype):
data = rand_ndarray((batch_size, dim_in), stype, density=0.5)
weight = rand_ndarray((dim_out, dim_in), 'row_sparse', density=1)
bias = rand_ndarray((dim_out, 1), 'row_sparse', density=1)
out = mx.nd.sparse.FullyConnected(data, weight, num_hidden=dim_out, bias=bias)
data_dns = data.tostype('default')
weight_dns = weight.tostype('default')
out_dns = mx.nd.FullyConnected(data_dns, weight_dns, num_hidden=dim_out, bias=bias)
assert_almost_equal(out.asnumpy(), out_dns.asnumpy())
# test FC with row_sparse weight w/ density=1, dense data
check_sparse_fc(5, 10, 8, 'default')
# test FC with row_sparse weight w/ density=1, csr data (fallback)
check_sparse_fc(5, 10, 8, 'csr')
if __name__ == '__main__':
import nose
nose.runmodule()
|
apache-2.0
| -1,889,587,153,475,739,600
| 37.807847
| 112
| 0.589086
| false
| 3.251433
| true
| false
| false
|
madgik/exareme
|
Exareme-Docker/src/madisServer/MadisServer.py
|
1
|
2979
|
import tornado.web
from tornado import gen
from tornado.log import enable_pretty_logging
from tornado.options import define, options
import logging
import os
PROCESSES_PER_CPU = 2
WEB_SERVER_PORT=8888
define("port", default=WEB_SERVER_PORT, help="run on the given port", type=int)
import MadisInstance
from MadisInstance import QueryExecutionException
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", MainHandler)
]
tornado.web.Application.__init__(self, handlers)
class BaseHandler(tornado.web.RequestHandler):
def __init__(self, *args):
tornado.web.RequestHandler.__init__(self, *args)
class MainHandler(BaseHandler):
#logging stuff..
enable_pretty_logging()
logger = logging.getLogger('MainHandler')
hdlr = logging.FileHandler('/var/log/MadisServer.log','w+')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
if os.environ['LOG_LEVEL'] == "DEBUG":
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
access_log = logging.getLogger("tornado.access")
app_log = logging.getLogger("tornado.application")
gen_log = logging.getLogger("tornado.general")
access_log.addHandler(hdlr)
app_log.addHandler(hdlr)
gen_log.addHandler(hdlr)
madisInstance=MadisInstance.MadisInstance(logger)
def execQuery(self,dbFilename,query):
self.logger.debug("(MadisServer::execQuery) will call madisInstance.connectToDb({})".format(dbFilename))
self.madisInstance.connectToDb(dbFilename)
try:
self.logger.debug("(MadisServer::execQuery) will call madisInstance.execute({})".format(query))
result= self.madisInstance.execute(query)
finally:
self.madisInstance.closeConnectionToDb()
return result
@tornado.gen.coroutine
def post(self):
dbFilename=self.get_argument("dbfilename")
query=self.get_argument("query")
self.logger.debug("(MadisServer::post) dbfilename={} query={}".format(dbFilename,query))
try:
str_result=self.execQuery(dbFilename,query)
except QueryExecutionException as e:
#raise tornado.web.HTTPError(status_code=500,log_message="...the log message??")
self.logger.error("(MadisServer::post) QueryExecutionException: {}".format(str(e)))
#print "QueryExecutionException ->{}".format(str(e))
self.set_status(500)
self.write(str(e))
self.finish()
return
self.logger.debug("(MadisServer::post) str_result-> {}".format(str_result))
self.write("{}".format(str_result))
self.finish()
def main():
sockets = tornado.netutil.bind_sockets(options.port)
tornado.process.fork_processes(tornado.process.cpu_count() * PROCESSES_PER_CPU)
server = tornado.httpserver.HTTPServer(Application())
server.add_sockets(sockets)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
mit
| -3,267,898,687,122,339,000
| 30.357895
| 108
| 0.699564
| false
| 3.655215
| false
| false
| false
|
mheap/ansible
|
lib/ansible/module_utils/basic.py
|
1
|
116232
|
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
SIZE_RANGES = {
'Y': 1 << 80,
'Z': 1 << 70,
'E': 1 << 60,
'P': 1 << 50,
'T': 1 << 40,
'G': 1 << 30,
'M': 1 << 20,
'K': 1 << 10,
'B': 1,
}
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
PASS_VARS = {
'check_mode': 'check_mode',
'debug': '_debug',
'diff': '_diff',
'keep_remote_files': '_keep_remote_files',
'module_name': '_name',
'no_log': 'no_log',
'remote_tmp': '_remote_tmp',
'selinux_special_fs': '_selinux_special_fs',
'shell_executable': '_shell',
'socket': '_socket_path',
'syslog_facility': '_syslog_facility',
'tmpdir': '_tmpdir',
'verbosity': '_verbosity',
'version': 'ansible_version',
}
PASS_BOOLS = ('no_log', 'debug', 'diff')
# Ansible modules can be written in any language.
# The functions available here can be used to do many common tasks
# and to simplify development of Python modules.
import atexit
import locale
import os
import re
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from itertools import chain, repeat
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
else:
sj_version = json.__version__.split('.')
if sj_version < ['1', '6']:
# Version 1.5 released 2007-01-18 does not have the encoding parameter which we need
print('\n{"msg": "Error: Ansible requires the stdlib json or simplejson >= 1.6. Neither was found!", "failed": true}')
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
from ansible.module_utils.common._collections_compat import (
deque,
KeysView,
Mapping, MutableMapping,
Sequence, MutableSequence,
Set, MutableSet,
)
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
SEQUENCETYPE = frozenset, KeysView, Sequence
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names. Using
# ansible.module_utils.six is preferred
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
# These are things we want: options for setting metadata (mode, ownership, permissions in general) on
# created files (these are used by set_fs_attributes_if_different and included in
# load_file_common_arguments)
mode=dict(type='raw'),
owner=dict(),
group=dict(),
seuser=dict(),
serole=dict(),
selevel=dict(),
setype=dict(),
attributes=dict(aliases=['attr']),
# The following are not about perms and should not be in a rewritten file_common_args
src=dict(), # Maybe dest or path would be appropriate but src is not
follow=dict(type='bool', default=False), # Maybe follow is appropriate because it determines whether to follow symlinks for permission purposes too
force=dict(type='bool'),
# not taken by the file module, but other action plugins call the file module so this ignores
# them for now. In the future, the caller should take care of removing these from the module
# arguments before calling the file module.
content=dict(no_log=True), # used by copy
backup=dict(), # Used by a few modules to create a remote backup before updating the file
remote_src=dict(), # used by assemble
regexp=dict(), # used by assemble
delimiter=dict(), # used by assemble
directory_mode=dict(), # used by copy
unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
PERM_BITS = 0o7777 # file mode permission bits
EXEC_PERM_BITS = 0o0111 # execute permission bits
DEFAULT_PERM = 0o0666 # default file permission bits
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
_PY3_MIN = sys.version_info[:2] >= (3, 5)
_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
if not _PY_MIN:
print(
'\n{"failed": true, '
'"msg": "Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
)
sys.exit(1)
def get_platform():
''' what's the platform? example: Linux is a platform. '''
return platform.system()
def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
supported_dists = platform._supported_dists + ('arch', 'alpine', 'devuan')
distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
distribution = 'Amazon'
else:
distribution = 'OtherLinux'
except:
# FIXME: MethodMissing, I assume?
distribution = platform.dist()[0].capitalize()
else:
distribution = None
return distribution
def get_distribution_version():
''' return the distribution version '''
if platform.system() == 'Linux':
try:
distribution_version = platform.linux_distribution()[1]
if not distribution_version and os.path.isfile('/etc/system-release'):
distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
except:
# FIXME: MethodMissing, I assume?
distribution_version = platform.dist()[1]
else:
distribution_version = None
return distribution_version
def get_all_subclasses(cls):
'''
used by modules like Hardware or Network fact classes to retrieve all subclasses of a given class.
__subclasses__ returns only direct subclasses; this function walks down the whole class tree.
'''
# Retrieve direct subclasses
subclasses = cls.__subclasses__()
to_visit = list(subclasses)
# Then visit all subclasses
while to_visit:
for sc in to_visit:
# The current class is now visited, so remove it from list
to_visit.remove(sc)
# Append all of its subclasses to the visit list and keep a reference to each available class
for ssc in sc.__subclasses__():
subclasses.append(ssc)
to_visit.append(ssc)
return subclasses
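# Illustrative example (not from the original file): for a hypothetical hierarchy
# where B subclasses A and C subclasses B, get_all_subclasses(A) returns both the
# direct and the indirect subclasses, i.e. [B, C] (order not guaranteed).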
def load_platform_subclass(cls, *args, **kwargs):
'''
used by modules like User to have different implementations based on detected platform. See User
module for an example.
'''
this_platform = get_platform()
distribution = get_distribution()
subclass = None
# get the most specific subclass for this platform and distribution
if distribution is not None:
for sc in get_all_subclasses(cls):
if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
subclass = sc
if subclass is None:
for sc in get_all_subclasses(cls):
if sc.platform == this_platform and sc.distribution is None:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
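# Illustrative sketch (hypothetical class names, not part of the original file):
# a platform-specific hierarchy resolves to the most specific match for the host.
#
# class Service(object):
#     platform = 'Generic'
#     distribution = None
#     def __new__(cls, *args, **kwargs):
#         return load_platform_subclass(Service, *args, **kwargs)
#
# class LinuxService(Service):
#     platform = 'Linux'
#     distribution = None
#
# class DebianService(Service):
#     platform = 'Linux'
#     distribution = 'Debian'
#
# On a Debian host Service() would yield a DebianService instance, on another
# Linux distribution a LinuxService, and otherwise a plain Service.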
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, text_type):
return to_bytes(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to text (unicode) str
Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, binary_type):
# Warning, can traceback
return to_text(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
else:
return d
def return_values(obj):
""" Return native stringified values from datastructures.
For use with removing sensitive values pre-jsonification."""
if isinstance(obj, (text_type, binary_type)):
if obj:
yield to_native(obj, errors='surrogate_or_strict')
return
elif isinstance(obj, SEQUENCETYPE):
for element in obj:
for subelement in return_values(element):
yield subelement
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in return_values(element[1]):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, NUMBERTYPES):
yield to_native(obj, nonstring='simplerepr')
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
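# Illustrative note (not from the original file): for container values the helper
# above returns a new empty container of the same kind and records the
# (old, new) pair in deferred_removals; remove_values() below drains that deque,
# scrubbing each element as it copies it into the new container, so arbitrarily
# nested structures are handled without recursion.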
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
deferred_removals = deque()
no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
while deferred_removals:
old_data, new_data = deferred_removals.popleft()
if isinstance(new_data, Mapping):
for old_key, old_elem in old_data.items():
new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
new_data[old_key] = new_elem
else:
for elem in old_data:
new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
if isinstance(new_data, MutableSequence):
new_data.append(new_elem)
elif isinstance(new_data, MutableSet):
new_data.add(new_elem)
else:
raise TypeError('Unknown container type encountered when removing private values from output')
return new_value
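# Example sketch of the observable behaviour (illustrative values only):
# remove_values('my secret token', ['secret']) -> 'my ******** token'
# remove_values({'a': ['secret']}, ['secret'])
#     -> {'a': ['VALUE_SPECIFIED_IN_NO_LOG_PARAMETER']}
# An exact match is replaced wholesale; a substring match is starred out.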
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
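# Illustrative example (not from the original file) of the scrubbing above:
# heuristic_log_sanitize('https://user:pass@example.com/path')
#     -> 'https://user:********@example.com/path'
# Only the text between the user/password separator and the trailing '@' is
# replaced; anything listed in no_log_values is removed on top of that.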
def bytes_to_human(size, isbits=False, unit=None):
base = 'Bytes'
if isbits:
base = 'bits'
suffix = ''
for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
break
if limit != 1:
suffix += base[0]
else:
suffix = base
return '%.2f %s' % (size / limit, suffix)
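# Example sketch (illustrative values): the largest suffix whose limit fits the
# size is chosen, and the optional 'unit' argument forces a specific suffix.
# bytes_to_human(1024) -> '1.00 KB'
# bytes_to_human(1024, isbits=True) -> '1.00 Kb'
# bytes_to_human(1048576, unit='K') -> '1024.00 KB'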
def human_to_bytes(number, default_unit=None, isbits=False):
'''
Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument
ex:
human_to_bytes('10M') <=> human_to_bytes(10, 'M')
'''
m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
if m is None:
raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
try:
num = float(m.group(1))
except:
raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
unit = m.group(2)
if unit is None:
unit = default_unit
if unit is None:
''' No unit given, returning raw number '''
return int(round(num))
range_key = unit[0].upper()
try:
limit = SIZE_RANGES[range_key]
except:
raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
# default value
unit_class = 'B'
unit_class_name = 'byte'
# handling bits case
if isbits:
unit_class = 'b'
unit_class_name = 'bit'
# check unit value if more than one character (KB, MB)
if len(unit) > 1:
expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
if range_key == 'B':
expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
if unit_class_name in unit.lower():
pass
elif unit[1] != unit_class:
raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
return int(round(num * limit))
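# Example sketch (illustrative values), assuming the SIZE_RANGES table above:
# human_to_bytes('10M') -> 10485760
# human_to_bytes(10, default_unit='K') -> 10240
# human_to_bytes('10Mb', isbits=True) -> 10485760 (bits)
# human_to_bytes('10MB', isbits=True) raises ValueError, since a bit suffix is expected.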
def is_executable(path):
'''is the given path executable?
Limitations:
* Does not account for FSACLs.
* Most times we really want to know "Can the current user execute this
file?" This function does not tell us that, only whether an execute bit is set.
'''
# These are all bitfields so first bitwise-or all the permissions we're
# looking for, then bitwise-and with the file's mode to determine if any
# execute bits are set.
return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
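# Illustrative note (not from the original file): the return value is the bitwise
# AND itself, so it is truthy when any execute bit is set and 0 otherwise, e.g. a
# mode of 0o755 yields 0o111 (truthy) while 0o644 yields 0.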
def _load_params():
''' read the module's parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions); however, we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
# This helper is used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
'"failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
def format_attributes(attributes):
attribute_list = []
for attr in attributes:
if attr in FILE_ATTRIBUTES:
attribute_list.append(FILE_ATTRIBUTES[attr])
return attribute_list
def get_flags_from_attributes(attributes):
flags = []
for key, attr in FILE_ATTRIBUTES.items():
if attr in attributes:
flags.append(key)
return ''.join(flags)
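# Example sketch (illustrative): these two helpers translate between lsattr-style
# flag characters and attribute names via the FILE_ATTRIBUTES table above, e.g.
# format_attributes('i') -> ['immutable'] and
# get_flags_from_attributes(['immutable']) -> 'i'.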
def _json_encode_fallback(obj):
if isinstance(obj, Set):
return list(obj)
elif isinstance(obj, datetime.datetime):
return obj.isoformat()
raise TypeError("Cannot json serialize %s" % to_native(obj))
def jsonify(data, **kwargs):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding, default=_json_encode_fallback, **kwargs)
# Old systems using an old simplejson module do not support the encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data, default=_json_encode_fallback, **kwargs)
except UnicodeDecodeError:
continue
raise UnicodeError('Invalid unicode encoding encountered')
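# Illustrative note (not from the original file): _json_encode_fallback lets
# jsonify() handle values json.dumps cannot serialize natively, e.g. sets become
# lists and datetime objects become ISO 8601 strings:
# jsonify({'when': datetime.datetime(2018, 1, 1)}) -> '{"when": "2018-01-01T00:00:00"}'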
class AnsibleFallbackNotFound(Exception):
pass
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
'''
common code for quickly building an ansible module in Python
(although you can write modules in anything that can return JSON);
see library/* for examples
'''
self._name = os.path.basename(__file__) # initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.bypass_checks = bypass_checks
self.no_log = no_log
# Check whether code set this explicitly for deprecation purposes
if check_invalid_arguments is None:
check_invalid_arguments = True
module_set_check_invalid_arguments = False
else:
module_set_check_invalid_arguments = True
self.check_invalid_arguments = check_invalid_arguments
self.mutually_exclusive = mutually_exclusive
self.required_together = required_together
self.required_one_of = required_one_of
self.required_if = required_if
self.cleanup_files = []
self._debug = False
self._diff = False
self._socket_path = None
self._shell = None
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._warnings = []
self._deprecations = []
self._clean = {}
self.aliases = {}
self._legal_inputs = ['_ansible_%s' % k for k in PASS_VARS]
self._options_context = list()
self._tmpdir = None
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except Exception as e:
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
self._handle_no_log_values()
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
'bytes': self._check_type_bytes,
'bits': self._check_type_bits,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._set_defaults(pre=False)
# deal with options sub-spec
self._handle_options()
if not self.no_log:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
# Do this at the end so that logging parameters have been set up
# This is to warn third party module authors that the functionality is going away.
# We exclude uri and zfs as they have their own deprecation warnings for users and we'll
# make sure to update their code to stop using check_invalid_arguments when 2.9 rolls around
if module_set_check_invalid_arguments and self._name not in ('uri', 'zfs'):
self.deprecate('Setting check_invalid_arguments is deprecated and will be removed.'
' Update the code for this module. In the future, AnsibleModule will'
' always check for invalid arguments.', version='2.9')
@property
def tmpdir(self):
# if _ansible_tmpdir was not set, the module needs to create it and
# clean it up once finished.
if self._tmpdir is None:
basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp))
if not os.path.exists(basedir):
self.warn("Module remote_tmp %s did not exist and was created "
"with a mode of 0700, this may cause issues when "
"running as another user. To avoid this, create the "
"remote_tmp dir with the correct permissions "
"manually" % basedir)
os.makedirs(basedir, mode=0o700)
basefile = "ansible-moduletmp-%s-" % time.time()
tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir)
if not self._keep_remote_files:
atexit.register(shutil.rmtree, tmpdir)
self._tmpdir = tmpdir
return self._tmpdir
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
many modules deal with files; this encapsulates the common
options that the file module accepts so that they are directly
available to all modules and the modules can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError as e:
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on an
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
if self.check_file_absent_if_check_mode(path):
return True
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
except OSError as e:
self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
if owner is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except (IOError, OSError) as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
if group is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
if mode is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path_stat = os.lstat(b_path)
if self.check_file_absent_if_check_mode(b_path):
return True
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=to_native(e))
if mode != stat.S_IMODE(mode):
# prevent mode from having extra info or being an invalid long number
path = to_text(b_path)
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError as e:
if os.path.islink(b_path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chmod failed', details=to_native(e),
exception=traceback.format_exc())
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
existing = self.get_file_attributes(b_path)
if existing.get('attr_flags', '') != attributes:
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '=%s' % attributes, b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = attributes
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
self.fail_json(path=to_text(b_path), msg='chattr failed',
details=to_native(e), exception=traceback.format_exc())
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split()
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except:
pass
return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
"""
This enables symbolic chmod string parsing as stated in the chmod man-page.
This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
"""
new_mode = stat.S_IMODE(path_stat.st_mode)
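# Illustrative note (not from the original file): for a regular file with no
# special bits set, a symbolic mode of "u=rwx,g=rx,o=rx" resolves to 0o755,
# while "go-rwx" clears all group and other permission bits.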
# Now parse all symbolic modes
for mode in symbolic_mode.split(','):
# Per single mode. This always contains a '+', '-' or '='
# Split it on that
permlist = MODE_OPERATOR_RE.split(mode)
# And find all the operators
opers = MODE_OPERATOR_RE.findall(mode)
# The user(s) the mode applies to is the first element in the
# 'permlist' list. Take that and remove it from the list.
# An empty user or 'a' means 'all'.
users = permlist.pop(0)
use_umask = (users == '')
if users == 'a' or users == '':
users = 'ugo'
# Check if there are illegal characters in the user list
# They can end up in 'users' because they are not split
if USERS_RE.match(users):
raise ValueError("bad symbolic permission for mode: %s" % mode)
# Now we have two lists of equal length: one contains the requested
# permissions and the other the corresponding operators.
for idx, perms in enumerate(permlist):
# Check if there are illegal characters in the permissions
if PERMS_RE.match(perms):
raise ValueError("bad symbolic permission for mode: %s" % mode)
for user in users:
mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
return new_mode
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Get the umask, if the 'user' part is empty, the effect is as if (a) were
# given, but bits that are set in the umask are not affected.
# We also need the "reversed umask" for masking
umask = os.umask(0)
os.umask(umask)
rev_umask = umask ^ PERM_BITS
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3},
'o': {
'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO},
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
def or_reduce(mode, perm):
return mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
def check_file_absent_if_check_mode(self, file_path):
return self.check_mode and not os.path.exists(file_path)
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
else:
kwargs['state'] = 'absent'
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception as e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
def _handle_aliases(self, spec=None, param=None):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} # alias:canon
if param is None:
param = self.params
if spec is None:
spec = self.argument_spec
for (k, v) in spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
raise Exception('internal error: aliases must be a list or tuple')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in param:
param[k] = param[alias]
return aliases_results
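    # Illustrative (hypothetical) behaviour of _handle_aliases above: with
    # spec = {'dest': {'aliases': ['path']}} and param = {'path': '/tmp/x'},
    # it returns {'path': 'dest'} and copies the value across, so that
    # param == {'path': '/tmp/x', 'dest': '/tmp/x'} afterwards.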
def _handle_no_log_values(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
# Use the argspec to determine which args are no_log
for arg_name, arg_opts in spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = param.get(arg_name, None)
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
if arg_opts.get('removed_in_version') is not None and arg_name in param:
self._deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
'version': arg_opts.get('removed_in_version')
})
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
self._syslog_facility = 'LOG_USER'
unsupported_parameters = set()
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
if legal_inputs is None:
legal_inputs = self._legal_inputs
for (k, v) in list(param.items()):
if check_invalid_arguments and k not in legal_inputs:
unsupported_parameters.add(k)
elif k.startswith('_ansible_'):
# handle setting internal properties from internal ansible vars
key = k.replace('_ansible_', '')
if key in PASS_BOOLS:
setattr(self, PASS_VARS[key], self.boolean(v))
else:
setattr(self, PASS_VARS[key], v)
# clean up internal params:
del self.params[k]
if unsupported_parameters:
msg = "Unsupported parameters for (%s) module: %s" % (self._name, ', '.join(sorted(list(unsupported_parameters))))
if self._options_context:
msg += " found in %s." % " -> ".join(self._options_context)
msg += " Supported parameters include: %s" % (', '.join(sorted(spec.keys())))
self.fail_json(msg=msg)
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
count = 0
if param is None:
param = self.params
for term in check:
if term in param:
count += 1
return count
def _check_mutually_exclusive(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count > 1:
msg = "parameters are mutually exclusive: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count == 0:
msg = "one of the following is required: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
if spec is None:
return
for check in spec:
counts = [self._count_terms([field], param) for field in check]
non_zero = [c for c in counts if c > 0]
if len(non_zero) > 0:
if 0 in counts:
msg = "parameters are required together: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_arguments(self, spec=None, param=None):
''' ensure all required arguments are present '''
missing = []
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
required = v.get('required', False)
if required and k not in param:
missing.append(k)
if len(missing) > 0:
msg = "missing required arguments: %s" % ", ".join(missing)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
''' ensure that parameters which conditionally required are present '''
if spec is None:
return
if param is None:
param = self.params
for sp in spec:
missing = []
max_missing_count = 0
is_one_of = False
if len(sp) == 4:
key, val, requirements, is_one_of = sp
else:
key, val, requirements = sp
            # If is_one_of is True, at least one requirement should be
            # present; otherwise all requirements must be present.
if is_one_of:
max_missing_count = len(requirements)
term = 'any'
else:
term = 'all'
if key in param and param[key] == val:
for check in requirements:
count = self._count_terms((check,), param)
if count == 0:
missing.append(check)
if len(missing) and len(missing) >= max_missing_count:
msg = "%s is %s but %s of the following are missing: %s" % (key, val, term, ', '.join(missing))
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
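    # Illustrative (hypothetical) required_if spec accepted by the method above:
    #   [('state', 'present', ('path', 'content'), True)]
    # meaning: when state == 'present', at least one of 'path' or 'content' must
    # be supplied (the trailing True selects the "any" behaviour).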
def _check_argument_values(self, spec=None, param=None):
''' ensure all arguments have the requested values, and there are no stray arguments '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
choices = v.get('choices', None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
if k in param:
# Allow one or more when type='list' param with choices
if isinstance(param[k], list):
diff_list = ", ".join([item for item in param[k] if item not in choices])
if diff_list:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one or more of: %s. Got no match for: %s" % (k, choices_str, diff_list)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
elif param[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
# the value. If we can't figure this out, module author is responsible.
lowered_choices = None
if param[k] == 'False':
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_FALSE.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(param[k],) = overlap
if param[k] == 'True':
if lowered_choices is None:
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_TRUE.intersection(choices)
if len(overlap) == 1:
(param[k],) = overlap
if param[k] not in choices:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
else:
msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
# do not allow method calls to modules
if not isinstance(value, string_types):
            # already templated to a data structure, perhaps?
if include_exceptions:
return (value, None)
return value
if re.search(r'\w\.\w+\(', value):
if include_exceptions:
return (value, None)
return value
# do not allow imports
if re.search(r'import \w+', value):
if include_exceptions:
return (value, None)
return value
try:
result = literal_eval(value)
if include_exceptions:
return (result, None)
else:
return result
except Exception as e:
if include_exceptions:
return (value, e)
return value
def _check_type_str(self, value):
if isinstance(value, string_types):
return value
# Note: This could throw a unicode error if value's __str__() method
# returns non-ascii. Have to port utils.to_bytes() if that happens
return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, string_types):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [str(value)]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
if isinstance(value, dict):
return value
if isinstance(value, string_types):
if value.startswith("{"):
try:
return json.loads(value)
except:
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
if exc is not None:
raise TypeError('unable to evaluate string as dictionary')
return result
elif '=' in value:
fields = []
field_buffer = []
in_quote = False
in_escape = False
for c in value.strip():
if in_escape:
field_buffer.append(c)
in_escape = False
elif c == '\\':
in_escape = True
elif not in_quote and c in ('\'', '"'):
in_quote = c
elif in_quote and in_quote == c:
in_quote = False
elif not in_quote and c in (',', ' '):
field = ''.join(field_buffer)
if field:
fields.append(field)
field_buffer = []
else:
field_buffer.append(c)
field = ''.join(field_buffer)
if field:
fields.append(field)
return dict(x.split("=", 1) for x in fields)
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
raise TypeError('%s cannot be converted to a dict' % type(value))
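    # Illustrative (hypothetical) conversions performed by _check_type_dict above:
    #   '{"a": 1, "b": 2}'   -> {'a': 1, 'b': 2}        (parsed as JSON)
    #   'a=1, b=two'         -> {'a': '1', 'b': 'two'}  (key=value parsing)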
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, string_types) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, string_types):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, (binary_type, text_type, int)):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (text_type, binary_type)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return self.jsonify(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_type_bytes(self, value):
try:
            # return the converted value so the type dispatcher stores it
            return self.human_to_bytes(value)
except ValueError:
raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
try:
            # return the converted value so the type dispatcher stores it
            return self.human_to_bytes(value, isbits=True)
except ValueError:
raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _handle_options(self, argument_spec=None, params=None):
''' deal with options to create sub spec '''
if argument_spec is None:
argument_spec = self.argument_spec
if params is None:
params = self.params
for (k, v) in argument_spec.items():
wanted = v.get('type', None)
if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
spec = v.get('options', None)
if v.get('apply_defaults', False):
if spec is not None:
if params.get(k) is None:
params[k] = {}
else:
continue
elif spec is None or k not in params or params[k] is None:
continue
self._options_context.append(k)
if isinstance(params[k], dict):
elements = [params[k]]
else:
elements = params[k]
for param in elements:
if not isinstance(param, dict):
self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
self._set_fallbacks(spec, param)
options_aliases = self._handle_aliases(spec, param)
self._handle_no_log_values(spec, param)
options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
# check exclusive early
if not self.bypass_checks:
self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
self._set_defaults(pre=True, spec=spec, param=param)
if not self.bypass_checks:
self._check_required_arguments(spec, param)
self._check_argument_types(spec, param)
self._check_argument_values(spec, param)
self._check_required_together(v.get('required_together', None), param)
self._check_required_one_of(v.get('required_one_of', None), param)
self._check_required_if(v.get('required_if', None), param)
self._set_defaults(pre=False, spec=spec, param=param)
# handle multi level options (sub argspec)
self._handle_options(spec, param)
self._options_context.pop()
def _check_argument_types(self, spec=None, param=None):
''' ensure all arguments have the requested type '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
wanted = v.get('type', None)
if k not in param:
continue
value = param[k]
if value is None:
continue
if not callable(wanted):
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
if param[k] is None:
continue
wanted = 'str'
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
else:
# set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
type_checker = wanted
wanted = getattr(wanted, '__name__', to_native(type(wanted)))
try:
param[k] = type_checker(value)
except (TypeError, ValueError) as e:
self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" %
(k, type(value), wanted, to_native(e)))
def _set_defaults(self, pre=True, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in param:
param[k] = default
else:
# make sure things without a default still get set None
if k not in param:
param[k] = default
def _set_fallbacks(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in param and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
# try to capture all passwords/passphrase named fields missed by no_log
elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
# skip boolean and enums as they are about 'password' state
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
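        # Illustrative (hypothetical) usage from module code:
        #   lsattr = self.get_bin_path('lsattr')                 # None if not found
        #   chattr = self.get_bin_path('chattr', required=True)  # fail_json if missing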
opt_dirs = [] if opt_dirs is None else opt_dirs
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
self.fail_json(msg='Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
return bin_path
def boolean(self, arg):
''' return a bool for the arg '''
if arg is None:
return arg
try:
return boolean(arg)
except TypeError as e:
self.fail_json(msg=to_native(e))
def jsonify(self, data):
try:
return jsonify(data)
except UnicodeError as e:
self.fail_json(msg=to_text(e))
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
else:
self.deprecate(d)
else:
self.deprecate(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
if 'msg' not in kwargs:
raise AssertionError("implementation error -- msg to explain the error is required")
kwargs['failed'] = True
# add traceback if debug or high verbosity and it is missing
        # Note: badly named as 'exception'; it has really always been 'traceback'
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
        ''' This is for checking for required params when we cannot check via the argspec because we
            need more information than is given in the argspec.
        '''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ', '.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
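    # Illustrative (hypothetical) usage of digest_from_file and the wrappers below:
    #   checksum = module.sha1('/etc/hosts')                     # hex string, or None if the file is absent
    #   sha256sum = module.digest_from_file('/etc/hosts', 'sha256')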
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
        '''make a date-marked backup of the specified file, returning the backup destination path (empty string if the source file does not exist)'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
self.preserved_copy(fn, backupdest)
except (shutil.Error, IOError) as e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
return backupdest
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError as e:
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
"""Copy a file with preserved ownership, permissions and context"""
# shutil.copy2(src, dst)
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
#
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
# unaffected. src and dst are path names given as strings.
shutil.copy2(src, dest)
# Set the context
if self.selinux_enabled():
context = self.selinux_context(src)
self.set_context_if_different(dest, context, False)
# chown it
try:
dest_stat = os.stat(src)
tmp_stat = os.stat(dest)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
# Set the attributes
current_attribs = self.get_file_attributes(src)
current_attribs = current_attribs.get('attr_flags', '')
self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
        '''atomically move src to dest, copying attributes from dest; calls fail_json on failure.
        It uses os.rename where possible, since that is an atomic operation; the rest of the function
        works around limitations and corner cases and ensures the selinux context is saved if possible.'''
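        # Illustrative (hypothetical) usage: a module typically writes to a temp
        # file first and then swaps it into place, e.g.
        #   module.atomic_move('/tmp/.myconf.tmp', '/etc/myconf',
        #                      unsafe_writes=module.params.get('unsafe_writes', False))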
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError as e:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError as e:
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError) as e:
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
                # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied),
                # 16 (device busy) and 26 (text file busy), the last of which happens on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
else:
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
b_dest_dir = os.path.dirname(b_dest)
b_suffix = os.path.basename(b_dest)
error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp',
dir=b_dest_dir, suffix=b_suffix)
except (OSError, IOError) as e:
error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
error_msg = ('Failed creating tmp file for atomic move. This usually happens when using Python3 less than Python3.5. '
'Please use Python2.x or Python3.5 or greater.')
finally:
if error_msg:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg=error_msg, exception=traceback.format_exc())
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tmpdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
out_dest = in_src = None
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
            finally:  # ensuring files are closed in a Python 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), 9000)
if data == b(''):
rpipes.remove(file_descriptor)
return data
def _clean_args(self, args):
if not self._clean:
# create a printable version of the command for use in reporting later,
# which strips out things like passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in (to_native(a) for a in to_clean_args):
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
self._clean = ' '.join(shlex_quote(arg) for arg in clean_args)
return self._clean
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
* If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
            This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
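        # Illustrative (hypothetical) calls:
        #   rc, out, err = self.run_command(['/bin/ls', '-l', '/tmp'], check_rc=True)
        #   rc, out, err = self.run_command("ls /tmp | wc -l", use_unsafe_shell=True)
        #   rc, out, err = self.run_command('cat', data='some stdin text')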
# used by clean args later on
self._clean = None
if not isinstance(args, (list, binary_type, text_type)):
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
# stringify args for unsafe/direct shell usage
if isinstance(args, list):
args = " ".join([shlex_quote(x) for x in args])
# not set explicitly, check if set by controller
if executable:
args = [executable, '-c', args]
elif self._shell not in (None, '/bin/sh'):
args = [self._shell, '-c', args]
else:
shell = True
else:
# ensure args are a list
if isinstance(args, (binary_type, text_type)):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
# expand shellisms
args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None]
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths
if not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = os.path.abspath(os.path.expanduser(cwd))
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError) as e:
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)),
exception=traceback.format_exc())
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + self._clean_args(args))
cmd = subprocess.Popen(args, **kwargs)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
else:
stdout = stdout
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=self._clean_args(args))
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
|
gpl-3.0
| 449,926,905,707,652,200
| 38.615542
| 155
| 0.553841
| false
| 4.173501
| false
| false
| false
|
silverfield/pythonsessions
|
s12_chat/chat_client.py
|
1
|
3462
|
# ---------------------------------------------------------------
# Imports
# ---------------------------------------------------------------
import sys
import socket
import select
import time
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from s12_chat import chat_settings
# ---------------------------------------------------------------
# Class
# ---------------------------------------------------------------
class ChatClient:
"""Simple implementation of a chat client"""
# ---------------------------------------------------------------
# Initialisation
# ---------------------------------------------------------------
def __init__(self, nick, server_hostname, server_port=chat_settings.SERVER_PORT):
self._server_hostname = server_hostname
self._server_port = server_port
self._nick = nick
# set up client socket
self._client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._client_sock.settimeout(2) # put to timeout mode
try:
self._client_sock.connect((self._server_hostname, self._server_port))
except ConnectionRefusedError:
print("Server probably not running at {}:{}".format(server_hostname, server_port))
exit(0)
self._client_sock.send(self._nick.encode())
print("Chat server on " + str(self._client_sock.getpeername()))
print("You are on " + str(self._client_sock.getsockname()))
# ---------------------------------------------------------------
# Interface
# ---------------------------------------------------------------
def start_chatting(self):
print("Hi " + str(self._nick) + "! You're connected to the chat server. You can start sending messages")
self.__prompt()
socket_list = [sys.stdin, self._client_sock]
while True:
time.sleep(0.01)
# get the list sockets which are readable
r_sockets, _, _ = select.select(socket_list, [], [])
for sock in r_sockets:
if sock == self._client_sock: # incoming message from server
data = sock.recv(chat_settings.BUFFER_SIZE).decode()
if not data:
print("Server shut down. Terminating...")
exit(0)
print()
print(data)
self.__prompt()
else: # user entered a message
msg = sys.stdin.readline()
self._client_sock.send(msg.encode())
self.__prompt()
# ---------------------------------------------------------------
# Implementation
# ---------------------------------------------------------------
def __prompt(self):
sys.stdout.write("[" + self._nick + "] ")
sys.stdout.flush()
# ---------------------------------------------------------------
# Main
# ---------------------------------------------------------------
def main(argv):
if len(argv) < 2:
print("Provide arguments: nick server_hostname [server_port]")
exit(1)
nick = argv[0]
server_hostname = argv[1]
server_port = chat_settings.SERVER_PORT
if len(argv) >= 3:
server_port = int(argv[2])
client = ChatClient(nick, server_hostname, server_port)
client.start_chatting()
if __name__ == '__main__':
main(sys.argv[1:])
|
mit
| 9,178,664,240,059,496,000
| 33.63
| 112
| 0.431831
| false
| 4.917614
| false
| false
| false
|
openstack/neutron-lib
|
neutron_lib/api/validators/availability_zone.py
|
1
|
1626
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils
from neutron_lib._i18n import _
from neutron_lib.api import validators
from neutron_lib.db import constants as db_const
from neutron_lib import exceptions
def convert_az_list_to_string(az_list):
"""Convert a list of availability zones into a string.
:param az_list: A list of AZs.
:returns: The az_list in string format.
"""
return jsonutils.dumps(az_list)
def convert_az_string_to_list(az_string):
"""Convert an AZ list in string format into a python list.
:param az_string: The AZ list in string format.
    :returns: The python list of AZs built from az_string.
"""
return jsonutils.loads(az_string) if az_string else []
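# Illustrative (hypothetical) round trip using the two helpers above:
#   convert_az_list_to_string(['nova', 'zone-b'])    -> '["nova", "zone-b"]'
#   convert_az_string_to_list('["nova", "zone-b"]')  -> ['nova', 'zone-b']
#   convert_az_string_to_list(None)                  -> []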
def _validate_availability_zone_hints(data, valid_value=None):
msg = validators.validate_list_of_unique_strings(data)
if msg:
return msg
az_string = convert_az_list_to_string(data)
if len(az_string) > db_const.AZ_HINTS_DB_LEN:
msg = _("Too many availability_zone_hints specified")
raise exceptions.InvalidInput(error_message=msg)
|
apache-2.0
| -3,779,166,133,405,238,300
| 33.595745
| 69
| 0.725092
| false
| 3.712329
| false
| false
| false
|
ngageoint/scale
|
scale/metrics/models.py
|
1
|
49093
|
"""Defines the database models for various system metrics."""
from __future__ import unicode_literals
import datetime
import logging
import sys
import django.contrib.gis.db.models as models
import django.utils.timezone as timezone
from django.db import transaction
from error.models import Error
from job.models import Job, JobExecutionEnd, JobType
from ingest.models import Ingest, Strike
from metrics.registry import MetricsPlotData, MetricsType, MetricsTypeGroup, MetricsTypeFilter
logger = logging.getLogger(__name__)
class PlotBigIntegerField(models.BigIntegerField):
"""Custom field used to indicate a model attribute can be used as a plot value.
:keyword verbose_name: The display name of the field.
:type verbose_name: string
:keyword name: The internal database name of the field.
:type name: string
:keyword aggregate: The math operation used to compute the value. Examples: avg, max, min, sum
:type aggregate: string
:keyword group: The base field name used to group together related values. For example, a field may have several
aggregate variations that all reference the same base attribute.
:type group: string
:keyword units: The mathematical units applied to the value. Examples: seconds, minutes, hours
:type units: string
"""
def __init__(self, verbose_name=None, name=None, aggregate=None, group=None, units=None, **kwargs):
self.aggregate = aggregate
self.group = group
self.units = units
super(PlotBigIntegerField, self).__init__(verbose_name, name, **kwargs)
class PlotIntegerField(models.IntegerField):
"""Custom field used to indicate a model attribute can be used as a plot value.
:keyword verbose_name: The display name of the field.
:type verbose_name: string
:keyword name: The internal database name of the field.
:type name: string
:keyword aggregate: The math operation used to compute the value. Examples: avg, max, min, sum
:type aggregate: string
:keyword group: The base field name used to group together related values. For example, a field may have several
aggregate variations that all reference the same base attribute.
:type group: string
:keyword units: The mathematical units applied to the value. Examples: seconds, minutes, hours
:type units: string
"""
def __init__(self, verbose_name=None, name=None, aggregate=None, group=None, units=None, **kwargs):
self.aggregate = aggregate
self.group = group
self.units = units
super(PlotIntegerField, self).__init__(verbose_name, name, **kwargs)
PLOT_FIELD_TYPES = [PlotBigIntegerField, PlotIntegerField]
class MetricsErrorManager(models.Manager):
"""Provides additional methods for computing daily error metrics."""
def calculate(self, date):
"""See :meth:`metrics.registry.MetricsTypeProvider.calculate`."""
started = datetime.datetime.combine(date, datetime.time.min).replace(tzinfo=timezone.utc)
ended = datetime.datetime.combine(date, datetime.time.max).replace(tzinfo=timezone.utc)
# Fetch all the job executions with an error for the requested day
job_exe_ends = JobExecutionEnd.objects.filter(error__is_builtin=True, ended__gte=started, ended__lte=ended)
job_exe_ends = job_exe_ends.select_related('error')
# Calculate the overall counts based on job status
entry_map = {}
for job_exe_end in job_exe_ends.iterator():
occurred_datetime = job_exe_end.ended if job_exe_end.ended else date
entry_date_time = datetime.datetime(occurred_datetime.year, occurred_datetime.month, occurred_datetime.day,
occurred_datetime.hour, tzinfo=timezone.utc)
if job_exe_end.error not in entry_map:
entry_map[job_exe_end.error] = {}
if entry_date_time not in entry_map[job_exe_end.error]:
entry = MetricsError(error=job_exe_end.error, occurred=entry_date_time, created=timezone.now())
entry.total_count = 0
entry_map[job_exe_end.error][entry_date_time] = entry
entry = entry_map[job_exe_end.error][entry_date_time]
entry.total_count += 1
# Save the new metrics to the database
for entry in entry_map:
for entry_time in entry_map[entry]:
self._replace_entries(entry_time, entry, [entry_map[entry][entry_time]])
def get_metrics_type(self, include_choices=False):
"""See :meth:`metrics.registry.MetricsTypeProvider.get_metrics_type`."""
# Create the metrics type definition
metrics_type = MetricsType('errors', 'Errors', 'Metrics for jobs grouped by errors.')
metrics_type.filters = [MetricsTypeFilter('name', 'string'), MetricsTypeFilter('category', 'string')]
metrics_type.groups = MetricsError.GROUPS
metrics_type.set_columns(MetricsError, PLOT_FIELD_TYPES)
# Optionally include all the possible error choices
if include_choices:
metrics_type.choices = Error.objects.filter(is_builtin=True)
return metrics_type
def get_plot_data(self, started=None, ended=None, choice_ids=None, columns=None):
"""See :meth:`metrics.registry.MetricsTypeProvider.get_plot_data`."""
# Fetch all the matching job type metrics based on query filters
entries = MetricsError.objects.all().order_by('occurred')
if started:
entries = entries.filter(occurred__gte=started)
if ended:
entries = entries.filter(occurred__lte=ended)
if choice_ids:
entries = entries.filter(error_id__in=choice_ids)
if not columns:
columns = self.get_metrics_type().columns
column_names = [c.name for c in columns]
entries = entries.values('error_id', 'occurred', *column_names)
# Convert the database models to plot models
return MetricsPlotData.create(entries, 'occurred', 'error_id', choice_ids, columns)
@transaction.atomic
def _replace_entries(self, date, error, entries):
"""Replaces all the existing metric entries for the given date with new ones.
:param date: The date when job executions associated with the metrics ended.
:type date: datetime.date
:param entries: The new metrics model to save.
:type entries: list[:class:`metrics.models.MetricsError`]
"""
# Delete all the previous metrics entries
MetricsError.objects.filter(occurred=date, error=error).delete()
# Save all the new metrics models
MetricsError.objects.bulk_create(entries)
class MetricsError(models.Model):
"""Tracks all the error metrics grouped by error type.
:keyword error: The error type associated with these metrics.
:type error: :class:`django.db.models.ForeignKey`
:keyword occurred: The date when the errors included in this model were created.
:type occurred: :class:`django.db.models.DateField`
:keyword total_count: The total number of errors of this type that occurred for the day.
:type total_count: :class:`metrics.models.PlotBigIntegerField`
:keyword created: When the model was first created.
:type created: :class:`django.db.models.DateTimeField`
"""
GROUPS = [
MetricsTypeGroup('overview', 'Overview', 'Overall counts based on error type.'),
]
error = models.ForeignKey('error.Error', on_delete=models.PROTECT)
occurred = models.DateTimeField(db_index=True)
total_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of jobs that failed with a particular error type.', null=True,
units='count', verbose_name='Total Count')
created = models.DateTimeField(auto_now_add=True)
objects = MetricsErrorManager()
class Meta(object):
"""meta information for the db"""
db_table = 'metrics_error'
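# Illustrative sketch (not part of the original module): recomputing the
# builtin-error metrics above for a range of days. It assumes
# MetricsErrorManager.calculate() takes a datetime.date, mirroring the other
# managers in this file, and that a configured Django environment and
# database are available.
def _example_backfill_error_metrics(first_day, last_day):
    """Recompute MetricsError rows for every date in [first_day, last_day]."""
    day = first_day
    while day <= last_day:
        MetricsError.objects.calculate(day)
        day += datetime.timedelta(days=1)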
class MetricsIngestManager(models.Manager):
"""Provides additional methods for computing daily ingest metrics."""
def calculate(self, date):
"""See :meth:`metrics.registry.MetricsTypeProvider.calculate`."""
started = datetime.datetime.combine(date, datetime.time.min).replace(tzinfo=timezone.utc)
ended = datetime.datetime.combine(date, datetime.time.max).replace(tzinfo=timezone.utc)
# Fetch all the ingests relevant for metrics
ingests = Ingest.objects.filter(status__in=['DEFERRED', 'INGESTED', 'ERRORED', 'DUPLICATE'],
ingest_ended__gte=started, ingest_ended__lte=ended, strike__isnull=False)
ingests = ingests.select_related('strike').defer('strike__configuration')
# Calculate the overall counts based on ingest status
entry_map = {}
for ingest in ingests.iterator():
occurred_datetime = ingest.ingest_ended if ingest.ingest_ended else date
entry_datetime = datetime.datetime(occurred_datetime.year, occurred_datetime.month, occurred_datetime.day,
occurred_datetime.hour, tzinfo=timezone.utc)
if ingest.strike not in entry_map:
entry_map[ingest.strike] = {}
if entry_datetime not in entry_map[ingest.strike]:
entry = MetricsIngest(strike=ingest.strike, occurred=entry_datetime, created=timezone.now())
entry.deferred_count = 0
entry.ingested_count = 0
entry.errored_count = 0
entry.duplicate_count = 0
entry.total_count = 0
entry_map[ingest.strike][entry_datetime] = entry
entry = entry_map[ingest.strike][entry_datetime]
self._update_metrics(entry_datetime, ingest, entry)
# Save the new metrics to the database
for entry in entry_map:
for entry_time in entry_map[entry]:
self._replace_entries(entry_time, entry, [entry_map[entry][entry_time]])
def get_metrics_type(self, include_choices=False):
"""See :meth:`metrics.registry.MetricsTypeProvider.get_metrics_type`."""
# Create the metrics type definition
metrics_type = MetricsType('ingests', 'Ingests', 'Metrics for ingests grouped by strike process.')
metrics_type.filters = [MetricsTypeFilter('name', 'string')]
metrics_type.groups = MetricsIngest.GROUPS
metrics_type.set_columns(MetricsIngest, PLOT_FIELD_TYPES)
# Optionally include all the possible strike choices
if include_choices:
metrics_type.choices = Strike.objects.all()
return metrics_type
def get_plot_data(self, started=None, ended=None, choice_ids=None, columns=None):
"""See :meth:`metrics.registry.MetricsTypeProvider.get_plot_data`."""
# Fetch all the matching ingest metrics based on query filters
entries = MetricsIngest.objects.all().order_by('occurred')
if started:
entries = entries.filter(occurred__gte=started)
if ended:
entries = entries.filter(occurred__lte=ended)
if choice_ids:
entries = entries.filter(strike_id__in=choice_ids)
if not columns:
columns = self.get_metrics_type().columns
column_names = [c.name for c in columns]
entries = entries.values('strike_id', 'occurred', *column_names)
# Convert the database models to plot models
return MetricsPlotData.create(entries, 'occurred', 'strike_id', choice_ids, columns)
def _update_metrics(self, date, ingest, entry):
"""Updates the metrics model attributes for a single ingest.
:param date: The date when ingests associated with the metrics ended.
:type date: datetime.date
:param ingest: The ingest from which to derive statistics.
:type ingest: :class:`ingest.models.Ingest`
:param entry: The metrics model to update.
:type entry: :class:`metrics.models.MetricsIngest`
"""
if ingest.status == 'DEFERRED':
entry.deferred_count += 1
entry.total_count += 1
elif ingest.status == 'INGESTED':
entry.ingested_count += 1
entry.total_count += 1
elif ingest.status == 'ERRORED':
entry.errored_count += 1
entry.total_count += 1
elif ingest.status == 'DUPLICATE':
entry.duplicate_count += 1
entry.total_count += 1
# Update file size metrics
if ingest.file_size:
entry._file_count = (entry._file_count if hasattr(entry, '_file_count') else 0) + 1
entry.file_size_sum = (entry.file_size_sum or 0) + ingest.file_size
entry.file_size_min = min(entry.file_size_min or sys.maxint, ingest.file_size)
entry.file_size_max = max(entry.file_size_max or 0, ingest.file_size)
entry.file_size_avg = entry.file_size_sum / entry._file_count
# Update elapsed transfer time metrics
if ingest.transfer_started and ingest.transfer_ended:
transfer_secs = max((ingest.transfer_ended - ingest.transfer_started).total_seconds(), 0)
entry._transfer_count = (entry._transfer_count if hasattr(entry, '_transfer_count') else 0) + 1
entry.transfer_time_sum = (entry.transfer_time_sum or 0) + transfer_secs
entry.transfer_time_min = min(entry.transfer_time_min or sys.maxint, transfer_secs)
entry.transfer_time_max = max(entry.transfer_time_max or 0, transfer_secs)
entry.transfer_time_avg = entry.transfer_time_sum / entry._transfer_count
# Update elapsed ingest time metrics
if ingest.status == 'INGESTED' and ingest.ingest_started and ingest.ingest_ended:
ingest_secs = max((ingest.ingest_ended - ingest.ingest_started).total_seconds(), 0)
entry._ingest_count = (entry._ingest_count if hasattr(entry, '_ingest_count') else 0) + 1
entry.ingest_time_sum = (entry.ingest_time_sum or 0) + ingest_secs
entry.ingest_time_min = min(entry.ingest_time_min or sys.maxint, ingest_secs)
entry.ingest_time_max = max(entry.ingest_time_max or 0, ingest_secs)
entry.ingest_time_avg = entry.ingest_time_sum / entry._ingest_count
return entry
@transaction.atomic
def _replace_entries(self, date, strike, entries):
"""Replaces all the existing metric entries for the given date with new ones.
:param date: The date when ingests associated with the metrics ended.
:type date: datetime.date
:param strike: The Strike process whose entries are being replaced.
:type strike: :class:`ingest.models.Strike`
:param entries: The new metrics models to save.
:type entries: list[:class:`metrics.models.MetricsIngest`]
"""
# Delete all the previous metrics entries
MetricsIngest.objects.filter(occurred=date, strike=strike).delete()
# Save all the new metrics models
MetricsIngest.objects.bulk_create(entries)
class MetricsIngest(models.Model):
"""Tracks all the ingest metrics grouped by strike process.
:keyword strike: The strike process associated with these metrics.
:type strike: :class:`django.db.models.ForeignKey`
:keyword occurred: The date when the ingests included in this model were ended.
:type occurred: :class:`django.db.models.DateField`
:keyword deferred_count: The total number of deferred ingests.
:type deferred_count: :class:`metrics.models.PlotBigIntegerField`
:keyword ingested_count: The total number of successfully completed ingests.
:type ingested_count: :class:`metrics.models.PlotBigIntegerField`
:keyword errored_count: The total number of failed ingests.
:type errored_count: :class:`metrics.models.PlotBigIntegerField`
:keyword duplicate_count: The total number of duplicated ingests.
:type duplicate_count: :class:`metrics.models.PlotBigIntegerField`
:keyword file_size_sum: The total size of ingested files in bytes.
:type file_size_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword file_size_min: The minimum size of ingested files in bytes.
:type file_size_min: :class:`metrics.models.PlotBigIntegerField`
:keyword file_size_max: The maximum size of ingested files in bytes.
:type file_size_max: :class:`metrics.models.PlotBigIntegerField`
:keyword file_size_avg: The average size of ingested files in bytes.
:type file_size_avg: :class:`metrics.models.PlotBigIntegerField`
:keyword transfer_time_sum: The total time spent transferring ingested files in seconds.
:type transfer_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword transfer_time_min: The minimum time spent transferring ingested files in seconds.
:type transfer_time_min: :class:`metrics.models.PlotIntegerField`
:keyword transfer_time_max: The maximum time spent transferring ingested files in seconds.
:type transfer_time_max: :class:`metrics.models.PlotIntegerField`
:keyword transfer_time_avg: The average time spent transferring ingested files in seconds.
:type transfer_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword ingest_time_sum: The total time spent ingesting files in seconds.
:type ingest_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword ingest_time_min: The minimum time spent ingesting files in seconds.
:type ingest_time_min: :class:`metrics.models.PlotIntegerField`
:keyword ingest_time_max: The maximum time spent ingesting files in seconds.
:type ingest_time_max: :class:`metrics.models.PlotIntegerField`
:keyword ingest_time_avg: The average time spent ingesting files in seconds.
:type ingest_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword created: When the model was first created.
:type created: :class:`django.db.models.DateTimeField`
"""
GROUPS = [
MetricsTypeGroup('overview', 'Overview', 'Overall counts based on ingest status.'),
MetricsTypeGroup('file_size', 'File Size', 'Size information about ingested files.'),
MetricsTypeGroup('transfer_time', 'Transfer Time', 'When files were being transferred before ingest.'),
MetricsTypeGroup('ingest_time', 'Ingest Time', 'When files were processed during ingest.'),
]
strike = models.ForeignKey('ingest.Strike', on_delete=models.PROTECT)
occurred = models.DateTimeField(db_index=True)
deferred_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of files deferred (ignored) by the ingest process.',
null=True, units='count', verbose_name='Deferred Count')
ingested_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of files successfully ingested.', null=True, units='count',
verbose_name='Ingested Count')
errored_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of files that failed to ingest.', null=True, units='count',
verbose_name='Errored Count')
duplicate_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of files that were duplicates of previous ingests.',
null=True, units='count', verbose_name='Duplicate Count')
total_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of deferred, ingested, errored, and duplicate ingests.',
null=True, units='count', verbose_name='Total Count')
file_size_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='file_size',
help_text='Total size of ingested files.', null=True, units='bytes',
verbose_name='File Size (Sum)')
file_size_min = PlotBigIntegerField(aggregate='min', blank=True, group='file_size',
help_text='Minimum size of ingested files.', null=True, units='bytes',
verbose_name='File Size (Min)')
file_size_max = PlotBigIntegerField(aggregate='max', blank=True, group='file_size',
help_text='Maximum size of ingested files.',
null=True, units='bytes', verbose_name='File Size (Max)')
file_size_avg = PlotBigIntegerField(aggregate='avg', blank=True, group='file_size',
help_text='Average size of ingested files.', null=True,
units='bytes', verbose_name='File Size (Avg)')
transfer_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='transfer_time',
help_text='Total time spent transferring files before ingest.', null=True,
units='seconds', verbose_name='Transfer Time (Sum)')
transfer_time_min = PlotIntegerField(aggregate='min', blank=True, group='transfer_time',
help_text='Minimum time spent transferring files before ingest.', null=True,
units='seconds', verbose_name='Transfer Time (Min)')
transfer_time_max = PlotIntegerField(aggregate='max', blank=True, group='transfer_time',
help_text='Maximum time spent transferring files before ingest.', null=True,
units='seconds', verbose_name='Transfer Time (Max)')
transfer_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='transfer_time',
help_text='Average time spent transferring files before ingest.',
null=True, units='seconds', verbose_name='Transfer Time (Avg)')
ingest_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='ingest_time',
help_text='Total time spent processing files during ingest.',
null=True, units='seconds', verbose_name='Ingest Time (Sum)')
ingest_time_min = PlotIntegerField(aggregate='min', blank=True, group='ingest_time',
help_text='Minimum time spent processing files during ingest.',
null=True, units='seconds', verbose_name='Ingest Time (Min)')
ingest_time_max = PlotIntegerField(aggregate='max', blank=True, group='ingest_time',
help_text='Maximum time spent processing files during ingest.',
null=True, units='seconds', verbose_name='Ingest Time (Max)')
ingest_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='ingest_time',
help_text='Average time spent processing files during ingest.',
null=True, units='seconds', verbose_name='Ingest Time (Avg)')
created = models.DateTimeField(auto_now_add=True)
objects = MetricsIngestManager()
class Meta(object):
"""meta information for the db"""
db_table = 'metrics_ingest'
class MetricsJobTypeManager(models.Manager):
"""Provides additional methods for computing daily job type metrics."""
def calculate(self, date):
"""See :meth:`metrics.registry.MetricsTypeProvider.calculate`."""
started = datetime.datetime.combine(date, datetime.time.min).replace(tzinfo=timezone.utc)
ended = datetime.datetime.combine(date, datetime.time.max).replace(tzinfo=timezone.utc)
# Fetch all the jobs relevant for metrics
jobs = Job.objects.filter(status__in=['CANCELED', 'COMPLETED', 'FAILED'], ended__gte=started, ended__lte=ended)
jobs = jobs.select_related('job_type', 'error').defer('input', 'output')
# Calculate the overall counts based on job status
entry_map = {}
for job in jobs.iterator():
occurred_datetime = job.ended if job.ended else date
entry_date_time = datetime.datetime(occurred_datetime.year, occurred_datetime.month, occurred_datetime.day,
occurred_datetime.hour, tzinfo=timezone.utc)
if job.job_type not in entry_map:
entry_map[job.job_type] = {}
if entry_date_time not in entry_map[job.job_type]:
entry = MetricsJobType(job_type=job.job_type, occurred=entry_date_time, created=timezone.now())
entry.completed_count = 0
entry.failed_count = 0
entry.canceled_count = 0
entry.total_count = 0
entry.error_system_count = 0
entry.error_data_count = 0
entry.error_algorithm_count = 0
entry_map[job.job_type][entry_date_time] = entry
entry = entry_map[job.job_type][entry_date_time]
self._update_counts(occurred_datetime, job, entry)
# Fetch all the completed job executions for the requested day
job_exe_ends = JobExecutionEnd.objects.filter(status__in=['COMPLETED'], ended__gte=started, ended__lte=ended)
job_exe_ends = job_exe_ends.select_related('job_type')
# Calculate the metrics per job execution grouped by job type
for job_exe_end in job_exe_ends.iterator():
entry = entry_map[job_exe_end.job.job_type]
for entry_time in entry:
self._update_times(entry_time, job_exe_end, entry[entry_time])
# Save the new metrics to the database
for entry in entry_map:
for entry_time in entry_map[entry]:
self._replace_entries(entry_time, entry, [entry_map[entry][entry_time]])
def get_metrics_type(self, include_choices=False):
"""See :meth:`metrics.registry.MetricsTypeProvider.get_metrics_type`."""
# Create the metrics type definition
metrics_type = MetricsType('job-types', 'Job Types', 'Metrics for jobs and executions grouped by job type.')
metrics_type.filters = [MetricsTypeFilter('name', 'string'), MetricsTypeFilter('version', 'string')]
metrics_type.groups = MetricsJobType.GROUPS
metrics_type.set_columns(MetricsJobType, PLOT_FIELD_TYPES)
# Optionally include all the possible job type choices
if include_choices:
metrics_type.choices = JobType.objects.all()
return metrics_type
def get_plot_data(self, started=None, ended=None, choice_ids=None, columns=None):
"""See :meth:`metrics.registry.MetricsTypeProvider.get_plot_data`."""
# Fetch all the matching job type metrics based on query filters
entries = MetricsJobType.objects.all().order_by('occurred')
if started:
entries = entries.filter(occurred__gte=started)
if ended:
entries = entries.filter(occurred__lte=ended)
if choice_ids:
entries = entries.filter(job_type_id__in=choice_ids)
if not columns:
columns = self.get_metrics_type().columns
column_names = [c.name for c in columns]
entries = entries.values('job_type_id', 'occurred', *column_names)
# Convert the database models to plot models
return MetricsPlotData.create(entries, 'occurred', 'job_type_id', choice_ids, columns)
def _update_counts(self, date, job, entry):
"""Updates the metrics model attributes for a single job.
:param date: The date when jobs associated with the metrics ended.
:type date: datetime.date
:param job: The job from which to derive statistics.
:type job: :class:`job.models.Job`
:param entry: The metrics model to update.
:type entry: :class:`metrics.models.MetricsJobType`
"""
if job.status == 'COMPLETED':
entry.completed_count += 1
entry.total_count += 1
elif job.status == 'FAILED':
entry.failed_count += 1
entry.total_count += 1
elif job.status == 'CANCELED':
entry.canceled_count += 1
entry.total_count += 1
if job.error:
if job.error.category == 'SYSTEM':
entry.error_system_count += 1
elif job.error.category == 'DATA':
entry.error_data_count += 1
elif job.error.category == 'ALGORITHM':
entry.error_algorithm_count += 1
def _update_times(self, date, job_exe_end, entry):
"""Updates the metrics model attributes for a single job execution.
:param date: The date when job executions associated with the metrics ended.
:type date: datetime.date
:param job_exe_end: The job execution from which to derive statistics.
:type job_exe_end: :class:`job.models.JobExecutionEnd`
:param entry: The metrics model to update.
:type entry: :class:`metrics.models.MetricsJobType`
"""
entry_count = entry.completed_count if entry.completed_count > 0 else entry.total_count
# Update elapsed queue time metrics
queue_secs = None
if job_exe_end.queued and job_exe_end.started:
queue_secs = max((job_exe_end.started - job_exe_end.queued).total_seconds(), 0)
entry.queue_time_sum = (entry.queue_time_sum or 0) + queue_secs
entry.queue_time_min = min(entry.queue_time_min or sys.maxint, queue_secs)
entry.queue_time_max = max(entry.queue_time_max or 0, queue_secs)
if entry_count:
entry.queue_time_avg = entry.queue_time_sum / entry_count
task_results = job_exe_end.get_task_results()
pull_secs = None
pull_task_length = task_results.get_task_run_length('pull')
if pull_task_length:
pull_secs = max(pull_task_length.total_seconds(), 0)
# Update elapsed pre-task time metrics
pre_secs = None
pre_task_length = task_results.get_task_run_length('pre')
if pre_task_length:
pre_secs = max(pre_task_length.total_seconds(), 0)
entry.pre_time_sum = (entry.pre_time_sum or 0) + pre_secs
entry.pre_time_min = min(entry.pre_time_min or sys.maxint, pre_secs)
entry.pre_time_max = max(entry.pre_time_max or 0, pre_secs)
if entry_count:
entry.pre_time_avg = entry.pre_time_sum / entry_count
# Update elapsed actual job time metrics
job_secs = None
job_task_length = task_results.get_task_run_length('main')
if job_task_length:
job_secs = max(job_task_length.total_seconds(), 0)
entry.job_time_sum = (entry.job_time_sum or 0) + job_secs
entry.job_time_min = min(entry.job_time_min or sys.maxint, job_secs)
entry.job_time_max = max(entry.job_time_max or 0, job_secs)
if entry_count:
entry.job_time_avg = entry.job_time_sum / entry_count
# Update elapsed post-task time metrics
post_secs = None
post_task_length = task_results.get_task_run_length('post')
if post_task_length:
post_secs = max(post_task_length.total_seconds(), 0)
entry.post_time_sum = (entry.post_time_sum or 0) + post_secs
entry.post_time_min = min(entry.post_time_min or sys.maxint, post_secs)
entry.post_time_max = max(entry.post_time_max or 0, post_secs)
if entry_count:
entry.post_time_avg = entry.post_time_sum / entry_count
# Update elapsed overall run and stage time metrics
if job_exe_end.started and job_exe_end.ended:
run_secs = max((job_exe_end.ended - job_exe_end.started).total_seconds(), 0)
entry.run_time_sum = (entry.run_time_sum or 0) + run_secs
entry.run_time_min = min(entry.run_time_min or sys.maxint, run_secs)
entry.run_time_max = max(entry.run_time_max or 0, run_secs)
if entry_count:
entry.run_time_avg = entry.run_time_sum / entry_count
stage_secs = max(run_secs - ((pull_secs or 0) + (pre_secs or 0) + (job_secs or 0) + (post_secs or 0)), 0)
entry.stage_time_sum = (entry.stage_time_sum or 0) + stage_secs
entry.stage_time_min = min(entry.stage_time_min or sys.maxint, stage_secs)
entry.stage_time_max = max(entry.stage_time_max or 0, stage_secs)
if entry_count:
entry.stage_time_avg = entry.stage_time_sum / entry_count
return entry
@transaction.atomic
def _replace_entries(self, date, job_type, entries):
"""Replaces all the existing metric entries for the given date with new ones.
:param date: The date when job executions associated with the metrics ended.
:type date: datetime.date
:param job_type: The job type whose entries are being replaced.
:type job_type: :class:`job.models.JobType`
:param entries: The new metrics models to save.
:type entries: list[:class:`metrics.models.MetricsJobType`]
"""
# Delete all the previous metrics entries
MetricsJobType.objects.filter(occurred=date, job_type=job_type).delete()
# Save all the new metrics models
MetricsJobType.objects.bulk_create(entries)
class MetricsJobType(models.Model):
"""Tracks all the job execution metrics grouped by job type.
:keyword job_type: The type of job associated with these metrics.
:type job_type: :class:`django.db.models.ForeignKey`
:keyword occurred: The date when the job executions included in this model were ended.
:type occurred: :class:`django.db.models.DateField`
:keyword completed_count: The total number of completed job executions.
:type completed_count: :class:`metrics.models.PlotBigIntegerField`
:keyword failed_count: The total number of failed job executions.
:type failed_count: :class:`metrics.models.PlotBigIntegerField`
:keyword canceled_count: The total number of canceled job executions.
:type canceled_count: :class:`metrics.models.PlotBigIntegerField`
:keyword total_count: The total number of ended job executions (completed, failed, canceled).
:type total_count: :class:`metrics.models.PlotBigIntegerField`
:keyword error_system_count: The number of failed job executions due to a system error.
:type error_system_count: :class:`metrics.models.PlotBigIntegerField`
:keyword error_data_count: The number of failed job executions due to a data error.
:type error_data_count: :class:`metrics.models.PlotBigIntegerField`
:keyword error_algorithm_count: The number of failed job executions due to an algorithm error.
:type error_algorithm_count: :class:`metrics.models.PlotBigIntegerField`
:keyword queue_time_sum: The total time job executions were queued in seconds.
:type queue_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword queue_time_min: The minimum time a job execution was queued in seconds.
:type queue_time_min: :class:`metrics.models.PlotIntegerField`
:keyword queue_time_max: The maximum time a job execution was queued in seconds.
:type queue_time_max: :class:`metrics.models.PlotIntegerField`
:keyword queue_time_avg: The average time job executions were queued in seconds.
:type queue_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword pre_time_sum: The total time job executions were executing pre-task steps in seconds.
:type pre_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword pre_time_min: The minimum time a job execution was executing pre-task steps in seconds.
:type pre_time_min: :class:`metrics.models.PlotIntegerField`
:keyword pre_time_max: The maximum time a job execution was executing pre-task steps in seconds.
:type pre_time_max: :class:`metrics.models.PlotIntegerField`
:keyword pre_time_avg: The average time job executions were executing pre-task steps in seconds.
:type pre_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword job_time_sum: The total time job executions were executing the actual job task in seconds.
:type job_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword job_time_min: The minimum time a job execution was executing the actual job task in seconds.
:type job_time_min: :class:`metrics.models.PlotIntegerField`
:keyword job_time_max: The maximum time a job execution was executing the actual job task in seconds.
:type job_time_max: :class:`metrics.models.PlotIntegerField`
:keyword job_time_avg: The average time job executions were executing the actual job task in seconds.
:type job_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword post_time_sum: The total time job executions were executing post-task steps in seconds.
:type post_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword post_time_min: The minimum time a job execution was executing post-task steps in seconds.
:type post_time_min: :class:`metrics.models.PlotIntegerField`
:keyword post_time_max: The maximum time a job execution was executing post-task steps in seconds.
:type post_time_max: :class:`metrics.models.PlotIntegerField`
:keyword post_time_avg: The average time job executions were executing post-task steps in seconds.
:type post_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword run_time_sum: The total time job executions were running in seconds.
:type run_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword run_time_min: The minimum time a job execution was running in seconds.
:type run_time_min: :class:`metrics.models.PlotIntegerField`
:keyword run_time_max: The maximum time a job execution was running in seconds.
:type run_time_max: :class:`metrics.models.PlotIntegerField`
:keyword run_time_avg: The average time job executions were running in seconds.
:type run_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword stage_time_sum: The total time job executions spent in system staging between tasks in seconds.
:type stage_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword stage_time_min: The minimum time a job execution spent in system staging between tasks in seconds.
:type stage_time_min: :class:`metrics.models.PlotIntegerField`
:keyword stage_time_max: The maximum time a job execution spent in system staging between tasks in seconds.
:type stage_time_max: :class:`metrics.models.PlotIntegerField`
:keyword stage_time_avg: The average time job executions spent in system staging between tasks in seconds.
:type stage_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword created: When the model was first created.
:type created: :class:`django.db.models.DateTimeField`
"""
GROUPS = [
MetricsTypeGroup('overview', 'Overview', 'Overall counts based on job status.'),
MetricsTypeGroup('errors', 'Errors', 'Overall error counts based on category.'),
MetricsTypeGroup('queue_time', 'Queue Time', 'When jobs were in the queue.'),
MetricsTypeGroup('pre_time', 'Pre-task Time', 'When jobs were being prepared.'),
MetricsTypeGroup('job_time', 'Job Task Time', 'When jobs were executing their actual goal.'),
MetricsTypeGroup('post_time', 'Post-task Time', 'When jobs were being cleaned up.'),
MetricsTypeGroup('run_time', 'Run Time', 'When related tasks were run (pre, job, post).'),
MetricsTypeGroup('stage_time', 'Stage Time', 'Times related to the overhead of the system.'),
]
job_type = models.ForeignKey('job.JobType', on_delete=models.PROTECT)
occurred = models.DateTimeField(db_index=True)
completed_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of successfully completed jobs.', null=True, units='count',
verbose_name='Completed Count')
failed_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of incomplete failed jobs.', null=True, units='count',
verbose_name='Failed Count')
canceled_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of incomplete canceled jobs.', null=True, units='count',
verbose_name='Canceled Count')
total_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of completed, failed, and canceled jobs.', null=True,
units='count', verbose_name='Total Count')
error_system_count = PlotBigIntegerField(aggregate='sum', blank=True, group='errors',
help_text='Number of failed jobs due to a system error.', null=True,
units='count', verbose_name='System Error Count')
error_data_count = PlotBigIntegerField(aggregate='sum', blank=True, group='errors',
help_text='Number of failed jobs due to a data error.', null=True,
units='count', verbose_name='Data Error Count')
error_algorithm_count = PlotBigIntegerField(aggregate='sum', blank=True, group='errors',
help_text='Number of failed jobs due to an algorithm error.', null=True,
units='count', verbose_name='Algorithm Error Count')
queue_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='queue_time',
help_text='Total time the job waited in the queue.', null=True,
units='seconds', verbose_name='Queue Time (Sum)')
queue_time_min = PlotIntegerField(aggregate='min', blank=True, group='queue_time',
help_text='Minimum time the job waited in the queue.', null=True, units='seconds',
verbose_name='Queue Time (Min)')
queue_time_max = PlotIntegerField(aggregate='max', blank=True, group='queue_time',
help_text='Maximum time the job waited in the queue.',
null=True, units='seconds', verbose_name='Queue Time (Max)')
queue_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='queue_time',
help_text='Average time the job waited in the queue.', null=True,
units='seconds', verbose_name='Queue Time (Avg)')
pre_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='pre_time',
help_text='Total time spent preparing the job task.', null=True, units='seconds',
verbose_name='Pre-task Time (Sum)')
pre_time_min = PlotIntegerField(aggregate='min', blank=True, group='pre_time',
help_text='Minimum time spent preparing the job task.', null=True, units='seconds',
verbose_name='Pre-task Time (Min)')
pre_time_max = PlotIntegerField(aggregate='max', blank=True, group='pre_time',
help_text='Maximum time spent preparing the job task.', null=True, units='seconds',
verbose_name='Pre-task Time (Max)')
pre_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='pre_time',
help_text='Average time spent preparing the job task.',
null=True, units='seconds', verbose_name='Pre-task Time (Avg)')
job_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='job_time',
help_text='Total time spent running the job task.',
null=True, units='seconds', verbose_name='Job Task Time (Sum)')
job_time_min = PlotIntegerField(aggregate='min', blank=True, group='job_time',
help_text='Minimum time spent running the job task.',
null=True, units='seconds', verbose_name='Job Task Time (Min)')
job_time_max = PlotIntegerField(aggregate='max', blank=True, group='job_time',
help_text='Maximum time spent running the job task.',
null=True, units='seconds', verbose_name='Job Task Time (Max)')
job_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='job_time',
help_text='Average time spent running the job task.',
null=True, units='seconds', verbose_name='Job Task Time (Avg)')
post_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='post_time',
help_text='Total time spent finalizing the job task.',
null=True, units='seconds', verbose_name='Post-task Time (Sum)')
post_time_min = PlotIntegerField(aggregate='min', blank=True, group='post_time',
help_text='Minimum time spent finalizing the job task.',
null=True, units='seconds', verbose_name='Post-task Time (Min)')
post_time_max = PlotIntegerField(aggregate='max', blank=True, group='post_time',
help_text='Maximum time spent finalizing the job task.',
null=True, units='seconds', verbose_name='Post-task Time (Max)')
post_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='post_time',
help_text='Average time spent finalizing the job task.',
null=True, units='seconds', verbose_name='Post-task Time (Avg)')
run_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='run_time',
help_text='Total time spent running the pre, job, and post tasks.',
null=True, units='seconds', verbose_name='Run Time (Sum)')
run_time_min = PlotIntegerField(aggregate='min', blank=True, group='run_time',
help_text='Minimum time spent running the pre, job, and post tasks.',
null=True, units='seconds', verbose_name='Run Time (Min)')
run_time_max = PlotIntegerField(aggregate='max', blank=True, group='run_time',
help_text='Maximum time spent running the pre, job, and post tasks.',
null=True, units='seconds', verbose_name='Run Time (Max)')
run_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='run_time',
help_text='Average time spent running the pre, job, and post tasks.',
null=True, units='seconds', verbose_name='Run Time (Avg)')
stage_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='stage_time',
help_text='Total overhead time spent managing tasks.',
null=True, units='seconds', verbose_name='Stage Time (Sum)')
stage_time_min = PlotIntegerField(aggregate='min', blank=True, group='stage_time',
help_text='Minimum overhead time spent managing tasks.',
null=True, units='seconds', verbose_name='Stage Time (Min)')
stage_time_max = PlotIntegerField(aggregate='max', blank=True, group='stage_time',
help_text='Maximum overhead time spent managing tasks.',
null=True, units='seconds', verbose_name='Stage Time (Max)')
stage_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='stage_time',
help_text='Average overhead time spent managing tasks.',
null=True, units='seconds', verbose_name='Stage Time (Avg)')
created = models.DateTimeField(auto_now_add=True)
objects = MetricsJobTypeManager()
class Meta(object):
"""meta information for the db"""
db_table = 'metrics_job_type'
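# Illustrative sketch (not part of the original module): pulling plot data for
# the job-type metrics defined above. get_plot_data() is the manager method in
# this file; the UTC day boundaries mirror how calculate() builds them. A
# configured Django environment and database are assumed.
def _example_job_type_plot_data(day, job_type_ids=None):
    """Return plot data for one UTC day, optionally limited to certain job types."""
    started = datetime.datetime.combine(day, datetime.time.min).replace(tzinfo=timezone.utc)
    ended = datetime.datetime.combine(day, datetime.time.max).replace(tzinfo=timezone.utc)
    return MetricsJobType.objects.get_plot_data(started=started, ended=ended,
                                                choice_ids=job_type_ids)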
|
apache-2.0
| 2,971,459,741,209,194,500
| 56.151339
| 120
| 0.631719
| false
| 4.218336
| false
| false
| false
|
TheAlgorithms/Python
|
machine_learning/similarity_search.py
|
1
|
4778
|
"""
Similarity Search : https://en.wikipedia.org/wiki/Similarity_search
Similarity search is a search algorithm for finding the vector nearest to a
query vector within a set of vectors; it is commonly used in natural language
processing.
This implementation measures distance with the Euclidean distance and, for each
query vector, returns a list containing two items:
1. the nearest vector
2. the distance between the query vector and the nearest vector (float)
"""
import math
from typing import List, Union
import numpy as np
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
"""
Calculates the Euclidean distance between two vectors.
:param input_a: ndarray of first vector.
:param input_b: ndarray of second vector.
:return: Euclidean distance of input_a and input_b. Because math.sqrt() is
used, the result is a float.
>>> euclidean(np.array([0]), np.array([1]))
1.0
>>> euclidean(np.array([0, 1]), np.array([1, 1]))
1.0
>>> euclidean(np.array([0, 0, 0]), np.array([0, 0, 1]))
1.0
"""
return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(
dataset: np.ndarray, value_array: np.ndarray
) -> List[List[Union[List[float], float]]]:
"""
:param dataset: Set containing the vectors. Should be ndarray.
:param value_array: vector/vectors we want to know the nearest vector from dataset.
:return: Result will be a list containing
1. the nearest vector
2. distance from the vector
>>> dataset = np.array([[0], [1], [2]])
>>> value_array = np.array([[0]])
>>> similarity_search(dataset, value_array)
[[[0], 0.0]]
>>> dataset = np.array([[0, 0], [1, 1], [2, 2]])
>>> value_array = np.array([[0, 1]])
>>> similarity_search(dataset, value_array)
[[[0, 0], 1.0]]
>>> dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
>>> value_array = np.array([[0, 0, 1]])
>>> similarity_search(dataset, value_array)
[[[0, 0, 0], 1.0]]
>>> dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
>>> value_array = np.array([[0, 0, 0], [0, 0, 1]])
>>> similarity_search(dataset, value_array)
[[[0, 0, 0], 0.0], [[0, 0, 0], 1.0]]
These are the errors that might occur:
1. If the dimensions are different.
For example, dataset is a 2d array and value_array is a 1d array:
>>> dataset = np.array([[1]])
>>> value_array = np.array([1])
>>> similarity_search(dataset, value_array)
Traceback (most recent call last):
...
ValueError: Wrong input data's dimensions... dataset : 2, value_array : 1
2. If the data's shapes are different.
For example, dataset has shape (3, 2) and value_array has shape (2, 3).
The vectors in both arrays are expected to have the same length, so this is wrong.
>>> dataset = np.array([[0, 0], [1, 1], [2, 2]])
>>> value_array = np.array([[0, 0, 0], [0, 0, 1]])
>>> similarity_search(dataset, value_array)
Traceback (most recent call last):
...
ValueError: Wrong input data's shape... dataset : 2, value_array : 3
3. If the data types are different.
The arrays are compared element-wise, so they are expected to share the same dtype.
If not, a TypeError is raised.
>>> dataset = np.array([[0, 0], [1, 1], [2, 2]], dtype=np.float32)
>>> value_array = np.array([[0, 0], [0, 1]], dtype=np.int32)
>>> similarity_search(dataset, value_array) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
TypeError: Input data have different datatype...
dataset : float32, value_array : int32
"""
if dataset.ndim != value_array.ndim:
raise ValueError(
f"Wrong input data's dimensions... dataset : {dataset.ndim}, "
f"value_array : {value_array.ndim}"
)
try:
if dataset.shape[1] != value_array.shape[1]:
raise ValueError(
f"Wrong input data's shape... dataset : {dataset.shape[1]}, "
f"value_array : {value_array.shape[1]}"
)
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape")
if dataset.dtype != value_array.dtype:
raise TypeError(
f"Input data have different datatype... dataset : {dataset.dtype}, "
f"value_array : {value_array.dtype}"
)
answer = []
for value in value_array:
dist = euclidean(value, dataset[0])
vector = dataset[0].tolist()
for dataset_value in dataset[1:]:
temp_dist = euclidean(value, dataset_value)
if dist > temp_dist:
dist = temp_dist
vector = dataset_value.tolist()
answer.append([vector, dist])
return answer
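# Illustrative sketch (not part of the original module): the same nearest-vector
# lookup written with NumPy broadcasting instead of the explicit Python loops in
# similarity_search(). It assumes the inputs have already passed the dimension,
# shape, and dtype checks above.
def _similarity_search_vectorized(dataset: np.ndarray, value_array: np.ndarray) -> list:
    answer = []
    for value in value_array:
        # Euclidean distance from this query vector to every row of the dataset.
        distances = np.linalg.norm(dataset - value, axis=1)
        nearest = int(np.argmin(distances))
        answer.append([dataset[nearest].tolist(), float(distances[nearest])])
    return answer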
if __name__ == "__main__":
import doctest
doctest.testmod()
|
mit
| -4,778,309,454,849,145,000
| 33.128571
| 87
| 0.593554
| false
| 3.597892
| false
| false
| false
|
pfalcon/picotui
|
picotui/widgets.py
|
1
|
15228
|
from .basewidget import *
from .editorext import *
from .defs import *
import sys  # sys.maxsize is used by WComboBox.show_popup()
__all__ = (
"ACTION_OK",
"ACTION_CANCEL",
"ACTION_NEXT",
"ACTION_PREV",
"EditableWidget",
"Dialog",
"WLabel",
"WFrame",
"WButton",
"WCheckbox",
"WRadioButton",
"WListBox",
"WPopupList",
"WDropDown",
"WTextEntry",
"WMultiEntry",
"WComboBox",
"WCompletionList",
"WAutoComplete",
)
class Dialog(Widget):
finish_on_esc = True
def __init__(self, x, y, w=0, h=0, title=""):
super().__init__()
self.x = x
self.y = y
self.w = w
self.h = h
self.title = ""
if title:
self.title = " %s " % title
self.childs = []
# On both sides
self.border_w = 2
self.border_h = 2
self.focus_w = None
self.focus_idx = -1
def add(self, x, y, widget):
if isinstance(widget, str):
# Convert raw string to WLabel
widget = WLabel(widget)
widget.set_xy(self.x + x, self.y + y)
self.childs.append(widget)
widget.owner = self
def autosize(self, extra_w=0, extra_h=0):
w = 0
h = 0
for wid in self.childs:
w = max(w, wid.x - self.x + wid.w)
h = max(h, wid.y - self.y + wid.h)
self.w = max(self.w, w + self.border_w - 1) + extra_w
self.h = max(self.h, h + self.border_h - 1) + extra_h
def redraw(self):
# Init some state on first redraw
if self.focus_idx == -1:
self.autosize()
self.focus_idx, self.focus_w = self.find_focusable_by_idx(0, 1)
if self.focus_w:
self.focus_w.focus = True
# Redraw widgets with cursor off
self.cursor(False)
self.dialog_box(self.x, self.y, self.w, self.h, self.title)
for w in self.childs:
w.redraw()
# Then give widget in focus a chance to enable cursor
if self.focus_w:
self.focus_w.set_cursor()
def find_focusable_by_idx(self, from_idx, direction):
sz = len(self.childs)
while 0 <= from_idx < sz:
if isinstance(self.childs[from_idx], FocusableWidget):
return from_idx, self.childs[from_idx]
from_idx = (from_idx + direction) % sz
return None, None
def find_focusable_by_xy(self, x, y):
i = 0
for w in self.childs:
if isinstance(w, FocusableWidget) and w.inside(x, y):
return i, w
i += 1
return None, None
def change_focus(self, widget):
if widget is self.focus_w:
return
if self.focus_w:
self.focus_w.focus = False
self.focus_w.redraw()
self.focus_w = widget
widget.focus = True
widget.redraw()
widget.set_cursor()
def move_focus(self, direction):
prev_idx = (self.focus_idx + direction) % len(self.childs)
self.focus_idx, new_w = self.find_focusable_by_idx(prev_idx, direction)
self.change_focus(new_w)
def handle_key(self, key):
if key == KEY_QUIT:
return key
if key == KEY_ESC and self.finish_on_esc:
return ACTION_CANCEL
if key == KEY_TAB:
self.move_focus(1)
elif key == KEY_SHIFT_TAB:
self.move_focus(-1)
elif self.focus_w:
if key == KEY_ENTER:
if self.focus_w.finish_dialog is not False:
return self.focus_w.finish_dialog
res = self.focus_w.handle_key(key)
if res == ACTION_PREV:
self.move_focus(-1)
elif res == ACTION_NEXT:
self.move_focus(1)
else:
return res
def handle_mouse(self, x, y):
# Work in absolute coordinates
if self.inside(x, y):
self.focus_idx, w = self.find_focusable_by_xy(x, y)
# print(w)
if w:
self.change_focus(w)
return w.handle_mouse(x, y)
class WLabel(Widget):
def __init__(self, text, w=0):
self.t = text
self.h = 1
self.w = w
if not w:
self.w = len(text)
def redraw(self):
self.goto(self.x, self.y)
self.wr_fixedw(self.t, self.w)
class WFrame(Widget):
def __init__(self, w, h, title=""):
self.w = w
self.h = h
self.t = title
def redraw(self):
self.draw_box(self.x, self.y, self.w, self.h)
if self.t:
pos = 1
self.goto(self.x + pos, self.y)
self.wr(" %s " % self.t)
class WButton(FocusableWidget):
def __init__(self, w, text):
Widget.__init__(self)
self.t = text
self.h = 1
self.w = w or len(text) + 2
self.disabled = False
self.focus = False
self.finish_dialog = False
def redraw(self):
self.goto(self.x, self.y)
if self.disabled:
self.attr_color(C_WHITE, C_GRAY)
else:
if self.focus:
self.attr_color(C_B_WHITE, C_GREEN)
else:
self.attr_color(C_BLACK, C_GREEN)
self.wr(self.t.center(self.w))
self.attr_reset()
def handle_mouse(self, x, y):
if not self.disabled:
if self.finish_dialog is not False:
return self.finish_dialog
else:
self.signal("click")
def handle_key(self, key):
if key == KEY_UP or key == KEY_LEFT:
return ACTION_PREV
if key == KEY_DOWN or key == KEY_RIGHT:
return ACTION_NEXT
# For dialog buttons (.finish_dialog=True), KEY_ENTER won't
# reach here.
if key == KEY_ENTER:
self.signal("click")
def on_click(self):
pass
class WCheckbox(ChoiceWidget):
def __init__(self, title, choice=False):
super().__init__(choice)
self.t = title
self.h = 1
self.w = 4 + len(title)
self.focus = False
def redraw(self):
self.goto(self.x, self.y)
if self.focus:
self.attr_color(C_B_BLUE, None)
self.wr("[x] " if self.choice else "[ ] ")
self.wr(self.t)
self.attr_reset()
def flip(self):
self.choice = not self.choice
self.redraw()
self.signal("changed")
def handle_mouse(self, x, y):
self.flip()
def handle_key(self, key):
if key == KEY_UP:
return ACTION_PREV
if key == KEY_DOWN:
return ACTION_NEXT
if key == b" ":
self.flip()
class WRadioButton(ItemSelWidget):
def __init__(self, items):
super().__init__(items)
self.h = len(items)
self.w = 4 + self.longest(items)
self.focus = False
def redraw(self):
i = 0
if self.focus:
self.attr_color(C_B_BLUE, None)
for t in self.items:
self.goto(self.x, self.y + i)
self.wr("(*) " if self.choice == i else "( ) ")
self.wr(t)
i += 1
self.attr_reset()
def handle_mouse(self, x, y):
self.choice = y - self.y
self.redraw()
self.signal("changed")
def handle_key(self, key):
if key == KEY_UP:
self.move_sel(-1)
elif key == KEY_DOWN:
self.move_sel(1)
class WListBox(EditorExt, ChoiceWidget):
def __init__(self, w, h, items):
EditorExt.__init__(self)
ChoiceWidget.__init__(self, 0)
self.width = w
self.w = w
self.height = h
self.h = h
self.set_items(items)
self.focus = False
def set_items(self, items):
self.items = items
self.set_lines(items)
def render_line(self, l):
# Default identity implementation is suitable for
# items being list of strings.
return l
def show_line(self, l, i):
hlite = self.cur_line == i
if hlite:
if self.focus:
self.attr_color(C_B_WHITE, C_GREEN)
else:
self.attr_color(C_BLACK, C_GREEN)
if i != -1:
l = self.render_line(l)[:self.width]
self.wr(l)
self.clear_num_pos(self.width - len(l))
if hlite:
self.attr_reset()
def handle_mouse(self, x, y):
res = super().handle_mouse(x, y)
self.choice = self.cur_line
self.redraw()
self.signal("changed")
return res
def handle_key(self, key):
res = super().handle_key(key)
self.choice = self.cur_line
self.redraw()
self.signal("changed")
return res
def handle_edit_key(self, key):
pass
def set_cursor(self):
Widget.set_cursor(self)
def cursor(self, state):
# Force off
super().cursor(False)
class WPopupList(Dialog):
class OneShotList(WListBox):
def handle_key(self, key):
if key == KEY_ENTER:
return ACTION_OK
if key == KEY_ESC:
return ACTION_CANCEL
return super().handle_key(key)
def handle_mouse(self, x, y):
if super().handle_mouse(x, y) == True:
# (Processed) mouse click finishes selection
return ACTION_OK
def __init__(self, x, y, w, h, items, sel_item=0):
super().__init__(x, y, w, h)
self.list = self.OneShotList(w - 2, h - 2, items)
self.list.cur_line = sel_item
self.add(1, 1, self.list)
def handle_mouse(self, x, y):
if not self.inside(x, y):
return ACTION_CANCEL
return super().handle_mouse(x, y)
def get_choice(self):
return self.list.cur_line
def get_selected_value(self):
if not self.list.content:
return None
return self.list.content[self.list.cur_line]
class WDropDown(ChoiceWidget):
def __init__(self, w, items, *, dropdown_h=5):
super().__init__(0)
self.items = items
self.h = 1
self.w = w
self.dropdown_h = dropdown_h
self.focus = False
def redraw(self):
self.goto(self.x, self.y)
if self.focus:
self.attr_color(C_B_WHITE, C_CYAN)
else:
self.attr_color(C_BLACK, C_CYAN)
self.wr_fixedw(self.items[self.choice], self.w - 1)
self.attr_reset()
self.wr(DOWN_ARROW)
def handle_mouse(self, x, y):
popup = WPopupList(self.x, self.y + 1, self.w, self.dropdown_h, self.items, self.choice)
res = popup.loop()
if res == ACTION_OK:
self.choice = popup.get_choice()
self.signal("changed")
self.owner.redraw()
def handle_key(self, key):
self.handle_mouse(0, 0)
class WTextEntry(EditorExt, EditableWidget):
def __init__(self, w, text):
EditorExt.__init__(self, width=w, height=1)
self.t = text
self.h = 1
self.w = w
self.focus = False
self.set(text)
self.col = len(text)
self.adjust_cursor_eol()
self.just_started = True
def get(self):
return self.get_cur_line()
def set(self, text):
self.set_lines([text])
def handle_cursor_keys(self, key):
if super().handle_cursor_keys(key):
if self.just_started:
self.just_started = False
self.redraw()
return True
return False
def handle_edit_key(self, key):
if key == KEY_ENTER:
# Don't treat as editing key
return True
if self.just_started:
if key != KEY_BACKSPACE:
# Overwrite initial string with new content
self.set_lines([""])
self.col = 0
self.just_started = False
return super().handle_edit_key(key)
def handle_mouse(self, x, y):
if self.just_started:
self.just_started = False
self.redraw()
super().handle_mouse(x, y)
def show_line(self, l, i):
if self.just_started:
fg = C_WHITE
else:
fg = C_BLACK
self.attr_color(fg, C_CYAN)
super().show_line(l, i)
self.attr_reset()
class WMultiEntry(EditorExt, EditableWidget):
def __init__(self, w, h, lines):
EditorExt.__init__(self, width=w, height=h)
self.h = h
self.w = w
self.focus = False
self.set_lines(lines)
def get(self):
return self.content
def set(self, lines):
self.set_lines(lines)
def show_line(self, l, i):
self.attr_color(C_BLACK, C_CYAN)
super().show_line(l, i)
self.attr_reset()
class WComboBox(WTextEntry):
popup_class = WPopupList
popup_h = 5
def __init__(self, w, text, items):
# w - 1 width goes to Editor widget
super().__init__(w - 1, text)
# We have full requested width, will show arrow symbol as last char
self.w = w
self.items = items
def redraw(self):
self.goto(self.x + self.w - 1, self.y)
self.wr(DOWN_ARROW)
super().redraw()
def get_choices(self, substr):
return self.items
def show_popup(self):
choices = self.get_choices(self.get())
popup = self.popup_class(self.x, self.y + 1, self.longest(choices) + 2, self.popup_h, choices)
popup.main_widget = self
res = popup.loop()
if res == ACTION_OK:
val = popup.get_selected_value()
if val is not None:
self.set_lines([val])
self.margin = 0
self.col = sys.maxsize
self.adjust_cursor_eol()
self.just_started = False
self.owner.redraw()
def handle_key(self, key):
if key == KEY_DOWN:
self.show_popup()
else:
return super().handle_key(key)
def handle_mouse(self, x, y):
if x == self.x + self.w - 1:
self.show_popup()
else:
super().handle_mouse(x, y)
class WCompletionList(WPopupList):
def __init__(self, x, y, w, h, items):
Dialog.__init__(self, x, y, w, h)
self.list = self.OneShotList(w - 2, h - 2, items)
self.add(1, 1, self.list)
chk = WCheckbox("Prefix")
def is_prefix_changed(wid):
main = self.main_widget
choices = main.get_choices(main.get(), wid.choice)
self.list.set_lines(choices)
self.list.top_line = 0
self.list.cur_line = 0
self.list.row = 0
self.list.redraw()
chk.on("changed", is_prefix_changed)
self.add(1, h - 1, chk)
class WAutoComplete(WComboBox):
popup_class = WCompletionList
def get_choices(self, substr, only_prefix=False):
substr = substr.lower()
if only_prefix:
choices = list(filter(lambda x: x.lower().startswith(substr), self.items))
else:
choices = list(filter(lambda x: substr in x.lower(), self.items))
return choices
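# Illustrative sketch (not part of the original module): assembling a small
# dialog from the widgets above. Dialog.loop() runs the modal event loop; the
# terminal itself is assumed to have been prepared by the caller (for example
# with picotui's screen/context helpers), which is outside this sketch.
def _example_dialog():
    d = Dialog(5, 3, 40, 8, title="Example")
    d.add(1, 1, "Name:")                  # raw strings are wrapped in WLabel by Dialog.add()
    d.add(8, 1, WTextEntry(20, "world"))
    b_ok = WButton(8, "OK")
    b_ok.finish_dialog = ACTION_OK        # pressing Enter on this button closes the dialog
    d.add(1, 5, b_ok)
    b_cancel = WButton(8, "Cancel")
    b_cancel.finish_dialog = ACTION_CANCEL
    d.add(12, 5, b_cancel)
    return d.loop()                       # returns ACTION_OK or ACTION_CANCEL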
|
mit
| 1,381,513,039,429,334,800
| 25.952212
| 102
| 0.517271
| false
| 3.503106
| false
| false
| false
|
allanliebold/data-structures
|
src/dll.py
|
1
|
2785
|
"""Implementation of Doubly-Linked list with a head and tail."""
from linked_list import LinkedList
from linked_list import Node
class Dll(object):
"""Doubly-Linked List class object."""
def __init__(self):
"""Doubly-linked list initialization.
Composed of attributes borrowed from the singly-linked list, plus a tail reference.
"""
self._linkedlist = LinkedList()
self.head = self._linkedlist.head
self._length = self._linkedlist._length
self.tail = None
def push(self, data):
"""Push node to head of list."""
prev_head = self.head
new_head = self._linkedlist.push(data)
if self.tail is None:
self.tail = new_head
if self.head:
prev_head.prev = new_head
self.head = new_head
self.head.next_node = prev_head
self._length += 1
self.head.prev = None
def pop(self):
"""Remove node at head of list."""
if not self.head:
raise IndexError('List empty')
deleted_node = self.head.data
self._length -= 1
if not self.head.next_node:
self.head = None
self.tail = None
else:
self.head = self.head.next_node
self.head.prev = None
return deleted_node
def append(self, data):
"""Append method for Dll to add to tail."""
prev_tail = self.tail
new_tail = Node(data)
if self._length == 0:
self.tail = new_tail
self.head = new_tail
self.tail.prev = None
self.tail = new_tail
if self._length > 0:
prev_tail.next_node = new_tail
self.tail.prev = prev_tail
self._length += 1
def shift(self):
"""Shift method for Dll to remove from tail end."""
if self._length == 0:
raise IndexError('List empty')
deleted_node = self.tail.data
self._length -= 1
if not self.tail.prev:
self.head = None
self.tail = None
else:
self.tail = self.tail.prev
self.tail.next_node = None
return deleted_node
def remove(self, val):
"""Remove method for Dll to remove specified node."""
if self._length < 1:
raise IndexError('Value not present. List empty.')
if self._length == 1:
self.head = None
self.tail = None
target = self._linkedlist.search(val)
if target.prev:
target.prev.next_node = target.next_node
if target.next_node:
target.next_node.prev = target.prev
return target
def __len__(self):
"""Function uses built-in len function to show length."""
return self._length
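# Illustrative sketch (not part of the original module): exercising the public
# Dll operations defined above. push/pop work at the head, append/shift at the
# tail, and remove() deletes the node holding the given value.
def _example_usage():
    dll = Dll()
    dll.push(2)                # list: 2
    dll.push(1)                # list: 1 <-> 2
    dll.append(3)              # list: 1 <-> 2 <-> 3
    assert len(dll) == 3
    assert dll.pop() == 1      # removes from the head
    assert dll.shift() == 3    # removes from the tail
    dll.remove(2)              # removes the remaining node by value
    return dll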
|
mit
| 4,066,315,183,033,655,300
| 29.944444
| 74
| 0.547217
| false
| 4.113737
| false
| false
| false
|
nuagenetworks/vspk-python
|
vspk/v5_0/nulocation.py
|
1
|
13395
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NULocation(NURESTObject):
""" Represents a Location in the VSD
Notes:
Gateway location details.
"""
__rest_name__ = "location"
__resource_name__ = "locations"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a Location instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> location = NULocation(id=u'xxxx-xxx-xxx-xxx', name=u'Location')
>>> location = NULocation(data=my_dict)
"""
super(NULocation, self).__init__()
# Read/Write Attributes
self._last_updated_by = None
self._latitude = None
self._address = None
self._ignore_geocode = None
self._time_zone_id = None
self._entity_scope = None
self._locality = None
self._longitude = None
self._country = None
self._associated_entity_name = None
self._associated_entity_type = None
self._state = None
self._external_id = None
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="latitude", remote_name="latitude", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ignore_geocode", remote_name="ignoreGeocode", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="time_zone_id", remote_name="timeZoneID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="locality", remote_name="locality", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="longitude", remote_name="longitude", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="country", remote_name="country", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_entity_name", remote_name="associatedEntityName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_entity_type", remote_name="associatedEntityType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="state", remote_name="state", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def latitude(self):
""" Get latitude value.
Notes:
Latitude in decimal format.
"""
return self._latitude
@latitude.setter
def latitude(self, value):
""" Set latitude value.
Notes:
Latitude in decimal format.
"""
self._latitude = value
@property
def address(self):
""" Get address value.
Notes:
Formatted address including property number, street name, suite or office number, ...
"""
return self._address
@address.setter
def address(self, value):
""" Set address value.
Notes:
Formatted address including property number, street name, suite or office number, ...
"""
self._address = value
@property
def ignore_geocode(self):
""" Get ignore_geocode value.
Notes:
Request BSS to perform a geocode on the address - If no value passed, requestGeocode will be set to true
This attribute is named `ignoreGeocode` in VSD API.
"""
return self._ignore_geocode
@ignore_geocode.setter
def ignore_geocode(self, value):
""" Set ignore_geocode value.
Notes:
Request BSS to perform a geocode on the address - If no value passed, requestGeocode will be set to true
This attribute is named `ignoreGeocode` in VSD API.
"""
self._ignore_geocode = value
@property
def time_zone_id(self):
""" Get time_zone_id value.
Notes:
Time zone in which the Gateway is located. This can be in the form of a UTC/GMT offset, continent/city location, or country/region. The available time zones can be found in /usr/share/zoneinfo on a Linux machine or retrieved with TimeZone.getAvailableIDs() in Java. Refer to the IANA (Internet Assigned Numbers Authority) for a list of time zones. URL : http://www.iana.org/time-zones Default value is UTC (translating to Etc/Zulu)
This attribute is named `timeZoneID` in VSD API.
"""
return self._time_zone_id
@time_zone_id.setter
def time_zone_id(self, value):
""" Set time_zone_id value.
Notes:
Time zone in which the Gateway is located. This can be in the form of a UTC/GMT offset, continent/city location, or country/region. The available time zones can be found in /usr/share/zoneinfo on a Linux machine or retrieved with TimeZone.getAvailableIDs() in Java. Refer to the IANA (Internet Assigned Numbers Authority) for a list of time zones. URL : http://www.iana.org/time-zones Default value is UTC (translating to Etc/Zulu)
This attribute is named `timeZoneID` in VSD API.
"""
self._time_zone_id = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def locality(self):
""" Get locality value.
Notes:
Locality/City/County
"""
return self._locality
@locality.setter
def locality(self, value):
""" Set locality value.
Notes:
Locality/City/County
"""
self._locality = value
@property
def longitude(self):
""" Get longitude value.
Notes:
Longitude in decimal format.
"""
return self._longitude
@longitude.setter
def longitude(self, value):
""" Set longitude value.
Notes:
Longitude in decimal format.
"""
self._longitude = value
@property
def country(self):
""" Get country value.
Notes:
Country
"""
return self._country
@country.setter
def country(self, value):
""" Set country value.
Notes:
Country
"""
self._country = value
@property
def associated_entity_name(self):
""" Get associated_entity_name value.
Notes:
Name of the associated entity.
This attribute is named `associatedEntityName` in VSD API.
"""
return self._associated_entity_name
@associated_entity_name.setter
def associated_entity_name(self, value):
""" Set associated_entity_name value.
Notes:
Name of the associated entity.
This attribute is named `associatedEntityName` in VSD API.
"""
self._associated_entity_name = value
@property
def associated_entity_type(self):
""" Get associated_entity_type value.
Notes:
Object type of the associated entity.
This attribute is named `associatedEntityType` in VSD API.
"""
return self._associated_entity_type
@associated_entity_type.setter
def associated_entity_type(self, value):
""" Set associated_entity_type value.
Notes:
Object type of the associated entity.
This attribute is named `associatedEntityType` in VSD API.
"""
self._associated_entity_type = value
@property
def state(self):
""" Get state value.
Notes:
State/Province/Region
"""
return self._state
@state.setter
def state(self, value):
""" Set state value.
Notes:
State/Province/Region
"""
self._state = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
|
bsd-3-clause
| -3,558,432,641,807,527,000
| 29.103371
| 453
| 0.582605
| false
| 4.598352
| false
| false
| false
|
albireox/marvin
|
python/marvin/utils/datamodel/dap/MPL6.py
|
1
|
22491
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-09-13 16:05:56
# @Last modified by: José Sánchez-Gallego (gallegoj@uw.edu)
# @Last modified time: 2018-08-06 11:45:33
from __future__ import absolute_import, division, print_function
from astropy import units as u
from marvin.utils.datamodel.maskbit import get_maskbits
from .base import Bintype, Channel, DAPDataModel, Model, MultiChannelProperty, Property
from .base import spaxel as spaxel_unit
from .MPL5 import ALL, GAU_MILESHC, NRE, SPX, VOR10
HYB10 = Bintype('HYB10', description='Binning and stellar continuum fitting as VOR10, '
'but emission lines are fitted per spaxel.')
# The two lines in the OII doublet are fitted independently for gaussian
# measurements. In that case oii_3727 and oii_3729 are populated. For summed
# flux measurements, the lines cannot be separated, so oiid_3728 contains
# the summed flux. In that case, oii_3729 is null and only kept to maintain
# the number of channels constant.
oiid_channel = Channel('oiid_3728', formats={'string': 'OIId 3728',
'latex': r'$\forb{O\,IId}\;\lambda\lambda 3728$'}, idx=0)
oii_channel = Channel('oii_3727', formats={'string': 'OII 3727',
'latex': r'$\forb{O\,II}\;\lambda 3727$'}, idx=0)
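# Illustrative note (not part of the original file): the summed-flux properties
# further down therefore use the blended channel, while the Gaussian-fit
# properties resolve the doublet, e.g.
#
#     emline_sflux: channels=[oiid_channel] + MPL6_emline_channels
#     emline_gflux: channels=[oii_channel] + MPL6_emline_channels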
MPL6_emline_channels = [
Channel('oii_3729', formats={'string': 'OII 3729',
'latex': r'$\forb{O\,II}\;\lambda 3729$'}, idx=1),
Channel('hthe_3798', formats={'string': 'H-theta 3798',
'latex': r'H$\theta\;\lambda 3798$'}, idx=2),
Channel('heta_3836', formats={'string': 'H-eta 3836',
'latex': r'H$\eta\;\lambda 3836$'}, idx=3),
Channel('neiii_3869', formats={'string': 'NeIII 3869',
'latex': r'$\forb{Ne\,III}\;\lambda 3869$'}, idx=4),
Channel('hzet_3890', formats={'string': 'H-zeta 3890',
'latex': r'H$\zeta\;\lambda 3890$'}, idx=5),
Channel('neiii_3968', formats={'string': 'NeIII 3968',
'latex': r'$\forb{Ne\,III}\;\lambda 3968$'}, idx=6),
Channel('heps_3971', formats={'string': 'H-epsilon 3971',
'latex': r'H$\epsilon\;\lambda 3971$'}, idx=7),
Channel('hdel_4102', formats={'string': 'H-delta 4102',
'latex': r'H$\delta\;\lambda 4102$'}, idx=8),
Channel('hgam_4341', formats={'string': 'H-gamma 4341',
'latex': r'H$\gamma\;\lambda 4341$'}, idx=9),
    Channel('heii_4687', formats={'string': 'HeII 4687',
'latex': r'He\,II$\;\lambda 4687$'}, idx=10),
Channel('hb_4862', formats={'string': 'H-beta 4862',
'latex': r'H$\beta\;\lambda 4862$'}, idx=11),
Channel('oiii_4960', formats={'string': 'OIII 4960',
'latex': r'$\forb{O\,III}\;\lambda 4960$'}, idx=12),
Channel('oiii_5008', formats={'string': 'OIII 5008',
'latex': r'$\forb{O\,III}\;\lambda 5008$'}, idx=13),
Channel('hei_5877', formats={'string': 'HeI 5877',
'latex': r'He\,I$\;\lambda 5877$'}, idx=14),
Channel('oi_6302', formats={'string': 'OI 6302',
'latex': r'$\forb{O\,I}\;\lambda 6302$'}, idx=15),
Channel('oi_6365', formats={'string': 'OI 6365',
'latex': r'$\forb{O\,I}\;\lambda 6365$'}, idx=16),
Channel('nii_6549', formats={'string': 'NII 6549',
'latex': r'$\forb{N\,II}\;\lambda 6549$'}, idx=17),
Channel('ha_6564', formats={'string': 'H-alpha 6564',
'latex': r'H$\alpha\;\lambda 6564$'}, idx=18),
Channel('nii_6585', formats={'string': 'NII 6585',
'latex': r'$\forb{N\,II}\;\lambda 6585$'}, idx=19),
Channel('sii_6718', formats={'string': 'SII 6718',
'latex': r'$\forb{S\,II}\;\lambda 6718$'}, idx=20),
Channel('sii_6732', formats={'string': 'SII 6732',
                                 'latex': r'$\forb{S\,II}\;\lambda 6732$'}, idx=21)
]
MPL6_specindex_channels = [
Channel('cn1', formats={'string': 'CN1'}, unit=u.mag, idx=0),
Channel('cn2', formats={'string': 'CN2'}, unit=u.mag, idx=1),
Channel('ca4227', formats={'string': 'Ca 4227',
'latex': r'Ca\,\lambda 4227'}, unit=u.Angstrom, idx=2),
Channel('g4300', formats={'string': 'G4300',
'latex': r'G\,\lambda 4300'}, unit=u.Angstrom, idx=3),
Channel('fe4383', formats={'string': 'Fe 4383',
'latex': r'Fe\,\lambda 4383'}, unit=u.Angstrom, idx=4),
Channel('ca4455', formats={'string': 'Ca 4455',
'latex': r'Ca\,\lambda 4455'}, unit=u.Angstrom, idx=5),
Channel('fe4531', formats={'string': 'Fe 4531',
'latex': r'Fe\,\lambda 4531'}, unit=u.Angstrom, idx=6),
Channel('c24668', formats={'string': 'C24668',
'latex': r'C2\,\lambda 4668'}, unit=u.Angstrom, idx=7),
Channel('hb', formats={'string': 'Hb',
'latex': r'H\beta'}, unit=u.Angstrom, idx=8),
Channel('fe5015', formats={'string': 'Fe 5015',
'latex': r'Fe\,\lambda 5015'}, unit=u.Angstrom, idx=9),
Channel('mg1', formats={'string': 'Mg1'}, unit=u.mag, idx=10),
Channel('mg2', formats={'string': 'Mg2'}, unit=u.mag, idx=11),
Channel('mgb', formats={'string': 'Mgb'}, unit=u.Angstrom, idx=12),
Channel('fe5270', formats={'string': 'Fe 5270',
'latex': r'Fe\,\lambda 5270'}, unit=u.Angstrom, idx=13),
Channel('fe5335', formats={'string': 'Fe 5335',
'latex': r'Fe\,\lambda 5335'}, unit=u.Angstrom, idx=14),
Channel('fe5406', formats={'string': 'Fe 5406',
'latex': r'Fe\,\lambda 5406'}, unit=u.Angstrom, idx=15),
Channel('fe5709', formats={'string': 'Fe 5709',
'latex': r'Fe\,\lambda 5709'}, unit=u.Angstrom, idx=16),
Channel('fe5782', formats={'string': 'Fe 5782',
'latex': r'Fe\,\lambda 5782'}, unit=u.Angstrom, idx=17),
Channel('nad', formats={'string': 'NaD'}, unit=u.Angstrom, idx=18),
Channel('tio1', formats={'string': 'TiO1'}, unit=u.mag, idx=19),
Channel('tio2', formats={'string': 'TiO2'}, unit=u.mag, idx=20),
Channel('hdeltaa', formats={'string': 'HDeltaA',
'latex': r'H\delta\,A'}, unit=u.Angstrom, idx=21),
Channel('hgammaa', formats={'string': 'HGammaA',
                                'latex': r'H\gamma\,A'}, unit=u.Angstrom, idx=22),
    Channel('hdeltaf', formats={'string': 'HDeltaF',
'latex': r'H\delta\,F'}, unit=u.Angstrom, idx=23),
Channel('hgammaf', formats={'string': 'HGammaF',
'latex': r'H\gamma\,F'}, unit=u.Angstrom, idx=24),
Channel('cahk', formats={'string': 'CaHK'}, unit=u.Angstrom, idx=25),
Channel('caii1', formats={'string': 'CaII1'}, unit=u.Angstrom, idx=26),
Channel('caii2', formats={'string': 'CaII2'}, unit=u.Angstrom, idx=27),
Channel('caii3', formats={'string': 'CaII3'}, unit=u.Angstrom, idx=28),
Channel('pa17', formats={'string': 'Pa17'}, unit=u.Angstrom, idx=29),
Channel('pa14', formats={'string': 'Pa14'}, unit=u.Angstrom, idx=30),
Channel('pa12', formats={'string': 'Pa12'}, unit=u.Angstrom, idx=31),
Channel('mgicvd', formats={'string': 'MgICvD'}, unit=u.Angstrom, idx=32),
Channel('naicvd', formats={'string': 'NaICvD'}, unit=u.Angstrom, idx=33),
Channel('mgiir', formats={'string': 'MgIIR'}, unit=u.Angstrom, idx=34),
Channel('fehcvd', formats={'string': 'FeHCvD'}, unit=u.Angstrom, idx=35),
Channel('nai', formats={'string': 'NaI'}, unit=u.Angstrom, idx=36),
Channel('btio', formats={'string': 'bTiO'}, unit=u.mag, idx=37),
Channel('atio', formats={'string': 'aTiO'}, unit=u.mag, idx=38),
Channel('cah1', formats={'string': 'CaH1'}, unit=u.mag, idx=39),
Channel('cah2', formats={'string': 'CaH2'}, unit=u.mag, idx=40),
Channel('naisdss', formats={'string': 'NaISDSS'}, unit=u.Angstrom, idx=41),
Channel('tio2sdss', formats={'string': 'TiO2SDSS'}, unit=u.Angstrom, idx=42),
Channel('d4000', formats={'string': 'D4000'}, unit=u.dimensionless_unscaled, idx=43),
Channel('dn4000', formats={'string': 'Dn4000'}, unit=u.dimensionless_unscaled, idx=44),
Channel('tiocvd', formats={'string': 'TiOCvD'}, unit=u.dimensionless_unscaled, idx=45)
]
MPL6_binid_channels = [
Channel('binned_spectra', formats={'string': 'Binned spectra'},
unit=u.dimensionless_unscaled, idx=0),
Channel('stellar_continua', formats={'string': 'Stellar continua'},
unit=u.dimensionless_unscaled, idx=1),
Channel('em_line_moments', formats={'string': 'Emission line moments'},
unit=u.dimensionless_unscaled, idx=2),
Channel('em_line_models', formats={'string': 'Emission line models'},
unit=u.dimensionless_unscaled, idx=3),
Channel('spectral_indices', formats={'string': 'Spectral indices'},
unit=u.dimensionless_unscaled, idx=4)]
binid_properties = MultiChannelProperty('binid', ivar=False, mask=False,
channels=MPL6_binid_channels,
description='Numerical ID for spatial bins.')
MPL6_maps = [
MultiChannelProperty('spx_skycoo', ivar=False, mask=False,
channels=[Channel('on_sky_x', formats={'string': 'On-sky X'}, idx=0),
Channel('on_sky_y', formats={'string': 'On-sky Y'}, idx=1)],
unit=u.arcsec,
formats={'string': 'Sky coordinates'},
description='Offsets of each spaxel from the galaxy center.'),
MultiChannelProperty('spx_ellcoo', ivar=False, mask=False,
channels=[Channel('elliptical_radius',
formats={'string': 'Elliptical radius'},
idx=0, unit=u.arcsec),
Channel('r_re',
formats={'string': 'R/Reff'},
idx=1),
Channel('elliptical_azimuth',
formats={'string': 'Elliptical azimuth'},
idx=2, unit=u.deg)],
formats={'string': 'Elliptical coordinates'},
description='Elliptical polar coordinates of each spaxel from '
'the galaxy center.'),
Property('spx_mflux', ivar=True, mask=False,
unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit, scale=1e-17,
formats={'string': 'r-band mean flux'},
description='Mean flux in r-band (5600.1-6750.0 ang).'),
Property('spx_snr', ivar=False, mask=False,
formats={'string': 'r-band SNR'},
description='r-band signal-to-noise ratio per pixel.'),
binid_properties,
MultiChannelProperty('bin_lwskycoo', ivar=False, mask=False,
channels=[Channel('lum_weighted_on_sky_x',
formats={'string': 'Light-weighted offset X'},
idx=0, unit=u.arcsec),
Channel('lum_weighted_on_sky_y',
formats={'string': 'Light-weighted offset Y'},
idx=1, unit=u.arcsec)],
description='Light-weighted offset of each bin from the galaxy center.'),
MultiChannelProperty('bin_lwellcoo', ivar=False, mask=False,
channels=[Channel('lum_weighted_elliptical_radius',
formats={'string': 'Light-weighted radial offset'},
idx=0, unit=u.arcsec),
Channel('r_re',
                                       formats={'string': 'R/Reff'},
idx=1),
Channel('lum_weighted_elliptical_azimuth',
formats={'string': 'Light-weighted azimuthal offset'},
idx=2, unit=u.deg)],
description='Light-weighted elliptical polar coordinates of each bin '
'from the galaxy center.'),
Property('bin_area', ivar=False, mask=False,
unit=u.arcsec ** 2,
formats={'string': 'Bin area'},
description='Area of each bin.'),
Property('bin_farea', ivar=False, mask=False,
formats={'string': 'Bin fractional area'},
description='Fractional area that the bin covers for the expected bin '
'shape (only relevant for radial binning).'),
Property('bin_mflux', ivar=True, mask=True,
unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit, scale=1e-17,
formats={'string': 'r-band binned spectra mean flux'},
description='Mean flux in the r-band for the binned spectra.'),
Property('bin_snr', ivar=False, mask=False,
formats={'string': 'Bin SNR'},
description='r-band signal-to-noise ratio per pixel in the binned spectra.'),
Property('stellar_vel', ivar=True, mask=True,
unit=u.km / u.s,
formats={'string': 'Stellar velocity'},
description='Stellar velocity relative to NSA redshift.'),
Property('stellar_sigma', ivar=True, mask=True,
unit=u.km / u.s,
formats={'string': 'Stellar velocity dispersion', 'latex': r'Stellar $\sigma$'},
description='Stellar velocity dispersion (must be corrected using '
'STELLAR_SIGMACORR)'),
Property('stellar_sigmacorr', ivar=False, mask=False,
unit=u.km / u.s,
formats={'string': 'Stellar sigma correction',
'latex': r'Stellar $\sigma$ correction'},
description='Quadrature correction for STELLAR_SIGMA to obtain the '
                         'astrophysical velocity dispersion.'),
MultiChannelProperty('stellar_cont_fresid', ivar=False, mask=False,
channels=[Channel('68th_percentile',
formats={'string': '68th percentile',
'latex': r'68^{th} percentile'}, idx=0),
Channel('99th_percentile',
formats={'string': '99th percentile',
'latex': r'99^{th} percentile'}, idx=1)],
formats={'string': 'Fractional residual growth'},
description='68%% and 99%% growth of the fractional residuals between '
'the model and data.'),
Property('stellar_cont_rchi2', ivar=False, mask=False,
formats={'string': 'Stellar continuum reduced chi-square',
'latex': r'Stellar\ continuum\ reduced\ \chi^2'},
description='Reduced chi-square of the stellar continuum fit.'),
MultiChannelProperty('emline_sflux', ivar=True, mask=True,
channels=[oiid_channel] + MPL6_emline_channels,
formats={'string': 'Emission line summed flux'},
unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit, scale=1e-17,
binid=binid_properties[3],
description='Non-parametric summed flux for emission lines.'),
MultiChannelProperty('emline_sew', ivar=True, mask=True,
channels=[oiid_channel] + MPL6_emline_channels,
formats={'string': 'Emission line EW'},
unit=u.Angstrom,
binid=binid_properties[3],
description='Emission line non-parametric equivalent '
'widths measurements.'),
MultiChannelProperty('emline_gflux', ivar=True, mask=True,
channels=[oii_channel] + MPL6_emline_channels,
formats={'string': 'Emission line Gaussian flux'},
unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit, scale=1e-17,
binid=binid_properties[3],
description='Gaussian profile integrated flux for emission lines.'),
MultiChannelProperty('emline_gvel', ivar=True, mask=True,
channels=[oii_channel] + MPL6_emline_channels,
formats={'string': 'Emission line Gaussian velocity'},
unit=u.km / u.s,
binid=binid_properties[3],
description='Gaussian profile velocity for emission lines.'),
MultiChannelProperty('emline_gew', ivar=True, mask=True,
channels=[oii_channel] + MPL6_emline_channels,
formats={'string': 'Emission line Gaussian EW'},
unit=u.Angstrom,
binid=binid_properties[3],
description='Gaussian-fitted equivalent widths measurements '
'(based on EMLINE_GFLUX).'),
MultiChannelProperty('emline_gsigma', ivar=True, mask=True,
channels=[oii_channel] + MPL6_emline_channels,
formats={'string': 'Emission line Gaussian sigma',
'latex': r'Emission line Gaussian $\sigma$'},
unit=u.km / u.s,
binid=binid_properties[3],
description='Gaussian profile velocity dispersion for emission lines; '
'must be corrected using EMLINE_INSTSIGMA.'),
MultiChannelProperty('emline_instsigma', ivar=False, mask=False,
channels=[oii_channel] + MPL6_emline_channels,
formats={'string': 'Emission line instrumental sigma',
'latex': r'Emission line instrumental $\sigma$'},
unit=u.km / u.s,
binid=binid_properties[3],
description='Instrumental dispersion at the fitted line center.'),
MultiChannelProperty('emline_tplsigma', ivar=False, mask=False,
channels=[oii_channel] + MPL6_emline_channels,
formats={'string': 'Emission line template instrumental sigma',
'latex': r'Emission line template instrumental $\sigma$'},
unit=u.km / u.s,
binid=binid_properties[3],
description='The dispersion of each emission line used in '
'the template spectra'),
MultiChannelProperty('specindex', ivar=True, mask=True,
channels=MPL6_specindex_channels,
formats={'string': 'Spectral index'},
description='Measurements of spectral indices.'),
MultiChannelProperty('specindex_corr', ivar=False, mask=False,
channels=MPL6_specindex_channels,
formats={'string': 'Spectral index sigma correction',
'latex': r'Spectral index $\sigma$ correction'},
description='Velocity dispersion corrections for the '
'spectral index measurements '
'(can be ignored for D4000, Dn4000).')
]
MPL6_models = [
Model('binned_flux', 'FLUX', 'WAVE', extension_ivar='IVAR',
extension_mask='MASK', unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit,
scale=1e-17, formats={'string': 'Binned flux'},
description='Flux of the binned spectra',
binid=binid_properties[0]),
Model('full_fit', 'MODEL', 'WAVE', extension_ivar=None,
extension_mask='MASK', unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit,
scale=1e-17, formats={'string': 'Best fitting model'},
description='The best fitting model spectra (sum of the fitted '
'continuum and emission-line models)',
binid=binid_properties[0]),
Model('emline_fit', 'EMLINE', 'WAVE', extension_ivar=None,
extension_mask='EMLINE_MASK',
unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit,
scale=1e-17, formats={'string': 'Emission line model spectrum'},
description='The model spectrum with only the emission lines.',
binid=binid_properties[3]),
Model('emline_base_fit', 'EMLINE_BASE', 'WAVE', extension_ivar=None,
extension_mask='EMLINE_MASK',
unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit,
scale=1e-17, formats={'string': 'Emission line baseline fit'},
description='The model of the constant baseline fitted beneath the '
'emission lines.',
binid=binid_properties[3])
]
# MPL-6 DapDataModel goes here
MPL6 = DAPDataModel('2.1.3', aliases=['MPL-6', 'MPL6'],
bintypes=[SPX, HYB10, VOR10, ALL, NRE],
db_only=[SPX, HYB10],
templates=[GAU_MILESHC],
properties=MPL6_maps,
models=MPL6_models,
bitmasks=get_maskbits('MPL-6'),
default_bintype='SPX',
default_template='GAU-MILESHC',
property_table='SpaxelProp6',
default_binid=binid_properties[0],
default_mapmask=['NOCOV', 'UNRELIABLE', 'DONOTUSE'],
qual_flag='DAPQUAL')
|
bsd-3-clause
| 7,902,900,672,459,805,000
| 60.111413
| 98
| 0.516475
| false
| 3.82401
| false
| false
| false
|
Dutchj/pbtweeter
|
pbtweeter/twitter/tweets.py
|
1
|
2532
|
import config as cfg
import random
import speedrun
from datetime import datetime
from seconds import seconds_to_time
def post_tweet(api, lb, cat, p, t):
player_name = p
twitter_handle = speedrun.get_twitter_handle(p)
if twitter_handle is None:
return
if not twitter_handle == '':
player_name = twitter_handle
if t < int(lb[cfg.game][cat]['1']['time']):
return post_wr_tweet(api, cat, player_name, t)
elif t == int(lb[cfg.game][cat]['1']['time']):
return post_tie_tweet(api, cat, player_name, t)
else:
return post_pb_tweet(api, cat, player_name, t)
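# Illustrative sketch (not part of the original module): the leaderboard shape
# that post_tweet() reads, and how the time comparison picks the message type.
# The game, category and time values below are made up.
_EXAMPLE_LB = {'Some Game': {'Any%': {'1': {'time': '3600'}}}}
# t < 3600  -> post_wr_tweet (new world record)
# t == 3600 -> post_tie_tweet (tie with the current record)
# t > 3600  -> post_pb_tweet (personal best only)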
def post_pb_tweet(api, cat, p, t):
try:
if not cfg.debug:
api.update_status(status=random.choice(cfg.pb_messages).format(game=cfg.game, category=cat, player=p,
time=seconds_to_time(t)))
except Exception, e:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), e
else:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), "Tweeted out {player}'s PB ({time}) in {category}".format(
player=p, time=seconds_to_time(t), category=cat)
if cfg.debug:
return False
return True
def post_wr_tweet(api, cat, p, t):
try:
if not cfg.debug:
api.update_status(status=random.choice(cfg.wr_messages).format(game=cfg.game, category=cat, player=p,
time=seconds_to_time(t)))
except Exception, e:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), e
else:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), "Tweeted out {player}'s WR ({time}) in {category}".format(
player=p, time=seconds_to_time(t), category=cat)
if cfg.debug:
return False
return True
def post_tie_tweet(api, cat, p, t):
try:
if not cfg.debug:
api.update_status(status=random.choice(cfg.tie_messages).format(game=cfg.game, category=cat, player=p,
time=seconds_to_time(t)))
except Exception, e:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), e
else:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), "Tweeted out {player}'s WR tie ({time}) in {category}"\
.format(player=p, time=seconds_to_time(t), category=cat)
if cfg.debug:
return False
return True
|
gpl-2.0
| 7,368,153,244,888,053,000
| 37.363636
| 120
| 0.541074
| false
| 3.531381
| false
| false
| false
|
iSchool-Zambia/django-ischool-oppia
|
oppia/profile/forms.py
|
1
|
18863
|
# oppia/profile/forms.py
import hashlib
import urllib
from django import forms
from django.conf import settings
from django.contrib.auth import (authenticate, login, views)
from django.core.urlresolvers import reverse
from django.core.validators import validate_email
from django.contrib.auth.models import User
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Button, Layout, Fieldset, ButtonHolder, Submit, Div, HTML
class LoginForm(forms.Form):
username = forms.CharField(max_length=30,
error_messages={'required': _(u'Please enter a username.')},)
password = forms.CharField(widget=forms.PasswordInput,
error_messages={'required': _(u'Please enter a password.'),},
required=True)
next = forms.CharField(widget=forms.HiddenInput())
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = reverse('profile_login')
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
self.helper.layout = Layout(
'username',
'password',
'next',
Div(
Submit('submit', _(u'Login'), css_class='btn btn-default'),
HTML("""<a class="btn btn-default" href="{% url 'profile_reset' %}">"""+_(u'Forgotten password?') + """</a>"""),
css_class='col-lg-offset-2 col-lg-4',
),
)
def clean(self):
cleaned_data = self.cleaned_data
username = cleaned_data.get("username")
password = cleaned_data.get("password")
user = authenticate(username=username, password=password)
if user is None or not user.is_active:
raise forms.ValidationError( _(u"Invalid username or password. Please try again."))
return cleaned_data
class RegisterForm(forms.Form):
username = forms.CharField(max_length=30,
min_length=4,
error_messages={'required': _(u'Please enter a username.')},)
email = forms.CharField(validators=[validate_email],
error_messages={'invalid': _(u'Please enter a valid e-mail address.'),
'required': _(u'Please enter your e-mail address.')},
required=True)
password = forms.CharField(widget=forms.PasswordInput,
error_messages={'required': _(u'Please enter a password.'),
'min_length': _(u'Your password should be at least 6 characters long.')},
min_length=6,
required=True)
password_again = forms.CharField(widget=forms.PasswordInput,
min_length=6,
error_messages={'required': _(u'Please enter your password again.'),
'min_length': _(u'Your password again should be at least 6 characters long.')},
required=True)
first_name = forms.CharField(max_length=100,
error_messages={'required': _(u'Please enter your first name.'),
'min_length': _(u'Your first name should be at least 2 characters long.')},
min_length=2,
required=True)
last_name = forms.CharField(max_length=100,
error_messages={'required': _(u'Please enter your last name.'),
'min_length': _(u'Your last name should be at least 2 characters long.')},
min_length=2,
required=True)
job_title = forms.CharField(max_length=100,required=True)
organisation = forms.CharField(max_length=100,required=True)
profession = forms.CharField(max_length=100,required=True)
service_entry_date = forms.DateField(
required=True,
error_messages={'required': _('Please enter a valid date'),
'invalid':_('Please enter a valid date')},
)
location = forms.ChoiceField(widget=forms.Select, required=False)
def __init__(self, *args, **kwargs):
super(RegisterForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = reverse('profile_register')
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
self.helper.layout = Layout(
'username',
'email',
'password',
'password_again',
'first_name',
'last_name',
'job_title',
'organisation',
'profession',
'service_entry_date',
'location',
Div(
Submit('submit', _(u'Register'), css_class='btn btn-default'),
css_class='col-lg-offset-2 col-lg-4',
),
)
def clean(self):
cleaned_data = self.cleaned_data
email = cleaned_data.get("email")
password = cleaned_data.get("password")
password_again = cleaned_data.get("password_again")
username = cleaned_data.get("username")
# check the username not already used
num_rows = User.objects.filter(username=username).count()
if num_rows != 0:
raise forms.ValidationError( _(u"Username has already been registered, please select another."))
# check the email address not already used
num_rows = User.objects.filter(email=email).count()
if num_rows != 0:
raise forms.ValidationError( _(u"Email has already been registered"))
# check the password are the same
if password and password_again:
if password != password_again:
raise forms.ValidationError( _(u"Passwords do not match."))
# Always return the full collection of cleaned data.
return cleaned_data
class RegisterFormAPI(forms.Form):
username = forms.CharField(max_length=30,
min_length=4,
error_messages={'required': _(u'Please enter a username.')},)
email = forms.CharField(validators=[validate_email],
error_messages={'invalid': _(u'Please enter a valid e-mail address.'),
'required': _(u'Please enter your e-mail address.')},
required=True)
password = forms.CharField(widget=forms.PasswordInput,
error_messages={'required': _(u'Please enter a password.'),
'min_length': _(u'Your password should be at least 6 characters long.')},
min_length=6,
required=True)
password_again = forms.CharField(widget=forms.PasswordInput,
min_length=6,
error_messages={'required': _(u'Please enter your password again.'),
'min_length': _(u'Your password again should be at least 6 characters long.')},
required=True)
first_name = forms.CharField(max_length=100,
error_messages={'required': _(u'Please enter your first name.'),
'min_length': _(u'Your first name should be at least 2 characters long.')},
min_length=2,
required=True)
last_name = forms.CharField(max_length=100,
error_messages={'required': _(u'Please enter your last name.'),
'min_length': _(u'Your last name should be at least 2 characters long.')},
min_length=2,
required=True)
def __init__(self, *args, **kwargs):
super(RegisterFormAPI, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = reverse('profile_register')
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
self.helper.layout = Layout(
'username',
'email',
'password',
'password_again',
'first_name',
'last_name',
'job_title',
'organisation',
'profession',
'service_entry_date',
'location',
Div(
Submit('submit', _(u'Register'), css_class='btn btn-default'),
css_class='col-lg-offset-2 col-lg-4',
),
)
def clean(self):
cleaned_data = self.cleaned_data
email = cleaned_data.get("email")
password = cleaned_data.get("password")
password_again = cleaned_data.get("password_again")
username = cleaned_data.get("username")
# check the username not already used
num_rows = User.objects.filter(username=username).count()
if num_rows != 0:
raise forms.ValidationError( _(u"Username has already been registered, please select another."))
# check the email address not already used
num_rows = User.objects.filter(email=email).count()
if num_rows != 0:
raise forms.ValidationError( _(u"Email has already been registered"))
# check the password are the same
if password and password_again:
if password != password_again:
raise forms.ValidationError( _(u"Passwords do not match."))
# Always return the full collection of cleaned data.
return cleaned_data
class ResetForm(forms.Form):
username = forms.CharField(max_length=30,
error_messages={'invalid': _(u'Please enter a username or email address.')},
required=True)
def __init__(self, *args, **kwargs):
super(ResetForm, self).__init__(*args, **kwargs)
self.fields['username'].label = "Username or email"
self.helper = FormHelper()
self.helper.form_action = reverse('profile_reset')
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
self.helper.layout = Layout(
'username',
Div(
Submit('submit', _(u'Reset password'), css_class='btn btn-default'),
css_class='col-lg-offset-2 col-lg-4',
),
)
def clean(self):
cleaned_data = self.cleaned_data
username = cleaned_data.get("username")
try:
user = User.objects.get(username__exact=username)
except User.DoesNotExist:
try:
user = User.objects.get(email__exact=username)
except User.DoesNotExist:
raise forms.ValidationError( _(u"Username/email not found"))
return cleaned_data
class ProfileForm(forms.Form):
api_key = forms.CharField(widget = forms.TextInput(attrs={'readonly':'readonly'}),
required=False, help_text=_(u'You cannot edit the API Key.'))
username = forms.CharField(widget = forms.TextInput(attrs={'readonly':'readonly'}),
required=False, help_text=_(u'You cannot edit the username.'))
email = forms.CharField(validators=[validate_email],
error_messages={'invalid': _(u'Please enter a valid e-mail address.')},
required=True)
password = forms.CharField(widget=forms.PasswordInput,
required=False,
min_length=6,
error_messages={'min_length': _(u'The new password should be at least 6 characters long')},)
password_again = forms.CharField(widget=forms.PasswordInput,
required=False,
min_length=6)
first_name = forms.CharField(max_length=100,
min_length=2,
required=True)
last_name = forms.CharField(max_length=100,
min_length=2,
required=True)
job_title = forms.CharField(max_length=100,required=True)
organisation = forms.CharField(max_length=100,required=True)
profession = forms.CharField(max_length=100,required=True)
service_entry_date = forms.DateField(
required=True,
error_messages={'required': _('Please enter a valid date'),
'invalid':_('Please enter a valid date')},
)
location = forms.ChoiceField(widget=forms.Select, required=False)
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
if len(args) == 1:
email = args[0]['email']
username = args[0]['username']
else:
kw = kwargs.pop('initial')
email = kw['email']
username = kw['username']
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
if settings.OPPIA_SHOW_GRAVATARS:
gravatar_url = "https://www.gravatar.com/avatar.php?"
gravatar_url += urllib.urlencode({
'gravatar_id':hashlib.md5(email).hexdigest(),
'size':64
})
self.helper.layout = Layout(
Div(
HTML("""<label class="control-label col-lg-2">"""+_(u'Photo') + """</label>"""),
Div(
HTML(mark_safe('<img src="{0}" alt="gravatar for {1}" class="gravatar" width="{2}" height="{2}"/>'.format(gravatar_url, username, 64))),
HTML("""<br/>"""),
HTML("""<a href="https://www.gravatar.com">"""+_(u'Update gravatar')+"""</a>"""),
css_class="col-lg-4",
),
css_class="form-group",
),
'api_key',
'username',
'email',
'first_name',
'last_name',
'job_title',
'organisation',
'profession',
'service_entry_date',
'location',
Div(
HTML("""<h3>"""+_(u'Change password') + """</h3>"""),
),
'password',
'password_again',
Div(
Submit('submit', _(u'Save'), css_class='btn btn-default'),
css_class='col-lg-offset-2 col-lg-4',
),
)
else:
self.helper.layout = Layout(
'api_key',
'username',
'email',
'first_name',
'last_name',
Div(
HTML("""<h3>"""+_(u'Change password') + """</h3>"""),
),
'password',
'password_again',
Div(
Submit('submit', _(u'Save'), css_class='btn btn-default'),
css_class='col-lg-offset-2 col-lg-4',
),
)
def clean(self):
cleaned_data = self.cleaned_data
# check email not used by anyone else
email = cleaned_data.get("email")
username = cleaned_data.get("username")
num_rows = User.objects.exclude(username__exact=username).filter(email=email).count()
if num_rows != 0:
raise forms.ValidationError( _(u"Email address already in use"))
# if password entered then check they are the same
password = cleaned_data.get("password")
password_again = cleaned_data.get("password_again")
if password and password_again:
if password != password_again:
raise forms.ValidationError( _(u"Passwords do not match."))
return cleaned_data
class UploadProfileForm(forms.Form):
upload_file = forms.FileField(
required=True,
error_messages={'required': _('Please select a file to upload')},)
def __init__(self, *args, **kwargs):
super(UploadProfileForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = reverse('profile_upload')
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
self.helper.layout = Layout(
'upload_file',
Div(
Submit('submit', _(u'Upload'), css_class='btn btn-default'),
css_class='col-lg-offset-2 col-lg-4',
),
)
|
gpl-3.0
| 3,110,669,798,487,709,000
| 47.369231
| 164
| 0.483168
| false
| 4.899481
| false
| false
| false
|
Rav3nPL/p2pool-yac
|
p2pool/networks.py
|
1
|
1811
|
from p2pool.bitcoin import networks
from p2pool.util import math
# CHAIN_LENGTH = number of shares back client keeps
# REAL_CHAIN_LENGTH = maximum number of shares back client uses to compute payout
# REAL_CHAIN_LENGTH must always be <= CHAIN_LENGTH
# REAL_CHAIN_LENGTH must be changed in sync with all other clients
# changes can be done by changing one, then the other
nets = dict(
yacoin=math.Object(
PARENT=networks.nets['yacoin'],
SHARE_PERIOD=10, # seconds
CHAIN_LENGTH=12*60*60//10, # shares
REAL_CHAIN_LENGTH=12*60*60//10, # shares
TARGET_LOOKBEHIND=30, # shares
SPREAD=10, # blocks
IDENTIFIER='c138eee9e7923514'.decode('hex'),
PREFIX='d206c3aaaee749b4'.decode('hex'),
P2P_PORT=8337,
MIN_TARGET=0,
MAX_TARGET=2**256//2**20 - 1,
PERSIST=True,
WORKER_PORT=8336,
BOOTSTRAP_ADDRS='rav3n.dtdns.net 37.59.119.242 95.138.185.176 213.239.207.114 81.17.30.121 46.163.105.201 88.190.223.101'.split(' '),
ANNOUNCE_CHANNEL='#p2pool-alt',
VERSION_CHECK=lambda v: v >= 60004,
),
yacoin_testnet=math.Object(
PARENT=networks.nets['yacoin_testnet'],
SHARE_PERIOD=3, # seconds
CHAIN_LENGTH=20*60//3, # shares
REAL_CHAIN_LENGTH=20*60//3, # shares
TARGET_LOOKBEHIND=200, # shares
SPREAD=12, # blocks
IDENTIFIER='e037d5b8c7923510'.decode('hex'),
PREFIX='7208c1a54ef649b0'.decode('hex'),
P2P_PORT=19777,
MIN_TARGET=0,
MAX_TARGET=2**256//2**20 - 1,
PERSIST=False,
WORKER_PORT=18336,
BOOTSTRAP_ADDRS=' '.split(' '),
ANNOUNCE_CHANNEL='#p2pool-alt',
VERSION_CHECK=lambda v: v >= 60004,
),
)
for net_name, net in nets.iteritems():
net.NAME = net_name
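# Illustrative sanity check (not part of the original module) for the constraint
# documented at the top of this file: REAL_CHAIN_LENGTH must never exceed
# CHAIN_LENGTH.
for _net in nets.itervalues():
    assert _net.REAL_CHAIN_LENGTH <= _net.CHAIN_LENGTH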
|
gpl-3.0
| 8,911,000,933,144,216,000
| 35.22
| 141
| 0.62286
| false
| 3.008306
| false
| false
| false
|
davidkeegan/dklrt
|
Time.py
|
1
|
2701
|
#!/usr/bin/python
# Time and Date Utilities (dklrt).
# (c) David Keegan 2011-08-06.
import sys, re
from time import *
import datetime
import Misc
ModuleName = __name__
ReDateSep = '[-/]'
ReDate = '\d{4}%s\d{1,2}%s\d{1,2}' % (ReDateSep, ReDateSep)
RePeriod = '(\d+)([ymwdh])'
DateFormat = '%Y-%m-%d'
ReDateTimeSep = "[-/: ]";
DateTimeFormat = '%Y%m%d%H%M%S'
SecPerHour = 60 * 60
SecPerDay = 24 * SecPerHour
def _Throw(Msg): Misc.Throw(Msg, ModuleName)
def DateTimeParse(DateTimeStr):
"""Converts a date(/time) string to seconds since the epoch.
Assumes zeroes for missing time components.
"""
Dts = re.sub(ReDateTimeSep, '', DateTimeStr);
if len(Dts) < 8:
_Throw('Bad Date/Time string: "%s"!' % DateTimeStr)
while len(Dts) < 14: Dts = Dts + "0";
return mktime(strptime(Dts, DateTimeFormat))
def DateToText(Seconds):
# Round seconds to integer first as we're truncating the time
# component.
return strftime(DateFormat, localtime(round(Seconds)))
def DateToday():
return DateTimeParse(DateToText(time()))
def DateAddPeriod(Seconds, Periodstr):
"""Adds the period to the Seconds (a date)."""
Match = re.match(RePeriod, Periodstr)
if not Match: _Throw("Bad Period String: %s!" % Periodstr)
Count = int(Match.group(1))
Unit = Match.group(2)
Rv = Seconds
if Unit == 'y': Rv = DateAddYears(Rv, Count)
elif Unit== 'm': Rv = DateAddMonths(Rv, Count)
elif Unit == 'w': Rv = Rv + (Count * SecPerDay * 7)
elif Unit == 'd': Rv = Rv + (Count * SecPerDay)
elif Unit == 'h': Rv = Rv + (Count * SecPerHour)
else: _Throw('Bad Period Unit: "%s"!' % Unit)
return Rv
def DateAddYears(Seconds, Count):
"""Shifts Seconds (a date) forward by Count years.
If Seconds is Feb 29, shifts to Feb 28, even if shifing to a
leap year.
"""
if not isinstance(Count, (int, long)):
_Throw("Count argument not an int!")
dtd = datetime.date.fromtimestamp(Seconds)
if not Count == 0:
if (dtd.month == 2) and (dtd.day == 29):
dtd = dtd.replace(day=28)
dtd = dtd.replace(year=(dtd.year + Count))
return mktime(dtd.timetuple())
def DateAddMonths(Seconds, Count):
"""Shifts Seconds (a date) forward by Count months.
If the day is >= 29, shifts to 28.
"""
if not isinstance(Count, (int, long)):
_Throw("Count argument not an int!")
dtd = datetime.date.fromtimestamp(Seconds)
if not Count == 0:
if dtd.day >= 29: dtd = dtd.replace(day=28)
Month = (dtd.month + Count) - 1
Years = Month / 12
dtd = dtd.replace(year=(dtd.year + Years))
Month = (Month % 12) + 1
dtd = dtd.replace(month=Month)
return mktime(dtd.timetuple())
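# Illustrative sketch (not part of the original module): rough expectations for
# the period arithmetic above. Results are local-time based, so only the
# formatted dates are asserted.
def _PeriodExamples():
   Base = DateTimeParse('2012-02-29')
   # Adding a year from Feb 29 lands on Feb 28 of the following year.
   assert DateToText(DateAddYears(Base, 1)) == '2013-02-28'
   # Month arithmetic clamps day 29+ to 28 before shifting.
   assert DateToText(DateAddPeriod(Base, '1m')) == '2012-03-28'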
|
gpl-3.0
| 7,456,639,810,320,415,000
| 29.693182
| 66
| 0.630507
| false
| 2.987832
| false
| false
| false
|
awni/tensorflow
|
tensorflow/contrib/skflow/python/skflow/ops/dropout_ops.py
|
1
|
1561
|
"""Dropout operations and handling."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Key to collect dropout probabilities.
DROPOUTS = "dropouts"
def dropout(tensor_in, prob, name=None):
"""Adds dropout node and stores probability tensor into graph collection.
Args:
tensor_in: Input tensor.
prob: Float or Tensor.
Returns:
    Tensor of the same shape as `tensor_in`.
  Raises:
    ValueError: If `prob` is not in `(0, 1]`.
"""
with tf.op_scope([tensor_in], name, "dropout") as name:
if isinstance(prob, float):
prob = tf.get_variable("prob", [],
initializer=tf.constant_initializer(prob),
trainable=False)
tf.add_to_collection(DROPOUTS, prob)
return tf.nn.dropout(tensor_in, prob)
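# Illustrative usage sketch (not part of the original module). `tensor_in` is a
# caller-provided tensor and the 0.5 probability is arbitrary.
def _dropout_usage_example(tensor_in):
  out = dropout(tensor_in, 0.5)        # registers a trainable=False prob variable
  probs = tf.get_collection(DROPOUTS)  # probability tensors a trainer can adjust
  return out, probs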
|
apache-2.0
| 6,417,583,460,618,329,000
| 32.934783
| 77
| 0.667521
| false
| 4.065104
| false
| false
| false
|
ryanraaum/african-mtdna
|
popdata_sources/montano2013/process.py
|
1
|
1562
|
from oldowan.mtconvert import seq2sites, sites2seq, str2sites
from string import translate
import pandas as pd
import numpy as np
import sys
sys.path.append('../../scripts')
from utils import *
## load metadata
metadata = pd.read_csv('metadata.csv', index_col=0)
region = range2region(metadata.ix[0,'SeqRange'])
with open('montano2013.csv', 'rU') as f:
f.readline() # skip past header
data = f.readlines()
counts = np.zeros((len(data), 5), dtype=np.int)
hids = []
sites = []
for i in range(len(data)):
x = data[i].strip().split(',')
hids.append(x[0])
sites.append(x[2])
count = x[4:]
for j in range(5):
if count[j] == '':
count[j] = '0'
counts[i,] = [int(y) for y in count]
## Validate
passed_validation = True
for i in range(len(sites)):
curr_sites = str2sites(sites[i])
seq = sites2seq(curr_sites, region)
mysites = seq2sites(seq)
if not mysites == curr_sites:
myseq = translate(sites2seq(mysites, region), None, '-')
if not seq == myseq:
passed_validation = False
print i, hids[i]
if passed_validation:
counter = [0] * 5
with open('processed.csv', 'w') as f:
for i in range(len(sites)):
hid = hids[i]
curr_sites = str2sites(sites[i])
seq = sites2seq(curr_sites, region)
mysites = ' '.join([str(x) for x in seq2sites(seq)])
for j in range(5):
prefix = metadata.ix[j,'NewPrefix']
for k in range(counts[i,j]):
counter[j] += 1
num = str(counter[j]).zfill(3)
newid = prefix + num
f.write('%s,%s,%s\n' % (newid, hid, mysites))
|
cc0-1.0
| -4,966,553,181,960,053,000
| 25.05
| 61
| 0.619718
| false
| 2.799283
| false
| false
| false
|
jeromecn/caravel_viz_full
|
caravel/dataframe.py
|
1
|
3190
|
""" Caravel wrapper around pandas.DataFrame.
TODO(bkyryliuk): add support for the conventions like: *_dim or dim_*
dimensions, *_ts, ts_*, ds_*, *_ds - datetime, etc.
TODO(bkyryliuk): recognize integer encoded enums.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pandas as pd
import numpy as np
INFER_COL_TYPES_THRESHOLD = 95
INFER_COL_TYPES_SAMPLE_SIZE = 100
class CaravelDataFrame(object):
def __init__(self, df):
self.__df = df.where((pd.notnull(df)), None)
@property
def size(self):
return len(self.__df.index)
@property
def data(self):
return self.__df.to_dict(orient='records')
@property
def columns_dict(self):
"""Provides metadata about columns for data visualization.
        :return: list of dicts, each with the fields name, type, is_date, is_dim and agg.
"""
if self.__df.empty:
return None
columns = []
sample_size = min(INFER_COL_TYPES_SAMPLE_SIZE, len(self.__df.index))
sample = self.__df
if sample_size:
sample = self.__df.sample(sample_size)
for col in self.__df.dtypes.keys():
column = {
'name': col,
'type': self.__df.dtypes[col].name,
'is_date': is_date(self.__df.dtypes[col]),
'is_dim': is_dimension(self.__df.dtypes[col], col),
}
agg = agg_func(self.__df.dtypes[col], col)
            if agg:
column['agg'] = agg
if column['type'] == 'object':
# check if encoded datetime
if (datetime_conversion_rate(sample[col]) >
INFER_COL_TYPES_THRESHOLD):
column.update({
'type': 'datetime_string',
'is_date': True,
'is_dim': False,
'agg': None
})
# 'agg' is optional attribute
            if not column.get('agg'):
column.pop('agg', None)
columns.append(column)
return columns
# It will give false positives for numbers that are stored as strings.
# It is hard to distinguish integer numbers from timestamps.
def datetime_conversion_rate(data_series):
success = 0
total = 0
for value in data_series:
total = total + 1
try:
pd.to_datetime(value)
success = success + 1
except Exception:
continue
return 100 * success / total
def is_date(dtype):
if dtype.name:
return dtype.name.startswith('datetime')
def is_dimension(dtype, column_name):
if is_id(column_name):
return False
return dtype.name in ('object', 'bool')
def is_id(column_name):
return column_name.startswith('id') or column_name.endswith('id')
def agg_func(dtype, column_name):
# consider checking for key substring too.
if is_id(column_name):
return 'count_distinct'
if np.issubdtype(dtype, np.number):
return 'sum'
return None
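# Illustrative sketch (not part of the original module): expected behaviour of
# the column heuristics above for a few made-up column names.
def _helper_examples():
    assert agg_func(np.dtype('int64'), 'user_id') == 'count_distinct'
    assert agg_func(np.dtype('float64'), 'amount') == 'sum'
    assert is_dimension(np.dtype('object'), 'country') is True
    assert is_date(np.dtype('datetime64[ns]')) is True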
|
apache-2.0
| -5,963,044,960,317,949,000
| 27.482143
| 76
| 0.558621
| false
| 4.007538
| false
| false
| false
|
robertu94/autograder
|
autograder/discover/handin.py
|
1
|
4601
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module is part of the Clemson ACM Auto Grader
Copyright (c) 2016, Robert Underwood
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module is contains the Clemson Handin interaction
"""
import itertools
import os
import yaml
from autograder.source import clone, update
def clone_metadata(settings):
"""
Clones metadata for the first time
"""
discovery_settings = {
"clone": {
"timeout": None,
"method": "hg"
}
}
clone.clone(discovery_settings, settings['project']['discovery'])
def update_metadata(settings):
"""
Clones metadata for the first time
"""
discovery_settings = {
"update": {
"timeout": None,
"method": "hg"
}
}
update.update(discovery_settings, settings['project']['discovery'])
def discover(settings):
"""
Discovers metadata from a Handin Repository
"""
project_directory = settings['project']['discovery']['directory']
assignment_name = settings['project']['discovery']['assignment']
if not os.path.exists(project_directory):
clone_metadata(settings)
#We are going to unintentionally update all repos when we clone them
#So we need to force an update here.
settings['update']['forced'] = True
else:
update_metadata(settings)
manifest_file = os.path.join(project_directory, "admin/manifest.yaml")
    assignment_manifest_file = os.path.join(project_directory, "admin/assignments",
assignment_name + ".yaml")
with open(manifest_file) as infile:
manifest = yaml.load(infile)
students_usernames = set(manifest['students'])
    with open(assignment_manifest_file) as infile:
assignment_manifest = yaml.load(infile)
shared_buckets_users = set(itertools.chain(
*[assignment_manifest['buckets'][bucket] for bucket in assignment_manifest['buckets']]))
ungrouped_students = students_usernames - shared_buckets_users
student_objects = []
for student in ungrouped_students:
student_objects.append(student_from_username(settings, student, student))
for bucket in assignment_manifest['buckets']:
needs_grading = True
for student in assignment_manifest['buckets'][bucket]:
            if any(s['username'] == student for s in student_objects):
raise RuntimeError("Students must be uniquely mapped to a bucket")
student_objects.append(
student_from_username(settings, bucket, student, needs_grading))
needs_grading = False
return student_objects
def student_from_username(settings, bucket_name, username, needs_grading=True):
"""
Format student structures from usernames
"""
directory = settings['project']['discovery']['directory']
assignment = settings['project']['discovery']['assignment']
domain = settings['project']['discovery']['domain']
base_repo = settings['project']['discovery']['repo']
return {
"directory": os.path.join(directory, "assignments", assignment, username),
"email": "{username}@{domain}".format(username=username, domain=domain),
"username": username,
"repo": os.path.join(base_repo, "assignments", assignment, bucket_name),
"needs_grading": needs_grading
}
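# Illustrative sketch (not part of the original module): the shapes discover()
# expects after yaml.load() of the two manifest files. All values are made up.
_EXAMPLE_MANIFEST = {'students': ['alice', 'bob', 'carol']}
_EXAMPLE_ASSIGNMENT_MANIFEST = {
    'buckets': {
        # shared repo; only the first member listed needs grading
        'team1': ['alice', 'bob'],
    }
}
# 'carol' is in no bucket, so she would be treated as an ungrouped student.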
|
bsd-2-clause
| 7,051,272,176,042,949,000
| 36.104839
| 96
| 0.690285
| false
| 4.484405
| false
| false
| false
|
universalcore/springboard
|
springboard/utils.py
|
1
|
6735
|
import os
import re
from functools import wraps
from urlparse import urlparse
import math
from elasticutils import S
from elasticgit.search import RepoHelper
default_excluded_paths = ['/health/', '/api/notify/']
def is_excluded_path(path, excluded_paths):
excl_paths = config_list(excluded_paths) + default_excluded_paths
return (
path and
any([p for p in excl_paths if path.startswith(p)]))
def parse_repo_name(repo_url):
pr = urlparse(repo_url)
_, _, repo_name_dot_ext = pr.path.rpartition('/')
if any([
repo_name_dot_ext.endswith('.git'),
repo_name_dot_ext.endswith('.json')]):
repo_name, _, _ = repo_name_dot_ext.partition('.')
return repo_name
return repo_name_dot_ext
def is_remote_repo_url(repo_url):
return any([
repo_url.startswith('http://'),
repo_url.startswith('https://')])
def repo_url(repo_dir, repo_location):
# If repo_location is an http URL we leave it as is and
# assume it specifies a unicore.distribute repo endpoint.
# If repo_location is not an http URL, we assume it specifies
# a local repo in repo_dir.
if is_remote_repo_url(repo_location):
return repo_location
return os.path.abspath(os.path.join(repo_dir, repo_location))
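# Illustrative sketch (not part of the original module): expected behaviour of
# the repo helpers above (POSIX paths; the URLs and paths are made up).
def _repo_url_examples():
    assert parse_repo_name('https://example.org/repos/unicore-cms-content.json') == 'unicore-cms-content'
    assert is_remote_repo_url('https://example.org/repos/foo.json')
    assert repo_url('/var/repos', 'local-repo') == '/var/repos/local-repo'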
def ga_context(context_func):
"""
A decorator for Cornice views that allows one to set extra parameters
for Google Analytics tracking::
@ga_context(lambda context: {'dt': context['category'].title, })
@view_config(route_name='page')
def view(request):
return {
'category': self.workspace.S(Category).filter(title='foo')[0],
}
:param func context_func:
A function which takes one argument, a context dictionary made
available to the template.
:returns:
A dict containing the extra variables for Google Analytics
tracking.
"""
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
context = func(self, *args, **kwargs)
self.request.google_analytics.update(context_func(context))
return context
return wrapper
return decorator
def config_list(data):
"""
A function that takes a string of values separated by newline characters
and returns a list of those values
:param func context_func:
A function which takes one argument, a string of values separated by
newline characters
:returns:
A list containing the values separated by newline characters,
stripped of whitespace between the value and newline character
"""
return filter(None, (x.strip() for x in data.splitlines()))
def config_dict(data):
"""
A function that takes a string of pair values, indicated by '=', separated
by newline characters and returns a dict of those value pairs
:param func context_func:
A function which takes one argument, a string of value pairs with
'= between them' separated by newline characters
:returns:
A dict containing the value pairs separated by newline characters
"""
lines = config_list(data)
return dict(re.split('\s*=\s*', value) for value in lines)
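# Illustrative sketch (not part of the original module): expected behaviour of
# the two config helpers above (Python 2 semantics, as used by this module).
def _config_examples():
    assert config_list("  /health/\n\n/api/\n") == ['/health/', '/api/']
    assert config_dict("a = 1\nb=2\n") == {'a': '1', 'b': '2'}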
class Paginator(object):
"""
A thing that helps us page through result sets
:param iterable results:
The iterable of objects to paginate.
:param int page:
The page number, zero-based.
:param int results_per_page:
The number of objects in each page.
:param int slider_value:
The number of page numbers to display, excluding the current page.
"""
def __init__(self, results, page, results_per_page=10, slider_value=5):
self.results = results
self.page = page
self.results_per_page = results_per_page
self.slider_value = slider_value
self.buffer_value = self.slider_value / 2
def total_count(self):
if isinstance(self.results, S):
return self.results.count()
return len(self.results)
def get_page(self):
return self.results[self.page * self.results_per_page:
(self.page + 1) * self.results_per_page]
def has_next_page(self):
return ((self.page + 1) * self.results_per_page) < self.total_count()
def has_previous_page(self):
return self.page
def total_pages(self):
return int(
math.ceil(
float(self.total_count()) / float(self.results_per_page)))
def page_numbers(self):
if (self.page - self.buffer_value) < 0:
return [page_number
for page_number in range(
0, min([self.slider_value, self.total_pages()]))]
elif (self.page + self.buffer_value) >= self.total_pages():
return [page_number
for page_number in range(
max((self.total_pages() - self.slider_value), 0),
self.total_pages())
]
else:
return range(self.page - self.buffer_value,
self.page + self.buffer_value + 1)
def page_numbers_left(self):
page_numbers = self.page_numbers()
if not any(page_numbers):
return False
return page_numbers[:page_numbers.index(self.page)]
def page_numbers_right(self):
page_numbers = self.page_numbers()
if not any(page_numbers):
return False
return page_numbers[page_numbers.index(self.page) + 1:]
def needs_start_ellipsis(self):
page_numbers = self.page_numbers()
if not any(page_numbers):
return False
return page_numbers[0] > 1
def needs_end_ellipsis(self):
page_numbers = self.page_numbers()
if not any(page_numbers):
return False
return page_numbers[-1] < (self.total_pages() - 2)
def show_start(self):
page_numbers = self.page_numbers()
if not any(page_numbers):
return False
return page_numbers[0] > 0
def show_end(self):
page_numbers = self.page_numbers()
if not any(page_numbers):
return False
return page_numbers[-1] < self.total_pages() - 1
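# Illustrative sketch (not part of the original module): paging a plain list
# (Python 2 semantics, matching the rest of this module).
def _paginator_example():
    pager = Paginator(range(95), page=3, results_per_page=10)
    assert pager.total_pages() == 10
    assert pager.get_page() == range(30, 40)
    assert pager.has_next_page() and pager.has_previous_page()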
class CachingRepoHelper(RepoHelper):
"""
A subclass of RepoHelper that caches the repo's active
branch name to avoid remote calls to get the repo branch.
"""
def active_branch_name(self):
if not hasattr(self, '_active_branch_name'):
self._active_branch_name = super(
CachingRepoHelper, self).active_branch_name()
return self._active_branch_name
|
bsd-2-clause
| 7,822,912,496,108,433,000
| 30.325581
| 78
| 0.610987
| false
| 4.081818
| false
| false
| false
|
wbond/oscrypto
|
oscrypto/_openssl/_libcrypto_cffi.py
|
1
|
9503
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import re
from .. import _backend_config
from .._errors import pretty_message
from .._ffi import get_library, register_ffi
from ..errors import LibraryNotFoundError
from cffi import FFI
__all__ = [
'is_libressl',
'libcrypto',
'libressl_version',
'libressl_version_info',
'version',
'version_info',
]
libcrypto_path = _backend_config().get('libcrypto_path')
if libcrypto_path is None:
libcrypto_path = get_library('crypto', 'libcrypto.dylib', '42')
if not libcrypto_path:
raise LibraryNotFoundError('The library libcrypto could not be found')
try:
vffi = FFI()
vffi.cdef("const char *SSLeay_version(int type);")
version_string = vffi.string(vffi.dlopen(libcrypto_path).SSLeay_version(0)).decode('utf-8')
except (AttributeError):
vffi = FFI()
vffi.cdef("const char *OpenSSL_version(int type);")
version_string = vffi.string(vffi.dlopen(libcrypto_path).OpenSSL_version(0)).decode('utf-8')
is_libressl = 'LibreSSL' in version_string
version_match = re.search('\\b(\\d\\.\\d\\.\\d[a-z]*)\\b', version_string)
if not version_match:
version_match = re.search('(?<=LibreSSL )(\\d\\.\\d(\\.\\d)?)\\b', version_string)
if not version_match:
raise LibraryNotFoundError('Error detecting the version of libcrypto')
version = version_match.group(1)
version_parts = re.sub('(\\d)([a-z]+)', '\\1.\\2', version).split('.')
version_info = tuple(int(part) if part.isdigit() else part for part in version_parts)
# LibreSSL is compatible with libcrypto from OpenSSL 1.0.1
libressl_version = ''
libressl_version_info = tuple()
if is_libressl:
libressl_version = version
libressl_version_info = version_info
version = '1.0.1'
version_info = (1, 0, 1)
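# A minimal illustration (hypothetical helper that mirrors the parsing above):
# shows how a raw version string such as '1.0.2k' becomes the version_info
# tuple compared against by the feature checks below, e.g. (1, 0, 2, 'k').
def _example_parse_version(raw='1.0.2k'):
    parts = re.sub('(\\d)([a-z]+)', '\\1.\\2', raw).split('.')
    return tuple(int(part) if part.isdigit() else part for part in parts)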
ffi = FFI()
libcrypto = ffi.dlopen(libcrypto_path)
register_ffi(libcrypto, ffi)
if version_info < (0, 9, 8):
raise LibraryNotFoundError(pretty_message(
'''
OpenSSL versions older than 0.9.8 are not supported - found version %s
''',
version
))
if version_info < (1, 1):
ffi.cdef("""
void ERR_load_crypto_strings(void);
void ERR_free_strings(void);
""")
# The typedef uintptr_t lines here allow us to check for a NULL pointer,
# without having to redefine the structs in our code. This is kind of a hack,
# but it shouldn't cause problems since we treat these as opaque.
ffi.cdef("""
typedef ... EVP_MD;
typedef uintptr_t EVP_CIPHER_CTX;
typedef ... EVP_CIPHER;
typedef ... ENGINE;
typedef uintptr_t EVP_PKEY;
typedef uintptr_t X509;
typedef uintptr_t DH;
typedef uintptr_t RSA;
typedef uintptr_t DSA;
typedef uintptr_t EC_KEY;
typedef ... EVP_MD_CTX;
typedef ... EVP_PKEY_CTX;
typedef ... BN_GENCB;
typedef ... BIGNUM;
unsigned long ERR_get_error(void);
char *ERR_error_string(unsigned long e, char *buf);
unsigned long ERR_peek_error(void);
void OPENSSL_config(const char *config_name);
EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void);
void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx);
int EVP_CIPHER_CTX_set_key_length(EVP_CIPHER_CTX *x, int keylen);
int EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *x, int padding);
int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr);
const EVP_CIPHER *EVP_aes_128_cbc(void);
const EVP_CIPHER *EVP_aes_192_cbc(void);
const EVP_CIPHER *EVP_aes_256_cbc(void);
const EVP_CIPHER *EVP_des_cbc(void);
const EVP_CIPHER *EVP_des_ede_cbc(void);
const EVP_CIPHER *EVP_des_ede3_cbc(void);
const EVP_CIPHER *EVP_rc4(void);
const EVP_CIPHER *EVP_rc2_cbc(void);
int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
ENGINE *impl, const char *key,
const char *iv);
int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, char *out, int *outl,
const char *in, int inl);
int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, char *out, int *outl);
int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
ENGINE *impl, const char *key,
const char *iv);
int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, char *out, int *outl,
const char *in, int inl);
int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, char *out, int *outl);
EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **a, const char **pp,
long length);
EVP_PKEY *d2i_PUBKEY(EVP_PKEY **a, const char **pp, long length);
int i2d_PUBKEY(EVP_PKEY *a, char **pp);
void EVP_PKEY_free(EVP_PKEY *key);
X509 *d2i_X509(X509 **px, const char **in, int len);
int i2d_X509(X509 *x, char **out);
EVP_PKEY *X509_get_pubkey(X509 *x);
void X509_free(X509 *a);
int EVP_PKEY_size(EVP_PKEY *pkey);
RSA *EVP_PKEY_get1_RSA(EVP_PKEY *pkey);
void RSA_free(RSA *r);
int RSA_public_encrypt(int flen, const char *from,
char *to, RSA *rsa, int padding);
int RSA_private_encrypt(int flen, const char *from,
char *to, RSA *rsa, int padding);
int RSA_public_decrypt(int flen, const char *from,
char *to, RSA *rsa, int padding);
int RSA_private_decrypt(int flen, const char *from,
char *to, RSA *rsa, int padding);
int EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *d, unsigned int cnt);
const EVP_MD *EVP_md5(void);
const EVP_MD *EVP_sha1(void);
const EVP_MD *EVP_sha224(void);
const EVP_MD *EVP_sha256(void);
const EVP_MD *EVP_sha384(void);
const EVP_MD *EVP_sha512(void);
int PKCS12_key_gen_uni(char *pass, int passlen, char *salt,
int saltlen, int id, int iter, int n,
char *out, const EVP_MD *md_type);
void BN_free(BIGNUM *a);
int BN_dec2bn(BIGNUM **a, const char *str);
DH *DH_new(void);
int DH_generate_parameters_ex(DH *dh, int prime_len, int generator, BN_GENCB *cb);
int i2d_DHparams(const DH *a, char **pp);
void DH_free(DH *dh);
RSA *RSA_new(void);
int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb);
int i2d_RSAPublicKey(RSA *a, char **pp);
int i2d_RSAPrivateKey(RSA *a, char **pp);
DSA *DSA_new(void);
int DSA_generate_parameters_ex(DSA *dsa, int bits,
const char *seed, int seed_len, int *counter_ret,
unsigned long *h_ret, BN_GENCB *cb);
int DSA_generate_key(DSA *a);
int i2d_DSA_PUBKEY(const DSA *a, char **pp);
int i2d_DSAPrivateKey(const DSA *a, char **pp);
void DSA_free(DSA *dsa);
EC_KEY *EC_KEY_new_by_curve_name(int nid);
int EC_KEY_generate_key(EC_KEY *key);
void EC_KEY_set_asn1_flag(EC_KEY *, int);
int i2d_ECPrivateKey(EC_KEY *key, char **out);
int i2o_ECPublicKey(EC_KEY *key, char **out);
void EC_KEY_free(EC_KEY *key);
""")
if version_info < (1, 1):
ffi.cdef("""
EVP_MD_CTX *EVP_MD_CTX_create(void);
void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx);
""")
else:
ffi.cdef("""
EVP_MD_CTX *EVP_MD_CTX_new(void);
void EVP_MD_CTX_free(EVP_MD_CTX *ctx);
""")
if version_info < (1,):
ffi.cdef("""
typedef ... *DSA_SIG;
typedef ... *ECDSA_SIG;
DSA_SIG *DSA_do_sign(const char *dgst, int dlen, DSA *dsa);
ECDSA_SIG *ECDSA_do_sign(const char *dgst, int dgst_len, EC_KEY *eckey);
DSA_SIG *d2i_DSA_SIG(DSA_SIG **v, const char **pp, long length);
ECDSA_SIG *d2i_ECDSA_SIG(ECDSA_SIG **v, const char **pp, long len);
int i2d_DSA_SIG(const DSA_SIG *a, char **pp);
int i2d_ECDSA_SIG(const ECDSA_SIG *a, char **pp);
int DSA_do_verify(const char *dgst, int dgst_len, DSA_SIG *sig, DSA *dsa);
int ECDSA_do_verify(const char *dgst, int dgst_len, const ECDSA_SIG *sig, EC_KEY *eckey);
void DSA_SIG_free(DSA_SIG *a);
void ECDSA_SIG_free(ECDSA_SIG *a);
DSA *EVP_PKEY_get1_DSA(EVP_PKEY *pkey);
EC_KEY *EVP_PKEY_get1_EC_KEY(EVP_PKEY *pkey);
int RSA_verify_PKCS1_PSS(RSA *rsa, const char *mHash,
const EVP_MD *Hash, const char *EM,
int sLen);
int RSA_padding_add_PKCS1_PSS(RSA *rsa, char *EM,
const char *mHash, const EVP_MD *Hash,
int sLen);
int EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl);
int EVP_SignFinal(EVP_MD_CTX *ctx, char *sig, unsigned int *s, EVP_PKEY *pkey);
int EVP_VerifyFinal(EVP_MD_CTX *ctx, char *sigbuf, unsigned int siglen, EVP_PKEY *pkey);
void EVP_MD_CTX_set_flags(EVP_MD_CTX *ctx, int flags);
""")
else:
ffi.cdef("""
int PKCS5_PBKDF2_HMAC(const char *pass, int passlen,
const char *salt, int saltlen, int iter,
const EVP_MD *digest,
int keylen, char *out);
int EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey);
int EVP_DigestSignFinal(EVP_MD_CTX *ctx, char *sig, size_t *siglen);
int EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey);
int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const char *sig, size_t siglen);
int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, int cmd, int p1, void *p2);
""")
|
mit
| 1,742,790,982,224,963,300
| 35.55
| 118
| 0.614964
| false
| 2.85033
| false
| false
| false
|
paris-ci/CloudBot
|
plugins/remind.py
|
1
|
5860
|
"""
remind.py
Allows users to add reminders for various tasks.
Created By:
- Pangea <https://github.com/PangeaCake>
- Luke Rogers <https://github.com/lukeroge>
License:
GPL v3
"""
from datetime import datetime
import time
import asyncio
from sqlalchemy import Table, Column, String, DateTime, PrimaryKeyConstraint
from cloudbot import hook
from cloudbot.util import database
from cloudbot.util.timeparse import time_parse
from cloudbot.util.timeformat import format_time, time_since
from cloudbot.util import colors
table = Table(
'reminders',
database.metadata,
Column('network', String(50)),
Column('added_user', String(30)),
Column('added_time', DateTime),
Column('added_chan', String(50)),
Column('message', String(512)),
Column('remind_time', DateTime),
PrimaryKeyConstraint('network', 'added_user', 'added_time')
)
@asyncio.coroutine
def delete_reminder(async, db, network, remind_time, user):
query = table.delete() \
.where(table.c.network == network.lower()) \
.where(table.c.remind_time == remind_time) \
.where(table.c.added_user == user.lower())
yield from async(db.execute, query)
yield from async(db.commit)
@asyncio.coroutine
def delete_all(async, db, network, user):
query = table.delete() \
.where(table.c.network == network.lower()) \
.where(table.c.added_user == user.lower())
yield from async(db.execute, query)
yield from async(db.commit)
@asyncio.coroutine
def add_reminder(async, db, network, added_user, added_chan, message, remind_time, added_time):
query = table.insert().values(
network=network.lower(),
added_user=added_user.lower(),
added_time=added_time,
added_chan=added_chan.lower(),
message=message,
remind_time=remind_time
)
yield from async(db.execute, query)
yield from async(db.commit)
@asyncio.coroutine
@hook.on_start()
def load_cache(async, db):
global reminder_cache
reminder_cache = []
for network, remind_time, added_time, user, message in (yield from async(_load_cache_db, db)):
reminder_cache.append((network, remind_time, added_time, user, message))
def _load_cache_db(db):
query = db.execute(table.select())
return [(row["network"], row["remind_time"], row["added_time"], row["added_user"], row["message"]) for row in query]
@asyncio.coroutine
@hook.periodic(30, initial_interval=30)
def check_reminders(bot, async, db):
current_time = datetime.now()
for reminder in reminder_cache:
network, remind_time, added_time, user, message = reminder
if remind_time <= current_time:
if network not in bot.connections:
# connection is invalid
                yield from delete_reminder(async, db, network, remind_time, user)
yield from load_cache(async, db)
continue
conn = bot.connections[network]
if not conn.ready:
return
remind_text = colors.parse(time_since(added_time, count=2))
alert = colors.parse("{}, you have a reminder from $(b){}$(clear) ago!".format(user, remind_text))
conn.message(user, alert)
conn.message(user, '"{}"'.format(message))
delta = (remind_time - added_time).seconds
if delta > (30 * 60):
late_time = time_since(remind_time, count=2)
late = "(I'm sorry for delivering this message $(b){}$(clear) late," \
" it seems I was unable to deliver it on time)".format(late_time)
conn.message(user, colors.parse(late))
yield from delete_reminder(async, db, network, remind_time, user)
yield from load_cache(async, db)
@asyncio.coroutine
@hook.command('remind', 'reminder')
def remind(text, nick, chan, db, conn, notice, async):
"""<1 minute, 30 seconds>: <do task> -- reminds you to <do task> in <1 minute, 30 seconds>"""
count = len([x for x in reminder_cache if x[0] == conn.name and x[3] == nick.lower()])
if text == "clear":
if count == 0:
return "You have no reminders to delete."
yield from delete_all(async, db, conn.name, nick)
yield from load_cache(async, db)
return "Deleted all ({}) reminders for {}!".format(count, nick)
# split the input on the first ":"
parts = text.split(":", 1)
if len(parts) == 1:
# user didn't add a message, send them help
notice(remind.__doc__)
return
if count > 10:
return "Sorry, you already have too many reminders queued (10), you will need to wait or " \
"clear your reminders to add any more."
time_string = parts[0].strip()
message = colors.strip_all(parts[1].strip())
# get the current time in both DateTime and Unix Epoch
current_epoch = time.time()
current_time = datetime.fromtimestamp(current_epoch)
# parse the time input, return error if invalid
seconds = time_parse(time_string)
if not seconds:
return "Invalid input."
if seconds > 2764800 or seconds < 60:
return "Sorry, remind input must be more then a minute, and less then one month."
# work out the time to remind the user, and check if that time is in the past
remind_time = datetime.fromtimestamp(current_epoch + seconds)
if remind_time < current_time:
return "I can't remind you in the past!"
# finally, add the reminder and send a confirmation message
yield from add_reminder(async, db, conn.name, nick, chan, message, remind_time, current_time)
yield from load_cache(async, db)
remind_text = format_time(seconds, count=2)
output = "Alright, I'll remind you \"{}\" in $(b){}$(clear)!".format(message, remind_text)
return colors.parse(output)
|
gpl-3.0
| 2,699,591,006,697,351,000
| 32.107345
| 120
| 0.635836
| false
| 3.603936
| false
| false
| false
|
kovidgoyal/kitty
|
docs/installer.py
|
1
|
7947
|
#!/usr/bin/env python3
# vim:fileencoding=utf-8
# License: GPL v3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import atexit
import json
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
py3 = sys.version_info[0] > 2
is64bit = platform.architecture()[0] == '64bit'
is_macos = 'darwin' in sys.platform.lower()
if is_macos:
mac_ver = tuple(map(int, platform.mac_ver()[0].split('.')))
if mac_ver[:2] < (10, 12):
raise SystemExit('Your version of macOS is too old, at least 10.12 is required')
try:
__file__
from_file = True
except NameError:
from_file = False
if py3:
unicode = str
raw_input = input
import urllib.request as urllib
def encode_for_subprocess(x):
return x
else:
from future_builtins import map
import urllib2 as urllib
def encode_for_subprocess(x):
if isinstance(x, unicode):
x = x.encode('utf-8')
return x
def run(*args):
if len(args) == 1:
args = shlex.split(args[0])
args = list(map(encode_for_subprocess, args))
ret = subprocess.Popen(args).wait()
if ret != 0:
raise SystemExit(ret)
class Reporter: # {{{
def __init__(self, fname):
self.fname = fname
self.last_percent = 0
def __call__(self, blocks, block_size, total_size):
percent = (blocks*block_size)/float(total_size)
report = '\rDownloaded {:.1%} '.format(percent)
if percent - self.last_percent > 0.05:
self.last_percent = percent
print(report, end='')
sys.stdout.flush()
# }}}
def get_latest_release_data():
print('Checking for latest release on GitHub...')
req = urllib.Request('https://api.github.com/repos/kovidgoyal/kitty/releases/latest', headers={'Accept': 'application/vnd.github.v3+json'})
try:
res = urllib.urlopen(req).read().decode('utf-8')
except Exception as err:
raise SystemExit('Failed to contact {} with error: {}'.format(req.get_full_url(), err))
data = json.loads(res)
html_url = data['html_url'].replace('/tag/', '/download/').rstrip('/')
for asset in data.get('assets', ()):
name = asset['name']
if is_macos:
if name.endswith('.dmg'):
return html_url + '/' + name, asset['size']
else:
if name.endswith('.txz'):
if is64bit:
if name.endswith('-x86_64.txz'):
return html_url + '/' + name, asset['size']
else:
if name.endswith('-i686.txz'):
return html_url + '/' + name, asset['size']
raise SystemExit('Failed to find the installer package on github')
def do_download(url, size, dest):
print('Will download and install', os.path.basename(dest))
reporter = Reporter(os.path.basename(dest))
    # Check that the advertised content length matches the expected size
rq = urllib.urlopen(url)
headers = rq.info()
sent_size = int(headers['content-length'])
if sent_size != size:
raise SystemExit('Failed to download from {} Content-Length ({}) != {}'.format(url, sent_size, size))
with open(dest, 'wb') as f:
while f.tell() < size:
raw = rq.read(8192)
if not raw:
break
f.write(raw)
reporter(f.tell(), 1, size)
rq.close()
if os.path.getsize(dest) < size:
raise SystemExit('Download failed, try again later')
print('\rDownloaded {} bytes'.format(os.path.getsize(dest)))
def clean_cache(cache, fname):
for x in os.listdir(cache):
if fname not in x:
os.remove(os.path.join(cache, x))
def download_installer(url, size):
fname = url.rpartition('/')[-1]
tdir = tempfile.gettempdir()
cache = os.path.join(tdir, 'kitty-installer-cache')
if not os.path.exists(cache):
os.makedirs(cache)
clean_cache(cache, fname)
dest = os.path.join(cache, fname)
if os.path.exists(dest) and os.path.getsize(dest) == size:
print('Using previously downloaded', fname)
return dest
if os.path.exists(dest):
os.remove(dest)
do_download(url, size, dest)
return dest
def macos_install(dmg, dest='/Applications', launch=True):
mp = tempfile.mkdtemp()
atexit.register(shutil.rmtree, mp)
run('hdiutil', 'attach', dmg, '-mountpoint', mp)
try:
os.chdir(mp)
app = 'kitty.app'
d = os.path.join(dest, app)
if os.path.exists(d):
shutil.rmtree(d)
dest = os.path.join(dest, app)
run('ditto', '-v', app, dest)
print('Successfully installed kitty into', dest)
if launch:
run('open', dest)
finally:
os.chdir('/')
run('hdiutil', 'detach', mp)
def linux_install(installer, dest=os.path.expanduser('~/.local'), launch=True):
dest = os.path.join(dest, 'kitty.app')
if os.path.exists(dest):
shutil.rmtree(dest)
os.makedirs(dest)
print('Extracting tarball...')
run('tar', '-C', dest, '-xJof', installer)
print('kitty successfully installed to', dest)
kitty = os.path.join(dest, 'bin', 'kitty')
print('Use', kitty, 'to run kitty')
if launch:
run(kitty, '--detach')
def main(dest=None, launch=True, installer=None):
if not dest:
if is_macos:
dest = '/Applications'
else:
dest = os.path.expanduser('~/.local')
machine = os.uname()[4]
if machine and machine.lower().startswith('arm'):
raise SystemExit(
'You are running on an ARM system. The kitty binaries are only'
' available for x86 systems. You will have to build from'
' source.')
if not installer:
url, size = get_latest_release_data()
installer = download_installer(url, size)
else:
installer = os.path.abspath(installer)
if not os.access(installer, os.R_OK):
raise SystemExit('Could not read from: {}'.format(installer))
if is_macos:
macos_install(installer, dest=dest, launch=launch)
else:
linux_install(installer, dest=dest, launch=launch)
def script_launch():
# To test: python3 -c "import runpy; runpy.run_path('installer.py', run_name='script_launch')"
def path(x):
return os.path.expandvars(os.path.expanduser(x))
def to_bool(x):
return x.lower() in {'y', 'yes', '1', 'true'}
type_map = {x: path for x in 'dest installer'.split()}
type_map['launch'] = to_bool
kwargs = {}
for arg in sys.argv[1:]:
if arg:
m = re.match('([a-z_]+)=(.+)', arg)
if m is None:
raise SystemExit('Unrecognized command line argument: ' + arg)
k = m.group(1)
if k not in type_map:
raise SystemExit('Unrecognized command line argument: ' + arg)
kwargs[k] = type_map[k](m.group(2))
main(**kwargs)
def update_intaller_wrapper():
# To run: python3 -c "import runpy; runpy.run_path('installer.py', run_name='update_wrapper')" installer.sh
with open(__file__, 'rb') as f:
src = f.read().decode('utf-8')
wrapper = sys.argv[-1]
with open(wrapper, 'r+b') as f:
raw = f.read().decode('utf-8')
nraw = re.sub(r'^# HEREDOC_START.+^# HEREDOC_END', lambda m: '# HEREDOC_START\n{}\n# HEREDOC_END'.format(src), raw, flags=re.MULTILINE | re.DOTALL)
if 'update_intaller_wrapper()' not in nraw:
raise SystemExit('regex substitute of HEREDOC failed')
f.seek(0), f.truncate()
f.write(nraw.encode('utf-8'))
if __name__ == '__main__' and from_file:
main()
elif __name__ == 'update_wrapper':
update_intaller_wrapper()
elif __name__ == 'script_launch':
script_launch()
|
gpl-3.0
| -6,564,165,642,314,307,000
| 30.915663
| 155
| 0.587014
| false
| 3.517928
| false
| false
| false
|
rudatalab/python-objectcube
|
api/api/__init__.py
|
1
|
1147
|
from flask import Flask, jsonify, render_template
from flask_restful import Api
from resource.concept import ConceptResource, ConceptResourceByID
from resource.tag import TagResource, TagResourceByID, TagResourceByValue
from resource.object import ObjectResource, ObjectResourceByID
from resource.blob import BlobResourceByURI
from resource.meta import get_all_meta
app = Flask(__name__)
api = Api(app)
# Concept API
api.add_resource(ConceptResource, '/api/concepts')
api.add_resource(ConceptResourceByID, '/api/concepts/<int:id_>')
# Tag API
api.add_resource(TagResource, '/api/tags')
api.add_resource(TagResourceByID, '/api/tags/<int:id_>')
api.add_resource(TagResourceByValue, '/api/tags/values')
# Object API
api.add_resource(ObjectResource, '/api/objects')
api.add_resource(ObjectResourceByID, '/api/objects/<int:id_>')
# Blob API
api.add_resource(BlobResourceByURI, '/api/blobs/uri/<string:digest>')
@app.route('/api/description')
def api_client():
f = get_all_meta()
return jsonify(**f)
@app.route('/api')
def index():
return render_template('api.html')
if __name__ == '__main__':
app.run(debug=True, port=4000)
|
bsd-2-clause
| -2,096,492,035,668,442,600
| 26.309524
| 73
| 0.744551
| false
| 3.22191
| false
| false
| false
|
Skolopedrion/Theria
|
src/animation/animation.py
|
1
|
1370
|
#!/usr/bin/env python3
# coding: utf-8
import os
import glob
import sfml as sf
class Animation:
"""
An animated texture.
"""
def __init__(self, frames, interval=0):
"""
:param frames: Iterable of sf.Texture objects
:param interval: Time between two frames (default: 0.0s)
"""
self.frames = frames
self.interval = interval
self.index = 0
self.time = 0
@classmethod
def load_from_dir(cls, path, interval=None):
"""
Load an animation from a directory. Directory must contain some image
files named by their index (e.g. "1.png", "2.png", etc...)
:param path: str object, path to the directory to load
:param interval: Time between two frames
:return: Animation
"""
if path[-1] not in (os.sep, '/'):
path += os.sep
frames = list()
        # sort so frames are appended in index order; glob does not guarantee order
        for frame_path in sorted(glob.iglob(path + '[0-9].png')):
frame = sf.Texture.from_file(frame_path)
frames.append(frame)
if interval is None:
return cls(frames)
else:
return cls(frames, interval)
def get_frame(self, dt):
"""
Returns the texture of the entity.
:param dt: The time between the current and the previous frame.
:return: A sf.Texture instance
"""
self.time += dt
if self.time > self.interval:
self.time = 0
self.index += 1
self.index %= len(self.frames)
return self.frames[self.index]
def reset(self):
self.time = 0
self.index = 0
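# A minimal usage sketch (hypothetical asset path, not part of the original
# module): load numbered frames from a directory and advance the animation by
# the elapsed frame time inside a render loop.
def _example_animation_usage(dt):
    walk = Animation.load_from_dir('assets/player/walk/', interval=0.1)
    texture = walk.get_frame(dt)  # sf.Texture to draw for this frame
    return texture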
|
mit
| -2,537,892,201,827,231,000
| 18.571429
| 71
| 0.656204
| false
| 3.017621
| false
| false
| false
|
gdanezis/rousseau-chain
|
rousseau-package/attic/chain.py
|
1
|
3538
|
# Make a hash chain with O(1) update and O(log(N)) proof of membership
from hashlib import sha256 as H
from struct import pack
# Some constants
# The initial value of any chain
# https://en.wikipedia.org/wiki/Om
initialH = H("Om").digest()
def pointFingers(seqLen):
""" Returns the indexes for a particular sequence ID """
seq = 1
while seq <= seqLen:
yield seqLen - seq
seq = seq * 2
class chain(object):
def __init__(self, entries=None, nodes=None):
""" Create a new chain object """
# This holds the actual log entries
        # it is a sequence of byte arrays
self.entries = []
if entries is not None:
self.entries = entries
# The list of 'nodes' holding hashes of the current entry,
# and a sequence of previous node hashes.
self.nodes = []
if nodes is not None:
self.nodes = nodes
def head(self):
""" Return the head of the chain """
if self.nodes == []:
return initialH
else:
return self.nodes[-1]
def add(self, entry):
""" Add an entry at the end of the chain. Returns the index of the new entry. """
# Create the new node head:
entryH = H(entry).digest()
nodeDigest = H(pack("L", len(self.entries)))
nodeDigest.update(entryH)
# Gather which other nodes are to be included:
for i in pointFingers(len(self.entries)):
nodeDigest.update(self.nodes[i])
nodeH = nodeDigest.digest()
self.entries.append(entryH)
self.nodes.append(nodeH)
return len(self.entries) - 1
def evidence(self, seq):
""" Gather evidence that the entry is at a sequence number in the chain. """
entries = {}
nodes = {}
# Add evidence as required
target = len(self.entries) - 1
while seq not in entries:
# Store the entry for the current target
entries[target] = self.entries[target]
nodes[target] = self.nodes[target]
# Store the nodes on which we depend
for i in pointFingers(target):
nodes[i] = self.nodes[i]
if i >= seq:
target = i
# Return all necessary entries and nodes
return entries, nodes
def check_evidence(head, seq, evidence, entry=None, node=None):
""" Check that a bundle of evidence is correct, and correspond to,
a known head, and optionally a known entry and known node. Returns
True or raises an exception. """
entries, nodes = evidence
head_index = max(entries.keys())
# CHECK 1: the head equals the head
if not (head == nodes[head_index]):
raise Exception("Wrong Head")
# CHECK 2: all the hashes match
target = head_index
while target != seq:
new_target = target
# Make the digest
d = H(pack("L", target))
d.update(entries[target])
for i in pointFingers(target):
d.update(nodes[i])
if i >= seq:
new_target = i
if d.digest() != nodes[target]:
raise Exception("Broken Chain")
target = new_target
# CHECK 3: is the node correct?
if node:
if not (node == nodes[seq]):
raise Exception("Wrong end node")
# CHECK 4: is the actual entry correct?
if entry:
if not (H(entry).digest() == entries[seq]):
raise Exception("Wrong end entry")
return True
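# A minimal usage sketch (hypothetical helper; string entries follow the
# module's existing Python 2 str usage): append two entries, then prove and
# verify membership of the second entry against the current head.
def _example_chain_usage():
    c = chain()
    c.add("alpha")
    seq = c.add("beta")
    proof = c.evidence(seq)
    assert check_evidence(c.head(), seq, proof, entry="beta")
    return proof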
|
bsd-2-clause
| 8,261,124,379,964,596,000
| 27.079365
| 89
| 0.574901
| false
| 4.128355
| false
| false
| false
|
MeadowHillSoftware/Nativity-in-Bits
|
NiB.py
|
1
|
56126
|
# Nativity in Bits 0.1.5
# Copyright 2008, 2009 Meadow Hill Software
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from random import randrange
character = {}
def diceRoll(number, die):
rolls = []
num = 0
die += 1
while num < number:
roll = randrange(1, die)
rolls.append(roll)
num += 1
    # sum every roll to get the total of all dice
    total = 0
    for roll in rolls:
        total += roll
    return total
def neutrality(morals):
global character
if morals == "Neutral":
character["Alignment"] = "True " + morals
else:
character["Alignment"] = "Neutral " + morals
def humanCommunity():
number = randrange(1, 101)
global character
if number < 6:
character["Community"] = "Small Tribe"
elif number < 11:
character["Community"] = "Religious, Arcane, Monastic, or Military Compound"
elif number < 21:
character["Community"] = "Frontier Homestead"
elif number < 36:
character["Community"] = "Thorp"
elif number < 56:
character["Community"] = "Hamlet"
elif number < 76:
character["Community"] = "Village"
elif number < 81:
character["Community"] = "Small Town"
elif number < 86:
character["Community"] = "Large Town"
elif number < 91:
character["Community"] = "Small City"
elif number < 96:
character["Community"] = "Large City"
else:
character["Community"] = "Metropolis"
def dwarvenCommunity():
number = randrange(1, 91)
global character
if number < 11:
character["Community"] = "Single-Family Redoubt"
elif number < 21:
character["Community"] = "Prospecting Camp"
elif number < 31:
character["Community"] = "Small Mine"
elif number < 46:
character["Community"] = "Large Mine"
elif number < 66:
character["Community"] = "Delve"
else:
character["Community"] = "Large Delve"
def elvenCommunity():
number = randrange(1, 96)
global character
if number < 51:
character["Community"] = "Encampment"
elif number < 86:
character["Community"] = "Village"
else:
character["Community"] = "City"
def ethics(morals):
global character
number = randrange(1, 7)
if number < 3:
character["Alignment"] = "Lawful " + morals
elif number < 5:
neutrality(morals)
else:
character["Alignment"] = "Chaotic " + morals
def nonlawfulEthics(morals):
global character
number = randrange(1, 5)
if number < 3:
character["Alignment"] = "Chaotic " + morals
else:
neutrality(morals)
def dwarvenEthics(morals):
global character
number = randrange(1, 97)
if number < 66:
character["Alignment"] = "Lawful " + morals
elif number < 86:
neutrality(morals)
else:
character["Alignment"] = "Chaotic " + morals
def nonlawfulDwarf(morals):
global character
number = randrange(1, 37)
if number < 26:
neutrality(morals)
else:
character["Alignment"] = "Chaotic " + morals
def elvenEthics(morals):
global character
number = randrange(1, 97)
if number < 66:
character["Alignment"] = "Chaotic " + morals
elif number < 86:
neutrality(morals)
else:
character["Alignment"] = "Lawful " + morals
def nonlawfulElf(morals):
global character
number = randrange(1, 86)
if number < 66:
character["Alignment"] = "Chaotic " + morals
else:
neutrality(morals)
def hinEthics(morals):
global character
number = randrange(1, 101)
if number < 61:
neutrality(morals)
elif number < 81:
character["Alignment"] = "Chaotic " + morals
else:
character["Alignment"] = "Lawful " + morals
def nonlawfulHin(morals):
global character
number = randrange(1, 81)
if number < 61:
neutrality(morals)
else:
character["Alignment"] = "Chaotic " + morals
def specialist():
global character
align = character["Alignment"]
number = randrange(1, 101)
if align == "Lawful Good":
if number < 52:
character["Class"] = "Abjurer"
elif number < 54:
character["Class"] = "Conjurer"
elif number < 69:
character["Class"] = "Diviner"
elif number < 73:
character["Class"] = "Enchanter"
elif number < 85:
character["Class"] = "Evoker"
elif number < 89:
character["Class"] = "Illusionist"
elif number < 97:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "Lawful Neutral":
if number < 18:
character["Class"] = "Abjurer"
elif number < 23:
character["Class"] = "Conjurer"
elif number < 71:
character["Class"] = "Diviner"
elif number < 75:
character["Class"] = "Enchanter"
elif number < 89:
character["Class"] = "Evoker"
elif number < 93:
character["Class"] = "Illusionist"
elif number < 97:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "Lawful Evil":
if number < 12:
character["Class"] = "Abjurer"
elif number < 18:
character["Class"] = "Conjurer"
elif number < 38:
character["Class"] = "Diviner"
elif number < 43:
character["Class"] = "Enchanter"
elif number < 59:
character["Class"] = "Evoker"
elif number < 64:
character["Class"] = "Illusionist"
elif number < 96:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "Neutral Good":
if number < 24:
character["Class"] = "Abjurer"
elif number < 31:
character["Class"] = "Conjurer"
elif number < 38:
character["Class"] = "Diviner"
elif number < 49:
character["Class"] = "Enchanter"
elif number < 67:
character["Class"] = "Evoker"
elif number < 78:
character["Class"] = "Illusionist"
elif number < 90:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "True Neutral":
if number < 8:
character["Class"] = "Abjurer"
elif number < 22:
character["Class"] = "Conjurer"
elif number < 42:
character["Class"] = "Diviner"
elif number < 54:
character["Class"] = "Enchanter"
elif number < 73:
character["Class"] = "Evoker"
elif number < 84:
character["Class"] = "Illusionist"
elif number < 90:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "Neutral Evil":
if number < 4:
character["Class"] = "Abjurer"
elif number < 16:
character["Class"] = "Conjurer"
elif number < 22:
character["Class"] = "Diviner"
elif number < 32:
character["Class"] = "Enchanter"
elif number < 48:
character["Class"] = "Evoker"
elif number < 58:
character["Class"] = "Illusionist"
elif number < 91:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "Chaotic Good":
if number < 8:
character["Class"] = "Abjurer"
elif number < 20:
character["Class"] = "Conjurer"
elif number < 22:
character["Class"] = "Diviner"
elif number < 43:
character["Class"] = "Enchanter"
elif number < 53:
character["Class"] = "Evoker"
elif number < 74:
character["Class"] = "Illusionist"
elif number < 80:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "Chaotic Neutral":
if number < 3:
character["Class"] = "Abjurer"
elif number < 26:
character["Class"] = "Conjurer"
elif number < 32:
character["Class"] = "Diviner"
elif number < 51:
character["Class"] = "Enchanter"
elif number < 60:
character["Class"] = "Evoker"
elif number < 79:
character["Class"] = "Illusionist"
elif number < 82:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
else:
if number < 2:
character["Class"] = "Abjurer"
elif number < 23:
character["Class"] = "Conjurer"
elif number < 25:
character["Class"] = "Diviner"
elif number < 42:
character["Class"] = "Enchanter"
elif number < 50:
character["Class"] = "Evoker"
elif number < 67:
character["Class"] = "Illusionist"
elif number < 84:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
def write_file():
stats = file("adventurer.txt", "w")
stats.write("Generated by Nativity in Bits 0.1.5\nSee the Hero Builder's Guidebook (pg. 38) and Player's Handbook II (pg. 136) for more information about some of these terms.\n\nAdventurer Statistics\n")
stats.write("-----------------------------------\n")
stats.write("Class = " + character["Class"] + "\n")
stats.write("Race = " + character["Race"] + "\n")
stats.write("Alignment = " + character["Alignment"] + "\n")
stats.write("Age = " + character["Age"] + "\n")
stats.write("Gender = " + character["Gender"] + "\n")
stats.write("Height = " + character["Height"] + "\n")
stats.write("Temperature Zone = " + character["Temperature Zone"] + "\n")
stats.write("Terrain = " + character["Terrain"] + "\n")
stats.write("Community = " + character["Community"] + "\n")
stats.write("Family Economic Status = " + character["Family Economic Status"] + "\n")
stats.write("Family Social Standing = " + character["Family Social Standing"] + "\n")
stats.write("Family Defense Readiness = " + character["Family Defense Readiness"] + "\n")
stats.write("Family Private Ethics = " + character["Family Private Ethics"] + "\n")
stats.write("Family Public Ethics = " + character["Family Public Ethics"] + "\n")
stats.write("Family Religious Commitment = " + character["Family Religious Commitment"] + "\n")
stats.write("Family Reputation = " + character["Family Reputation"] + "\n")
stats.write("Family Political Views = " + character["Family Political Views"] + "\n")
stats.write("Family Power Structure = " + character["Family Power Structure"] + "\n")
stats.write("Ancestors of Note = " + character["Ancestors of Note"] + "\n")
stats.write("Early Childhood Instruction = " + character["Early Childhood Instruction"] + "\n")
stats.write("Formal Education = " + character["Formal Education"] + "\n")
stats.write("Learning a Trade = " + character["Learning a Trade"] + "\n")
stats.write("Early Childhood Events = " + character["Early Childhood Events"] + "\n")
stats.write("Youth Events = " + character["Youth Events"] + "\n")
stats.write("Pivotal Events = " + character["Pivotal Events"] + "\n")
stats.write("Parents = " + character["Parents"] + "\n")
stats.write("Siblings = " + character["Siblings"] + "\n")
stats.write("Grandparents = " + character["Grandparents"] + "\n")
stats.write("Extended Family = " + character["Extended Family"] + "\n")
stats.write("Friends = " + character["Friends"] + "\n")
stats.write("Enemies = " + character["Enemies"] + "\n")
stats.write("Instructors = " + character["Instructors"] + "\n")
stats.write("Personality Archetype = " + character["Archetype"] + "\n")
stats.write("Personality Traits = " + character["Traits"] + "\n")
stats.close()
number = randrange(1, 101)
if number < 51:
character["Alignment"] = "Good"
elif number < 91:
character["Alignment"] = "Neutral"
else:
character["Alignment"] = "Evil"
number = randrange(1, 101)
if character["Alignment"] == "Good":
if number < 6:
character["Class"] = "Barbarian"
elif number < 11:
character["Class"] = "Bard"
elif number < 31:
character["Class"] = "Cleric"
elif number < 36:
character["Class"] = "Druid"
elif number < 46:
character["Class"] = "Fighter"
elif number < 51:
character["Class"] = "Monk"
elif number < 56:
character["Class"] = "Paladin"
elif number < 66:
character["Class"] = "Ranger"
elif number < 76:
character["Class"] = "Rogue"
elif number < 81:
character["Class"] = "Sorcerer"
else:
character["Class"] = "Wizard"
elif character["Alignment"] == "Neutral":
if number < 6:
character["Class"] = "Barbarian"
elif number < 11:
character["Class"] = "Bard"
elif number < 16:
character["Class"] = "Cleric"
elif number < 26:
character["Class"] = "Druid"
elif number < 46:
character["Class"] = "Fighter"
elif number < 51:
character["Class"] = "Monk"
elif number < 56:
character["Class"] = "Ranger"
elif number < 76:
character["Class"] = "Rogue"
elif number < 81:
character["Class"] = "Sorcerer"
else:
character["Class"] = "Wizard"
else:
if number < 11:
character["Class"] = "Barbarian"
elif number < 16:
character["Class"] = "Bard"
elif number < 36:
character["Class"] = "Cleric"
elif number < 41:
character["Class"] = "Druid"
elif number < 51:
character["Class"] = "Fighter"
elif number < 56:
character["Class"] = "Monk"
elif number < 61:
character["Class"] = "Ranger"
elif number < 81:
character["Class"] = "Rogue"
elif number < 86:
character["Class"] = "Sorcerer"
else:
character["Class"] = "Wizard"
if character["Alignment"] == "Good":
if character["Class"] == "Barbarian":
#table figures multiplied by 75. Assuming one-third of 1% of good barbarians are gnomes, this yields 25 good gnome barbarians.
number = randrange(1, 7376)
if number < 151:
character["Race"] = "Dwarf"
elif number < 2551:
character["Race"] = "Elf"
elif number < 2576:
character["Race"] = "Gnome"
elif number < 2651:
character["Race"] = "Half-Elf"
elif number < 2726:
character["Race"] = "Halfling"
elif number < 4601:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Bard":
#table figures multiplied by 3. This yields 18 good gnome bards.
number = randrange(1, 319)
if number < 16:
character["Race"] = "Dwarf"
elif number < 112:
character["Race"] = "Elf"
elif number < 130:
character["Race"] = "Gnome"
elif number < 157:
character["Race"] = "Half-Elf"
elif number < 166:
character["Race"] = "Halfling"
elif number < 169:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Cleric":
#table figures multiplied by 5. This yields 50 good gnome clerics.
number = randrange(1, 471)
if number < 116:
character["Race"] = "Dwarf"
elif number < 201:
character["Race"] = "Elf"
elif number < 251:
character["Race"] = "Gnome"
elif number < 276:
character["Race"] = "Half-Elf"
elif number < 341:
character["Race"] = "Halfling"
elif number < 346:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Druid":
#table figures multiplied by 36. Assuming one-third of 1% of good druids are dwarves, this yields 12 good dwarf druids.
number = randrange(1, 3577)
if number < 13:
character["Race"] = "Dwarf"
elif number < 1129:
character["Race"] = "Elf"
elif number < 1345:
character["Race"] = "Gnome"
elif number < 1669:
character["Race"] = "Half-Elf"
elif number < 1741:
character["Race"] = "Halfling"
elif number < 1777:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Fighter":
#table figures multiplied by 25. This yields 25 good gnome fighters.
number = randrange(1, 2426)
if number < 1026:
character["Race"] = "Dwarf"
elif number < 1176:
character["Race"] = "Elf"
elif number < 1201:
character["Race"] = "Gnome"
elif number < 1251:
character["Race"] = "Half-Elf"
elif number < 1301:
character["Race"] = "Halfling"
elif number < 1426:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Monk":
#table figures multiplied by 75. Assuming one-third of 1% of good monks are gnomes, this yields 25 good gnome monks.
number = randrange(1, 7151)
if number < 76:
character["Race"] = "Dwarf"
elif number < 826:
character["Race"] = "Elf"
elif number < 851:
character["Race"] = "Gnome"
elif number < 1226:
character["Race"] = "Half-Elf"
elif number < 1376:
character["Race"] = "Halfling"
elif number < 1751:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Paladin":
#table figures multiplied by 3. Assuming one-third of 1% of paladins are elves, this yields 1 elf paladin.
number = randrange(1, 263)
if number < 34:
character["Race"] = "Dwarf"
elif number < 35:
character["Race"] = "Elf"
elif number < 38:
character["Race"] = "Gnome"
elif number < 53:
character["Race"] = "Half-Elf"
elif number < 59:
character["Race"] = "Halfling"
elif number < 62:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Ranger":
#table figures multiplied by 9. This yields 45 good dwarf rangers.
number = randrange(1, 874)
if number < 46:
character["Race"] = "Dwarf"
elif number < 325:
character["Race"] = "Elf"
elif number < 379:
character["Race"] = "Gnome"
elif number < 514:
character["Race"] = "Half-Elf"
elif number < 532:
character["Race"] = "Halfling"
elif number < 577:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Rogue":
#table figures multiplied by 5. This yields 30 good gnome rogues.
number = randrange(1, 481)
if number < 31:
character["Race"] = "Dwarf"
elif number < 96:
character["Race"] = "Elf"
elif number < 126:
character["Race"] = "Gnome"
elif number < 176:
character["Race"] = "Half-Elf"
elif number < 361:
character["Race"] = "Halfling"
elif number < 386:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Sorcerer":
#table figures multiplied by 9. This yields 36 good dwarf sorcerers.
number = randrange(1, 838)
if number < 37:
character["Race"] = "Dwarf"
elif number < 316:
character["Race"] = "Elf"
elif number < 343:
character["Race"] = "Gnome"
elif number < 388:
character["Race"] = "Half-Elf"
elif number < 487:
character["Race"] = "Halfling"
elif number < 505:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Wizard":
#table figures multiplied by 12. This yields 12 good dwarf wizards.
number = randrange(1, 1141)
if number < 13:
character["Race"] = "Dwarf"
elif number < 493:
character["Race"] = "Elf"
elif number < 565:
character["Race"] = "Gnome"
elif number < 685:
character["Race"] = "Half-Elf"
elif number < 793:
character["Race"] = "Halfling"
elif number < 805:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Alignment"] == "Neutral":
if character["Class"] == "Barbarian":
#gnomes drop by a factor of 5. This yields 5 neutral gnome barbarians.
number = randrange(1, 6531)
if number < 151:
character["Race"] = "Dwarf"
elif number < 1051:
character["Race"] = "Elf"
elif number < 1056:
character["Race"] = "Gnome"
elif number < 1206:
character["Race"] = "Half-Elf"
elif number < 1431:
character["Race"] = "Halfling"
elif number < 4356:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Bard":
#gnomes drop by a factor of 3. This yields 6 neutral gnome bards.
number = randrange(1, 268)
if number < 10:
character["Race"] = "Dwarf"
elif number < 64:
character["Race"] = "Elf"
elif number < 70:
character["Race"] = "Gnome"
elif number < 100:
character["Race"] = "Half-Elf"
elif number < 115:
character["Race"] = "Halfling"
elif number < 121:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Cleric":
#gnomes drop by a factor of 10. This yields 5 neutral gnome clerics.
number = randrange(1, 451)
if number < 131:
character["Race"] = "Dwarf"
elif number < 191:
character["Race"] = "Elf"
elif number < 196:
character["Race"] = "Gnome"
elif number < 241:
character["Race"] = "Half-Elf"
elif number < 301:
character["Race"] = "Halfling"
elif number < 311:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Druid":
#dwarves drop by one-third. This yields 8 neutral dwarf druids.
number = randrange(1, 3177)
if number < 9:
character["Race"] = "Dwarf"
elif number < 1125:
character["Race"] = "Elf"
elif number < 1161:
character["Race"] = "Gnome"
elif number < 1341:
character["Race"] = "Half-Elf"
elif number < 1413:
character["Race"] = "Halfling"
elif number < 1449:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Fighter":
#gnomes drop by a factor of 5. This yields 5 neutral gnome fighters.
number = randrange(1, 2406)
if number < 851:
character["Race"] = "Dwarf"
elif number < 1026:
character["Race"] = "Elf"
elif number < 1031:
character["Race"] = "Gnome"
elif number < 1156:
character["Race"] = "Half-Elf"
elif number < 1206:
character["Race"] = "Halfling"
elif number < 1456:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Monk":
#gnomes drop by a factor of 5. This yields 5 neutral gnome monks.
number = randrange(1, 7556)
if number < 51:
character["Race"] = "Dwarf"
elif number < 276:
character["Race"] = "Elf"
elif number < 281:
character["Race"] = "Gnome"
elif number < 1031:
character["Race"] = "Half-Elf"
elif number < 1181:
character["Race"] = "Halfling"
elif number < 1931:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Ranger":
#dwarves drop by a factor of 5. This yields 9 neutral dwarf rangers.
number = randrange(1, 865)
if number < 10:
character["Race"] = "Dwarf"
elif number < 325:
character["Race"] = "Elf"
elif number < 343:
character["Race"] = "Gnome"
elif number < 496:
character["Race"] = "Half-Elf"
elif number < 514:
character["Race"] = "Halfling"
elif number < 604:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Rogue":
#gnomes drop by a factor of 6. This yields 5 neutral gnome rogues.
number = randrange(1, 486)
if number < 21:
character["Race"] = "Dwarf"
elif number < 46:
character["Race"] = "Elf"
elif number < 51:
character["Race"] = "Gnome"
elif number < 126:
character["Race"] = "Half-Elf"
elif number < 316:
character["Race"] = "Halfling"
elif number < 366:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Sorcerer":
#dwarves drop by a factor of 4. This yields 9 neutral dwarf sorcerers.
number = randrange(1, 856)
if number < 10:
character["Race"] = "Dwarf"
elif number < 136:
character["Race"] = "Elf"
elif number < 145:
character["Race"] = "Gnome"
elif number < 280:
character["Race"] = "Half-Elf"
elif number < 388:
character["Race"] = "Halfling"
elif number < 433:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Wizard":
#dwarves drop by one-third. This yields 8 neutral dwarf wizards.
number = randrange(1, 1173)
if number < 9:
character["Race"] = "Dwarf"
elif number < 345:
character["Race"] = "Elf"
elif number < 357:
character["Race"] = "Gnome"
elif number < 537:
character["Race"] = "Half-Elf"
elif number < 597:
character["Race"] = "Halfling"
elif number < 609:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
else:
if character["Class"] == "Barbarian":
#gnomes drop by another factor of 5. This yields 1 evil gnome barbarian.
        number = randrange(1, 2944)
if number < 18:
character["Race"] = "Dwarf"
elif number < 243:
character["Race"] = "Elf"
elif number < 244:
character["Race"] = "Gnome"
elif number < 319:
character["Race"] = "Half-Elf"
elif number < 469:
character["Race"] = "Halfling"
elif number < 2194:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Bard":
#gnomes drop by a factor of 5. This yields 1 evil gnome bard.
number = randrange(1, 120)
if number < 2:
character["Race"] = "Dwarf"
elif number < 11:
character["Race"] = "Elf"
elif number < 12:
character["Race"] = "Gnome"
elif number < 15:
character["Race"] = "Half-Elf"
elif number < 21:
character["Race"] = "Halfling"
elif number < 90:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Cleric":
#gnomes drop by a factor of 5. This yields 1 evil gnome cleric.
number = randrange(1, 282)
if number < 16:
character["Race"] = "Dwarf"
elif number < 41:
character["Race"] = "Elf"
elif number < 42:
character["Race"] = "Gnome"
elif number < 92:
character["Race"] = "Half-Elf"
elif number < 112:
character["Race"] = "Halfling"
elif number < 127:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Druid":
#dwarves drop by a factor of 9. This yields 1 evil dwarf druid.
number = randrange(1, 2025)
if number < 2:
character["Race"] = "Dwarf"
elif number < 73:
character["Race"] = "Elf"
elif number < 81:
character["Race"] = "Gnome"
elif number < 117:
character["Race"] = "Half-Elf"
elif number < 153:
character["Race"] = "Halfling"
elif number < 225:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Fighter":
#gnomes drop by another factor of 5. This yields 1 evil gnome fighter.
number = randrange(1, 1327)
if number < 101:
character["Race"] = "Dwarf"
elif number < 176:
character["Race"] = "Elf"
elif number < 177:
character["Race"] = "Gnome"
elif number < 302:
character["Race"] = "Half-Elf"
elif number < 352:
character["Race"] = "Halfling"
elif number < 577:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Monk":
#gnomes drop by another factor of 5. This yields 1 evil gnome monk.
number = randrange(1, 6889)
if number < 7:
character["Race"] = "Dwarf"
elif number < 63:
character["Race"] = "Elf"
elif number < 64:
character["Race"] = "Gnome"
elif number < 814:
character["Race"] = "Half-Elf"
elif number < 889:
character["Race"] = "Halfling"
elif number < 1639:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Ranger":
#dwarves drop by a factor of 9. This yields 1 evil dwarf ranger.
number = randrange(1, 627)
if number < 2:
character["Race"] = "Dwarf"
elif number < 101:
character["Race"] = "Elf"
elif number < 105:
character["Race"] = "Gnome"
elif number < 258:
character["Race"] = "Half-Elf"
elif number < 276:
character["Race"] = "Halfling"
elif number < 357:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Rogue":
#gnomes drop by a factor of 5. This yields 1 evil gnome rogue.
number = randrange(1, 352)
if number < 6:
character["Race"] = "Dwarf"
elif number < 16:
character["Race"] = "Elf"
elif number < 17:
character["Race"] = "Gnome"
elif number < 92:
character["Race"] = "Half-Elf"
elif number < 202:
character["Race"] = "Halfling"
elif number < 252:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Sorcerer":
#dwarves drop by a factor of 9. This yields 1 evil dwarf sorcerer.
number = randrange(1, 616)
if number < 2:
character["Race"] = "Dwarf"
elif number < 11:
character["Race"] = "Elf"
elif number < 13:
character["Race"] = "Gnome"
elif number < 148:
character["Race"] = "Half-Elf"
elif number < 211:
character["Race"] = "Halfling"
elif number < 256:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Wizard":
#dwarves drop by a factor of 9. This yields 1 evil dwarf wizard.
number = randrange(1, 944)
if number < 2:
character["Race"] = "Dwarf"
elif number < 134:
character["Race"] = "Elf"
elif number < 136:
character["Race"] = "Gnome"
elif number < 316:
character["Race"] = "Half-Elf"
elif number < 340:
character["Race"] = "Halfling"
elif number < 344:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
job = character["Class"]
morals = character["Alignment"]
race = character["Race"]
if job == "Bard" or job == "Barbarian":
if race == "Dwarf":
nonlawfulDwarf(morals)
elif race == "Halfling":
nonlawfulHin(morals)
elif race == "Gnome" or race == "Human":
nonlawfulEthics(morals)
else:
nonlawfulElf(morals)
elif job == "Druid":
if morals != "Neutral":
character["Alignment"] = "Neutral " + morals
else:
if race == "Dwarf":
dwarvenEthics(morals)
elif race == "Halfling":
hinEthics(morals)
elif race == "Gnome" or race == "Human":
ethics(morals)
else:
elvenEthics(morals)
elif job == "Monk":
character["Alignment"] = "Lawful " + morals
elif job == "Paladin":
character["Alignment"] = "Lawful Good"
else:
if race == "Dwarf":
dwarvenEthics(morals)
elif race == "Halfling":
hinEthics(morals)
elif race == "Gnome" or race == "Human":
ethics(morals)
else:
elvenEthics(morals)
if job == "Wizard":
number = randrange(1, 86)
if race == "Gnome":
if number < 66:
character["Class"] = "Illusionist"
else:
number = randrange(1, 86)
if number > 65:
specialist()
else:
if number > 65:
specialist()
number = randrange(1, 101)
if number < 16:
character["Temperature Zone"] = "Cold"
elif number > 65:
character["Temperature Zone"] = "Warm"
else:
character["Temperature Zone"] = "Temperate"
number = randrange(1, 101)
if number < 11:
character["Terrain"] = "Desert"
elif number < 31:
character["Terrain"] = "Plains"
elif number < 46:
character["Terrain"] = "Forest"
elif number < 61:
character["Terrain"] = "Hills"
elif number < 71:
character["Terrain"] = "Mountains"
elif number < 81:
character["Terrain"] = "Marsh"
elif number < 86:
character["Terrain"] = "Aquatic"
elif number < 91:
character["Terrain"] = "Underground"
else:
character["Terrain"] = "Nomadic"
if character["Race"] == "Dwarf":
number = randrange(1, 101)
if number < 11:
character["Community"] = "Single-Family Redoubt"
elif number < 21:
character["Community"] = "Prospecting Camp"
elif number < 31:
character["Community"] = "Small Mine"
elif number < 46:
character["Community"] = "Large Mine"
elif number < 66:
character["Community"] = "Delve"
elif number < 91:
character["Community"] = "Large Delve"
else:
humanCommunity()
value = character["Community"]
value = "Human Area: " + value
character["Community"] = value
elif character["Race"] == "Elf":
number = randrange(1, 101)
if number < 51:
character["Community"] = "Encampment"
elif number < 86:
character["Community"] = "Village"
elif number < 96:
character["Community"] = "City"
else:
humanCommunity()
value = character["Community"]
value = "Human Area: " + value
character["Community"] = value
elif character["Race"] == "Gnome":
number = randrange(1, 101)
if number < 11:
character["Community"] = "Solitary Family"
elif number < 41:
character["Community"] = "Cluster"
elif number < 71:
character["Community"] = "Gathering"
elif number < 81:
humanCommunity()
value = character["Community"]
value = "Human Area: " + value
character["Community"] = value
elif number < 91:
dwarvenCommunity()
value = character["Community"]
value = "Dwarven Area: " + value
character["Community"] = value
else:
elvenCommunity()
value = character["Community"]
value = "Elven Area: " + value
character["Community"] = value
elif character["Race"] == "Half-Elf":
number = randrange(1, 101)
if number < 21:
character["Community"] = "Fringe Community"
elif number < 86:
humanCommunity()
value = character["Community"]
value = "Human Area: " + value
character["Community"] = value
else:
elvenCommunity()
value = character["Community"]
value = "Elven Area: " + value
character["Community"] = value
elif character["Race"] == "Halfling":
number = randrange(1, 101)
if number < 31:
character["Community"] = "Clan"
elif number < 66:
character["Community"] = "Troupe"
elif number < 81:
character["Community"] = "Shire"
elif number < 91:
character["Community"] = "Town"
elif number < 96:
character["Community"] = "County"
else:
humanCommunity()
value = character["Community"]
value = "Human Area: " + value
character["Community"] = value
elif character["Race"] == "Half-Orc":
number = randrange(1, 101)
if number < 21:
character["Community"] = "Fringe Community"
elif number < 86:
humanCommunity()
value = character["Community"]
value = "Human Area: " + value
character["Community"] = value
else:
character["Community"] = "Orc-Dominated Area"
elif character["Race"] == "Human":
humanCommunity()
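# Family background rolls: economic status, social standing, defense readiness, private/public ethics, religious commitment, reputation, political views, and power structure.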
number = randrange(1, 101)
if number < 6:
character["Family Economic Status"] = "Orphan"
elif number < 16:
character["Family Economic Status"] = "Refugee"
elif number < 41:
character["Family Economic Status"] = "Poor"
elif number < 61:
character["Family Economic Status"] = "Moderate"
elif number < 76:
character["Family Economic Status"] = "Wealthy"
elif number < 81:
character["Family Economic Status"] = "Religious Order"
elif number < 86:
character["Family Economic Status"] = "Arcane Order"
elif number < 91:
character["Family Economic Status"] = "Monastic Order"
elif number < 96:
character["Family Economic Status"] = "Wealth Unimportant"
else:
character["Family Economic Status"] = "Military Support"
number = randrange(1, 101)
if number < 11:
character["Family Social Standing"] = "Newcomer"
elif number < 16:
character["Family Social Standing"] = "Criminal"
elif number < 21:
character["Family Social Standing"] = "Slave"
elif number < 46:
character["Family Social Standing"] = "Lower Class"
elif number < 66:
character["Family Social Standing"] = "Skilled Trade or Merchant Family"
elif number < 76:
character["Family Social Standing"] = "Positive Religious, Arcane, Monastic, or Military Affiliation"
elif number < 86:
character["Family Social Standing"] = "Negative Religious, Arcane, Monastic, or Military Affiliation"
elif number < 96:
character["Family Social Standing"] = "Upper Class"
else:
character["Family Social Standing"] = "Noble"
number = randrange(1, 101)
if number < 11:
character["Family Defense Readiness"] = "None"
elif number < 21:
character["Family Defense Readiness"] = "Low"
elif number < 41:
character["Family Defense Readiness"] = "Rudimentary"
elif number < 56:
character["Family Defense Readiness"] = "Medium"
elif number < 71:
character["Family Defense Readiness"] = "High"
elif number < 81:
character["Family Defense Readiness"] = "Outstanding"
elif number < 91:
character["Family Defense Readiness"] = "Hired"
elif number < 96:
character["Family Defense Readiness"] = "Magical"
else:
character["Family Defense Readiness"] = "Mixed"
number = randrange(1, 101)
if number < 26:
character["Family Private Ethics"] = "Neutral"
elif number < 51:
character["Family Private Ethics"] = "Fair"
elif number < 76:
character["Family Private Ethics"] = "Good"
elif number < 91:
character["Family Private Ethics"] = "Untrustworthy"
else:
character["Family Private Ethics"] = "Evil"
number = randrange(1, 101)
if number < 61:
character["Family Public Ethics"] = "Normal"
elif number < 76:
character["Family Public Ethics"] = "Undeserved"
elif number < 91:
character["Family Public Ethics"] = "Recent Change"
else:
character["Family Public Ethics"] = "Beyond Reproach/Beyond Contempt"
number = randrange(1, 101)
if number < 21:
character["Family Religious Commitment"] = "Neutral/Uninterested"
elif number < 41:
character["Family Religious Commitment"] = "Strong"
elif number < 61:
character["Family Religious Commitment"] = "Historical"
elif number < 71:
character["Family Religious Commitment"] = "Enmity"
elif number < 81:
character["Family Religious Commitment"] = "Participatory"
elif number < 86:
character["Family Religious Commitment"] = "Open Heretics"
elif number < 91:
character["Family Religious Commitment"] = "Hidden Heretics"
else:
character["Family Religious Commitment"] = "Mixed"
number = randrange(1, 101)
if number < 41:
character["Family Reputation"] = "Unknown"
elif number < 56:
character["Family Reputation"] = "Good"
elif number < 66:
character["Family Reputation"] = "Outstanding"
elif number < 76:
character["Family Reputation"] = "A Black Sheep or Two"
elif number < 91:
character["Family Reputation"] = "Mostly Bad"
else:
character["Family Reputation"] = "Bad"
number = randrange(1, 101)
if number < 16:
character["Family Political Views"] = "Apolitical"
elif number < 31:
character["Family Political Views"] = "Supportive"
elif number < 41:
character["Family Political Views"] = "Enfranchised"
elif number < 46:
character["Family Political Views"] = "Enfranchised Progressive"
elif number < 51:
character["Family Political Views"] = "Enfranchised Radical"
elif number < 66:
character["Family Political Views"] = "Loyal Opposition"
elif number < 76:
character["Family Political Views"] = "Dissatisfied"
elif number < 86:
character["Family Political Views"] = "Dissident"
elif number < 91:
character["Family Political Views"] = "Radical"
else:
character["Family Political Views"] = "Mixed"
number = randrange(1, 101)
if number < 11:
character["Family Power Structure"] = "Unorganized"
elif number < 31:
character["Family Power Structure"] = "Elders"
elif number < 41:
character["Family Power Structure"] = "Patriarchy"
elif number < 51:
character["Family Power Structure"] = "Matriarchy"
elif number < 61:
character["Family Power Structure"] = "Oligarchy"
elif number < 71:
character["Family Power Structure"] = "Meritocracy"
elif number < 91:
character["Family Power Structure"] = "Divided"
elif number < 96:
character["Family Power Structure"] = "External"
else:
character["Family Power Structure"] = "Domination"
number = randrange(1, 101)
if number < 50:
character["Ancestors of Note"] = "None"
elif number < 56:
character["Ancestors of Note"] = "Forgotten"
elif number < 61:
character["Ancestors of Note"] = "Immigrant"
elif number < 64:
character["Ancestors of Note"] = "Master Artisan"
elif number < 67:
character["Ancestors of Note"] = "Successful Merchant"
elif number < 70:
character["Ancestors of Note"] = "Unsuccessful Merchant"
elif number < 73:
character["Ancestors of Note"] = "Cleric"
elif number < 76:
character["Ancestors of Note"] = "Arcanist"
elif number < 78:
character["Ancestors of Note"] = "Magic Item"
elif number == 78:
character["Ancestors of Note"] = "Spell Creator"
elif number == 79:
character["Ancestors of Note"] = "Item Creator"
elif number < 82:
character["Ancestors of Note"] = "Victorious Hero"
elif number < 84:
character["Ancestors of Note"] = "Defeated Hero"
elif number == 84:
character["Ancestors of Note"] = "Successful Founder"
elif number == 85:
character["Ancestors of Note"] = "Unsuccessful Founder"
elif number == 86:
character["Ancestors of Note"] = "Successful Leader"
elif number == 87:
character["Ancestors of Note"] = "Unsuccessful Leader"
elif number < 91:
character["Ancestors of Note"] = "Successful Hero"
elif number == 91:
character["Ancestors of Note"] = "Disbelieved Hero"
elif number == 92:
character["Ancestors of Note"] = "False Hero"
elif number == 93:
character["Ancestors of Note"] = "Exile"
elif number == 94:
character["Ancestors of Note"] = "Failed Rebel"
elif number == 95:
character["Ancestors of Note"] = "Traitor"
elif number == 96:
character["Ancestors of Note"] = "Cultist"
elif number == 97:
character["Ancestors of Note"] = "Villain"
elif number == 98:
character["Ancestors of Note"] = "Prophecy"
elif number == 99:
character["Ancestors of Note"] = "God-Touched"
elif number == 100:
character["Ancestors of Note"] = "Otherworldly"
number = randrange(1, 101)
if number < 21:
character["Early Childhood Instruction"] = "Outdoors"
elif number < 41:
character["Early Childhood Instruction"] = "Book Learning"
elif number < 56:
character["Early Childhood Instruction"] = "Religious"
elif number < 66:
character["Early Childhood Instruction"] = "Language"
elif number < 76:
character["Early Childhood Instruction"] = "Arts"
elif number < 86:
character["Early Childhood Instruction"] = "Multicultural"
elif number < 96:
character["Early Childhood Instruction"] = "Business/Politics"
else:
character["Early Childhood Instruction"] = "Magic"
number = randrange(1, 101)
if number < 26:
character["Formal Education"] = "Agriculture"
elif number < 31:
character["Formal Education"] = "History"
elif number < 36:
character["Formal Education"] = "Politics"
elif number < 41:
character["Formal Education"] = "Religion"
elif number < 46:
character["Formal Education"] = "Natural History"
elif number < 51:
character["Formal Education"] = "Multicultural"
elif number < 56:
character["Formal Education"] = "Arts"
elif number < 61:
character["Formal Education"] = "Literature"
elif number < 66:
character["Formal Education"] = "Math"
elif number < 71:
character["Formal Education"] = "Advanced Math"
elif number < 76:
character["Formal Education"] = "Astronomy"
elif number < 86:
character["Formal Education"] = "Finishing School"
elif number < 96:
character["Formal Education"] = "School of Hard Knocks"
else:
character["Formal Education"] = "Magic"
number = randrange(1, 101)
if number < 21:
character["Learning a Trade"] = "Farmer"
elif number < 31:
character["Learning a Trade"] = "Hunter/Trapper"
elif number < 41:
character["Learning a Trade"] = "Craft"
elif number < 51:
character["Learning a Trade"] = "Religious"
elif number < 61:
character["Learning a Trade"] = "Politics"
elif number < 71:
character["Learning a Trade"] = "Healing"
elif number < 76:
character["Learning a Trade"] = "Specialized"
elif number < 86:
character["Learning a Trade"] = "Military Training"
elif number < 91:
character["Learning a Trade"] = "Special Military Training"
elif number < 96:
character["Learning a Trade"] = "Monastery/Knightly Order"
else:
character["Learning a Trade"] = "Arcanist"
number = randrange(1, 101)
if number < 16:
character["Early Childhood Events"] = "Survived Childhood Danger"
elif number < 31:
character["Early Childhood Events"] = "Survived Major Danger to Community"
elif number < 46:
character["Early Childhood Events"] = "Undertook a Long Journey"
elif number < 56:
character["Early Childhood Events"] = "Witness"
elif number < 61:
character["Early Childhood Events"] = "Astronomical Event"
elif number < 66:
character["Early Childhood Events"] = "Personal Epiphany"
elif number < 76:
character["Early Childhood Events"] = "Became a Refugee"
elif number < 86:
character["Early Childhood Events"] = "Death in the Family"
elif number < 96:
character["Early Childhood Events"] = "Illness"
else:
character["Early Childhood Events"] = "Injury or Physical Defect"
number = randrange(1, 101)
if number < 16:
character["Youth Events"] = "Battle"
elif number < 26:
character["Youth Events"] = "Adventure"
elif number < 36:
character["Youth Events"] = "Politics"
elif number < 51:
character["Youth Events"] = "Great Romance"
elif number < 61:
character["Youth Events"] = "Religion"
elif number < 71:
character["Youth Events"] = "Arcane"
elif number < 81:
character["Youth Events"] = "Healing"
elif number < 96:
character["Youth Events"] = "Crime"
else:
character["Youth Events"] = "Discovery"
number = randrange(1, 101)
if number < 56:
character["Pivotal Events"] = "No Pivotal Events"
elif number < 66:
character["Pivotal Events"] = "Refugee"
elif number < 71:
character["Pivotal Events"] = "Cultural Shift"
elif number < 76:
character["Pivotal Events"] = "Under Siege"
elif number < 81:
character["Pivotal Events"] = "Climactic Battle"
elif number < 86:
character["Pivotal Events"] = "All-Out War"
elif number < 96:
character["Pivotal Events"] = "Community Crisis"
else:
character["Pivotal Events"] = "Religious Awakening"
number = randrange(1, 101)
if number < 56:
character["Parents"] = "Two Living Parents"
elif number < 66:
character["Parents"] = "One Living Parent"
elif number < 71:
character["Parents"] = "Both Parents Dead"
elif number < 81:
character["Parents"] = "One Ill"
elif number < 86:
character["Parents"] = "Both Ill"
elif number < 96:
character["Parents"] = "Parents Lost or Unknown"
else:
character["Parents"] = "Adoptive or Foster Parents"
number = randrange(1, 101)
if number < 26:
character["Siblings"] = "No Siblings"
elif number < 46:
sibs = randrange(1, 5)
character["Siblings"] = "Oldest (Younger Siblings: %d)" % sibs
elif number < 76:
sibs1 = randrange(1, 4)
sibs2 = randrange(1, 4)
character["Siblings"] = "Middle (Younger Siblings: %d, Older Siblings: %d)" % (sibs1, sibs2)
elif number < 96:
sibs = randrange(1, 5)
character["Siblings"] = "Youngest (Older Siblings: %d)" % sibs
else:
character["Siblings"] = "Twin"
number = randrange(1, 101)
if number < 21:
character["Grandparents"] = "No Grandparents"
elif number < 31:
character["Grandparents"] = "Mother's Parents Alive"
elif number < 41:
character["Grandparents"] = "Father's Parents Alive"
elif number < 61:
character["Grandparents"] = "One Grandparent on Each Side"
elif number < 71:
character["Grandparents"] = "Three Grandparents Alive"
elif number < 81:
character["Grandparents"] = "Great-Grandparent Alive"
else:
character["Grandparents"] = "Grandparents Unknown"
number = randrange(1, 101)
if number < 11:
character["Extended Family"] = "None"
elif number < 21:
character["Extended Family"] = "No Known Relatives"
elif number < 56:
relatives = randrange(1, 11)
character["Extended Family"] = "%d Living Relatives" % relatives
elif number < 91:
relatives = randrange(1, 13)
relatives = relatives + randrange(1, 13)
character["Extended Family"] = "%d Living Relatives" % relatives
else:
character["Extended Family"] = "Huge Extended Family"
number = randrange(1, 101)
if number < 16:
character["Friends"] = "No Friends"
elif number < 31:
character["Friends"] = "Lost"
elif number < 51:
character["Friends"] = "Few"
elif number < 81:
character["Friends"] = "Some"
else:
character["Friends"] = "Many"
number = randrange(1, 101)
if number < 16:
character["Enemies"] = "No Enemies. Yet..."
elif number < 26:
character["Enemies"] = "Minor Childhood Enemy"
elif number < 31:
character["Enemies"] = "Jilted Lover"
elif number < 36:
character["Enemies"] = "Jilted Lover's Friend or Relative"
elif number < 41:
character["Enemies"] = "Romantic Rival"
elif number < 51:
character["Enemies"] = "Enemy of the Family"
elif number < 56:
character["Enemies"] = "The Enemy of My Friend Is My Enemy"
elif number < 61:
character["Enemies"] = "Social Rival"
elif number < 66:
character["Enemies"] = "Villain"
elif number < 71:
character["Enemies"] = "Monster"
elif number < 76:
character["Enemies"] = "Alignment Enemy"
elif number < 81:
character["Enemies"] = "Political Enemy"
elif number < 86:
character["Enemies"] = "Arcane Rival"
elif number < 91:
character["Enemies"] = "Diabolic Enemy"
elif number < 96:
character["Enemies"] = "Enemy Within"
else:
character["Enemies"] = "Imaginary Foe"
number = randrange(1, 101)
if number < 16:
character["Instructors"] = "No Instructors of Note"
elif number < 41:
character["Instructors"] = "Basic"
elif number < 51:
character["Instructors"] = "Advanced"
elif number < 56:
character["Instructors"] = "Angry"
elif number < 61:
character["Instructors"] = "Vanished"
elif number < 66:
character["Instructors"] = "Favor"
elif number < 81:
character["Instructors"] = "Unrelated"
elif number < 91:
character["Instructors"] = "Lower Class"
elif number < 96:
character["Instructors"] = "Other Race"
else:
character["Instructors"] = "Exotic"
number = randrange(1, 24)
if number == 1:
character["Archetype"] = "Agent"
elif number == 2:
character["Archetype"] = "Challenger"
elif number == 3:
character["Archetype"] = "Companion"
elif number == 4:
character["Archetype"] = "Crusader"
elif number == 5:
character["Archetype"] = "Daredevil"
elif number == 6:
character["Archetype"] = "Explorer"
elif number == 7:
character["Archetype"] = "Innocent"
elif number == 8:
character["Archetype"] = "Leader"
elif number == 9:
character["Archetype"] = "Martyr"
elif number == 10:
character["Archetype"] = "Mercentary"
elif number == 11:
character["Archetype"] = "Orphan"
elif number == 12:
character["Archetype"] = "Prophet"
elif number == 13:
character["Archetype"] = "Rebel"
elif number == 14:
character["Archetype"] = "Renegade"
elif number == 15:
character["Archetype"] = "Royalty"
elif number == 16:
character["Archetype"] = "Sage"
elif number == 17:
character["Archetype"] = "Savage"
elif number == 18:
character["Archetype"] = "Seeker"
elif number == 19:
character["Archetype"] = "Simple Soul"
elif number == 20:
character["Archetype"] = "Strategist"
elif number == 21:
character["Archetype"] = "Theorist"
elif number == 22:
character["Archetype"] = "Trickster"
else:
character["Archetype"] = "Wanderer"
personalityTraits = []
traitNumber = randrange(2, 5)
traits = ["Ambitious", "Angry", "Boastful", "Bold", "Brutal", "Calm", "Carefree", "Charming", "Connected", "Conservative", "Disciplined", "Driven", "Energetic", "Erudite", "Exotic", "Fatalistic", "Flamboyant", "Funny", "Greedy", "Kind", "Loyal", "Merciful", "Naive", "Patriotic", "Peaceful", "Reformed", "Religious", "Serious", "Skilled", "Vengeful"]
while traitNumber > 0:
number = randrange(0, len(traits))
trait = traits[number]
personalityTraits.append(trait)
traits.remove(trait)
traitNumber -= 1
personalityTraits.sort()
number = len(personalityTraits)
string = ""
while number > 0:
trait = personalityTraits[0]
if number > 1:
string = string + trait + ", "
else:
string = string + trait
personalityTraits.remove(trait)
number -= 1
character["Traits"] = string
number = randrange(1, 5)
if number < 3:
character["Gender"] = "Male"
else:
character["Gender"] = "Female"
age_dic = {"Human": 15, "Dwarf": 40, "Elf": 110, "Gnome": 40, "Half-Elf": 20, "Halfling": 20, "Half-Orc": 14}
if job in ["Barbarian", "Rogue", "Sorcerer"]:
if race in ["Human", "Half-Orc"]:
number = 1
die = 4
elif race == "Dwarf":
number = 3
die = 6
elif race in ["Elf", "Gnome"]:
number = 4
die = 6
elif race == "Half-Elf":
number = 1
die = 6
else:
number = 2
die = 4
elif job in ["Bard", "Fighter", "Paladin", "Ranger"]:
if race in ["Human", "Half-Orc"]:
number = 1
die = 6
elif race == "Dwarf":
number = 5
die = 6
elif race in ["Elf", "Gnome"]:
number = 6
die = 6
elif race == "Half-Elf":
number = 2
die = 6
else:
number = 3
die = 6
else:
if race in ["Human", "Half-Orc"]:
number = 2
die = 6
elif race == "Dwarf":
number = 7
die = 6
elif race == "Elf":
number = 10
die = 6
elif race == "Gnome":
number = 9
die = 6
elif race == "Half-Elf":
number = 3
die = 6
else:
number = 4
die = 6
result = diceRoll(number, die)
age = age_dic[race] + result
character["Age"] = str(age)
gender = character["Gender"]
result = 0
if race == "Human":
if gender == "Male":
base = 58
else:
base = 53
result = diceRoll(2, 10)
elif race == "Dwarf":
if gender == "Male":
base = 45
else:
base = 43
result = diceRoll(2, 4)
elif race == "Elf":
if gender == "Male":
base = 53
else:
base = 53
result = diceRoll(2, 6)
elif race == "Gnome":
if gender == "Male":
base = 36
else:
base = 34
result = diceRoll(2, 4)
elif race == "Half-Elf":
if gender == "Male":
base = 55
else:
base = 53
result = diceRoll(2, 8)
elif race == "Half-Orc":
if gender == "Male":
base = 58
else:
base = 53
result = diceRoll(2, 12)
else:
if gender == "Male":
base = 32
else:
base = 30
result = diceRoll(2, 4)
inches = base + result
quotient = inches / 12
multiple = quotient * 12
difference = inches - multiple
height = "%s ft. %s in." % (quotient, difference)
character["Height"] = height
print "Generated by Nativity in Bits 0.1.5\nSee the Hero Builder's Guidebook (pg. 38) for more information about some of these terms.\n\nAdventurer Statistics"
print "-----------------------------------"
print "Class = " + character["Class"] + ""
print "Race = " + character["Race"] + ""
print "Alignment = " + character["Alignment"] + ""
print "Age = " + character["Age"] + ""
print "Gender = " + character["Gender"] + ""
print "Height = " + character["Height"] + ""
print "Temperature Zone = " + character["Temperature Zone"] + ""
print "Terrain = " + character["Terrain"] + ""
print "Community = " + character["Community"] + ""
print "Family Economic Status = " + character["Family Economic Status"] + ""
print "Family Social Standing = " + character["Family Social Standing"] + ""
print "Family Defense Readiness = " + character["Family Defense Readiness"] + ""
print "Family Private Ethics = " + character["Family Private Ethics"] + ""
print "Family Public Ethics = " + character["Family Public Ethics"] + ""
print "Family Religious Commitment = " + character["Family Religious Commitment"] + ""
print "Family Reputation = " + character["Family Reputation"] + ""
print "Family Political Views = " + character["Family Political Views"] + ""
print "Family Power Structure = " + character["Family Power Structure"] + ""
print "Ancestors of Note = " + character["Ancestors of Note"] + ""
print "Early Childhood Instruction = " + character["Early Childhood Instruction"] + ""
print "Formal Education = " + character["Formal Education"] + ""
print "Learning a Trade = " + character["Learning a Trade"] + ""
print "Early Childhood Events = " + character["Early Childhood Events"] + ""
print "Youth Events = " + character["Youth Events"] + ""
print "Pivotal Events = " + character["Pivotal Events"] + ""
print "Parents = " + character["Parents"] + ""
print "Siblings = " + character["Siblings"] + ""
print "Grandparents = " + character["Grandparents"] + ""
print "Extended Family = " + character["Extended Family"] + ""
print "Friends = " + character["Friends"] + ""
print "Enemies = " + character["Enemies"] + ""
print "Instructors = " + character["Instructors"] + ""
print "Personality Archetype = " + character["Archetype"] + ""
print "Personality Traits = " + character["Traits"] + ""
loop = 1
while loop == 1:
print "\n\n\nDo you want to save this data?"
print "\n--Options--"
print "1. Yes"
print "2. No\n"
try:
selection = input("Make a selection: ")
except (NameError, SyntaxError):
print "\nInvalid Selection"
else:
if selection == 1 or selection == 2:
loop = 0
if selection == 1:
write_file()
print '\nData saved in file "adventurer.txt"'
print "\nShutting down..."
else:
print "\nInvalid Selection"
|
gpl-3.0
| -7,664,956,768,203,536,000
| 29.126677
| 350
| 0.642839
| false
| 2.976244
| false
| false
| false
|
guardicore/monkey
|
monkey/infection_monkey/model/host.py
|
1
|
1374
|
__author__ = "itamar"
class VictimHost(object):
def __init__(self, ip_addr, domain_name=""):
self.ip_addr = ip_addr
self.domain_name = str(domain_name)
self.os = {}
self.services = {}
self.icmp = False
self.monkey_exe = None
self.default_tunnel = None
self.default_server = None
def as_dict(self):
return self.__dict__
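# Identity is keyed on the IP address alone, so hosts with the same IP hash and compare as the same victim.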
def __hash__(self):
return hash(self.ip_addr)
def __eq__(self, other):
if not isinstance(other, VictimHost):
return False
return self.ip_addr.__eq__(other.ip_addr)
def __cmp__(self, other):
if not isinstance(other, VictimHost):
return -1
return self.ip_addr.__cmp__(other.ip_addr)
def __repr__(self):
return "VictimHost({0!r})".format(self.ip_addr)
def __str__(self):
victim = "Victim Host %s: " % self.ip_addr
victim += "OS - ["
for k, v in list(self.os.items()):
victim += "%s-%s " % (k, v)
victim += "] Services - ["
for k, v in list(self.services.items()):
victim += "%s-%s " % (k, v)
victim += "] ICMP: %s " % (self.icmp)
victim += "target monkey: %s" % self.monkey_exe
return victim
def set_default_server(self, default_server):
self.default_server = default_server
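# Illustrative usage sketch (not part of the original module; the values below are made up):
#   victim = VictimHost("10.0.0.5", "db01.local")
#   victim.services["tcp-445"] = "SMB"
#   print(victim)  # "Victim Host 10.0.0.5: OS - [...] Services - [tcp-445-SMB ] ICMP: False ..."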
|
gpl-3.0
| 2,676,835,333,894,688,300
| 27.040816
| 55
| 0.524017
| false
| 3.359413
| false
| false
| false
|
cmr/automatafl
|
old_python_prototype/rl_learn.py
|
1
|
4730
|
import argparse, random
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, AlphaDropout, Dropout, Flatten
from keras.optimizers import RMSprop, Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
from model import Game, Board, Plebeian
import model
parser = argparse.ArgumentParser(description='Train a learning agent to play Automatafl.')
parser.add_argument('save', help='Save weights to this file')
parser.add_argument('-L', '--load', dest='load', help='Load these weights before training')
parser.add_argument('-s', '--steps', dest='steps', type=int, default=100000, help='Perform this many training steps')
parser.add_argument('--dropout', dest='dropout', type=float, default=0.02, help='Drop this fraction of values between the internal layers to prevent overfit')
parser.add_argument('--memory', dest='memory', type=int, default=10000, help='Remember this many past moves for the learner')
parser.add_argument('--against', dest='against', help='Load this file as the adversary (instead of a random agent)')
parser.add_argument('--rand-rate', dest='rand_rate', type=float, default=0.02, help='Have the adversary move randomly at this rate')
parser.add_argument('--learn-rate', dest='learn_rate', type=float, default=0.1, help='Initial learning rate')
parser.add_argument('--layers', dest='layers', type=int, default=8, help='Use this many hidden layers')
parser.add_argument('--width', dest='width', type=int, default=128, help='Each hidden layer has this many neurons')
parser.add_argument('--update', dest='update', type=int, default=32, help='Update the target model with learned data after this many steps')
args = parser.parse_args()
plebs = [Plebeian(i) for i in range(1, 3)]
def setup_game():
return Game(*plebs, setup=[
# [2, 0, 0, 2, 0, 0, 2],
# [0, 0, 1, 2, 1, 0, 0],
# [1, 0, 0, 0, 0, 0, 1],
# [2, 0, 0, 3, 0, 0, 2],
# [1, 0, 0, 0, 0, 0, 1],
# [0, 0, 1, 2, 1, 0, 0],
# [2, 0, 0, 2, 0, 0, 2],
# ], goals=[[(0, 0), (0, 6)], [(6, 0), (6, 6)]])
[2, 0, 1, 0, 2],
[0, 0, 0, 0, 0],
[2, 0, 3, 0, 2],
[0, 0, 0, 0, 0],
[2, 0, 1, 0, 2],
], goals=[[(0, 0), (4, 0)], [(0, 4), (4, 4)]])
game = setup_game()
NUM_ACTIONS = game.NumActions()
NUM_STATES = len(game.StateVector(plebs[0]))
#print(NUM_ACTIONS)
#print(NUM_STATES)
#exit()
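# Q-network: flatten the (memory window x state) input, apply dropout, then stacked ReLU hidden layers sized by --width/--layers, ending in one linear output per action.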
def make_net(primary):
mdl = Sequential()
mdl.add(Flatten(input_shape=(args.memory, NUM_STATES)))
mdl.add(Dropout(args.dropout))
mdl.add(Dense(args.width, input_shape=(NUM_STATES,), activation='relu'))
mdl.add(Dropout(args.dropout))
if primary:
for i in range(args.layers - 1):
mdl.add(Dense(args.width, activation='relu', kernel_initializer='lecun_uniform'))
mdl.add(Dropout(args.dropout))
mdl.add(Dense(NUM_ACTIONS))
return mdl
def make_agent(prim, load):
nn = make_net(True)
mem = SequentialMemory(limit=args.memory, window_length=args.memory)
pol = BoltzmannQPolicy()
dqn = DQNAgent(model=nn, nb_actions=NUM_ACTIONS, memory=mem, policy=pol, target_model_update=args.update)
dqn.compile(Adam(lr=args.learn_rate), metrics=['mae'])
if load:
dqn.load_weights(load)
return dqn
cur = make_agent(True, args.load)
if args.against:
adv = make_agent(True, args.against)
steps = 0
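# Gym-style adapter so keras-rl can drive the Automatafl model: reset() rebuilds the game, step() poses both players' moves and returns (state, reward, done, info).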
class GameEnv(object):
def reset(self):
global game, steps
game = setup_game()
steps = 0
print('Game reset')
return game.StateVector(plebs[0])
def render(self, mode='human', close=False):
pass
def close(self):
pass
def step(self, act):
global steps
steps += 1
game.PoseAgentMove(plebs[0], act)
if args.against and random.random() > args.rand_rate:
game.PoseAgentMove(plebs[1], adv.forward(game.StateVector(plebs[1])))
else:
game.PoseAgentMove(plebs[1], random.randrange(0, NUM_ACTIONS))
winner = None
for ev in game.GlobalEvents():
if ev.__class__ is model.TurnOver and ev.winner is not None:
winner = ev.winner
print(f'Game won on step {steps} by {winner}')
if ev.__class__ is model.Conflict:
print(f'Conflict on step {steps}')
for pleb in plebs:
pleb.Events()
retval = (
game.StateVector(plebs[0]),
game.RewardScalar(plebs[0]),
winner is not None,
{},
)
return retval
cur.fit(GameEnv(), nb_steps=args.steps, log_interval=args.update)
cur.save_weights(args.save, overwrite=True)
|
apache-2.0
| -586,225,406,380,804,200
| 35.666667
| 157
| 0.625581
| false
| 3.195946
| false
| false
| false
|
jarshwah/optimising-django-queries
|
shop/shop/models.py
|
1
|
1521
|
from django.db import models
from django.utils.functional import cached_property as buffered_property
from django.utils import timezone
class Category(models.Model):
name = models.CharField(max_length=32)
def __str__(self):
return self.name
class Feature(models.Model):
name = models.CharField(max_length=32)
value = models.CharField(max_length=32)
visible = models.BooleanField(default=True)
class Meta:
ordering = ['name']
def __str__(self):
return f'{self.name} = {self.value}'
class Product(models.Model):
name = models.CharField(max_length=32)
category = models.ForeignKey(Category)
features = models.ManyToManyField(Feature)
price = models.DecimalField(max_digits=6, decimal_places=2)
def __str__(self):
return self.name
@buffered_property
def all_features(self):
return list(self.features.all())
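# The *_python properties filter the buffered feature list in memory; the *_database properties issue a fresh filtered query on every access.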
@property
def visible_features_python(self):
return [feature for feature in self.all_features if feature.visible]
@property
def invisible_features_python(self):
return [feature for feature in self.all_features if not feature.visible]
@property
def visible_features_database(self):
return self.features.filter(visible=True)
@property
def invisible_features_database(self):
return self.features.filter(visible=False)
class Sale(models.Model):
product = models.ForeignKey(Product)
sale_date = models.DateTimeField(default=timezone.now)
|
bsd-2-clause
| 446,022,595,812,349,600
| 25.684211
| 80
| 0.692308
| false
| 3.992126
| false
| false
| false
|
shikhir-arora/Giesela
|
musicbot/cleverbot.py
|
1
|
3577
|
"""
CleverWrap.py
Python wrapper for Cleverbot's API.
http://www.cleverbot.com/api
Copyright 2017 Andrew Edwards
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import requests
class CleverWrap:
""" A simple wrapper class for the www.cleverbot.com api. """
url = "https://www.cleverbot.com/getreply"
def __init__(self, api_key, name="CleverBot"):
""" Initialize the class with an api key and optional name
:type name: string
:type api_key: string
:type history: dict
:type convo_id: string
:type cs: string
:type count: int
:type time_elapsed: int
:type time_taken: int
:type output: string
"""
self.name = name
self.key = api_key
self.history = {}
self.convo_id = ""
self.cs = ""
self.count = 0
self.time_elapsed = 0
self.time_taken = 0
self.output = ""
def say(self, text):
"""
Say something to www.cleverbot.com
:type text: string
Returns: string
"""
params = {
"input": text,
"key": self.key,
"cs": self.cs,
"conversation_id": self.convo_id,
"wrapper": "CleverWrap.py"
}
reply = self._send(params)
self._process_reply(reply)
return self.output
def _send(self, params):
"""
Make the request to www.cleverbot.com
:type params: dict
Returns: dict
"""
# Get a response
try:
r = requests.get(self.url, params=params)
# catch request errors, log them, and re-raise so the caller sees the failure
except requests.exceptions.RequestException as e:
print(e)
raise
return r.json()
def _process_reply(self, reply):
""" take the cleverbot.com response and populate properties. """
self.cs = reply.get("cs", None)
self.count = int(reply.get("interaction_count", None))
self.output = reply.get("output", None).encode(
"latin-1").decode("utf-8")
self.convo_id = reply.get("conversation_id", None)
self.history = {key: value for key,
value in reply.items() if key.startswith("interaction")}
self.time_taken = int(reply.get("time_taken", None))
self.time_elapsed = int(reply.get("time_elapsed", None))
def reset(self):
"""
Drop values for self.cs and self.conversation_id
this will start a new conversation with the bot.
"""
self.cs = ""
self.convo_id = ""
|
mit
| 1,497,654,019,481,257,000
| 37.880435
| 460
| 0.621471
| false
| 4.144844
| false
| false
| false
|
sekikn/ambari
|
ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
|
2
|
7478
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import socket
import time
from alerts.base_alert import BaseAlert
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from ambari_commons import OSCheck
from ambari_commons.inet_utils import resolve_address, get_host_from_url
logger = logging.getLogger(__name__)
# default timeouts
DEFAULT_WARNING_TIMEOUT = 1.5
DEFAULT_CRITICAL_TIMEOUT = 5.0
class PortAlert(BaseAlert):
def __init__(self, alert_meta, alert_source_meta, config):
super(PortAlert, self).__init__(alert_meta, alert_source_meta, config)
self.uri = None
self.default_port = None
self.socket_command = None
self.socket_command_response = None
self.warning_timeout = DEFAULT_WARNING_TIMEOUT
self.critical_timeout = DEFAULT_CRITICAL_TIMEOUT
if 'uri' in alert_source_meta:
self.uri = alert_source_meta['uri']
# always static
if 'default_port' in alert_source_meta:
self.default_port = alert_source_meta['default_port']
if 'reporting' in alert_source_meta:
reporting = alert_source_meta['reporting']
reporting_state_warning = self.RESULT_WARNING.lower()
reporting_state_critical = self.RESULT_CRITICAL.lower()
if reporting_state_warning in reporting and \
'value' in reporting[reporting_state_warning]:
self.warning_timeout = reporting[reporting_state_warning]['value']
if reporting_state_critical in reporting and \
'value' in reporting[reporting_state_critical]:
self.critical_timeout = reporting[reporting_state_critical]['value']
if 'parameters' in alert_source_meta:
for parameter in alert_source_meta['parameters']:
if 'socket.command' == parameter['name']:
self.socket_command = parameter['value']
if 'socket.command.response' == parameter['name']:
self.socket_command_response = parameter['value']
# check warning threshold for sanity
if self.warning_timeout >= 30:
logger.warn("[Alert][{0}] The warning threshold of {1}s is too large, resetting to {2}s".format(
self.get_name(), str(self.warning_timeout), str(DEFAULT_WARNING_TIMEOUT)))
self.warning_timeout = DEFAULT_WARNING_TIMEOUT
# check critical threshold for sanity
if self.critical_timeout >= 30:
logger.warn("[Alert][{0}] The critical threshold of {1}s is too large, resetting to {2}s".format(
self.get_name(), str(self.critical_timeout), str(DEFAULT_CRITICAL_TIMEOUT)))
self.critical_timeout = DEFAULT_CRITICAL_TIMEOUT
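# _collect resolves the configured URI to one or more candidate hosts, then times a raw TCP connect (and optional command/response exchange) against the warning/critical thresholds.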
def _collect(self):
configurations = self.configuration_builder.get_configuration(self.cluster_id, None, None)
# can be parameterized or static
# if not parameterized, this will return the static value
uri_value = self._get_configuration_value(configurations, self.uri)
host_not_specified = False
if uri_value is None:
host_not_specified = True
uri_value = self.host_name
logger.debug("[Alert][{0}] Setting the URI to this host since it wasn't specified".format(
self.get_name()))
# in some cases, a single property is a comma-separated list like
# host1:8080,host2:8081,host3:8083
uri_value_array = uri_value.split(',')
if len(uri_value_array) > 1:
for item in uri_value_array:
if self.host_name in item:
uri_value = item
if logger.isEnabledFor(logging.DEBUG):
logger.debug("[Alert][{0}] Extracted {1} as the host name while parsing the CSV URI {2}".format(
self.get_name(), uri_value, str(uri_value_array)))
break
host = get_host_from_url(uri_value)
if host is None or host == "localhost" or host == "0.0.0.0":
host = self.host_name
host_not_specified = True
hosts = [host]
# If host is not specified in the uri, hence we are using current host name
# then also add public host name as a fallback.
if host_not_specified and host.lower() == self.host_name.lower() \
and self.host_name.lower() != self.public_host_name.lower():
hosts.append(self.public_host_name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("[Alert][{0}] List of hosts = {1}".format(self.get_name(), hosts))
try:
port = int(get_port_from_url(uri_value))
except:
if self.default_port is None:
label = 'Unable to determine port from URI {0}'.format(uri_value)
return (self.RESULT_UNKNOWN, [label])
port = self.default_port
exceptions = []
for host in hosts:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("[Alert][{0}] Checking {1} on port {2}".format(
self.get_name(), host, str(port)))
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.critical_timeout)
if OSCheck.is_windows_family():
# on windows 0.0.0.0 is an invalid address to connect to, but on linux it resolves to 127.0.0.1
host = resolve_address(host)
start_time = time.time()
s.connect((host, port))
if self.socket_command is not None:
s.sendall(self.socket_command)
data = s.recv(1024)
if self.socket_command_response is not None and data != self.socket_command_response:
raise Exception("Expected response {0}, Actual response {1}".format(
self.socket_command_response, data))
end_time = time.time()
# time.time() already returns seconds, so the elapsed value needs no unit conversion
seconds = end_time - start_time
# not sure why this happens sometimes, but we don't always get a
# socket exception if the connect() takes longer than the critical threshold
if seconds >= self.critical_timeout:
return (self.RESULT_CRITICAL, ['Socket Timeout', host, port])
result = self.RESULT_OK
if seconds >= self.warning_timeout:
result = self.RESULT_WARNING
return (result, [seconds, port])
except Exception as e:
exceptions.append(e)
finally:
if s is not None:
try:
s.close()
except:
# no need to log a close failure
pass
if exceptions:
return (self.RESULT_CRITICAL, [str(exceptions[0]), hosts[0], port])
def _get_reporting_text(self, state):
'''
Gets the default reporting text to use when the alert definition does not
contain any.
:param state: the state of the alert in uppercase (such as OK, WARNING, etc)
:return: the parameterized text
'''
if state == self.RESULT_OK or state == self.RESULT_WARNING:
return 'TCP OK - {0:.4f} response on port {1}'
return 'Connection failed: {0} to {1}:{2}'
|
apache-2.0
| -2,506,284,335,676,159,000
| 36.577889
| 108
| 0.664482
| false
| 3.900887
| true
| false
| false
|