| code (string, 3–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
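Each row below pairs a source file's full contents (code) with its repository, path, language, license, and size in bytes. As a minimal, hedged sketch of how a table with this schema can be consumed programmatically (the dataset identifier "codeparrot/github-code" is an illustrative assumption based on the matching column names, not a confirmed source of this dump):

```python
from datasets import load_dataset

# Stream rows so the full dataset never has to fit in memory.
ds = load_dataset("codeparrot/github-code", split="train", streaming=True)
for row in ds:
    # Each row exposes the columns listed in the header above.
    print(row["repo_name"], row["path"], row["license"], row["size"])
    break
```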
# -*- coding: utf-8 -*-
# © 2016 LasLabs Inc.
# License GPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from odoo.exceptions import ValidationError
from odoo.tests.common import TransactionCase
class TestMedicalPatient(TransactionCase):
def setUp(self):
super(TestMedicalPatient, self).setUp()
self.medical_patient_model = self.env['medical.patient']
self.human = self.env.ref('medical_patient_species.human')
self.dog = self.env.ref('medical_patient_species.dog')
def new_patient(self, update_vals=None):
self.vals = {
'name': 'Patient',
'species_id': self.human.id,
'parent_id': None,
}
if update_vals:
self.vals.update(update_vals)
return self.medical_patient_model.create(self.vals)
def test_check_parent_id_exists_no_parent(self):
""" Test create pet with no parent_id raises ValidationError """
with self.assertRaises(ValidationError):
self.new_patient({'species_id': self.dog.id})
def test_check_parent_id_exists_with_parent(self):
""" Test create pet with parent_id not raises ValidationError """
patient_1 = self.new_patient()
try:
self.new_patient({
'species_id': self.dog.id,
'parent_id': patient_1.partner_id.id,
})
self.assertTrue(True)
except ValidationError:
self.fail("Should not raise ValidationError if parent_id exists")
def test_check_species_id(self):
""" Test create medical patient no species raises ValidationError """
patient = self.new_patient({'species_id': None})
        self.assertEqual(
self.human.id,
patient.species_id.id,
)
| laslabs/vertical-medical | medical_patient_species/tests/test_medical_patient.py | Python | agpl-3.0 | 1,799 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.forms import (
CharField, DateField, EmailField, FileField, Form, GenericIPAddressField,
HiddenInput, ImageField, IPAddressField, MultipleChoiceField,
MultiValueField, MultiWidget, PasswordInput, SelectMultiple, SlugField,
SplitDateTimeField, SplitDateTimeWidget, TextInput, URLField,
)
from django.forms.extras import SelectDateWidget
from django.forms.utils import ErrorList
from django.test import TestCase, ignore_warnings, override_settings
from django.utils import six
from django.utils import translation
from django.utils.dates import MONTHS_AP
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text, smart_text, python_2_unicode_compatible
from .test_error_messages import AssertFormErrorsMixin
class GetDate(Form):
mydate = DateField(widget=SelectDateWidget)
class GetDateShowHiddenInitial(Form):
mydate = DateField(widget=SelectDateWidget, show_hidden_initial=True)
class FormsExtraTestCase(TestCase, AssertFormErrorsMixin):
###############
# Extra stuff #
###############
    # The forms library comes with some extra, higher-level Field and Widget
    # classes that demonstrate some of the library's abilities.
def test_selectdate(self):
self.maxDiff = None
w = SelectDateWidget(years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'))
# Rendering the default state.
self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Rendering the None or '' values should yield the same output.
self.assertHTMLEqual(w.render('mydate', None), w.render('mydate', ''))
# Rendering a string value.
self.assertHTMLEqual(w.render('mydate', '2010-04-15'), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4" selected="selected">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15" selected="selected">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Rendering a datetime value.
self.assertHTMLEqual(w.render('mydate', datetime.date(2010, 4, 15)), w.render('mydate', '2010-04-15'))
# Invalid dates should still render the failed date.
self.assertHTMLEqual(w.render('mydate', '2010-02-31'), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2" selected="selected">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31" selected="selected">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Rendering with a custom months dict.
w = SelectDateWidget(months=MONTHS_AP, years=('2013',))
self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">Jan.</option>
<option value="2">Feb.</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">Aug.</option>
<option value="9">Sept.</option>
<option value="10">Oct.</option>
<option value="11">Nov.</option>
<option value="12">Dec.</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2013">2013</option>
</select>""")
a = GetDate({'mydate_month': '4', 'mydate_day': '1', 'mydate_year': '2008'})
self.assertTrue(a.is_valid())
self.assertEqual(a.cleaned_data['mydate'], datetime.date(2008, 4, 1))
# As with any widget that implements get_value_from_datadict,
# we must be prepared to accept the input from the "as_hidden"
# rendering as well.
self.assertHTMLEqual(a['mydate'].as_hidden(), '<input type="hidden" name="mydate" value="2008-4-1" id="id_mydate" />')
b = GetDate({'mydate': '2008-4-1'})
self.assertTrue(b.is_valid())
self.assertEqual(b.cleaned_data['mydate'], datetime.date(2008, 4, 1))
# Invalid dates shouldn't be allowed
c = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(c.is_valid())
self.assertEqual(c.errors, {'mydate': ['Enter a valid date.']})
# label tag is correctly associated with month dropdown
d = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_month">', d.as_p())
def test_selectdate_empty_label(self):
w = SelectDateWidget(years=('2014',), empty_label='empty_label')
        # Rendering the default state with empty_label set as a string.
self.assertInHTML('<option value="0">empty_label</option>', w.render('mydate', ''), count=3)
w = SelectDateWidget(years=('2014',), empty_label=('empty_year', 'empty_month', 'empty_day'))
# Rendering the default state with empty_label tuple.
self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">empty_month</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">empty_day</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">empty_year</option>
<option value="2014">2014</option>
</select>""")
self.assertRaisesMessage(ValueError, 'empty_label list/tuple must have 3 elements.',
SelectDateWidget, years=('2014',), empty_label=('not enough', 'values'))
def test_multiwidget(self):
# MultiWidget and MultiValueField #############################################
# MultiWidgets are widgets composed of other widgets. They are usually
# combined with MultiValueFields - a field that is composed of other fields.
        # MultiWidgets can themselves be composed of other MultiWidgets.
# SplitDateTimeWidget is one example of a MultiWidget.
class ComplexMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = (
TextInput(),
SelectMultiple(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
SplitDateTimeWidget(),
)
super(ComplexMultiWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
data = value.split(',')
return [data[0], list(data[1]), datetime.datetime.strptime(data[2], "%Y-%m-%d %H:%M:%S")]
return [None, None, None]
def format_output(self, rendered_widgets):
return '\n'.join(rendered_widgets)
w = ComplexMultiWidget()
self.assertHTMLEqual(w.render('name', 'some text,JP,2007-04-25 06:24:00'), """<input type="text" name="name_0" value="some text" />
<select multiple="multiple" name="name_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="name_2_0" value="2007-04-25" /><input type="text" name="name_2_1" value="06:24:00" />""")
class ComplexField(MultiValueField):
def __init__(self, required=True, widget=None, label=None, initial=None):
fields = (
CharField(),
MultipleChoiceField(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
SplitDateTimeField()
)
super(ComplexField, self).__init__(fields, required, widget, label, initial)
def compress(self, data_list):
if data_list:
return '%s,%s,%s' % (data_list[0], ''.join(data_list[1]), data_list[2])
return None
f = ComplexField(widget=w)
self.assertEqual(f.clean(['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]), 'some text,JP,2007-04-25 06:24:00')
self.assertFormErrors(['Select a valid choice. X is not one of the available choices.'], f.clean, ['some text', ['X'], ['2007-04-25', '6:24:00']])
# If insufficient data is provided, None is substituted
self.assertFormErrors(['This field is required.'], f.clean, ['some text', ['JP']])
# test with no initial data
self.assertTrue(f.has_changed(None, ['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]))
# test when the data is the same as initial
self.assertFalse(f.has_changed('some text,JP,2007-04-25 06:24:00',
['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]))
# test when the first widget's data has changed
self.assertTrue(f.has_changed('some text,JP,2007-04-25 06:24:00',
['other text', ['J', 'P'], ['2007-04-25', '6:24:00']]))
# test when the last widget's data has changed. this ensures that it is not
# short circuiting while testing the widgets.
self.assertTrue(f.has_changed('some text,JP,2007-04-25 06:24:00',
['some text', ['J', 'P'], ['2009-04-25', '11:44:00']]))
class ComplexFieldForm(Form):
field1 = ComplexField(widget=w)
f = ComplexFieldForm()
self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_field1_0">Field1:</label></th><td><input type="text" name="field1_0" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" id="id_field1_2_0" /><input type="text" name="field1_2_1" id="id_field1_2_1" /></td></tr>""")
f = ComplexFieldForm({'field1_0': 'some text', 'field1_1': ['J', 'P'], 'field1_2_0': '2007-04-25', 'field1_2_1': '06:24:00'})
self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_field1_0">Field1:</label></th><td><input type="text" name="field1_0" value="some text" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" value="2007-04-25" id="id_field1_2_0" /><input type="text" name="field1_2_1" value="06:24:00" id="id_field1_2_1" /></td></tr>""")
self.assertEqual(f.cleaned_data['field1'], 'some text,JP,2007-04-25 06:24:00')
@ignore_warnings(category=RemovedInDjango19Warning)
def test_ipaddress(self):
f = IPAddressField()
self.assertFormErrors(['This field is required.'], f.clean, '')
self.assertFormErrors(['This field is required.'], f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1'), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '256.125.1.5')
f = IPAddressField(required=False)
self.assertEqual(f.clean(''), '')
self.assertEqual(f.clean(None), '')
self.assertEqual(f.clean(' 127.0.0.1'), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '256.125.1.5')
def test_generic_ipaddress_invalid_arguments(self):
self.assertRaises(ValueError, GenericIPAddressField, protocol="hamster")
self.assertRaises(ValueError, GenericIPAddressField, protocol="ipv4", unpack_ipv4=True)
def test_generic_ipaddress_as_generic(self):
# The edge cases of the IPv6 validation code are not deeply tested
        # here; they are covered in the tests for django.utils.ipv6.
f = GenericIPAddressField()
self.assertFormErrors(['This field is required.'], f.clean, '')
self.assertFormErrors(['This field is required.'], f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '12345:2:3:4')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3::4')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3:4:5:6:7:8')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1:2')
def test_generic_ipaddress_as_ipv4_only(self):
f = GenericIPAddressField(protocol="IPv4")
self.assertFormErrors(['This field is required.'], f.clean, '')
self.assertFormErrors(['This field is required.'], f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '256.125.1.5')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'fe80::223:6cff:fe8a:2e8a')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '2a02::223:6cff:fe8a:2e8a')
def test_generic_ipaddress_as_ipv6_only(self):
f = GenericIPAddressField(protocol="IPv6")
self.assertFormErrors(['This field is required.'], f.clean, '')
self.assertFormErrors(['This field is required.'], f.clean, None)
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '12345:2:3:4')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3::4')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3:4:5:6:7:8')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1:2')
def test_generic_ipaddress_as_generic_not_required(self):
f = GenericIPAddressField(required=False)
self.assertEqual(f.clean(''), '')
self.assertEqual(f.clean(None), '')
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '12345:2:3:4')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3::4')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3:4:5:6:7:8')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1:2')
def test_generic_ipaddress_normalization(self):
# Test the normalizing code
f = GenericIPAddressField()
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' ::ffff:10.10.10.10 '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' 2001:000:a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
self.assertEqual(f.clean(' 2001::a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
f = GenericIPAddressField(unpack_ipv4=True)
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a'), '10.10.10.10')
def test_slugfield_normalization(self):
f = SlugField()
self.assertEqual(f.clean(' aa-bb-cc '), 'aa-bb-cc')
def test_urlfield_normalization(self):
f = URLField()
self.assertEqual(f.clean('http://example.com/ '), 'http://example.com/')
def test_smart_text(self):
class Test:
if six.PY3:
def __str__(self):
return 'ŠĐĆŽćžšđ'
else:
def __str__(self):
return 'ŠĐĆŽćžšđ'.encode('utf-8')
class TestU:
if six.PY3:
def __str__(self):
return 'ŠĐĆŽćžšđ'
def __bytes__(self):
return b'Foo'
else:
def __str__(self):
return b'Foo'
def __unicode__(self):
return '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111'
self.assertEqual(smart_text(Test()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
self.assertEqual(smart_text(TestU()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
self.assertEqual(smart_text(1), '1')
self.assertEqual(smart_text('foo'), 'foo')
def test_accessing_clean(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
data = self.cleaned_data
if not self.errors:
data['username'] = data['username'].lower()
return data
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_nothing_returned(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
self.cleaned_data['username'] = self.cleaned_data['username'].lower()
# don't return anything
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_in_clean(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
data = self.cleaned_data
# Return a different dict. We have not changed self.cleaned_data.
return {
'username': data['username'].lower(),
'password': 'this_is_not_a_secret',
}
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_overriding_errorlist(self):
@python_2_unicode_compatible
class DivErrorList(ErrorList):
def __str__(self):
return self.as_divs()
def as_divs(self):
if not self:
return ''
return '<div class="errorlist">%s</div>' % ''.join('<div class="error">%s</div>' % force_text(e) for e in self)
class CommentForm(Form):
name = CharField(max_length=50, required=False)
email = EmailField()
comment = CharField()
data = dict(email='invalid')
f = CommentForm(data, auto_id=False, error_class=DivErrorList)
self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50" /></p>
<div class="errorlist"><div class="error">Enter a valid email address.</div></div>
<p>Email: <input type="email" name="email" value="invalid" /></p>
<div class="errorlist"><div class="error">This field is required.</div></div>
<p>Comment: <input type="text" name="comment" /></p>""")
def test_multipart_encoded_form(self):
class FormWithoutFile(Form):
username = CharField()
class FormWithFile(Form):
username = CharField()
file = FileField()
class FormWithImage(Form):
image = ImageField()
self.assertFalse(FormWithoutFile().is_multipart())
self.assertTrue(FormWithFile().is_multipart())
self.assertTrue(FormWithImage().is_multipart())
def test_selectdatewidget_required(self):
class GetNotRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=False)
class GetRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=True)
self.assertFalse(GetNotRequiredDate().fields['mydate'].widget.is_required)
self.assertTrue(GetRequiredDate().fields['mydate'].widget.is_required)
@override_settings(USE_L10N=True)
class FormsExtraL10NTestCase(TestCase):
def setUp(self):
super(FormsExtraL10NTestCase, self).setUp()
translation.activate('nl')
def tearDown(self):
translation.deactivate()
super(FormsExtraL10NTestCase, self).tearDown()
def test_l10n(self):
w = SelectDateWidget(years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'))
self.assertEqual(w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-2010')
self.assertHTMLEqual(w.render('date', '13-08-2010'), """<select name="date_day" id="id_date_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13" selected="selected">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="date_month" id="id_date_month">
<option value="0">---</option>
<option value="1">januari</option>
<option value="2">februari</option>
<option value="3">maart</option>
<option value="4">april</option>
<option value="5">mei</option>
<option value="6">juni</option>
<option value="7">juli</option>
<option value="8" selected="selected">augustus</option>
<option value="9">september</option>
<option value="10">oktober</option>
<option value="11">november</option>
<option value="12">december</option>
</select>
<select name="date_year" id="id_date_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Years before 1900 work
w = SelectDateWidget(years=('1899',))
self.assertEqual(w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-1899')
def test_l10n_date_changed(self):
"""
Ensure that DateField.has_changed() with SelectDateWidget works
correctly with a localized date format.
Refs #17165.
"""
# With Field.show_hidden_initial=False -----------------------
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '2',
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertTrue(b.has_changed())
# With Field.show_hidden_initial=True ------------------------
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
}, initial={'mydate': datetime.date(2008, 4, 22)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 22))
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertFalse(b.has_changed())
def test_l10n_invalid_date_in(self):
# Invalid dates shouldn't be allowed
a = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(a.is_valid())
# 'Geef een geldige datum op.' = 'Enter a valid date.'
self.assertEqual(a.errors, {'mydate': ['Geef een geldige datum op.']})
def test_form_label_association(self):
# label tag is correctly associated with first rendered dropdown
a = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_day">', a.as_p())
| edevil/django | tests/forms_tests/tests/test_extra.py | Python | bsd-3-clause | 35,908 |
# Example for: model.patch_ss_templates() and model.patch_ss()
# This will patch CYS-CYS disulfide bonds using disulfides in aligned templates:
from modeller import *
log.verbose()
env = environ()
env.io.atom_files_directory = ['.', '../atom_files']
env.libs.topology.read(file='$(LIB)/top_heav.lib')
env.libs.parameters.read(file='$(LIB)/par.lib')
# Read the sequence, calculate its topology, and coordinates:
aln = alignment(env, file='toxin.ali', align_codes=('2ctx', '2abx'))
# Superpose the two template structures without changing the alignment.
# This is for TRANSFER_XYZ to work properly. It relies on not reading
# the atom files again before TRANSFER_XYZ.
aln.malign3d(fit=False) # This is for TRANSFER_XYZ to work properly.
# Restore the alignment, and add in the model sequence, 1fas:
aln.clear()
aln.append(file='toxin.ali', align_codes=('2ctx', '2abx', '1fas'))
mdl = model(env)
mdl.generate_topology(aln['1fas'])
mdl.transfer_xyz(aln)
mdl.build(initialize_xyz=True, build_method='INTERNAL_COORDINATES')
mdl.write(file='1fas.noSS')
# Create the disulfide bonds using equivalent disulfide bonds in templates:
mdl.patch_ss_templates(aln)
# Create the stereochemical restraints
sel = selection(mdl)
mdl.restraints.make(sel, restraint_type='stereo', spline_on_site=False)
# Calculate energy to test the disulfide restraints (bonds, angles, dihedrals):
sel.energy()
mdl.read(file='1fas.noSS')
# Create the disulfide bonds guessing by coordinates
mdl.patch_ss()
# Create the stereochemical restraints
mdl.restraints.make(sel, restraint_type='stereo', spline_on_site=False)
# Calculate energy to test the disulfide restraints (bonds, angles, dihedrals):
sel.energy()
| bjornwallner/proq2-server | apps/modeller9v8/examples/commands/patch_disulfides.py | Python | gpl-3.0 | 1,683 |
from ..iter_wrapper import IterWrapper
import unittest
class IterValueTest(unittest.TestCase):
""" Test cases of IterValue """
def test_iterable(self):
""" Test that an iterable is returned properly """
expected = [1,2,3]
actual = IterWrapper(expected)
self.assertEqual(expected, actual)
def test_nonIterable(self):
""" Test that a non-iterable is returned properly """
expected = 1
actual = IterWrapper(expected)
self.assertEqual(len(actual), 1)
self.assertIn(expected, actual)
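# For context, a minimal hypothetical implementation that both tests would
# pass against (the real IterWrapper in ..iter_wrapper may differ):
#
#     class IterWrapper(list):
#         def __init__(self, value):
#             try:
#                 super(IterWrapper, self).__init__(value)
#             except TypeError:
#                 # Non-iterable values are wrapped in a single-element list.
#                 super(IterWrapper, self).__init__([value])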
| cloew/KaoJson | kao_json/Test/test_iter_wrapper.py | Python | mit | 599 |
"""
WSGI config for chp2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "chp2.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
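# For illustration (assuming a WSGI server such as gunicorn is installed),
# this module would typically be referenced as:  gunicorn chp2.wsgi:application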
| tzaffi/git-in-practice-repo | chp2/chp2/wsgi.py | Python | mit | 383 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright Camptocamp SA 2011
# SQL inspired from OpenERP original code
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from collections import defaultdict
from operator import add
from openerp.tools.float_utils import float_is_zero
from .common_balance_reports import CommonBalanceReportHeaderWebkit
from .common_partner_reports import CommonPartnersReportHeaderWebkit
class CommonPartnerBalanceReportHeaderWebkit(CommonBalanceReportHeaderWebkit,
CommonPartnersReportHeaderWebkit):
"""Define common helper for balance (trial balance, P&L,
BS oriented financial report"""
def _get_account_partners_details(self, account_by_ids, main_filter,
target_move, start, stop,
initial_balance_mode,
partner_filter_ids=False,
display_partner='all'):
res = {}
filter_from = False
if main_filter in ('filter_period', 'filter_no', 'filter_opening'):
filter_from = 'period'
elif main_filter == 'filter_date':
filter_from = 'date'
partners_init_balances_by_ids = {}
for account_id, account_details in account_by_ids.iteritems():
partners_init_balances_by_ids.update(
self._get_partners_initial_balances(
account_id, start, initial_balance_mode,
partner_filter_ids=partner_filter_ids,
# we'll never exclude reconciled entries in the legal
# reports
exclude_reconcile=False))
opening_mode = 'exclude_opening'
if main_filter == 'filter_opening':
opening_mode = 'include_opening'
# get credit and debit for partner
details = self._get_partners_totals_account(
filter_from,
account_id,
start,
stop,
target_move,
partner_filter_ids=partner_filter_ids,
mode=opening_mode)
# merge initial balances in partner details
if partners_init_balances_by_ids.get(account_id):
for partner_id, initial_balances in \
partners_init_balances_by_ids[account_id].iteritems():
if initial_balances.get('init_balance'):
details[partner_id].update(
{'init_balance': initial_balances['init_balance']})
# compute balance for the partner
for partner_id, partner_details in details.iteritems():
details[partner_id]['balance'] = details[partner_id].\
get('init_balance', 0.0) + \
details[partner_id].get('debit', 0.0) - \
details[partner_id].get('credit', 0.0)
if display_partner == 'non-zero_balance':
details = {
k: v
for k, v in details.iteritems()
if not float_is_zero(v['balance'], precision_digits=5)
}
res[account_id] = details
return res
def _get_partners_initial_balances(self, account_ids, start_period,
initial_balance_mode,
partner_filter_ids=None,
exclude_reconcile=False):
# we get the initial balance from the opening period (opening_balance)
# when the opening period is included in the start period and
# when there is at least one entry in the opening period. Otherwise we
# compute it from previous periods
if initial_balance_mode == 'opening_balance':
opening_period_selected = self.get_included_opening_period(
start_period)
res = self._compute_partners_initial_balances(
account_ids, start_period, partner_filter_ids,
force_period_ids=opening_period_selected,
exclude_reconcile=exclude_reconcile)
elif initial_balance_mode == 'initial_balance':
res = self._compute_partners_initial_balances(
account_ids, start_period, partner_filter_ids,
exclude_reconcile=exclude_reconcile)
else:
res = {}
return res
def _get_partners_totals_account(self, filter_from, account_id, start,
stop, target_move,
partner_filter_ids=None,
mode='exclude_opening'):
final_res = defaultdict(dict)
sql_select = """
SELECT account_move_line.partner_id,
sum(account_move_line.debit) AS debit,
sum(account_move_line.credit) AS credit
FROM account_move_line"""
sql_joins = ''
sql_where = "WHERE account_move_line.account_id = %(account_id)s \
AND account_move_line.state = 'valid' "
method = getattr(self, '_get_query_params_from_' + filter_from + 's')
sql_conditions, search_params = method(start, stop, mode=mode)
sql_where += sql_conditions
if partner_filter_ids:
sql_where += " AND account_move_line.partner_id \
in %(partner_ids)s"
search_params.update({'partner_ids': tuple(partner_filter_ids)})
if target_move == 'posted':
sql_joins += "INNER JOIN account_move \
ON account_move_line.move_id = account_move.id"
sql_where += " AND account_move.state = %(target_move)s"
search_params.update({'target_move': target_move})
sql_groupby = "GROUP BY account_move_line.partner_id"
search_params.update({'account_id': account_id})
query = ' '.join((sql_select, sql_joins, sql_where, sql_groupby))
self.cursor.execute(query, search_params)
res = self.cursor.dictfetchall()
if res:
for row in res:
final_res[row['partner_id']] = row
return final_res
def _get_filter_type(self, result_selection):
filter_type = ('payable', 'receivable')
if result_selection == 'customer':
filter_type = ('receivable',)
if result_selection == 'supplier':
filter_type = ('payable',)
return filter_type
def _get_partners_comparison_details(self, data, account_ids, target_move,
comparison_filter, index,
partner_filter_ids=False):
"""
@param data: data of the wizard form
@param account_ids: ids of the accounts to get details
@param comparison_filter: selected filter on the form for
the comparison (filter_no, filter_year, filter_period, filter_date)
        @param index: index of the fields to get (i.e. comp1_fiscalyear_id
where 1 is the index)
@param partner_filter_ids: list of ids of partners to select
@return: dict of account details (key = account id)
"""
fiscalyear = self._get_info(
data, "comp%s_fiscalyear_id" % (index,), 'account.fiscalyear')
start_period = self._get_info(
data, "comp%s_period_from" % (index,), 'account.period')
stop_period = self._get_info(
data, "comp%s_period_to" % (index,), 'account.period')
start_date = self._get_form_param("comp%s_date_from" % (index,), data)
stop_date = self._get_form_param("comp%s_date_to" % (index,), data)
init_balance = self.is_initial_balance_enabled(comparison_filter)
comp_params = {}
accounts_details_by_ids = defaultdict(dict)
if comparison_filter != 'filter_no':
start_period, stop_period, start, stop = \
self._get_start_stop_for_filter(
comparison_filter, fiscalyear, start_date, stop_date,
start_period, stop_period)
details_filter = comparison_filter
if comparison_filter == 'filter_year':
details_filter = 'filter_no'
initial_balance_mode = init_balance \
and self._get_initial_balance_mode(start) or False
accounts_by_ids = self._get_account_details(
account_ids, target_move, fiscalyear, details_filter, start,
stop, initial_balance_mode)
partner_details_by_ids = self._get_account_partners_details(
accounts_by_ids, details_filter,
target_move, start, stop, initial_balance_mode,
partner_filter_ids=partner_filter_ids,
display_partner=data['form']['display_partner']
)
for account_id in account_ids:
accounts_details_by_ids[account_id][
'account'] = accounts_by_ids[account_id]
accounts_details_by_ids[account_id][
'partners_amounts'] = partner_details_by_ids[account_id]
comp_params = {
'comparison_filter': comparison_filter,
'fiscalyear': fiscalyear,
'start': start,
'stop': stop,
'initial_balance_mode': initial_balance_mode,
}
return accounts_details_by_ids, comp_params
def compute_partner_balance_data(self, data, filter_report_type=None):
lang = self.localcontext.get('lang')
lang_ctx = lang and {'lang': lang} or {}
new_ids = data['form']['account_ids'] or data[
'form']['chart_account_id']
max_comparison = self._get_form_param(
'max_comparison', data, default=0)
main_filter = self._get_form_param('filter', data, default='filter_no')
comp_filters, nb_comparisons, comparison_mode = self._comp_filters(
data, max_comparison)
fiscalyear = self.get_fiscalyear_br(data)
start_period = self.get_start_period_br(data)
stop_period = self.get_end_period_br(data)
target_move = self._get_form_param('target_move', data, default='all')
start_date = self._get_form_param('date_from', data)
stop_date = self._get_form_param('date_to', data)
chart_account = self._get_chart_account_id_br(data)
result_selection = self._get_form_param('result_selection', data)
partner_ids = self._get_form_param('partner_ids', data)
filter_type = self._get_filter_type(result_selection)
start_period, stop_period, start, stop = \
self._get_start_stop_for_filter(
main_filter, fiscalyear, start_date, stop_date, start_period,
stop_period)
initial_balance = self.is_initial_balance_enabled(main_filter)
initial_balance_mode = initial_balance \
and self._get_initial_balance_mode(start) or False
# Retrieving accounts
account_ids = self.get_all_accounts(
new_ids, only_type=filter_type,
filter_report_type=filter_report_type)
        # get details for each account: total of debit / credit / balance
accounts_by_ids = self._get_account_details(
account_ids, target_move, fiscalyear, main_filter, start, stop,
initial_balance_mode, context=lang_ctx)
partner_details_by_ids = self._get_account_partners_details(
accounts_by_ids, main_filter, target_move, start, stop,
initial_balance_mode, partner_filter_ids=partner_ids,
display_partner=data['form']['display_partner'])
comparison_params = []
comp_accounts_by_ids = []
for index in range(max_comparison):
if comp_filters[index] != 'filter_no':
comparison_result, comp_params = self.\
_get_partners_comparison_details(
data, account_ids,
target_move,
comp_filters[index],
index,
partner_filter_ids=partner_ids)
comparison_params.append(comp_params)
comp_accounts_by_ids.append(comparison_result)
objects = self.pool.get('account.account').browse(self.cursor,
self.uid,
account_ids,
context=lang_ctx)
init_balance_accounts = {}
comparisons_accounts = {}
partners_order_accounts = {}
partners_amounts_accounts = {}
debit_accounts = {}
credit_accounts = {}
balance_accounts = {}
for account in objects:
if not account.parent_id: # hide top level account
continue
debit_accounts[account.id] = accounts_by_ids[account.id]['debit']
credit_accounts[account.id] = accounts_by_ids[account.id]['credit']
balance_accounts[account.id] = \
accounts_by_ids[account.id]['balance']
init_balance_accounts[account.id] = accounts_by_ids[
account.id].get('init_balance', 0.0)
partners_amounts_accounts[account.id] =\
partner_details_by_ids[account.id]
comp_accounts = []
for comp_account_by_id in comp_accounts_by_ids:
values = comp_account_by_id.get(account.id)
values['account'].update(
self._get_diff(balance_accounts[account.id],
values['account'].get('balance', 0.0)))
comp_accounts.append(values)
for partner_id, partner_values in \
values['partners_amounts'].copy().iteritems():
partners_amounts_account =\
partners_amounts_accounts[account.id]
base_partner_balance =\
partners_amounts_account[partner_id]['balance']\
if partners_amounts_accounts.get(account.id)\
and partners_amounts_accounts.get(account.id)\
.get(partner_id) else 0.0
partner_values.update(self._get_diff(
base_partner_balance,
partner_values.get('balance', 0.0)))
values['partners_amounts'][
partner_id].update(partner_values)
comparisons_accounts[account.id] = comp_accounts
all_partner_ids = reduce(add, [comp['partners_amounts'].keys()
for comp in comp_accounts],
partners_amounts_accounts[account.id]
.keys())
partners_order_accounts[account.id] = \
self._order_partners(all_partner_ids)
context_report_values = {
'fiscalyear': fiscalyear,
'start_date': start_date,
'stop_date': stop_date,
'start_period': start_period,
'stop_period': stop_period,
'chart_account': chart_account,
'comparison_mode': comparison_mode,
'nb_comparison': nb_comparisons,
'comp_params': comparison_params,
'initial_balance_mode': initial_balance_mode,
'compute_diff': self._get_diff,
'init_balance_accounts': init_balance_accounts,
'comparisons_accounts': comparisons_accounts,
'partners_order_accounts': partners_order_accounts,
'partners_amounts_accounts': partners_amounts_accounts,
'debit_accounts': debit_accounts,
'credit_accounts': credit_accounts,
'balance_accounts': balance_accounts,
}
return objects, new_ids, context_report_values
| open-synergy/account-financial-reporting | account_financial_report_webkit/report/common_partner_balance_reports.py | Python | agpl-3.0 | 17,006 |
def test_stub():
return True
| openspending/datapackage-pipelines-fiscal | tests/test_main.py | Python | mit | 32 |
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe import _
def execute(filters=None):
if not filters: filters = {}
if not filters.get("date"):
frappe.throw(_("Please select date"))
columns = get_columns(filters)
date = filters.get("date")
data = []
if not filters.get("shareholder"):
pass
else:
transfers = get_all_transfers(date, filters.get("shareholder"))
for transfer in transfers:
if transfer.transfer_type == 'Transfer':
if transfer.from_shareholder == filters.get("shareholder"):
transfer.transfer_type += ' to {}'.format(transfer.to_shareholder)
else:
transfer.transfer_type += ' from {}'.format(transfer.from_shareholder)
row = [filters.get("shareholder"), transfer.date, transfer.transfer_type,
transfer.share_type, transfer.no_of_shares, transfer.rate, transfer.amount,
transfer.company, transfer.name]
data.append(row)
return columns, data
def get_columns(filters):
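	# Frappe script-report columns are declared as "Label:Fieldtype/Options:Width" strings.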
columns = [
_("Shareholder") + ":Link/Shareholder:150",
_("Date") + ":Date:100",
_("Transfer Type") + "::140",
_("Share Type") + "::90",
_("No of Shares") + "::90",
_("Rate") + ":Currency:90",
_("Amount") + ":Currency:90",
_("Company") + "::150",
_("Share Transfer") + ":Link/Share Transfer:90"
]
return columns
def get_all_transfers(date, shareholder):
condition = ' '
# if company:
# condition = 'AND company = %(company)s '
return frappe.db.sql("""SELECT * FROM `tabShare Transfer`
WHERE (DATE(date) <= %(date)s AND from_shareholder = %(shareholder)s {condition})
OR (DATE(date) <= %(date)s AND to_shareholder = %(shareholder)s {condition})
ORDER BY date""".format(condition=condition),
{'date': date, 'shareholder': shareholder}, as_dict=1)
| mhbu50/erpnext | erpnext/accounts/report/share_ledger/share_ledger.py | Python | gpl-3.0 | 1,816 |
import marshmallow
from marshmallow import Schema as MarshmallowSchema
from marshmallow import fields # noqa
from marshmallow.exceptions import ValidationError as MarshmallowValidationError
from rest_framework.serializers import BaseSerializer, ValidationError
IS_MARSHMALLOW_LT_3 = int(marshmallow.__version__.split('.')[0]) < 3
__version__ = '4.0.2'
_schema_kwargs = (
'only', 'exclude', 'dump_only', 'load_only', 'context', 'partial'
)
class Schema(BaseSerializer, MarshmallowSchema):
def __new__(cls, *args, **kwargs):
# We're overriding the DRF implementation here, because ListSerializer
# clashes with Nested implementation.
kwargs.pop('many', False)
return super(Schema, cls).__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
schema_kwargs = {
'many': kwargs.get('many', False)
}
# Remove any kwargs that are only valid for marshmallow schemas
for key in _schema_kwargs:
if key in kwargs:
schema_kwargs[key] = kwargs.pop(key)
super(Schema, self).__init__(*args, **kwargs)
# XXX: Remove parent attribute so that Field.root resolves properly
# https://github.com/marshmallow-code/django-rest-marshmallow/issues/131#issuecomment-601089549
delattr(self, 'parent')
MarshmallowSchema.__init__(self, **schema_kwargs)
def to_representation(self, instance):
if IS_MARSHMALLOW_LT_3:
return self.dump(instance).data
return self.dump(instance)
def to_internal_value(self, data):
if IS_MARSHMALLOW_LT_3:
ret = self.load(data)
if ret.errors:
raise ValidationError(ret.errors)
return ret.data
try:
return self.load(data)
except MarshmallowValidationError as err:
raise ValidationError(err.messages)
@property
def data(self):
# We're overriding the default implementation here, because the
# '_data' property clashes with marshmallow's implementation.
if hasattr(self, 'initial_data') and not hasattr(self, '_validated_data'):
msg = (
'When a serializer is passed a `data` keyword argument you '
'must call `.is_valid()` before attempting to access the '
'serialized `.data` representation.\n'
'You should either call `.is_valid()` first, '
'or access `.initial_data` instead.'
)
raise AssertionError(msg)
if not hasattr(self, '_serializer_data'):
if self.instance is not None and not getattr(self, '_errors', None):
self._serializer_data = self.to_representation(self.instance)
elif hasattr(self, '_validated_data') and not getattr(self, '_errors', None):
self._serializer_data = self.to_representation(self.validated_data)
else:
self._serializer_data = self.get_initial()
return self._serializer_data
@property
def context(self):
return self._context
@context.setter
def context(self, value):
self._context = value
get_attribute = MarshmallowSchema.get_attribute
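# Hypothetical usage sketch (the schema and field names below are illustrative,
# not part of this module): subclasses behave like DRF serializers backed by
# marshmallow fields.
#
#     class BookSchema(Schema):
#         title = fields.String()
#         published = fields.Date()
#
#     serializer = BookSchema(data={'title': 'T', 'published': '2020-01-01'})
#     serializer.is_valid(raise_exception=True)
#     serializer.validated_data   # cleaned dict from marshmallow's load()
#     BookSchema(instance).data   # serialized via marshmallow's dump()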
| tomchristie/django-rest-marshmallow | rest_marshmallow/__init__.py | Python | isc | 3,269 |
# coding=utf-8
from functools import partial
import Pyro4
from pyage.core import address
from pyage.core.stop_condition import StepLimitStopCondition
from pyage_forams.solutions.distributed.neighbour_matcher import Neighbour2dMatcher
from pyage_forams.solutions.agent.remote_aggegate import create_remote_agent
from pyage_forams.solutions.distributed.request import create_dispatcher
from pyage_forams.solutions.environment import environment_factory, Environment2d
from pyage_forams.solutions.foram import create_forams
from pyage_forams.solutions.genom import GenomFactory
from pyage_forams.solutions.insolation_meter import StaticInsolation
from pyage_forams.conf.distributed2d.common import *
factory = GenomFactory(chambers_limit=5)
genom_factory = lambda: factory.generate
forams = create_forams(1, initial_energy=5)
agents = partial(create_remote_agent, "upperright")
insolation_meter = StaticInsolation
environment = environment_factory(regeneration_factor=0.1, clazz=Environment2d)
neighbour_matcher = Neighbour2dMatcher
request_dispatcher = create_dispatcher()
stop_condition = lambda: StepLimitStopCondition(90)
reproduction_minimum = lambda: 10
movement_energy = lambda: 0.25
growth_minimum = lambda: 10
energy_need = lambda: 0.5
algae_limit = lambda: 2
newborn_limit = lambda: 9
reproduction_probability = lambda: 0.8
growth_probability = lambda: 0.8
growth_cost_factor = lambda: 0.5
capacity_factor = lambda: 1.1
initial_algae_probability = lambda: 0.1
address_provider = address.SequenceAddressProvider
ns_hostname = lambda: "127.0.0.1"
pyro_daemon = Pyro4.Daemon()
daemon = lambda: pyro_daemon
neighbours = lambda: {"left": "upperleft", "lower": "lowerright"}
| maciek123/pyage-forams | pyage_forams/conf/distributed2d/upperright.py | Python | gpl-2.0 | 1,686 |
"""
The child process.
"""
from __future__ import unicode_literals
from prompt_toolkit.eventloop.base import EventLoop
from prompt_toolkit.eventloop.posix_utils import PosixStdinReader
from prompt_toolkit.document import Document
from pygments.token import Token
from .key_mappings import prompt_toolkit_key_to_vt100_key
from .screen import BetterScreen
from .stream import BetterStream
from .utils import set_terminal_size, pty_make_controlling_tty
import os
import resource
import signal
import sys
import time
import traceback
__all__ = (
'Process',
)
class Process(object):
"""
Child process.
Functionality for parsing the vt100 output (the Pyte screen and stream), as
well as sending input to the process.
Usage:
p = Process(eventloop, ...):
p.start()
:param eventloop: Prompt_toolkit eventloop. Used for executing blocking
stuff in an executor, as well as adding additional readers to the
eventloop.
:param invalidate: When the screen content changes, and the renderer needs
to redraw the output, this callback is called.
    :param exec_func: Callable that is called in the child process. (Usually,
this calls execv.)
:param bell_func: Called when the process does a `bell`.
:param done_callback: Called when the process terminates.
"""
def __init__(self, eventloop, invalidate, exec_func, bell_func=None, done_callback=None):
assert isinstance(eventloop, EventLoop)
assert callable(invalidate)
assert callable(exec_func)
assert bell_func is None or callable(bell_func)
assert done_callback is None or callable(done_callback)
self.eventloop = eventloop
self.invalidate = invalidate
self.exec_func = exec_func
self.done_callback = done_callback
self.pid = None
self.is_terminated = False
self.suspended = False
self.slow_motion = False # For debugging
# Create pseudo terminal for this pane.
self.master, self.slave = os.openpty()
# Master side -> attached to terminal emulator.
self._reader = PosixStdinReader(self.master)
# Create output stream and attach to screen
self.sx = 120
self.sy = 24
self.screen = BetterScreen(self.sx, self.sy,
write_process_input=self.write_input,
bell_func=bell_func)
self.stream = BetterStream(self.screen)
self.stream.attach(self.screen)
def start(self):
"""
Start the process: fork child.
"""
self.set_size(self.sx, self.sy)
self._start()
self._connect_reader()
self._waitpid()
@classmethod
def from_command(cls, eventloop, invalidate, command, done_callback,
bell_func=None, before_exec_func=None):
"""
Create Process from command,
e.g. command=['python', '-c', 'print("test")']
:param before_exec_func: Function that is called before `exec` in the process fork.
"""
assert isinstance(command, list)
def execv():
if before_exec_func:
before_exec_func()
for p in os.environ['PATH'].split(':'):
path = os.path.join(p, command[0])
if os.path.exists(path) and os.access(path, os.X_OK):
os.execv(path, command)
return cls(eventloop, invalidate, execv,
bell_func=bell_func, done_callback=done_callback)
def _start(self):
"""
Create fork and start the child process.
"""
pid = os.fork()
if pid == 0:
self._in_child()
elif pid > 0:
# In parent.
os.close(self.slave)
self.slave = None
# We wait a very short while, to be sure the child had the time to
# call _exec. (Otherwise, we are still sharing signal handlers and
# FDs.) Resizing the pty, when the child is still in our Python
# code and has the signal handler from prompt_toolkit, but closed
# the 'fd' for 'call_from_executor', will cause OSError.
time.sleep(0.1)
self.pid = pid
def _waitpid(self):
"""
Create an executor that waits and handles process termination.
"""
def wait_for_finished():
" Wait for PID in executor. "
os.waitpid(self.pid, 0)
self.eventloop.call_from_executor(done)
def done():
" PID received. Back in the main thread. "
# Close pty and remove reader.
os.close(self.master)
self.eventloop.remove_reader(self.master)
self.master = None
# Callback.
self.is_terminated = True
self.done_callback()
self.eventloop.run_in_executor(wait_for_finished)
def set_size(self, width, height):
"""
Set terminal size.
"""
assert isinstance(width, int)
assert isinstance(height, int)
if self.master is not None:
set_terminal_size(self.master, height, width)
self.screen.resize(lines=height, columns=width)
self.screen.lines = height
self.screen.columns = width
self.sx = width
self.sy = height
def _in_child(self):
" Will be executed in the forked child. "
os.close(self.master)
# Remove signal handler for SIGWINCH as early as possible.
# (We don't want this to be triggered when execv has not been called
# yet.)
signal.signal(signal.SIGWINCH, 0)
pty_make_controlling_tty(self.slave)
# In the fork, set the stdin/out/err to our slave pty.
os.dup2(self.slave, 0)
os.dup2(self.slave, 1)
os.dup2(self.slave, 2)
# Execute in child.
try:
self._close_file_descriptors()
self.exec_func()
except Exception:
traceback.print_exc()
time.sleep(5)
os._exit(1)
os._exit(0)
def _close_file_descriptors(self):
# Do not allow child to inherit open file descriptors from parent.
        # (In case we keep running Python code, we shouldn't close them,
        # because the garbage collector is still active and will close them
        # eventually.)
max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[-1]
try:
os.closerange(3, max_fd)
except OverflowError:
            # On OS X, max_fd can return very big values that closerange
            # doesn't understand, e.g. 9223372036854775807. In this case, just
# use 4096. This is what Linux systems report, and should be
# sufficient. (I hope...)
os.closerange(3, 4096)
def write_input(self, data, paste=False):
"""
Write user key strokes to the input.
:param data: (text, not bytes.) The input.
        :param paste: When True and the process running here understands
            bracketed paste, send the data as pasted text.
"""
# send as bracketed paste?
if paste and self.screen.bracketed_paste_enabled:
data = '\x1b[200~' + data + '\x1b[201~'
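            # \x1b[200~ and \x1b[201~ are the standard bracketed-paste start/end
            # markers, which let the receiving application distinguish pasted text
            # from typed keystrokes.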
while self.master is not None:
try:
os.write(self.master, data.encode('utf-8'))
except OSError as e:
# This happens when the window resizes and a SIGWINCH was received.
# We get 'Error: [Errno 4] Interrupted system call'
if e.errno == 4:
continue
return
def write_key(self, key):
"""
Write prompt_toolkit Key.
"""
data = prompt_toolkit_key_to_vt100_key(
key, application_mode=self.screen.in_application_mode)
self.write_input(data)
def _connect_reader(self):
"""
Process stdout output from the process.
"""
if self.master is not None:
self.eventloop.add_reader(self.master, self._read)
def _read(self):
"""
Read callback, called by the eventloop.
"""
if self.slow_motion:
# Read characters one-by-one in slow motion.
d = self._reader.read(1)
else:
d = self._reader.read()
if d:
self.stream.feed(d)
self.invalidate()
else:
# End of stream. Remove child.
self.eventloop.remove_reader(self.master)
# In case of slow motion, disconnect for .5 seconds from the event loop.
if self.slow_motion:
self.eventloop.remove_reader(self.master)
def connect_with_delay():
time.sleep(.1)
self.eventloop.call_from_executor(self._connect_reader)
self.eventloop.run_in_executor(connect_with_delay)
def suspend(self):
"""
Suspend process. Stop reading stdout. (Called when going into copy mode.)
"""
self.suspended = True
self.eventloop.remove_reader(self.master)
def resume(self):
"""
Resume from 'suspend'.
"""
if self.suspended and self.master is not None:
self._connect_reader()
self.suspended = False
def get_cwd(self):
"""
The current working directory for this process. (Or `None` when
unknown.)
"""
return get_cwd_for_pid(self.pid)
def get_name(self):
"""
The name for this process. (Or `None` when unknown.)
"""
# TODO: Maybe cache for short time.
if self.master is not None:
return get_name_for_fd(self.master)
def send_signal(self, signal):
" Send signal to running process. "
assert isinstance(signal, int), type(signal)
if self.pid and not self.is_terminated:
os.kill(self.pid, signal)
def create_copy_document(self):
"""
Create a Document instance and token list that can be used in copy
mode.
"""
data_buffer = self.screen.pt_screen.data_buffer
text = []
token_list = []
first_row = min(data_buffer.keys())
last_row = max(data_buffer.keys())
def token_has_no_background(token):
try:
# Token looks like ('C', color, bgcolor, bold, underline, ...)
return token[2] is None
except IndexError:
return True
for row_index in range(first_row, last_row + 1):
row = data_buffer[row_index]
max_column = max(row.keys()) if row else 0
# Remove trailing whitespace. (If the background is transparent.)
row_data = [row[x] for x in range(0, max_column + 1)]
while (row_data and row_data[-1].char.isspace() and
token_has_no_background(row_data[-1].token)):
row_data.pop()
# Walk through row.
char_iter = iter(range(len(row_data)))
for x in char_iter:
c = row[x]
text.append(c.char)
token_list.append((c.token, c.char))
# Skip next cell when this is a double width character.
if c.width == 2:
next(char_iter)
# Add newline.
text.append('\n')
token_list.append((Token, '\n'))
# Remove newlines at the end.
while text and text[-1] == '\n':
text.pop()
token_list.pop()
# Calculate cursor position.
d = Document(text=''.join(text))
return Document(text=d.text,
cursor_position=d.translate_row_col_to_index(
row=self.screen.pt_screen.cursor_position.y,
col=self.screen.pt_screen.cursor_position.x)), token_list
def get_cwd_for_pid(pid):
"""
Return the current working directory for a given process ID.
"""
if sys.platform in ('linux', 'linux2'):
try:
return os.readlink('/proc/%s/cwd' % pid)
except OSError:
pass
def get_name_for_fd(fd):
"""
    Return the name of the foreground process attached to the given file descriptor.
"""
if sys.platform in ('linux', 'linux2'):
pgrp = os.tcgetpgrp(fd)
try:
with open('/proc/%s/cmdline' % pgrp, 'rb') as f:
return f.read().decode('utf-8', 'ignore').split('\0')[0]
except IOError:
pass
|
jonathanslenders/pymux-test
|
pymux/process.py
|
Python
|
bsd-3-clause
| 12,651
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b. Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("WORLDBITD", "worldbitd"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in xrange(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print "First height 2 block accepted by both nodes"
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in xrange(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print "Second height 2 block accepted only from whitelisted peer"
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in xrange(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print "Unrequested more-work block accepted from non-whitelisted peer"
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print "Successfully reorged to length 3 chain from whitelisted peer"
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in xrange(2):
for i in xrange(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print "Unrequested block too far-ahead not processed"
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print "Unrequested block far ahead of tip accepted from whitelisted peer"
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print "Unrequested block that would complete more-work chain was ignored"
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print "Inv at tip triggered getdata for unprocessed block"
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print "Successfully reorged to longer chain from non-whitelisted peer"
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
|
worldbit/worldbit
|
qa/rpc-tests/p2p-acceptblock.py
|
Python
|
mit
| 12,336
|
import os
from subprocess import Popen
try:
import thread
except ImportError:
import _thread as thread # Py3K changed it.
import platform
from .util import kill_pid
from pulsar.managers.base.directory import DirectoryBaseManager
from pulsar.managers import status
from logging import getLogger
log = getLogger(__name__)
JOB_FILE_SUBMITTED = "submitted"
JOB_FILE_PID = "pid"
# Job Locks (for status updates). Following methods are locked.
# _finish_execution(self, job_id)
# _get_status(self, job_id)
# _is_cancelled(self, job_id)
# _record_pid(self, job_id, pid)
# _get_pid_for_killing_or_cancel(self, job_id)
#
class Manager(DirectoryBaseManager):
"""
A simple job manager that just directly runs jobs as given (no
queueing). Preserved for compatibilty with older versions of Pulsar
client code where Galaxy is used to maintain queue (like Galaxy's
local job runner).
"""
manager_type = "unqueued"
def __init__(self, name, app, **kwds):
super(Manager, self).__init__(name, app, **kwds)
def _record_submission(self, job_id):
self._job_directory(job_id).store_metadata(JOB_FILE_SUBMITTED, 'true')
def __get_pid(self, job_id):
pid = None
try:
pid = self._job_directory(job_id).load_metadata(JOB_FILE_PID)
if pid is not None:
pid = int(pid)
except:
pass
return pid
def setup_job(self, input_job_id, tool_id, tool_version):
job_id = self._get_job_id(input_job_id)
return self._setup_job_for_job_id(job_id, tool_id, tool_version)
def _get_job_id(self, galaxy_job_id):
return str(self.id_assigner(galaxy_job_id))
def _get_job_lock(self, job_id):
return self._job_directory(job_id).lock()
def get_status(self, job_id):
with self._get_job_lock(job_id):
return self._get_status(job_id)
def kill(self, job_id):
log.info("Attempting to kill job with job_id %s" % job_id)
job_lock = self._get_job_lock(job_id)
with job_lock:
pid = self._get_pid_for_killing_or_cancel(job_id)
if pid:
log.info("Attempting to kill pid %s" % pid)
kill_pid(pid)
def _monitor_execution(self, job_id, proc, stdout, stderr):
try:
proc.wait()
stdout.close()
stderr.close()
return_code = proc.returncode
# TODO: This is invalid if we have written a job script.
self._write_return_code(job_id, str(return_code))
finally:
with self._get_job_lock(job_id):
self._finish_execution(job_id)
# with job lock
def _finish_execution(self, job_id):
self._job_directory(job_id).remove_metadata(JOB_FILE_SUBMITTED)
self._job_directory(job_id).remove_metadata(JOB_FILE_PID)
# with job lock
def _get_status(self, job_id):
job_directory = self._job_directory(job_id)
if self._was_cancelled(job_id):
job_status = status.CANCELLED
elif job_directory.has_metadata(JOB_FILE_PID):
job_status = status.RUNNING
elif job_directory.has_metadata(JOB_FILE_SUBMITTED):
job_status = status.QUEUED
else:
job_status = status.COMPLETE
return job_status
# with job lock
def _was_cancelled(self, job_id):
return super(Manager, self)._was_cancelled(job_id)
# with job lock
def _record_pid(self, job_id, pid):
self._job_directory(job_id).store_metadata(JOB_FILE_PID, str(pid))
# with job lock
def _get_pid_for_killing_or_cancel(self, job_id):
job_status = self._get_status(job_id)
if job_status not in [status.RUNNING, status.QUEUED]:
return
pid = self.__get_pid(job_id)
self._record_cancel(job_id)
if pid is None:
self._job_directory(job_id).remove_metadata(JOB_FILE_SUBMITTED)
return pid
    def _run(self, job_id, command_line, run_async=True):  # 'async' is a reserved word in Python 3.7+
with self._get_job_lock(job_id):
if self._was_cancelled(job_id):
return
job_directory = self.job_directory(job_id)
working_directory = job_directory.working_directory()
stdout = self._open_standard_output(job_id)
stderr = self._open_standard_error(job_id)
proc = execute(command_line=command_line,
working_directory=working_directory,
stdout=stdout,
stderr=stderr)
with self._get_job_lock(job_id):
self._record_pid(job_id, proc.pid)
        if run_async:
thread.start_new_thread(self._monitor_execution, (job_id, proc, stdout, stderr))
else:
self._monitor_execution(job_id, proc, stdout, stderr)
def launch(self, job_id, command_line, submit_params={}, dependencies_description=None, env=[]):
command_line = self._prepare_run(job_id, command_line, dependencies_description=dependencies_description, env=env)
self._run(job_id, command_line)
def _prepare_run(self, job_id, command_line, dependencies_description, env):
self._check_execution_with_tool_file(job_id, command_line)
self._record_submission(job_id)
if platform.system().lower() == "windows":
# TODO: Don't ignore requirements and env without warning. Ideally
# process them or at least warn about them being ignored.
command_line = self._expand_command_line(command_line, dependencies_description)
else:
command_line = self._setup_job_file(job_id, command_line, dependencies_description=dependencies_description, env=env)
return command_line
def execute(command_line, working_directory, stdout, stderr):
preexec_fn = None
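    # On POSIX systems run the child in its own process group (os.setpgrp as a
    # preexec_fn), presumably so the job can be signalled/killed independently of
    # the manager process that spawned it.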
if not (platform.system() == 'Windows'):
preexec_fn = os.setpgrp
proc = Popen(args=command_line,
shell=True,
cwd=working_directory,
stdout=stdout,
stderr=stderr,
preexec_fn=preexec_fn)
return proc
__all__ = ['Manager']
|
jmchilton/pulsar
|
pulsar/managers/unqueued.py
|
Python
|
apache-2.0
| 6,175
|
#!/usr/bin/python
#------------------------------------------------------------------------------
class Solution:
def divide(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
positive = (dividend < 0) is (divisor < 0)
dividend, divisor = abs(dividend), abs(divisor)
res = 0
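        # Build the quotient by repeatedly subtracting doubling multiples of the
        # divisor (divisor, 2*divisor, 4*divisor, ...), which reduces the work from
        # O(quotient) single subtractions to roughly O(log^2 quotient) steps.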
while dividend >= divisor:
temp, i = divisor, 1
while dividend >= temp:
dividend -= temp
res += i
i <<= 1
temp <<= 1
if not positive:
res = -res
return min(max(-2147483648, res), 2147483647)
#------------------------------------------------------------------------------
#Testing
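# A minimal, hypothetical driver for the Solution above; the inputs and expected
# values are illustrative assumptions, not part of the original exercise.
if __name__ == '__main__':
    sol = Solution()
    print(sol.divide(10, 3))            # expected: 3
    print(sol.divide(7, -3))            # expected: -2
    print(sol.divide(-2147483648, -1))  # expected: 2147483647 (clamped to INT_MAX)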
|
kyle8998/Practice-Coding-Questions
|
leetcode/29-Medium-Divide-Two-Integers/answer.py
|
Python
|
unlicense
| 793
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import shutil
from collections import defaultdict
from pathspec import PathSpec
from pathspec.gitignore import GitIgnorePattern
from twitter.common.collections.orderedset import OrderedSet
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.targets.annotation_processor import AnnotationProcessor
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.jvm.tasks.ivy_task_mixin import IvyTaskMixin
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.build_environment import get_buildroot
from pants.base.build_file import BuildFile
from pants.base.exceptions import TaskError
from pants.binaries import binary_util
from pants.build_graph.address import BuildFileAddress
from pants.build_graph.resources import Resources
from pants.util.dirutil import safe_mkdir, safe_walk
logger = logging.getLogger(__name__)
# We use custom checks for scala and java targets here for 2 reasons:
# 1.) jvm_binary could have either a scala or java source file attached so we can't do a pure
# target type test
# 2.) the target may be under development in which case it may not have sources yet - it's pretty
#     common to write a BUILD and ./pants idea the target inside to start development at which
#     point there are no source files yet - and the developer intends to add them using the IDE.
def is_scala(target):
return target.has_sources('.scala') or target.is_scala
def is_java(target):
return target.has_sources('.java') or target.is_java
class IdeGen(IvyTaskMixin, NailgunTask):
@classmethod
def subsystem_dependencies(cls):
return super(IdeGen, cls).subsystem_dependencies() + (ScalaPlatform, )
@classmethod
def register_options(cls, register):
super(IdeGen, cls).register_options(register)
register('--project-name', default='project',
help='Specifies the name to use for the generated project.')
register('--project-dir',
help='Specifies the directory to output the generated project files to.')
register('--project-cwd',
help='Specifies the directory the generated project should use as the cwd for '
'processes it launches. Note that specifying this trumps --{0}-project-dir '
'and not all project related files will be stored there.'
.format(cls.options_scope))
register('--intransitive', type=bool,
help='Limits the sources included in the generated project to just '
'those owned by the targets specified on the command line.')
register('--python', type=bool,
help='Adds python support to the generated project configuration.')
register('--java', type=bool, default=True,
help='Includes java sources in the project; otherwise compiles them and adds them '
'to the project classpath.')
register('--java-language-level', type=int, default=8,
help='Sets the java language and jdk used to compile the project\'s java sources.')
register('--java-jdk-name', default=None,
help='Sets the jdk used to compile the project\'s java sources. If unset the default '
'jdk name for the --java-language-level is used')
register('--scala', type=bool, default=True,
help='Includes scala sources in the project; otherwise compiles them and adds them '
'to the project classpath.')
register('--use-source-root', type=bool,
help='Use source roots to collapse sourcepaths in project and determine '
'which paths are used for tests. This is usually what you want if your repo '
' uses a maven style directory layout.')
register('--debug_port', type=int, default=5005,
help='Port to use for launching tasks under the debugger.')
register('--source-jars', type=bool, default=True,
help='Pull source jars from external dependencies into the project.')
register('--javadoc-jars', type=bool, default=True,
help='Pull javadoc jars from external dependencies into the project')
# Options intended to be configured primarily in pants.ini
register('--python_source_paths', type=list, advanced=True,
help='Always add these paths to the IDE as Python sources.')
register('--python_test_paths', type=list, advanced=True,
help='Always add these paths to the IDE as Python test sources.')
register('--python_lib_paths', type=list, advanced=True,
help='Always add these paths to the IDE for Python libraries.')
register('--extra-jvm-source-paths', type=list, advanced=True,
help='Always add these paths to the IDE for Java sources.')
register('--extra-jvm-test-paths', type=list, advanced=True,
help='Always add these paths to the IDE for Java test sources.')
@classmethod
def prepare(cls, options, round_manager):
super(IdeGen, cls).prepare(options, round_manager)
if options.python:
round_manager.require('python')
if options.java:
round_manager.require('java')
if options.scala:
round_manager.require('scala')
class Error(TaskError):
"""IdeGen Error."""
class TargetUtil(object):
def __init__(self, context):
self.context = context
@property
def build_graph(self):
return self.context.build_graph
def get_all_addresses(self, buildfile):
return set(self.context.address_mapper.addresses_in_spec_path(buildfile.spec_path))
def get(self, address):
self.context.build_graph.inject_address_closure(address)
return self.context.build_graph.get_target(address)
def __init__(self, *args, **kwargs):
super(IdeGen, self).__init__(*args, **kwargs)
self.project_name = self.get_options().project_name
self.python = self.get_options().python
self.skip_java = not self.get_options().java
self.skip_scala = not self.get_options().scala
self.use_source_root = self.get_options().use_source_root
self.java_language_level = self.get_options().java_language_level
if self.get_options().java_jdk_name:
self.java_jdk = self.get_options().java_jdk_name
else:
self.java_jdk = '1.{}'.format(self.java_language_level)
# Always tack on the project name to the work dir so each project gets its own linked jars,
# etc. See https://github.com/pantsbuild/pants/issues/564
if self.get_options().project_dir:
self.gen_project_workdir = os.path.abspath(
os.path.join(self.get_options().project_dir, self.project_name))
else:
self.gen_project_workdir = os.path.abspath(
os.path.join(self.workdir, self.__class__.__name__, self.project_name))
self.cwd = (
os.path.abspath(self.get_options().project_cwd) if
self.get_options().project_cwd else self.gen_project_workdir
)
self.intransitive = self.get_options().intransitive
self.debug_port = self.get_options().debug_port
def resolve_jars(self, targets):
executor = self.create_java_executor()
confs = ['default']
if self.get_options().source_jars:
confs.append('sources')
if self.get_options().javadoc_jars:
confs.append('javadoc')
compile_classpath = ClasspathProducts(self.get_options().pants_workdir)
self.resolve(executor=executor,
targets=targets,
classpath_products=compile_classpath,
confs=confs)
return compile_classpath
def _prepare_project(self):
targets, self._project = self.configure_project(
self.context.targets(),
self.debug_port)
self.configure_compile_context(targets)
def configure_project(self, targets, debug_port):
jvm_targets = [t for t in targets if t.has_label('jvm') or t.has_label('java') or
isinstance(t, Resources)]
if self.intransitive:
jvm_targets = set(self.context.target_roots).intersection(jvm_targets)
build_ignore_patterns = self.context.options.for_global_scope().ignore_patterns or []
project = Project(self.project_name,
self.python,
self.skip_java,
self.skip_scala,
self.use_source_root,
get_buildroot(),
debug_port,
self.context,
jvm_targets,
not self.intransitive,
self.TargetUtil(self.context),
PathSpec.from_lines(GitIgnorePattern, build_ignore_patterns))
if self.python:
python_source_paths = self.get_options().python_source_paths
python_test_paths = self.get_options().python_test_paths
python_lib_paths = self.get_options().python_lib_paths
project.configure_python(python_source_paths, python_test_paths, python_lib_paths)
extra_source_paths = self.get_options().extra_jvm_source_paths
extra_test_paths = self.get_options().extra_jvm_test_paths
all_targets = project.configure_jvm(extra_source_paths, extra_test_paths)
return all_targets, project
def configure_compile_context(self, targets):
"""
Trims the context's target set to just those targets needed as jars on the IDE classpath.
All other targets only contribute their external jar dependencies and excludes to the
classpath definition.
"""
def is_cp(target):
return (
target.is_codegen or
# Some IDEs need annotation processors pre-compiled, others are smart enough to detect and
# proceed in 2 compile rounds
isinstance(target, AnnotationProcessor) or
(self.skip_java and is_java(target)) or
(self.skip_scala and is_scala(target)) or
(self.intransitive and target not in self.context.target_roots)
)
jars = OrderedSet()
excludes = OrderedSet()
compiles = OrderedSet()
def prune(target):
if target.is_jvm:
if target.excludes:
excludes.update(target.excludes)
jars.update(jar for jar in target.jar_dependencies)
if is_cp(target):
target.walk(compiles.add)
for target in targets:
target.walk(prune)
# TODO(John Sirois): Restructure to use alternate_target_roots Task lifecycle method
self.context._replace_targets(compiles)
self.jar_dependencies = jars
self.context.log.debug('pruned to cp:\n\t{}'.format(
'\n\t'.join(str(t) for t in self.context.targets())
))
def map_internal_jars(self, targets):
internal_jar_dir = os.path.join(self.gen_project_workdir, 'internal-libs')
safe_mkdir(internal_jar_dir, clean=True)
internal_source_jar_dir = os.path.join(self.gen_project_workdir, 'internal-libsources')
safe_mkdir(internal_source_jar_dir, clean=True)
internal_jars = self.context.products.get('jars')
internal_source_jars = self.context.products.get('source_jars')
for target in targets:
mappings = internal_jars.get(target)
if mappings:
for base, jars in mappings.items():
if len(jars) != 1:
raise IdeGen.Error('Unexpected mapping, multiple jars for {}: {}'.format(target, jars))
jar = jars[0]
cp_jar = os.path.join(internal_jar_dir, jar)
shutil.copy(os.path.join(base, jar), cp_jar)
cp_source_jar = None
mappings = internal_source_jars.get(target)
if mappings:
for base, jars in mappings.items():
if len(jars) != 1:
raise IdeGen.Error(
'Unexpected mapping, multiple source jars for {}: {}'.format(target, jars)
)
jar = jars[0]
cp_source_jar = os.path.join(internal_source_jar_dir, jar)
shutil.copy(os.path.join(base, jar), cp_source_jar)
self._project.internal_jars.add(ClasspathEntry(cp_jar, source_jar=cp_source_jar))
def map_external_jars(self, targets):
external_jar_dir = os.path.join(self.gen_project_workdir, 'external-libs')
safe_mkdir(external_jar_dir, clean=True)
external_source_jar_dir = os.path.join(self.gen_project_workdir, 'external-libsources')
safe_mkdir(external_source_jar_dir, clean=True)
external_javadoc_jar_dir = os.path.join(self.gen_project_workdir, 'external-libjavadoc')
safe_mkdir(external_javadoc_jar_dir, clean=True)
classpath_products = self.resolve_jars(targets) or ClasspathProducts(self.get_options().pants_workdir)
cp_entry_by_classifier_by_orgname = defaultdict(lambda: defaultdict(dict))
for conf, jar_entry in classpath_products.get_artifact_classpath_entries_for_targets(targets):
coord = (jar_entry.coordinate.org, jar_entry.coordinate.name)
classifier = jar_entry.coordinate.classifier
cp_entry_by_classifier_by_orgname[coord][classifier] = jar_entry
def copy_jar(cp_entry, dest_dir):
if not cp_entry:
return None
cp_jar = os.path.join(dest_dir, os.path.basename(cp_entry.path))
shutil.copy(cp_entry.path, cp_jar)
return cp_jar
# Per org.name (aka maven "project"), collect the primary artifact and any extra classified
# artifacts, taking special note of 'sources' and 'javadoc' artifacts that IDEs handle specially
# to provide source browsing and javadocs for 3rdparty libs.
for cp_entry_by_classifier in cp_entry_by_classifier_by_orgname.values():
primary_jar = copy_jar(cp_entry_by_classifier.pop(None, None), external_jar_dir)
sources_jar = copy_jar(cp_entry_by_classifier.pop('sources', None), external_source_jar_dir)
javadoc_jar = copy_jar(cp_entry_by_classifier.pop('javadoc', None), external_javadoc_jar_dir)
if primary_jar:
self._project.external_jars.add(ClasspathEntry(jar=primary_jar,
source_jar=sources_jar,
javadoc_jar=javadoc_jar))
# Treat all other jars as opaque with no source or javadoc attachments of their own. An
# example are jars with the 'tests' classifier.
for jar_entry in cp_entry_by_classifier.values():
extra_jar = copy_jar(jar_entry, external_jar_dir)
self._project.external_jars.add(ClasspathEntry(extra_jar))
def execute(self):
"""Stages IDE project artifacts to a project directory and generates IDE configuration files."""
# Grab the targets in-play before the context is replaced by `self._prepare_project()` below.
targets = self.context.targets()
self._prepare_project()
if self.context.options.is_known_scope('compile.checkstyle'):
checkstyle_classpath = self.tool_classpath('checkstyle', scope='compile.checkstyle')
else: # Checkstyle not enabled.
checkstyle_classpath = []
if self.skip_scala:
scalac_classpath = []
else:
scalac_classpath = ScalaPlatform.global_instance().compiler_classpath(self.context.products)
self._project.set_tool_classpaths(checkstyle_classpath, scalac_classpath)
self.map_internal_jars(targets)
self.map_external_jars(targets)
idefile = self.generate_project(self._project)
if idefile:
binary_util.ui_open(idefile)
def generate_project(self, project):
raise NotImplementedError('Subclasses must generate a project for an ide')
class ClasspathEntry(object):
"""Represents a classpath entry that may have sources available."""
def __init__(self, jar, source_jar=None, javadoc_jar=None):
self.jar = jar
self.source_jar = source_jar
self.javadoc_jar = javadoc_jar
class SourceSet(object):
"""Models a set of source files."""
def __init__(self, root_dir, source_base, path, is_test=False, resources_only=False):
"""
:param string root_dir: full path to the root of the project containing this source set
:param string source_base: the relative path from root_dir to the base of this source set
:param string path: relative path from the source_base to the base of the sources in this set
:param bool is_test: true if the sources contained by this set implement test cases
:param bool resources_only: true if a target has resources but no sources.
"""
self.root_dir = root_dir
self.source_base = source_base
self.path = path
self.is_test = is_test
self.resources_only = resources_only
self._excludes = []
@property
def excludes(self):
"""Paths relative to self.path that are excluded from this source set."""
return self._excludes
@property
def _key_tuple(self):
"""Creates a tuple from the attributes used as a key to uniquely identify a SourceSet"""
return (self.root_dir, self.source_base, self.path)
def __str__(self):
return str(self._key_tuple)
def __eq__(self, other):
return self._key_tuple == other._key_tuple
def __cmp__(self, other):
return cmp(self._key_tuple, other._key_tuple)
def __hash__(self):
return hash(self._key_tuple)
def __repr__(self):
return "root_dir={} source_base={} path={} is_test={} resources_only={} _excludes={}".format(
self.root_dir,
self.source_base,
self.path,
self.is_test,
self.resources_only,
self._excludes)
class Project(object):
"""Models a generic IDE project that is comprised of a set of BUILD targets."""
@staticmethod
def extract_resource_extensions(resources):
"""Returns the set of unique extensions (including the .) from the given resource files."""
if resources:
for resource in resources:
_, ext = os.path.splitext(resource)
yield ext
@staticmethod
def _collapse_by_source_root(source_roots, source_sets):
"""Collapse SourceSets with common source roots into one SourceSet instance.
Use the registered source roots to collapse all source paths under a root.
If any test type of target is allowed under the root, the path is determined to be
a test path. This method will give unpredictable results if source root entries overlap.
:param list source_sets: SourceSets to analyze
:returns: list of SourceSets collapsed to the source root paths. There may be duplicate
entries in this list which will be removed by dedup_sources()
"""
collapsed_source_sets = []
for source in source_sets:
query = os.path.join(source.source_base, source.path)
source_root = source_roots.find_by_path(query)
if not source_root:
collapsed_source_sets.append(source)
else:
collapsed_source_sets.append(SourceSet(source.root_dir, source_root.path, "",
is_test=source.is_test,
resources_only=source.resources_only))
return collapsed_source_sets
def __init__(self, name, has_python, skip_java, skip_scala, use_source_root, root_dir,
debug_port, context, targets, transitive, target_util, build_ignore_patterns=None):
"""Creates a new, unconfigured, Project based at root_dir and comprised of the sources visible
to the given targets."""
self.context = context
self.target_util = target_util
self.name = name
self.root_dir = root_dir
self.targets = OrderedSet(targets)
self.transitive = transitive
self.sources = []
self.py_sources = []
self.py_libs = []
self.resource_extensions = set()
self.has_python = has_python
self.skip_java = skip_java
self.skip_scala = skip_scala
self.use_source_root = use_source_root
self.has_scala = False
self.has_tests = False
self.debug_port = debug_port
self.internal_jars = OrderedSet()
self.external_jars = OrderedSet()
self.build_ignore_patterns = build_ignore_patterns
def configure_python(self, source_paths, test_paths, lib_paths):
self.py_sources.extend(SourceSet(get_buildroot(), root, None) for root in source_paths)
self.py_sources.extend(SourceSet(get_buildroot(), root, None, is_test=True) for root in test_paths)
for root in lib_paths:
for path in os.listdir(os.path.join(get_buildroot(), root)):
if os.path.isdir(os.path.join(get_buildroot(), root, path)) or path.endswith('.egg'):
self.py_libs.append(SourceSet(get_buildroot(), root, path, is_test=False))
@classmethod
def dedup_sources(cls, source_set_list):
"""Remove duplicate source sets from the source_set_list.
Sometimes two targets with the same path are added to the source set. Remove duplicates
with the following rules:
1) If two targets are resources_only with different settings for is_test, is_test = False
2) If the targets have different settings for resources_only, resources_only = False
3) If the two non-resource-only targets have different settings for is_test, is_test = True
"""
deduped_sources = set(filter(lambda s: not s.resources_only and s.is_test,
source_set_list))
deduped_sources.update(filter(lambda s: not s.resources_only,
source_set_list))
deduped_sources.update(filter(lambda s : s.resources_only and not s.is_test,
source_set_list))
deduped_sources.update(filter(lambda s : s.resources_only and s.is_test,
source_set_list))
# re-sort the list, makes the generated project easier to read.
return sorted(list(deduped_sources))
def configure_jvm(self, extra_source_paths, extra_test_paths):
"""
Configures this project's source sets returning the full set of targets the project is
comprised of. The full set can be larger than the initial set of targets when any of the
initial targets only has partial ownership of its source set's directories.
"""
# TODO(John Sirois): much waste lies here, revisit structuring for more readable and efficient
# construction of source sets and excludes ... and add a test!
analyzed_targets = OrderedSet()
targeted = set()
def relative_sources(target):
sources = target.payload.sources.relative_to_buildroot()
return [os.path.relpath(source, target.target_base) for source in sources]
def source_target(target):
result = ((self.transitive or target in self.targets) and
target.has_sources() and
(not (self.skip_java and is_java(target)) and
not (self.skip_scala and is_scala(target))))
return result
def configure_source_sets(relative_base, sources, is_test=False, resources_only=False):
absolute_base = os.path.join(self.root_dir, relative_base)
paths = set([os.path.dirname(source) for source in sources])
for path in paths:
absolute_path = os.path.join(absolute_base, path)
# Note, this can add duplicate source paths to self.sources(). We'll de-dup them later,
# because we want to prefer test paths.
targeted.add(absolute_path)
source_set = SourceSet(self.root_dir, relative_base, path,
is_test=is_test, resources_only=resources_only)
self.sources.append(source_set)
def find_source_basedirs(target):
dirs = set()
if source_target(target):
absolute_base = os.path.join(self.root_dir, target.target_base)
dirs.update([os.path.join(absolute_base, os.path.dirname(source))
for source in relative_sources(target)])
return dirs
def configure_target(target):
if target not in analyzed_targets:
analyzed_targets.add(target)
self.has_scala = not self.skip_scala and (self.has_scala or is_scala(target))
# Hack for java_sources and Eclipse/IntelliJ: add java_sources to project
if isinstance(target, ScalaLibrary):
for java_source in target.java_sources:
configure_target(java_source)
# Resources are already in the target set
if target.has_resources:
resources_by_basedir = defaultdict(set)
for resources in target.resources:
analyzed_targets.add(resources)
resources_by_basedir[resources.target_base].update(relative_sources(resources))
for basedir, resources in resources_by_basedir.items():
self.resource_extensions.update(Project.extract_resource_extensions(resources))
configure_source_sets(basedir, resources, is_test=target.is_test,
resources_only=True)
if target.has_sources():
test = target.is_test
self.has_tests = self.has_tests or test
base = target.target_base
configure_source_sets(base, relative_sources(target), is_test=test,
resources_only=isinstance(target, Resources))
# TODO(Garrett Malmquist): This is dead code, and should be redone/reintegrated.
# Other BUILD files may specify sources in the same directory as this target. Those BUILD
# files might be in parent directories (globs('a/b/*.java')) or even children directories if
# this target globs children as well. Gather all these candidate BUILD files to test for
      # sources they own that live in the directories this target's sources live in.
target_dirset = find_source_basedirs(target)
if not isinstance(target.address, BuildFileAddress):
return [] # Siblings only make sense for BUILD files.
candidates = OrderedSet()
build_file = target.address.build_file
dir_relpath = os.path.dirname(build_file.relpath)
for descendant in BuildFile.scan_build_files(build_file.project_tree, dir_relpath,
build_ignore_patterns=self.build_ignore_patterns):
candidates.update(self.target_util.get_all_addresses(descendant))
if not self._is_root_relpath(dir_relpath):
ancestors = self._collect_ancestor_build_files(build_file.project_tree, os.path.dirname(dir_relpath),
self.build_ignore_patterns)
for ancestor in ancestors:
candidates.update(self.target_util.get_all_addresses(ancestor))
def is_sibling(target):
return source_target(target) and target_dirset.intersection(find_source_basedirs(target))
return filter(is_sibling, [self.target_util.get(a) for a in candidates if a != target.address])
resource_targets = []
for target in self.targets:
if isinstance(target, Resources):
# Wait to process these until all resources that are reachable from other targets are
# processed. That way we'll only add a new SourceSet if this target has never been seen
        # before. This allows test resource SourceSets to properly keep the is_test property.
resource_targets.append(target)
else:
target.walk(configure_target, predicate=source_target)
for target in resource_targets:
target.walk(configure_target)
def full_path(source_set):
return os.path.join(source_set.root_dir, source_set.source_base, source_set.path)
# Check if there are any overlapping source_sets, and output an error message if so.
# Overlapping source_sets cause serious problems with package name inference.
overlap_error = ('SourceSets {current} and {previous} evaluate to the same full path.'
' This can be caused by multiple BUILD targets claiming the same source,'
' e.g., if a BUILD target in a parent directory contains an rglobs() while'
' a BUILD target in a subdirectory of that uses a globs() which claims the'
' same sources. This may cause package names to be inferred incorrectly (e.g.,'
' you might see src.com.foo.bar.Main instead of com.foo.bar.Main).')
source_full_paths = {}
for source_set in sorted(self.sources, key=full_path):
full = full_path(source_set)
if full in source_full_paths:
previous_set = source_full_paths[full]
logger.debug(overlap_error.format(current=source_set, previous=previous_set))
source_full_paths[full] = source_set
# We need to figure out excludes, in doing so there are 2 cases we should not exclude:
# 1.) targets depend on A only should lead to an exclude of B
# A/BUILD
# A/B/BUILD
#
# 2.) targets depend on A and C should not lead to an exclude of B (would wipe out C)
# A/BUILD
# A/B
# A/B/C/BUILD
#
# 1 approach: build set of all paths and parent paths containing BUILDs our targets depend on -
# these are unexcludable
unexcludable_paths = set()
for source_set in self.sources:
parent = os.path.join(self.root_dir, source_set.source_base, source_set.path)
while True:
unexcludable_paths.add(parent)
parent, _ = os.path.split(parent)
# no need to add the repo root or above, all source paths and extra paths are children
if parent == self.root_dir:
break
for source_set in self.sources:
paths = set()
source_base = os.path.join(self.root_dir, source_set.source_base)
for root, dirs, _ in safe_walk(os.path.join(source_base, source_set.path)):
if dirs:
paths.update([os.path.join(root, directory) for directory in dirs])
unused_children = paths - targeted
if unused_children:
for child in unused_children:
if child not in unexcludable_paths:
source_set.excludes.append(os.path.relpath(child, source_base))
targets = OrderedSet()
for target in self.targets:
target.walk(lambda target: targets.add(target), source_target)
targets.update(analyzed_targets - targets)
self.sources.extend(SourceSet(get_buildroot(), p, None, is_test=False) for p in extra_source_paths)
self.sources.extend(SourceSet(get_buildroot(), p, None, is_test=True) for p in extra_test_paths)
if self.use_source_root:
self.sources = Project._collapse_by_source_root(self.context.source_roots, self.sources)
self.sources = self.dedup_sources(self.sources)
return targets
def set_tool_classpaths(self, checkstyle_classpath, scalac_classpath):
self.checkstyle_classpath = checkstyle_classpath
self.scala_compiler_classpath = scalac_classpath
@classmethod
def _collect_ancestor_build_files(cls, project_tree, dir_relpath, build_ignore_patterns):
for build_file in BuildFile.get_build_files_family(project_tree, dir_relpath, build_ignore_patterns):
yield build_file
while not cls._is_root_relpath(dir_relpath):
dir_relpath = os.path.dirname(dir_relpath)
for build_file in BuildFile.get_build_files_family(project_tree, dir_relpath, build_ignore_patterns):
yield build_file
@classmethod
def _is_root_relpath(cls, relpath):
return relpath == '.' or relpath == ''
|
ity/pants
|
src/python/pants/backend/project_info/tasks/ide_gen.py
|
Python
|
apache-2.0
| 31,400
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import itertools
import os
import re
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.auth import REDIRECT_FIELD_NAME, SESSION_KEY
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.views import login as login_view, redirect_to_login
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.requests import RequestSite
from django.core import mail
from django.core.urlresolvers import NoReverseMatch, reverse, reverse_lazy
from django.db import connection
from django.http import HttpRequest, QueryDict
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.test import (
TestCase, ignore_warnings, modify_settings, override_settings,
)
from django.test.utils import patch_logger
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import ParseResult, urlparse
from django.utils.translation import LANGUAGE_SESSION_KEY
from .models import UUIDUser
from .settings import AUTH_TEMPLATES
@override_settings(
LANGUAGES=[
('en', 'English'),
],
LANGUAGE_CODE='en',
TEMPLATES=AUTH_TEMPLATES,
USE_TZ=False,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls',
)
class AuthViewsTestCase(TestCase):
"""
    Helper base class for all the following test cases.
"""
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u2 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
first_name='Inactive', last_name='User', email='testclient2@example.com', is_staff=False, is_active=False,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u3 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
first_name='Staff', last_name='Member', email='staffmember@example.com', is_staff=True, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u4 = User.objects.create(
password='', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='empty_password', first_name='Empty', last_name='Password', email='empty_password@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u5 = User.objects.create(
password='$', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unmanageable_password', first_name='Unmanageable', last_name='Password',
email='unmanageable_password@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u6 = User.objects.create(
password='foo$bar', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unknown_password', first_name='Unknown', last_name='Password',
email='unknown_password@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
def login(self, username='testclient', password='password'):
response = self.client.post('/login/', {
'username': username,
'password': password,
})
self.assertIn(SESSION_KEY, self.client.session)
return response
def logout(self):
response = self.client.get('/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertNotIn(SESSION_KEY, self.client.session)
def assertFormError(self, response, error):
"""Assert that error is found in response.context['form'] errors"""
form_errors = list(itertools.chain(*response.context['form'].errors.values()))
self.assertIn(force_text(error), form_errors)
def assertURLEqual(self, url, expected, parse_qs=False):
"""
Given two URLs, make sure all their components (the ones given by
urlparse) are equal, only comparing components that are present in both
URLs.
If `parse_qs` is True, then the querystrings are parsed with QueryDict.
This is useful if you don't want the order of parameters to matter.
Otherwise, the query strings are compared as-is.
"""
fields = ParseResult._fields
for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
if parse_qs and attr == 'query':
x, y = QueryDict(x), QueryDict(y)
if x and y and x != y:
self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):
def test_named_urls(self):
"Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb64': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"""If the provided email is not registered, don't raise any error but
also don't send any email."""
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# optional multipart text/html email has been added. Make sure original,
# default functionality is 100% the same
self.assertFalse(mail.outbox[0].message().is_multipart())
def test_extra_email_context(self):
"""
extra_email_context should be available in the email template context.
"""
response = self.client.post(
'/password_reset_extra_email_context/',
{'email': 'staffmember@example.com'},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Email email context: "Hello!"', mail.outbox[0].body)
def test_html_mail_template(self):
"""
A multipart email with text/plain and text/html is sent
if the html_email_template parameter is passed to the view
"""
response = self.client.post('/password_reset/html_email_template/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertNotIn('<html>', message.get_payload(0).get_payload())
self.assertIn('<html>', message.get_payload(1).get_payload())
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)
@ignore_warnings(category=RemovedInDjango110Warning)
@override_settings(ALLOWED_HOSTS=['adminsite.com'])
def test_admin_reset(self):
"If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
response = self.client.post('/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='adminsite.com'
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://adminsite.com", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
        # making 'evil.tld' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_host()
# is invoked, but we check here as a practical consequence.
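        # Added illustration (not part of the original test): with a header like the one
        # below, standard URL parsing treats everything before the '@' as credentials,
        # so the effective host becomes the attacker-controlled part, e.g.
        #   from django.utils.six.moves.urllib.parse import urlparse
        #   urlparse('//www.example:dr.frankenstein@evil.tld').hostname  # -> 'evil.tld'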
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host_admin_site(self):
"Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
def _test_confirm_start(self):
# Start by creating the email
self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf will reject a different length.
path = path[:-5] + ("0" * 4) + path[-1]
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_user(self):
# Ensure that we get a 200 response for a non-existent user, not a 404
response = self.client.get('/reset/123456/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_overflow_user(self):
# Ensure that we get a 200 response for a base36 user id that overflows int
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying
# to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0" * 4) + path[-1]
self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': ' anewpassword',
})
# Check the password has not been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(not u.check_password("anewpassword"))
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
# Check the password has been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(u.check_password("anewpassword"))
# Check we can't use the link again
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'x'})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_reset_redirect_default(self):
response = self.client.post('/password_reset/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/done/')
def test_reset_custom_redirect(self):
response = self.client.post('/password_reset/custom_redirect/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_reset_custom_redirect_named(self):
response = self.client.post('/password_reset/custom_redirect/named/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_redirect_default(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/reset/done/')
def test_confirm_redirect_custom(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_confirm_redirect_custom_named(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/named/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_display_user_from_form(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# #16919 -- The ``password_reset_confirm`` view should pass the user
# object to the ``SetPasswordForm``, even on GET requests.
# For this test, we render ``{{ form.user }}`` in the template
# ``registration/password_reset_confirm.html`` so that we can test this.
username = User.objects.get(email='staffmember@example.com').username
self.assertContains(response, "Hello, %s." % username)
# However, the view should NOT pass any user object on a form if the
# password reset link was invalid.
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "Hello, .")
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
user_email = 'staffmember@example.com'
@classmethod
def setUpTestData(cls):
cls.u1 = CustomUser.custom_objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), email='staffmember@example.com', is_active=True,
is_admin=False, date_of_birth=datetime.date(1976, 11, 8)
)
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': self.user_email})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid_custom_user(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
# then submit a new password
response = self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': 'anewpassword',
})
self.assertRedirects(response, '/reset/done/')
@override_settings(AUTH_USER_MODEL='auth.UUIDUser')
class UUIDUserPasswordResetTest(CustomUserPasswordResetTest):
def _test_confirm_start(self):
# instead of fixture
UUIDUser.objects.create_user(
email=self.user_email,
username='foo',
password='foo',
)
return super(UUIDUserPasswordResetTest, self)._test_confirm_start()
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password,
})
self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})
def logout(self):
self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_password_change_succeeds(self):
self.login()
self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.fail_login()
self.login(password='password1')
def test_password_change_done_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
@override_settings(LOGIN_URL='/login/')
def test_password_change_done_fails(self):
response = self.client.get('/password_change/done/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/?next=/password_change/done/')
def test_password_change_redirect_default(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
def test_password_change_redirect_custom(self):
self.login()
response = self.client.post('/password_change/custom/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_password_change_redirect_custom_named(self):
self.login()
response = self.client.post('/password_change/custom/named/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
@modify_settings(MIDDLEWARE_CLASSES={
'append': 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
})
class SessionAuthenticationTests(AuthViewsTestCase):
def test_user_password_change_updates_session(self):
"""
#21649 - Ensure contrib.auth.views.password_change updates the user's
session auth hash after a password change so the session isn't logged out.
"""
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
# if the hash isn't updated, retrieving the redirection page will fail.
self.assertRedirects(response, '/password_change/done/')
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('login'))
self.assertEqual(response.status_code, 200)
if apps.is_installed('django.contrib.sites'):
Site = apps.get_model('sites.Site')
site = Site.objects.get_current()
self.assertEqual(response.context['site'], site)
self.assertEqual(response.context['site_name'], site.name)
else:
self.assertIsInstance(response.context['site'], RequestSite)
self.assertIsInstance(response.context['form'], AuthenticationForm)
def test_security_check(self, password='password'):
login_url = reverse('login')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'http:///example.com',
'https://example.com',
'ftp://exampel.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url,
"%s should be blocked" % bad_url)
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
def test_login_form_contains_request(self):
# 15198
self.client.post('/custom_requestauth_login/', {
'username': 'testclient',
'password': 'password',
}, follow=True)
# the custom authentication form used by this login asserts
# that a request is passed to the form successfully.
def test_login_csrf_rotate(self, password='password'):
"""
Makes sure that a login rotates the currently-used CSRF token.
"""
# Do a GET to establish a CSRF token
# TestClient isn't used here as we're testing middleware, essentially.
req = HttpRequest()
CsrfViewMiddleware().process_view(req, login_view, (), {})
# get_token() triggers CSRF token inclusion in the response
get_token(req)
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token1 = csrf_cookie.coded_value
# Prepare the POST request
req = HttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
req.method = "POST"
req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
# Use POST request to log in
SessionMiddleware().process_request(req)
CsrfViewMiddleware().process_view(req, login_view, (), {})
req.META["SERVER_NAME"] = "testserver" # Required to have redirect work in login view
req.META["SERVER_PORT"] = 80
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token2 = csrf_cookie.coded_value
# Check the CSRF token switched
self.assertNotEqual(token1, token2)
def test_session_key_flushed_on_login(self):
"""
To avoid reusing another user's session, ensure a new, empty session is
created if the existing session corresponds to a different authenticated
user.
"""
self.login()
original_session_key = self.client.session.session_key
self.login(username='staff')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_session_key_flushed_on_login_after_password_change(self):
"""
As above, but same user logging in after a password change.
"""
self.login()
original_session_key = self.client.session.session_key
# If no password change, session key should not be flushed.
self.login()
self.assertEqual(original_session_key, self.client.session.session_key)
user = User.objects.get(username='testclient')
user.set_password('foobar')
user.save()
self.login(password='foobar')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_login_session_without_hash_session_key(self):
"""
Session without django.contrib.auth.HASH_SESSION_KEY should login
without an exception.
"""
user = User.objects.get(username='testclient')
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[SESSION_KEY] = user.id
session.save()
original_session_key = session.session_key
self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
self.login()
self.assertNotEqual(original_session_key, self.client.session.session_key)
class LoginURLSettings(AuthViewsTestCase):
"""Tests for settings.LOGIN_URL."""
def assertLoginURLEquals(self, url, parse_qs=False):
response = self.client.get('/login_required/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url, parse_qs=parse_qs)
@override_settings(LOGIN_URL='/login/')
def test_standard_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='login')
def test_named_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='http://remote.example.com/login')
def test_remote_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='https:///login/')
def test_https_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'https:///login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='/login/?pretty=1')
def test_login_url_with_querystring(self):
self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)
@override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
def test_remote_login_url_with_next_querystring(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_lazy_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
class LoginRedirectUrlTest(AuthViewsTestCase):
"""Tests for settings.LOGIN_REDIRECT_URL."""
def assertLoginRedirectURLEqual(self, url):
response = self.login()
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url)
def test_default(self):
self.assertLoginRedirectURLEqual('/accounts/profile/')
@override_settings(LOGIN_REDIRECT_URL='/custom/')
def test_custom(self):
self.assertLoginRedirectURLEqual('/custom/')
@override_settings(LOGIN_REDIRECT_URL='password_reset')
def test_named(self):
self.assertLoginRedirectURLEqual('/password_reset/')
@override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
def test_remote(self):
self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
class RedirectToLoginTests(AuthViewsTestCase):
"""Tests for the redirect_to_login view"""
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_redirect_to_login_with_lazy(self):
login_redirect_response = redirect_to_login(next='/else/where/')
expected = '/login/?next=/else/where/'
self.assertEqual(expected, login_redirect_response.url)
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_redirect_to_login_with_lazy_and_unicode(self):
login_redirect_response = redirect_to_login(next='/else/where/झ/')
expected = '/login/?next=/else/where/%E0%A4%9D/'
self.assertEqual(expected, login_redirect_response.url)
class LogoutTest(AuthViewsTestCase):
def confirm_logged_out(self):
self.assertNotIn(SESSION_KEY, self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertContains(response, 'Logged out')
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertIn('site', response.context)
def test_logout_with_overridden_redirect_url(self):
# Bug 11223
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
response = self.client.get('/logout/next_page/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_named_redirect(self):
"Logout resolves names or URLs passed as next_page."
self.login()
response = self.client.get('/logout/next_page/named/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
self.confirm_logged_out()
def test_security_check(self, password='password'):
logout_url = reverse('logout')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'http:///example.com',
'https://example.com',
'ftp://exampel.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
self.login()
response = self.client.get(nasty_url)
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url,
"%s should be blocked" % bad_url)
self.confirm_logged_out()
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
self.login()
response = self.client.get(safe_url)
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
self.confirm_logged_out()
def test_logout_preserve_language(self):
"""Check that language stored in session is preserved after logout"""
# Create a new session with language
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[LANGUAGE_SESSION_KEY] = 'pl'
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
self.client.get('/logout/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'pl')
# Redirect in test_user_change_password will fail if session auth hash
# isn't updated after password change (#21649)
@modify_settings(MIDDLEWARE_CLASSES={
'append': 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
})
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls_admin',
)
class ChangelistTests(AuthViewsTestCase):
def setUp(self):
# Make me a superuser before logging in.
User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
self.login()
self.admin = User.objects.get(pk=self.u1.pk)
def get_user_data(self, user):
return {
'username': user.username,
'password': user.password,
'email': user.email,
'is_active': user.is_active,
'is_staff': user.is_staff,
'is_superuser': user.is_superuser,
'last_login_0': user.last_login.strftime('%Y-%m-%d'),
'last_login_1': user.last_login.strftime('%H:%M:%S'),
'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'first_name': user.first_name,
'last_name': user.last_name,
}
# #20078 - users shouldn't be allowed to guess password hashes via
# repeated password__startswith queries.
def test_changelist_disallows_password_lookups(self):
# A lookup that tries to filter on password isn't OK
with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
response = self.client.get(reverse('auth_test_admin:auth_user_changelist') + '?password__startswith=sha1$')
self.assertEqual(response.status_code, 400)
self.assertEqual(len(logger_calls), 1)
def test_user_change_email(self):
data = self.get_user_data(self.admin)
data['email'] = 'new_' + data['email']
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
data
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed email.')
def test_user_not_change(self):
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
self.get_user_data(self.admin)
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'No fields changed.')
def test_user_change_password(self):
user_change_url = reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,))
password_change_url = reverse('auth_test_admin:auth_user_password_change', args=(self.admin.pk,))
response = self.client.get(user_change_url)
# Test the link inside password field help_text.
rel_link = re.search(
r'you can change the password using <a href="([^"]*)">this form</a>',
force_text(response.content)
).groups()[0]
self.assertEqual(
os.path.normpath(user_change_url + rel_link),
os.path.normpath(password_change_url)
)
response = self.client.post(
password_change_url,
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed password.')
self.logout()
self.login(password='password1')
def test_user_change_different_user_password(self):
u = User.objects.get(email='staffmember@example.com')
response = self.client.post(
reverse('auth_test_admin:auth_user_password_change', args=(u.pk,)),
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_change', args=(u.pk,)))
row = LogEntry.objects.latest('id')
self.assertEqual(row.user_id, self.admin.pk)
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.change_message, 'Changed password.')
def test_password_change_bad_url(self):
response = self.client.get(reverse('auth_test_admin:auth_user_password_change', args=('foobar',)))
self.assertEqual(response.status_code, 404)
@override_settings(
AUTH_USER_MODEL='auth.UUIDUser',
ROOT_URLCONF='auth_tests.urls_custom_user_admin',
)
class UUIDUserTests(TestCase):
def test_admin_password_change(self):
u = UUIDUser.objects.create_superuser(username='uuid', email='foo@bar.com', password='test')
self.assertTrue(self.client.login(username='uuid', password='test'))
user_change_url = reverse('custom_user_admin:auth_uuiduser_change', args=(u.pk,))
response = self.client.get(user_change_url)
self.assertEqual(response.status_code, 200)
password_change_url = reverse('custom_user_admin:auth_user_password_change', args=(u.pk,))
response = self.client.get(password_change_url)
self.assertEqual(response.status_code, 200)
# A LogEntry is created with pk=1 which breaks a FK constraint on MySQL
with connection.constraint_checks_disabled():
response = self.client.post(password_change_url, {
'password1': 'password1',
'password2': 'password1',
})
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
            self.assertEqual(row.user_id, 1)  # hardcoded in CustomUserAdmin.log_change()
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.change_message, 'Changed password.')
|
bikong2/django
|
tests/auth_tests/test_views.py
|
Python
|
bsd-3-clause
| 45,028
|
#
# tsne.py
#
# Implementation of t-SNE in Python. The implementation was tested on Python 2.7.10, and it requires a working
# installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the
# results of this example, a working installation of matplotlib is required.
#
# The example can be run by executing: `ipython tsne.py`
#
#
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
# Note: on the download page, it says:
#
# You are free to use, modify, or redistribute this software in any
# way you want, but only for non-commercial purposes. The use of the
# software is at your own risk; the authors are not responsible for
# any damage as a result from errors in the software.
#
# - https://lvdmaaten.github.io/tsne/ , 2017-04-28.
#
# for x in stamp-0[01]*.png; do pngtopnm $x | pnmquant 256 | ppmtogif > $x.gif; done
# gifsicle -m -o tsne.gif -d 10 --colors 256 stamp-0[01]*.gif
# avconv -i stamp-%04d.png -r 10 tsne.mov
import numpy as np
import pylab as plt
def Hbeta(D = np.array([]), beta = 1.0):
"""Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution."""
# Compute P-row and corresponding perplexity
P = np.exp(-D.copy() * beta);
sumP = sum(P);
H = np.log(sumP) + beta * np.sum(D * P) / sumP;
P = P / sumP;
return H, P;
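# Added note (not part of the original code): Hbeta() returns the Shannon entropy H of
# the row distribution in nats, so the perplexity implied by a given beta is exp(H).
# x2p() below bisects on beta until exp(H) matches the requested perplexity. A toy call
# (the distances are made up) would look like:
#
#   H, thisP = Hbeta(np.array([0.5, 1.0, 2.0]), beta=1.0)
#   implied_perplexity = np.exp(H)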
def x2p(X = np.array([]), tol = 1e-5, perplexity = 30.0):
"""Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""
# Initialize some variables
print "Computing pairwise distances..."
(n, d) = X.shape;
sum_X = np.sum(np.square(X), 1);
D = np.add(np.add(-2 * np.dot(X, X.T), sum_X).T, sum_X);
P = np.zeros((n, n));
beta = np.ones((n, 1));
logU = np.log(perplexity);
# Loop over all datapoints
for i in range(n):
# Print progress
if i % 500 == 0:
print "Computing P-values for point ", i, " of ", n, "..."
# Compute the Gaussian kernel and entropy for the current precision
betamin = -np.inf;
betamax = np.inf;
Di = D[i, np.concatenate((np.r_[0:i], np.r_[i+1:n]))];
(H, thisP) = Hbeta(Di, beta[i]);
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU;
tries = 0;
while np.abs(Hdiff) > tol and tries < 50:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i].copy();
if betamax == np.inf or betamax == -np.inf:
beta[i] = beta[i] * 2;
else:
beta[i] = (beta[i] + betamax) / 2;
else:
betamax = beta[i].copy();
if betamin == np.inf or betamin == -np.inf:
beta[i] = beta[i] / 2;
else:
beta[i] = (beta[i] + betamin) / 2;
# Recompute the values
(H, thisP) = Hbeta(Di, beta[i]);
Hdiff = H - logU;
tries = tries + 1;
# Set the final row of P
P[i, np.concatenate((np.r_[0:i], np.r_[i+1:n]))] = thisP;
# Return final P-matrix
print "Mean value of sigma: ", np.mean(np.sqrt(1 / beta));
return P;
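# Added note (not part of the original code): the bisection above tunes the precision
# beta_i of each Gaussian kernel; the bandwidth reported by the final printout is
# sigma_i = sqrt(1 / beta_i).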
def pca(X = np.array([]), no_dims = 50):
"""Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions."""
print "Preprocessing the data using PCA..."
(n, d) = X.shape;
X = X - np.tile(np.mean(X, 0), (n, 1));
(l, M) = np.linalg.eig(np.dot(X.T, X));
Y = np.dot(X, M[:,0:no_dims]);
return Y;
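# Added note (not part of the original code): pca() is only a preprocessing step here;
# tsne() below uses it to shrink X to `initial_dims` columns before embedding, e.g.
#
#   X_reduced = pca(X, no_dims=50).real   # keep the real part, exactly as tsne() does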
def tsne(X = np.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0):
"""Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
    The syntax of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
# Check inputs
if isinstance(no_dims, float):
print "Error: array X should have type float.";
return -1;
if round(no_dims) != no_dims:
print "Error: number of dimensions should be an integer.";
return -1;
# Initialize variables
X = pca(X, initial_dims).real;
(n, d) = X.shape;
max_iter = 1000;
initial_momentum = 0.5;
final_momentum = 0.8;
eta = 500;
min_gain = 0.01;
Y = np.random.randn(n, no_dims);
dY = np.zeros((n, no_dims));
iY = np.zeros((n, no_dims));
gains = np.ones((n, no_dims));
# Compute P-values
P = x2p(X, 1e-5, perplexity);
P = P + np.transpose(P);
P = P / np.sum(P);
P = P * 4; # early exaggeration
P = np.maximum(P, 1e-12);
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_Y = np.sum(np.square(Y), 1);
num = 1 / (1 + np.add(np.add(-2 * np.dot(Y, Y.T), sum_Y).T, sum_Y));
num[range(n), range(n)] = 0;
Q = num / np.sum(num);
Q = np.maximum(Q, 1e-12);
# Compute gradient
PQ = P - Q;
for i in range(n):
dY[i,:] = np.sum(np.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0);
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
gains[gains < min_gain] = min_gain;
iY = momentum * iY - eta * (gains * dY);
Y = Y + iY;
Y = Y - np.tile(np.mean(Y, 0), (n, 1));
# Compute current value of cost function
if iter < 200 or (iter + 1) % 10 == 0:
C = np.sum(P * np.log(P / Q));
print "Iteration ", (iter + 1), ": error is ", C
# plt.clf()
# plt.scatter(Y[:,0], Y[:,1], s=20, c=labels,
# vmin=labels.min(), vmax=labels.max());
# plt.savefig('step-%04i.png' % iter)
# ax = plt.axis()
#mx = np.max(np.abs(ax))
#xlo,xhi, ylo,yhi = ax
#xlo,xhi = Y[:,0].min(), Y[:,0].max()
#ylo,yhi = Y[:,1].min(), Y[:,1].max()
mx = np.abs(Y).max()
xlo,xhi = -mx,mx
#mx = np.abs(Y[:,1]).max()
ylo,yhi = -mx,mx
plt.clf()
#S = mx * 0.05
#plt.clf()
ih,iw = 400,400
imgmap = np.zeros((ih,iw,3), np.uint8)
for i in range(n):
x = Y[i,0]
y = Y[i,1]
#plt.imshow(stamps[i], extent=[x-S,x+S, y-S,y+S],
# interpolation='nearest', origin='lower')
ix = int((x - xlo) / (xhi - xlo) * iw)
iy = int((y - ylo) / (yhi - ylo) * ih)
sh,sw,d = stamps[i].shape
ix = int(np.clip(ix-sw/2, 0, iw-sw))
iy = int(np.clip(iy-sh/2, 0, ih-sh))
imgmap[iy : iy+sh, ix : ix+sw, :] = np.maximum(
imgmap[iy : iy+sh, ix : ix+sw, :], stamps[i])
# plt.axis([-(mx+S), mx+S, -(mx+S), mx+S])
plt.imshow(imgmap, interpolation='nearest', origin='lower')
plt.xticks([]); plt.yticks([])
plt.title('t-SNE on DECaLS catalogs: %s' % samplename)
plt.savefig('stamp-%04i.png' % iter)
# Stop lying about P-values
if iter == 100:
P = P / 4;
# Return solution
return Y;
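# Added usage sketch (not part of the original code): the generic call pattern matches
# the docstring above, e.g.
#
#   Y = tsne(X, no_dims=2, initial_dims=50, perplexity=30.0)
#
# Note that this modified copy also reads the module-level `stamps` and `samplename`
# variables (set up in the __main__ block below) while plotting each iteration, so it
# is not fully self-contained.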
if __name__ == "__main__":
from astrometry.util.fits import *
import pylab as plt
from collections import Counter
TT = []
for brick in ['1498p017', '1498p020', '1498p022', '1498p025', '1501p017', '1501p020', '1501p022', '1501p025', '1503p017', '1503p020', '1503p022', '1503p025', '1506p017', '1506p020', '1506p022', '1506p025']:
B = brick[:3]
T = fits_table ('cosmos-50-rex2/metrics/%s/all-models-%s.fits' % (B,brick))
T2 = fits_table ('cosmos-50-rex2/tractor/%s/tractor-%s.fits' % (B,brick))
jpg = plt.imread('cosmos-50-rex2/coadd/%s/%s/legacysurvey-%s-image.jpg' % (B,brick,brick))
T.decam_flux = T2.decam_flux
T.decam_flux_ivar = T2.decam_flux_ivar
T.bx = T2.bx
T.by = T2.by
T.ix = np.round(T.bx).astype(int)
T.iy = np.round(T.by).astype(int)
jpg = np.flipud(jpg)
H,W,d = jpg.shape
S = 15
#print(jpg.shape, jpg.dtype)
T.cut((T.ix >= S) * (T.iy >= S) * (T.ix < (W-S)) * (T.iy < (H-S)))
stamps = []
for i in range(len(T)):
stamps.append((jpg[T.iy[i] - S : T.iy[i] + S + 1,
T.ix[i] - S : T.ix[i] + S + 1, :]))
T.stamps = stamps
TT.append(T)
T = merge_tables(TT)
print(len(T))
T.labels = np.zeros(len(T), int)
T.labels[T.type == 'REX '] = 1
T.labels[T.type == 'EXP '] = 2
T.labels[T.type == 'DEV '] = 3
T.labels[T.type == 'COMP'] = 4
T.g = -2.5 * (np.log10(T.decam_flux[:,1]) - 9)
T.r = -2.5 * (np.log10(T.decam_flux[:,2]) - 9)
T.z = -2.5 * (np.log10(T.decam_flux[:,4]) - 9)
print(Counter(T.type))
T.cut(np.isfinite(T.g) * np.isfinite(T.r) * np.isfinite(T.z))
print('Finite mags:', Counter(T.type))
T.cut((T.g > 15) * (T.g < 25) *
(T.r > 15) * (T.r < 25) *
(T.z > 15) * (T.z < 25))
print('Mags 15 to 25:', Counter(T.type))
# T.cut((T.g > 15) * (T.g < 23) *
# (T.r > 15) * (T.r < 23) *
# (T.z > 15) * (T.z < 23))
print(len(T))
mg = np.median(T.decam_flux_ivar[:,1])
mr = np.median(T.decam_flux_ivar[:,2])
mz = np.median(T.decam_flux_ivar[:,4])
T.cut((T.decam_flux_ivar[:,1] > mg/4.) *
(T.decam_flux_ivar[:,2] > mr/4.) *
(T.decam_flux_ivar[:,4] > mz/4.))
print(len(T))
print('Invvars:', Counter(T.type))
#T.cut(np.logical_or(T.type == 'EXP ', T.type == 'DEV '))
#T.cut(T.type == 'EXP ')
#samplename = 'EXP galaxies'
# T.cut(T.type == 'DEV ')
# samplename = 'DEV galaxies'
#T = T[np.argsort(T.r)[:500]]
#T.cut(T.type == 'REX ')
#T.cut(T.r < 21)
#samplename = 'REX sources'
#T.cut(np.logical_or(np.logical_or(T.type == 'EXP ', T.type == 'DEV '), T.type == 'REX '))
T.cut(T.r < 21)
samplename = 'r < 21'
#T = T[:500]
T = T[:1000]
print('Sample:', Counter(T.type))
print(Counter(T.type))
labels = T.labels
stamps = T.stamps
X = np.vstack((T.r, T.g - T.r, T.r - T.z,
T.rex_shapeexp_r)).T
assert(np.all(np.isfinite(X)))
print(X.shape)
D = X.shape[1]
for i in range(D):
for j in range(i):
plt.clf()
            plt.plot(X[:,i], X[:,j], 'b.')
plt.savefig('x-%i-%i.png' % (i,j))
Y = tsne(X, 2, 50, 20.0);
# plt.scatter(Y[:,0], Y[:,1], s=20, c=labels,
# vmin=labels.min(), vmax=labels.max());
# plt.show();
# plt.savefig('1.png')
|
legacysurvey/pipeline
|
py/legacyanalysis/tsne.py
|
Python
|
gpl-2.0
| 11,089
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/krl1to5/Work/FULL/Sequence-ToolKit/2016/resources/ui/dialogs/xml_preview.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_main_window(object):
def setupUi(self, main_window):
main_window.setObjectName("main_window")
main_window.setWindowModality(QtCore.Qt.WindowModal)
main_window.resize(600, 500)
main_window.setMinimumSize(QtCore.QSize(600, 500))
self.central_widget = QtWidgets.QWidget(main_window)
self.central_widget.setObjectName("central_widget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.central_widget)
self.verticalLayout.setObjectName("verticalLayout")
self.xml_content = QtWidgets.QTextEdit(self.central_widget)
self.xml_content.setReadOnly(True)
self.xml_content.setTextInteractionFlags(QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.xml_content.setObjectName("xml_content")
self.verticalLayout.addWidget(self.xml_content)
main_window.setCentralWidget(self.central_widget)
self.tool_bar = QtWidgets.QToolBar(main_window)
self.tool_bar.setObjectName("tool_bar")
main_window.addToolBar(QtCore.Qt.TopToolBarArea, self.tool_bar)
self.action_save = QtWidgets.QAction(main_window)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/resources/img/icons/save.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_save.setIcon(icon)
self.action_save.setObjectName("action_save")
self.action_save_as = QtWidgets.QAction(main_window)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/resources/img/icons/save_as.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_save_as.setIcon(icon1)
self.action_save_as.setObjectName("action_save_as")
self.tool_bar.addAction(self.action_save)
self.tool_bar.addAction(self.action_save_as)
self.retranslateUi(main_window)
QtCore.QMetaObject.connectSlotsByName(main_window)
def retranslateUi(self, main_window):
_translate = QtCore.QCoreApplication.translate
main_window.setWindowTitle(_translate("main_window", "XML Preview"))
self.tool_bar.setWindowTitle(_translate("main_window", "Tools Bar"))
self.action_save.setText(_translate("main_window", "Save"))
self.action_save.setShortcut(_translate("main_window", "Ctrl+S"))
self.action_save_as.setText(_translate("main_window", "Save As"))
self.action_save_as.setToolTip(_translate("main_window", "Save As"))
self.action_save_as.setShortcut(_translate("main_window", "Ctrl+Shift+S"))
import img_rc
|
carlos-ferras/Sequence-ToolKit
|
view/dialogs/ui_xml_preview.py
|
Python
|
gpl-3.0
| 2,850
|
import datetime
import unittest
from django.utils.dateparse import parse_duration
from django.utils.duration import (
duration_iso_string, duration_microseconds, duration_string,
)
class TestDurationString(unittest.TestCase):
def test_simple(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5)
self.assertEqual(duration_string(duration), '01:03:05')
def test_days(self):
duration = datetime.timedelta(days=1, hours=1, minutes=3, seconds=5)
self.assertEqual(duration_string(duration), '1 01:03:05')
def test_microseconds(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5, microseconds=12345)
self.assertEqual(duration_string(duration), '01:03:05.012345')
def test_negative(self):
duration = datetime.timedelta(days=-1, hours=1, minutes=3, seconds=5)
self.assertEqual(duration_string(duration), '-1 01:03:05')
class TestParseDurationRoundtrip(unittest.TestCase):
def test_simple(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5)
self.assertEqual(parse_duration(duration_string(duration)), duration)
def test_days(self):
duration = datetime.timedelta(days=1, hours=1, minutes=3, seconds=5)
self.assertEqual(parse_duration(duration_string(duration)), duration)
def test_microseconds(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5, microseconds=12345)
self.assertEqual(parse_duration(duration_string(duration)), duration)
def test_negative(self):
duration = datetime.timedelta(days=-1, hours=1, minutes=3, seconds=5)
self.assertEqual(parse_duration(duration_string(duration)), duration)
class TestISODurationString(unittest.TestCase):
def test_simple(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5)
self.assertEqual(duration_iso_string(duration), 'P0DT01H03M05S')
def test_days(self):
duration = datetime.timedelta(days=1, hours=1, minutes=3, seconds=5)
self.assertEqual(duration_iso_string(duration), 'P1DT01H03M05S')
def test_microseconds(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5, microseconds=12345)
self.assertEqual(duration_iso_string(duration), 'P0DT01H03M05.012345S')
def test_negative(self):
duration = -1 * datetime.timedelta(days=1, hours=1, minutes=3, seconds=5)
self.assertEqual(duration_iso_string(duration), '-P1DT01H03M05S')
class TestParseISODurationRoundtrip(unittest.TestCase):
def test_simple(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5)
self.assertEqual(parse_duration(duration_iso_string(duration)), duration)
def test_days(self):
duration = datetime.timedelta(days=1, hours=1, minutes=3, seconds=5)
self.assertEqual(parse_duration(duration_iso_string(duration)), duration)
def test_microseconds(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5, microseconds=12345)
self.assertEqual(parse_duration(duration_iso_string(duration)), duration)
def test_negative(self):
duration = datetime.timedelta(days=-1, hours=1, minutes=3, seconds=5)
self.assertEqual(parse_duration(duration_iso_string(duration)).total_seconds(), duration.total_seconds())
class TestDurationMicroseconds(unittest.TestCase):
def test(self):
deltas = [
datetime.timedelta.max,
datetime.timedelta.min,
datetime.timedelta.resolution,
-datetime.timedelta.resolution,
datetime.timedelta(microseconds=8999999999999999),
]
for delta in deltas:
with self.subTest(delta=delta):
self.assertEqual(datetime.timedelta(microseconds=duration_microseconds(delta)), delta)
|
georgemarshall/django
|
tests/utils_tests/test_duration.py
|
Python
|
bsd-3-clause
| 3,864
|
# -*- coding: UTF-8 -*-
#------------------------------------------------------------------------------
# file: fields.py
# License: LICENSE.TXT
# Author: Ioannis Tziakos
#
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
import collections
import re
from line_functions import add_indent, is_empty, remove_indent, replace_at
class Field(collections.namedtuple('Field', ('name','signature','desc'))):
""" A docstring field.
    The class is based on the namedtuple class and represents the logic
to check, parse and refactor a docstring field.
Attributes
----------
name : str
        The name of the field, usually the name of a parameter or attribute.
signature : str
        The signature of the field. Commonly this is the class type of an argument
or the signature of a function.
desc : str
The description of the field. Given the type of the field this is a
single paragraph or a block of rst source.
"""
@classmethod
def is_field(cls, line, indent=''):
""" Check if the line is a field header.
"""
regex = indent + r'\*?\*?\w+\s:(\s+|$)'
match = re.match(regex, line)
return match
@classmethod
def parse(cls, lines):
"""Parse a field definition for a set of lines.
The field is assumed to be in one of the following formats::
<name> : <type>
<description>
or::
<name> :
<description>
or::
<name>
<description>
Arguments
---------
lines :
docstring lines of the field without any empty lines before or
after.
Returns
-------
field :
Field or subclass of Field
"""
header = lines[0].strip()
if ' :' in header:
arg_name, arg_type = re.split('\s\:\s?', header, maxsplit=1)
else:
arg_name, arg_type = header, ''
if len(lines) > 1:
lines = [line.rstrip() for line in lines]
return cls(arg_name.strip(), arg_type.strip(), lines[1:])
else:
return cls(arg_name.strip(), arg_type.strip(), [''])
def to_rst(self, indent=4):
""" Outputs field in rst as an itme in a definition list.
Arguments
---------
indent : int
            The indent to use for the description block.
Returns
-------
lines : list
            A list of string lines of formatted rst.
Example
-------
        >>> field = Field('Ioannis', 'Ιωάννης', ['Is the greek guy.'])
        >>> for line in field.to_rst(): print line
Ioannis (Ιωάννης)
Is the greek guy.
"""
lines = []
header = '{0} ({1})'.format(self.name, self.signature)
lines.append(header)
lines += add_indent(self.desc, indent)
return lines
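# Added sketch (not part of the original module): on a two-line field definition,
# Field.parse() splits the header on ' : ' and keeps the remaining lines as the
# description, e.g.
#
#   Field.parse(['indent : int', '    The indent to use.'])
#   # -> Field(name='indent', signature='int', desc=['    The indent to use.'])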
class AttributeField(Field):
""" Field for the argument function docstrings """
def to_rst(self, indent=4):
""" Outputs field in rst using the ``:param:`` role.
Arguments
---------
indent : int
            The indent to use for the description block.
Example
-------
        >>> field = AttributeField('indent', 'int', ['The indent to use.'])
        >>> field.to_rst()   # returns the lines of a ``.. attribute:: indent`` block
"""
lines = []
_type = self.signature
annotation = '{0} :annotation: = {1}'
type_str = '' if is_empty(_type) else annotation.format(indent * ' ', _type)
directive = '{0}.. attribute:: {1}'
lines += [directive.format(indent * ' ', self.name), type_str]
if type_str != '':
lines.append('')
lines += self.desc
lines.append('')
return lines
class ArgumentField(Field):
""" Field for the argument function docstrings """
def to_rst(self, indent=4):
""" Outputs field in rst using the ``:param:`` role.
Arguments
---------
indent : int
            The indent to use for the description block.
Example
-------
        >>> field = ArgumentField('indent', 'int', ['The indent to use for the description block.'])
        >>> for line in field.to_rst(): print line
:param indent: The indent to use for the description block
:type indent: int
"""
lines = []
name = self.name.replace('*','\*') # Fix cases like *args and **kwargs
indent_str = ' ' * indent
param_str = '{0}:param {1}: {2}'.format(indent_str, name, self.desc[0].strip())
type_str = '{0}:type {1}: {2}'.format(indent_str, name, self.signature)
lines.append(param_str)
lines += self.desc[1:]
if len(self.signature) > 0:
lines.append(type_str)
return lines
class ListItemField(Field):
""" Field that in rst is formated as an item in the list ignoring any
field.type information.
"""
def to_rst(self, indent=4, prefix=''):
""" Outputs field in rst using as items in an list.
Arguments
---------
indent : int
            The indent to use for the description block.
prefix : str
The prefix to use. For example if the item is part of a numbered
list then ``prefix='# '``.
Example
-------
Note
----
        The field description is reformatted into a single line.
"""
indent_str = ' ' * indent
rst_pattern = '{0}{1}**{2}**{3}' if is_empty(self.desc[0]) else \
'{0}{1}**{2}** -- {3}'
description = '' if is_empty(self.desc[0]) else \
' '.join(remove_indent(self.desc))
return [rst_pattern.format(indent_str, prefix, self.name, description)]
class ListItemWithTypeField(Field):
""" Field for the return section of the function docstrings """
def to_rst(self, indent=4, prefix=''):
indent_str = ' ' * indent
_type = '' if self.signature == '' else '({0})'.format(self.signature)
rst_pattern = '{0}{1}**{2}** {3}{4}' if is_empty(self.desc[0]) else \
'{0}{1}**{2}** {3} -- {4}'
description = '' if is_empty(self.desc[0]) else \
' '.join(remove_indent(self.desc))
return [rst_pattern.format(indent_str, prefix, self.name, _type, description)]
class FunctionField(Field):
""" A field that represents a function """
@classmethod
def is_field(cls, line, indent=''):
regex = indent + r'\w+\(.*\)\s*'
match = re.match(regex, line)
return match
def to_rst(self, length, first_column, second_column):
split_result = re.split('\((.*)\)', self.name)
method_name = split_result[0]
method_text = ':meth:`{0} <{1}>`'.format(self.name, method_name)
summary = ' '.join([line.strip() for line in self.desc])
line = ' ' * length
line = replace_at(method_text, line, first_column)
line = replace_at(summary, line, second_column)
return [line]
MethodField = FunctionField
#------------------------------------------------------------------------------
# Functions to work with fields
#------------------------------------------------------------------------------
def max_name_length(method_fields):
""" Find the max length of the function name in a list of method fields.
Arguments
---------
fields : list
The list of the parsed fields.
"""
return max([field[0].find('(') for field in method_fields])
def max_header_length(fields):
""" Find the max length of the header in a list of fields.
Arguments
---------
fields : list
The list of the parsed fields.
"""
return max([len(field[0]) for field in fields])
def max_desc_length(fields):
""" Find the max length of the description in a list of fields.
Arguments
---------
fields : list
The list of the parsed fields.
"""
return max([len(' '.join([line.strip() for line in field[2]]))
for field in fields])
|
ContinuumIO/ashiba
|
enaml/docs/source/sphinxext/refactordoc/fields.py
|
Python
|
bsd-3-clause
| 8,370
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
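# Illustrative output (not executed): the input line "1.2.3.4:8333" would be emitted as
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 8333}
# i.e. the IPv4 address wrapped in the IPv6-mapped prefix, followed by the port.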
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef NAVCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define NAVCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the navcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 44440)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 15556)
g.write('\n')
with open(os.path.join(indir,'nodes_dev.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_dev', 18886)
g.write('#endif // NAVCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
navcoindev/navcoin-core
|
contrib/seeds/generate-seeds.py
|
Python
|
mit
| 4,508
|
"""
# The NASA93 Data Set
Standard header:
"""
from __future__ import division,print_function
import sys
sys.dont_write_bytecode = True
from lib import *
"""
Data:
Possible Splits= ["variance", "centroid", "median"]
"""
def nasa93(weighFeature = False, split = "median"):
vl=1;l=2;n=3;h=4;vh=5;xh=6;_=0
return data(indep= [
# 0..8
'Prec', 'Flex', 'Resl', 'Team', 'Pmat', 'rely', 'data', 'cplx', 'ruse',
# 9 .. 17
'docu', 'time', 'stor', 'pvol', 'acap', 'pcap', 'pcon', 'aexp', 'plex',
# 18 .. 25
'ltex', 'tool', 'site', 'sced', 'kloc'],
less = ['effort', 'defects', 'months'],
_rows = [
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,25.9,117.6,808,15.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,24.6,117.6,767,15.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,7.7,31.2,240,10.1],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,8.2,36,256,10.4],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,9.7,25.2,302,11.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,2.2,8.4,69,6.6],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,3.5,10.8,109,7.8],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,66.6,352.8,2077,21.0],
[h,h,h,vh,h,h,l,h,n,n,xh,xh,l,h,h,n,h,n,h,h,n,n,7.5,72,226,13.6],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,vh,n,vh,n,h,n,n,n,20,72,566,14.4],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,6,24,188,9.9],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,vh,n,vh,n,h,n,n,n,100,360,2832,25.2],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,n,n,vh,n,l,n,n,n,11.3,36,456,12.8],
[h,h,h,vh,n,n,l,h,n,n,n,n,h,h,h,n,h,l,vl,n,n,n,100,215,5434,30.1],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,20,48,626,15.1],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,n,n,n,n,vl,n,n,n,100,360,4342,28.0],
[h,h,h,vh,n,n,l,h,n,n,n,xh,l,h,vh,n,vh,n,h,n,n,n,150,324,4868,32.5],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,h,n,h,n,n,n,31.5,60,986,17.6],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,15,48,470,13.6],
[h,h,h,vh,n,n,l,h,n,n,n,xh,l,h,n,n,h,n,h,n,n,n,32.5,60,1276,20.8],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,19.7,60,614,13.9],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,66.6,300,2077,21.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,29.5,120,920,16.0],
[h,h,h,vh,n,h,n,n,n,n,h,n,n,n,h,n,h,n,n,n,n,n,15,90,575,15.2],
[h,h,h,vh,n,h,n,h,n,n,n,n,n,n,h,n,h,n,n,n,n,n,38,210,1553,21.3],
[h,h,h,vh,n,n,n,n,n,n,n,n,n,n,h,n,h,n,n,n,n,n,10,48,427,12.4],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,15.4,70,765,14.5],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,48.5,239,2409,21.4],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,16.3,82,810,14.8],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,12.8,62,636,13.6],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,32.6,170,1619,18.7],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,35.5,192,1763,19.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,5.5,18,172,9.1],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,10.4,50,324,11.2],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,14,60,437,12.4],
[h,h,h,vh,n,h,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,6.5,42,290,12.0],
[h,h,h,vh,n,n,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,13,60,683,14.8],
[h,h,h,vh,h,n,n,h,n,n,n,n,n,n,h,n,n,n,h,h,n,n,90,444,3343,26.7],
[h,h,h,vh,n,n,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,8,42,420,12.5],
[h,h,h,vh,n,n,n,h,n,n,h,n,n,n,n,n,n,n,n,n,n,n,16,114,887,16.4],
[h,h,h,vh,h,n,h,h,n,n,vh,h,l,h,h,n,n,l,h,n,n,l,177.9,1248,7998,31.5],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,h,n,n,n,n,n,n,n,302,2400,8543,38.4],
[h,h,h,vh,h,n,h,l,n,n,n,n,h,h,n,n,h,n,n,h,n,n,282.1,1368,9820,37.3],
[h,h,h,vh,h,h,h,l,n,n,n,n,n,h,n,n,h,n,n,n,n,n,284.7,973,8518,38.1],
[h,h,h,vh,n,h,h,n,n,n,n,n,l,n,h,n,h,n,h,n,n,n,79,400,2327,26.9],
[h,h,h,vh,l,l,n,n,n,n,n,n,l,h,vh,n,h,n,h,n,n,n,423,2400,18447,41.9],
[h,h,h,vh,h,n,n,n,n,n,n,n,l,h,vh,n,vh,l,h,n,n,n,190,420,5092,30.3],
[h,h,h,vh,h,n,n,h,n,n,n,h,n,h,n,n,h,n,h,n,n,n,47.5,252,2007,22.3],
[h,h,h,vh,l,vh,n,xh,n,n,h,h,l,n,n,n,h,n,n,h,n,n,21,107,1058,21.3],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,78,571.4,4815,30.5],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,11.4,98.8,704,15.5],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,19.3,155,1191,18.6],
[h,h,h,vh,l,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,101,750,4840,32.4],
[h,h,h,vh,l,h,n,h,n,n,h,h,l,n,n,n,h,n,n,n,n,n,219,2120,11761,42.8],
[h,h,h,vh,l,h,n,h,n,n,h,h,l,n,n,n,h,n,n,n,n,n,50,370,2685,25.4],
[h,h,h,vh,h,vh,h,h,n,n,vh,vh,n,vh,vh,n,vh,n,h,h,n,l,227,1181,6293,33.8],
[h,h,h,vh,h,n,h,vh,n,n,n,n,l,h,vh,n,n,l,n,n,n,l,70,278,2950,20.2],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,0.9,8.4,28,4.9],
[h,h,h,vh,l,vh,l,xh,n,n,xh,vh,l,h,h,n,vh,vl,h,n,n,n,980,4560,50961,96.4],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,vh,vh,n,n,h,h,n,n,n,350,720,8547,35.7],
[h,h,h,vh,h,h,n,xh,n,n,h,h,l,h,n,n,n,h,h,h,n,n,70,458,2404,27.5],
[h,h,h,vh,h,h,n,xh,n,n,h,h,l,h,n,n,n,h,h,h,n,n,271,2460,9308,43.4],
[h,h,h,vh,n,n,n,n,n,n,n,n,l,h,h,n,h,n,h,n,n,n,90,162,2743,25.0],
[h,h,h,vh,n,n,n,n,n,n,n,n,l,h,h,n,h,n,h,n,n,n,40,150,1219,18.9],
[h,h,h,vh,n,h,n,h,n,n,h,n,l,h,h,n,h,n,h,n,n,n,137,636,4210,32.2],
[h,h,h,vh,n,h,n,h,n,n,h,n,h,h,h,n,h,n,h,n,n,n,150,882,5848,36.2],
[h,h,h,vh,n,vh,n,h,n,n,h,n,l,h,h,n,h,n,h,n,n,n,339,444,8477,45.9],
[h,h,h,vh,n,l,h,l,n,n,n,n,h,h,h,n,h,n,h,n,n,n,240,192,10313,37.1],
[h,h,h,vh,l,h,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,144,576,6129,28.8],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,151,432,6136,26.2],
[h,h,h,vh,l,n,l,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,34,72,1555,16.2],
[h,h,h,vh,l,n,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,98,300,4907,24.4],
[h,h,h,vh,l,n,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,85,300,4256,23.2],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,20,240,813,12.8],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,111,600,4511,23.5],
[h,h,h,vh,l,h,vh,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,162,756,7553,32.4],
[h,h,h,vh,l,h,h,vh,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,352,1200,17597,42.9],
[h,h,h,vh,l,h,n,vh,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,165,97,7867,31.5],
[h,h,h,vh,h,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,60,409,2004,24.9],
[h,h,h,vh,h,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,100,703,3340,29.6],
[h,h,h,vh,n,h,vh,vh,n,n,xh,xh,h,n,n,n,n,l,l,n,n,n,32,1350,2984,33.6],
[h,h,h,vh,h,h,h,h,n,n,vh,xh,h,h,h,n,h,h,h,n,n,n,53,480,2227,28.8],
[h,h,h,vh,h,h,l,vh,n,n,vh,xh,l,vh,vh,n,vh,vl,vl,h,n,n,41,599,1594,23.0],
[h,h,h,vh,h,h,l,vh,n,n,vh,xh,l,vh,vh,n,vh,vl,vl,h,n,n,24,430,933,19.2],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,165,4178.2,6266,47.3],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,65,1772.5,2468,34.5],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,70,1645.9,2658,35.4],
[h,h,h,vh,h,vh,h,xh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,50,1924.5,2102,34.2],
[h,h,h,vh,l,vh,l,vh,n,n,vh,xh,l,h,n,n,l,vl,l,h,n,n,7.25,648,406,15.6],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,233,8211,8848,53.1],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n,16.3,480,1253,21.5],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n, 6.2, 12,477,15.4],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n, 3.0, 38,231,12.0],
],
_tunings =[[
# vlow low nom high vhigh xhigh
#scale factors:
'Prec', 6.20, 4.96, 3.72, 2.48, 1.24, _ ],[
'Flex', 5.07, 4.05, 3.04, 2.03, 1.01, _ ],[
'Resl', 7.07, 5.65, 4.24, 2.83, 1.41, _ ],[
'Pmat', 7.80, 6.24, 4.68, 3.12, 1.56, _ ],[
'Team', 5.48, 4.38, 3.29, 2.19, 1.01, _ ]],
weighFeature = weighFeature,
_split = split,
_dataTypes = [int]*22 + [float]*4,
ignores=[22],
is_continuous=[False]*22 + [True]
)
"""
Demo code:
"""
def _nasa93(): print(nasa93())
#if __name__ == '__main__': eval(todo('_nasa93()'))
|
ai-se/george
|
Models/nasa93.py
|
Python
|
mit
| 8,047
|
import os
import sys
from pathlib import Path
from pkg_resources import VersionConflict, require
from setuptools import find_packages, setup
with open("README.md") as f:
long_description = f.read()
with open("requirements.txt") as f:
requirements = f.read().splitlines()
try:
require("setuptools>=38.3")
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
ROOT_DIR = Path(__file__).parent.resolve()
# Creating the version file
with open("version.txt") as f:
version = f.read()
version = version.strip()
sha = "Unknown"
if os.getenv("BUILD_VERSION"):
version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
version += "+" + sha[:7]
print("-- Building version " + version)
version_path = ROOT_DIR / "pyannote" / "audio" / "version.py"
with open(version_path, "w") as f:
f.write("__version__ = '{}'\n".format(version))
if __name__ == "__main__":
setup(
name="pyannote.audio",
namespace_packages=["pyannote"],
version=version,
packages=find_packages(),
install_requires=requirements,
description="Neural building blocks for speaker diarization",
long_description=long_description,
long_description_content_type="text/markdown",
author="Hervé Bredin",
author_email="herve.bredin@irit.fr",
url="https://github.com/pyannote/pyannote-audio",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
],
)
|
pyannote/pyannote-audio
|
setup.py
|
Python
|
mit
| 1,809
|
# -*- coding: UTF-8 -*-
# Copyright 2017 Luc Saffre
# License: BSD (see file COPYING for details)
from .demo import *
SITE = Site(globals())
# SITE = Site(
# globals(),
# remote_user_header='REMOTE_USER')
DEBUG = True
# SITE.appy_params.update(raiseOnError=True)
# SITE.appy_params.update(pythonWithUnoPath='/usr/bin/python3')
# SITE.default_build_method = "appyodt"
# SITE.webdav_url = '/'
|
khchine5/book
|
lino_book/projects/adg/settings/doctests.py
|
Python
|
bsd-2-clause
| 400
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright © 2010 University of Zürich
# Author: Rico Sennrich <sennrich@cl.uzh.ch>
# For licensing information, see LICENSE
from __future__ import division,print_function,unicode_literals
import sys
import time
import math
from operator import itemgetter
from bleualign.gale_church import align_texts
import bleualign.score as bleu
from bleualign.utils import evaluate, finalevaluation
import io
import platform
if sys.version_info >= (2,6) and platform.system() != "Windows":
import multiprocessing
multiprocessing_enabled = 1
else:
multiprocessing_enabled = 0
def collect_article(src,srctotarget,target,targettosrc,options):
EOF = False
while not EOF:
all_texts = []
all_translations = []
for text,translations in [(src,srctotarget),(target,targettosrc)]:
textlist = []
translist = [[] for i in translations]
for line in text:
if line.rstrip() == options['end_of_article_marker']:
for f in translations:
f.readline()
break
for i,f in enumerate(translations):
translist[i].append(f.readline().rstrip())
if options['factored']:
rawline = ' '.join(word.split('|')[0] for word in line.split())
textlist.append((rawline,line.rstrip()))
else:
textlist.append(line.rstrip())
else:
EOF = True
all_texts.append(textlist)
all_translations.append(translist)
sourcelist, targetlist = all_texts
translist1, translist2 = all_translations
yield sourcelist,targetlist,translist1,translist2
#takes a queue as argument and puts all articles to be aligned in it.
#it is best to call this in a separate process because we limit the queue size for memory reasons
def tasks_producer(tasks,num_tasks,data,num_processes):
for i,task in enumerate(collect_article(*data)):
num_tasks.value += 1
tasks.put((i,task),True)
#poison pills
for i in range(num_processes):
tasks.put((None,None))
num_tasks.value -= 1 # the task count only becomes final here; the main process finishes once all tasks are done.
class Aligner:
default_options = {
#source and target files needed by Aligner
#they can be filenames, arrays of strings or io objects.
'srcfile':None, 'targetfile': None,
#the format of srcfile and targetfile
#False for normal text, True for 'text | other information', separated by '|'
'factored': False,
#translations of srcfile and targetfile, not influenced by 'factored'
#they can be filenames, arrays of strings or io objects, too.
'srctotarget': [], 'targettosrc': [],
#run aligner without srctotarget and targettosrc
'no_translation_override':False,
#only consider target sentences for bleu-based alignment that are among top N alternatives for a given source sentence
'maxalternatives':3,
#the bleu scoring algorithm works with 4-grams by default. We got better results when using 2-grams (since there are fewer 0 scores then)
'bleu_ngrams' : 2,
#BLEU is word-based by default, but character-level BLEU is more suitable for some languages, e.g. continuous script languages without space.
#it is a good idea to also increase bleu_ngrams when switching to character-level BLEU
'bleu_charlevel' : False,
#consider N to 1 (and 1 to N) alignment in gapfilling (complexity is size_of_gap*value^2, so don't turn this unnecessarily high)
#also, there are potential precision issues.
#set to 1 to disable bleu-based 1 to N alignments and let gale & church fill the gaps
'Nto1' : 2,
#do only gale-church, no bleualign
'galechurch': None,
#gapfillheuristics: what to do with sentences that aren't aligned one-to-one by the first BLEU pass, nor have a 1 to N alignment validated by BLEU?
#possible members are: bleu1to1, galechurch
#what they do is commented in the source code
'gapfillheuristics' : ["bleu1to1","galechurch"],
#defines string that identifies hard boundaries (articles, chapters etc.)
#string needs to be on a line of its own (see examples in eval directory)
#must be reliable (article i in the source text needs to correspond to article i in the target text)
'end_of_article_marker' : ".EOA",
#filtering out bad alignments by bleuscore
#filter has sentences or articles type
#filterthreshold means choosing the best X% of alignments (according to BLEU)
#bleuthreshold requires a sentence pair to achieve a certain BLEU score to be included in the output
#set filterlang to True when you want to filter out alignments whose source is more similar to the target than the translation is
'filter': None, 'filterthreshold': 90, 'bleuthreshold': 0, 'filterlang': None,
#it will print unaligned pairs (zero-to-one or one-to-zero pairs)
'printempty': False,
#set 'output' to a base filename and the four output filenames get their suffixes added automatically,
#or pass filenames or io objects for each of them individually.
#if nothing is passed (or None is assigned), results are stored in StringIO objects.
'output': None,
'output-src': None, 'output-target': None,
'output-src-bad': None, 'output-target-bad': None,
#the best alignment of corpus for evaluation
'eval': None,
#defines amount of debugging output.
'verbosity': 1, 'log_to':sys.stdout,
#number of parallel processes
'num_processes': 1
}
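# Minimal usage sketch (hypothetical filenames; plain-text input assumed):
#   options = {'srcfile': 'src.txt', 'targetfile': 'tgt.txt',
#              'srctotarget': ['src_translated.txt'], 'output': 'aligned'}
#   Aligner(options).mainloop()
# would write the aligned sentence pairs to aligned-s and aligned-t.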
def __init__(self,options):
self.src, self.target = None,None
self.srctotarget, self.targettosrc= [],[]
self.out1, self.out2, self.out_bad1, self.out_bad2 = None,None,None,None
self.sources_out,self.targets_out = [],[]
self.finalbleu = []
self.bleualign = []
self.close_src, self.close_target = False, False
self.close_srctotarget, self.close_targettosrc = [], []
self.close_out1, self.close_out2 = False, False
self.close_out_bad1, self.close_out_bad2 = False, False
self.options = self.default_options.copy()
self.options.update(options)
if not self.options['srcfile']:
raise ValueError('Source file not specified.')
if not self.options['targetfile']:
raise ValueError('Target file not specified.')
if not self.options['srctotarget'] and not self.options['targettosrc']\
and not self.options['no_translation_override']:
raise ValueError("ERROR: no translation available: BLEU scores can be computed between the source and target text, but this is not the intended usage of Bleualign and may result in poor performance! If you're *really* sure that this is what you want, set 'galechurch' for the options.")
self.src, self.close_src = \
self._inputObjectFromParameter(self.options['srcfile'])
self.target, self.close_target = \
self._inputObjectFromParameter(self.options['targetfile'])
for f in self.options['srctotarget']:
obj, close_obj = \
self._inputObjectFromParameter(f)
self.srctotarget.append(obj)
self.close_srctotarget.append(close_obj)
for f in self.options['targettosrc']:
obj, close_obj = \
self._inputObjectFromParameter(f)
self.targettosrc.append(obj)
self.close_targettosrc.append(close_obj)
self.out1,self.close_out1=self._outputObjectFromParameter(
self.options['output-src'], self.options['output'], '-s')
self.out2,self.close_out2=self._outputObjectFromParameter(
self.options['output-target'], self.options['output'], '-t')
if self.options['filter']:
self.out_bad1,self.close_out_bad1=self._outputObjectFromParameter(
self.options['output-src-bad'], self.options['output'], '-bad-s')
self.out_bad2,self.close_out_bad2=self._outputObjectFromParameter(
self.options['output-target-bad'], self.options['output'], '-bad-t')
# for passing by string array
def _stringArray2stringIo(self, stringArray):
return io.StringIO('\n'.join([line.rstrip() for line in stringArray]))
# parameter may be filename, IO object or string array
def _inputObjectFromParameter(self, parameter):
try:
inputObject = io.open(parameter, 'r', encoding='UTF-8')
close_object = True
except:
if isinstance(parameter, io.TextIOBase):
inputObject = parameter
else:
inputObject = self._stringArray2stringIo(parameter)
close_object = False
return inputObject, close_object
# parameter may be filename, IO object or string array
def _outputObjectFromParameter(self, parameter, filename, suffix):
close_object = False
if parameter:
try:
outputObject = io.open(parameter, 'w', encoding='UTF-8')
close_object = True
except:
outputObject = parameter
elif filename:
outputObject = io.open(filename + suffix, 'w', encoding='UTF-8')
else:
outputObject = io.StringIO()
return outputObject, close_object
#takes care of multiprocessing; calls process() function for each article
def mainloop(self):
results = {}
if multiprocessing_enabled and self.options['num_processes'] > 1:
tasks = multiprocessing.Queue(self.options['num_processes']+1)
manager = multiprocessing.Manager()
scores = manager.dict()
num_tasks = manager.Value('i',1)
scorers = [AlignMultiprocessed(tasks,self.options,scores,self.log) for i in range(self.options['num_processes'])]
for p in scorers:
p.start()
#this function produces the alignment tasks for the consumers in scorers
producer = multiprocessing.Process(target=tasks_producer,args=(tasks,num_tasks,(self.src,self.srctotarget,self.target,self.targettosrc,self.options),self.options['num_processes']))
producer.start()
i = 0
#get results from the worker processes and call the printout function
while i < num_tasks.value:
#wait till result #i is populated
while True:
try:
data,multialign,bleualign,scoredict = scores[i]
break
except:
time.sleep(0.1)
for p in scorers:
if p.exitcode == 1:
for p in scorers:
p.terminate()
producer.terminate()
raise RuntimeError("Multiprocessing error")
continue
(sourcelist,targetlist,translist1,translist2) = data
self.scoredict = scoredict
self.multialign = multialign
self.bleualign = bleualign
#normal case: translation from source to target exists
if translist1:
translist = translist1[0]
#no translation provided. we copy source sentences for further processing
else:
if self.options['factored']:
translist = [item[0] for item in sourcelist]
else:
translist = sourcelist
self.printout(sourcelist, translist, targetlist)
if self.options['eval']:
self.log('evaluation ' + str(i))
results[i] = evaluate(self.options,self.multialign,self.options['eval'][i],self.log)
del(scores[i])
i += 1
else:
for i,(sourcelist,targetlist,translist1,translist2) in enumerate(collect_article(self.src,self.srctotarget,self.target,self.targettosrc,self.options)):
self.log('reading in article ' + str(i) + ': ',1)
self.multialign = self.process(sourcelist,targetlist,translist1,translist2)
if translist1:
translist = translist1[0]
else:
if self.options['factored']:
translist = [item[0] for item in sourcelist]
else:
translist = sourcelist
self.printout(sourcelist, translist, targetlist)
if self.options['eval']:
self.log('evaluation ' + str(i))
results[i] = evaluate(self.options, self.multialign,self.options['eval'][i],self.log)
if self.out1:
self.out1.flush()
if self.out2:
self.out2.flush()
if self.options['eval']:
finalevaluation(results, self.log)
if self.options['filter']:
self.write_filtered()
self.close_file_streams()
return self.out1,self.out2
#results of alignment, or the good alignments if filtering
def results(self):
return self.out1,self.out2
#bad alignments when filtering. Otherwise, None
def results_bad(self):
return self.out_bad1,self.out_bad2
#Start different alignment runs depending on which and how many translations are sent to the program; intersect results.
def process(self,sourcelist,targetlist,translist1,translist2):
multialign = []
phase1 = []
phase2 = []
#do nothing if last line in file is .EOA or file is empty.
if not targetlist or not sourcelist:
self.log('WARNING: article is empty. Skipping.',0)
return []
self.log('processing',1)
if self.options['factored']:
raw_sourcelist = [item[0] for item in sourcelist]
raw_targetlist = [item[0] for item in targetlist]
else:
raw_sourcelist = sourcelist
raw_targetlist = targetlist
for i,translist in enumerate(translist1):
self.log("computing alignment between srctotarget (file " + str(i) + ") and target text",1)
phase1.append(self.align(translist, raw_targetlist))
for i,translist in enumerate(translist2):
self.log("computing alignment between targettosrc (file " + str(i) + ") and source text",1)
phase2.append(self.align(translist, raw_sourcelist))
if not (translist1 or translist2):
if self.options['no_translation_override'] or self.options['galechurch']:
phase1 = [self.align(raw_sourcelist, raw_targetlist)]
else:
self.log("ERROR: no translation available", 1)
if multiprocessing_enabled and self.options['num_processes'] > 1:
sys.exit(1)
else:
raise RuntimeError("ERROR: no translation available")
if len(phase1) > 1:
self.log("intersecting all srctotarget alignments",1)
phase1 = sorted(set(phase1[0]).intersection(*[set(x) for x in phase1[1:]]))
elif phase1:
phase1 = phase1[0]
if len(phase2) > 1:
self.log("intersecting all targettosrc alignments",1)
phase2 = sorted(set(phase2[0]).intersection(*[set(x) for x in phase2[1:]]))
elif phase2:
phase2 = phase2[0]
if phase1 and phase2:
self.log("intersecting both directions",1)
phase3 = []
phase2mirror = [(j,k) for ((k,j),t) in phase2]
for pair,t in phase1:
if pair in phase2mirror:
phase3.append((pair,'INTERSECT: ' + t + ' - ' + phase2[phase2mirror.index(pair)][1]))
multialign = phase3
elif phase1:
multialign = phase1
elif phase2:
multialign = [((j,k),t) for ((k,j),t) in phase2]
return multialign
#Compute alignment for one article and one automatic translation.
def align(self, translist, targetlist):
if self.options["galechurch"]:
self.multialign,self.bleualign,self.scoredict = [],[],{}
translist = [item for item in enumerate(translist)]
targetlist = [item for item in enumerate(targetlist)]
churchaligns = self.gale_church(translist,targetlist)
for src,target in churchaligns:
self.addtoAlignments((src,target),'GALECHURCH')
return self.multialign
else:
self.log('Evaluating sentences with bleu',1)
self.scoredict = self.eval_sents(translist,targetlist)
self.log('finished',1)
self.log('searching for longest path of good alignments',1)
self.pathfinder(translist, targetlist)
self.log('finished',1)
self.log(time.asctime(),2)
self.log('filling gaps',1)
self.gapfinder(translist, targetlist)
self.log('finished',1)
self.log(time.asctime(),2)
return self.multialign
#use this if you want to implement your own similarity score
def eval_sents_dummy(self,translist,targetlist):
scoredict = {}
for testID,testSent in enumerate(translist):
scores = []
for refID,refSent in enumerate(targetlist):
score = 100-abs(len(testSent)-len(refSent)) #replace this with your own similarity score
if score > 0:
scores.append((score,refID,score))
scoredict[testID] = sorted(scores,key=itemgetter(0),reverse=True)[:self.options['maxalternatives']]
return scoredict
# given list of test sentences and list of reference sentences, calculate bleu scores
#if you want to replace bleu with your own similarity measure, use eval_sents_dummy
def eval_sents(self,translist,targetlist):
scoredict = {}
cooked_test = {}
cooked_test2 = {}
ngrams = self.options['bleu_ngrams']
charlevel = self.options['bleu_charlevel']
cooktarget_cache = {}
cooktarget = []
for idx, item in enumerate(targetlist):
if charlevel:
item = tuple(item)
if item in cooktarget_cache:
cooktarget.append((idx, cooktarget_cache[item]))
else:
cooked = (idx, bleu.cook_ref_set(item, ngrams))
cooktarget.append(cooked)
cooktarget_cache[item] = cooked[1]
for testID,testSent in enumerate(translist):
if charlevel:
testSent = tuple(testSent)
#copied over from bleu.py to minimize redundancy
test_normalized = bleu.normalize(testSent)
cooked_test["testlen"] = len(test_normalized)
cooked_test["guess"] = [max(len(test_normalized)-k+1,0) for k in range(1,self.options['bleu_ngrams']+1)]
counts = bleu.count_ngrams(test_normalized, self.options['bleu_ngrams'])
#separate by n-gram length. if we have no matching bigrams, we don't have to compare unigrams
ngrams_sorted = dict([(x,set()) for x in range(self.options['bleu_ngrams'])])
for ngram in counts:
ngrams_sorted[len(ngram)-1].add(ngram)
scorelist = []
scorelist_cache = {}
for (refID,(reflen, refmaxcounts, refset)) in cooktarget:
if refset in scorelist_cache:
if scorelist_cache[refset] is not None:
m, c = scorelist_cache[refset]
scorelist.append((m, refID, c))
continue
ngrams_filtered = ngrams_sorted[self.options['bleu_ngrams']-1].intersection(refset)
if ngrams_filtered:
cooked_test["reflen"] = reflen
cooked_test['correct'] = [0]*self.options['bleu_ngrams']
for ngram in ngrams_filtered:
cooked_test["correct"][self.options['bleu_ngrams']-1] += min(refmaxcounts[ngram], counts[ngram])
for order in range(self.options['bleu_ngrams']-1):
for ngram in ngrams_sorted[order].intersection(refset):
cooked_test["correct"][order] += min(refmaxcounts[ngram], counts[ngram])
#copied over from bleu.py to minimize redundancy
logbleu = 0.0
for k in range(self.options['bleu_ngrams']):
logbleu += math.log(cooked_test['correct'][k])-math.log(cooked_test['guess'][k])
logbleu /= self.options['bleu_ngrams']
logbleu += min(0,1-float(cooked_test['reflen'])/cooked_test['testlen'])
score = math.exp(logbleu)
if score > 0:
#calculate bleu score in reverse direction
cooked_test2["guess"] = [max(cooked_test['reflen']-k+1,0) for k in range(1,self.options['bleu_ngrams']+1)]
logbleu = 0.0
for k in range(self.options['bleu_ngrams']):
logbleu += math.log(cooked_test['correct'][k])-math.log(cooked_test2['guess'][k])
logbleu /= self.options['bleu_ngrams']
logbleu += min(0,1-float(cooked_test['testlen'])/cooked_test['reflen'])
score2 = math.exp(logbleu)
meanscore = (2*score*score2)/(score+score2)
scorelist.append((meanscore,refID,cooked_test['correct']))
scorelist_cache[refset] = (meanscore, cooked_test['correct'])
else:
scorelist_cache[refset] = None
else:
scorelist_cache[refset] = None
scoredict[testID] = sorted(scorelist,key=itemgetter(0),reverse=True)[:self.options['maxalternatives']]
return scoredict
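# Note (illustrative arithmetic): the symmetric sentence score above is the harmonic
# mean of the forward and reverse BLEU, e.g. score=0.4 and score2=0.6 give
# meanscore = 2*0.4*0.6/(0.4+0.6) = 0.48.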
#follow the backpointers in score matrix to extract best path of 1-to-1 alignments
def extract_best_path(self,pointers):
i = len(pointers)-1
j = len(pointers[0])-1
pointer = ''
best_path = []
while i >= 0 and j >= 0:
pointer = pointers[i][j]
if pointer == '^':
i -= 1
elif pointer == '<':
j -= 1
elif pointer == 'match':
best_path.append((i,j))
i -= 1
j -= 1
best_path.reverse()
return best_path
#dynamic programming search for best path of alignments (maximal score)
def pathfinder(self, translist, targetlist):
# add an extra row/column to the matrix and start filling it from 1,1 (to avoid exceptions for first row/column)
matrix = [[0 for column in range(len(targetlist)+1)] for row in range(len(translist)+1)]
pointers = [['' for column in range(len(targetlist))] for row in range(len(translist))]
for i in range(len(translist)):
alignments = dict([(target, score) for (score, target, correct) in self.scoredict[i]])
for j in range(len(targetlist)):
best_score = matrix[i][j+1]
best_pointer = '^'
score = matrix[i+1][j]
if score > best_score:
best_score = score
best_pointer = '<'
if j in alignments:
score = alignments[j] + matrix[i][j]
if score > best_score:
best_score = score
best_pointer = 'match'
matrix[i+1][j+1] = best_score
pointers[i][j] = best_pointer
self.bleualign = self.extract_best_path(pointers)
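# Illustrative example (not executed): with two translated and two target sentences
# and scoredict = {0: [(0.6, 0, ...)], 1: [(0.5, 1, ...)]}, the matrix rewards both
# matches and extract_best_path returns [(0, 0), (1, 1)].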
#find unaligned sentences and create work packets for gapfiller()
#gapfiller() takes two sentence pairs and all unaligned sentences in between as arguments; gapfinder() extracts these.
def gapfinder(self, translist, targetlist):
self.multialign = []
#find gaps: lastpair is considered pre-gap, pair is post-gap
lastpair = ((),())
src, target = None, None
for src,target in self.bleualign:
oldsrc, oldtarget = lastpair
#in first iteration, gap will start at 0
if not oldsrc:
oldsrc = (-1,)
if not oldtarget:
oldtarget = (-1,)
#identify gap sizes
sourcegap = list(range(oldsrc[-1]+1,src))
targetgap = list(range(oldtarget[-1]+1,target))
if targetgap or sourcegap:
lastpair = self.gapfiller(sourcegap, targetgap, lastpair, ((src,),(target,)), translist, targetlist)
else:
self.addtoAlignments(lastpair)
lastpair = ((src,),(target,))
#if self.bleualign is empty, gap will start at 0
if src is None:
src = -1
if target is None:
target = -1
#search for gap after last alignment pair
sourcegap = list(range(src+1, len(translist)))
targetgap = list(range(target+1, len(targetlist)))
if targetgap or sourcegap:
lastpair = self.gapfiller(sourcegap, targetgap, lastpair, ((),()), translist, targetlist)
self.addtoAlignments(lastpair)
#apply heuristics to align all sentences that remain unaligned after finding best path of 1-to-1 alignments
#heuristics include bleu-based 1-to-n alignment and length-based alignment
def gapfiller(self, sourcegap, targetgap, pregap, postgap, translist, targetlist):
evalsrc = []
evaltarget = []
#compile list of sentences in gap that will be considered for BLEU comparison
if self.options['Nto1'] > 1 or "bleu1to1" in self.options['gapfillheuristics']:
#concatenate all sentences in pregap alignment pair
tmpstr = ' '.join([translist[i] for i in pregap[0]])
evalsrc.append((pregap[0],tmpstr))
#concatenate all sentences in pregap alignment pair
tmpstr = ' '.join([targetlist[i] for i in pregap[1]])
evaltarget.append((pregap[1],tmpstr))
#search will be pruned to this window
if "bleu1to1" in self.options['gapfillheuristics']:
window = 10 + self.options['Nto1']
else:
window = self.options['Nto1']
for src in [j for i,j in enumerate(sourcegap) if (i < window or len(sourcegap)-i <= window)]:
Sent = translist[src]
evalsrc.append(((src,),Sent))
for target in [j for i,j in enumerate(targetgap) if (i < window or len(targetgap)-i <= window)]:
Sent = targetlist[target]
evaltarget.append(((target,),Sent))
#concatenate all sentences in postgap alignment pair
tmpstr = ' '.join([translist[i] for i in postgap[0]])
evalsrc.append((postgap[0],tmpstr))
#concatenate all sentences in postgap alignment pair
tmpstr = ' '.join([targetlist[i] for i in postgap[1]])
evaltarget.append((postgap[1],tmpstr))
nSrc = {}
for n in range(2,self.options['Nto1']+1):
nSrc[n] = self.createNSents(evalsrc,n)
for n in range(2,self.options['Nto1']+1):
evalsrc += nSrc[n]
nTar = {}
for n in range(2,self.options['Nto1']+1):
nTar[n] = self.createNSents(evaltarget,n)
for n in range(2,self.options['Nto1']+1):
evaltarget += nTar[n]
evalsrc_raw = [item[1] for item in evalsrc]
evaltarget_raw = [item[1] for item in evaltarget]
scoredict_raw = self.eval_sents(evalsrc_raw,evaltarget_raw)
scoredict = {}
for src,value in list(scoredict_raw.items()):
src = evalsrc[src][0]
if value:
newlist = []
for item in value:
score,target,score2 = item
target = evaltarget[target][0]
newlist.append((score,target,score2))
scoredict[src] = newlist
else:
scoredict[src] = []
while sourcegap or targetgap:
pregapsrc,pregaptarget = pregap
postgapsrc,postgaptarget = postgap
if sourcegap and self.options['Nto1'] > 1:
#try if concatenating source sentences together improves bleu score (beginning of gap)
if pregapsrc:
oldscore,oldtarget,oldcorrect = scoredict[pregapsrc][0]
combinedID = tuple(list(pregapsrc)+[sourcegap[0]])
if combinedID in scoredict:
newscore,newtarget,newcorrect = scoredict[combinedID][0]
if newscore > oldscore and newcorrect > oldcorrect and newtarget == pregaptarget:
#print('\nsource side: ' + str(combinedID) + ' better than ' + str(pregapsrc))
pregap = (combinedID,pregaptarget)
sourcegap.pop(0)
continue
#try if concatenating source sentences together improves bleu score (end of gap)
if postgapsrc:
oldscore,oldtarget,oldcorrect = scoredict[postgapsrc][0]
combinedID = tuple([sourcegap[-1]] + list(postgapsrc))
if combinedID in scoredict:
newscore,newtarget, newcorrect = scoredict[combinedID][0]
if newscore > oldscore and newcorrect > oldcorrect and newtarget == postgaptarget:
#print('\nsource side: ' + str(combinedID) + ' better than ' + str(postgapsrc))
postgap = (combinedID,postgaptarget)
sourcegap.pop()
continue
if targetgap and self.options['Nto1'] > 1:
#try if concatenating target sentences together improves bleu score (beginning of gap)
if pregapsrc:
newscore,newtarget,newcorrect = scoredict[pregapsrc][0]
if newtarget != pregaptarget and newtarget != postgaptarget:
#print('\ntarget side: ' + str(newtarget) + ' better than ' + str(pregaptarget))
pregap = (pregapsrc,newtarget)
for i in newtarget:
if i in targetgap:
del(targetgap[targetgap.index(i)])
continue
#try if concatenating target sentences together improves bleu score (end of gap)
if postgapsrc:
newscore,newtarget,newcorrect = scoredict[postgapsrc][0]
if newtarget != postgaptarget and newtarget != pregaptarget:
#print('\ntarget side: ' + str(newtarget) + ' better than ' + str(postgaptarget))
postgap = (postgapsrc,newtarget)
for i in newtarget:
if i in targetgap:
del(targetgap[targetgap.index(i)])
continue
#concatenation didn't help, and we still have possible one-to-one alignments
if sourcegap and targetgap:
#align first two sentences if BLEU validates this
if "bleu1to1" in self.options['gapfillheuristics']:
try:
besttarget = scoredict[(sourcegap[0],)][0][1]
except:
besttarget = 0
if besttarget == (targetgap[0],):
self.addtoAlignments(pregap)
#print('\none-to-one: ' + str((sourcegap[0],)) + ' to' + str((targetgap[0],)))
pregap = ((sourcegap[0],),besttarget)
del(sourcegap[0])
del(targetgap[0])
continue
#Alternative approach: use Gale & Church.
if "galechurch" in self.options['gapfillheuristics'] and (max(len(targetgap),len(sourcegap))<4 or max(len(targetgap),len(sourcegap))/min(len(targetgap),len(sourcegap)) < 2):
tempsrcgap = []
for src in sourcegap:
tempsrcgap.append((src,translist[src]))
temptargetgap = []
for target in targetgap:
temptargetgap.append((target,targetlist[target]))
churchaligns = self.gale_church(tempsrcgap,temptargetgap)
for src,target in churchaligns:
self.addtoAlignments((src,target),'GALECHURCH')
break
#no valid gapfiller left. break loop and ignore remaining gap
break
break
if not pregap in [i[0] for i in self.multialign]:
self.addtoAlignments(pregap)
return postgap
#Take lists of (ID,Sentence) tuples for the two languages and calculate the Gale & Church alignment
#Then transform it into this program's alignment format
def gale_church(self,tempsrcgap,temptargetgap):
#get sentence lengths in characters
srclengths = [[len(i[1].strip()) for i in tempsrcgap]]
targetlengths = [[len(i[1].strip()) for i in temptargetgap]]
#call gale & church algorithm
pairs = sorted(list((align_texts(srclengths, targetlengths)[0])), key=itemgetter(0))
idict = {}
jdict = {}
newpairs = []
#store 1-to-n alignments in single pairs of tuples (instead of using multiple pairs of ints)
for i,j in pairs:
if i in idict and j in jdict:
done = 0
for iold1, jold1 in newpairs:
if done:
break
if i in iold1:
for iold2, jold2 in newpairs:
if done:
break
if j in jold2:
if not (iold1,jold1) == (iold2,jold2):
del(newpairs[newpairs.index((iold1,jold1))])
del(newpairs[newpairs.index((iold2,jold2))])
inew = tuple(sorted(list(iold1)+list(iold2)))
jnew = tuple(sorted(list(jold1)+list(jold2)))
newpairs.append((inew,jnew))
done = 1
break
elif i in idict:
for iold, jold in newpairs:
if i in iold:
jnew = tuple(sorted(list(jold)+[j]))
newpairs[newpairs.index((iold,jold))] = (iold,jnew)
jdict[j] = 0
break
elif j in jdict:
for iold, jold in newpairs:
if j in jold:
inew = tuple(sorted(list(iold)+[i]))
newpairs[newpairs.index((iold,jold))] = (inew,jold)
idict[i] = 0
break
else:
idict[i] = 0
jdict[j] = 0
newpairs.append(((i,),(j,)))
#Go from Church & Gale's numbering to our IDs
outpairs = []
for i,j in newpairs:
srcID = []
targetID = []
for src in i:
srcID.append(tempsrcgap[src][0])
for target in j:
targetID.append(temptargetgap[target][0])
#print('\nChurch & Gale: ' + str(tuple(srcID)) + ' to ' + str(tuple(targetID)))
outpairs.append((tuple(srcID),tuple(targetID)))
return outpairs
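# Illustrative example (not executed): Gale & Church output pairs [(0, 0), (1, 0)]
# are first merged into the 2-to-1 tuple ((0, 1), (0,)) and then mapped back to the
# original sentence IDs stored in tempsrcgap / temptargetgap.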
#get a list of (ID,Sentence) tuples and generate bi- or tri-sentence tuples
def createNSents(self,l,n=2):
out = []
for i in range(len(l)-n+1):
IDs = tuple([k for sublist in l[i:i+n] for k in sublist[0]])
Sents = " ".join([k[1] for k in l[i:i+n]])
out.append((IDs,Sents))
return out
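# Illustrative example (not executed): createNSents([((0,), 'a'), ((1,), 'b'),
# ((2,), 'c')], n=2) returns [((0, 1), 'a b'), ((1, 2), 'b c')].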
def addtoAlignments(self,pair,aligntype=None):
if not (pair[0] and pair[1]):
return
if aligntype:
self.multialign.append((pair,aligntype))
else:
src,target = pair
if len(src) == 1 and len(target) == 1 and (src[0],target[0]) in self.bleualign:
self.multialign.append((pair,"BLEU"))
else:
self.multialign.append((pair,"GAPFILLER"))
def print_alignment_statistics(self, source_len, target_len):
multialignsrccount = sum([len(i[0][0]) for i in self.multialign])
multialigntargetcount = sum([len(i[0][1]) for i in self.multialign])
self.log("Results of BLEU 1-to-1 alignment",2)
if self.options['verbosity'] >= 2:
bleualignsrc = list(map(itemgetter(0),self.bleualign))
for sourceid in range(source_len):
if sourceid in bleualignsrc:
self.log('\033[92m' + str(sourceid) + ": "
+ str(self.bleualign[bleualignsrc.index(sourceid)][1]) + '\033[1;m')
else:
bestcand = self.scoredict.get(sourceid,[])
if bestcand:
bestcand = bestcand[0][1]
self.log('\033[1;31m'+str(sourceid) + ": unaligned. best cand "
+ str(bestcand)+'\033[1;m')
if source_len and target_len:
self.log("\n" + str(len(self.bleualign)) + ' out of ' + str(source_len) + ' source sentences aligned by BLEU ' + str(100*len(self.bleualign)/float(source_len)) + '%',2)
self.log("after gap filling, " + str(multialignsrccount) + ' out of '+ str(source_len) + ' source sentences aligned ' + str(100*multialignsrccount/float(source_len)) + '%',2)
self.log("after gap filling, " + str(multialigntargetcount) + ' out of '+ str(target_len) + ' target sentences aligned ' + str(100*multialigntargetcount/float(target_len)) + '%',2)
#print out some debugging info, and print output to file
def printout(self, sourcelist, translist, targetlist):
self.print_alignment_statistics(len(sourcelist), len(targetlist))
sources = []
translations = []
targets = []
sources_factored = []
targets_factored = []
if self.options['factored']:
sources_output = sources_factored
targets_output = targets_factored
else:
sources_output = sources
targets_output = targets
self.multialign = sorted(self.multialign,key=itemgetter(0))
sentscores = {}
lastsrc,lasttarget = 0,0
for j,(src,target) in enumerate([i[0] for i in self.multialign]):
self.log("alignment: {0} - {1}".format(",".join(map(str,src)), ",".join(map(str,target))),2)
if self.options['printempty']:
if src[0] != lastsrc + 1:
sources.extend([sourcelist[ID] for ID in range(lastsrc+1,src[0])])
targets.extend(['' for ID in range(lastsrc+1,src[0])])
translations.extend(['' for ID in range(lastsrc+1,src[0])])
if target[0] != lasttarget + 1:
sources.extend(['' for ID in range(lasttarget+1,target[0])])
targets.extend([targetlist[ID] for ID in range(lasttarget+1,target[0])])
translations.extend(['' for ID in range(lasttarget+1,target[0])])
lastsrc = src[-1]
lasttarget = target[-1]
translations.append(' '.join([translist[ID] for ID in src]))
if self.options['factored']:
sources.append(' '.join([sourcelist[ID][0] for ID in src]))
targets.append(' '.join([targetlist[ID][0] for ID in target]))
sources_factored.append(' '.join([sourcelist[ID][1] for ID in src]))
targets_factored.append(' '.join([targetlist[ID][1] for ID in target]))
else:
sources.append(' '.join([sourcelist[ID] for ID in src]))
targets.append(' '.join([targetlist[ID] for ID in target]))
if self.options['filter'] == 'sentences':
self.check_sentence_pair(j, sources[-1], translations[-1], targets[-1], sources_output[-1], targets_output[-1], sentscores)
if self.options['filter'] == 'sentences':
self.filter_sentence_pairs(sentscores, sources_output, targets_output)
if self.options['filter'] == 'articles':
self.filter_article_pairs(sources, translations, targets, sources_output, targets_output)
self.log("\nfinished with article",1)
self.log("\n====================\n",1)
if self.out1 and self.out2 and not self.options['filter']:
if self.options['factored']:
self.out1.write('\n'.join(sources_factored) + '\n')
self.out2.write('\n'.join(targets_factored) + '\n')
else:
self.out1.write('\n'.join(sources) + '\n')
self.out2.write('\n'.join(targets) + '\n')
#get BLEU score of sentence pair (for filtering)
def check_sentence_pair(self, j, src, trans, target, source_out, target_out, sentscores):
sentscore = self.score_article([trans],[target])
sentscore2 = self.score_article([src],[target])
if sentscore2 > sentscore and self.options['filterlang']:
self.out_bad1.write(source_out + '\n')
self.out_bad2.write(target_out + '\n')
else:
if sentscore > 0:
sentscorex = self.score_article([target],[trans])
newsentscore = (2*sentscore*sentscorex)/(sentscore+sentscorex)
else:
newsentscore = 0
sentscores[j]=newsentscore
# get BLEU score for article pair
def score_article(self,test,ref):
refs = [bleu.cook_refs([refSent],self.options['bleu_ngrams']) for refSent in ref]
testcook = []
for i,line in enumerate(test):
testcook.append(bleu.cook_test(line,refs[i],self.options['bleu_ngrams']))
score = bleu.score_cooked(testcook,self.options['bleu_ngrams'])
return score
# store BLEU score for each sentence pair (used for filtering at the very end)
def filter_sentence_pairs(self, sentscores, sources_output, targets_output):
before = len(self.sources_out)
for j,(src,target) in enumerate([i[0] for i in self.multialign]):
if j in sentscores: # false if sentence pair has been filtered out by language filter
confidence = sentscores[j]
self.finalbleu.append((confidence,sentscores.get(j),before,before+1))
before += 1
self.sources_out.append(sources_output[j])
self.targets_out.append(targets_output[j])
# store BLEU score for each article pair (used for filtering at the very end)
def filter_article_pairs(self, sources, translations, targets, sources_output, targets_output):
articlescore = self.score_article(translations,targets)
articlescore2 = self.score_article(sources,targets)
self.log('\nBLEU score for article: ' + str(articlescore) + ' / ' + str(articlescore2),1)
if articlescore2 > articlescore and self.options['filterlang']:
if self.options['factored']:
sources, targets = sources_output, targets_output  # the factored strings are passed in via the *_output lists
for i,line in enumerate(sources):
self.out_bad1.write(line + '\n')
self.out_bad2.write(targets[i] + '\n')
else:
articlescorex = self.score_article(targets,translations)
if articlescore > 0:
articlescore = (articlescore*articlescorex*2)/(articlescore+articlescorex)
before = len(self.sources_out)
after = before + len(self.multialign)
self.finalbleu.append((articlescore,articlescore2,before,after))
self.sources_out += sources_output
self.targets_out += targets_output
#filter bad sentence pairs / article pairs
def write_filtered(self):
self.finalbleu = sorted(self.finalbleu,key=itemgetter(0),reverse=True)
self.log(self.finalbleu,2)
totallength=0
totalscore=0
for (articlescore,articlescore2,before,after) in self.finalbleu:
length = after-before
totallength += length
totalscore += articlescore*length
if totallength != 0:
averagescore = totalscore/totallength
self.log("The average BLEU score is: " + str(averagescore),1)
goodlength = totallength*self.options['filterthreshold']/float(100)
totallength = 0
bad_percentiles = []
for i,(articlescore,articlescore2,before,after) in enumerate(self.finalbleu):
length = after-before
totallength += length
if totallength > goodlength:
bad_percentiles = self.finalbleu[i+1:]
self.log("\nDiscarding the following " + self.options['filter'] + " based on relative BLEU\n",2)
self.log(bad_percentiles,2)
if self.options['verbosity'] >= 3:
for score,score2,start,end in bad_percentiles:
for i in range(start,end):
self.log(score,3)
self.log(self.sources_out[i],3)
self.log(self.targets_out[i],3)
self.log('-----------------',3)
break
stopwrite = set([i[2] for i in bad_percentiles])
resumewrite = set([i[3] for i in bad_percentiles])
stopped = 0
#absolute BLEU threshold
if self.options['bleuthreshold']:
bad_sentences = []
for i,(articlescore,articlescore2,before,after) in enumerate(self.finalbleu):
if articlescore < self.options['bleuthreshold']:
bad_sentences.append((articlescore,articlescore2,before,after))
stopwrite.add(before)
resumewrite.add(after)
self.log("\nDiscarding the following " + self.options['filter'] + " based on absolute BLEU\n",2)
self.log(bad_sentences,2)
if self.options['verbosity'] >= 3:
for score,score2,start,end in bad_sentences:
for i in range(start,end):
self.log(score,3)
self.log(self.sources_out[i],3)
self.log(self.targets_out[i],3)
self.log('-----------------',3)
if self.out1 and self.out2 and self.out_bad1 and self.out_bad2:
for i,line in enumerate(self.sources_out):
if i in resumewrite:
stopped = 0
if i in stopwrite:
stopped = 1
if stopped:
self.out_bad1.write(line + '\n')
self.out_bad2.write(self.targets_out[i] + '\n')
else:
self.out1.write(line + '\n')
self.out2.write(self.targets_out[i] + '\n')
#close all files opened by __init__
def close_file_streams(self):
if self.close_src:
self.src.close()
if self.close_target:
self.target.close()
if self.close_out1:
self.out1.close()
if self.close_out2:
self.out2.close()
if self.close_out_bad1:
self.out_bad1.close()
if self.close_out_bad2:
self.out_bad2.close()
for should_be_closed,output_stream\
in zip(self.close_srctotarget,self.srctotarget):
if should_be_closed:
output_stream.close()
for should_be_closed,output_stream\
in zip(self.close_targettosrc,self.targettosrc):
if should_be_closed:
output_stream.close()
def log(self, msg, level = 1, end='\n'):
if level <= self.options['verbosity']:
print(msg, end=end, file = self.options['log_to'])
#Allows parallelizing of alignment
if multiprocessing_enabled:
class AlignMultiprocessed(multiprocessing.Process,Aligner):
def __init__(self,tasks,options,scores,log):
multiprocessing.Process.__init__(self)
self.options = options
self.tasks = tasks
self.scores = scores
self.log = log
self.bleualign = []
self.scoredict = None
def run(self):
i,data = self.tasks.get()
while i != None:
self.log('reading in article ' + str(i) + ': ',1)
sourcelist,targetlist,translist1,translist2 = data
self.multialign = self.process(sourcelist,targetlist,translist1,translist2)
self.scores[i] = (data,self.multialign,self.bleualign,self.scoredict)
i,data = self.tasks.get()
|
rsennrich/Bleualign
|
bleualign/align.py
|
Python
|
gpl-2.0
| 47,568
|
# coding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import unittest
import mock
from apache_beam.examples.snippets.util import assert_matches_stdout
from apache_beam.testing.test_pipeline import TestPipeline
from . import tostring
def check_plants(actual):
expected = '''[START plants]
🍓,Strawberry
🥕,Carrot
🍆,Eggplant
🍅,Tomato
🥔,Potato
[END plants]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
def check_plant_lists(actual):
expected = '''[START plant_lists]
['🍓', 'Strawberry', 'perennial']
['🥕', 'Carrot', 'biennial']
['🍆', 'Eggplant', 'perennial']
['🍅', 'Tomato', 'annual']
['🥔', 'Potato', 'perennial']
[END plant_lists]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
def check_plants_csv(actual):
expected = '''[START plants_csv]
🍓,Strawberry,perennial
🥕,Carrot,biennial
🍆,Eggplant,perennial
🍅,Tomato,annual
🥔,Potato,perennial
[END plants_csv]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
@mock.patch('apache_beam.Pipeline', TestPipeline)
@mock.patch(
'apache_beam.examples.snippets.transforms.elementwise.tostring.print', str)
class ToStringTest(unittest.TestCase):
def test_tostring_kvs(self):
tostring.tostring_kvs(check_plants)
def test_tostring_element(self):
tostring.tostring_element(check_plant_lists)
def test_tostring_iterables(self):
tostring.tostring_iterables(check_plants_csv)
if __name__ == '__main__':
unittest.main()
|
iemejia/incubator-beam
|
sdks/python/apache_beam/examples/snippets/transforms/elementwise/tostring_test.py
|
Python
|
apache-2.0
| 2,331
|
test_type = {
'APP_TEST':"1",
'STANDARD_TEST':'0',
'SERVER_TEST':'2',
'FHIR_TEST':'3'
}
|
ideaworld/FHIR_Tester
|
FHIR_Tester_backend/home/config.py
|
Python
|
mit
| 103
|
# -*- coding: utf-8 -*-
"""
Test accounts module
"""
import os
from decimal import Decimal
from mock import patch
from django.core.urlresolvers import reverse
from django.http import HttpResponseForbidden
from django.test import TestCase
from .factories import UserFactory, UserWithAvatarFactory, AdminFactory
from .models import UserProfile, get_user_avatar_path, TeamAccessKey, Team
from .tasks import topup_accounts_task, update_portfolio_value, create_accounts_snapshot, \
update_users_classification
from .templatetags.user import user_home, user_rank
from .utils import process_username
from constance import config
from events.factories import EventFactory, BetFactory
from events.models import Event, Bet
from politikon.templatetags.format import formatted
from politikon.templatetags.path import startswith
class UserProfileModelTestCase(TestCase):
"""
Test methods for user object
"""
def test_user_creation(self):
"""
Create user and check his attributes
"""
user = UserFactory(username='johnsmith', name='John Smith')
self.assertEqual('johnsmith', user.__unicode__())
self.assertEqual('John Smith', user.name)
self.assertEqual('John Smith', user.get_short_name())
self.assertEqual(False, user.is_vip)
self.assertEqual('John Smith (johnsmith)', user.get_full_name())
self.assertEqual('John Smith (johnsmith)', user.full_name)
user.calculate_reputation()
user.save()
self.assertEqual(False, user.is_superuser)
self.assertEqual({
'user_id': 1,
'total_cash': formatted(0),
'portfolio_value': formatted(0),
'reputation': '100%',
}, user.statistics_dict)
self.assertEqual(0, user.current_portfolio_value)
def test_get_user_avatar_path(self):
"""
Get image path
"""
user = UserFactory(username='johnsmith')
path = get_user_avatar_path(user, 'my-avatar.png')
self.assertEqual('avatars/johnsmith.png', path)
def test_user_urls(self):
"""
Check that URLs are valid
"""
user = UserFactory(
twitter_user='jsmith',
facebook_user='facesmith'
)
# TODO: FIXME
# url = user.get_absolute_url()
# self.assertEqual('/accounts/1/', url)
#
# url = user.get_avatar_url()
# self.assertEqual('/static/img/blank-avatar.jpg', url)
#
# url = user.get_twitter_url()
# self.assertEqual('https://twitter.com/jsmith', url)
#
# url = user.get_facebook_url()
# self.assertEqual('https://www.facebook.com/facesmith', url)
def test_twitter_user(self):
"""
Check the methods for an account connected with Twitter
"""
user = UserFactory()
url = user.get_facebook_url()
self.assertIsNone(url)
url = user.get_twitter_url()
self.assertIsNone(url)
user.twitter_user = 'jsmith'
user.save()
url = user.get_twitter_url()
self.assertEqual('https://twitter.com/jsmith', url)
def test_current_portfolio_value(self):
"""
Current portfolio value
"""
user = UserFactory()
self.assertEqual(0, user.current_portfolio_value)
event = EventFactory()
bet = BetFactory(user=user, event=event)
self.assertEqual(50, user.current_portfolio_value)
bet.outcome = Bet.NO
bet.has = 2
bet.save()
self.assertEqual(100, user.current_portfolio_value)
def test_get_avatar_url(self):
"""
Get avatar URL
"""
user = UserFactory()
self.assertEqual('/static/img/blank-avatar.jpg', user.get_avatar_url())
user2 = UserWithAvatarFactory(username='johnrambro')
self.assertEqual('avatars/johnrambro.jpg', user2.get_avatar_url())
os.remove('avatars/johnrambro.jpg')
def test_reset_account_without_bonus(self):
"""
Test reset account
"""
user = UserFactory()
user.reset_account()
self.assertEqual({
'user_id': 1,
'total_cash': formatted(1000),
'portfolio_value': formatted(0),
'reputation': "100%",
}, user.statistics_dict)
def test_reset_account_with_bonus(self):
"""
Test reset account
"""
user = UserFactory()
user.reset_account(0.1)
self.assertEqual({
'user_id': 1,
'total_cash': formatted(1100),
'portfolio_value': formatted(0),
'reputation': "110%",
}, user.statistics_dict)
def test_get_newest_results(self):
"""
Get newest results
"""
users = UserFactory.create_batch(2)
events = EventFactory.create_batch(5)
BetFactory(user=users[0], event=events[0])
bet2 = BetFactory(user=users[0], event=events[1])
bet3 = BetFactory(user=users[0], event=events[2])
bet4 = BetFactory(user=users[0], event=events[3])
bet5 = BetFactory(user=users[1], event=events[4])
events[1].outcome = Event.CANCELLED
events[1].save()
events[2].outcome = Event.FINISHED_YES
events[2].save()
events[3].outcome = Event.FINISHED_NO
events[3].save()
events[4].outcome = Event.FINISHED_YES
events[4].save()
bet2.is_new_resolved = True
bet2.save()
bet3.is_new_resolved = True
bet3.save()
bet4.is_new_resolved = True
bet4.save()
bet5.is_new_resolved = True
bet5.save()
self.assertEqual([bet2, bet3, bet4], list(users[0].get_newest_results()))
self.assertEqual([bet5], list(users[1].get_newest_results()))
class UserProfileManagerTestCase(TestCase):
"""
accounts/managers UserProfileManager
"""
def test_return_new_user_object(self):
"""
Return new user object
"""
user = UserProfile.objects.return_new_user_object(
username='j_smith',
password='password9',
)
self.assertIsInstance(user, UserProfile)
self.assertEqual('j_smith', user.username)
self.assertTrue(user.check_password('password9'))
with self.assertRaises(ValueError):
UserProfile.objects.return_new_user_object(
username=None,
)
def test_create_user(self):
"""
Create user
"""
user = UserProfile.objects.create_user(
username='j_smith',
email='j_smith@example.com',
password='password9',
)
self.assertIsInstance(user, UserProfile)
self.assertEqual('j_smith', user.username)
self.assertTrue(user.check_password('password9'))
self.assertTrue(user.is_active)
self.assertEqual({
'user_id': 1,
'total_cash': formatted(config.STARTING_CASH),
'portfolio_value': formatted(0),
'reputation': '100%',
}, user.statistics_dict)
user2 = UserProfile.objects.create_user(
username='j_smith',
email='j_smith@example.com',
)
self.assertIsInstance(user2, HttpResponseForbidden)
def test_create_superuser(self):
"""
Create superuser
"""
user = UserProfile.objects.create_superuser(
username='j_smith',
email='j_smith@example.com',
password='password9',
)
self.assertIsInstance(user, UserProfile)
self.assertEqual('j_smith', user.username)
self.assertTrue(user.check_password('password9'))
self.assertTrue(user.is_staff)
self.assertTrue(user.is_admin)
self.assertTrue(user.is_active)
self.assertEqual({
'user_id': 1,
'total_cash': formatted(0),
'portfolio_value': formatted(0),
'reputation': '100%',
}, user.statistics_dict)
user2 = UserProfile.objects.create_superuser(
username='j_smith',
email='j_smith@example.com',
)
self.assertIsInstance(user2, HttpResponseForbidden)
def test_create_user_with_random_password(self):
"""
Create user with random password
"""
user, password = UserProfile.objects.create_user_with_random_password(
username='j_smith',
)
self.assertTrue(user.check_password(password))
def test_get_users(self):
"""
Get users
"""
user1 = UserFactory()
UserFactory(is_deleted=True)
UserFactory(is_active=False)
users = UserProfile.objects.get_users()
self.assertIsInstance(users[0], UserProfile)
self.assertEqual(1, len(users))
self.assertEqual([user1], list(users))
def test_get_ranking_users(self):
"""
Get ranking users
"""
UserFactory()
UserFactory()
UserFactory(is_deleted=True)
UserFactory(is_active=False)
users = UserProfile.objects.get_ranking_users()
self.assertEqual(0, len(users))
self.assertEqual([], list(users))
# TODO mock transaction
def test_get_admins(self):
"""
Get admins
"""
UserFactory()
UserFactory(is_admin=True)
UserFactory(is_staff=True)
user4 = AdminFactory()
admins = UserProfile.objects.get_admins()
self.assertIsInstance(admins[0], UserProfile)
self.assertEqual(1, len(admins))
self.assertEqual([user4], list(admins))
def test_get_best_weekly(self):
"""
Get best weekly
"""
user1 = UserFactory(weekly_result=100)
user2 = UserFactory(weekly_result=300)
UserFactory()
AdminFactory()
users = UserProfile.objects.get_best_weekly()
self.assertEqual(0, len(users))
self.assertEqual([], list(users))
# TODO mock transaction
# self.assertIsInstance(users[0], UserProfile)
# self.assertEqual(2, len(users))
# self.assertEqual([user2, user1], list(users))
def test_get_best_monthly(self):
"""
Get best monthly
"""
UserFactory()
user2 = UserFactory(monthly_result=300)
AdminFactory()
user4 = UserFactory(monthly_result=100)
users = UserProfile.objects.get_best_monthly()
self.assertEqual(0, len(users))
self.assertEqual([], list(users))
# TODO mock transaction
# self.assertIsInstance(users[0], UserProfile)
# self.assertEqual(2, len(users))
# self.assertEqual([user2, user4], list(users))
def test_get_best_overall(self):
"""
Get best overall
"""
user1 = UserFactory()
user2 = UserFactory(reputation=Decimal(300))
AdminFactory()
user4 = UserFactory(reputation=Decimal(50))
users = UserProfile.objects.get_best_overall()
self.assertEqual(0, len(users))
self.assertEqual([], list(users))
# TODO mock transaction
# self.assertIsInstance(users[0], UserProfile)
# self.assertEqual(3, len(users))
# self.assertEqual([user2, user1, user4], list(users))
def test_get_user_positions(self):
"""
Get user positions
"""
user1 = UserFactory(weekly_result=100)
user2 = UserFactory(weekly_result=300, monthly_result=300, reputation=Decimal(300))
user3 = AdminFactory()
user4 = UserFactory(monthly_result=100, reputation=Decimal(50))
# TODO mock
self.assertEqual({
'week_rank': '-',
'month_rank': '-',
'overall_rank': '-'
}, UserProfile.objects.get_user_positions(user1))
self.assertEqual({
'week_rank': '-',
'month_rank': '-',
'overall_rank': '-'
}, UserProfile.objects.get_user_positions(user2))
self.assertEqual({
'week_rank': '-',
'month_rank': '-',
'overall_rank': '-'
}, UserProfile.objects.get_user_positions(user3))
self.assertEqual({
'week_rank': '-',
'month_rank': '-',
'overall_rank': '-'
}, UserProfile.objects.get_user_positions(user4))
# self.assertEqual({
# 'week_rank': 2,
# 'month_rank': '-',
# 'overall_rank': 2
# }, UserProfile.objects.get_user_positions(user1))
# self.assertEqual({
# 'week_rank': 1,
# 'month_rank': 1,
# 'overall_rank': 1
# }, UserProfile.objects.get_user_positions(user2))
# self.assertEqual({
# 'week_rank': '-',
# 'month_rank': '-',
# 'overall_rank': '-'
# }, UserProfile.objects.get_user_positions(user3))
# self.assertEqual({
# 'week_rank': '-',
# 'month_rank': 2,
# 'overall_rank': 3
# }, UserProfile.objects.get_user_positions(user4))
class UserPipelineTestCase(TestCase):
"""
accounts/pipeline
"""
def test_save_profile(self):
"""
Save profile
"""
user = UserFactory()
        # TODO: exercise save_profile(user, ...) once the pipeline arguments are settled
class UserTasksTestCase(TestCase):
"""
accounts/tasks
"""
def test_topup_accounts_task(self):
"""
Topup
"""
user = UserFactory()
topup_accounts_task()
user.refresh_from_db()
self.assertEqual(config.DAILY_TOPUP, user.total_cash)
# TODO mock and test exception
@patch.object(UserProfile, 'topup_cash')
@patch('accounts.tasks.logger')
def test_topup_accounts_task_error(self, logger, topup_cash):
UserFactory()
topup_cash.side_effect = Exception()
topup_accounts_task()
logger.exception.assert_called_once()
def test_update_portfolio_value(self):
"""
Update portfolio_value
"""
price = 90
user = UserFactory()
event = EventFactory(current_sell_for_price=price)
BetFactory(user=user, event=event, has=1, outcome=True)
self.assertEqual(0, user.portfolio_value)
update_portfolio_value()
user.refresh_from_db()
# TODO FIXME
# self.assertEqual(price, user.portfolio_value)
def test_create_accounts_snapshot(self):
user = UserFactory()
create_accounts_snapshot()
# TODO mock logger and create_snapshot()
def test_update_users_classification(self):
users = UserFactory.create_batch(6)
update_users_classification()
# TODO: mock reputation changes
class UserTemplatetagsTestCase(TestCase):
"""
accounts/templatetags
"""
def test_user_home(self):
"""
User home
"""
user = UserFactory()
user_templatetag = user_home(user, 1000, True)
self.assertEqual({
'user': user,
'reputation_change': 1000,
'is_formatted': True
}, user_templatetag)
user_templatetag = user_home(user, -100)
self.assertEqual({
'user': user,
'reputation_change': -100,
'is_formatted': False
}, user_templatetag)
# TODO FIXME
# def test_user_rank(self):
# """
# User rank
# """
# user = UserFactory()
# user_templatetag = user_rank(user)
# self.assertEqual({
# 'profit': None,
# 'user': user,
# 'counter': 1,
# }, user_templatetag)
# user_templatetag_with_profit = user_rank(user, 10)
# self.assertEqual({
# 'profit': 10,
# 'user': user,
# 'counter': 1,
# }, user_templatetag_with_profit)
def test_get_reputation_history(self):
"""
Get reputation history
"""
# TODO
def test_get_reputation_change(self):
"""
Get reputation change
"""
# TODO
def test_last_week_reputation_change(self):
"""
Get last week reputation change
"""
# TODO
def test_last_month_reputation_change(self):
"""
Get last month reputation change
"""
# TODO
class PolitikonUserTemplatetagsTestCase(TestCase):
"""
politikon/templatetags
"""
def test_startswith(self):
"""
Startswith
"""
start_path = reverse('accounts:rank')
path = reverse('accounts:rank')
self.assertTrue(startswith(path, start_path))
class UserUtilsTestCase(TestCase):
"""
accounts/utils
"""
def test_process_username(self):
"""
Process username
"""
username = process_username(u"zażółćgęśląjaźń")
self.assertEqual('zazolcgeslajazn', username)
UserFactory(username='zazolcgeslajazn')
username2 = process_username(u"zażółćgęśląjaźń")
self.assertNotEqual('zazolcgeslajazn', username2)
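# The test above documents accounts.utils.process_username: the raw name is
# transliterated to ASCII ('zażółćgęśląjaźń' -> 'zazolcgeslajazn') and, when
# that username is already taken, a different de-duplicated value is returned.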
class TeamAccessTokenModelTestCase(TestCase):
def test_distinction_of_tokens(self):
team = Team.objects.create(name='TestTeam')
key1 = TeamAccessKey.objects.create(team=team)
key2 = TeamAccessKey.objects.create(team=team)
self.assertEqual(TeamAccessKey.objects.count(), 2)
self.assertNotEqual(key1.value, key2.value)
self.assertIsNotNone(key1.team)
self.assertIsNotNone(key2.team)
self.assertIs(key1.team, team)
self.assertIs(key2.team, team)
key3 = TeamAccessKey(team=team)
key4 = TeamAccessKey(team=team)
key3.save()
key4.save()
self.assertEqual(TeamAccessKey.objects.count(), 4)
self.assertNotEqual(key3.value, key4.value)
self.assertIsNotNone(key3.team)
self.assertIsNotNone(key4.team)
self.assertIs(key3.team, team)
self.assertIs(key4.team, team)
|
KlubJagiellonski/Politikon
|
accounts/tests.py
|
Python
|
gpl-2.0
| 18,113
|
from __future__ import absolute_import
from kombu import Connection, Producer
from kombu import pools
from kombu.connection import ConnectionPool
from kombu.utils import eqhash
from .case import Case, Mock
class test_ProducerPool(Case):
Pool = pools.ProducerPool
class MyPool(pools.ProducerPool):
def __init__(self, *args, **kwargs):
self.instance = Mock()
pools.ProducerPool.__init__(self, *args, **kwargs)
def Producer(self, connection):
return self.instance
def setUp(self):
self.connections = Mock()
self.pool = self.Pool(self.connections, limit=10)
def test_close_resource(self):
self.pool.close_resource(Mock(name='resource'))
def test_releases_connection_when_Producer_raises(self):
self.pool.Producer = Mock()
self.pool.Producer.side_effect = IOError()
acq = self.pool._acquire_connection = Mock()
conn = acq.return_value = Mock()
with self.assertRaises(IOError):
self.pool.create_producer()
conn.release.assert_called_with()
def test_prepare_release_connection_on_error(self):
pp = Mock()
p = pp.return_value = Mock()
p.revive.side_effect = IOError()
acq = self.pool._acquire_connection = Mock()
conn = acq.return_value = Mock()
p._channel = None
with self.assertRaises(IOError):
self.pool.prepare(pp)
conn.release.assert_called_with()
def test_release_releases_connection(self):
p = Mock()
p.__connection__ = Mock()
self.pool.release(p)
p.__connection__.release.assert_called_with()
p.__connection__ = None
self.pool.release(p)
def test_init(self):
self.assertIs(self.pool.connections, self.connections)
def test_Producer(self):
self.assertIsInstance(self.pool.Producer(Mock()), Producer)
def test_acquire_connection(self):
self.pool._acquire_connection()
self.connections.acquire.assert_called_with(block=True)
def test_new(self):
promise = self.pool.new()
producer = promise()
self.assertIsInstance(producer, Producer)
self.connections.acquire.assert_called_with(block=True)
def test_setup_unlimited(self):
pool = self.Pool(self.connections, limit=None)
pool.setup()
self.assertFalse(pool._resource.queue)
def test_setup(self):
self.assertEqual(len(self.pool._resource.queue), self.pool.limit)
first = self.pool._resource.get_nowait()
producer = first()
self.assertIsInstance(producer, Producer)
def test_prepare(self):
connection = self.connections.acquire.return_value = Mock()
pool = self.MyPool(self.connections, limit=10)
pool.instance._channel = None
first = pool._resource.get_nowait()
producer = pool.prepare(first)
self.assertTrue(self.connections.acquire.called)
producer.revive.assert_called_with(connection)
def test_prepare_channel_already_created(self):
self.connections.acquire.return_value = Mock()
pool = self.MyPool(self.connections, limit=10)
pool.instance._channel = Mock()
first = pool._resource.get_nowait()
self.connections.acquire.reset()
producer = pool.prepare(first)
self.assertFalse(producer.revive.called)
def test_prepare_not_callable(self):
x = Producer(Mock)
self.pool.prepare(x)
def test_release(self):
p = Mock()
p.channel = Mock()
p.__connection__ = Mock()
self.pool.release(p)
p.__connection__.release.assert_called_with()
self.assertIsNone(p.channel)
class test_PoolGroup(Case):
Group = pools.PoolGroup
class MyGroup(pools.PoolGroup):
def create(self, resource, limit):
return resource, limit
def test_interface_create(self):
g = self.Group()
with self.assertRaises(NotImplementedError):
g.create(Mock(), 10)
def test_getitem_using_global_limit(self):
pools._used[0] = False
g = self.MyGroup(limit=pools.use_global_limit)
res = g['foo']
self.assertTupleEqual(res, ('foo', pools.get_limit()))
self.assertTrue(pools._used[0])
def test_getitem_using_custom_limit(self):
pools._used[0] = True
g = self.MyGroup(limit=102456)
res = g['foo']
self.assertTupleEqual(res, ('foo', 102456))
def test_delitem(self):
g = self.MyGroup()
g['foo']
        del g['foo']
self.assertNotIn('foo', g)
def test_Connections(self):
conn = Connection('memory://')
p = pools.connections[conn]
self.assertTrue(p)
self.assertIsInstance(p, ConnectionPool)
self.assertIs(p.connection, conn)
self.assertEqual(p.limit, pools.get_limit())
def test_Producers(self):
conn = Connection('memory://')
p = pools.producers[conn]
self.assertTrue(p)
self.assertIsInstance(p, pools.ProducerPool)
self.assertIs(p.connections, pools.connections[conn])
self.assertEqual(p.limit, p.connections.limit)
self.assertEqual(p.limit, pools.get_limit())
def test_all_groups(self):
conn = Connection('memory://')
pools.connections[conn]
self.assertTrue(list(pools._all_pools()))
def test_reset(self):
pools.reset()
class MyGroup(dict):
clear_called = False
def clear(self):
self.clear_called = True
p1 = pools.connections['foo'] = Mock()
g1 = MyGroup()
pools._groups.append(g1)
pools.reset()
p1.force_close_all.assert_called_with()
self.assertTrue(g1.clear_called)
p1 = pools.connections['foo'] = Mock()
p1.force_close_all.side_effect = KeyError()
pools.reset()
def test_set_limit(self):
pools.reset()
pools.set_limit(34576)
limit = pools.get_limit()
self.assertEqual(limit, 34576)
pools.connections[Connection('memory://')]
pools.set_limit(limit + 1)
self.assertEqual(pools.get_limit(), limit + 1)
limit = pools.get_limit()
with self.assertRaises(RuntimeError):
pools.set_limit(limit - 1)
pools.set_limit(limit - 1, force=True)
self.assertEqual(pools.get_limit(), limit - 1)
pools.set_limit(pools.get_limit())
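# A minimal illustration of what the functional test below exercises (using
# only the public kombu API): pools.connections is keyed by the connection's
# eqhash, so Connection objects built from the same URL share one pool while
# different URLs get distinct pools, e.g.
#
#     from kombu import Connection, pools
#     c1, c2 = Connection('memory://x'), Connection('memory://x')
#     assert pools.connections[c1] is pools.connections[c2]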
class test_fun_PoolGroup(Case):
def test_connections_behavior(self):
c1u = 'memory://localhost:123'
c2u = 'memory://localhost:124'
c1 = Connection(c1u)
c2 = Connection(c2u)
c3 = Connection(c1u)
assert eqhash(c1) != eqhash(c2)
assert eqhash(c1) == eqhash(c3)
p1 = pools.connections[c1]
p2 = pools.connections[c2]
p3 = pools.connections[c3]
self.assertIsNot(p1, p2)
self.assertIs(p1, p3)
r1 = p1.acquire()
self.assertTrue(p1._dirty)
self.assertTrue(p3._dirty)
self.assertFalse(p2._dirty)
r1.release()
self.assertFalse(p1._dirty)
self.assertFalse(p3._dirty)
|
1stvamp/kombu
|
kombu/tests/test_pools.py
|
Python
|
bsd-3-clause
| 7,242
|
#!/usr/bin/env python
# coding=utf-8
import pprint
import csv
import click
import requests
import datetime as datetime
from datetime import date
from xml.etree import ElementTree as ET
import os
# from random import sample
import random
import json
import copy
# import os
# import json
# import logging
from xml.etree.ElementTree import ParseError
def validate_d(date_text):
try:
datetime.datetime.strptime(date_text, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect data format, should be YYYY-MM-DD")
def daterange(start_date, end_date):
    for n in range(int((end_date - start_date).days)):
yield start_date + datetime.timedelta(n)
# booking_id_secret = None
# with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'secrets.json')) as data_file:
# booking_id_secret = (json.load(data_file))['booking_id']
REF_API = 'api'
REF_CLIENT = 'client'
REF_AGENT = 'agent'
CONFIRMED = 'Confirmed or Completed'
bad_hotels = [{"city_code": "SHEN", "item_code": "ASC"},
{"city_code": "CHEG", "item_code": "HOW"},
{"city_code": "WUH", "item_code": "CIT"},
{"city_code": "CKG", "item_code": "94"}
]
def is_bad_hotel(city_code, item_code):
for bad_hotel in bad_hotels:
if bad_hotel['city_code'] == city_code and bad_hotel['item_code'] == item_code:
return True
return False
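# find_chain() is the aggregation lookup used in hb_er(): error-report rows are
# grouped by the (hotel chain name, contract supplier id) pair, and this helper
# returns the entry already accumulated for that pair, or None when a new one
# must be started.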
def find_chain(hotel_chain, supplier_id, res):
for ent in res:
if ent['hotel_chain_name'] == hotel_chain and ent['supplier_id'] == supplier_id:
return ent
return None
# "Start Hour";
# "Start Timestamp";
# "Start Day";
# "Server Name";
# "Request: Client";
# "Request: Client Account ID";
# "Request: Client Account Name";
# "Request: Client Main Account ID";
# "Request: Client Main Account Name";
# "Request: Contract Supplier ID";
# "Request: Contract Supplier Name";
# "Request: Contract Supplier Rate";
# "Request: Contract Name";
# "Response: Hotel Supplier Brand";
# "Request: Check In Date";
# "Request: Check Out Date";
# "Request: Hotel Code";
# "Request: Hotel Name";
# "Request: Hotel Chain Name";
# "Request: Hotel Region";
# "Request: Occupancy";
# "Request: Room Type Code";
# "Request: Board Type Code";
# "Request: Lead Time Days";
# "Response: Error Message";
# "Response: Error Detailed Message";
# "Component Downloads (hits)";
# "Price Difference (Increase > 2%) (hits)";
# "Price Difference (Decrease < -2%) (hits)";
# "No Price Difference (hits)";
# "Errors (hits)";
# "SI Error: Hotel Not Available (hits)";
# "SI Error: External Error (hits)";
# "SI Error: Request Restricted (hits)";
# "Error: Allotment (hits)";
# "Error: Client Originated (hits)";
# "Error: OTHER / SYSTEM (hits)";
# "Error: Duplicated Error (APITUDE Only) (hits)"
@click.command()
@click.option('--filename', default='CTRIP---API-Errors---API-valuation-step-issues--h-_v205111_s2608_2018-06-23-00-00.csv')
# @click.option('--client', default='ctrip')
# @click.option('--days', default=1, type=int)
def hb_er(filename):
# url = 'https://rbs.gta-travel.com/rbscnapi/RequestListenerServlet'
# pp = pprint
# # agent_secret = None
# # with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'secrets.json')) as data_file:
# # agent_secret = (json.load(data_file))[client]
# agent_secret = None
# with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'secrets.json')) as data_file:
# agent_secret = json.load(data_file)
# print('Search client.. ' + client)
err_records = []
with open(filename, encoding='utf-8-sig') as csvfile:
ids = set()
reader = csv.DictReader(csvfile, delimiter=';')
for row in reader:
# if row['gta_api_booking_id'] not in ids:
# bookings_c.append(row['gta_api_booking_id'])
# ids.add(row['gta_api_booking_id'])
err_records.append(row)
res = []
for record in err_records:
ent = find_chain(record['Request: Hotel Chain Name'], record['Request: Contract Supplier ID'], res)
        if ent is None:
ent = {}
ent['timestamp'] = record['Start Timestamp']
ent['hotel_chain_name'] = record['Request: Hotel Chain Name']
ent['supplier_id'] = record['Request: Contract Supplier ID']
ent['supplier_name'] = record['Request: Contract Supplier Name']
# Request: Contract Supplier Name
ent['client_id'] = record['Request: Client Account ID']
ent['client_name'] = record['Request: Client Account Name']
ent['server_name'] = record['Server Name']
try:
ent['hits'] = int(record['Component Downloads (hits)'])
except ValueError:
ent['hits'] = 0
try:
ent['price_diff_>2%'] = int(record['Price Difference (Increase > 2%) (hits)'])
except ValueError:
ent['price_diff_>2%'] = 0
try:
ent['price_diff_<2%'] = int(record['Price Difference (Decrease < -2%) (hits)'])
except ValueError:
ent['price_diff_<2%'] = 0
try:
ent['no_price_diff'] = int(record['No Price Difference (hits)'])
except ValueError:
ent['no_price_diff'] = 0
try:
ent['error_hits'] = int(record['Errors (hits)'])
except ValueError:
ent['error_hits'] = 0
# except ValueError:
# print('Value error : ' + \
# str(record['Component Downloads (hits)']) + \
# str(record['Price Difference (Increase > 2%) (hits)']) + \
# str(record['Price Difference (Decrease < -2%) (hits)']) + \
# str(record['No Price Difference (hits)']) + \
# str(record['Errors (hits)'])
# )
# float('{0:.3f}'.format(float( (l2b - last_l2b) / last_l2b )))
res.append(ent)
continue
try:
ent['hits'] = ent['hits'] + int(record['Component Downloads (hits)'])
except ValueError:
pass
try:
ent['price_diff_>2%'] = ent['price_diff_>2%'] + int(record['Price Difference (Increase > 2%) (hits)'])
except ValueError:
pass
try:
ent['price_diff_<2%'] = ent['price_diff_<2%'] + int(record['Price Difference (Decrease < -2%) (hits)'])
except ValueError:
pass
try:
ent['no_price_diff'] = ent['no_price_diff'] + int(record['No Price Difference (hits)'])
except ValueError:
pass
try:
ent['error_hits'] = ent['error_hits'] + int(record['Errors (hits)'])
except ValueError:
pass
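    # Derived metric computed below: success_ratio = no_price_diff / hits,
    # i.e. the share of valuation hits whose price came back unchanged.
    # Entries where fewer than half of the hits are unchanged get
    # recommend_offline='yes'; the 0.5 threshold is this script's heuristic.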
for ent in res:
        if ent['hits']:
            ent['success_ratio'] = float('{0:.3f}'.format(ent['no_price_diff'] / ent['hits']))
        else:
            # Guard added: every hit counter for this entry failed to parse, so
            # avoid a ZeroDivisionError and treat it as a 0% success ratio.
            ent['success_ratio'] = 0.0
ent['recommend_offline'] = 'no'
if ent['success_ratio'] < 0.5:
ent['recommend_offline'] = 'yes'
output_file_name = '_'.join(['output_hb_pa_stats',
datetime.datetime.today().date().strftime('%y%m%d'),
datetime.datetime.now().strftime('%H%M'),
'.csv'])
keys = res[0].keys()
# with open('output_SearchPrice_' + date.today().strftime('%Y_%m_%d') + '.csv', 'w', encoding='utf-8') as output_file:
# with open('output_Search_item_hr_' + datetime.datetime.today().date().strftime('%y%m%d') + '_' + datetime.datetime.now().strftime('%H%M') + '.csv', 'w', newline='', encoding='utf-8') as output_file:
with open(output_file_name, 'w', newline='', encoding='utf-8') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(res)
# print(str(err_records[100]))
# print(str(err_records[100]['Component Downloads (hits)']))
# bookings = []
# res = []
# # filename = 'gtaConfirmRefs_5867_2017-06-30_2017-07-07.csv'
# with open(filename, encoding='utf-8-sig') as csvfile:
# ids = set()
# reader = csv.DictReader(csvfile)
# for row in reader:
# # pp.pprint(row['hotel_id'])
# if row['gta_api_booking_id'] not in ids:
# entry = dict()
# entry['client_booking_id'] = row['client_booking_id']
# entry['agent_booking_id'] = row['agent_booking_id']
# entry['gta_api_booking_id'] = row['gta_api_booking_id']
# entry['booking_status'] = row['booking_status']
# entry['booking_creation_date'] = row['booking_creation_date']
# entry['booking_departure_date'] = row['booking_departure_date']
# entry['booking_name'] = row['booking_name']
# entry['booking_net_price'] = row['booking_net_price']
# entry['booking_currency'] = row['booking_currency']
# entry['hotel_confirmation_#'] = ''
# entry['hotel_confirmation_status'] = ''
# entry['client_name'] = ''
# if 'hotel_confirmation_#' in row:
# entry['hotel_confirmation_#'] = row['hotel_confirmation_#']
# if 'hotel_confirmation_status' in row:
# entry['hotel_confirmation_status'] = row['hotel_confirmation_status']
# if 'client_name' in row:
# entry['client_name'] = row['client_name']
# bookings.append(entry)
# ids.add(row['gta_api_booking_id'])
# search_tree = ET.parse(os.path.join(os.getcwd(), 'SearchBookingItemRequest.xml'))
# for counter, booking in enumerate(bookings):
# pp.pprint('Searching booking id: ' + str(counter) + ': ' + booking['gta_api_booking_id'])
# if booking['gta_api_booking_id'] in bookings_c:
# print('Warning: already pushed to Ctrip.. skip..')
# continue
# if 'client_name' not in booking.keys():
# print('Error: No client name...')
# continue
# search_tree.find('.//RequestorID').set('Client', agent_secret[booking['client_name']]['id'])
# search_tree.find('.//RequestorID').set('EMailAddress', agent_secret[booking['client_name']]['email'])
# search_tree.find('.//RequestorID').set('Password', agent_secret[booking['client_name']]['password'])
# if not booking['hotel_confirmation_#'] and booking['hotel_confirmation_#'] != '':
# print('have hotel confirmation # already.. skipping')
# entry = copy.deepcopy(booking)
# res.append(entry)
# continue
# if not booking['hotel_confirmation_status'] and booking['hotel_confirmation_status'] != '':
# print('status updated already.. skipping')
# entry = copy.deepcopy(booking)
# res.append(entry)
# continue
# if CONFIRMED not in booking['booking_status']:
# print('Booking not confirmed.. skipping..')
# continue
# # search_tree.find('.//ItemDestination').set('DestinationCode', hotel_code['city_code'])
# # search_tree.find('.//ItemCode').text = hotel_code['item_code']
# booking_id = booking['gta_api_booking_id'].replace('041/', '')
# for search_request in search_tree.find('.//RequestDetails'):
# search_request.find('.//BookingReference').text = booking_id
# try:
# r = requests.post(url, data=ET.tostring(search_tree.getroot(), encoding='UTF-8', method='xml'), timeout=10)
# except OSError:
# pp.pprint('Error: OSError.. Searching has stopped..')
# continue
# try:
# r_tree = ET.fromstring(r.text)
# except ParseError:
# print('Error: parsing error.. skip.. 1')
# continue
# items_ele = r_tree.find('.//BookingItems')
# if items_ele == None:
# print('Error: No BookingItems found..')
# continue
# # for booking_item in r_tree.find('.//BookingItems'):
# for response in r_tree.find('.//ResponseDetails'):
# # print(booking_item.text)
# hotel_ref_ele = response.find('.//ItemConfirmationReference')
# if hotel_ref_ele != None:
# booking['hotel_confirmation_#'] = hotel_ref_ele.text
# else:
# continue
# # logic to exclude bad hotels
# city_ele = response.find('.//ItemCity')
# item_ele = response.find('.//Item')
# if city_ele != None and items_ele != None:
# city_code = city_ele.get('Code')
# item_code = item_ele.get('Code')
# if is_bad_hotel(city_code, item_code):
# print('Warning: bad hotel.. skipping.. ')
# continue
# entry = copy.deepcopy(booking)
# res.append(entry)
# # keys = res[0].keys()
# keys = res[0].keys()
# # with open('output_SearchPrice_' + date.today().strftime('%Y_%m_%d') + '.csv', 'w', encoding='utf-8') as output_file:
# with open('output_Search_item_hr_' + datetime.datetime.today().date().strftime('%y%m%d') + '_' + datetime.datetime.now().strftime('%H%M') + '.csv', 'w', newline='', encoding='utf-8') as output_file:
# dict_writer = csv.DictWriter(output_file, keys)
# dict_writer.writeheader()
# dict_writer.writerows(res)
if __name__ == '__main__':
hb_er()
|
Fatman13/gta_swarm
|
hb_er.py
|
Python
|
mit
| 12,262
|
"""
A module for shelling out.
Keep in mind that this module is insecure, in that it can give whomever has
access to the master root execution access to all salt minions.
"""
import base64
import fnmatch
import functools
import glob
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
import salt.grains.extra
import salt.utils.args
import salt.utils.data
import salt.utils.files
import salt.utils.json
import salt.utils.path
import salt.utils.platform
import salt.utils.powershell
import salt.utils.stringutils
import salt.utils.templates
import salt.utils.timed_subprocess
import salt.utils.url
import salt.utils.user
import salt.utils.versions
import salt.utils.vt
import salt.utils.win_chcp
import salt.utils.win_dacl
import salt.utils.win_reg
from salt.exceptions import (
CommandExecutionError,
SaltInvocationError,
TimedProcTimeoutError,
)
from salt.log import LOG_LEVELS
# Only available on POSIX systems, nonfatal on windows
try:
import grp
import pwd
except ImportError:
pass
if salt.utils.platform.is_windows():
from salt.utils.win_functions import escape_argument as _cmd_quote
from salt.utils.win_runas import runas as win_runas
HAS_WIN_RUNAS = True
else:
import shlex
_cmd_quote = shlex.quote
HAS_WIN_RUNAS = False
__proxyenabled__ = ["*"]
# Define the module's virtual name
__virtualname__ = "cmd"
log = logging.getLogger(__name__)
DEFAULT_SHELL = salt.grains.extra.shell()["shell"]
# Overwriting the cmd python module makes debugging modules with pdb a bit
# harder so lets do it this way instead.
def __virtual__():
return __virtualname__
def _log_cmd(cmd):
if isinstance(cmd, (tuple, list)):
return cmd[0].strip()
else:
return str(cmd).split()[0].strip()
def _check_cb(cb_):
"""
If the callback is None or is not callable, return a lambda that returns
the value passed.
"""
if cb_ is not None:
if hasattr(cb_, "__call__"):
return cb_
else:
log.error("log_callback is not callable, ignoring")
return lambda x: x
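# Note on how the python_shell default is resolved below: when a call carries a
# __pub_jid (i.e. it arrived via remote execution) and python_shell was not set
# explicitly, it defaults to True, and the same happens when the minion config
# sets ``cmd_safe: False``. Plain cross-module calls keep the safer
# python_shell=False behaviour.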
def _python_shell_default(python_shell, __pub_jid):
"""
Set python_shell default based on remote execution and __opts__['cmd_safe']
"""
try:
# Default to python_shell=True when run directly from remote execution
# system. Cross-module calls won't have a jid.
if __pub_jid and python_shell is None:
return True
elif __opts__.get("cmd_safe", True) is False and python_shell is None:
# Override-switch for python_shell
return True
except NameError:
pass
return python_shell
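# _chroot_pids() scans /proc/[0-9]*/root and returns the PIDs whose resolved
# root directory lies inside the given chroot, i.e. the processes currently
# running within that chroot.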
def _chroot_pids(chroot):
pids = []
for root in glob.glob("/proc/[0-9]*/root"):
try:
link = os.path.realpath(root)
if link.startswith(chroot):
pids.append(int(os.path.basename(os.path.dirname(root))))
except OSError:
pass
return pids
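# _render_cmd() backs the ``template=`` argument of the cmd functions: both the
# command string and the cwd are pushed through the chosen rendering engine, so
# a call such as (example reused, shortened, from the cmd.run docstring further
# down)
#
#     salt '*' cmd.run template=jinja "ls -l /tmp/{{grains.id}}"
#
# has the Jinja expression expanded before the command is executed.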
def _render_cmd(cmd, cwd, template, saltenv=None, pillarenv=None, pillar_override=None):
"""
If template is a valid template engine, process the cmd and cwd through
that engine.
"""
if saltenv is None:
try:
saltenv = __opts__.get("saltenv", "base")
except NameError:
saltenv = "base"
if not template:
return (cmd, cwd)
# render the path as a template using path_template_engine as the engine
if template not in salt.utils.templates.TEMPLATE_REGISTRY:
raise CommandExecutionError(
"Attempted to render file paths with unavailable engine {}".format(template)
)
kwargs = {}
kwargs["salt"] = __salt__
if pillarenv is not None or pillar_override is not None:
pillarenv = pillarenv or __opts__["pillarenv"]
kwargs["pillar"] = _gather_pillar(pillarenv, pillar_override)
else:
kwargs["pillar"] = __pillar__
kwargs["grains"] = __grains__
kwargs["opts"] = __opts__
kwargs["saltenv"] = saltenv
def _render(contents):
# write out path to temp file
tmp_path_fn = salt.utils.files.mkstemp()
with salt.utils.files.fopen(tmp_path_fn, "w+") as fp_:
fp_.write(salt.utils.stringutils.to_str(contents))
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
tmp_path_fn, to_str=True, **kwargs
)
salt.utils.files.safe_rm(tmp_path_fn)
if not data["result"]:
# Failed to render the template
raise CommandExecutionError(
"Failed to execute cmd with error: {}".format(data["data"])
)
else:
return data["data"]
cmd = _render(cmd)
cwd = _render(cwd)
return (cmd, cwd)
def _check_loglevel(level="info"):
"""
Retrieve the level code for use in logging.Logger.log().
"""
try:
level = level.lower()
if level == "quiet":
return None
else:
return LOG_LEVELS[level]
except (AttributeError, KeyError):
log.error(
"Invalid output_loglevel '%s'. Valid levels are: %s. Falling "
"back to 'info'.",
level,
", ".join(sorted(LOG_LEVELS, reverse=True)),
)
return LOG_LEVELS["info"]
def _parse_env(env):
if not env:
env = {}
if isinstance(env, list):
env = salt.utils.data.repack_dictlist(env)
if not isinstance(env, dict):
env = {}
return env
def _gather_pillar(pillarenv, pillar_override):
"""
Whenever a state run starts, gather the pillar data fresh
"""
pillar = salt.pillar.get_pillar(
__opts__,
__grains__,
__opts__["id"],
__opts__["saltenv"],
pillar_override=pillar_override,
pillarenv=pillarenv,
)
ret = pillar.compile_pillar()
if pillar_override and isinstance(pillar_override, dict):
ret.update(pillar_override)
return ret
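# _check_avail() enforces the command blacklist/whitelist. A sketch of the
# minion config it consults (option names as read below; the glob patterns are
# only illustrative):
#
#     cmd_blacklist_glob:
#       - 'rm -rf *'
#     cmd_whitelist_glob:
#       - 'ls *'
#
# A command is permitted only if it matches no blacklist glob and, when a
# whitelist is configured, matches at least one whitelist glob.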
def _check_avail(cmd):
"""
Check to see if the given command can be run
"""
if isinstance(cmd, list):
cmd = " ".join([str(x) if not isinstance(x, str) else x for x in cmd])
bret = True
wret = False
if __salt__["config.get"]("cmd_blacklist_glob"):
blist = __salt__["config.get"]("cmd_blacklist_glob", [])
for comp in blist:
if fnmatch.fnmatch(cmd, comp):
# BAD! you are blacklisted
bret = False
if __salt__["config.get"]("cmd_whitelist_glob", []):
blist = __salt__["config.get"]("cmd_whitelist_glob", [])
for comp in blist:
if fnmatch.fnmatch(cmd, comp):
# GOOD! You are whitelisted
wret = True
break
else:
        # If no whitelist is set then all's good!
wret = True
return bret and wret
def _run(
cmd,
cwd=None,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
output_encoding=None,
output_loglevel="debug",
log_callback=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
clean_env=False,
prepend_path=None,
rstrip=True,
template=None,
umask=None,
timeout=None,
with_communicate=True,
reset_system_locale=True,
ignore_retcode=False,
saltenv=None,
pillarenv=None,
pillar_override=None,
use_vt=False,
password=None,
bg=False,
encoded_cmd=False,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
windows_codepage=65001,
**kwargs
):
"""
Do the DRY thing and only call subprocess.Popen() once
"""
if "pillar" in kwargs and not pillar_override:
pillar_override = kwargs["pillar"]
if output_loglevel != "quiet" and _is_valid_shell(shell) is False:
log.warning(
"Attempt to run a shell command with what may be an invalid shell! "
"Check to ensure that the shell <%s> is valid for this user.",
shell,
)
output_loglevel = _check_loglevel(output_loglevel)
log_callback = _check_cb(log_callback)
use_sudo = False
if runas is None and "__context__" in globals():
runas = __context__.get("runas")
if password is None and "__context__" in globals():
password = __context__.get("runas_password")
# Set the default working directory to the home directory of the user
# salt-minion is running as. Defaults to home directory of user under which
# the minion is running.
if not cwd:
cwd = os.path.expanduser("~{}".format("" if not runas else runas))
# make sure we can access the cwd
# when run from sudo or another environment where the euid is
# changed ~ will expand to the home of the original uid and
# the euid might not have access to it. See issue #1844
if not os.access(cwd, os.R_OK):
cwd = "/"
if salt.utils.platform.is_windows():
cwd = os.path.abspath(os.sep)
else:
# Handle edge cases where numeric/other input is entered, and would be
# yaml-ified into non-string types
cwd = str(cwd)
if bg:
ignore_retcode = True
use_vt = False
change_windows_codepage = False
if not salt.utils.platform.is_windows():
if not os.path.isfile(shell) or not os.access(shell, os.X_OK):
msg = "The shell {} is not available".format(shell)
raise CommandExecutionError(msg)
    elif use_vt:  # Memoization so not much overhead
raise CommandExecutionError("VT not available on windows")
else:
if windows_codepage:
if not isinstance(windows_codepage, int):
windows_codepage = int(windows_codepage)
previous_windows_codepage = salt.utils.win_chcp.get_codepage_id()
if windows_codepage != previous_windows_codepage:
change_windows_codepage = True
# The powershell binary is "powershell"
# The powershell core binary is "pwsh"
# you can also pass a path here as long as the binary name is one of the two
if any(word in shell.lower().strip() for word in ["powershell", "pwsh"]):
# Strip whitespace
if isinstance(cmd, str):
cmd = cmd.strip()
elif isinstance(cmd, list):
cmd = " ".join(cmd).strip()
cmd = cmd.replace('"', '\\"')
# If we were called by script(), then fakeout the Windows
# shell to run a Powershell script.
# Else just run a Powershell command.
stack = traceback.extract_stack(limit=2)
# extract_stack() returns a list of tuples.
# The last item in the list [-1] is the current method.
# The third item[2] in each tuple is the name of that method.
if stack[-2][2] == "script":
cmd = '"{}" -NonInteractive -NoProfile -ExecutionPolicy Bypass -Command {}'.format(
shell, cmd
)
elif encoded_cmd:
cmd = '"{}" -NonInteractive -NoProfile -EncodedCommand {}'.format(
shell, cmd
)
else:
cmd = '"{}" -NonInteractive -NoProfile -Command "{}"'.format(shell, cmd)
# munge the cmd and cwd through the template
(cmd, cwd) = _render_cmd(cmd, cwd, template, saltenv, pillarenv, pillar_override)
ret = {}
# If the pub jid is here then this is a remote ex or salt call command and needs to be
# checked if blacklisted
if "__pub_jid" in kwargs:
if not _check_avail(cmd):
raise CommandExecutionError(
'The shell command "{}" is not permitted'.format(cmd)
)
env = _parse_env(env)
for bad_env_key in (x for x, y in env.items() if y is None):
log.error(
"Environment variable '%s' passed without a value. "
"Setting value to an empty string",
bad_env_key,
)
env[bad_env_key] = ""
if output_loglevel is not None:
# Always log the shell commands at INFO unless quiet logging is
# requested. The command output is what will be controlled by the
# 'loglevel' parameter.
msg = "Executing command {}{}{} {}{}in directory '{}'{}".format(
"'" if not isinstance(cmd, list) else "",
_log_cmd(cmd),
"'" if not isinstance(cmd, list) else "",
"as user '{}' ".format(runas) if runas else "",
"in group '{}' ".format(group) if group else "",
cwd,
". Executing command in the background, no output will be logged."
if bg
else "",
)
log.info(log_callback(msg))
if runas and salt.utils.platform.is_windows():
if not HAS_WIN_RUNAS:
msg = "missing salt/utils/win_runas.py"
raise CommandExecutionError(msg)
if isinstance(cmd, (list, tuple)):
cmd = " ".join(cmd)
return win_runas(cmd, runas, password, cwd)
if runas and salt.utils.platform.is_darwin():
# We need to insert the user simulation into the command itself and not
# just run it from the environment on macOS as that method doesn't work
# properly when run as root for certain commands.
if isinstance(cmd, (list, tuple)):
cmd = " ".join(map(_cmd_quote, cmd))
# Ensure directory is correct before running command
cmd = "cd -- {dir} && {{ {cmd}\n }}".format(dir=_cmd_quote(cwd), cmd=cmd)
# Ensure environment is correct for a newly logged-in user by running
# the command under bash as a login shell
try:
user_shell = __salt__["user.info"](runas)["shell"]
if re.search("bash$", user_shell):
cmd = "{shell} -l -c {cmd}".format(
shell=user_shell, cmd=_cmd_quote(cmd)
)
except KeyError:
pass
# Ensure the login is simulated correctly (note: su runs sh, not bash,
# which causes the environment to be initialised incorrectly, which is
# fixed by the previous line of code)
cmd = "su -l {} -c {}".format(_cmd_quote(runas), _cmd_quote(cmd))
# Set runas to None, because if you try to run `su -l` after changing
# user, su will prompt for the password of the user and cause salt to
# hang.
runas = None
if runas:
# Save the original command before munging it
try:
pwd.getpwnam(runas)
except KeyError:
raise CommandExecutionError("User '{}' is not available".format(runas))
if group:
if salt.utils.platform.is_windows():
msg = "group is not currently available on Windows"
raise SaltInvocationError(msg)
if not which_bin(["sudo"]):
msg = "group argument requires sudo but not found"
raise CommandExecutionError(msg)
try:
grp.getgrnam(group)
except KeyError:
raise CommandExecutionError("Group '{}' is not available".format(runas))
else:
use_sudo = True
if runas or group:
try:
# Getting the environment for the runas user
# Use markers to thwart any stdout noise
# There must be a better way to do this.
import uuid
marker = "<<<" + str(uuid.uuid4()) + ">>>"
marker_b = marker.encode(__salt_system_encoding__)
py_code = (
"import sys, os, itertools; sys.stdout.write('{0}'); "
"sys.stdout.write('\\0'.join(itertools.chain(*os.environ.items()))); "
"sys.stdout.write('{0}');".format(marker)
)
if use_sudo:
env_cmd = ["sudo"]
# runas is optional if use_sudo is set.
if runas:
env_cmd.extend(["-u", runas])
if group:
env_cmd.extend(["-g", group])
if shell != DEFAULT_SHELL:
env_cmd.extend(["-s", "--", shell, "-c"])
else:
env_cmd.extend(["-i", "--"])
env_cmd.extend([sys.executable])
elif __grains__["os"] in ["FreeBSD"]:
env_cmd = (
"su",
"-",
runas,
"-c",
"{} -c {}".format(shell, sys.executable),
)
elif __grains__["os_family"] in ["Solaris"]:
env_cmd = ("su", "-", runas, "-c", sys.executable)
elif __grains__["os_family"] in ["AIX"]:
env_cmd = ("su", "-", runas, "-c", sys.executable)
else:
env_cmd = ("su", "-s", shell, "-", runas, "-c", sys.executable)
msg = "env command: {}".format(env_cmd)
log.debug(log_callback(msg))
env_bytes, env_encoded_err = subprocess.Popen(
env_cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
).communicate(salt.utils.stringutils.to_bytes(py_code))
marker_count = env_bytes.count(marker_b)
if marker_count == 0:
# Possibly PAM prevented the login
log.error(
"Environment could not be retrieved for user '%s': "
"stderr=%r stdout=%r",
runas,
env_encoded_err,
env_bytes,
)
# Ensure that we get an empty env_runas dict below since we
# were not able to get the environment.
env_bytes = b""
elif marker_count != 2:
raise CommandExecutionError(
"Environment could not be retrieved for user '{}'",
info={"stderr": repr(env_encoded_err), "stdout": repr(env_bytes)},
)
else:
# Strip the marker
env_bytes = env_bytes.split(marker_b)[1]
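            # The environment dump written by py_code above is a flat,
            # NUL-separated sequence laid out as key, value, key, value, ...;
            # the zip-over-a-shared-iterator idiom below pairs consecutive
            # items back up into a {key: value} dict.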
env_runas = dict(list(zip(*[iter(env_bytes.split(b"\0"))] * 2)))
env_runas = {
salt.utils.stringutils.to_str(k): salt.utils.stringutils.to_str(v)
for k, v in env_runas.items()
}
env_runas.update(env)
# Fix platforms like Solaris that don't set a USER env var in the
# user's default environment as obtained above.
if env_runas.get("USER") != runas:
env_runas["USER"] = runas
# Fix some corner cases where shelling out to get the user's
# environment returns the wrong home directory.
runas_home = os.path.expanduser("~{}".format(runas))
if env_runas.get("HOME") != runas_home:
env_runas["HOME"] = runas_home
env = env_runas
except ValueError as exc:
log.exception("Error raised retrieving environment for user %s", runas)
raise CommandExecutionError(
"Environment could not be retrieved for user '{}': {}".format(
runas, exc
)
)
if reset_system_locale is True:
if not salt.utils.platform.is_windows():
# Default to C!
# Salt only knows how to parse English words
# Don't override if the user has passed LC_ALL
env.setdefault("LC_CTYPE", "C")
env.setdefault("LC_NUMERIC", "C")
env.setdefault("LC_TIME", "C")
env.setdefault("LC_COLLATE", "C")
env.setdefault("LC_MONETARY", "C")
env.setdefault("LC_MESSAGES", "C")
env.setdefault("LC_PAPER", "C")
env.setdefault("LC_NAME", "C")
env.setdefault("LC_ADDRESS", "C")
env.setdefault("LC_TELEPHONE", "C")
env.setdefault("LC_MEASUREMENT", "C")
env.setdefault("LC_IDENTIFICATION", "C")
env.setdefault("LANGUAGE", "C")
if clean_env:
run_env = env
else:
if salt.utils.platform.is_windows():
import nt
run_env = nt.environ.copy()
else:
run_env = os.environ.copy()
run_env.update(env)
if prepend_path:
run_env["PATH"] = ":".join((prepend_path, run_env["PATH"]))
if "NOTIFY_SOCKET" not in env:
run_env.pop("NOTIFY_SOCKET", None)
if python_shell is None:
python_shell = False
new_kwargs = {
"cwd": cwd,
"shell": python_shell,
"env": run_env,
"stdin": str(stdin) if stdin is not None else stdin,
"stdout": stdout,
"stderr": stderr,
"with_communicate": with_communicate,
"timeout": timeout,
"bg": bg,
}
if "stdin_raw_newlines" in kwargs:
new_kwargs["stdin_raw_newlines"] = kwargs["stdin_raw_newlines"]
if umask is not None:
_umask = str(umask).lstrip("0")
if _umask == "":
msg = "Zero umask is not allowed."
raise CommandExecutionError(msg)
try:
_umask = int(_umask, 8)
except ValueError:
raise CommandExecutionError("Invalid umask: '{}'".format(umask))
else:
_umask = None
if runas or group or umask:
new_kwargs["preexec_fn"] = functools.partial(
salt.utils.user.chugid_and_umask, runas, _umask, group
)
if not salt.utils.platform.is_windows():
# close_fds is not supported on Windows platforms if you redirect
# stdin/stdout/stderr
if new_kwargs["shell"] is True:
new_kwargs["executable"] = shell
if salt.utils.platform.is_freebsd() and sys.version_info < (3, 9):
# https://bugs.python.org/issue38061
new_kwargs["close_fds"] = False
else:
new_kwargs["close_fds"] = True
if not os.path.isabs(cwd) or not os.path.isdir(cwd):
raise CommandExecutionError(
"Specified cwd '{}' either not absolute or does not exist".format(cwd)
)
if (
python_shell is not True
and not salt.utils.platform.is_windows()
and not isinstance(cmd, list)
):
cmd = salt.utils.args.shlex_split(cmd)
if success_retcodes is None:
success_retcodes = [0]
else:
try:
success_retcodes = [
int(i) for i in salt.utils.args.split_input(success_retcodes)
]
except ValueError:
raise SaltInvocationError("success_retcodes must be a list of integers")
if success_stdout is None:
success_stdout = []
else:
success_stdout = salt.utils.args.split_input(success_stdout)
if success_stderr is None:
success_stderr = []
else:
success_stderr = salt.utils.args.split_input(success_stderr)
if not use_vt:
# This is where the magic happens
try:
if change_windows_codepage:
salt.utils.win_chcp.set_codepage_id(windows_codepage)
try:
proc = salt.utils.timed_subprocess.TimedProc(cmd, **new_kwargs)
except OSError as exc:
msg = "Unable to run command '{}' with the context '{}', reason: {}".format(
cmd if output_loglevel is not None else "REDACTED",
new_kwargs,
exc,
)
raise CommandExecutionError(msg)
try:
proc.run()
except TimedProcTimeoutError as exc:
ret["stdout"] = str(exc)
ret["stderr"] = ""
ret["retcode"] = None
ret["pid"] = proc.process.pid
# ok return code for timeouts?
ret["retcode"] = 1
return ret
finally:
if change_windows_codepage:
salt.utils.win_chcp.set_codepage_id(previous_windows_codepage)
if output_loglevel != "quiet" and output_encoding is not None:
log.debug(
"Decoding output from command %s using %s encoding",
cmd,
output_encoding,
)
try:
out = salt.utils.stringutils.to_unicode(
proc.stdout, encoding=output_encoding
)
except TypeError:
# stdout is None
out = ""
except UnicodeDecodeError:
out = salt.utils.stringutils.to_unicode(
proc.stdout, encoding=output_encoding, errors="replace"
)
if output_loglevel != "quiet":
log.error(
"Failed to decode stdout from command %s, non-decodable "
"characters have been replaced",
_log_cmd(cmd),
)
try:
err = salt.utils.stringutils.to_unicode(
proc.stderr, encoding=output_encoding
)
except TypeError:
# stderr is None
err = ""
except UnicodeDecodeError:
err = salt.utils.stringutils.to_unicode(
proc.stderr, encoding=output_encoding, errors="replace"
)
if output_loglevel != "quiet":
log.error(
"Failed to decode stderr from command %s, non-decodable "
"characters have been replaced",
_log_cmd(cmd),
)
if rstrip:
if out is not None:
out = out.rstrip()
if err is not None:
err = err.rstrip()
ret["pid"] = proc.process.pid
ret["retcode"] = proc.process.returncode
if ret["retcode"] in success_retcodes:
ret["retcode"] = 0
ret["stdout"] = out
ret["stderr"] = err
if any(
[stdo in ret["stdout"] for stdo in success_stdout]
+ [stde in ret["stderr"] for stde in success_stderr]
):
ret["retcode"] = 0
else:
formatted_timeout = ""
if timeout:
formatted_timeout = " (timeout: {}s)".format(timeout)
if output_loglevel is not None:
msg = "Running {} in VT{}".format(cmd, formatted_timeout)
log.debug(log_callback(msg))
stdout, stderr = "", ""
now = time.time()
if timeout:
will_timeout = now + timeout
else:
will_timeout = -1
try:
proc = salt.utils.vt.Terminal(
cmd,
shell=True,
log_stdout=True,
log_stderr=True,
cwd=cwd,
preexec_fn=new_kwargs.get("preexec_fn", None),
env=run_env,
log_stdin_level=output_loglevel,
log_stdout_level=output_loglevel,
log_stderr_level=output_loglevel,
stream_stdout=True,
stream_stderr=True,
)
ret["pid"] = proc.pid
while proc.has_unread_data:
try:
try:
time.sleep(0.5)
try:
cstdout, cstderr = proc.recv()
except OSError:
cstdout, cstderr = "", ""
if cstdout:
stdout += cstdout
else:
stdout = ""
if cstderr:
stderr += cstderr
else:
stderr = ""
if timeout and (time.time() > will_timeout):
ret["stderr"] = "SALT: Timeout after {}s\n{}".format(
timeout, stderr
)
ret["retcode"] = None
break
except KeyboardInterrupt:
ret["stderr"] = "SALT: User break\n{}".format(stderr)
ret["retcode"] = 1
break
except salt.utils.vt.TerminalException as exc:
log.error("VT: %s", exc, exc_info_on_loglevel=logging.DEBUG)
ret = {"retcode": 1, "pid": "2"}
break
# only set stdout on success as we already mangled in other
# cases
ret["stdout"] = stdout
if not proc.isalive():
# Process terminated, i.e., not canceled by the user or by
# the timeout
ret["stderr"] = stderr
ret["retcode"] = proc.exitstatus
if ret["retcode"] in success_retcodes:
ret["retcode"] = 0
if any(
[stdo in ret["stdout"] for stdo in success_stdout]
+ [stde in ret["stderr"] for stde in success_stderr]
):
ret["retcode"] = 0
ret["pid"] = proc.pid
finally:
proc.close(terminate=True, kill=True)
try:
if ignore_retcode:
__context__["retcode"] = 0
else:
__context__["retcode"] = ret["retcode"]
except NameError:
# Ignore the context error during grain generation
pass
# Log the output
if output_loglevel is not None:
if not ignore_retcode and ret["retcode"] != 0:
if output_loglevel < LOG_LEVELS["error"]:
output_loglevel = LOG_LEVELS["error"]
msg = "Command '{}' failed with return code: {}".format(
_log_cmd(cmd), ret["retcode"]
)
log.error(log_callback(msg))
if ret["stdout"]:
log.log(output_loglevel, "stdout: %s", log_callback(ret["stdout"]))
if ret["stderr"]:
log.log(output_loglevel, "stderr: %s", log_callback(ret["stderr"]))
if ret["retcode"]:
log.log(output_loglevel, "retcode: %s", ret["retcode"])
return ret
def _run_quiet(
cmd,
cwd=None,
stdin=None,
output_encoding=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
template=None,
umask=None,
timeout=None,
reset_system_locale=True,
saltenv=None,
pillarenv=None,
pillar_override=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
):
"""
Helper for running commands quietly for minion startup
"""
return _run(
cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
stderr=subprocess.STDOUT,
output_encoding=output_encoding,
output_loglevel="quiet",
log_callback=None,
shell=shell,
python_shell=python_shell,
env=env,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
pillarenv=pillarenv,
pillar_override=pillar_override,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
)["stdout"]
def _run_all_quiet(
cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
template=None,
umask=None,
timeout=None,
reset_system_locale=True,
saltenv=None,
pillarenv=None,
pillar_override=None,
output_encoding=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
):
"""
Helper for running commands quietly for minion startup.
Returns a dict of return data.
    The output_loglevel argument is ignored. This helper exists so that
    cmd.run_all can be aliased directly to _run_all_quiet in chicken-and-egg
    situations where a module must work both before and after the __salt__
    dictionary is populated (cf. dracr.py).
"""
return _run(
cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
shell=shell,
python_shell=python_shell,
env=env,
output_encoding=output_encoding,
output_loglevel="quiet",
log_callback=None,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
pillarenv=pillarenv,
pillar_override=pillar_override,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
)
def run(
cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel="debug",
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv=None,
use_vt=False,
bg=False,
password=None,
encoded_cmd=False,
raise_err=False,
prepend_path=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
r"""
Execute the passed command and return the output as a string
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running.
.. warning::
For versions 2018.3.3 and above on macosx while using runas,
on linux while using run, to pass special characters to the
command you need to escape the characters on the shell.
Example:
.. code-block:: bash
cmd.run 'echo '\''h=\"baz\"'\''' runas=macuser
:param str group: Group to run command as. Not currently supported
on Windows.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If ``False``, let python handle the positional
arguments. Set to ``True`` to use shell features, such as pipes or
redirection.
:param bool bg: If ``True``, run command in background and do not await or
deliver its results
.. versionadded:: 2016.3.0
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.run 'some command' env='{"FOO": "bar"}'
.. note::
            When using environment variables on Windows, case-sensitivity
            matters: Windows uses ``Path`` where other systems use ``PATH``.
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str prepend_path: $PATH segment to prepend (trailing ':' not
necessary) to $PATH
.. versionadded:: 2018.3.0
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param bool encoded_cmd: Specify if the supplied command is encoded.
Only applies to shell 'powershell' and 'pwsh'.
.. versionadded:: 2018.3.0
Older versions of powershell seem to return raw xml data in the return.
To avoid raw xml data in the return, prepend your command with the
following before encoding:
`$ProgressPreference='SilentlyContinue'; <your command>`
The following powershell code block will encode the `Write-Output`
command so that it will not have the raw xml data in the return:
.. code-block:: powershell
# target string
$Command = '$ProgressPreference="SilentlyContinue"; Write-Output "hello"'
# Convert to Base64 encoded string
$Encoded = [convert]::ToBase64String([System.Text.encoding]::Unicode.GetBytes($command))
Write-Output $Encoded
:param bool raise_err: If ``True`` and the command has a nonzero exit code,
a CommandExecutionError exception will be raised.
.. warning::
This function does not process commands through a shell
unless the python_shell flag is set to True. This means that any
shell-specific functionality such as 'echo' or the use of pipes,
redirection or &&, should either be migrated to cmd.shell or
have the python_shell=True flag set here.
The use of python_shell=True means that the shell will accept _any_ input
including potentially malicious commands such as 'good_command;rm -rf /'.
Be absolutely certain that you have sanitized your input prior to using
python_shell=True
:param list success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
:param list success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param list success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: 2019.2.0
:param int windows_codepage: 65001
Only applies to Windows: the minion uses ``C:\\Windows\\System32\\chcp.com``
to verify or set the code page before the command ``cmd`` is executed.
Code page 65001 corresponds with UTF-8 and allows international localization of Windows.
.. versionadded:: 3002
CLI Example:
.. code-block:: bash
salt '*' cmd.run "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
Specify an alternate shell with the shell parameter:
.. code-block:: bash
salt '*' cmd.run "Get-ChildItem C:\\ " shell='powershell'
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.run "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' cmd.run cmd='sed -e s/=/:/g'
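A nonzero exit code can be treated as success with ``success_retcodes``. For
example (illustrative; adjust the command and return codes to your use case):
.. code-block:: bash
salt '*' cmd.run "grep -q banana /etc/hosts" success_retcodes='[1]'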
"""
python_shell = _python_shell_default(python_shell, kwargs.get("__pub_jid", ""))
ret = _run(
cmd,
runas=runas,
group=group,
shell=shell,
python_shell=python_shell,
cwd=cwd,
stdin=stdin,
stderr=subprocess.STDOUT,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
bg=bg,
password=password,
encoded_cmd=encoded_cmd,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
**kwargs
)
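# Log failed commands at the 'error' level (unless ignore_retcode is set),
# and optionally raise a CommandExecutionError when raise_err is True.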
log_callback = _check_cb(log_callback)
lvl = _check_loglevel(output_loglevel)
if lvl is not None:
if not ignore_retcode and ret["retcode"] != 0:
if lvl < LOG_LEVELS["error"]:
lvl = LOG_LEVELS["error"]
msg = "Command '{}' failed with return code: {}".format(
_log_cmd(cmd), ret["retcode"]
)
log.error(log_callback(msg))
if raise_err:
raise CommandExecutionError(
log_callback(ret["stdout"] if not hide_output else "")
)
log.log(lvl, "output: %s", log_callback(ret["stdout"]))
return ret["stdout"] if not hide_output else ""
def shell(
cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel="debug",
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv=None,
use_vt=False,
bg=False,
password=None,
prepend_path=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Execute the passed command and return the output as a string.
.. versionadded:: 2015.5.0
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
.. warning::
On macOS, for versions 2018.3.3 and above, when using ``runas`` you
must escape special characters in the command for the shell.
Example:
.. code-block:: bash
cmd.shell 'echo '\\''h=\\"baz\\"'\\''' runas=macuser
:param str group: Group to run command as. Not currently supported
on Windows.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str shell: Shell to execute under. Defaults to the system's default
shell.
:param bool bg: If ``True``, run command in background and do not await or
deliver its results.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.shell 'some command' env='{"FOO": "bar"}'
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses ``Path`` whereas other systems use ``PATH``.
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str prepend_path: $PATH segment to prepend (trailing ':' not necessary)
to $PATH
.. versionadded:: 2018.3.0
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: A timeout in seconds for the executed process to
return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
.. warning::
This passes the cmd argument directly to the shell without any further
processing! Be absolutely sure that you have properly sanitized the
command passed to this function and do not use untrusted inputs.
:param list success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
:param list success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param list success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' cmd.shell "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.shell template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
Specify an alternate shell with the shell parameter:
.. code-block:: bash
salt '*' cmd.shell "Get-ChildItem C:\\ " shell='powershell'
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.shell "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' cmd.shell cmd='sed -e s/=/:/g'
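Because this function always processes the command through a shell, shell
features such as globbing work directly (illustrative example):
.. code-block:: bash
salt '*' cmd.shell 'du -sh /var/log/*.log'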
"""
if "python_shell" in kwargs:
python_shell = kwargs.pop("python_shell")
else:
python_shell = True
return run(
cmd,
cwd=cwd,
stdin=stdin,
runas=runas,
group=group,
shell=shell,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
hide_output=hide_output,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
python_shell=python_shell,
bg=bg,
password=password,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
**kwargs
)
def run_stdout(
cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel="debug",
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv=None,
use_vt=False,
password=None,
prepend_path=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Execute a command, and only return the standard out
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
.. warning::
On macOS, for versions 2018.3.3 and above, when using ``runas`` you
must escape special characters in the command for the shell.
Example:
.. code-block:: bash
cmd.run_stdout 'echo '\\''h=\\"baz\\"'\\''' runas=macuser
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str group: Group to run command as. Not currently supported
on Windows.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.run_stdout 'some command' env='{"FOO": "bar"}'
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses ``Path`` whereas other systems use ``PATH``.
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str prepend_path: $PATH segment to prepend (trailing ':' not necessary)
to $PATH
.. versionadded:: 2018.3.0
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: A timeout in seconds for the executed process to
return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param list success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
:param list success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param list success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' cmd.run_stdout "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_stdout template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.run_stdout "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
"""
python_shell = _python_shell_default(python_shell, kwargs.get("__pub_jid", ""))
ret = _run(
cmd,
runas=runas,
group=group,
cwd=cwd,
stdin=stdin,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
password=password,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
**kwargs
)
return ret["stdout"] if not hide_output else ""
def run_stderr(
cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel="debug",
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv=None,
use_vt=False,
password=None,
prepend_path=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Execute a command and only return the standard error
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
.. warning::
On macOS, for versions 2018.3.3 and above, when using ``runas`` you
must escape special characters in the command for the shell.
Example:
.. code-block:: bash
cmd.run_stderr 'echo '\\''h=\\"baz\\"'\\''' runas=macuser
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str group: Group to run command as. Not currently supported
on Windows.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.run_stderr 'some command' env='{"FOO": "bar"}'
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses ``Path`` whereas other systems use ``PATH``.
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str prepend_path: $PATH segment to prepend (trailing ':' not
necessary) to $PATH
.. versionadded:: 2018.3.0
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: A timeout in seconds for the executed process to
return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param list success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
:param list success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param list success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' cmd.run_stderr "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_stderr template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.run_stderr "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
"""
python_shell = _python_shell_default(python_shell, kwargs.get("__pub_jid", ""))
ret = _run(
cmd,
runas=runas,
group=group,
cwd=cwd,
stdin=stdin,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
use_vt=use_vt,
saltenv=saltenv,
password=password,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
**kwargs
)
return ret["stderr"] if not hide_output else ""
def run_all(
cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel="debug",
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv=None,
use_vt=False,
redirect_stderr=False,
password=None,
encoded_cmd=False,
prepend_path=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Execute the passed command and return a dict of return data
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
.. warning::
On macOS, for versions 2018.3.3 and above, when using ``runas`` you
must escape special characters in the command for the shell.
Example:
.. code-block:: bash
cmd.run_all 'echo '\\''h=\\"baz\\"'\\''' runas=macuser
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str group: Group to run command as. Not currently supported
on Windows.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.run_all 'some command' env='{"FOO": "bar"}'
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses ``Path`` whereas other systems use ``PATH``.
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str prepend_path: $PATH segment to prepend (trailing ':' not
necessary) to $PATH
.. versionadded:: 2018.3.0
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: A timeout in seconds for the executed process to
return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param bool encoded_cmd: Specify if the supplied command is encoded.
Only applies to shell 'powershell' and 'pwsh'.
.. versionadded:: 2018.3.0
Older versions of PowerShell may include raw XML data in the return. To
avoid this, prepend the following to your command before encoding it:
`$ProgressPreference='SilentlyContinue'; <your command>`
The following PowerShell code block will encode the `Write-Output`
command so that the raw XML data does not appear in the return:
.. code-block:: powershell
# target string
$Command = '$ProgressPreference="SilentlyContinue"; Write-Output "hello"'
# Convert to Base64 encoded string
$Encoded = [convert]::ToBase64String([System.Text.encoding]::Unicode.GetBytes($command))
Write-Output $Encoded
:param bool redirect_stderr: If set to ``True``, then stderr will be
redirected to stdout. This is helpful for cases where obtaining both
the retcode and output is desired, but it is not desired to have the
output separated into both stdout and stderr.
.. versionadded:: 2015.8.2
:param bool bg: If ``True``, run command in background and do not await or
deliver its results
.. versionadded:: 2016.3.6
:param list success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
:param list success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param list success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' cmd.run_all "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_all template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.run_all "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
"""
python_shell = _python_shell_default(python_shell, kwargs.get("__pub_jid", ""))
stderr = subprocess.STDOUT if redirect_stderr else subprocess.PIPE
ret = _run(
cmd,
runas=runas,
group=group,
cwd=cwd,
stdin=stdin,
stderr=stderr,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
password=password,
encoded_cmd=encoded_cmd,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
**kwargs
)
if hide_output:
ret["stdout"] = ret["stderr"] = ""
return ret
def retcode(
cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
umask=None,
output_encoding=None,
output_loglevel="debug",
log_callback=None,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv=None,
use_vt=False,
password=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Execute a shell command and return the command's return code.
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
.. warning::
On macOS, for versions 2018.3.3 and above, when using ``runas`` you
must escape special characters in the command for the shell.
Example:
.. code-block:: bash
cmd.retcode 'echo '\\''h=\\"baz\\"'\\''' runas=macuser
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str group: Group to run command as. Not currently supported
on Windows.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.retcode 'some command' env='{"FOO": "bar"}'
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses ``Path`` whereas other systems use ``PATH``.
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:rtype: int or None
:returns: Return code as an int, or None if there was an exception.
:param list success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
:param list success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param list success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' cmd.retcode "file /bin/bash"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.retcode template=jinja "file {{grains.pythonpath[0]}}/python"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.retcode "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
"""
python_shell = _python_shell_default(python_shell, kwargs.get("__pub_jid", ""))
ret = _run(
cmd,
runas=runas,
group=group,
cwd=cwd,
stdin=stdin,
stderr=subprocess.STDOUT,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
template=template,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
password=password,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
**kwargs
)
return ret["retcode"]
def _retcode_quiet(
cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
clean_env=False,
template=None,
umask=None,
output_encoding=None,
log_callback=None,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv=None,
use_vt=False,
password=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Helper for running commands quietly for minion startup. Returns same as
the retcode() function.
"""
return retcode(
cmd,
cwd=cwd,
stdin=stdin,
runas=runas,
group=group,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
template=template,
umask=umask,
output_encoding=output_encoding,
output_loglevel="quiet",
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
password=password,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
**kwargs
)
def script(
source,
args=None,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
template=None,
umask=None,
output_encoding=None,
output_loglevel="debug",
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
saltenv=None,
use_vt=False,
bg=False,
password=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Download a script from a remote location and execute the script locally.
The script can be located on the salt master file server or on an HTTP/FTP
server.
The script will be executed directly, so it can be written in any available
programming language.
:param str source: The location of the script to download. If the file is
located on the master in the directory named spam, and is called eggs,
the source string is salt://spam/eggs
:param str args: String of command line args to pass to the script. Only
used if no args are specified as part of the `name` argument. To pass a
string containing spaces in YAML, you will need to doubly-quote it:
.. code-block:: bash
salt myminion cmd.script salt://foo.sh "arg1 'arg two' arg3"
:param str cwd: The directory from which to execute the command. Defaults
to the directory returned from Python's tempfile.mkstemp.
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
.. note::
For Windows users, specifically Server users, it may be necessary
to specify your runas user using the User Logon Name instead of the
legacy logon name. Traditionally, logons would be in the following
format:
``Domain/user``
In the event this causes issues when executing scripts, use the UPN
format, which looks like the following:
``user@domain.local``
More information: https://github.com/saltstack/salt/issues/55080
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str group: Group to run script as. Not currently supported
on Windows.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param bool bg: If True, run script in background and do not await or
deliver its results
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.script 'some command' env='{"FOO": "bar"}'
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses ``Path`` whereas other systems use ``PATH``.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: If the command has not terminated after timeout
seconds, send the subprocess SIGTERM, and if SIGTERM is ignored, follow
up with SIGKILL.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param list success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
:param list success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param list success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' cmd.script salt://scripts/runme.sh
salt '*' cmd.script salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
salt '*' cmd.script salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'
.. code-block:: bash
salt '*' cmd.script salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
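Arguments documented above such as ``runas`` and ``env`` can be combined
(illustrative example; the script path and values are hypothetical):
.. code-block:: bash
salt '*' cmd.script salt://scripts/deploy.sh runas=deploy env='{"STAGE": "prod"}'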
"""
if saltenv is None:
try:
saltenv = __opts__.get("saltenv", "base")
except NameError:
saltenv = "base"
python_shell = _python_shell_default(python_shell, kwargs.get("__pub_jid", ""))
def _cleanup_tempfile(path):
try:
__salt__["file.remove"](path)
except (SaltInvocationError, CommandExecutionError) as exc:
log.error(
"cmd.script: Unable to clean tempfile '%s': %s",
path,
exc,
exc_info_on_loglevel=logging.DEBUG,
)
if "__env__" in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop("__env__")
win_cwd = False
if salt.utils.platform.is_windows() and runas and cwd is None:
# Create a temp working directory
cwd = tempfile.mkdtemp(dir=__opts__["cachedir"])
win_cwd = True
salt.utils.win_dacl.set_permissions(
obj_name=cwd, principal=runas, permissions="full_control"
)
path = salt.utils.files.mkstemp(
dir=cwd, suffix=os.path.splitext(salt.utils.url.split_env(source)[0])[1]
)
if template:
if "pillarenv" in kwargs or "pillar" in kwargs:
pillarenv = kwargs.get("pillarenv", __opts__.get("pillarenv"))
kwargs["pillar"] = _gather_pillar(pillarenv, kwargs.get("pillar"))
fn_ = __salt__["cp.get_template"](source, path, template, saltenv, **kwargs)
if not fn_:
_cleanup_tempfile(path)
# If a temp working directory was created (Windows), let's remove that
if win_cwd:
_cleanup_tempfile(cwd)
return {
"pid": 0,
"retcode": 1,
"stdout": "",
"stderr": "",
"cache_error": True,
}
else:
fn_ = __salt__["cp.cache_file"](source, saltenv)
if not fn_:
_cleanup_tempfile(path)
# If a temp working directory was created (Windows), let's remove that
if win_cwd:
_cleanup_tempfile(cwd)
return {
"pid": 0,
"retcode": 1,
"stdout": "",
"stderr": "",
"cache_error": True,
}
shutil.copyfile(fn_, path)
if not salt.utils.platform.is_windows():
os.chmod(path, 320)  # 320 == 0o500: read/execute for the owner only
os.chown(path, __salt__["file.user_to_uid"](runas), -1)
if salt.utils.platform.is_windows() and shell.lower() != "powershell":
cmd_path = _cmd_quote(path, escape=False)
else:
cmd_path = _cmd_quote(path)
ret = _run(
cmd_path + " " + str(args) if args else cmd_path,
cwd=cwd,
stdin=stdin,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
runas=runas,
group=group,
shell=shell,
python_shell=python_shell,
env=env,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
use_vt=use_vt,
bg=bg,
password=password,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
**kwargs
)
_cleanup_tempfile(path)
# If a temp working directory was created (Windows), let's remove that
if win_cwd:
_cleanup_tempfile(cwd)
if hide_output:
ret["stdout"] = ret["stderr"] = ""
return ret
def script_retcode(
source,
args=None,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
template="jinja",
umask=None,
timeout=None,
reset_system_locale=True,
saltenv=None,
output_encoding=None,
output_loglevel="debug",
log_callback=None,
use_vt=False,
password=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Download a script from a remote location and execute the script locally.
The script can be located on the salt master file server or on an HTTP/FTP
server.
The script will be executed directly, so it can be written in any available
programming language.
The script can also be formatted as a template, the default is jinja.
Only evaluate the script return code and do not block for terminal output
:param str source: The location of the script to download. If the file is
located on the master in the directory named spam, and is called eggs,
the source string is salt://spam/eggs
:param str args: String of command line args to pass to the script. Only
used if no args are specified as part of the `name` argument. To pass a
string containing spaces in YAML, you will need to doubly-quote it:
"arg1 'arg two' arg3"
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str group: Group to run script as. Not currently supported
on Windows.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.script_retcode 'some command' env='{"FOO": "bar"}'
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses ``Path`` whereas other systems use ``PATH``.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param int timeout: If the command has not terminated after timeout
seconds, send the subprocess SIGTERM, and if SIGTERM is ignored, follow
up with SIGKILL.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param list success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
:param list success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param list success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' cmd.script_retcode salt://scripts/runme.sh
salt '*' cmd.script_retcode salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
salt '*' cmd.script_retcode salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.script_retcode salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
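Since only the return code is evaluated, this is useful for scripted checks
(illustrative example; the script path is hypothetical):
.. code-block:: bash
salt '*' cmd.script_retcode salt://scripts/healthcheck.sh ignore_retcode=True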
"""
if "__env__" in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop("__env__")
return script(
source=source,
args=args,
cwd=cwd,
stdin=stdin,
runas=runas,
group=group,
shell=shell,
python_shell=python_shell,
env=env,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
use_vt=use_vt,
password=password,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
**kwargs
)["retcode"]
def which(cmd):
"""
Returns the path of an executable available on the minion, None otherwise
CLI Example:
.. code-block:: bash
salt '*' cmd.which cat
"""
return salt.utils.path.which(cmd)
def which_bin(cmds):
"""
Returns the first command found in a list of commands
CLI Example:
.. code-block:: bash
salt '*' cmd.which_bin '[pip2, pip, pip-python]'
"""
return salt.utils.path.which_bin(cmds)
def has_exec(cmd):
"""
Returns true if the executable is available on the minion, false otherwise
CLI Example:
.. code-block:: bash
salt '*' cmd.has_exec cat
"""
return which(cmd) is not None
def exec_code(lang, code, cwd=None, args=None, **kwargs):
"""
Pass in two strings: the first names the executable language (e.g.
python2, python3, ruby, perl, lua), the second contains
the code you wish to execute. The stdout will be returned.
All parameters from :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` except python_shell can be used.
CLI Example:
.. code-block:: bash
salt '*' cmd.exec_code ruby 'puts "cheese"'
salt '*' cmd.exec_code ruby 'puts "cheese"' args='["arg1", "arg2"]' env='{"FOO": "bar"}'
"""
return exec_code_all(lang, code, cwd, args, **kwargs)["stdout"]
def exec_code_all(lang, code, cwd=None, args=None, **kwargs):
"""
Pass in two strings: the first names the executable language (e.g.
python2, python3, ruby, perl, lua), the second contains
the code you wish to execute. All cmd artifacts (stdout, stderr, retcode, pid)
will be returned.
All parameters from :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` except python_shell can be used.
CLI Example:
.. code-block:: bash
salt '*' cmd.exec_code_all ruby 'puts "cheese"'
salt '*' cmd.exec_code_all ruby 'puts "cheese"' args='["arg1", "arg2"]' env='{"FOO": "bar"}'
"""
powershell = lang.lower().startswith("powershell")
if powershell:
codefile = salt.utils.files.mkstemp(suffix=".ps1")
else:
codefile = salt.utils.files.mkstemp()
with salt.utils.files.fopen(codefile, "w+t", binary=False) as fp_:
fp_.write(salt.utils.stringutils.to_str(code))
if powershell:
cmd = [lang, "-File", codefile]
else:
cmd = [lang, codefile]
if isinstance(args, str):
cmd.append(args)
elif isinstance(args, list):
cmd += args
def _cleanup_tempfile(path):
try:
__salt__["file.remove"](path)
except (SaltInvocationError, CommandExecutionError) as exc:
log.error(
"cmd.exec_code_all: Unable to clean tempfile '%s': %s",
path,
exc,
exc_info_on_loglevel=logging.DEBUG,
)
runas = kwargs.get("runas")
if runas is not None:
if not salt.utils.platform.is_windows():
os.chown(codefile, __salt__["file.user_to_uid"](runas), -1)
ret = run_all(cmd, cwd=cwd, python_shell=False, **kwargs)
_cleanup_tempfile(codefile)
return ret
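# Added note (not in the original module): the dictionary returned above has the
# run_all shape, e.g. for the ruby example in the docstring (values illustrative):
#   {'pid': 12345, 'retcode': 0, 'stdout': 'cheese', 'stderr': ''}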
def tty(device, echo=""):
"""
Echo a string to a specific tty
CLI Example:
.. code-block:: bash
salt '*' cmd.tty tty0 'This is a test'
salt '*' cmd.tty pts3 'This is a test'
"""
if device.startswith("tty"):
teletype = "/dev/{}".format(device)
elif device.startswith("pts"):
teletype = "/dev/{}".format(device.replace("pts", "pts/"))
else:
return {"Error": "The specified device is not a valid TTY"}
try:
with salt.utils.files.fopen(teletype, "wb") as tty_device:
tty_device.write(salt.utils.stringutils.to_bytes(echo))
return {"Success": "Message was successfully echoed to {}".format(teletype)}
except OSError:
return {"Error": "Echoing to {} returned error".format(teletype)}
def run_chroot(
root,
cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=True,
binds=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel="quiet",
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv=None,
use_vt=False,
bg=False,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
.. versionadded:: 2014.7.0
This function runs :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` wrapped
within a chroot, with dev and proc mounted in the chroot
:param str root: Path to the root of the jail to use.
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str group: Group to run the command as.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param list binds: List of directories that will be exported inside
the chroot with the bind option.
.. versionadded:: 3000
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.run_chroot 'some command' env='{"FOO": "bar"}'
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses ``Path``, whereas other systems use ``PATH``.
:param dict clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output
before it is returned.
:param str umask: The umask (in octal) to use when running the
command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout:
A timeout in seconds for the executed process to return.
:param bool use_vt:
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs. This is experimental.
:param list success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
:param list success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param list success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
CLI Example:
.. code-block:: bash
salt '*' cmd.run_chroot /var/lib/lxc/container_name/rootfs 'sh /tmp/bootstrap.sh'
"""
__salt__["mount.mount"](os.path.join(root, "dev"), "devtmpfs", fstype="devtmpfs")
__salt__["mount.mount"](os.path.join(root, "proc"), "proc", fstype="proc")
__salt__["mount.mount"](os.path.join(root, "sys"), "sysfs", fstype="sysfs")
binds = binds if binds else []
for bind_exported in binds:
bind_exported_to = os.path.relpath(bind_exported, os.path.sep)
bind_exported_to = os.path.join(root, bind_exported_to)
__salt__["mount.mount"](bind_exported_to, bind_exported, opts="default,bind")
# Execute chroot routine
sh_ = "/bin/sh"
if os.path.isfile(os.path.join(root, "bin/bash")):
sh_ = "/bin/bash"
if isinstance(cmd, (list, tuple)):
cmd = " ".join([str(i) for i in cmd])
# If runas and group are provided, we expect that the user lives
# inside the chroot, not outside.
if runas:
userspec = "--userspec {}:{}".format(runas, group if group else "")
else:
userspec = ""
cmd = "chroot {} {} {} -c {}".format(userspec, root, sh_, _cmd_quote(cmd))
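# Added illustration (hypothetical values): with runas='builder', no group,
# root='/srv/jail' and cmd='ls /', the line above expands to roughly:
#   chroot --userspec builder: /srv/jail /bin/sh -c 'ls /'
# (or /bin/bash as the shell if the jail provides one).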
run_func = __context__.pop("cmd.run_chroot.func", run_all)
ret = run_func(
cmd,
cwd=cwd,
stdin=stdin,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
pillarenv=kwargs.get("pillarenv"),
pillar=kwargs.get("pillar"),
use_vt=use_vt,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
bg=bg,
)
# Kill processes running in the chroot
for i in range(6):
pids = _chroot_pids(root)
if not pids:
break
for pid in pids:
# use sig 15 (TERM) for first 3 attempts, then 9 (KILL)
sig = 15 if i < 3 else 9
os.kill(pid, sig)
if _chroot_pids(root):
log.error(
"Processes running in chroot could not be killed, "
"filesystem will remain mounted"
)
for bind_exported in binds:
bind_exported_to = os.path.relpath(bind_exported, os.path.sep)
bind_exported_to = os.path.join(root, bind_exported_to)
__salt__["mount.umount"](bind_exported_to)
__salt__["mount.umount"](os.path.join(root, "sys"))
__salt__["mount.umount"](os.path.join(root, "proc"))
__salt__["mount.umount"](os.path.join(root, "dev"))
if hide_output:
ret["stdout"] = ret["stderr"] = ""
return ret
def _is_valid_shell(shell):
"""
Attempts to search for valid shells on a system and
see if a given shell is in the list
"""
if salt.utils.platform.is_windows():
return True # Don't even try this for Windows
shells = "/etc/shells"
available_shells = []
if os.path.exists(shells):
try:
with salt.utils.files.fopen(shells, "r") as shell_fp:
lines = [
salt.utils.stringutils.to_unicode(x)
for x in shell_fp.read().splitlines()
]
for line in lines:
if line.startswith("#"):
continue
else:
available_shells.append(line)
except OSError:
return True
else:
# No known method of determining available shells
return None
return shell in available_shells
def shells():
"""
Lists the valid shells on this system via the /etc/shells file
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' cmd.shells
"""
shells_fn = "/etc/shells"
ret = []
if os.path.exists(shells_fn):
try:
with salt.utils.files.fopen(shells_fn, "r") as shell_fp:
lines = [
salt.utils.stringutils.to_unicode(x)
for x in shell_fp.read().splitlines()
]
for line in lines:
line = line.strip()
if line.startswith("#"):
continue
elif not line:
continue
else:
ret.append(line)
except OSError:
log.error("File '%s' was not found", shells_fn)
return ret
def shell_info(shell, list_modules=False):
"""
.. versionadded:: 2016.11.0
Provides information about a shell or a scripting language which often uses
``#!``. The values returned depend on the shell or scripting language, but
all of them include ``installed``, ``path``, ``version`` and
``version_raw``.
Args:
shell (str): Name of the shell. Support shells/script languages include
bash, cmd, perl, php, powershell, python, ruby and zsh
list_modules (bool): True to list modules available to the shell.
Currently only lists powershell modules.
Returns:
dict: A dictionary of information about the shell
.. code-block:: python
{'version': '<2 or 3 numeric components dot-separated>',
'version_raw': '<full version string>',
'path': '<full path to binary>',
'installed': <True, False or None>,
'<attribute>': '<attribute value>'}
.. note::
- ``installed`` is always returned; if it is ``None`` or ``False`` an
``error`` is also returned, and ``stdout`` may be included for diagnostics.
- ``version`` is for use in determining whether a shell/script language has
a particular feature set, not for package management.
- The shell must be within the executable search path.
CLI Example:
.. code-block:: bash
salt '*' cmd.shell_info bash
salt '*' cmd.shell_info powershell
:codeauthor: Damon Atkins <https://github.com/damon-atkins>
"""
regex_shells = {
"bash": [r"version (\d\S*)", "bash", "--version"],
"bash-test-error": [
r"versioZ ([-\w.]+)",
"bash",
"--version",
], # used to test an error result
"bash-test-env": [
r"(HOME=.*)",
"bash",
"-c",
"declare",
], # used to test an error result
"zsh": [r"^zsh (\d\S*)", "zsh", "--version"],
"tcsh": [r"^tcsh (\d\S*)", "tcsh", "--version"],
"cmd": [r"Version ([\d.]+)", "cmd.exe", "/C", "ver"],
"powershell": [
r"PSVersion\s+(\d\S*)",
"powershell",
"-NonInteractive",
"$PSVersionTable",
],
"perl": [r"^(\d\S*)", "perl", "-e", 'printf "%vd\n", $^V;'],
"python": [r"^Python (\d\S*)", "python", "-V"],
"ruby": [r"^ruby (\d\S*)", "ruby", "-v"],
"php": [r"^PHP (\d\S*)", "php", "-v"],
}
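# Added note: each entry above is [version regex, executable, arg, ...]; the
# regex is popped off below and the remainder is run as the probe command
# (e.g. ``bash --version``), with the first capture group used as the raw
# version string.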
# Ensure ret['installed'] always has a value of True, False or None (None = not sure)
ret = {"installed": False}
if salt.utils.platform.is_windows() and shell == "powershell":
pw_keys = salt.utils.win_reg.list_keys(
hive="HKEY_LOCAL_MACHINE", key="Software\\Microsoft\\PowerShell"
)
pw_keys.sort(key=int)
if not pw_keys:
return {
"error": (
"Unable to locate 'powershell' Reason: Cannot be found in registry."
),
"installed": False,
}
for reg_ver in pw_keys:
install_data = salt.utils.win_reg.read_value(
hive="HKEY_LOCAL_MACHINE",
key="Software\\Microsoft\\PowerShell\\{}".format(reg_ver),
vname="Install",
)
if (
install_data.get("vtype") == "REG_DWORD"
and install_data.get("vdata") == 1
):
details = salt.utils.win_reg.list_values(
hive="HKEY_LOCAL_MACHINE",
key="Software\\Microsoft\\PowerShell\\{}\\PowerShellEngine".format(
reg_ver
),
)
# reset data, want the newest version details only as powershell
# is backwards compatible
ret = {}
# if all goes well this will become True
ret["installed"] = None
ret["path"] = which("powershell.exe")
for attribute in details:
if attribute["vname"].lower() == "(default)":
continue
elif attribute["vname"].lower() == "powershellversion":
ret["psversion"] = attribute["vdata"]
ret["version_raw"] = attribute["vdata"]
elif attribute["vname"].lower() == "runtimeversion":
ret["crlversion"] = attribute["vdata"]
if ret["crlversion"][0].lower() == "v":
ret["crlversion"] = ret["crlversion"][1::]
elif attribute["vname"].lower() == "pscompatibleversion":
# reg attribute does not end in s, the powershell
# attribute does
ret["pscompatibleversions"] = (
attribute["vdata"].replace(" ", "").split(",")
)
else:
# keys are lower case because python is case sensitive while the
# registry is not
ret[attribute["vname"].lower()] = attribute["vdata"]
else:
if shell not in regex_shells:
return {
"error": (
"Salt does not know how to get the version number for {}".format(
shell
)
),
"installed": None,
}
shell_data = regex_shells[shell]
pattern = shell_data.pop(0)
# We need to make sure HOME is set, so shells work correctly.
# salt-call will generally have HOME set, but the salt-minion service may not.
# We assume that ports of unix shells to Windows look after setting HOME
# themselves, as they do it in many different ways.
if salt.utils.platform.is_windows():
import nt
newenv = nt.environ
else:
newenv = os.environ
if ("HOME" not in newenv) and (not salt.utils.platform.is_windows()):
newenv["HOME"] = os.path.expanduser("~")
log.debug("HOME environment set to %s", newenv["HOME"])
try:
proc = salt.utils.timed_subprocess.TimedProc(
shell_data,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
timeout=10,
env=newenv,
)
except OSError as exc:
return {
"error": "Unable to run command '{}' Reason: {}".format(
" ".join(shell_data), exc
),
"installed": False,
}
try:
proc.run()
except TimedProcTimeoutError as exc:
return {
"error": "Unable to run command '{}' Reason: Timed out.".format(
" ".join(shell_data)
),
"installed": False,
}
ret["path"] = which(shell_data[0])
pattern_result = re.search(pattern, proc.stdout, flags=re.IGNORECASE)
# only set version if we find it, so code later on can deal with it
if pattern_result:
ret["version_raw"] = pattern_result.group(1)
if "version_raw" in ret:
version_results = re.match(r"(\d[\d.]*)", ret["version_raw"])
if version_results:
ret["installed"] = True
ver_list = version_results.group(1).split(".")[:3]
if len(ver_list) == 1:
ver_list.append("0")
ret["version"] = ".".join(ver_list[:3])
else:
ret["installed"] = None # Have an unexpected result
# Get a list of the PowerShell modules which are potentially available
# to be imported
if shell == "powershell" and ret["installed"] and list_modules:
ret["modules"] = salt.utils.powershell.get_modules()
if "version" not in ret:
ret["error"] = (
"The version regex pattern for shell {} could not "
"find the version string".format(shell)
)
ret["stdout"] = proc.stdout # include stdout so they can see the issue
log.error(ret["error"])
return ret
def powershell(
cmd,
cwd=None,
stdin=None,
runas=None,
shell="powershell",
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel="debug",
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv=None,
use_vt=False,
password=None,
depth=None,
encode_cmd=False,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Execute the passed PowerShell command and return the output as a dictionary.
Other ``cmd.*`` functions (besides ``cmd.powershell_all``)
return the raw text output of the command. This
function appends ``| ConvertTo-JSON`` to the command and then parses the
JSON into a Python dictionary. If you want the raw textual result of your
PowerShell command you should use ``cmd.run`` with the ``shell=powershell``
option.
For example:
.. code-block:: bash
salt '*' cmd.run '$PSVersionTable.CLRVersion' shell=powershell
salt '*' cmd.run 'Get-NetTCPConnection' shell=powershell
.. versionadded:: 2016.3.0
.. warning::
This passes the cmd argument directly to PowerShell
without any further processing! Be absolutely sure that you
have properly sanitized the command passed to this function
and do not use untrusted inputs.
In addition to the normal ``cmd.run`` parameters, this command offers the
``depth`` parameter to change the Windows default depth for the
``ConvertTo-JSON`` powershell command. The Windows default is 2. If you need
more depth, set that here.
.. note::
For some commands, setting the depth to a value greater than 4 greatly
increases the time it takes for the command to return and in many cases
returns useless data.
:param str cmd: The powershell command to run.
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in cases
where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str shell: Specify an alternate shell. Defaults to "powershell". Can
also use "pwsh" for powershell core if present on the system
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.powershell 'some command' env='{"FOO": "bar"}'
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses ``Path``, whereas other systems use ``PATH``.
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param bool reset_system_locale: Resets the system locale
:param str saltenv: The salt environment to use. Default is 'base'
:param int depth: The number of levels of contained objects to be included.
Default is 2. Values greater than 4 seem to greatly increase the time
it takes for the command to complete for some commands, e.g. ``dir``
.. versionadded:: 2016.3.4
:param bool encode_cmd: Encode the command before executing. Use in cases
where characters may be dropped or incorrectly converted when executed.
Default is False.
:param list success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
:param list success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param list success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: 2019.2.0
:returns:
:dict: A dictionary of data returned by the powershell command.
CLI Example:
.. code-block:: powershell
salt '*' cmd.powershell "$PSVersionTable.CLRVersion"
"""
if shell not in ["powershell", "pwsh"]:
raise CommandExecutionError(
"Must specify a valid powershell binary. Must be 'powershell' or 'pwsh'"
)
if "python_shell" in kwargs:
python_shell = kwargs.pop("python_shell")
else:
python_shell = True
# Append PowerShell Object formatting
# ConvertTo-JSON is only available on PowerShell 3.0 and later
psversion = shell_info("powershell")["psversion"]
if salt.utils.versions.version_cmp(psversion, "2.0") == 1:
cmd += " | ConvertTo-JSON"
if depth is not None:
cmd += " -Depth {}".format(depth)
# Put the whole command inside a try / catch block
# Some errors in PowerShell are not "Terminating Errors" and will not be
# caught in a try/catch block. For example, the `Get-WmiObject` command will
# often return a "Non Terminating Error". To fix this, make sure
# `-ErrorAction Stop` is set in the powershell command
cmd = "try {" + cmd + '} catch { "{}" }'
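# Added illustration: for cmd='Get-Date | ConvertTo-JSON' the wrapper above
# produces:
#   try {Get-Date | ConvertTo-JSON} catch { "{}" }
# so a terminating error makes PowerShell emit the literal string {}, which the
# JSON parsing below turns into an empty dict.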
if encode_cmd:
# Convert the cmd to UTF-16LE without a BOM and base64 encode.
# Just base64 encoding UTF-8 or including a BOM is not valid.
log.debug("Encoding PowerShell command '%s'", cmd)
cmd = "$ProgressPreference='SilentlyContinue'; {}".format(cmd)
cmd_utf16 = cmd.encode("utf-16-le")
cmd = base64.standard_b64encode(cmd_utf16)
cmd = salt.utils.stringutils.to_str(cmd)
encoded_cmd = True
else:
encoded_cmd = False
# Retrieve the response, while overriding shell with 'powershell'
response = run(
cmd,
cwd=cwd,
stdin=stdin,
runas=runas,
shell=shell,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
hide_output=hide_output,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
python_shell=python_shell,
password=password,
encoded_cmd=encoded_cmd,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
**kwargs
)
# Sometimes Powershell returns an empty string, which isn't valid JSON
if response == "":
response = "{}"
try:
return salt.utils.json.loads(response)
except Exception: # pylint: disable=broad-except
log.error("Error converting PowerShell JSON return", exc_info=True)
return {}
def powershell_all(
cmd,
cwd=None,
stdin=None,
runas=None,
shell="powershell",
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel="debug",
quiet=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv=None,
use_vt=False,
password=None,
depth=None,
encode_cmd=False,
force_list=False,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Execute the passed PowerShell command and return a dictionary with a result
field representing the output of the command, as well as other fields
showing us what the PowerShell invocation wrote to ``stderr``, the process
id, and the exit code of the invocation.
This function appends ``| ConvertTo-JSON`` to the command before actually
invoking powershell.
An unquoted empty string is not valid JSON, but it's very normal for the
Powershell output to be exactly that. Therefore, we do not attempt to parse
empty Powershell output (which would result in an exception). Instead we
treat this as a special case and one of two things will happen:
- If the value of the ``force_list`` parameter is ``True``, then the
``result`` field of the return dictionary will be an empty list.
- If the value of the ``force_list`` parameter is ``False``, then the
return dictionary **will not have a result key added to it**. We aren't
setting ``result`` to ``None`` in this case, because ``None`` is the
Python representation of "null" in JSON. (We likewise can't use ``False``
for the equivalent reason.)
If Powershell's output is not an empty string and Python cannot parse its
content, then a ``CommandExecutionError`` exception will be raised.
If Powershell's output is not an empty string, Python is able to parse its
content, and the type of the resulting Python object is other than ``list``
then one of two things will happen:
- If the value of the ``force_list`` parameter is ``True``, then the
``result`` field will be a singleton list with the Python object as its
sole member.
- If the value of the ``force_list`` parameter is ``False``, then the value
of ``result`` will be the unmodified Python object.
If Powershell's output is not an empty string, Python is able to parse its
content, and the type of the resulting Python object is ``list``, then the
value of ``result`` will be the unmodified Python object. The
``force_list`` parameter has no effect in this case.
.. note::
An example of why the ``force_list`` parameter is useful is as
follows: The Powershell command ``dir x | ConvertTo-Json`` results in
- no output when x is an empty directory.
- a dictionary object when x contains just one item.
- a list of dictionary objects when x contains multiple items.
By setting ``force_list`` to ``True`` we will always end up with a
list of dictionary items, representing files, no matter how many files
x contains. Conversely, if ``force_list`` is ``False``, we will end
up with no ``result`` key in our return dictionary when x is an empty
directory, and a dictionary object when x contains just one file.
If you want a similar function but with a raw textual result instead of a
Python dictionary, you should use ``cmd.run_all`` in combination with
``shell=powershell``.
The remaining fields in the return dictionary are described in more detail
in the ``Returns`` section.
Example:
.. code-block:: bash
salt '*' cmd.run_all '$PSVersionTable.CLRVersion' shell=powershell
salt '*' cmd.run_all 'Get-NetTCPConnection' shell=powershell
.. versionadded:: 2018.3.0
.. warning::
This passes the cmd argument directly to PowerShell without any further
processing! Be absolutely sure that you have properly sanitized the
command passed to this function and do not use untrusted inputs.
In addition to the normal ``cmd.run`` parameters, this command offers the
``depth`` parameter to change the Windows default depth for the
``ConvertTo-JSON`` powershell command. The Windows default is 2. If you need
more depth, set that here.
.. note::
For some commands, setting the depth to a value greater than 4 greatly
increases the time it takes for the command to return and in many cases
returns useless data.
:param str cmd: The powershell command to run.
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
:param str shell: Specify an alternate shell. Defaults to "powershell". Can
also use "pwsh" for powershell core if present on the system
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.powershell_all 'some command' env='{"FOO": "bar"}'
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses ``Path``, whereas other systems use ``PATH``.
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param int timeout: A timeout in seconds for the executed process to
return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param bool reset_system_locale: Resets the system locale
:param str saltenv: The salt environment to use. Default is 'base'
:param int depth: The number of levels of contained objects to be included.
Default is 2. Values greater than 4 seem to greatly increase the time
it takes for the command to complete for some commands, e.g. ``dir``
:param bool encode_cmd: Encode the command before executing. Use in cases
where characters may be dropped or incorrectly converted when executed.
Default is False.
:param bool force_list: The purpose of this parameter is described in the
preamble of this function's documentation. Default value is False.
:param list success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
:param list success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param list success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: 2019.2.0
:return: A dictionary with the following entries:
result
For a complete description of this field, please refer to this
function's preamble. **This key will not be added to the dictionary
when force_list is False and Powershell's output is the empty
string.**
stderr
What the PowerShell invocation wrote to ``stderr``.
pid
The process id of the PowerShell invocation
retcode
This is the exit code of the invocation of PowerShell.
If the final execution status (in PowerShell) of our command
(with ``| ConvertTo-JSON`` appended) is ``False`` this should be non-0.
Likewise if PowerShell exited with ``$LASTEXITCODE`` set to some
non-0 value, then ``retcode`` will end up with this value.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' cmd.powershell_all "$PSVersionTable.CLRVersion"
CLI Example:
.. code-block:: bash
salt '*' cmd.powershell_all "dir mydirectory" force_list=True
"""
if shell not in ["powershell", "pwsh"]:
raise CommandExecutionError(
"Must specify a valid powershell binary. Must be 'powershell' or 'pwsh'"
)
if "python_shell" in kwargs:
python_shell = kwargs.pop("python_shell")
else:
python_shell = True
# Append PowerShell Object formatting
cmd += " | ConvertTo-JSON"
if depth is not None:
cmd += " -Depth {}".format(depth)
if encode_cmd:
# Convert the cmd to UTF-16LE without a BOM and base64 encode.
# Just base64 encoding UTF-8 or including a BOM is not valid.
log.debug("Encoding PowerShell command '%s'", cmd)
cmd = "$ProgressPreference='SilentlyContinue'; {}".format(cmd)
cmd_utf16 = cmd.encode("utf-16-le")
cmd = base64.standard_b64encode(cmd_utf16)
cmd = salt.utils.stringutils.to_str(cmd)
encoded_cmd = True
else:
encoded_cmd = False
# Retrieve the response, while overriding shell with 'powershell'
response = run_all(
cmd,
cwd=cwd,
stdin=stdin,
runas=runas,
shell=shell,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
quiet=quiet,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
python_shell=python_shell,
password=password,
encoded_cmd=encoded_cmd,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
**kwargs
)
stdoutput = response["stdout"]
# if stdoutput is the empty string and force_list is True we return an empty list
# Otherwise we return response with no result key
if not stdoutput:
response.pop("stdout")
if force_list:
response["result"] = []
return response
# If we fail to parse stdoutput we will raise an exception
try:
result = salt.utils.json.loads(stdoutput)
except Exception: # pylint: disable=broad-except
err_msg = "cmd.powershell_all cannot parse the Powershell output."
response["cmd"] = cmd
raise CommandExecutionError(message=err_msg, info=response)
response.pop("stdout")
if not isinstance(result, list):
if force_list:
response["result"] = [result]
else:
response["result"] = result
else:
# result type is list so the force_list param has no effect
response["result"] = result
return response
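# Added usage sketch (comment only; ``process`` below is a hypothetical helper):
# a caller that tolerates empty output might cross-call this function as:
#
#   out = __salt__["cmd.powershell_all"]("dir C:\\logs", force_list=True)
#   for entry in out.get("result", []):
#       process(entry)
#
# With force_list=True the loop also covers the single-item case, because the
# lone JSON object is wrapped in a one-element list.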
def run_bg(
cmd,
cwd=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
umask=None,
timeout=None,
output_encoding=None,
output_loglevel="debug",
log_callback=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv=None,
password=None,
prepend_path=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
r"""
.. versionadded:: 2016.3.0
Execute the passed command in the background and return its PID
.. note::
If the init system is systemd and the backgrounded task should run even
if the salt-minion process is restarted, prepend ``systemd-run
--scope`` to the command. This will reparent the process in its own
scope separate from salt-minion, and will not be affected by restarting
the minion service.
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str group: Group to run command as. Not currently supported
on Windows.
:param str shell: Shell to execute under. Defaults to the system default
shell.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
.. warning::
For versions 2018.3.3 and above on macOS, while using runas,
to pass special characters to the command you need to escape
the characters on the shell.
Example:
.. code-block:: bash
cmd.run_bg 'echo '\''h=\"baz\"'\''' runas=macuser
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.run_bg 'some command' env='{"FOO": "bar"}'
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses ``Path``, whereas other systems use ``PATH``.
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str prepend_path: $PATH segment to prepend (trailing ':' not
necessary) to $PATH
.. versionadded:: 2018.3.0
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param str umask: The umask (in octal) to use when running the command.
:param int timeout: A timeout in seconds for the executed process to return.
.. warning::
This function does not process commands through a shell unless the
``python_shell`` argument is set to ``True``. This means that any
shell-specific functionality such as 'echo' or the use of pipes,
redirection or &&, should either be migrated to cmd.shell or have the
python_shell=True flag set here.
The use of ``python_shell=True`` means that the shell will accept _any_
input including potentially malicious commands such as 'good_command;rm
-rf /'. Be absolutely certain that you have sanitized your input prior
to using ``python_shell=True``.
:param list success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
:param list success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param list success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' cmd.run_bg "fstrim-all"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_bg template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
Specify an alternate shell with the shell parameter:
.. code-block:: bash
salt '*' cmd.run_bg "Get-ChildItem C:\\ " shell='powershell'
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' cmd.run_bg cmd='ls -lR / | sed -e s/=/:/g > /tmp/dontwait'
"""
python_shell = _python_shell_default(python_shell, kwargs.get("__pub_jid", ""))
res = _run(
cmd,
stdin=None,
stderr=None,
stdout=None,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
use_vt=None,
bg=True,
with_communicate=False,
rstrip=False,
runas=runas,
group=group,
shell=shell,
python_shell=python_shell,
cwd=cwd,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
umask=umask,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
password=password,
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
**kwargs
)
return {"pid": res["pid"]}
saltstack/salt | salt/modules/cmdmod.py | Python | apache-2.0 | 166,299
import numpy as np
from copy import deepcopy
from tools.belief import DiscreteBelief
#################################################################
# Implements the Tiger POMDP problem
#################################################################
class TigerPOMDP():
# constructor
def __init__(self,
seed=999, # random seed
rlisten=-1.0, rtiger=-100.0, rescape=10.0, # reward values
pcorrect=0.85, # correct observation prob
discount=0.95): # discount
self.random_state = np.random.RandomState(seed)
self.rlisten = rlisten
self.rtiger = rtiger
self.rescape = rescape
self.pcorrect = pcorrect
self.discount = discount
# transition arrs
self.tstates = [0, 1] # left, right
# actions
self.tactions = [0, 1, 2] # open left, open right, listen
# observations arrs
self.tobs = [0, 1] # observed on the left, observed on the right
# belief and observation shape
self.belief_shape = (2,1)
self.observation_shape = (1,1)
#################################################################
# Setters
#################################################################
def set_discount(self, d):
self.discount = d
def set_rewards(self, rl, rt, re):
self.rlisten = rl
self.rtiger = rt
self.rescape = re
def set_listen_prob(self, pc):
self.pcorrect = pc
#################################################################
# S, A, O Spaces
#################################################################
def states(self):
return self.tstates
def actions(self):
return self.tactions
def observations(self):
return self.tobs
#################################################################
# Reward Function
#################################################################
def reward(self, s, a):
r = 0.0
rt = self.rtiger
re = self.rescape
if a == 2:
r += self.rlisten
elif a == 1:
r = (r + rt) if s == 1 else (r + re)
else:
r = (r + rt) if s == 0 else (r + re)
return r
#################################################################
# Distribution Functions
#################################################################
# returns the transition distribution over s' for the (s, a) pair
def transition(self, s, a, dist):
if a == 0 or a == 1:
dist[0] = 0.5
dist[1] = 0.5
elif s == 0:
dist[0] = 1.0
dist[1] = 0.0
else:
dist[0] = 0.0
dist[1] = 1.0
return dist
# sample the transition distribution
def sample_state(self, d):
sidx = self.categorical(d)
return self.tstates[sidx]
# returns the observation distribution of o for the (s, a) pair
def observation(self, s, a, dist):
p = self.pcorrect
if a == 2:
if s == 0:
dist[0] = p
dist[1] = 1.0 - p
else:
dist[0] = 1.0 - p
dist[1] = p
else:
dist[0] = 0.5
dist[1] = 0.5
return dist
# sample the observation distribution
def sample_observation(self, d):
oidx = self.categorical(d)
return self.tobs[oidx]
# pdf should be in a distributions module
def transition_pdf(self, d, dval):
assert dval < 2, "Attempting to retrieve pdf value larger than state size"
return d[dval]
def observation_pdf(self, d, dval):
assert dval < 2, "Attempting to retrieve pdf value larger than state size"
return d[dval]
# numpy categorical sampling hack
def categorical(self, d):
return np.flatnonzero( self.random_state.multinomial(1,d,1) )[0]
#################################################################
# Create functions
#################################################################
def create_transition_distribution(self):
td = np.array([0.5, 0.5])
return td
def create_observation_distribution(self):
od = np.array([0.5, 0.5])
return od
def create_belief(self):
return DiscreteBelief(self.n_states())
def initial_belief(self):
return DiscreteBelief(self.n_states())
def initial_state(self):
return self.random_state.randint(2)
#################################################################
# Misc Functions
#################################################################
def isterminal(self, s):
# no terminal state in model
return False
def index2action(self, ai):
return ai
def n_states(self):
return 2
def n_actions(self):
return 3
def n_observations(self):
return 2
#################################################################
# Policies
#################################################################
def optimal_policy(self):
def pol(b):
if b[0] < 0.04:
return 0
elif b[0] > 0.96:
return 1
else:
return 2
return pol
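# ---------------------------------------------------------------------------
# Minimal usage sketch added for illustration; it is not part of the original
# model. It only calls methods defined above. The 10-step horizon and the
# hand-rolled two-state Bayes update are assumptions made for this example
# (DiscreteBelief from tools.belief is deliberately not used, since its update
# API is not shown in this file).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pomdp = TigerPOMDP(seed=0)
    policy = pomdp.optimal_policy()
    s = pomdp.initial_state()
    b = np.array([0.5, 0.5])  # belief over [tiger-left, tiger-right]
    tdist = pomdp.create_transition_distribution()
    odist = pomdp.create_observation_distribution()
    total_reward = 0.0
    for _ in range(10):
        a = policy(b)
        total_reward += pomdp.reward(s, a)
        tdist = pomdp.transition(s, a, tdist)
        s = pomdp.sample_state(tdist)
        odist = pomdp.observation(s, a, odist)
        o = pomdp.sample_observation(odist)
        if a == 2:
            # Bayes update of the belief on the listen observation
            left = pomdp.observation(0, a, np.zeros(2))[o] * b[0]
            right = pomdp.observation(1, a, np.zeros(2))[o] * b[1]
            b = np.array([left, right]) / (left + right)
        else:
            # opening a door resets the tiger's position, so reset the belief too
            b = np.array([0.5, 0.5])
    print("total reward over 10 steps: {}".format(total_reward))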
sisl/Chimp | chimp/simulators/pomdp/models/tiger.py | Python | apache-2.0 | 5,397
import csv
import sys
from models import *
from datetime import datetime
import codecs
import json
# from models import Attorney, Organization
from flask_mail import Message
def load_attorneys_from_csv(filename):
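# Added note on the expected CSV layout, inferred from the indexing below
# (the header row is discarded by the pop(0) call):
#   0 first_name, 1 middle_initial, 2 last_name, 3 email_address,
#   4 organization_name, 5 year, 6 honor_choice, 7 rule_49_choice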
with codecs.open(filename, mode='rb', encoding='utf-8') as csvfile:
attorneys = [row for row in csv.reader(csvfile.read().splitlines())]
attorneys.pop(0)
try:
for attorney in attorneys:
# If the email address is already in the system, update that record; otherwise create a new one.
if check_new_email(attorney[3]):
a = Attorney.objects.get(email_address=attorney[3])
else:
a = Attorney()
a.first_name = attorney[0]
a.middle_initial = attorney[1]
a.last_name = attorney[2]
a.email_address = attorney[3]
a.organization_name = Organization.objects(
organization_name=attorney[4]
).upsert_one(organization_name=attorney[4]) \
.organization_name
if len(a.records) <= 1:
a.records.append({
'year': attorney[5],
'honor_choice': attorney[6],
'rule_49_choice': attorney[7],
'date_modified': datetime.now(),
'method_added': u'bulk'
})
a.save()
print(attorney[3] + " is loaded.")
except:
print("Unexpected error:", sys.exc_info()[0])
raise
return True
def check_new_email(email_address):
try:
Attorney.objects.get(email_address=email_address)
return True
except Attorney.DoesNotExist:
return False
if __name__ == "__main__":
import sys
import os
from models import *
MONGODB_URI = os.environ.get(
"MONGOLAB_URI", 'mongodb://localhost/honorroll')
mongo_client = connect(host=MONGODB_URI)
filename = sys.argv[1]
load_attorneys_from_csv(filename)
mitzvotech/honorroll | app/utils.py | Python | mit | 2,178
#!/usr/bin/env python
# Metarunlog, experiment management tool.
# Author: Tom Sercu
# Date: 2015-01-23
from metarunlog import cfg # NOTE cfg is modified by MetaRunLog._loadBasedirConfig() with custom configuration.
from metarunlog.exceptions import *
from metarunlog.util import nowstring, sshify, _decode_dict, _decode_list, get_commit
from metarunlog.confParser import ConfParser
import os
import sys
import math
from os import listdir
from os.path import isdir, isfile, join, relpath, expanduser
import argparse
import subprocess
try:
import simplejson as json # way better error messaging
except:
import json
import pdb
import datetime
from collections import OrderedDict
from shutil import copy as shcopy
import shutil
import jinja2
from jinja2 import Template, Environment, meta
import itertools
import getpass
DEBUG = True
def initBasedir(basedir, args):
""" init basedir"""
# check if it already is a valid basedir, then raise an exception
try:
mrlState = MetaRunLog(basedir)
except NoBasedirConfig:
pass # should be raised
else:
return "{} already has valid .mrl.cfg file, remove it first to re-init.".format(basedir)
# initialize .mrl.cfg file as a json-dict copy of cfg.py template
with open(join(basedir, '.mrl.cfg'),"w") as fh:
# copy the cfg attributes into ordered dict
bconf = {k:getattr(cfg,k) for k in dir(cfg) if '__' not in k}
bconf['outdir'] = args.outdir
json.dump(bconf, fh, indent=2)
fh.write("\n")
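# Added note: the resulting .mrl.cfg is plain JSON whose keys mirror the
# module-level attributes of metarunlog/cfg.py, with 'outdir' taken from the
# command-line argument, e.g. (keys and values illustrative):
#   {"outdir": "output", "confTemplFile": "conf.template", ...}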
try:
mrlState = MetaRunLog(basedir)
except Exception as e:
print("initBasedir() - Failed to init basedirconfig")
raise
else:
return join(basedir, ".mrl.cfg")
class MetaRunLog:
"""
Metarunlog state from which all actions are coordinated.
Needs to be run from a valid basedir (with .mrl.cfg file).
Initialization overwrites cfg module from .mrl.cfg: metarunlog.cfg.py should be
seen as a template for .mrl.cfg.
"""
def __init__(self, basedir):
self.basedir = basedir
self._loadBasedirConfig()
self.outdir = join(self.basedir, cfg.outdir)
if not isdir(self.outdir): raise InvalidOutDirException("Need output directory " + self.outdir + " Fix your .mrl.cfg file")
self.expDirList = sorted([x for x in listdir(self.outdir) if self._checkValidExp(x)])
self.expList = [int(x) for x in self.expDirList]
self.lastExpId = None if not self.expList else self.expList[-1]
def _loadBasedirConfig(self):
try:
with open(join(self.basedir, '.mrl.cfg')) as fh:
bconf = json.load(fh, object_hook=_decode_dict)
for k,v in bconf.iteritems():
setattr(cfg,k,v)
return True
except Exception as e:
raise NoBasedirConfig(self.basedir, str(e))
def new(self,args):
expId = 1 if not self.lastExpId else self.lastExpId+1
expConfig = OrderedDict()
expConfig ['expId'] = expId
expConfig['basedir'] = self.basedir
gitclean = not bool(args.notclean)
expConfig['gitFailUntracked']= args.gitFailUntracked
untracked = 'no' if args.gitFailUntracked == 'no' else 'normal'
uncommited = subprocess.check_output("git status --porcelain --untracked=" + untracked, shell=True)
#print "uncommited: ", uncommited
if gitclean and uncommited:
raise NoCleanStateException("new: uncommited files -- please commit changes first\n" + uncommited)
expConfig['gitHash'], expConfig['gitDescription'] = get_commit()
if not gitclean and uncommited: expConfig['gitHash'] += '-sloppy'
expConfig['timestamp'] = nowstring()
expConfig['user'] = getpass.getuser()
expConfig['description'] = args.description if args.description else ""
# make dir and symlink
expDir = self._getExpDir(expId, True)
if self.lastExpId and os.path.lexists(join(self.basedir,'last')):
os.remove(join(self.basedir,'last'))
os.symlink(relpath(expDir,self.basedir), join(self.basedir, 'last'))
os.mkdir(expDir)
# After this point: expDir is made, no more exception throwing! copy config files
try:
if args.copyConfigFrom == 'no':
pass
else:
srcDir = self._getExpDir(self._resolveExpId(args.copyConfigFrom))
self._copyConfigFrom(srcDir, expDir)
except (InvalidExpIdException, IOError) as e:
print("Can't copy config files. ")
print(e)
            print("Still successfully created the new experiment directory.")
self._saveExpDotmrl(expDir, expConfig)
self._putEmptyNote(expDir, expConfig['description'])
self.lastExpId = expId
self.expList.append(expId)
return expDir
def info(self, args):
""" load info from experiment id and print it """
expId, expDir, expConfig = self._loadExp(args.expId)
subExpList = self._getSubExperiments(expDir)
items = ['',expDir,'']
maxkeylen = max(len(k) for k,v in expConfig.iteritems())
items += ["%*s : %.80s" % (maxkeylen,k,str(v)) for k,v in expConfig.iteritems()]
items += ["%*s : %.80s" % (maxkeylen, 'subExperiments', str(subExpList))]
return "\n".join(items)
def ls(self, args):
for expId in self.expList[::-1]:
expConfig = self._getExpConf(expId)
row = self._fmtSingleExp(expId)
if args.tm:
row += "\t" + expConfig['timestamp']
if args.ghash:
row += "\t" + expConfig['gitHash']
if args.gdesc:
row += "\t" + expConfig['gitDescription']
if args.desc:
expDir = self._getExpDir(expId)
row += "\t" + self._expIsDoneIndicator(expDir) + expConfig['description']
            print(row)
return ""
def makebatch(self, args):
expId, expDir, expConfig = self._loadExp(args.expId)
# check if already expanded in batch and cancel
oldSubExpList = self._getSubExperiments(expDir)
if oldSubExpList:
if args.replace:
# just remove all these subfolders. Drastic but it wouldn't make sense to keep results from old config files.
                print("Will remove all old subexperiments: {}".format(oldSubExpList))
for subExp in oldSubExpList:
shutil.rmtree(join(expDir, subExp))
else:
raise BatchException("Experiment {} is already expanded into subexperiments: {}".\
format(expId, str(oldSubExpList)))
# make ConfParser object
confP = ConfParser(join(expDir, cfg.confTemplFile))
# check if ConfParser output is non-empty
if not confP.output:
err = "ConfParser output is empty, are you sure {} is a batch template?"
err = err.format(join(expDir, cfg.confTemplFile))
raise BatchException(err)
# generate output directories, write the output config files
for i, params, fileContent in confP.output:
self._newSubExp(expDir, i, {'params': params}, fileContent)
# update the current .mrl file
subExpList = self._getSubExperiments(expDir)
return "Generated subExperiments {} for expId {}.\n".format(subExpList, expId)
def analyze(self, args):
## Load modules only needed for analyzing and rendering the html file
import pandas as pd
import renderHtml
try:
import mrl_analyze
except Exception as e:
print("Could not load local mrl_analyze module: {}".format(str(e)))
expId, expDir, expConfig = self._loadExp(args.expId)
        print("Analyze expId {} in path {}".format(expId, expDir))
outdir = args.outdir if args.outdir else join(expDir, cfg.analysis_outdir)
if not os.path.exists(outdir): os.mkdir(outdir)
# load the params into dataframe
subExpIds = self._getSubExperiments(expDir)
if not subExpIds:
raise InvalidExpIdException("Exp {} not expanded into subExps".format(expId))
paramList = [self._loadSubExp(join(expDir,subExpId))['params'] for subExpId in subExpIds]
Dparams = pd.DataFrame(paramList, index=subExpIds)
outhtml = renderHtml.HtmlFile()
title = '{} {} - {}'.format(cfg.name, cfg.singleExpFormat.format(expId=expId), expConfig['timestamp'].split('T')[0])
if 'description' in expConfig and expConfig['description']: title += ' - ' + expConfig['description']
outhtml.addTitle(self._expIsDoneIndicator(expDir) + title)
outhtml.parseNote(join(expDir, cfg.note_fn))
# TODO keep analysis functions in order by using ordereddict in .mrl.cfg and cfg.py
#### (1) analysis_overview functions
for funcname, xtrargs in sorted(cfg.analysis_overview.items()):
outhtml.addHeader('{} - {}'.format('overview', funcname), 1, funcname)
retval = getattr(mrl_analyze, funcname)(expDir, outdir, subExpIds, Dparams, *xtrargs)
outhtml.addRetVal(retval)
#### (2) per exp functions
for subExpId in subExpIds:
subExpDir = join(expDir, subExpId)
outhtml.addHeader('{} - {}'.format('subExp', subExpId), 1, subExpId)
for funcname, xtrargs in cfg.analysis_subexp.items():
outhtml.addHeader('{}'.format(funcname), 2)
retval = getattr(mrl_analyze, funcname)(subExpDir, outdir, Dparams, subExpId, *xtrargs)
outhtml.addRetVal(retval)
#### (3) render and optionally copy over to webdir
outhtml.render(join(outdir, cfg.analysis_outfn))
if cfg.analysis_webdir:
webdir = join(cfg.analysis_webdir, self._fmtSingleExp(expId))
subprocess.call("rsync -az {}/* {}/".format(outdir, webdir), shell=True)
            print("Copied to webdir {}".format(webdir))
def execWithHooks(self, mode, args):
noneFunc = lambda x:None # empty dummy func
hookBefore = getattr(self, 'before_' + args.mode, noneFunc)
hookAfter = getattr(self, 'after_' + args.mode, noneFunc)
        # NOTE: each before/after hook and the mode function itself must resolve expId/expDir from args on its own.
hookBefore(args)
ret = getattr(self, args.mode)(args)
hookAfter(args)
return ret
def _loadSubExp(self, subExpDir):
try:
with open(join(subExpDir, '.mrl')) as fh:
return json.load(fh, object_hook=_decode_dict)
except (IOError, KeyError) as e: # dummy
return {'subExpDir': subExpDir, 'params':{}}
except ValueError as e:
raise type(e)('Failed to load .mrl file for subExp {} \n{}'.format(subExpDir, e.message))
def _getSubExperiments(self, expDir):
""" returns a list of the existing subexperiments as formatted strings """
subexp = []
for subdir in sorted(listdir(expDir)):
try:
if self._fmtSubExp(int(subdir)) == subdir:
subexp.append(str(subdir))
except ValueError:
pass
return subexp
def _getRunLocations(self, expId, subExpId, expConfig, relativeTo=''):
expDir = self._getExpDir(expId)
subExpList = self._getSubExperiments(expDir)
if subExpList:
if subExpId == 'all':
locs = subExpList
elif subExpId.isdigit():
if self._fmtSubExp(int(subExpId)) not in subExpList:
raise SubExpIdException("subExpId {} out of range (batch size {} in expConfig)".format(subExpId,len(subExpList)))
locs = [self._fmtSubExp(int(subExpId)), ]
else:
# TODO list of subexpids? can be handy
raise SubExpIdException("Don't understand subExpId {}.".format(subExpId))
else:
locs = ['']
if relativeTo != expDir:
locs = [join(expDir, loc) for loc in locs]
if relativeTo:
locs = [relpath(loc, relativeTo) for loc in locs]
return locs
def _fmtSingleExp(self, expId):
# TODO change fmtSingleExp to fetch date from a list initialized in init, then fill in that date here.
return cfg.singleExpFormat.format(expId=expId)
def _fmtSubExp(self, subExpId):
return cfg.subExpFormat.format(subExpId=subExpId)
def _relpathUser(self, path):
return '~/' + relpath(path, expanduser('~'))
def _copyConfigFrom(self, src, dst):
for cfn in cfg.copyFiles:
shcopy(join(src,cfn), join(dst, cfn))
    def _getCmdParams(self, expConfig, args, relloc):
        """
        Build the dictionary needed to render a job template into actual commands.
        Note: optional CLI parameters override everything except relloc and absloc
        (and device, which is set by startSsh or startPbs).
        """
cmdParams = {'mrlOutdir': self.outdir, 'mrlBasedir': self.basedir}
cmdParams.update(expConfig)
cmdParams.update({k:getattr(cfg, k) for k in dir(cfg) if '_' not in k}) # access to cfg params
cmdParams.update({k:v for k,v in vars(args).items() if v}) # the optional params if supplied
cmdParams.update({'relloc': relloc, 'absloc': join(self.outdir, relloc)})
return cmdParams
def _saveExpDotmrl(self, expDir, expConfig):
# write .mrl file and save as current expId
with open(join(expDir, '.mrl'),'w') as fh:
json.dump(expConfig, fh, indent=2)
fh.write("\n")
def _checkValidExp(self, name):
if len(name) != len(self._fmtSingleExp(0)): return False
try:
return self._fmtSingleExp(int(name)) == name
except:
return False
def _loadExp(self, argExpId):
expId = self._resolveExpId(argExpId)
expDir = self._getExpDir(expId)
expConfig = self._getExpConf(expId)
# Load .mrl.cfg file if it exists
try:
with open(join(expDir, '.mrl.cfg')) as fh:
bconf = json.load(fh, object_hook=_decode_dict)
for k,v in bconf.iteritems():
setattr(cfg,k,v)
        except IOError:  # file doesn't exist -> write a template
open(join(expDir, '.mrl.cfg'),'w').write("{\n}\n")
return (expId, expDir, expConfig)
    def _resolveExpId(self, expId):
        """Resolve expId from an int, 'last', or a path, and return the numeric expId;
        raise InvalidExpIdException if it cannot be resolved."""
if expId == 'last' and self.lastExpId is not None:
return self.lastExpId
elif type(expId) == int:
if expId in self.expList:
return expId
else:
raise InvalidExpIdException("Invalid experiment id (not in list): {}".format(int(expId)))
elif expId.isdigit():
if int(expId) in self.expList:
return int(expId)
else:
raise InvalidExpIdException("Invalid experiment id (not in list): {}".format(int(expId)))
elif isdir(expId) and relpath(expId, self.outdir) in self.expDirList:
return self.expList[self.expDirList.index(relpath(expId, self.outdir))]
else:
            raise InvalidExpIdException("This is not recognized as an experiment location: " + expId)
def _getExpDir(self, expId, new=False):
        if not new and expId not in self.expList:
raise InvalidExpIdException("Experiment {} not found.".format(expId))
return join(self.outdir, self._fmtSingleExp(expId))
def _newSubExp(self, expDir, subExpId, dotmrl, confContent):
subExpDir = join(expDir, self._fmtSubExp(subExpId))
os.mkdir(subExpDir)
self._copyConfigFrom(expDir, subExpDir)
with open(join(subExpDir, '.mrl'), 'w') as fh:
json.dump(dotmrl, fh, indent=2)
fh.write("\n")
with open(join(subExpDir, cfg.confTemplFile), "w") as fh:
fh.write(confContent)
fh.write("\n")
def _getExpConf(self, expId):
with open(join(self._getExpDir(expId), '.mrl')) as fh:
expConfig = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(fh.read())
return expConfig
def _putEmptyNote(self, expDir, description):
with open(join(expDir, cfg.note_fn),'w') as fh:
if description:
fh.write('### ' + description + '\n')
fh.write('#### Goal\n\n#### Observations\n\n#### Conclusions\n')
def _expIsDone(self, expDir):
return os.path.exists(os.path.join(expDir, '.mrl.done'))
def _expIsDoneIndicator(self, expDir):
return ' ' if self._expIsDone(expDir) else '** '
def main():
try:
sys.path.append(os.getcwd()) # include modules in basedir like myAnalyze
mrlState = MetaRunLog(os.getcwd())
except InvalidOutDirException as e:
print(e)
return
except NoBasedirConfig as e:
parser = argparse.ArgumentParser(
            description='No valid .mrl.cfg file found. Choose init to initialize, \
                or raise to see what went wrong.')
subparsers = parser.add_subparsers(title='subcommands', description='valid subcommands')
# init basedir
parser_init = subparsers.add_parser('init', help='init current directory as basedir. Should typically be your git top-level dir.')
parser_init.add_argument('outdir', default=cfg.outdir, help='where experiment run directories will be made and config/models files will be saved.')
parser_init.set_defaults(mode='init')
parser_raise = subparsers.add_parser('raise', help='Raise the NoBasedirConfig exception')
parser_raise.set_defaults(mode='raise')
args = parser.parse_args()
if args.mode == 'init':
try:
                print(initBasedir(os.getcwd(), args))
except InvalidOutDirException as e2:
print(e2)
return
elif args.mode == 'raise':
raise
# No Exception: Resume normal operation.
# Extend MetaRunLog with mrl_hooks
try:
import mrl_hooks # from basedir, user-supplied
for hook in cfg.hooks:
setattr(MetaRunLog, hook, getattr(mrl_hooks, hook))
except ImportError:
if cfg.hooks:
            print('Warning: no valid mrl_hooks.py file - will ignore cfg.hooks')
mrl_hooks = None
# CL menu
parser = argparse.ArgumentParser(description='Metarunlog.')
subparsers = parser.add_subparsers(title='subcommands', description='valid subcommands')
# new
parser_new = subparsers.add_parser('new', help='new experiment directory.')
parser_new.add_argument('-nc', '--notclean', action='store_const', const=True)
parser_new.add_argument('-gfut', '--gitFailUntracked', choices=['no', 'yes'], default = cfg.gitFailUntrackedDefault)
parser_new.add_argument('-cp', '--copyConfigFrom', default = 'last', nargs='?')
parser_new.add_argument('description', help='Description', nargs='?')
parser_new.set_defaults(mode='new')
# info
parser_info = subparsers.add_parser('info', help='show experiment info.')
parser_info.add_argument('expId', default='last', help='exp number, directory, or last', nargs='?')
parser_info.set_defaults(mode='info')
parser_infosc = subparsers.add_parser('last', help='shortcut for mrl info last.')
parser_infosc.set_defaults(mode='info', expId='last')
# ls
parser_ls = subparsers.add_parser('ls', help = 'list output dir, newest first.')
parser_ls.add_argument('-tm', action='store_const', const=True, help='Show timestamp')
parser_ls.add_argument('-ghash', action='store_const', const=True, help='Show git hash')
parser_ls.add_argument('-gdesc', action='store_const', const=True, help='Show git description')
parser_ls.add_argument('-desc', action='store_const', const=True, help='Show experiment description')
parser_ls.set_defaults(mode='ls')
# makebatch
parser_batch = subparsers.add_parser('makebatch', help = 'make batch of config files from batch config template')
parser_batch.add_argument('expId', help='experiment ID', default='last', nargs='?')
parser_batch.add_argument('-replace', help='Overwrite config files if already expanded', action='store_const', const=True)
parser_batch.set_defaults(mode='makebatch')
# analyze
parser_Analyze = subparsers.add_parser('analyze', help = 'Analyze expId by running the functions from analyze module, specified in .mrl.cfg')
parser_Analyze.add_argument('expId', help='experiment ID', default='last', nargs='?')
parser_Analyze.add_argument('-outdir', help='path to output directory, default: expDir/analysis/')
parser_Analyze.set_defaults(mode='analyze')
# functions registered as standalone hooks
if mrl_hooks:
for hook in cfg.hooks:
if 'before_' in hook or 'after_' in hook:
continue
parser_hook = subparsers.add_parser(hook, help = 'custom function from mrl_hooks.py')
parser_hook.add_argument('expId', help='experiment ID', default='last', nargs='?')
for xarg, defaultval in cfg.hooks[hook].iteritems():
if defaultval:
parser_hook.add_argument('-' + xarg, default=defaultval, help='optional xarg, default: {}'.format(defaultval), nargs='?')
else: # named argument, yet required. Slightly bad form.
parser_hook.add_argument('-' + xarg, help='required xarg', required=True) #, nargs='?')
parser_hook.set_defaults(mode=hook)
#PARSE
args = parser.parse_args()
try:
ret = mrlState.execWithHooks(args.mode, args)
if ret: print(ret)
except (NoCleanStateException,\
InvalidExpIdException,\
BatchException,\
ConfParserException) as e:
print(e)
except Exception as e:
        print("Unchecked exception")
raise
|
tomsercu/metarunlog
|
metarunlog/__init__.py
|
Python
|
mit
| 22,170
|
#===--- protocol_graph.py -------------------------*- coding: utf-8 -*----===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
#===----------------------------------------------------------------------===#
#
# Create a graph of the protocol refinement relationships, associated
# types, operator requirements, and defaulted generic operators.
#
# run as follows to view the Nth-largest connected component in a web browser:
#
# N=0 && rm -f /tmp/protocols.dot && \
# python protocol_graph.py stdlib.swift > /tmp/p0.dot && \
# (ccomps -zX#$N -o /tmp/protocols.dot /tmp/p0.dot || true) \
# && dot -Tsvg /tmp/protocols.dot > /tmp/protocols.svg \
# && open /tmp/protocols.svg
#===----------------------------------------------------------------------===#
from __future__ import print_function
import re
import sys
import os
import cgi
# Open 'stdlib.swift' in this directory if no path specified.
args = list(sys.argv) + [os.path.join(os.path.dirname(__file__), 'stdlib.swift')]
reFlags = re.MULTILINE | re.VERBOSE
# Pattern to recognize stdlib identifiers (FIXME: doesn't handle Unicode).
identifier = '[A-Za-z_][A-Za-z0-9_]*'
# Pattern to recognize a (possibly-generic) operator decl.
operator = r'''
(?:(?:prefix|postfix).*)? func \s*
(?=\S)[^A-Za-z_] # non-space, non-identifier: begins an operator name
(?:(?=\S)[^(){])* # rest of operator name
\s*
(<[^>{]+>)? # generic parameter list
\s*
\([^)]*\) # function parameter list
'''
# substitute local variables into the string
def interpolate(string):
import inspect
frame = inspect.currentframe()
return string % frame.f_back.f_locals
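# Illustrative example (added; not in the original source): with the module-level
# pattern `identifier` defined above, interpolate(r'(%(identifier)s)') expands to
# r'([A-Za-z_][A-Za-z0-9_]*)' because the caller's frame locals are used for the
# %-substitution.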
# Given the bodyText of a protocol definition, return a list of
# associated type and operator requirements.
def bodyLines(bodyText):
return [
cgi.escape(b.group(0)) for b in
re.finditer(
r'(typealias\s*' + identifier + r'(\s*[:,]\s*' + identifier + ')?|' + operator + '.*)',
bodyText, reFlags)
]
# Mapping from protocol to associated type / operator requirements
body = {}
# Mapping from a parent protocol to set of children.
graph = {}
# Mapping from protocol to generic operators taking instances as arguments
genericOperators = {}
comments = r'//.* | /[*] (.|\n)*? [*]/'  # FIXME: doesn't respect strings or comment nesting
# read source, stripping all comments
with open(args[1]) as src:
sourceSansComments = re.sub(comments, '', src.read(), flags=reFlags)
genericParameterConstraint = interpolate(r' (%(identifier)s) \s* : \s* (%(identifier)s) ')
def parseGenericOperator(m):
genericParams = m.group(5)
genericOperator = cgi.escape(m.group(0).strip())
functionParamStart = m.end(5) - m.start(0)
functionParams = genericOperator[functionParamStart:]
for m2 in re.finditer(genericParameterConstraint, genericParams, reFlags):
typeParameter = m2.group(1)
protocol = m2.group(2)
# we're only interested if we can find a function parameter of that type
if not re.search(r':\s*%s\s*[,)]' % typeParameter, functionParams):
continue
# Make some replacements in the signature to limit the graph size
letterTau = 'τ'
letterPi = 'π'
abbreviatedSignature = re.sub(
r'\b%s\b' % protocol, letterPi,
re.sub(r'\b%s\b' % typeParameter, letterTau, genericOperator))
genericOperators.setdefault(protocol, set()).add(abbreviatedSignature)
def parseProtocol(m):
child = m.group(1)
# skip irrelevant protocols
if re.match(r'_Builtin.*Convertible', child):
return
graph.setdefault(child, set())
body[child] = bodyLines(m.group(3))
if m.group(2):
for parent in m.group(2).strip().split(","):
if re.match(r'_Builtin.*Convertible', parent):
return
graph.setdefault(parent.strip(), set()).add(child)
protocolsAndOperators = interpolate(r'''
\bprotocol \s+ (%(identifier)s) \s*
(?::\s*([^{]+))? # refinements
{([^{}\n]*(.*\n)*?)} # body
|
%(operator)s [^{]*(?={) # operator definition up to the open brace
''')
# Main parsing loop
for m in re.finditer(protocolsAndOperators, sourceSansComments, reFlags):
if m.group(1):
parseProtocol(m)
elif m.group(5):
parseGenericOperator(m)
# otherwise we matched some non-generic operator
# Find clusters of protocols that have the same name when underscores
# are stripped
# map from potential cluster name to nodes in the cluster
clusterBuilder = {}
for n in graph:
clusterBuilder.setdefault(n.translate(None, '_'), set()).add(n)
# Grab the clusters with more than one member.
clusters = dict((c, nodes) for (c, nodes) in clusterBuilder.items() if len(nodes) > 1)
# A set of all intra-cluster edges
clusterEdges = set(
(s, t) for (c, elements) in clusters.items()
for s in elements
for t in graph[s] if t in elements)
print('digraph ProtocolHierarchies {')
# ; packmode="array1"
print(' mclimit = 100; ranksep=1.5; ')
print(' edge [dir="back"];')
print(' node [shape = box, fontname = Helvetica, fontsize = 10];')
for c in sorted(clusters):
print(' subgraph "cluster_%s" {' % c)
for (s, t) in sorted(clusterEdges):
if s in clusters[c]:
print('%s -> %s [weight=100];' % (s, t))
print('}')
for node in sorted(graph.keys()):
requirements = body.get(node, [])
generics = sorted(genericOperators.get(node, set()))
style = 'solid' if node.startswith('_') else 'bold'
divider = '<HR/>\n' if len(requirements) != 0 and len(generics) != 0 else ''
label = node if len(requirements + generics) == 0 else (
'\n<TABLE BORDER="0">\n<TR><TD>\n%s\n</TD></TR><HR/>\n%s%s%s</TABLE>\n' % (
node,
'\n'.join('<TR><TD>%s</TD></TR>' % r for r in requirements),
divider,
'\n'.join('<TR><TD>%s</TD></TR>' % g for g in generics)))
print(interpolate(' %(node)s [style = %(style)s, label=<%(label)s>]'))
for (parent, children) in sorted(graph.items()):
print(' %s -> {' % parent, end=' ')
print('; '.join(
sorted(child for child in children if not (parent, child) in clusterEdges)), end=' ')
print('}')
print('}')
|
slavapestov/swift
|
utils/protocol_graph.py
|
Python
|
apache-2.0
| 6,618
|
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import random
import string
import tempfile
import uuid
from cloudify.mocks import MockCloudifyContext
from cloudify.context import BootstrapContext
VAGRANT_MACHINE_IP = "10.0.0.5"
MANAGER_IP = '10.0.0.1'
VAGRANT_PATH = os.path.join(tempfile.gettempdir(), "vagrant-vms")
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def get_logger(name):
logger = logging.getLogger(name)
logger.level = logging.DEBUG
return logger
def get_local_context(overriding_properties=None):
blueprint_id = 'mock_blueprint'
deployment_id = 'deployment-{0}'.format(str(uuid.uuid4())[:5])
properties = {
'cloudify_agent': {
'disable_requiretty': False,
}
}
if overriding_properties:
properties.update(overriding_properties)
return MockCloudifyContext(
blueprint_id=blueprint_id,
deployment_id=deployment_id,
properties=properties,
runtime_properties={
'ip': 'localhost'
}
)
def get_remote_context(overriding_properties=None):
blueprint_id = 'mock_blueprint'
node_id = 'node-{0}'.format(str(uuid.uuid4())[:5])
properties = {
'cloudify_agent': {
'user': 'vagrant',
'host': VAGRANT_MACHINE_IP,
'key': '~/.vagrant.d/insecure_private_key',
'port': 2222
}
}
if overriding_properties:
properties.update(overriding_properties)
return MockCloudifyContext(
blueprint_id=blueprint_id,
node_id=node_id,
properties=properties,
runtime_properties={
'ip': '127.0.0.1'
},
bootstrap_context=BootstrapContext({
'cloudify_agent': {
'min_workers': 2,
'max_workers': 5,
'user': 'john doe',
'remote_execution_port': 2222
}
})
)
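# Usage note (added for clarity; illustrative only): overriding_properties is
# applied with dict.update(), so passing {'cloudify_agent': {...}} replaces the
# entire default 'cloudify_agent' dict rather than merging individual keys.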
|
cloudify-cosmo/cloudify-agent-installer-plugin
|
worker_installer/tests/__init__.py
|
Python
|
apache-2.0
| 2,712
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import json
import urllib.request
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
from smallsmilhandler import SmallSMILHandler
class KaraokeLocal():
    """
    Main program: parses the SMIL file given on the command line and stores
    its tags for later processing.
    """
    def __init__(self):
self.original = ""
self.resultante = ""
parser = make_parser()
ccHandler = SmallSMILHandler()
parser.setContentHandler(ccHandler)
parser.parse(open(sys.argv[1]))
self.etiquetas = ccHandler.get_tags()
    def __str__(self):
cadena = ''
for linea in self.etiquetas:
cadena = cadena + linea[0]
dicc = linea[1]
for elem in dicc:
if dicc[elem] != "":
cadena = cadena + '\t' + elem + '="' + dicc[elem] + '"\t'
return (cadena)
    def do_local(self):
        for linea in self.etiquetas:
            dicc = linea[1]
            for elem in dicc:
                if elem == 'src' and dicc[elem].startswith('http://'):
                    # Download the remote resource, keep a local copy and
                    # rewrite the src attribute to point at it.
                    ddp2 = urllib.request.urlopen(dicc[elem])
                    contenido = ddp2.read()
                    ddp2.close()
                    indice = dicc[elem].rfind('/')
                    nombre_local = dicc[elem][indice + 1:]
                    with open(nombre_local, 'wb') as fichero_local:
                        fichero_local.write(contenido)
                    dicc[elem] = nombre_local
    def do_json(self, original, resultante):
        # Dump the parsed tags to JSON; default to karaoke.json when no
        # output file name is given.
        if not resultante:
            resultante = "karaoke.json"
        with open(resultante, 'w') as fichero_json:
            json.dump(self.etiquetas, fichero_json, sort_keys=True, indent=4, separators=(',', ': '))
if __name__ == "__main__":
    try:
        pr = KaraokeLocal()
    except (IndexError, IOError):
        sys.exit("Usage: python3 karaoke.py file.smil.")
    print(pr)
    pr.do_json(pr.original, pr.resultante)
    pr.do_local()
    pr.do_json(pr.original, "local.json")
    print(pr)
|
daviddpurjc/ptavi-p3
|
karaoke.py
|
Python
|
apache-2.0
| 2,151
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
VERSION = "unknown"
class AzureBlobStorageConfiguration(Configuration):
"""Configuration for AzureBlobStorage.
Note that all parameters used to create this instance are saved as instance
attributes.
:param url: The URL of the service account, container, or blob that is the target of the desired operation.
:type url: str
"""
def __init__(
self,
url, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if url is None:
raise ValueError("Parameter 'url' must not be None.")
super(AzureBlobStorageConfiguration, self).__init__(**kwargs)
self.url = url
self.version = "2021-04-10"
kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
|
Azure/azure-sdk-for-python
|
sdk/storage/azure-storage-blob/azure/storage/blob/_generated/_configuration.py
|
Python
|
mit
| 2,479
|
import sys
import os
import subprocess
import re
def run(input_fasta_dirpath):
    """This script is very simple, and not very important. It simply reformats
    the fasta files that were downloaded from the IMGT website, so that there
    are no whitespace-only lines and the sequence for each gene is only one
    line. Note that this script loads all the sequences into memory, so it
    would not be advised to use this on very large fasta files. Also, this
    script will overwrite your input fasta files, so beware of that too."""
if input_fasta_dirpath[-1] != '/':
input_fasta_dirpath += '/'
for i in os.listdir(input_fasta_dirpath):
if i[-6:] == '.fasta':
seq_data = {}
filein = open(input_fasta_dirpath + i, "r")
for j in filein:
if j[0] == '>':
gene_name = j[1:-1].split('|')[1]
seq_data[gene_name] = ''
                elif j.strip():
                    # imgt sequences can sometimes have annoying '.' (gaps)
                    # in them so we remove these
                    seq = re.sub(r'\.', '', j.rstrip('\n'))
                    seq_data[gene_name] += seq
filein.close()
subprocess.call(['rm', input_fasta_dirpath + i])
fileout = open(input_fasta_dirpath + i, "w")
for i in sorted(seq_data):
fileout.write('>' + i + '\n' + seq_data[i] + '\n')
fileout.close()
return
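# Illustrative effect (hypothetical record; real IMGT headers have more fields):
# an input entry such as
# >X00000|IGHV1-1*01|Homo sapiens|F|V-REGION|...
# CAGGTGCAG...
# CAGGTGCAG...
# becomes a single header/sequence pair keyed on the second '|'-separated
# header field, with '.' gap characters removed:
# >IGHV1-1*01
# CAGGTGCAG...CAGGTGCAG...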
if __name__ == '__main__':
run(sys.argv[1])
|
nbstrauli/influenza_vaccination_project
|
scripts/pipeline/reformat_imgt_fasta_files.py
|
Python
|
cc0-1.0
| 1,511
|
#
# file: __init__.py
#
# author: Copyright (C) 2017 Kamil Szczygiel http://www.distortec.com http://www.freddiechopin.info
#
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not
# distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
########################################################################################################################
# registerPrettyPrinters()
########################################################################################################################
def registerPrettyPrinters(obj):
"""Register pretty-printers."""
import PrettyPrinters.estd
PrettyPrinters.estd.registerPrettyPrinters(obj)
import PrettyPrinters.distortos
PrettyPrinters.distortos.registerPrettyPrinters(obj)
|
jasmin-j/distortos
|
scripts/PrettyPrinters/__init__.py
|
Python
|
mpl-2.0
| 826
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Models.FeatureProcessing import *
from keras.models import Sequential
from keras.layers import Activation, Dense, LSTM
from keras.optimizers import Adam, SGD
import numpy as np
import abc
from ClassificationModule import ClassificationModule
class descriptionreponamelstm(ClassificationModule):
"""A basic lstm neural network"""
def __init__(self, num_hidden_layers=3):
ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character")
hidden_size = 300
self.maxlen = 300
# Set output_size
self.output_size = 7 # Hardcoded for 7 classes
model = Sequential()
# Maximum of self.maxlen charcters allowed, each in one-hot-encoded array
model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength())))
for _ in range(num_hidden_layers):
model.add(Dense(hidden_size))
model.add(Dense(self.output_size))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=SGD(),
metrics=['accuracy'])
self.model = model
print "\t-", self.name
def resetAllTraining(self):
"""Reset classification module to status before training"""
resetWeights(self.model)
    def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True):
        """Train (incrementally) on a single sample. Possibly mix in a certain amount of old data so the model does not overfit to the new data."""
readme_vec = self.formatInputData(sample)
label_index = getLabelIndex(sample)
label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras
self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose)
    def train(self, samples, nb_epoch=200, shuffle=True, verbose=True):
        """Train on a list of samples. Further parameters may be needed (nb_epoch, learning_rate, ...)."""
train_samples = []
train_lables = []
for sample in samples:
formatted_sample = self.formatInputData(sample)[0].tolist()
train_samples.append(formatted_sample)
train_lables.append(oneHot(getLabelIndex(sample)))
train_lables = np.asarray(train_lables)
train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights())
self.isTrained = True
return train_result
    def predictLabel(self, sample):
        """Return how the classifier would classify a given sample."""
if not self.isTrained:
return 0
sample = self.formatInputData(sample)
return np.argmax(self.model.predict(sample))
def predictLabelAndProbability(self, sample):
"""Return the probability the module assignes each label"""
if not self.isTrained:
return [0, 0, 0, 0, 0, 0, 0, 0]
sample = self.formatInputData(sample)
prediction = self.model.predict(sample)[0]
return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned
def formatInputData(self, sample):
"""Extract description and transform to vector"""
sd = getDescription(sample)
sd += getName(sample)
# Returns numpy array which contains 1 array with features
return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0)
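# Illustrative usage (hedged sketch; assumes the surrounding application supplies
# repository samples in the format expected by the helpers imported from
# Models.FeatureProcessing):
#
# clf = descriptionreponamelstm()
# clf.train(training_samples)  # one-hot encodes description+name, fits the LSTM
# label_index = clf.predictLabel(some_sample)
# probs = clf.predictLabelAndProbability(some_sample)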
|
Ichaelus/Github-Classifier
|
Application/Models/ClassificationModules/descriptionreponamelstm.py
|
Python
|
mit
| 3,641
|
import hashlib
import binascii
try:
from Crypto.Cipher import AES
except ImportError: # Damn you, OS X
import crypto, sys
sys.modules['Crypto'] = crypto
from crypto.Cipher import AES
PADDING = b'\x42'
BLOCK_SIZE = 16
KEY_LENGTH = 32
STREAM_LENGTH = 3
def _ensure_bytes_key(key, length):
if type(key) is str:
key = bytes(key, encoding='utf-8')
key += bytes(length)
return key[:length]
def _pad_data(data, block_size):
padding_required = block_size - (len(data) % block_size)
data += PADDING * padding_required
assert len(data) % block_size == 0
return data
def _unpad_data(data, dest_size, block_size):
return data[:dest_size]
def _keystream(key_length, seed):
last_key = seed
    key_length *= 2  # two hex characters per byte of key material
while True:
next_key = ""
while len(next_key) < key_length:
next_key += hashlib.sha1((next_key if next_key else last_key).encode('utf-8')).hexdigest()
yield binascii.unhexlify(next_key[:key_length])
last_key = next_key
def get_ecb_encrypter(key, block_size=BLOCK_SIZE, key_length=KEY_LENGTH):
"""
Creates a function which can be used to encrypt a bytes literal with ECB
encryption and a given key.
:param key: The key string to use for the encrypter. This will be padded to `key_length` bytes.
:param block_size: Optional. The size for the blocks to encrypt. Must be a multiple of 16.
:param key_length: Optional. The length of the key. Must be 16, 24 or 32.
:return: A function which takes a bytes literal and returns the encrypted bytes literal.
"""
key = _ensure_bytes_key(key, length=key_length)
def encrypter(x):
x = _pad_data(x, block_size=block_size)
cipher = AES.AESCipher(key[:32], AES.MODE_ECB)
return cipher.encrypt(x)
return encrypter
def get_ecb_decrypter(key, block_size=BLOCK_SIZE, key_length=KEY_LENGTH):
"""
Creates a function which can be used to decrypt a bytes literal
encrypted using with ECB encryption and a given key.
Please note that the resulting decrypter will accept input data which is not
a multiple of the block size (generally 16). This may be the case when using
this function with encrypted data which has been truncated (to fit an image, for example).
If that's the case it will pad the input data. This means that the last decrypted output
block may contain random noise.
:param key: The key string to use for the decrypter. This will be padded to `key_length` bytes.
:param block_size: Optional. The size for the blocks to encrypt. Must be a multiple of 16.
:param key_length: Optional. The length of the key. Must be 16, 24 or 32.
:return: A function which takes an encrypted bytes literal and returns the decrypted bytes literal.
"""
key = _ensure_bytes_key(key, length=key_length)
def decrypter(x):
cipher = AES.AESCipher(key[:32], AES.MODE_ECB)
dest_size = len(x)
x = _pad_data(x, block_size=block_size) # Yes, this is a hack -- read above.
x = cipher.decrypt(x)
x = _unpad_data(x, dest_size=dest_size, block_size=block_size)
return x
return decrypter
def _strxor(a, b):
if len(a) != len(b):
raise ValueError("Inputs need to be of the same length.")
if type(a) is str:
a = bytes(a, 'latin1')
if type(b) is str:
b = bytes(b, 'latin1')
return bytes([i ^ j for i, j in zip(a, b)])
def get_stream_cipher(seed, size=STREAM_LENGTH):
"""
Creates a function which can be used as a stream cipher, and will be able to
encrypt *and* decrypt bytes literals.
Please note that the generated function is NOT a secure stream cipher in any way.
The purpose of this function is to create an image which looks similar to one encrypted
with a proper stream cipher and deciphers correctly.
:param seed: A string that will be used to seed the stream cipher.
:param size: The size (in bytes) of the state of the stream cipher. This must be a divisor of the input size.
It defaults to three bytes -- as that's always the case for uncompressed RGB images.
:return: A function which takes a bytes literal and returns a bytes literal.
"""
def encrypter(x):
y = b''
for data_start_index, key in zip(range(0, len(x), size),
_keystream(key_length=size, seed=seed)):
y += _strxor(x[data_start_index:data_start_index + size], key)
return y
return encrypter
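# Minimal round-trip sketch (added for illustration; the key, seed and messages
# below are made up). Note that the ECB decrypter keeps the b'\x42' padding
# bytes, because _unpad_data only knows the ciphertext length, so the check
# uses startswith() rather than equality.
if __name__ == "__main__":
    enc = get_ecb_encrypter("not a real key")
    dec = get_ecb_decrypter("not a real key")
    ciphertext = enc(b"hello world")
    assert dec(ciphertext).startswith(b"hello world")
    # The stream cipher is its own inverse: XORing twice with the keystream
    # derived from the same seed restores the original bytes. The input length
    # is a multiple of STREAM_LENGTH here, as the docstring requires.
    stream = get_stream_cipher("some seed")
    assert stream(stream(b"abcdef")) == b"abcdef"
    print("cp_ecb round-trip checks passed")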
|
AlfioEmanueleFresta/practical-ecb-lib
|
cp_ecb/encryption.py
|
Python
|
gpl-3.0
| 4,616
|
import os
from textwrap import dedent
import os.path
import shutil
try:
import unittest2 as unittest
except ImportError:
import unittest
from rope.base.exceptions import RopeError, ResourceNotFoundError
from rope.base.fscommands import FileSystemCommands
from rope.base.libutils import path_to_resource
from rope.base.project import Project, NoProject, _realpath
from ropetest import testutils
from rope.base.resourceobserver import FilteredResourceObserver
class ProjectTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.project = testutils.sample_project(
foldername="sampleproject", ropefolder=None
)
self.project_root = self.project.address
self._make_sample_project()
self.no_project = NoProject()
def _make_sample_project(self):
self.sample_file = "sample_file.txt"
self.sample_path = os.path.join(self.project_root, "sample_file.txt")
if not os.path.exists(self.project_root):
os.mkdir(self.project_root)
self.sample_folder = "sample_folder"
os.mkdir(os.path.join(self.project_root, self.sample_folder))
sample = open(self.sample_path, "w")
sample.write("sample text\n")
sample.close()
def tearDown(self):
testutils.remove_project(self.project)
unittest.TestCase.tearDown(self)
def test_project_creation(self):
self.assertEqual(_realpath(self.project_root), self.project.address)
def test_getting_project_file(self):
project_file = self.project.get_resource(self.sample_file)
self.assertTrue(project_file is not None)
def test_project_file_reading(self):
projectFile = self.project.get_resource(self.sample_file)
self.assertEqual("sample text\n", projectFile.read())
def test_getting_not_existing_project_file(self):
with self.assertRaises(ResourceNotFoundError):
self.project.get_resource("DoesNotExistFile.txt")
def test_writing_in_project_files(self):
project_file = self.project.get_resource(self.sample_file)
project_file.write("another text\n")
self.assertEqual("another text\n", project_file.read())
def test_creating_files(self):
project_file = "newfile.txt"
self.project.root.create_file(project_file)
newFile = self.project.get_resource(project_file)
self.assertTrue(newFile is not None)
def test_creating_files_that_already_exist(self):
with self.assertRaises(RopeError):
self.project.root.create_file(self.sample_file)
def test_making_root_folder_if_it_does_not_exist(self):
project = Project("sampleproject2")
try:
self.assertTrue(
os.path.exists("sampleproject2") and os.path.isdir("sampleproject2")
)
finally:
testutils.remove_project(project)
def test_failure_when_project_root_exists_and_is_a_file(self):
project_root = "sampleproject2"
try:
open(project_root, "w").close()
with self.assertRaises(RopeError):
Project(project_root)
finally:
testutils.remove_recursively(project_root)
def test_creating_folders(self):
folderName = "SampleFolder"
self.project.root.create_folder(folderName)
folderPath = os.path.join(self.project.address, folderName)
self.assertTrue(os.path.exists(folderPath) and os.path.isdir(folderPath))
def test_making_folder_that_already_exists(self):
folderName = "SampleFolder"
with self.assertRaises(RopeError):
self.project.root.create_folder(folderName)
self.project.root.create_folder(folderName)
def test_failing_if_creating_folder_while_file_already_exists(self):
folderName = "SampleFolder"
with self.assertRaises(RopeError):
self.project.root.create_file(folderName)
self.project.root.create_folder(folderName)
def test_creating_file_inside_folder(self):
folder_name = "sampleFolder"
file_name = "sample2.txt"
file_path = folder_name + "/" + file_name
parent_folder = self.project.root.create_folder(folder_name)
parent_folder.create_file(file_name)
file = self.project.get_resource(file_path)
file.write("sample notes")
self.assertEqual(file_path, file.path)
self.assertEqual(
"sample notes", open(os.path.join(self.project.address, file_path)).read()
)
def test_failing_when_creating_file_inside_non_existent_folder(self):
with self.assertRaises(ResourceNotFoundError):
self.project.root.create_file("NonexistentFolder/SomeFile.txt")
def test_nested_directories(self):
folder_name = "SampleFolder"
parent = self.project.root.create_folder(folder_name)
parent.create_folder(folder_name)
folder_path = os.path.join(self.project.address, folder_name, folder_name)
self.assertTrue(os.path.exists(folder_path) and os.path.isdir(folder_path))
def test_removing_files(self):
self.assertTrue(os.path.exists(self.sample_path))
self.project.get_resource(self.sample_file).remove()
self.assertFalse(os.path.exists(self.sample_path))
def test_removing_files_invalidating_in_project_resource_pool(self):
root_folder = self.project.root
my_file = root_folder.create_file("my_file.txt")
my_file.remove()
self.assertFalse(root_folder.has_child("my_file.txt"))
def test_removing_directories(self):
self.assertTrue(
os.path.exists(os.path.join(self.project.address, self.sample_folder))
)
self.project.get_resource(self.sample_folder).remove()
self.assertFalse(
os.path.exists(os.path.join(self.project.address, self.sample_folder))
)
def test_removing_non_existent_files(self):
with self.assertRaises(ResourceNotFoundError):
self.project.get_resource("NonExistentFile.txt").remove()
def test_removing_nested_files(self):
file_name = self.sample_folder + "/sample_file.txt"
self.project.root.create_file(file_name)
self.project.get_resource(file_name).remove()
self.assertTrue(
os.path.exists(os.path.join(self.project.address, self.sample_folder))
)
self.assertTrue(
not os.path.exists(os.path.join(self.project.address, file_name))
)
def test_file_get_name(self):
file = self.project.get_resource(self.sample_file)
self.assertEqual(self.sample_file, file.name)
file_name = "nestedFile.txt"
parent = self.project.get_resource(self.sample_folder)
filePath = self.sample_folder + "/" + file_name
parent.create_file(file_name)
nestedFile = self.project.get_resource(filePath)
self.assertEqual(file_name, nestedFile.name)
def test_folder_get_name(self):
folder = self.project.get_resource(self.sample_folder)
self.assertEqual(self.sample_folder, folder.name)
def test_file_get_path(self):
file = self.project.get_resource(self.sample_file)
self.assertEqual(self.sample_file, file.path)
fileName = "nestedFile.txt"
parent = self.project.get_resource(self.sample_folder)
filePath = self.sample_folder + "/" + fileName
parent.create_file(fileName)
nestedFile = self.project.get_resource(filePath)
self.assertEqual(filePath, nestedFile.path)
def test_folder_get_path(self):
folder = self.project.get_resource(self.sample_folder)
self.assertEqual(self.sample_folder, folder.path)
def test_is_folder(self):
self.assertTrue(self.project.get_resource(self.sample_folder).is_folder())
self.assertTrue(not self.project.get_resource(self.sample_file).is_folder())
def testget_children(self):
children = self.project.get_resource(self.sample_folder).get_children()
self.assertEqual([], children)
def test_nonempty_get_children(self):
file_name = "nestedfile.txt"
filePath = self.sample_folder + "/" + file_name
parent = self.project.get_resource(self.sample_folder)
parent.create_file(file_name)
children = parent.get_children()
self.assertEqual(1, len(children))
self.assertEqual(filePath, children[0].path)
def test_nonempty_get_children2(self):
file_name = "nestedfile.txt"
folder_name = "nestedfolder.txt"
filePath = self.sample_folder + "/" + file_name
folderPath = self.sample_folder + "/" + folder_name
parent = self.project.get_resource(self.sample_folder)
parent.create_file(file_name)
parent.create_folder(folder_name)
children = parent.get_children()
self.assertEqual(2, len(children))
self.assertTrue(filePath == children[0].path or filePath == children[1].path)
self.assertTrue(
folderPath == children[0].path or folderPath == children[1].path
)
def test_does_not_fail_for_permission_denied(self):
bad_dir = os.path.join(self.sample_folder, "bad_dir")
os.makedirs(bad_dir)
self.addCleanup(shutil.rmtree, bad_dir)
os.chmod(bad_dir, 0o000)
try:
parent = self.project.get_resource(self.sample_folder)
parent.get_children()
finally:
os.chmod(bad_dir, 0o755)
def test_getting_files(self):
files = self.project.root.get_files()
self.assertEqual(1, len(files))
self.assertTrue(self.project.get_resource(self.sample_file) in files)
def test_getting_folders(self):
folders = self.project.root.get_folders()
self.assertEqual(1, len(folders))
self.assertTrue(self.project.get_resource(self.sample_folder) in folders)
def test_nested_folder_get_files(self):
parent = self.project.root.create_folder("top")
parent.create_file("file1.txt")
parent.create_file("file2.txt")
files = parent.get_files()
self.assertEqual(2, len(files))
self.assertTrue(self.project.get_resource("top/file2.txt") in files)
self.assertEqual(0, len(parent.get_folders()))
def test_nested_folder_get_folders(self):
parent = self.project.root.create_folder("top")
parent.create_folder("dir1")
parent.create_folder("dir2")
folders = parent.get_folders()
self.assertEqual(2, len(folders))
self.assertTrue(self.project.get_resource("top/dir1") in folders)
self.assertEqual(0, len(parent.get_files()))
def test_root_folder(self):
root_folder = self.project.root
self.assertEqual(2, len(root_folder.get_children()))
self.assertEqual("", root_folder.path)
self.assertEqual("", root_folder.name)
def test_get_all_files(self):
files = tuple(self.project.get_files())
self.assertEqual(1, len(files))
self.assertEqual(self.sample_file, files[0].name)
def test_get_all_files_after_changing(self):
self.assertEqual(1, len(self.project.get_files()))
myfile = self.project.root.create_file("myfile.txt")
self.assertEqual(2, len(self.project.get_files()))
myfile.move("newfile.txt")
self.assertEqual(2, len(self.project.get_files()))
self.project.get_file("newfile.txt").remove()
self.assertEqual(1, len(self.project.get_files()))
def test_multifile_get_all_files(self):
fileName = "nestedFile.txt"
parent = self.project.get_resource(self.sample_folder)
parent.create_file(fileName)
files = list(self.project.get_files())
self.assertEqual(2, len(files))
self.assertTrue(fileName == files[0].name or fileName == files[1].name)
def test_ignoring_dot_pyc_files_in_get_files(self):
root = self.project.address
src_folder = os.path.join(root, "src")
os.mkdir(src_folder)
test_pyc = os.path.join(src_folder, "test.pyc")
open(test_pyc, "w").close()
for x in self.project.get_files():
self.assertNotEqual("src/test.pyc", x.path)
def test_folder_creating_files(self):
projectFile = "NewFile.txt"
self.project.root.create_file(projectFile)
new_file = self.project.get_resource(projectFile)
self.assertTrue(new_file is not None and not new_file.is_folder())
def test_folder_creating_nested_files(self):
project_file = "NewFile.txt"
parent_folder = self.project.get_resource(self.sample_folder)
parent_folder.create_file(project_file)
new_file = self.project.get_resource(self.sample_folder + "/" + project_file)
self.assertTrue(new_file is not None and not new_file.is_folder())
def test_folder_creating_files2(self):
projectFile = "newfolder"
self.project.root.create_folder(projectFile)
new_folder = self.project.get_resource(projectFile)
self.assertTrue(new_folder is not None and new_folder.is_folder())
def test_folder_creating_nested_files2(self):
project_file = "newfolder"
parent_folder = self.project.get_resource(self.sample_folder)
parent_folder.create_folder(project_file)
new_folder = self.project.get_resource(self.sample_folder + "/" + project_file)
self.assertTrue(new_folder is not None and new_folder.is_folder())
def test_folder_get_child(self):
folder = self.project.root
folder.create_file("myfile.txt")
folder.create_folder("myfolder")
self.assertEqual(
self.project.get_resource("myfile.txt"), folder.get_child("myfile.txt")
)
self.assertEqual(
self.project.get_resource("myfolder"), folder.get_child("myfolder")
)
def test_folder_get_child_nested(self):
root = self.project.root
folder = root.create_folder("myfolder")
folder.create_file("myfile.txt")
folder.create_folder("myfolder")
self.assertEqual(
self.project.get_resource("myfolder/myfile.txt"),
folder.get_child("myfile.txt"),
)
self.assertEqual(
self.project.get_resource("myfolder/myfolder"), folder.get_child("myfolder")
)
def test_project_root_is_root_folder(self):
self.assertEqual("", self.project.root.path)
def test_moving_files(self):
root_folder = self.project.root
my_file = root_folder.create_file("my_file.txt")
my_file.move("my_other_file.txt")
self.assertFalse(my_file.exists())
root_folder.get_child("my_other_file.txt")
def test_moving_folders(self):
root_folder = self.project.root
my_folder = root_folder.create_folder("my_folder")
my_file = my_folder.create_file("my_file.txt")
my_folder.move("new_folder")
self.assertFalse(root_folder.has_child("my_folder"))
self.assertFalse(my_file.exists())
self.assertTrue(root_folder.get_child("new_folder") is not None)
def test_moving_destination_folders(self):
root_folder = self.project.root
my_folder = root_folder.create_folder("my_folder")
my_file = root_folder.create_file("my_file.txt")
my_file.move("my_folder")
self.assertFalse(root_folder.has_child("my_file.txt"))
self.assertFalse(my_file.exists())
my_folder.get_child("my_file.txt")
def test_moving_files_and_resource_objects(self):
root_folder = self.project.root
my_file = root_folder.create_file("my_file.txt")
old_hash = hash(my_file)
my_file.move("my_other_file.txt")
self.assertEqual(old_hash, hash(my_file))
def test_file_encoding_reading(self):
sample_file = self.project.root.create_file("my_file.txt")
contents = (
b"# -*- coding: utf-8 -*-\n"
+ br"#\N{LATIN SMALL LETTER I WITH DIAERESIS}\n"
).decode("utf8")
file = open(sample_file.real_path, "wb")
file.write(contents.encode("utf-8"))
file.close()
self.assertEqual(contents, sample_file.read())
def test_file_encoding_writing(self):
sample_file = self.project.root.create_file("my_file.txt")
contents = (
b"# -*- coding: utf-8 -*-\n" + br"\N{LATIN SMALL LETTER I WITH DIAERESIS}\n"
).decode("utf8")
sample_file.write(contents)
self.assertEqual(contents, sample_file.read())
def test_using_utf8_when_writing_in_case_of_errors(self):
sample_file = self.project.root.create_file("my_file.txt")
contents = br"\n\N{LATIN SMALL LETTER I WITH DIAERESIS}\n".decode("utf8")
sample_file.write(contents)
self.assertEqual(contents, sample_file.read())
def test_encoding_declaration_in_the_second_line(self):
sample_file = self.project.root.create_file("my_file.txt")
contents = b"\n# -*- coding: latin-1 -*-\n\xa9\n"
file = open(sample_file.real_path, "wb")
file.write(contents)
file.close()
self.assertEqual(contents, sample_file.read().encode("latin-1"))
def test_not_an_encoding_declaration(self):
sample_file = self.project.root.create_file("my_file.txt")
contents = b"def my_method(self, encoding='latin-1'):\n var = {}\n\xc2\xa9\n"
file = open(sample_file.real_path, "wb")
file.write(contents)
file.close()
self.assertEqual(contents, sample_file.read().encode("utf-8"))
self.assertNotEqual(contents, sample_file.read().encode("latin-1"))
def test_read_bytes(self):
sample_file = self.project.root.create_file("my_file.txt")
contents = b"\n# -*- coding: latin-1 -*-\n\xa9\n"
file = open(sample_file.real_path, "wb")
file.write(contents)
file.close()
self.assertEqual(contents, sample_file.read_bytes())
def test_file_with_unix_line_ending(self):
sample_file = self.project.root.create_file("my_file.txt")
contents = b"1\n"
file = open(sample_file.real_path, "wb")
file.write(contents)
file.close()
self.assertIsNone(sample_file.newlines)
self.assertEqual("1\n", sample_file.read())
self.assertEqual("\n", sample_file.newlines)
sample_file.write("1\n")
self.assertEqual(b"1\n", sample_file.read_bytes())
def test_file_with_dos_line_ending(self):
sample_file = self.project.root.create_file("my_file.txt")
contents = b"1\r\n"
file = open(sample_file.real_path, "wb")
file.write(contents)
file.close()
self.assertIsNone(sample_file.newlines)
self.assertEqual("1\n", sample_file.read())
self.assertEqual("\r\n", sample_file.newlines)
sample_file.write("1\n")
self.assertEqual(b"1\r\n", sample_file.read_bytes())
def test_file_with_mac_line_ending(self):
sample_file = self.project.root.create_file("my_file.txt")
contents = b"1\r"
file = open(sample_file.real_path, "wb")
file.write(contents)
file.close()
self.assertIsNone(sample_file.newlines)
self.assertEqual("1\n", sample_file.read())
self.assertEqual("\r", sample_file.newlines)
sample_file.write("1\n")
self.assertEqual(b"1\r", sample_file.read_bytes())
def test_file_binary(self):
sample_file = self.project.root.create_file("my_file.txt")
contents = b"1\r\n"
file = open(sample_file.real_path, "wb")
file.write(contents)
file.close()
self.assertIsNone(sample_file.newlines)
self.assertEqual(b"1\r\n", sample_file.read_bytes())
self.assertIsNone(sample_file.newlines)
sample_file.write(b"1\nx\r")
self.assertEqual((b"1\nx\r"), sample_file.read_bytes())
# TODO: Detecting utf-16 encoding
def xxx_test_using_utf16(self):
sample_file = self.project.root.create_file("my_file.txt")
contents = b"# -*- coding: utf-16 -*-\n# This is a sample file ...\n"
file = open(sample_file.real_path, "w")
file.write(contents.encode("utf-16"))
file.close()
sample_file.write(contents)
self.assertEqual(contents, sample_file.read())
# XXX: supporting utf_8_sig
def xxx_test_file_encoding_reading_for_notepad_styles(self):
sample_file = self.project.root.create_file("my_file.txt")
contents = "#\N{LATIN SMALL LETTER I WITH DIAERESIS}\n"
file = open(sample_file.real_path, "w")
# file.write('\xef\xbb\xbf')
file.write(contents.encode("utf-8-sig"))
file.close()
self.assertEqual(contents, sample_file.read())
def test_using_project_get_file(self):
myfile = self.project.get_file(self.sample_file)
self.assertTrue(myfile.exists())
def test_using_file_create(self):
myfile = self.project.get_file("myfile.txt")
self.assertFalse(myfile.exists())
myfile.create()
self.assertTrue(myfile.exists())
self.assertFalse(myfile.is_folder())
def test_using_folder_create(self):
myfolder = self.project.get_folder("myfolder")
self.assertFalse(myfolder.exists())
myfolder.create()
self.assertTrue(myfolder.exists())
self.assertTrue(myfolder.is_folder())
def test_exception_when_creating_twice(self):
with self.assertRaises(RopeError):
myfile = self.project.get_file("myfile.txt")
myfile.create()
myfile.create()
def test_exception_when_parent_does_not_exist(self):
with self.assertRaises(ResourceNotFoundError):
myfile = self.project.get_file("myfolder/myfile.txt")
myfile.create()
def test_simple_path_to_resource(self):
myfile = self.project.root.create_file("myfile.txt")
self.assertEqual(myfile, path_to_resource(self.project, myfile.real_path))
self.assertEqual(
myfile, path_to_resource(self.project, myfile.real_path, type="file")
)
myfolder = self.project.root.create_folder("myfolder")
self.assertEqual(myfolder, path_to_resource(self.project, myfolder.real_path))
self.assertEqual(
myfolder, path_to_resource(self.project, myfolder.real_path, type="folder")
)
@testutils.skipNotPOSIX()
def test_ignoring_symlinks_inside_project(self):
project2 = testutils.sample_project(folder_name="sampleproject2")
mod = project2.root.create_file("mod.py")
try:
path = os.path.join(self.project.address, "linkedfile.txt")
os.symlink(mod.real_path, path)
files = self.project.root.get_files()
self.assertEqual(1, len(files))
finally:
testutils.remove_project(project2)
def test_getting_empty_source_folders(self):
self.assertEqual([], self.project.get_source_folders())
def test_root_source_folder(self):
self.project.root.create_file("sample.py")
source_folders = self.project.get_source_folders()
self.assertEqual(1, len(source_folders))
self.assertTrue(self.project.root in source_folders)
def test_root_source_folder2(self):
self.project.root.create_file("mod1.py")
self.project.root.create_file("mod2.py")
source_folders = self.project.get_source_folders()
self.assertEqual(1, len(source_folders))
self.assertTrue(self.project.root in source_folders)
def test_src_source_folder(self):
src = self.project.root.create_folder("src")
src.create_file("sample.py")
source_folders = self.project.get_source_folders()
self.assertEqual(1, len(source_folders))
self.assertTrue(self.project.get_resource("src") in source_folders)
def test_packages(self):
src = self.project.root.create_folder("src")
pkg = src.create_folder("package")
pkg.create_file("__init__.py")
source_folders = self.project.get_source_folders()
self.assertEqual(1, len(source_folders))
self.assertTrue(src in source_folders)
def test_multi_source_folders(self):
src = self.project.root.create_folder("src")
package = src.create_folder("package")
package.create_file("__init__.py")
test = self.project.root.create_folder("test")
test.create_file("alltests.py")
source_folders = self.project.get_source_folders()
self.assertEqual(2, len(source_folders))
self.assertTrue(src in source_folders)
self.assertTrue(test in source_folders)
def test_multi_source_folders2(self):
testutils.create_module(self.project, "mod1")
src = self.project.root.create_folder("src")
package = testutils.create_package(self.project, "package", src)
testutils.create_module(self.project, "mod2", package)
source_folders = self.project.get_source_folders()
self.assertEqual(2, len(source_folders))
self.assertTrue(self.project.root in source_folders and src in source_folders)
class ResourceObserverTest(unittest.TestCase):
def setUp(self):
super(ResourceObserverTest, self).setUp()
self.project = testutils.sample_project()
def tearDown(self):
testutils.remove_project(self.project)
super(ResourceObserverTest, self).tearDown()
def test_resource_change_observer(self):
sample_file = self.project.root.create_file("my_file.txt")
sample_file.write("a sample file version 1")
sample_observer = _SampleObserver()
self.project.add_observer(sample_observer)
sample_file.write("a sample file version 2")
self.assertEqual(1, sample_observer.change_count)
self.assertEqual(sample_file, sample_observer.last_changed)
def test_resource_change_observer_after_removal(self):
sample_file = self.project.root.create_file("my_file.txt")
sample_file.write("text")
sample_observer = _SampleObserver()
self.project.add_observer(
FilteredResourceObserver(sample_observer, [sample_file])
)
sample_file.remove()
self.assertEqual(1, sample_observer.change_count)
self.assertEqual(sample_file, sample_observer.last_removed)
def test_resource_change_observer2(self):
sample_file = self.project.root.create_file("my_file.txt")
sample_observer = _SampleObserver()
self.project.add_observer(sample_observer)
self.project.remove_observer(sample_observer)
sample_file.write("a sample file version 2")
self.assertEqual(0, sample_observer.change_count)
def test_resource_change_observer_for_folders(self):
root_folder = self.project.root
my_folder = root_folder.create_folder("my_folder")
my_folder_observer = _SampleObserver()
root_folder_observer = _SampleObserver()
self.project.add_observer(
FilteredResourceObserver(my_folder_observer, [my_folder])
)
self.project.add_observer(
FilteredResourceObserver(root_folder_observer, [root_folder])
)
my_file = my_folder.create_file("my_file.txt")
self.assertEqual(1, my_folder_observer.change_count)
my_file.move("another_file.txt")
self.assertEqual(2, my_folder_observer.change_count)
self.assertEqual(1, root_folder_observer.change_count)
self.project.get_resource("another_file.txt").remove()
self.assertEqual(2, my_folder_observer.change_count)
self.assertEqual(2, root_folder_observer.change_count)
def test_resource_change_observer_after_moving(self):
sample_file = self.project.root.create_file("my_file.txt")
sample_observer = _SampleObserver()
self.project.add_observer(sample_observer)
sample_file.move("new_file.txt")
self.assertEqual(1, sample_observer.change_count)
self.assertEqual(
(sample_file, self.project.get_resource("new_file.txt")),
sample_observer.last_moved,
)
def test_revalidating_files(self):
root = self.project.root
my_file = root.create_file("my_file.txt")
sample_observer = _SampleObserver()
self.project.add_observer(FilteredResourceObserver(sample_observer, [my_file]))
os.remove(my_file.real_path)
self.project.validate(root)
self.assertEqual(my_file, sample_observer.last_removed)
self.assertEqual(1, sample_observer.change_count)
def test_revalidating_files_and_no_changes2(self):
root = self.project.root
my_file = root.create_file("my_file.txt")
sample_observer = _SampleObserver()
self.project.add_observer(FilteredResourceObserver(sample_observer, [my_file]))
self.project.validate(root)
self.assertEqual(None, sample_observer.last_moved)
self.assertEqual(0, sample_observer.change_count)
def test_revalidating_folders(self):
root = self.project.root
my_folder = root.create_folder("myfolder")
my_file = my_folder.create_file("myfile.txt") # noqa
sample_observer = _SampleObserver()
self.project.add_observer(
FilteredResourceObserver(sample_observer, [my_folder])
)
testutils.remove_recursively(my_folder.real_path)
self.project.validate(root)
self.assertEqual(my_folder, sample_observer.last_removed)
self.assertEqual(1, sample_observer.change_count)
def test_removing_and_adding_resources_to_filtered_observer(self):
my_file = self.project.root.create_file("my_file.txt")
sample_observer = _SampleObserver()
filtered_observer = FilteredResourceObserver(sample_observer)
self.project.add_observer(filtered_observer)
my_file.write("1")
self.assertEqual(0, sample_observer.change_count)
filtered_observer.add_resource(my_file)
my_file.write("2")
self.assertEqual(1, sample_observer.change_count)
filtered_observer.remove_resource(my_file)
my_file.write("3")
self.assertEqual(1, sample_observer.change_count)
def test_validation_and_changing_files(self):
my_file = self.project.root.create_file("my_file.txt")
sample_observer = _SampleObserver()
timekeeper = _MockChangeIndicator()
filtered_observer = FilteredResourceObserver(
sample_observer, [my_file], timekeeper=timekeeper
)
self.project.add_observer(filtered_observer)
self._write_file(my_file.real_path)
timekeeper.set_indicator(my_file, 1)
self.project.validate(self.project.root)
self.assertEqual(1, sample_observer.change_count)
def test_validation_and_changing_files2(self):
my_file = self.project.root.create_file("my_file.txt")
sample_observer = _SampleObserver()
timekeeper = _MockChangeIndicator()
self.project.add_observer(
FilteredResourceObserver(sample_observer, [my_file], timekeeper=timekeeper)
)
timekeeper.set_indicator(my_file, 1)
my_file.write("hey")
self.assertEqual(1, sample_observer.change_count)
self.project.validate(self.project.root)
self.assertEqual(1, sample_observer.change_count)
def test_not_reporting_multiple_changes_to_folders(self):
root = self.project.root
file1 = root.create_file("file1.txt")
file2 = root.create_file("file2.txt")
sample_observer = _SampleObserver()
self.project.add_observer(
FilteredResourceObserver(sample_observer, [root, file1, file2])
)
os.remove(file1.real_path)
os.remove(file2.real_path)
self.assertEqual(0, sample_observer.change_count)
self.project.validate(self.project.root)
self.assertEqual(3, sample_observer.change_count)
def _write_file(self, path):
my_file = open(path, "w")
my_file.write("\n")
my_file.close()
def test_moving_and_being_interested_about_a_folder_and_a_child(self):
my_folder = self.project.root.create_folder("my_folder")
my_file = my_folder.create_file("my_file.txt")
sample_observer = _SampleObserver()
filtered_observer = FilteredResourceObserver(
sample_observer, [my_folder, my_file]
)
self.project.add_observer(filtered_observer)
my_folder.move("new_folder")
self.assertEqual(2, sample_observer.change_count)
def test_contains_for_folders(self):
folder1 = self.project.root.create_folder("folder")
folder2 = self.project.root.create_folder("folder2")
self.assertFalse(folder1.contains(folder2))
def test_validating_when_created(self):
root = self.project.root
my_file = self.project.get_file("my_file.txt")
sample_observer = _SampleObserver()
self.project.add_observer(FilteredResourceObserver(sample_observer, [my_file]))
open(my_file.real_path, "w").close()
self.project.validate(root)
self.assertEqual(my_file, sample_observer.last_created)
self.assertEqual(1, sample_observer.change_count)
def test_validating_twice_when_created(self):
root = self.project.root
my_file = self.project.get_file("my_file.txt")
sample_observer = _SampleObserver()
self.project.add_observer(FilteredResourceObserver(sample_observer, [my_file]))
open(my_file.real_path, "w").close()
self.project.validate(root)
self.project.validate(root)
self.assertEqual(my_file, sample_observer.last_created)
self.assertEqual(1, sample_observer.change_count)
def test_changes_and_adding_resources(self):
root = self.project.root # noqa
file1 = self.project.get_file("file1.txt")
file2 = self.project.get_file("file2.txt")
file1.create()
sample_observer = _SampleObserver()
self.project.add_observer(
FilteredResourceObserver(sample_observer, [file1, file2])
)
file1.move(file2.path)
self.assertEqual(2, sample_observer.change_count)
self.assertEqual(file2, sample_observer.last_created)
self.assertEqual((file1, file2), sample_observer.last_moved)
def test_validating_get_files_list(self):
root = self.project.root # noqa
self.assertEqual(0, len(self.project.get_files()))
file = open(os.path.join(self.project.address, "myfile.txt"), "w")
file.close()
self.project.validate()
self.assertEqual(1, len(self.project.get_files()))
def test_clear_observered_resources_for_filtered_observers(self):
sample_file = self.project.root.create_file("myfile.txt")
sample_observer = _SampleObserver()
filtered = FilteredResourceObserver(sample_observer)
self.project.add_observer(filtered)
filtered.add_resource(sample_file)
filtered.clear_resources()
sample_file.write("1")
self.assertEqual(0, sample_observer.change_count)
class _MockChangeIndicator(object):
def __init__(self):
self.times = {}
def set_indicator(self, resource, time):
self.times[resource] = time
def get_indicator(self, resource):
return self.times.get(resource, 0)
class _SampleObserver(object):
def __init__(self):
self.change_count = 0
self.last_changed = None
self.last_moved = None
self.last_created = None
self.last_removed = None
def resource_changed(self, resource):
self.last_changed = resource
self.change_count += 1
def resource_moved(self, resource, new_resource):
self.last_moved = (resource, new_resource)
self.change_count += 1
def resource_created(self, resource):
self.last_created = resource
self.change_count += 1
def resource_removed(self, resource):
self.last_removed = resource
self.change_count += 1
class OutOfProjectTest(unittest.TestCase):
def setUp(self):
super(OutOfProjectTest, self).setUp()
self.test_directory = "temp_test_directory"
testutils.remove_recursively(self.test_directory)
os.mkdir(self.test_directory)
self.project = testutils.sample_project()
self.no_project = NoProject()
def tearDown(self):
testutils.remove_project(self.project)
testutils.remove_recursively(self.test_directory)
super(OutOfProjectTest, self).tearDown()
def test_simple_out_of_project_file(self):
sample_file_path = os.path.join(self.test_directory, "sample.txt")
sample_file = open(sample_file_path, "w")
sample_file.write("sample content\n")
sample_file.close()
sample_resource = self.no_project.get_resource(sample_file_path)
self.assertEqual("sample content\n", sample_resource.read())
def test_simple_out_of_project_folder(self):
sample_folder_path = os.path.join(self.test_directory, "sample_folder")
os.mkdir(sample_folder_path)
sample_folder = self.no_project.get_resource(sample_folder_path)
self.assertEqual([], sample_folder.get_children())
sample_file_path = os.path.join(sample_folder_path, "sample.txt")
open(sample_file_path, "w").close()
sample_resource = self.no_project.get_resource(sample_file_path)
self.assertEqual(sample_resource, sample_folder.get_children()[0])
def test_using_absolute_path(self):
sample_file_path = os.path.join(self.test_directory, "sample.txt")
open(sample_file_path, "w").close()
normal_sample_resource = self.no_project.get_resource(sample_file_path)
absolute_sample_resource = self.no_project.get_resource(
os.path.abspath(sample_file_path)
)
self.assertEqual(normal_sample_resource, absolute_sample_resource)
def test_folder_get_child(self):
sample_folder_path = os.path.join(self.test_directory, "sample_folder")
os.mkdir(sample_folder_path)
sample_folder = self.no_project.get_resource(sample_folder_path)
self.assertEqual([], sample_folder.get_children())
sample_file_path = os.path.join(sample_folder_path, "sample.txt")
open(sample_file_path, "w").close()
sample_resource = self.no_project.get_resource(sample_file_path)
self.assertTrue(sample_folder.has_child("sample.txt"))
self.assertFalse(sample_folder.has_child("doesnothave.txt"))
self.assertEqual(sample_resource, sample_folder.get_child("sample.txt"))
def test_out_of_project_files_and_path_to_resource(self):
sample_file_path = os.path.join(self.test_directory, "sample.txt")
sample_file = open(sample_file_path, "w")
sample_file.write("sample content\n")
sample_file.close()
sample_resource = self.no_project.get_resource(sample_file_path)
self.assertEqual(
sample_resource, path_to_resource(self.project, sample_file_path)
)
class _MockFSCommands(object):
def __init__(self):
self.log = ""
self.fscommands = FileSystemCommands()
def create_file(self, path):
self.log += "create_file "
self.fscommands.create_file(path)
def create_folder(self, path):
self.log += "create_folder "
self.fscommands.create_folder(path)
def move(self, path, new_location):
self.log += "move "
self.fscommands.move(path, new_location)
def remove(self, path):
self.log += "remove "
self.fscommands.remove(path)
def read(self, path):
self.log += "read "
return self.fscommands.read(path)
class _DeprecatedFSCommands(object):
def __init__(self):
self.log = ""
self.fscommands = FileSystemCommands()
def create_file(self, path):
self.log += "create_file "
self.fscommands.create_file(path)
def create_folder(self, path):
self.log += "create_folder "
self.fscommands.create_folder(path)
def move(self, path, new_location):
self.log += "move "
self.fscommands.move(path, new_location)
def remove(self, path):
self.log += "remove "
self.fscommands.remove(path)
class RopeFolderTest(unittest.TestCase):
def setUp(self):
super(RopeFolderTest, self).setUp()
self.project = None
def tearDown(self):
if self.project:
testutils.remove_project(self.project)
super(RopeFolderTest, self).tearDown()
def test_none_project_rope_folder(self):
self.project = testutils.sample_project(ropefolder=None)
self.assertTrue(self.project.ropefolder is None)
def test_getting_project_rope_folder(self):
self.project = testutils.sample_project(ropefolder=".ropeproject")
self.assertTrue(self.project.ropefolder.exists())
self.assertTrue(".ropeproject", self.project.ropefolder.path)
def test_setting_ignored_resources(self):
self.project = testutils.sample_project(ignored_resources=["myfile.txt"])
myfile = self.project.get_file("myfile.txt")
file2 = self.project.get_file("file2.txt")
self.assertTrue(self.project.is_ignored(myfile))
self.assertFalse(self.project.is_ignored(file2))
def test_ignored_folders(self):
self.project = testutils.sample_project(ignored_resources=["myfolder"])
myfolder = self.project.root.create_folder("myfolder")
self.assertTrue(self.project.is_ignored(myfolder))
myfile = myfolder.create_file("myfile.txt")
self.assertTrue(self.project.is_ignored(myfile))
def test_ignored_resources_and_get_files(self):
self.project = testutils.sample_project(
ignored_resources=["myfile.txt"], ropefolder=None
)
myfile = self.project.get_file("myfile.txt")
self.assertEqual(0, len(self.project.get_files()))
myfile.create()
self.assertEqual(0, len(self.project.get_files()))
def test_ignored_resources_and_get_files2(self):
self.project = testutils.sample_project(
ignored_resources=["myfile.txt"], ropefolder=None
)
myfile = self.project.root.create_file("myfile.txt") # noqa
self.assertEqual(0, len(self.project.get_files()))
def test_setting_ignored_resources_patterns(self):
self.project = testutils.sample_project(ignored_resources=["m?file.*"])
myfile = self.project.get_file("myfile.txt")
file2 = self.project.get_file("file2.txt")
self.assertTrue(self.project.is_ignored(myfile))
self.assertFalse(self.project.is_ignored(file2))
def test_star_should_not_include_slashes(self):
self.project = testutils.sample_project(ignored_resources=["f*.txt"])
folder = self.project.root.create_folder("folder")
file1 = folder.create_file("myfile.txt")
file2 = folder.create_file("file2.txt")
self.assertFalse(self.project.is_ignored(file1))
self.assertTrue(self.project.is_ignored(file2))
def test_normal_fscommands(self):
fscommands = _MockFSCommands()
self.project = testutils.sample_project(fscommands=fscommands)
myfile = self.project.get_file("myfile.txt")
myfile.create()
self.assertTrue("create_file ", fscommands.log)
def test_fscommands_and_ignored_resources(self):
fscommands = _MockFSCommands()
self.project = testutils.sample_project(
fscommands=fscommands, ignored_resources=["myfile.txt"], ropefolder=None
)
myfile = self.project.get_file("myfile.txt")
myfile.create()
self.assertEqual("", fscommands.log)
def test_deprecated_fscommands(self):
fscommands = _DeprecatedFSCommands()
self.project = testutils.sample_project(fscommands=fscommands)
myfile = self.project.get_file("myfile.txt")
myfile.create()
self.assertTrue("create_file ", fscommands.log)
def test_ignored_resources_and_prefixes(self):
self.project = testutils.sample_project(ignored_resources=[".hg"])
myfile = self.project.root.create_file(".hgignore")
self.assertFalse(self.project.is_ignored(myfile))
def test_loading_config_dot_py(self):
self.project = testutils.sample_project(ropefolder=".ropeproject")
config = self.project.get_file(".ropeproject/config.py")
if not config.exists():
config.create()
config.write(
dedent("""\
def set_prefs(prefs):
prefs["ignored_resources"] = ["myfile.txt"]
def project_opened(project):
project.root.create_file("loaded")
""")
)
self.project.close()
self.project = Project(self.project.address, ropefolder=".ropeproject")
self.assertTrue(self.project.get_file("loaded").exists())
myfile = self.project.get_file("myfile.txt")
self.assertTrue(self.project.is_ignored(myfile))
def test_ignoring_syntax_errors(self):
self.project = testutils.sample_project(
ropefolder=None, ignore_syntax_errors=True
)
mod = testutils.create_module(self.project, "mod")
mod.write("xyz print")
pymod = self.project.get_pymodule(mod) # noqa
def test_compressed_history(self):
self.project = testutils.sample_project(compress_history=True)
mod = testutils.create_module(self.project, "mod")
mod.write("")
def test_compressed_objectdb(self):
self.project = testutils.sample_project(compress_objectdb=True)
mod = testutils.create_module(self.project, "mod")
self.project.pycore.analyze_module(mod)
def test_nested_dot_ropeproject_folder(self):
self.project = testutils.sample_project(ropefolder=".f1/f2")
ropefolder = self.project.ropefolder
self.assertEqual(".f1/f2", ropefolder.path)
self.assertTrue(ropefolder.exists())
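# A compact, illustrative recap (not an actual test case) of the observer
# wiring exercised above, relying only on names already available in this
# module: create a throwaway project, watch one file with a
# FilteredResourceObserver, and check that a write is reported exactly once.
def _example_filtered_observer_usage():
    project = testutils.sample_project()
    try:
        observer = _SampleObserver()
        watched = project.root.create_file("watched.txt")
        project.add_observer(FilteredResourceObserver(observer, [watched]))
        watched.write("hello")
        assert observer.change_count == 1
        assert observer.last_changed == watched
    finally:
        testutils.remove_project(project)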
|
python-rope/rope
|
ropetest/projecttest.py
|
Python
|
lgpl-3.0
| 46,324
|
from platforms.desktop.desktop_service import DesktopService
from specializations.dlv.dlv_answer_sets import DLVAnswerSets
class DLVDesktopService(DesktopService):
"""Is an extention of DesktopService for DLV's solver"""
def __init__(self, exe_path):
super(DLVDesktopService, self).__init__(exe_path)
self._load_from_stdin_option = "--"
def _get_output(self, output, error):
"""Return a new DLVAnwerSets from output and error given"""
return DLVAnswerSets(output, error)
|
SimoneLucia/EmbASP-Python
|
specializations/dlv/desktop/dlv_desktop_service.py
|
Python
|
mit
| 529
|
#!/usr/bin/python
import ldap
import Pmw
import subprocess
import os
from Tkinter import *
class Sistema:
dados_impressao=""
def __init__(self,raiz):
self.run(raiz)
def run(self,raiz):
# Create the ScrolledListBox.
        self.box = Pmw.ScrolledListBox(
            raiz,
            labelpos='nw',
            label_text='Lista de Impressao',
            listbox_height=6,
            selectioncommand=self.login,
            usehullsize=1,
            hull_width=400,
            hull_height=300,
        )
self.box.pack(fill = 'both', expand = 1, padx = 5, pady = 5)
self.reload()
def reload(self):
self.box.clear()
p = subprocess.Popen('/usr/local/ibquota/lista_impressao.pl', stdout=subprocess.PIPE)
output, error = p.communicate()
for linha in output.split('\n'):
if len(linha) > 1:
dados=linha.split(':')
lista = "%s %s %s %s" %(dados[0].strip(),dados[1],dados[2],dados[3].strip())
self.box.insert('end', lista)
def check_credentials(self):
username = self.nome.get()
password = self.senha.get()
dados_user=self.dados_impressao.split()
LDAP_SERVER = 'ldap://xx.xx.xx.xx'
# fully qualified AD user name
LDAP_USERNAME = '%s@DOMINIO' % username
# your password
LDAP_PASSWORD = password
base_dn = 'DC=DOMINIO,DC=DOMINIO'
ldap_filter = 'userPrincipalName=%s@DOMINIO' % username
attrs = ['memberOf']
if username == dados_user[1]:
try:
# build a client
ldap_client = ldap.initialize(LDAP_SERVER)
# perform a synchronous bind
ldap_client.set_option(ldap.OPT_REFERRALS,0)
ldap_client.simple_bind_s(LDAP_USERNAME, LDAP_PASSWORD)
except ldap.INVALID_CREDENTIALS:
ldap_client.unbind()
self.msg['text']='Senha incorreta!'
return False
except ldap.SERVER_DOWN:
self.msg['text']='Senha incorreta!'
return False
# all is well
            # get all user groups and store it in cherrypy session for future use
# cherrypy.session[username] = str(ldap_client.search_s(base_dn,ldap.SCOPE_SUBTREE, ldap_filter, attrs)[0][1]['memberOf'])
#print str(ldap_client.search_s(base_dn,ldap.SCOPE_SUBTREE, ldap_filter, attrs)[0])
comando="/usr/local/ibquota/imprimir.pl %s %s %s %s" %(dados_user[0],dados_user[1], dados_user[2],dados_user[3])
os.system(comando)
self.reload()
ldap_client.unbind()
self.raiz.destroy()
return True
else:
self.msg['text']='Usuario nao solicitou impressao'
return False
def login(self):
sels = self.box.getcurselection()
if len(sels) != 0:
self.dados_impressao= sels[0]
self.raiz = Tk()
self.raiz.title('Sistema de Impressao - Autenticao')
self.impressao= Frame(self.raiz)
self.impressao.pack()
Label(self.impressao,text=sels[0]).grid(row=1, column=1,sticky=W, pady=3)
self.login = Frame(self.raiz)
self.login.pack()
Label(self.login,text='Nome:').grid(row=1, column=1,sticky=W, pady=3)
Label(self.login,text='Senha:').grid(row=2, column=1,sticky=W, pady=3)
self.msg=Label(self.login,text='')
self.msg.grid(row=3, column=1, columnspan=2)
        self.nome=Entry(self.login, width=10)
self.nome.grid(row=1, column=2, sticky=E+W, pady=3)
self.nome.focus_force()
self.senha=Entry(self.login, width=5, fg='darkgray',show='l',font=('Wingdings','10'))
self.senha.grid(row=2,column=2, sticky=E+W, pady=3)
self.ok=Button(self.login, width=8, command=self.check_credentials,text='OK')
self.ok.grid(row=4, column=1, padx=2, pady=3)
self.close=Button(self.login, width=8, command=self.fechar,text='Fechar')
self.close.grid(row=4, column=2, padx=2, pady=3)
def fechar(self): self.raiz.destroy()
inst1=Tk()
inst1.title("Controle de Impressao")
telas=Sistema(inst1)
reload = Button(inst1, text = 'Recarregar', command = telas.reload)
reload.pack(side = 'bottom')
inst1.mainloop()
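# Distilled sketch (illustrative only, not used by the application above) of
# the LDAP bind check performed in Sistema.check_credentials: the bind
# succeeds only for a valid username/password pair. The server address and
# domain below are placeholders, not real values from this deployment.
def ldap_login_ok(username, password, server='ldap://ldap.example.com', domain='DOMINIO'):
    client = ldap.initialize(server)
    client.set_option(ldap.OPT_REFERRALS, 0)
    try:
        client.simple_bind_s('%s@%s' % (username, domain), password)
    except ldap.INVALID_CREDENTIALS:
        client.unbind()
        return False
    except ldap.SERVER_DOWN:
        return False
    client.unbind()
    return True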
|
clodonil/controle-impressao
|
controle_de_impressao.py
|
Python
|
lgpl-3.0
| 4,148
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (C) 2010-2022 GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (https://www.globalquakemodel.org/tools-products) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# (hazard@globalquakemodel.org).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
'''
Class to implement a set of functionalities for selecting events from
an earthquake catalogue
'''
import numpy as np
from datetime import datetime
from copy import deepcopy
from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.geo.mesh import Mesh
from openquake.hmtk.seismicity.catalogue import Catalogue
from openquake.hmtk.seismicity.utils import decimal_time
def _check_depth_limits(input_dict):
'''Returns the default upper and lower depth values if not in dictionary
:param input_dict:
Dictionary corresponding to the kwargs dictionary of calling function
:returns:
'upper_depth': Upper seismogenic depth (float)
'lower_depth': Lower seismogenic depth (float)
'''
if ('upper_depth' in input_dict.keys()) and input_dict['upper_depth']:
if input_dict['upper_depth'] < 0.:
raise ValueError('Upper seismogenic depth must be positive')
else:
upper_depth = input_dict['upper_depth']
else:
upper_depth = 0.0
if ('lower_depth' in input_dict.keys()) and input_dict['lower_depth']:
if input_dict['lower_depth'] < upper_depth:
raise ValueError('Lower depth must take a greater value than'
' upper depth!')
else:
lower_depth = input_dict['lower_depth']
else:
lower_depth = np.inf
return upper_depth, lower_depth
def _get_decimal_from_datetime(time):
'''
As the decimal time function requires inputs in the form of numpy
arrays need to convert each value in the datetime object to a single
numpy array
'''
# Get decimal seconds from seconds + microseconds
temp_seconds = float(time.second) + (float(time.microsecond) / 1.0E6)
return decimal_time(np.array([time.year], dtype=int),
np.array([time.month], dtype=int),
np.array([time.day], dtype=int),
np.array([time.hour], dtype=int),
np.array([time.minute], dtype=int),
                        np.array([temp_seconds], dtype=float))
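# Illustrative checks (not part of the module) for the two helpers above:
# _check_depth_limits falls back to 0.0 / infinity when the keys are absent,
# and _get_decimal_from_datetime maps a datetime to a decimal year, so
# mid-2010 should land between 2010 and 2011.
def _demo_selector_helpers():
    assert _check_depth_limits({}) == (0.0, np.inf)
    assert _check_depth_limits({'upper_depth': 5.0, 'lower_depth': 30.0}) == (5.0, 30.0)
    decimal_year = _get_decimal_from_datetime(datetime(2010, 7, 2, 12, 0, 0))
    assert 2010.0 < decimal_year[0] < 2011.0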
class CatalogueSelector(object):
'''
Class to implement methods for selecting subsets of the catalogue
according to various attribute criteria.
:attr catalogue: The catalogue to which the selection is applied as
instance of openquake.hmtk.seismicity.catalogue.Catalogue
:attr create_copy: Boolean to indicate whether to create copy of the
original catalogue before selecting {default = True}
'''
def __init__(self, master_catalogue, create_copy=True):
'''
Instantiate
:param master_catalogue:
Instance of openquake.hmtk.seismicity.catalogue.Catalogue class
        :param bool create_copy: Option to create a copy of the class before
selecting (i.e. preserving original class)
'''
self.catalogue = master_catalogue
self.copycat = create_copy
def select_catalogue(self, valid_id):
'''
Method to post-process the catalogue based on the selection options
:param numpy.ndarray valid_id:
Boolean vector indicating whether each event is selected (True)
or not (False)
:returns:
Catalogue of selected events as instance of
openquake.hmtk.seismicity.catalogue.Catalogue class
'''
if not np.any(valid_id):
# No events selected - create clean instance of class
output = Catalogue()
output.processes = self.catalogue.processes
elif np.all(valid_id):
if self.copycat:
output = deepcopy(self.catalogue)
else:
output = self.catalogue
else:
if self.copycat:
output = deepcopy(self.catalogue)
else:
output = self.catalogue
output.purge_catalogue(valid_id)
return output
def within_polygon(self, polygon, distance=None, **kwargs):
'''
Select earthquakes within polygon
:param polygon:
            Polygon as instance of nhlib.geo.polygon.Polygon class
:param float distance:
Buffer distance (km) (can take negative values)
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
containing only selected events
'''
if distance:
            # If a distance is specified then dilate the polygon by distance
zone_polygon = polygon.dilate(distance)
else:
zone_polygon = polygon
# Make valid all events inside depth range
upper_depth, lower_depth = _check_depth_limits(kwargs)
valid_depth = np.logical_and(
self.catalogue.data['depth'] >= upper_depth,
self.catalogue.data['depth'] < lower_depth)
# Events outside polygon returned to invalid assignment
catalogue_mesh = Mesh(self.catalogue.data['longitude'],
self.catalogue.data['latitude'],
self.catalogue.data['depth'])
valid_id = np.logical_and(valid_depth,
zone_polygon.intersects(catalogue_mesh))
return self.select_catalogue(valid_id)
def circular_distance_from_point(self, point, distance, **kwargs):
'''
Select earthquakes within a distance from a Point
:param point:
Centre point as instance of nhlib.geo.point.Point class
:param float distance:
Distance (km)
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
containing only selected events
'''
if kwargs['distance_type'] == 'epicentral':
locations = Mesh(
self.catalogue.data['longitude'],
self.catalogue.data['latitude'],
np.zeros(len(self.catalogue.data['longitude']), dtype=float))
point = Point(point.longitude, point.latitude, 0.0)
else:
locations = self.catalogue.hypocentres_as_mesh()
is_close = point.closer_than(locations, distance)
return self.select_catalogue(is_close)
def cartesian_square_centred_on_point(self, point, distance, **kwargs):
'''
Select earthquakes from within a square centered on a point
:param point:
Centre point as instance of nhlib.geo.point.Point class
:param distance:
Distance (km)
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
class containing only selected events
'''
point_surface = Point(point.longitude, point.latitude, 0.)
        # As distance is the half-width of the square, find the bounding
        # points due north, east, south and west of the centre
north_point = point_surface.point_at(distance, 0., 0.)
east_point = point_surface.point_at(distance, 0., 90.)
south_point = point_surface.point_at(distance, 0., 180.)
west_point = point_surface.point_at(distance, 0., 270.)
is_long = np.logical_and(
self.catalogue.data['longitude'] >= west_point.longitude,
self.catalogue.data['longitude'] < east_point.longitude)
        is_surface = np.logical_and(
            is_long,
            np.logical_and(
                self.catalogue.data['latitude'] >= south_point.latitude,
                self.catalogue.data['latitude'] < north_point.latitude))
        upper_depth, lower_depth = _check_depth_limits(kwargs)
        is_valid = np.logical_and(
            is_surface,
            np.logical_and(
                self.catalogue.data['depth'] >= upper_depth,
                self.catalogue.data['depth'] < lower_depth))
return self.select_catalogue(is_valid)
def within_joyner_boore_distance(self, surface, distance, **kwargs):
'''
Select events within a Joyner-Boore distance of a fault
:param surface:
Fault surface as instance of
nhlib.geo.surface.base.SimpleFaultSurface or as instance of
nhlib.geo.surface.ComplexFaultSurface
:param float distance:
Rupture distance (km)
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
containing only selected events
'''
upper_depth, lower_depth = _check_depth_limits(kwargs)
rjb = surface.get_joyner_boore_distance(
self.catalogue.hypocentres_as_mesh())
is_valid = np.logical_and(
rjb <= distance,
np.logical_and(self.catalogue.data['depth'] >= upper_depth,
self.catalogue.data['depth'] < lower_depth))
return self.select_catalogue(is_valid)
def within_rupture_distance(self, surface, distance, **kwargs):
'''
Select events within a rupture distance from a fault surface
:param surface:
Fault surface as instance of nhlib.geo.surface.base.BaseSurface
:param float distance:
Rupture distance (km)
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
containing only selected events
'''
# Check for upper and lower depths
upper_depth, lower_depth = _check_depth_limits(kwargs)
rrupt = surface.get_min_distance(self.catalogue.hypocentres_as_mesh())
is_valid = np.logical_and(
rrupt <= distance,
np.logical_and(self.catalogue.data['depth'] >= upper_depth,
self.catalogue.data['depth'] < lower_depth))
return self.select_catalogue(is_valid)
def within_time_period(self, start_time=None, end_time=None):
'''
Select earthquakes occurring within a given time period
:param start_time:
Earliest time (as datetime.datetime object)
:param end_time:
Latest time (as datetime.datetime object)
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
containing only selected events
'''
time_value = self.catalogue.get_decimal_time()
if not start_time:
if not end_time:
# No times input, therefore skip everything and return catalog
return self.catalogue
else:
start_time = np.min(self.catalogue.data['year'])
else:
start_time = _get_decimal_from_datetime(start_time)
if not end_time:
end_time = _get_decimal_from_datetime(datetime.now())
else:
end_time = _get_decimal_from_datetime(end_time)
# Get decimal time values
time_value = self.catalogue.get_decimal_time()
is_valid = np.logical_and(time_value >= start_time,
time_value < end_time)
return self.select_catalogue(is_valid)
def within_depth_range(self, lower_depth=None, upper_depth=None):
'''
Selects events within a specified depth range
:param float lower_depth:
Lower depth for consideration
:param float upper_depth:
Upper depth for consideration
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
containing only selected events
'''
if not lower_depth:
if not upper_depth:
# No limiting depths defined - so return entire catalogue!
return self.catalogue
else:
lower_depth = np.inf
if not upper_depth:
upper_depth = 0.0
is_valid = np.logical_and(self.catalogue.data['depth'] >= upper_depth,
self.catalogue.data['depth'] < lower_depth)
return self.select_catalogue(is_valid)
def within_magnitude_range(self, lower_mag=None, upper_mag=None):
        '''
        Select earthquakes within a magnitude range
        :param float lower_mag:
Lower magnitude for consideration
:param float upper_mag:
Upper magnitude for consideration
:returns:
Instance of openquake.hmtk.seismicity.catalogue.Catalogue class containing
only selected events
'''
if not lower_mag:
if not upper_mag:
# No limiting magnitudes defined - return entire catalogue!
return self.catalogue
else:
lower_mag = -np.inf
if not upper_mag:
upper_mag = np.inf
is_valid = np.logical_and(
self.catalogue.data['magnitude'] >= lower_mag,
self.catalogue.data['magnitude'] < upper_mag)
return self.select_catalogue(is_valid)
def create_cluster_set(self, vcl):
"""
For a given catalogue and list of cluster IDs this function splits
the catalogue into a dictionary containing an individual catalogue
of events within each cluster
:param numpy.ndarray vcl:
Cluster ID list
:returns:
            Dictionary of instances of the
            :class:`openquake.hmtk.seismicity.catalogue.Catalogue` class,
            where each instance is the catalogue of a single cluster
"""
num_clust = np.max(vcl)
cluster_set = []
for clid in range(0, num_clust + 1):
idx = np.where(vcl == clid)[0]
cluster_cat = deepcopy(self.catalogue)
cluster_cat.select_catalogue_events(idx)
cluster_set.append((clid, cluster_cat))
return dict(cluster_set)
def within_bounding_box(self, limits):
"""
Selects the earthquakes within a bounding box.
:parameter limits:
A list or a numpy array with four elements in the following order:
- min x (longitude)
- min y (latitude)
- max x (longitude)
- max y (latitude)
:returns:
        Returns a :class:`openquake.hmtk.seismicity.catalogue.Catalogue` instance
"""
is_valid = np.logical_and(
self.catalogue.data['longitude'] >= limits[0],
np.logical_and(self.catalogue.data['longitude'] <= limits[2],
np.logical_and(
self.catalogue.data['latitude'] >= limits[1],
self.catalogue.data['latitude'] <= limits[3])))
return self.select_catalogue(is_valid)
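# A short usage sketch (illustrative only, not part of the module): chaining
# two selections on a catalogue. `catalogue` is assumed to be a populated
# openquake.hmtk.seismicity.catalogue.Catalogue instance.
def _example_selector_usage(catalogue):
    selector = CatalogueSelector(catalogue, create_copy=True)
    # Keep only events shallower than 30 km
    shallow = selector.within_depth_range(lower_depth=30.0)
    # Of those, keep events within 100 km (epicentral) of a reference point;
    # note that distance_type must be passed explicitly.
    nearby = CatalogueSelector(shallow).circular_distance_from_point(
        Point(30.0, 40.0), 100.0, distance_type='epicentral')
    return nearby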
|
gem/oq-engine
|
openquake/hmtk/seismicity/selector.py
|
Python
|
agpl-3.0
| 16,424
|
#!/usr/bin/env python3
import json
import fileinput
import argparse
import os
from batman import batman
from alfred import alfred
from rrd import rrd
from nodedb import NodeDB
from d3mapbuilder import D3MapBuilder
# Force encoding to UTF-8
import locale # Ensures that subsequent open()s
locale.getpreferredencoding = lambda _=None: 'UTF-8' # are UTF-8 encoded.
import sys
#sys.stdin = open('/dev/stdin', 'r')
#sys.stdout = open('/dev/stdout', 'w')
#sys.stderr = open('/dev/stderr', 'w')
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--aliases',
help='read aliases from FILE',
action='append',
metavar='FILE')
parser.add_argument('-m', '--mesh', action='append',
help='batman mesh interface')
parser.add_argument('-o', '--obscure', action='store_true',
help='obscure client macs')
parser.add_argument('-A', '--alfred', action='store_true',
help='retrieve aliases from alfred')
parser.add_argument('-d', '--destination-directory', action='store',
help='destination directory for generated files',required=True)
args = parser.parse_args()
options = vars(args)
db = NodeDB()
if options['mesh']:
for mesh_interface in options['mesh']:
bm = batman(mesh_interface)
db.parse_vis_data(bm.vis_data(options['alfred']))
for gw in bm.gateway_list():
db.mark_gateways(gw['mac'])
else:
bm = batman()
db.parse_vis_data(bm.vis_data(options['alfred']))
for gw in bm.gateway_list():
db.mark_gateways([gw['mac']])
if options['aliases']:
for aliases in options['aliases']:
db.import_aliases(json.load(open(aliases)))
if options['alfred']:
af = alfred()
db.add_node_info(af.aliases())
db.count_clients()
if options['obscure']:
db.obscure_clients()
scriptdir = os.path.dirname(os.path.realpath(__file__))
m = D3MapBuilder(db)
#Write nodes json
nodes_json = open(options['destination_directory'] + '/nodes.json.new','w')
nodes_json.write(m.build())
nodes_json.close()
#Move to destination
os.rename(options['destination_directory'] + '/nodes.json.new',options['destination_directory'] + '/nodes.json')
rrd = rrd(scriptdir + "/nodedb/", options['destination_directory'] + "/nodes")
rrd.update_database(db)
rrd.update_images()
|
FreifunkMD/ffmap-backend
|
bat2nodes.py
|
Python
|
bsd-3-clause
| 2,343
|
import ninjag
import os
from read_all import read_all
def test():
f_inputs = [
"input/in1_const.yaml",
"input/in1_rules.yaml",
"input/in1_tasks.yaml",
]
f_answer = "output/out1_combined.ninja"
f_solution = "solution/sol1.ninja"
cmd = " ".join(["ninjag", f_answer, *f_inputs])
os.system(cmd)
answer = read_all(f_answer)
solution = read_all(f_solution)
assert answer == solution
|
yuhangwang/ninjag-python
|
test/frontend/c/test_2.py
|
Python
|
mit
| 443
|
# -*- coding: utf-8 -*-
"""
Test cases to cover Accounts-related behaviors of the User API application
"""
from copy import deepcopy
import datetime
import hashlib
import json
import ddt
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.testcases import TransactionTestCase
from django.test.utils import override_settings
import mock
import pytz
from rest_framework.test import APIClient, APITestCase
from openedx.core.djangoapps.user_api.accounts import ACCOUNT_VISIBILITY_PREF_KEY
from openedx.core.djangoapps.user_api.models import UserPreference
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
from student.models import (
CourseEnrollment,
CourseEnrollmentAllowed,
ManualEnrollmentAudit,
PasswordHistory,
PendingEmailChange,
PendingNameChange,
Registration,
SocialLink,
UserProfile,
get_retired_username_by_username,
get_retired_email_by_email,
)
from student.tests.factories import (
TEST_PASSWORD,
ContentTypeFactory,
CourseEnrollmentAllowedFactory,
PendingEmailChangeFactory,
PermissionFactory,
SuperuserFactory,
UserFactory
)
from .. import ALL_USERS_VISIBILITY, PRIVATE_VISIBILITY
TEST_PROFILE_IMAGE_UPLOADED_AT = datetime.datetime(2002, 1, 9, 15, 43, 1, tzinfo=pytz.UTC)
# this is used in one test to check the behavior of profile image url
# generation with a relative url in the config.
TEST_PROFILE_IMAGE_BACKEND = deepcopy(settings.PROFILE_IMAGE_BACKEND)
TEST_PROFILE_IMAGE_BACKEND['options']['base_url'] = '/profile-images/'
class UserAPITestCase(APITestCase):
"""
The base class for all tests of the User API
"""
def setUp(self):
super(UserAPITestCase, self).setUp()
self.anonymous_client = APIClient()
self.different_user = UserFactory.create(password=TEST_PASSWORD)
self.different_client = APIClient()
self.staff_user = UserFactory(is_staff=True, password=TEST_PASSWORD)
self.staff_client = APIClient()
self.user = UserFactory.create(password=TEST_PASSWORD) # will be assigned to self.client by default
def login_client(self, api_client, user):
"""Helper method for getting the client and user and logging in. Returns client. """
client = getattr(self, api_client)
user = getattr(self, user)
client.login(username=user.username, password=TEST_PASSWORD)
return client
def send_patch(self, client, json_data, content_type="application/merge-patch+json", expected_status=200):
"""
Helper method for sending a patch to the server, defaulting to application/merge-patch+json content_type.
Verifies the expected status and returns the response.
"""
# pylint: disable=no-member
response = client.patch(self.url, data=json.dumps(json_data), content_type=content_type)
self.assertEqual(expected_status, response.status_code)
return response
def send_get(self, client, query_parameters=None, expected_status=200):
"""
Helper method for sending a GET to the server. Verifies the expected status and returns the response.
"""
url = self.url + '?' + query_parameters if query_parameters else self.url # pylint: disable=no-member
response = client.get(url)
self.assertEqual(expected_status, response.status_code)
return response
# pylint: disable=no-member
def send_put(self, client, json_data, content_type="application/json", expected_status=204):
"""
Helper method for sending a PUT to the server. Verifies the expected status and returns the response.
"""
response = client.put(self.url, data=json.dumps(json_data), content_type=content_type)
self.assertEqual(expected_status, response.status_code)
return response
# pylint: disable=no-member
def send_delete(self, client, expected_status=204):
"""
Helper method for sending a DELETE to the server. Verifies the expected status and returns the response.
"""
response = client.delete(self.url)
self.assertEqual(expected_status, response.status_code)
return response
def create_mock_profile(self, user):
"""
Helper method that creates a mock profile for the specified user
:return:
"""
legacy_profile = UserProfile.objects.get(id=user.id)
legacy_profile.country = "US"
legacy_profile.level_of_education = "m"
legacy_profile.year_of_birth = 2000
legacy_profile.goals = "world peace"
legacy_profile.mailing_address = "Park Ave"
legacy_profile.gender = "f"
legacy_profile.bio = "Tired mother of twins"
legacy_profile.profile_image_uploaded_at = TEST_PROFILE_IMAGE_UPLOADED_AT
legacy_profile.language_proficiencies.create(code='en')
legacy_profile.save()
def _verify_profile_image_data(self, data, has_profile_image):
"""
Verify the profile image data in a GET response for self.user
corresponds to whether the user has or hasn't set a profile
image.
"""
template = '{root}/{filename}_{{size}}.{extension}'
if has_profile_image:
url_root = 'http://example-storage.com/profile-images'
filename = hashlib.md5('secret' + self.user.username).hexdigest()
file_extension = 'jpg'
template += '?v={}'.format(TEST_PROFILE_IMAGE_UPLOADED_AT.strftime("%s"))
else:
url_root = 'http://testserver/static'
filename = 'default'
file_extension = 'png'
template = template.format(root=url_root, filename=filename, extension=file_extension)
self.assertEqual(
data['profile_image'],
{
'has_image': has_profile_image,
'image_url_full': template.format(size=50),
'image_url_small': template.format(size=10),
}
)
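    def _example_helper_usage(self):
        """
        Illustrative only (never invoked by the tests): the typical pattern in
        the test cases below is to set ``self.url`` in ``setUp``, log in via
        ``login_client``, and then drive the endpoint with the ``send_*``
        helpers, asserting on ``response.data``.
        """
        client = self.login_client("client", "user")
        response = self.send_get(client)
        self.assertIn("username", response.data)
        self.send_patch(client, {"goals": "learn a lot"})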
@ddt.ddt
@skip_unless_lms
class TestOwnUsernameAPI(CacheIsolationTestCase, UserAPITestCase):
"""
Unit tests for the Accounts API.
"""
shard = 2
ENABLED_CACHES = ['default']
def setUp(self):
super(TestOwnUsernameAPI, self).setUp()
self.url = reverse("own_username_api")
def _verify_get_own_username(self, queries, expected_status=200):
"""
Internal helper to perform the actual assertion
"""
with self.assertNumQueries(queries):
response = self.send_get(self.client, expected_status=expected_status)
if expected_status == 200:
data = response.data
self.assertEqual(1, len(data))
self.assertEqual(self.user.username, data["username"])
def test_get_username(self):
"""
Test that a client (logged in) can get her own username.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self._verify_get_own_username(16)
def test_get_username_inactive(self):
"""
Test that a logged-in client can get their
username, even if inactive.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.user.is_active = False
self.user.save()
self._verify_get_own_username(16)
def test_get_username_not_logged_in(self):
"""
Test that a client (not logged in) gets a 401
when trying to retrieve their username.
"""
# verify that the endpoint is inaccessible when not logged in
self._verify_get_own_username(12, expected_status=401)
@ddt.ddt
@skip_unless_lms
@mock.patch('openedx.core.djangoapps.user_api.accounts.image_helpers._PROFILE_IMAGE_SIZES', [50, 10])
@mock.patch.dict(
'django.conf.settings.PROFILE_IMAGE_SIZES_MAP',
{'full': 50, 'small': 10},
clear=True
)
class TestAccountsAPI(CacheIsolationTestCase, UserAPITestCase):
"""
Unit tests for the Accounts API.
"""
shard = 2
ENABLED_CACHES = ['default']
def setUp(self):
super(TestAccountsAPI, self).setUp()
self.url = reverse("accounts_api", kwargs={'username': self.user.username})
def _verify_full_shareable_account_response(self, response, account_privacy=None, badges_enabled=False):
"""
Verify that the shareable fields from the account are returned
"""
data = response.data
self.assertEqual(10, len(data))
self.assertEqual(self.user.username, data["username"])
self.assertEqual("US", data["country"])
self._verify_profile_image_data(data, True)
self.assertIsNone(data["time_zone"])
self.assertEqual([{"code": "en"}], data["language_proficiencies"])
self.assertEqual("Tired mother of twins", data["bio"])
self.assertEqual(account_privacy, data["account_privacy"])
self.assertEqual(badges_enabled, data['accomplishments_shared'])
def _verify_private_account_response(self, response, requires_parental_consent=False, account_privacy=None):
"""
Verify that only the public fields are returned if a user does not want to share account fields
"""
data = response.data
self.assertEqual(3, len(data))
self.assertEqual(self.user.username, data["username"])
self._verify_profile_image_data(data, not requires_parental_consent)
self.assertEqual(account_privacy, data["account_privacy"])
def _verify_full_account_response(self, response, requires_parental_consent=False):
"""
Verify that all account fields are returned (even those that are not shareable).
"""
data = response.data
self.assertEqual(19, len(data))
self.assertEqual(self.user.username, data["username"])
self.assertEqual(self.user.first_name + " " + self.user.last_name, data["name"])
self.assertEqual("US", data["country"])
self.assertEqual("f", data["gender"])
self.assertEqual(2000, data["year_of_birth"])
self.assertEqual("m", data["level_of_education"])
self.assertEqual("world peace", data["goals"])
self.assertEqual("Park Ave", data['mailing_address'])
self.assertEqual(self.user.email, data["email"])
self.assertTrue(data["is_active"])
self.assertIsNotNone(data["date_joined"])
self.assertEqual("Tired mother of twins", data["bio"])
self._verify_profile_image_data(data, not requires_parental_consent)
self.assertEquals(requires_parental_consent, data["requires_parental_consent"])
self.assertEqual([{"code": "en"}], data["language_proficiencies"])
self.assertEqual(UserPreference.get_value(self.user, 'account_privacy'), data["account_privacy"])
def test_anonymous_access(self):
"""
Test that an anonymous client (not logged in) cannot call GET or PATCH.
"""
self.send_get(self.anonymous_client, expected_status=401)
self.send_patch(self.anonymous_client, {}, expected_status=401)
def test_unsupported_methods(self):
"""
Test that DELETE, POST, and PUT are not supported.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.assertEqual(405, self.client.put(self.url).status_code)
self.assertEqual(405, self.client.post(self.url).status_code)
self.assertEqual(405, self.client.delete(self.url).status_code)
@ddt.data(
("client", "user"),
("staff_client", "staff_user"),
)
@ddt.unpack
def test_get_account_unknown_user(self, api_client, user):
"""
Test that requesting a user who does not exist returns a 404.
"""
client = self.login_client(api_client, user)
response = client.get(reverse("accounts_api", kwargs={'username': "does_not_exist"}))
self.assertEqual(403 if user == "staff_user" else 404, response.status_code)
# Note: using getattr so that the patching works even if there is no configuration.
# This is needed when testing CMS as the patching is still executed even though the
# suite is skipped.
@mock.patch.dict(getattr(settings, "ACCOUNT_VISIBILITY_CONFIGURATION", {}), {"default_visibility": "all_users"})
def test_get_account_different_user_visible(self):
"""
Test that a client (logged in) can only get the shareable fields for a different user.
This is the case when default_visibility is set to "all_users".
"""
self.different_client.login(username=self.different_user.username, password=TEST_PASSWORD)
self.create_mock_profile(self.user)
with self.assertNumQueries(21):
response = self.send_get(self.different_client)
self._verify_full_shareable_account_response(response, account_privacy=ALL_USERS_VISIBILITY)
# Note: using getattr so that the patching works even if there is no configuration.
# This is needed when testing CMS as the patching is still executed even though the
# suite is skipped.
@mock.patch.dict(getattr(settings, "ACCOUNT_VISIBILITY_CONFIGURATION", {}), {"default_visibility": "private"})
def test_get_account_different_user_private(self):
"""
Test that a client (logged in) can only get the shareable fields for a different user.
This is the case when default_visibility is set to "private".
"""
self.different_client.login(username=self.different_user.username, password=TEST_PASSWORD)
self.create_mock_profile(self.user)
with self.assertNumQueries(21):
response = self.send_get(self.different_client)
self._verify_private_account_response(response, account_privacy=PRIVATE_VISIBILITY)
@mock.patch.dict(settings.FEATURES, {'ENABLE_OPENBADGES': True})
@ddt.data(
("client", "user", PRIVATE_VISIBILITY),
("different_client", "different_user", PRIVATE_VISIBILITY),
("staff_client", "staff_user", PRIVATE_VISIBILITY),
("client", "user", ALL_USERS_VISIBILITY),
("different_client", "different_user", ALL_USERS_VISIBILITY),
("staff_client", "staff_user", ALL_USERS_VISIBILITY),
)
@ddt.unpack
def test_get_account_private_visibility(self, api_client, requesting_username, preference_visibility):
"""
Test the return from GET based on user visibility setting.
"""
def verify_fields_visible_to_all_users(response):
"""
Confirms that private fields are private, and public/shareable fields are public/shareable
"""
if preference_visibility == PRIVATE_VISIBILITY:
self._verify_private_account_response(response, account_privacy=PRIVATE_VISIBILITY)
else:
self._verify_full_shareable_account_response(response, ALL_USERS_VISIBILITY, badges_enabled=True)
client = self.login_client(api_client, requesting_username)
# Update user account visibility setting.
set_user_preference(self.user, ACCOUNT_VISIBILITY_PREF_KEY, preference_visibility)
self.create_mock_profile(self.user)
response = self.send_get(client)
if requesting_username == "different_user":
verify_fields_visible_to_all_users(response)
else:
self._verify_full_account_response(response)
# Verify how the view parameter changes the fields that are returned.
response = self.send_get(client, query_parameters='view=shared')
verify_fields_visible_to_all_users(response)
def test_get_account_default(self):
"""
Test that a client (logged in) can get her own account information (using default legacy profile information,
as created by the test UserFactory).
"""
def verify_get_own_information(queries):
"""
Internal helper to perform the actual assertions
"""
with self.assertNumQueries(queries):
response = self.send_get(self.client)
data = response.data
self.assertEqual(19, len(data))
self.assertEqual(self.user.username, data["username"])
self.assertEqual(self.user.first_name + " " + self.user.last_name, data["name"])
for empty_field in ("year_of_birth", "level_of_education", "mailing_address", "bio"):
self.assertIsNone(data[empty_field])
self.assertIsNone(data["country"])
self.assertEqual("m", data["gender"])
self.assertEqual("Learn a lot", data["goals"])
self.assertEqual(self.user.email, data["email"])
self.assertIsNotNone(data["date_joined"])
self.assertEqual(self.user.is_active, data["is_active"])
self._verify_profile_image_data(data, False)
self.assertTrue(data["requires_parental_consent"])
self.assertEqual([], data["language_proficiencies"])
self.assertEqual(PRIVATE_VISIBILITY, data["account_privacy"])
# Badges aren't on by default, so should not be present.
self.assertEqual(False, data["accomplishments_shared"])
self.client.login(username=self.user.username, password=TEST_PASSWORD)
verify_get_own_information(19)
# Now make sure that the user can get the same information, even if not active
self.user.is_active = False
self.user.save()
verify_get_own_information(13)
def test_get_account_empty_string(self):
"""
Test the conversion of empty strings to None for certain fields.
"""
legacy_profile = UserProfile.objects.get(id=self.user.id)
legacy_profile.country = ""
legacy_profile.level_of_education = ""
legacy_profile.gender = ""
legacy_profile.bio = ""
legacy_profile.save()
self.client.login(username=self.user.username, password=TEST_PASSWORD)
with self.assertNumQueries(19):
response = self.send_get(self.client)
for empty_field in ("level_of_education", "gender", "country", "bio"):
self.assertIsNone(response.data[empty_field])
@ddt.data(
("different_client", "different_user"),
("staff_client", "staff_user"),
)
@ddt.unpack
def test_patch_account_disallowed_user(self, api_client, user):
"""
Test that a client cannot call PATCH on a different client's user account (even with
is_staff access).
"""
client = self.login_client(api_client, user)
self.send_patch(client, {}, expected_status=403 if user == "staff_user" else 404)
@ddt.data(
("client", "user"),
("staff_client", "staff_user"),
)
@ddt.unpack
def test_patch_account_unknown_user(self, api_client, user):
"""
Test that trying to update a user who does not exist returns a 404.
"""
client = self.login_client(api_client, user)
response = client.patch(
reverse("accounts_api", kwargs={'username': "does_not_exist"}),
data=json.dumps({}), content_type="application/merge-patch+json"
)
self.assertEqual(404, response.status_code)
@ddt.data(
("gender", "f", "not a gender", u'"not a gender" is not a valid choice.'),
("level_of_education", "none", u"ȻħȺɍłɇs", u'"ȻħȺɍłɇs" is not a valid choice.'),
("country", "GB", "XY", u'"XY" is not a valid choice.'),
("year_of_birth", 2009, "not_an_int", u"A valid integer is required."),
("name", "bob", "z" * 256, u"Ensure this value has at most 255 characters (it has 256)."),
("name", u"ȻħȺɍłɇs", "z ", u"The name field must be at least 2 characters long."),
("goals", "Smell the roses"),
("mailing_address", "Sesame Street"),
# Note that we store the raw data, so it is up to client to escape the HTML.
(
"bio", u"<html>Lacrosse-playing superhero 壓是進界推日不復女</html>",
"z" * 3001, u"Ensure this value has at most 3000 characters (it has 3001)."
),
("account_privacy", ALL_USERS_VISIBILITY),
("account_privacy", PRIVATE_VISIBILITY),
# Note that email is tested below, as it is not immediately updated.
# Note that language_proficiencies is tested below as there are multiple error and success conditions.
)
@ddt.unpack
def test_patch_account(self, field, value, fails_validation_value=None, developer_validation_message=None):
"""
Test the behavior of patch, when using the correct content_type.
"""
client = self.login_client("client", "user")
if field == 'account_privacy':
# Ensure the user has birth year set, and is over 13, so
# account_privacy behaves normally
legacy_profile = UserProfile.objects.get(id=self.user.id)
legacy_profile.year_of_birth = 2000
legacy_profile.save()
response = self.send_patch(client, {field: value})
self.assertEqual(value, response.data[field])
if fails_validation_value:
error_response = self.send_patch(client, {field: fails_validation_value}, expected_status=400)
self.assertEqual(
u'This value is invalid.',
error_response.data["field_errors"][field]["user_message"]
)
self.assertEqual(
u"Value '{value}' is not valid for field '{field}': {messages}".format(
value=fails_validation_value, field=field, messages=[developer_validation_message]
),
error_response.data["field_errors"][field]["developer_message"]
)
elif field != "account_privacy":
# If there are no values that would fail validation, then empty string should be supported;
# except for account_privacy, which cannot be an empty string.
response = self.send_patch(client, {field: ""})
self.assertEqual("", response.data[field])
def test_patch_inactive_user(self):
""" Verify that a user can patch her own account, even if inactive. """
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.user.is_active = False
self.user.save()
response = self.send_patch(self.client, {"goals": "to not activate account"})
self.assertEqual("to not activate account", response.data["goals"])
def test_patch_account_noneditable(self):
"""
Tests the behavior of patch when a read-only field is attempted to be edited.
"""
client = self.login_client("client", "user")
def verify_error_response(field_name, data):
"""
Internal helper to check the error messages returned
"""
self.assertEqual(
"This field is not editable via this API", data["field_errors"][field_name]["developer_message"]
)
self.assertEqual(
"The '{0}' field cannot be edited.".format(field_name), data["field_errors"][field_name]["user_message"]
)
for field_name in ["username", "date_joined", "is_active", "profile_image", "requires_parental_consent"]:
response = self.send_patch(client, {field_name: "will_error", "gender": "o"}, expected_status=400)
verify_error_response(field_name, response.data)
# Make sure that gender did not change.
response = self.send_get(client)
self.assertEqual("m", response.data["gender"])
# Test error message with multiple read-only items
response = self.send_patch(client, {"username": "will_error", "date_joined": "xx"}, expected_status=400)
self.assertEqual(2, len(response.data["field_errors"]))
verify_error_response("username", response.data)
verify_error_response("date_joined", response.data)
def test_patch_bad_content_type(self):
"""
Test the behavior of patch when an incorrect content_type is specified.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.send_patch(self.client, {}, content_type="application/json", expected_status=415)
self.send_patch(self.client, {}, content_type="application/xml", expected_status=415)
def test_patch_account_empty_string(self):
"""
Tests the behavior of patch when attempting to set fields with a select list of options to the empty string.
Also verifies the behaviour when setting to None.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
for field_name in ["gender", "level_of_education", "country"]:
response = self.send_patch(self.client, {field_name: ""})
# Although throwing a 400 might be reasonable, the default DRF behavior with ModelSerializer
# is to convert to None, which also seems acceptable (and is difficult to override).
self.assertIsNone(response.data[field_name])
# Verify that the behavior is the same for sending None.
response = self.send_patch(self.client, {field_name: ""})
self.assertIsNone(response.data[field_name])
def test_patch_name_metadata(self):
"""
Test the metadata stored when changing the name field.
"""
def get_name_change_info(expected_entries):
"""
Internal method to encapsulate the retrieval of old names used
"""
legacy_profile = UserProfile.objects.get(id=self.user.id)
name_change_info = legacy_profile.get_meta()["old_names"]
self.assertEqual(expected_entries, len(name_change_info))
return name_change_info
def verify_change_info(change_info, old_name, requester, new_name):
"""
Internal method to validate name changes
"""
self.assertEqual(3, len(change_info))
self.assertEqual(old_name, change_info[0])
self.assertEqual("Name change requested through account API by {}".format(requester), change_info[1])
self.assertIsNotNone(change_info[2])
# Verify the new name was also stored.
get_response = self.send_get(self.client)
self.assertEqual(new_name, get_response.data["name"])
self.client.login(username=self.user.username, password=TEST_PASSWORD)
legacy_profile = UserProfile.objects.get(id=self.user.id)
self.assertEqual({}, legacy_profile.get_meta())
old_name = legacy_profile.name
# First change the name as the user and verify meta information.
self.send_patch(self.client, {"name": "Mickey Mouse"})
name_change_info = get_name_change_info(1)
verify_change_info(name_change_info[0], old_name, self.user.username, "Mickey Mouse")
# Now change the name again and verify meta information.
self.send_patch(self.client, {"name": "Donald Duck"})
name_change_info = get_name_change_info(2)
verify_change_info(name_change_info[0], old_name, self.user.username, "Donald Duck",)
verify_change_info(name_change_info[1], "Mickey Mouse", self.user.username, "Donald Duck")
@mock.patch.dict(
'django.conf.settings.PROFILE_IMAGE_SIZES_MAP',
{'full': 50, 'medium': 30, 'small': 10},
clear=True
)
def test_patch_email(self):
"""
Test that the user can request an email change through the accounts API.
Full testing of the helper method used (do_email_change_request) exists in the package with the code.
Here just do minimal smoke testing.
"""
client = self.login_client("client", "user")
old_email = self.user.email
new_email = "newemail@example.com"
response = self.send_patch(client, {"email": new_email, "goals": "change my email"})
        # Since the request is multi-step, the email won't change on GET immediately (though goals will update).
self.assertEqual(old_email, response.data["email"])
self.assertEqual("change my email", response.data["goals"])
        # Now call the method that will be invoked when the user clicks the activation key in the received email.
# First we must get the activation key that was sent.
pending_change = PendingEmailChange.objects.filter(user=self.user)
self.assertEqual(1, len(pending_change))
activation_key = pending_change[0].activation_key
confirm_change_url = reverse(
"confirm_email_change", kwargs={'key': activation_key}
)
response = self.client.post(confirm_change_url)
self.assertEqual(200, response.status_code)
get_response = self.send_get(client)
self.assertEqual(new_email, get_response.data["email"])
@ddt.data(
("not_an_email",),
("",),
(None,),
)
@ddt.unpack
def test_patch_invalid_email(self, bad_email):
"""
Test a few error cases for email validation (full test coverage lives with do_email_change_request).
"""
client = self.login_client("client", "user")
# Try changing to an invalid email to make sure error messages are appropriately returned.
error_response = self.send_patch(client, {"email": bad_email}, expected_status=400)
field_errors = error_response.data["field_errors"]
self.assertEqual(
"Error thrown from validate_new_email: 'Valid e-mail address required.'",
field_errors["email"]["developer_message"]
)
self.assertEqual("Valid e-mail address required.", field_errors["email"]["user_message"])
def test_patch_language_proficiencies(self):
"""
Verify that patching the language_proficiencies field of the user
profile completely overwrites the previous value.
"""
client = self.login_client("client", "user")
        # Patching language_proficiencies exercises the
        # `LanguageProficiencySerializer.get_identity` method, which identifies
        # language proficiencies by their language code rather than by their
        # django model id.
for proficiencies in ([{"code": "en"}, {"code": "fr"}, {"code": "es"}], [{"code": "fr"}], [{"code": "aa"}], []):
response = self.send_patch(client, {"language_proficiencies": proficiencies})
self.assertItemsEqual(response.data["language_proficiencies"], proficiencies)
@ddt.data(
(
u"not_a_list",
{u'non_field_errors': [u'Expected a list of items but got type "unicode".']}
),
(
[u"not_a_JSON_object"],
[{u'non_field_errors': [u'Invalid data. Expected a dictionary, but got unicode.']}]
),
(
[{}],
[{'code': [u'This field is required.']}]
),
(
[{u"code": u"invalid_language_code"}],
[{'code': [u'"invalid_language_code" is not a valid choice.']}]
),
(
[{u"code": u"kw"}, {u"code": u"el"}, {u"code": u"kw"}],
[u'The language_proficiencies field must consist of unique languages.']
),
)
@ddt.unpack
def test_patch_invalid_language_proficiencies(self, patch_value, expected_error_message):
"""
Verify we handle error cases when patching the language_proficiencies
field.
"""
client = self.login_client("client", "user")
response = self.send_patch(client, {"language_proficiencies": patch_value}, expected_status=400)
self.assertEqual(
response.data["field_errors"]["language_proficiencies"]["developer_message"],
u"Value '{patch_value}' is not valid for field 'language_proficiencies': {error_message}".format(
patch_value=patch_value,
error_message=expected_error_message
)
)
@mock.patch('openedx.core.djangoapps.user_api.accounts.serializers.AccountUserSerializer.save')
def test_patch_serializer_save_fails(self, serializer_save):
"""
Test that AccountUpdateErrors are passed through to the response.
"""
serializer_save.side_effect = [Exception("bummer"), None]
self.client.login(username=self.user.username, password=TEST_PASSWORD)
error_response = self.send_patch(self.client, {"goals": "save an account field"}, expected_status=400)
self.assertEqual(
"Error thrown when saving account updates: 'bummer'",
error_response.data["developer_message"]
)
self.assertIsNone(error_response.data["user_message"])
@override_settings(PROFILE_IMAGE_BACKEND=TEST_PROFILE_IMAGE_BACKEND)
def test_convert_relative_profile_url(self):
"""
Test that when TEST_PROFILE_IMAGE_BACKEND['base_url'] begins
with a '/', the API generates the full URL to profile images based on
the URL of the request.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
response = self.send_get(self.client)
self.assertEqual(
response.data["profile_image"],
{
"has_image": False,
"image_url_full": "http://testserver/static/default_50.png",
"image_url_small": "http://testserver/static/default_10.png"
}
)
@ddt.data(
("client", "user", True),
("different_client", "different_user", False),
("staff_client", "staff_user", True),
)
@ddt.unpack
def test_parental_consent(self, api_client, requesting_username, has_full_access):
"""
Verifies that under thirteens never return a public profile.
"""
client = self.login_client(api_client, requesting_username)
# Set the user to be ten years old with a public profile
legacy_profile = UserProfile.objects.get(id=self.user.id)
current_year = datetime.datetime.now().year
legacy_profile.year_of_birth = current_year - 10
legacy_profile.save()
set_user_preference(self.user, ACCOUNT_VISIBILITY_PREF_KEY, ALL_USERS_VISIBILITY)
# Verify that the default view is still private (except for clients with full access)
response = self.send_get(client)
if has_full_access:
data = response.data
self.assertEqual(19, len(data))
self.assertEqual(self.user.username, data["username"])
self.assertEqual(self.user.first_name + " " + self.user.last_name, data["name"])
self.assertEqual(self.user.email, data["email"])
self.assertEqual(current_year - 10, data["year_of_birth"])
for empty_field in ("country", "level_of_education", "mailing_address", "bio"):
self.assertIsNone(data[empty_field])
self.assertEqual("m", data["gender"])
self.assertEqual("Learn a lot", data["goals"])
self.assertTrue(data["is_active"])
self.assertIsNotNone(data["date_joined"])
self._verify_profile_image_data(data, False)
self.assertTrue(data["requires_parental_consent"])
self.assertEqual(PRIVATE_VISIBILITY, data["account_privacy"])
else:
self._verify_private_account_response(
response, requires_parental_consent=True, account_privacy=PRIVATE_VISIBILITY
)
# Verify that the shared view is still private
response = self.send_get(client, query_parameters='view=shared')
self._verify_private_account_response(
response, requires_parental_consent=True, account_privacy=PRIVATE_VISIBILITY
)
@skip_unless_lms
class TestAccountAPITransactions(TransactionTestCase):
"""
Tests the transactional behavior of the account API
"""
shard = 2
def setUp(self):
super(TestAccountAPITransactions, self).setUp()
self.client = APIClient()
self.user = UserFactory.create(password=TEST_PASSWORD)
self.url = reverse("accounts_api", kwargs={'username': self.user.username})
@mock.patch('student.views.do_email_change_request')
def test_update_account_settings_rollback(self, mock_email_change):
"""
Verify that updating account settings is transactional when a failure happens.
"""
# Send a PATCH request with updates to both profile information and email.
# Throw an error from the method that is used to process the email change request
# (this is the last thing done in the api method). Verify that the profile did not change.
mock_email_change.side_effect = [ValueError, "mock value error thrown"]
self.client.login(username=self.user.username, password=TEST_PASSWORD)
old_email = self.user.email
json_data = {"email": "foo@bar.com", "gender": "o"}
response = self.client.patch(self.url, data=json.dumps(json_data), content_type="application/merge-patch+json")
self.assertEqual(400, response.status_code)
# Verify that GET returns the original preferences
response = self.client.get(self.url)
data = response.data
self.assertEqual(old_email, data["email"])
self.assertEqual(u"m", data["gender"])
|
ahmedaljazzar/edx-platform
|
openedx/core/djangoapps/user_api/accounts/tests/test_views.py
|
Python
|
agpl-3.0
| 37,691
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "Product Purchase Warrant",
"version": "1.0",
"depends": ["base", "stock", "product"],
"author": "OdooMRP team",
"contributors": ["Mikel Arregi <mikelarregi@avanzosc.es>"],
"category": "Product",
"description": """
Sets a purchase warranty term on product supplier info,
    and applies it to incoming lots for this product and supplier
""",
'data': ["views/stock_view.xml", "views/product_view.xml",
"wizard/stock_transfer_details_view.xml"
],
"installable": True,
"auto_install": False,
}
|
StefanRijnhart/odoomrp-wip
|
product_purchase_warrant/__openerp__.py
|
Python
|
agpl-3.0
| 1,425
|
# GrandPA, a LedBar lighting controller.
#
# Copyright (c) 2010 aszlig <"^[0-9]+$"@regexmail.net>
#
# GrandPA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GrandPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GrandPA. If not, see <http://www.gnu.org/licenses/>.
COLORMAP = {
(0xaa, 0x00, 0x00): 'bright_red', # dark red
(0x00, 0xaa, 0x00): 'japanese_laurel', # dark green
(0x00, 0x00, 0xaa): 'dark_blue', # dark blue
(0x00, 0xaa, 0xaa): 'persian_green', # dark cyan
(0xaa, 0x00, 0xaa): 'flirt', # dark magenta
(0xaa, 0x55, 0x00): 'chelsea_gem', # dark yellow
(0xaa, 0xaa, 0xaa): 'silver_chalice', # dark white
(0xff, 0x55, 0x55): 'persimmon', # light red
(0x55, 0xff, 0x55): 'screaming_green', # light green
(0x55, 0x55, 0xff): 'dodger_blue', # light blue
(0x55, 0xff, 0xff): 'aquamarine', # light cyan
(0xff, 0x55, 0xff): 'pink_flamingo', # light magenta
(0xff, 0xff, 0x55): 'gorse', # light yellow
(0xff, 0xff, 0xff): 'white', # light white
(0x55, 0x55, 0x55): 'emperor', # light black
}
class Color(object):
def __init__(self, red=0, green=0, blue=0, alpha=255):
self.__red = int(red)
self.__green = int(green)
self.__blue = int(blue)
self.__alpha = int(alpha)
self.reset_changed()
def __correct_channel(self, value):
if value > 255:
return 255
elif value < 0:
return 0
return value
def _set_red(self, val):
self._changed[0] = True
self.__red = self.__correct_channel(val)
red = property(lambda x: x.__red, _set_red)
def _set_green(self, val):
self._changed[1] = True
self.__green = self.__correct_channel(val)
green = property(lambda x: x.__green, _set_green)
def _set_blue(self, val):
self._changed[2] = True
self.__blue = self.__correct_channel(val)
blue = property(lambda x: x.__blue, _set_blue)
def _set_alpha(self, val):
self._changed[3] = True
self.__alpha = self.__correct_channel(val)
alpha = property(lambda x: x.__alpha, _set_alpha)
def __set_channels_direct(self, red, green, blue, alpha):
if red is not None:
self.__red = int(red)
if green is not None:
self.__green = int(green)
if blue is not None:
self.__blue = int(blue)
if alpha is not None:
self.__alpha = int(alpha)
@property
def has_changed(self):
return any(self._changed)
def __repr__(self):
return "Color(%d, %d, %d, %d)" % self.to_tuple(alpha=True)
def __add__(self, color):
"""
Alpha blending between own instance and color.
"""
(sr, sg, sb, sa) = self.to_tuple(floats=True, alpha=True)
(dr, dg, db, da) = color.to_tuple(floats=True, alpha=True)
if sa <= 0:
return Color(dr * 255, dg * 255, db * 255, da * 255)
a = 1.0 - (1.0 - da) * (1.0 - sa)
r = dr * da / a + sr * sa * (1.0 - da) / a
g = dg * da / a + sg * sa * (1.0 - da) / a
b = db * da / a + sb * sa * (1.0 - da) / a
return Color(r * 255, g * 255, b * 255, a * 255)
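    # Worked example of the blend above, treating ``color`` as the foreground:
    # Color(255, 0, 0, 255) + Color(0, 0, 255, 128) gives da = 128 / 255 ~ 0.502,
    # a = 1 - (1 - 0.502) * (1 - 1.0) = 1.0, r ~ 1.0 * (1 - 0.502) ~ 0.498 and
    # b ~ 0.502, i.e. roughly Color(127, 0, 128, 255).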
def __nonzero__(self):
return self.__red > 0 or self.green > 0 or self.blue > 0
def _convert(self, converter, floats=False, alpha=False, use_alpha=None):
chans = [self.__red, self.__green, self.__blue]
if alpha:
chans.append(self.__alpha)
else:
chans = [c / 255.0 * self.__alpha for c in chans]
if use_alpha is not None:
chans = [c / 255.0 * use_alpha for c in chans]
if floats:
chans = [c / 255.0 for c in chans]
else:
chans = [int(c) for c in chans]
return converter(chans)
def to_tuple(self, *args, **kwargs):
return self._convert(tuple, *args, **kwargs)
def to_list(self, *args, **kwargs):
return self._convert(list, *args, **kwargs)
def copy(self):
return Color(*self.to_tuple(alpha=True))
def set_channels(self, red, green, blue, alpha=255):
"""
Set colors and/or the alpha value.
"""
self.red = int(red)
self.green = int(green)
self.blue = int(blue)
self.alpha = int(alpha)
def set_color(self, color):
if isinstance(color, Color):
self.red = color.red
self.green = color.green
self.blue = color.blue
self.alpha = color.alpha
else:
self.set_channels(*color)
def get_name(self):
"""
Get color name for GrandPA curses attributes.
"""
cur_color = self.to_tuple()
highest = max(cur_color)
if highest <= 0:
return None
if highest > 170:
mod = 255.0 / highest
elif highest > 85:
mod = 170.0 / highest
else:
            mod = 85.0 / highest
red, green, blue = [c * mod for c in cur_color]
def _keyfunc(color):
r = red - color[0]
g = green - color[1]
b = blue - color[2]
# calculate the euclidean distance
return r * r + g * g + b * b
match = min(COLORMAP, key=_keyfunc)
return COLORMAP.get(match)
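    # Example of the lookup above: Color(250, 60, 60).get_name() scales the
    # channels by 255.0 / 250 to roughly (255, 61, 61); the squared distance to
    # (0xff, 0x55, 0x55) is ~1.2e3 versus ~1.5e4 for (0xaa, 0x00, 0x00), so the
    # nearest COLORMAP entry is 'persimmon'.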
def patch_color(self, color, in_place=False):
"""
Patch differences to color *IN PLACE*.
"""
if not color.has_changed:
return
diff = zip(color._changed, color.to_tuple(alpha=True))
new = [None if not f else c for f, c in diff]
self.__set_channels_direct(*new)
def reset_changed(self):
self._changed = [False] * 4
def fade_to(self, color, steps=255):
        step = int(255 / steps)
col = color.copy()
for alpha in xrange(0, 255, step):
col.alpha = alpha
yield self.__add__(col)
if __name__ == '__main__':
c1 = Color(255, 255, 255)
c2 = Color(0, 0, 0)
c2.red = 22
c2.alpha = 0
c1.patch_color(c2)
print c1
raise SystemExit
print Color(100, 200, 0).get_name()
print Color(100, 200, 0).get_name()
for c in Color(255, 0, 0, 20).fade_to(Color(0, 0, 255)):
print c
|
aszlig/GrandPA
|
grandpa/color.py
|
Python
|
gpl-3.0
| 6,805
|
#!/usr/bin/env python
#coding=utf-8
# Nathive (and this file) is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or newer.
#
# You should have received a copy of the GNU General Public License along with
# this file. If not, see <http://www.gnu.org/licenses/>.
import gtk
def margin(parent, width, height=0):
"""Put an invisible separator between two widgets.
@parent: The parent widget.
@size: Separator width or height."""
if not height: height = width
box = gtk.HBox(False, 0)
box.set_size_request(width, height)
parent.pack_start(box, False, False, 0)
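# Minimal usage sketch for margin() (``sidebar`` is an assumed, caller-created
# container, not something defined in this module):
#
#     sidebar = gtk.VBox(False, 0)
#     sidebar.pack_start(gtk.Button('Open'), False, False, 0)
#     margin(sidebar, 8)        # 8x8 px invisible gap
#     sidebar.pack_start(gtk.Button('Save'), False, False, 0)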
def separator(parent):
"""Put a horizontal separator between two widgets.
@parent: The parent widget."""
separator = gtk.HSeparator()
parent.pack_start(separator, False, False, 4)
def expander(parent):
box = gtk.HBox(False, 0)
parent.pack_start(box, True, True, 0)
|
johnnyLadders/Nathive_CITA
|
nathive/gui/utils.py
|
Python
|
gpl-3.0
| 1,028
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1beta1.types import index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
from google.longrunning import operations_pb2 # type: ignore
from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import IndexEndpointServiceGrpcTransport
class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport):
"""gRPC AsyncIO backend transport for IndexEndpointService.
A service for managing Vertex AI's IndexEndpoints.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.CreateIndexEndpointRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the create index endpoint method over gRPC.
Creates an IndexEndpoint.
Returns:
Callable[[~.CreateIndexEndpointRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_index_endpoint" not in self._stubs:
self._stubs["create_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint",
request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_index_endpoint"]
@property
def get_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.GetIndexEndpointRequest],
Awaitable[index_endpoint.IndexEndpoint],
]:
r"""Return a callable for the get index endpoint method over gRPC.
Gets an IndexEndpoint.
Returns:
Callable[[~.GetIndexEndpointRequest],
Awaitable[~.IndexEndpoint]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_index_endpoint" not in self._stubs:
self._stubs["get_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint",
request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize,
response_deserializer=index_endpoint.IndexEndpoint.deserialize,
)
return self._stubs["get_index_endpoint"]
@property
def list_index_endpoints(
self,
) -> Callable[
[index_endpoint_service.ListIndexEndpointsRequest],
Awaitable[index_endpoint_service.ListIndexEndpointsResponse],
]:
r"""Return a callable for the list index endpoints method over gRPC.
Lists IndexEndpoints in a Location.
Returns:
Callable[[~.ListIndexEndpointsRequest],
Awaitable[~.ListIndexEndpointsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_index_endpoints" not in self._stubs:
self._stubs["list_index_endpoints"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints",
request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize,
response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize,
)
return self._stubs["list_index_endpoints"]
@property
def update_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.UpdateIndexEndpointRequest],
Awaitable[gca_index_endpoint.IndexEndpoint],
]:
r"""Return a callable for the update index endpoint method over gRPC.
Updates an IndexEndpoint.
Returns:
Callable[[~.UpdateIndexEndpointRequest],
Awaitable[~.IndexEndpoint]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_index_endpoint" not in self._stubs:
self._stubs["update_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint",
request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize,
response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize,
)
return self._stubs["update_index_endpoint"]
@property
def delete_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.DeleteIndexEndpointRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the delete index endpoint method over gRPC.
Deletes an IndexEndpoint.
Returns:
Callable[[~.DeleteIndexEndpointRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_index_endpoint" not in self._stubs:
self._stubs["delete_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint",
request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_index_endpoint"]
@property
def deploy_index(
self,
) -> Callable[
[index_endpoint_service.DeployIndexRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the deploy index method over gRPC.
Deploys an Index into this IndexEndpoint, creating a
DeployedIndex within it.
Only non-empty Indexes can be deployed.
Returns:
Callable[[~.DeployIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "deploy_index" not in self._stubs:
self._stubs["deploy_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex",
request_serializer=index_endpoint_service.DeployIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["deploy_index"]
@property
def undeploy_index(
self,
) -> Callable[
[index_endpoint_service.UndeployIndexRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the undeploy index method over gRPC.
Undeploys an Index from an IndexEndpoint, removing a
DeployedIndex from it, and freeing all resources it's
using.
Returns:
Callable[[~.UndeployIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "undeploy_index" not in self._stubs:
self._stubs["undeploy_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex",
request_serializer=index_endpoint_service.UndeployIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["undeploy_index"]
__all__ = ("IndexEndpointServiceGrpcAsyncIOTransport",)
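# Illustrative wiring sketch using only names defined in this module; the
# generated async client that normally consumes this transport is assumed and
# not shown:
#
#     channel = IndexEndpointServiceGrpcAsyncIOTransport.create_channel(
#         host="aiplatform.googleapis.com",
#     )
#     transport = IndexEndpointServiceGrpcAsyncIOTransport(channel=channel)
#     # The transport's RPC callables (e.g. transport.deploy_index) can then be
#     # invoked with request objects, or the transport handed to the client.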
|
sasha-gitg/python-aiplatform
|
google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py
|
Python
|
apache-2.0
| 20,111
|
from django.views.generic.base import TemplateView
from django.shortcuts import get_object_or_404
from daprojects_core import models
from .maps import get_map_for_project
class HomeView(TemplateView):
template_name = 'daprojects_webapp/home.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['projects_and_maps'] = [
(project, get_map_for_project(project)) for project in models.Project.objects.all()
]
return context
class ProjectView(TemplateView):
template_name = 'daprojects_webapp/project.html'
def get_context_data(self, project_id=None, **kwargs):
context = super().get_context_data(**kwargs)
project = get_object_or_404(models.Project, pk=project_id)
map = get_map_for_project(project)
context['project'] = project
context['map'] = map
context['modules_and_placeholders'] = zip(project.first_level_modules, map.placeholders)
return context
class ModuleView(TemplateView):
template_name = 'daprojects_webapp/module.html'
def get_context_data(self, project_id=None, module_id=None, **kwargs):
context = super().get_context_data(**kwargs)
project = get_object_or_404(models.Project, pk=project_id)
module = get_object_or_404(models.Module, pk=module_id, project=project)
context['project'] = project
context['module'] = module
return context
|
PIWEEK/dungeons-and-projects
|
server/daprojects_webapp/views.py
|
Python
|
agpl-3.0
| 1,477
|
__copyright__ = "Uwe Krien"
__license__ = "GPLv3"
import pandas as pd
# import geopandas as gpd
import numpy as np
import config as cfg
import configuration as config
import logging
import os
import feedin as f
import pvlib
import datetime
import oemof.tools.logger as logger
from matplotlib import pyplot as plt
# import plots
def get_full_load_hours():
"""pass"""
c = config.get_configuration()
feedinpath = os.path.join(c.paths['feedin'], '{type}', c.pattern['feedin'])
my_idx = pd.MultiIndex(levels=[[], []], labels=[[], []],
names=['year', 'key'])
df = pd.DataFrame(index=my_idx, columns=['wind'])
years = list()
for vtype in ['solar', 'wind']:
for year in range(1970, 2020):
if os.path.isfile(feedinpath.format(year=year, type=vtype.lower())):
years.append(year)
years = list(set(years))
# opening one file to get the keys of the weather fields and the columns of
# the solar file (the columns represent the sets).
file = pd.HDFStore(feedinpath.format(year=years[0], type='solar'))
keys = file.keys()
columns = list(file[keys[0]].columns)
for col in columns:
df[col] = ''
file.close()
for key in keys:
df.loc[(0, int(key[2:])), :] = 0
df.loc[(0, 0), :] = 0
for year in years:
df.loc[(year, 0), :] = 0
logging.info("Processing: {0}".format(year))
solar = pd.HDFStore(feedinpath.format(year=year, type='solar'))
wind = pd.HDFStore(feedinpath.format(year=year, type='wind'))
for key in keys:
skey = int(key[2:])
df.loc[(year, skey), 'wind'] = wind[key].sum()
df.loc[(0, skey), 'wind'] += df.loc[(year, skey), 'wind']
df.loc[(year, 0), 'wind'] += df.loc[(year, skey), 'wind']
df.loc[(0, 0), 'wind'] += df.loc[(year, skey), 'wind']
df.loc[(year, skey), columns] = solar[key].sum()
df.loc[(0, skey), columns] += df.loc[(year, skey), columns]
df.loc[(year, 0), columns] += df.loc[(year, skey), columns]
df.loc[(0, 0), columns] += df.loc[(year, skey), columns]
solar.close()
wind.close()
df.loc[(year, 0), :] = (df.loc[(year, 0), :] / len(keys))
for key in keys:
df.loc[(0, int(key[2:])), :] = df.loc[(0, int(key[2:])), :] / len(years)
df.loc[(0, 0), :] = df.loc[(0, 0), :] / (len(years) * len(keys))
df.sort_index(inplace=True)
df.to_csv(os.path.join(c.paths['analysis'], 'full_load_hours.csv'))
def analyse_pv_types(year, key, orientation):
c = config.get_configuration()
weatherpath = os.path.join(c.paths['weather'], c.pattern['weather'])
weather = pd.read_hdf(weatherpath.format(year=year), 'A' + str(key))
latlon = pd.read_csv(os.path.join(c.paths['geometry'],
c.files['grid_centroid']),
index_col='gid').loc[key]
location = {'latitude': latlon['st_y'], 'longitude': latlon['st_x']}
weather = f.adapt_weather_to_pvlib(weather, location)
sandia_modules = pvlib.pvsystem.retrieve_sam('sandiamod')
sapm_inverters = pvlib.pvsystem.retrieve_sam('sandiainverter')
invertername = 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'
for modu in sandia_modules.keys():
if 'BP_Solar' in modu:
print(modu)
exit(0)
df_ts_ac = pd.DataFrame()
df = pd.DataFrame()
length = len(sandia_modules.keys())
for smod in sandia_modules.keys():
name = smod # .replace('__', '_')
logging.info("{0}, {1}".format(name, length))
length -= 1
smodule = {
'module_parameters': sandia_modules[smod],
'inverter_parameters': sapm_inverters[invertername],
'surface_azimuth': orientation['azimuth'],
'surface_tilt': orientation['tilt'],
'albedo': 0.2}
p_peak = (
smodule['module_parameters'].Impo *
smodule['module_parameters'].Vmpo)
mc = f.feedin_pvlib_modelchain(location, smodule, weather)
df_ts_ac[name] = mc.ac.clip(0).fillna(0).div(p_peak)
df.loc[name, 'ac'] = df_ts_ac[name][:8760].sum()
df.loc[name, 'dc_norm'] = mc.dc.p_mp.clip(0).div(p_peak).sum()
df.loc[name, 'dc'] = mc.dc.p_mp.clip(0).sum()
df.to_csv(os.path.join(c.paths['analysis'], 'module_feedin.csv'))
df_ts_ac.to_csv(os.path.join(c.paths['analysis'],
'module_feedin_ac_ts.csv'))
def analyse_performance_ratio(year, key):
c = config.get_configuration()
sets = dict()
set_ids = ['solar_set1', 'solar_set2', 'solar_set3', 'solar_set4']
sets['system'] = list()
for s in set_ids:
m = cfg.get(s, 'module_name')
i = cfg.get(s, 'inverter_name')
sets['system'].append((m, i))
sets['azimuth'] = [120, 180, 240]
sets['tilt'] = [0, 30, 60, 90]
weatherpath = os.path.join(c.paths['weather'], c.pattern['weather'])
weather = pd.read_hdf(weatherpath.format(year=year), 'A' + str(key))
latlon = pd.read_csv(os.path.join(c.paths['geometry'],
c.files['coastdatgrid_centroids']),
index_col='gid').loc[key]
location = {'latitude': latlon['st_y'], 'longitude': latlon['st_x']}
weather = f.adapt_weather_to_pvlib(weather, location)
sandia_modules = pvlib.pvsystem.retrieve_sam('sandiamod')
sapm_inverters = pvlib.pvsystem.retrieve_sam('sandiainverter')
my_index = pd.MultiIndex(levels=[[], [], []],
labels=[[], [], []],
names=['name', 'azimuth', 'tilt'])
cols = ['irrad', 'dc', 'ac', 'dc/i', 'ac/i', 'ac/dc']
df = pd.DataFrame(columns=cols, index=my_index)
for system in sets['system']:
for tlt in sets['tilt']:
if tlt == 0:
az_s = [0]
else:
az_s = sets['azimuth']
for az in az_s:
name = system[0].replace('_', '')[:10]
logging.info("{0}, {1}, {2}".format(system, tlt, az))
smodule = {
'module_parameters': sandia_modules[system[0]],
'inverter_parameters': sapm_inverters[system[1]],
'surface_azimuth': az,
'surface_tilt': tlt,
'albedo': 0.2}
p_peak = (
smodule['module_parameters'].Impo *
smodule['module_parameters'].Vmpo)
area = smodule['module_parameters'].Area
mc = f.feedin_pvlib_modelchain(location, smodule, weather)
dc = mc.dc.p_mp.clip(0).div(p_peak).sum()
ac = mc.ac.clip(0).div(p_peak).sum()
i = mc.total_irrad['poa_global'].multiply(area).div(
p_peak).sum()
df.loc[(name, az, tlt), 'dc'] = dc
df.loc[(name, az, tlt), 'ac'] = ac
df.loc[(name, az, tlt), 'irrad'] = i
df.loc[(name, az, tlt), 'dc/i'] = dc / i
df.loc[(name, az, tlt), 'ac/i'] = ac / i
df.loc[(name, az, tlt), 'ac/dc'] = ac / dc
# df_ts.to_csv(os.path.join(paths['analysis'], 'orientation_feedin.csv'))
df.to_csv(os.path.join(c.paths['analysis'], 'performance_ratio.csv'))
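# Reading the ratios above: with yields and in-plane irradiation all normalised
# to peak power, 'ac/i' is the usual performance ratio, e.g. ac = 950 and
# irrad = 1100 (kWh/kWp) give ac/i ~ 0.86, while 'ac/dc' isolates the inverter
# losses alone.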
def get_index_of_max(df):
column = None
idx = None
max_value = df.max().max()
for col in df:
try:
idx = df[col][df[col] == max_value].index[0]
column = col
except IndexError:
pass
return column, idx
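# Example: for pd.DataFrame({'a': [1, 9], 'b': [4, 2]}) the maximum value 9 sits
# in column 'a' at index 1, so get_index_of_max returns ('a', 1).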
def analyse_pv_orientation_region():
c = config.get_configuration()
weatherpath = os.path.join(c.paths['weather'], c.pattern['weather'])
my_index = pd.MultiIndex(levels=[[], [], []],
labels=[[], [], []],
names=['coastdat', 'year', 'system'])
my_cols = pd.MultiIndex(levels=[[], []],
labels=[[], []],
names=['type', 'angle'])
df = pd.DataFrame(columns=my_cols, index=my_index)
key = 1141078
for n in range(22):
key += 1
key -= 1000
for year in [2008]:
weather = pd.read_hdf(weatherpath.format(year=year), 'A' + str(key))
latlon = pd.read_csv(
os.path.join(c.paths['geometry'],
c.files['coastdatgrid_centroids']),
index_col='gid').loc[key]
location = {'latitude': latlon['st_y'], 'longitude': latlon['st_x']}
weather = f.adapt_weather_to_pvlib(weather, location)
sandia_modules = pvlib.pvsystem.retrieve_sam('sandiamod')
sapm_inverters = pvlib.pvsystem.retrieve_sam('sandiainverter')
systems = {
# 1: {'m': 'LG_LG290N1C_G3__2013_',
# 'i': 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'},
2: {'m': 'BP_Solar_BP2150S__2000__E__',
'i':
'SolarBridge_Technologies__P235HV_240_240V__CEC_2011_'},
# 3: {'m': 'Solar_Frontier_SF_160S__2013_',
# 'i': 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'}
}
for system in systems.values():
name = system['m'][:2].replace('o', 'F')
logging.info("{0} - {1} - {2}".format(key, year, name))
azimuth = np.arange(190, 201, 0.5)
tilt = np.arange(32, 39, 0.5)
dc = pd.DataFrame()
ac = pd.DataFrame()
ir = pd.DataFrame()
for az in azimuth:
for tlt in tilt:
smodule = {
'module_parameters': sandia_modules[system['m']],
'inverter_parameters': sapm_inverters[system['i']],
'surface_azimuth': az,
'surface_tilt': tlt,
'albedo': 0.2}
p_peak = (
smodule['module_parameters'].Impo *
smodule['module_parameters'].Vmpo)
mc = f.feedin_pvlib_modelchain(location, smodule,
weather)
dc.loc[az, tlt] = mc.dc.p_mp.clip(0).div(p_peak).sum()
ac.loc[az, tlt] = mc.ac.clip(0).div(p_peak).sum()
ir.loc[az, tlt] = mc.total_irrad['poa_global'].clip(
0).sum()
dc_max = get_index_of_max(dc)
df.loc[(key, year, name), ('dc', 'tilt')] = dc_max[0]
df.loc[(key, year, name), ('dc', 'azimuth')] = dc_max[1]
                ac_max = get_index_of_max(ac)
df.loc[(key, year, name), ('ac', 'tilt')] = ac_max[0]
df.loc[(key, year, name), ('ac', 'azimuth')] = ac_max[1]
                ir_max = get_index_of_max(ir)
df.loc[(key, year, name), ('ir', 'tilt')] = ir_max[0]
df.loc[(key, year, name), ('ir', 'azimuth')] = ir_max[1]
df.to_csv(os.path.join(c.paths['analysis'],
'optimal_orientation_multi_BP.csv'))
logging.info('Done')
def analyse_optimal_orientation_file():
c = config.get_configuration()
df = pd.read_csv(os.path.join(c.paths['analysis'],
'optimal_orientation_multi.csv'),
index_col=[0, 1, 2], header=[0, 1])
df.sort_index(axis=0, inplace=True)
df.sort_index(axis=1, inplace=True)
df['avg', 'azimuth'] = df.loc[:, (slice(None), 'azimuth')].sum(1).div(3)
df['avg', 'tilt'] = df.loc[:, (slice(None), 'tilt')].sum(1).div(3)
print(df.index)
print(df['avg'].groupby('year').mean())
def analyse_pv_orientation(year, key, module_name):
c = config.get_configuration()
weatherpath = os.path.join(c.paths['weather'], c.pattern['weather'])
weather = pd.read_hdf(weatherpath.format(year=year), 'A' + str(key))
latlon = pd.read_csv(os.path.join(c.paths['geometry'],
c.files['grid_centroid']),
index_col='gid').loc[key]
location = {'latitude': latlon['st_y'], 'longitude': latlon['st_x']}
weather = f.adapt_weather_to_pvlib(weather, location)
sandia_modules = pvlib.pvsystem.retrieve_sam('sandiamod')
sapm_inverters = pvlib.pvsystem.retrieve_sam('sandiainverter')
invertername = 'SMA_America__SB5000US_11_208V__CEC_2007_'
azimuth = range(0, 361, 10)
tilt = range(0, 91, 10)
# df_ts = pd.DataFrame()
df_dc = pd.DataFrame()
df_ac = pd.DataFrame()
df_sun = pd.DataFrame()
length = len(azimuth) * len(tilt)
# from matplotlib import pyplot as plt
for az in azimuth:
for tlt in tilt:
name = 'az{0}_tlt{1}'.format(az, tlt)
logging.info("{0}, {1}".format(name, length))
length -= 1
smodule = {
'module_parameters': sandia_modules[module_name],
'inverter_parameters': sapm_inverters[invertername],
'surface_azimuth': az,
'surface_tilt': tlt,
'albedo': 0.2}
p_peak = (
smodule['module_parameters'].Impo *
smodule['module_parameters'].Vmpo)
mc = f.feedin_pvlib_modelchain(location, smodule, weather)
df_dc.loc[az, tlt] = mc.dc.p_mp.clip(0).div(p_peak).sum()
df_ac.loc[az, tlt] = mc.ac.clip(0).div(p_peak).sum()
# print(mc.total_irrad.columns)
# print(mc.total_irrad['poa_global'].fillna(0).div(p_peak).sum())
df_sun.loc[az, tlt] = mc.total_irrad['poa_global'].div(p_peak).sum()
# df_ts.to_csv(os.path.join(paths['analysis'], 'orientation_feedin.csv'))
df_sun.to_csv(os.path.join(c.paths['analysis'], 'sun.csv'))
df_dc.to_csv(os.path.join(c.paths['analysis'], 'orientation_feedin_dc.csv'))
df_ac.to_csv(os.path.join(c.paths['analysis'], 'orientation_feedin_ac.csv'))
def analyse_inverter(year, key, module_name, orientation):
c = config.get_configuration()
weatherpath = os.path.join(c.paths['weather'], c.pattern['weather'])
weather = pd.read_hdf(weatherpath.format(year=year), 'A' + str(key))
latlon = pd.read_csv(os.path.join(c.paths['geometry'],
c.files['grid_centroid']),
index_col='gid').loc[key]
location = {'latitude': latlon['st_y'], 'longitude': latlon['st_x']}
weather = f.adapt_weather_to_pvlib(weather, location)
sandia_modules = pvlib.pvsystem.retrieve_sam('sandiamod')
sapm_inverters = pvlib.pvsystem.retrieve_sam('sandiainverter')
inv = pd.DataFrame()
failed = pd.Series()
length = len(sapm_inverters.keys())
for sinv in sapm_inverters.keys():
name = sinv # .replace('__', '_')
logging.info("{0}, {1}".format(name, length))
length -= 1
smodule = {
'module_parameters': sandia_modules[module_name],
'inverter_parameters': sapm_inverters[sinv],
'surface_azimuth': orientation['azimuth'],
'surface_tilt': orientation['tilt'],
'albedo': 0.2}
p_peak = (
smodule['module_parameters'].Impo *
smodule['module_parameters'].Vmpo)
try:
mc = f.feedin_pvlib_modelchain(location, smodule, weather)
inv.loc[name, 'ac'] = mc.ac.clip(0).fillna(0).div(p_peak).sum()
inv.loc[name, 'dc'] = mc.dc.p_mp.clip(0).fillna(0).div(p_peak).sum()
except ValueError:
logging.info("Inverter {0} failed.".format(name))
failed.loc[name] = 'failed'
inv.to_csv(os.path.join(c.paths['analysis'],
'sapm_inverters_feedin_full2.csv'))
failed.to_csv(os.path.join(c.paths['analysis'],
'sapm_inverters_failed.csv'))
def single_pv_set(year, key, module_name, inverter_name, orientation):
c = config.get_configuration()
weatherpath = os.path.join(c.paths['weather'], c.pattern['weather'])
weather = pd.read_hdf(weatherpath.format(year=year), 'A' + str(key))
latlon = pd.read_csv(os.path.join(c.paths['geometry'],
c.files['grid_centroid']),
index_col='gid').loc[key]
location = {'latitude': latlon['st_y'], 'longitude': latlon['st_x']}
weather = f.adapt_weather_to_pvlib(weather, location)
sandia_modules = pvlib.pvsystem.retrieve_sam('sandiamod')
sapm_inverters = pvlib.pvsystem.retrieve_sam('sandiainverter')
smodule = {
'module_parameters': sandia_modules[module_name],
'inverter_parameters': sapm_inverters[inverter_name],
'surface_azimuth': orientation['azimuth'],
'surface_tilt': orientation['tilt'],
'albedo': 0.2}
p_peak = (
smodule['module_parameters'].Impo *
smodule['module_parameters'].Vmpo)
mc = f.feedin_pvlib_modelchain(location, smodule, weather)
ac = mc.ac # .clip(0).fillna(0).div(p_peak)
dc = mc.dc.p_mp # .clip(0).fillna(0).div(p_peak)
print('ac:', ac.sum())
print('dc:', dc.sum())
def analyse_feedin_de(year):
c = config.get_configuration()
# read renewable powerplants
pp = pd.read_csv(os.path.join(c.paths['renewable'],
c.pattern['grouped'].format(cat='renewable')),
index_col=[0, 1, 2, 3])
# group renewable powerplants
my_index = pp.loc['Wind', year].groupby(level=0).sum().index
powerplants_renewable = pd.DataFrame(index=my_index)
for pptype in pp.index.levels[0]:
powerplants_renewable[pptype] = pp.loc[pptype, year].groupby(
level=0).sum()
# read wind feedin time series (feedin_wind)
feedin_wind = pd.read_csv(
os.path.join(c.paths['feedin'], 'wind', 'de21',
c.pattern['feedin_de21'].format(year=year, type='wind')),
index_col=0, header=[0, 1])
# multiply time series with installed capacity
wind = pd.DataFrame()
for reg in feedin_wind.columns.levels[0]:
wind[reg] = feedin_wind[reg].multiply(
powerplants_renewable.loc[reg, 'Wind'])
wind = wind.sum(1)
wind.to_csv(os.path.join(c.paths['analysis'], 'wind_de.csv'))
# read solar feedin time series (feedin_solar)
feedin_solar = pd.read_csv(
os.path.join(
c.paths['feedin'], 'solar', 'de21',
c.pattern['feedin_de21'].format(year=year, type='solar')),
index_col=0, header=[0, 1, 2], parse_dates=True)
set_name = {
'M_STP280S__I_GEPVb_5000_NA_240': 0.2,
'M_BP2150S__I_P235HV_240': 0.2,
'M_LG290G3__I_ABB_MICRO_025_US208': 0.3,
'M_SF160S___I_ABB_MICRO_025_US208': 0.3,
}
orientation = {
'tlt000_az000_alb02': 0.1,
'tlt090_az120_alb02': 0.0,
'tlt090_az180_alb02': 0.1,
'tlt090_az240_alb02': 0.0,
'tltopt_az120_alb02': 0.2,
'tltopt_az180_alb02': 0.4,
'tltopt_az240_alb02': 0.2,
}
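    # The loop below forms a weighted sum: each normalised feed-in series is
    # scaled by the region's installed capacity and by
    # set_name[pvset] * orientation[subset]. Both weight dicts sum to 1.0
    # (0.2 + 0.2 + 0.3 + 0.3 and, ignoring the zero entries,
    # 0.1 + 0.1 + 0.2 + 0.4 + 0.2), so the combined weights sum to 1.0 as well.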
solar = pd.DataFrame(index=feedin_solar.index)
for reg in feedin_solar.columns.levels[0]:
solar[reg] = 0
for pvset in set_name.keys():
for subset in orientation.keys():
if reg in powerplants_renewable.index:
solar[reg] += feedin_solar[reg, pvset, subset].multiply(
powerplants_renewable.loc[reg, 'Solar']).multiply(
set_name[pvset] * orientation[subset])
solar = solar.sum(1)
solar.to_csv(os.path.join(c.paths['analysis'], 'solar_de.csv'))
re_file = os.path.join(c.paths['time_series'],
c.files['renewables_time_series'])
start = datetime.datetime(year, 1, 1, 0, 0)
end = datetime.datetime(year, 12, 31, 23, 0)
ts = pd.read_csv(re_file, index_col='cet', parse_dates=True).loc[start:end]
print(ts['DE_solar_generation'].sum())
print(solar[:8760].sum())
print((solar[:8760].sum()) / (34.93 * 1000000))
new = pd.DataFrame()
new['own'] = solar[:8760]
new['other'] = ts['DE_solar_generation']
new.plot()
plt.show()
def get_maximum_value(filename, pathname=None, icol=None):
if pathname is None:
c = config.get_configuration()
pathname = c.paths['analysis']
if icol is None:
icol = [0]
table = pd.read_csv(os.path.join(pathname, filename), index_col=icol)
idx = None
column = None
if isinstance(table, pd.Series):
max_value = table.max()
idx = table[table == max_value].index[0]
elif isinstance(table, pd.DataFrame):
max_value = table.max().max()
for col in table:
try:
idx = table[col][table[col] == max_value].index[0]
column = col
except IndexError:
pass
print(column, idx)
# print("Maximum value of {0} is {1}".format(df_file, df.max().max()))
# return df.max().max()
def analyse_pv_capacity():
c = config.get_configuration()
cap = pd.read_csv(
os.path.join(c.paths['renewable'], c.pattern['grouped'].format(
cat='renewable')), index_col=[0, 1, 2])
cap_full = pd.read_csv(
os.path.join(c.paths['renewable'], c.pattern['prepared'].format(
cat='renewable')), index_col=['commissioning_date'],
parse_dates=True)
print(cap_full.loc[cap_full['energy_source_level_2'] == 'Solar'][
'electrical_capacity'].sum())
print(cap_full.columns)
select = cap_full.loc[pd.notnull(cap_full['decommissioning_date'])]
select = select.loc[select['energy_source_level_2'] == 'Solar'][
'electrical_capacity']
print(select.sum())
for y in range(2012, 2017):
print(y, 'my', cap.loc['Solar', y]['capacity'].sum())
re_file = os.path.join(c.paths['time_series'],
c.files['renewables_time_series'])
ts = pd.read_csv(re_file, index_col='cet', parse_dates=True)
for y in range(2012, 2016):
start = ts.loc[datetime.datetime(y, 1, 1, 0, 0)]['DE_solar_capacity']
end = ts.loc[datetime.datetime(y, 12, 31, 0, 0)]['DE_solar_capacity']
print(y, 'avg', (start + end) / 2)
for y in range(2012, 2017):
start = ts.loc[datetime.datetime(y, 1, 1, 0, 0)]['DE_solar_capacity']
print(y, 'start', start)
new = pd.DataFrame()
new['other'] = ts['DE_solar_capacity']
new['own'] = 0
new['quaschning'] = 0
new['fraunhofer'] = 0
quaschning = {
2016: 41.27,
2015: 39.74,
2014: 38.24,
2013: 36.34,
2012: 33.03}
fraunhofer = {
2016: 40.85,
2015: 39.33,
2014: 37.90,
2013: 36.71,
2012: 33.03}
for y in range(2012, 2017):
start = datetime.datetime(y, 1, 1, 0, 0)
end = datetime.datetime(y, 12, 31, 23, 0)
new.loc[(new.index <= end) & (new.index >= start), 'own'] = cap.loc[
'Solar', y]['capacity'].sum()
new.loc[(new.index <= end) & (new.index >= start), 'quaschning'] = (
quaschning[y] * 1000)
new.loc[(new.index <= end) & (new.index >= start), 'fraunhofer'] = (
fraunhofer[y] * 1000)
new.plot()
plt.show()
def weather_statistics():
c = config.get_configuration()
years = list()
for y in range(1970, 2020):
if os.path.isfile(os.path.join(c.paths['weather'],
c.pattern['weather'].format(year=y))):
years.append(y)
mypath = c.paths['geometry']
myfile = 'intersection_region_coastdatgrid.csv'
df = pd.read_csv(os.path.join(mypath, myfile), index_col='id')
ids = df[df.region_number < 19].coastdat.unique()
ghi = pd.DataFrame()
if not os.path.isfile(
os.path.join(c.paths['analysis'], 'ghi_coastdat.csv')):
for year in years:
print(year)
weather = pd.HDFStore(os.path.join(
c.paths['weather'], c.pattern['weather'].format(year=year)),
mode='r')
for cid in ids:
wdf = weather['/A{0}'.format(cid)]
ghi.loc[cid, year] = (wdf.dhi + wdf.dirhi).sum() / 1000
ghi.to_csv(os.path.join(c.paths['analysis'], 'ghi_coastdat.csv'))
df = pd.read_csv(os.path.join(c.paths['analysis'], 'ghi_coastdat.csv'),
index_col=[0])
df.columns = pd.to_numeric(df.columns)
dwd = pd.read_csv(os.path.join(c.paths['external'], 'dwd_ghi.csv'),
index_col=[0])
print(type(pd.Series(df.max())))
dwd['coastdat_max'] = round(df.max())
dwd['coastdat_mean'] = round(df.sum() / len(df))
dwd['coastdat_min'] = round(df.min())
print(dwd)
# dwd.plot(style=['b-.', 'b:', 'g-.', 'g:'], linewidth=[1,3,1,1,3,1])
fig, ax = plt.subplots()
ax = dwd[['dwd_max', 'coastdat_max']].plot(style=['b:', 'g:'], ax=ax)
ax = dwd[['dwd_mean', 'coastdat_mean']].plot(style=['b-', 'g-'], ax=ax,
linewidth=3)
dwd[['dwd_min', 'coastdat_min']].plot(style=['b-.', 'g-.'], ax=ax)
# dwd.plot(kind='bar')
plt.show()
n_df = 2
n_col = 3
n_ind = 17
neu1 = pd.DataFrame()
neu1['DWD (min)'] = dwd['dwd_min']
neu1['DWD (mean)'] = dwd['dwd_mean'] - dwd['dwd_min']
neu1['DWD (max)'] = dwd['dwd_max'] - dwd['dwd_mean']
neu1['dif'] = ((dwd['dwd_mean'] - dwd['coastdat_mean']) /
dwd['coastdat_mean'])
print(neu1)
neu2 = pd.DataFrame()
neu2['coastdat_min'] = dwd['coastdat_min']
neu2['coastdat_mean'] = dwd['coastdat_mean'] - dwd['coastdat_min']
neu2['coastDat-2 (max)'] = dwd['coastdat_max'] - dwd['coastdat_mean']
fig = plt.figure(figsize=(18, 9))
plt.rc('legend', **{'fontsize': 26})
plt.rcParams.update({'font.size': 26})
fig.subplots_adjust(left=0.09, bottom=0.1, right=0.88, top=0.98,
wspace=0.03, hspace=0.2)
axe = fig.add_subplot(111)
axe = neu1.plot(kind='bar', stacked=True, ax=axe, color=['#ffffff',
'green',
'green'])
axe = neu2.plot(kind='bar', stacked=True, ax=axe, color=['#ffffff',
'#286cf8',
'#286cf8'])
h, l = axe.get_legend_handles_labels() # get the handles we want to modify
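    # Shift and narrow the bar patches of the two stacked bar plots drawn
    # above so that the DWD stack and the coastDat-2 stack of each year end
    # up side by side (a grouped-stacked-bar workaround).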
for i in range(0, n_df * n_col, n_col): # len(h) = n_col * n_df
for j, pa in enumerate(h[i:i+n_col]):
for rect in pa.patches: # for each index
rect.set_x(
rect.get_x() + 1 / float(n_df + 1) * i / float(n_col))
rect.set_width(1 / float(n_df + 5))
axe.set_xticks((np.arange(0, 2 * n_ind, 4) - 0.5 / float(n_df + 1)) / 2.)
axe.set_xticklabels(np.arange(1998, 2015, 2), rotation=0)
# axe.set_title('Deutschlandweites Jahresmittel im Vergleich '
# '(DWD - coastDat-2) Quelle:' +
# 'http://www.dwd.de/DE/leistungen/solarenergie/' +
# 'lstrahlungskarten_su.html')
box = axe.get_position()
axe.set_position([box.x0, box.y0, box.width * 0.9, box.height])
h = h[1:2] + h[4:5]
l = ['DWD', 'coastDat-2']
l1 = axe.legend(h, l, loc='center left', bbox_to_anchor=(1, 0.5))
    axe.set_ylabel(r'Global horizontal irradiation in $\mathrm{kWh/m^2}$')
    axe.set_xlabel('Year')
axe.add_artist(l1)
x = np.arange(-0.19, 16.3, 1)
axe.plot(x, np.array(dwd['dwd_mean']), 'D', markersize=10, color='#004200')
x = np.arange(0.15, 17.1, 1)
axe.plot(x, np.array(dwd['coastdat_mean']), 'D', markersize=10,
color='#133374')
plt.show()
print(round((dwd.dwd_mean.div(dwd.coastdat_mean) - 1) * 100))
print(((dwd.dwd_mean.div(dwd.coastdat_mean) - 1) * 100).sum() / len(dwd))
exit(0)
def something():
c = config.get_configuration()
cap = pd.read_csv(
os.path.join(c.paths['analysis'], 'pv_data.csv'),
header=1, index_col='year')
print(cap.columns)
cap['inst_mean'] = cap.inst - (cap.inst - cap.inst.shift(1)) / 2
cap['diff'] = cap.inst - cap.inst.shift(1)
cap['VLSt'] = (cap.inst_mean / cap.erzeug) * 1000
cap['factor'] = cap['VLSt'] / cap['mean']
print(cap)
print(cap.sum() / 5)
if __name__ == "__main__":
# initialise logger
logger.define_logging()
# analyse_pv_orientation_region()
# weather_statistics()
# something()
# analyse_optimal_orientation_file()
# get_maximum_value('performance_ratio.csv', icol=[0, 1, 2])
# get_maximum_value('orientation_feedin_dc_high_resolution.csv')
# analyse_performance_ratio(2003, 1129087)
# analyse_pv_capacity()
analyse_feedin_de(2014)
# get_full_load_hours()
# analyse_pv_types(2003, 1129087, orientation={'azimuth': 180, 'tilt': 32})
# analyse_pv_orientation(2003, 1129087, 'LG_LG290N1C_G3__2013_')
# analyse_inverter(2003, 1129087, 'BP_Solar_BP2150S__2000__E__',
# orientation={'azimuth': 180, 'tilt': 35})
# single_pv_set(2003, 1129087, 'LG_LG290N1C_G3__2013_',
# 'SMA_America__SB9000TL_US_12__240V__240V__CEC_2012_',
# orientation={'azimuth': 180, 'tilt': 35})
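def _feedin_scaling_sketch():
    """Illustrative sketch, not part of the original module: the core step of
    analyse_feedin_de() -- scaling a normalised feed-in time series (output
    per unit of installed capacity) with each region's installed capacity and
    summing over the regions. Region names and numbers are made up."""
    normalised = pd.DataFrame({'DE01': [0.1, 0.4, 0.2],
                               'DE02': [0.0, 0.3, 0.5]})
    capacity = pd.Series({'DE01': 1200.0, 'DE02': 800.0})  # installed kW
    absolute = pd.DataFrame()
    for reg in normalised.columns:
        absolute[reg] = normalised[reg].multiply(capacity.loc[reg])
    return absolute.sum(1)  # total feed-in per time step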
|
rl-institut/reegis_hp
|
reegis_hp/de21/analysis.py
|
Python
|
gpl-3.0
| 29,649
|
import re
import json
import itertools
from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
compat_urllib_request,
compat_str,
get_element_by_attribute,
get_element_by_id,
ExtractorError,
)
class DailymotionBaseInfoExtractor(InfoExtractor):
@staticmethod
def _build_request(url):
"""Build a request with the family filter disabled"""
request = compat_urllib_request.Request(url)
request.add_header('Cookie', 'family_filter=off')
return request
class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
"""Information Extractor for Dailymotion"""
_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'
IE_NAME = u'dailymotion'
_TESTS = [
{
u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
u'file': u'x33vw9.mp4',
u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
u'info_dict': {
u"uploader": u"Amphora Alex and Van .",
u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
}
},
# Vevo video
{
u'url': u'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
u'file': u'USUV71301934.mp4',
u'info_dict': {
u'title': u'Roar (Official)',
u'uploader': u'Katy Perry',
u'upload_date': u'20130905',
},
u'params': {
u'skip_download': True,
},
u'skip': u'VEVO is only available in some countries',
},
]
def _real_extract(self, url):
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1).split('_')[0].split('?')[0]
video_extension = 'mp4'
url = 'http://www.dailymotion.com/video/%s' % video_id
# Retrieve video webpage to extract further information
request = self._build_request(url)
webpage = self._download_webpage(request, video_id)
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
# It may just embed a vevo video:
m_vevo = re.search(
r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)',
webpage)
if m_vevo is not None:
vevo_id = m_vevo.group('id')
self.to_screen(u'Vevo video detected: %s' % vevo_id)
return self.url_result(u'vevo:%s' % vevo_id, ie='Vevo')
video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
# Looking for official user
r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
webpage, 'video uploader')
video_upload_date = None
mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
if mobj is not None:
video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
embed_page = self._download_webpage(embed_url, video_id,
u'Downloading embed page')
info = self._search_regex(r'var info = ({.*?}),$', embed_page,
'video info', flags=re.MULTILINE)
info = json.loads(info)
if info.get('error') is not None:
msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
raise ExtractorError(msg, expected=True)
# TODO: support choosing qualities
        for key in ['stream_h264_hd1080_url', 'stream_h264_hd_url',
                    'stream_h264_hq_url', 'stream_h264_url',
                    'stream_h264_ld_url']:
            if info.get(key):
max_quality = key
self.to_screen(u'Using %s' % key)
break
else:
raise ExtractorError(u'Unable to extract video URL')
video_url = info[max_quality]
# subtitles
video_subtitles = self.extract_subtitles(video_id)
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id)
return
return [{
'id': video_id,
'url': video_url,
'uploader': video_uploader,
'upload_date': video_upload_date,
'title': self._og_search_title(webpage),
'ext': video_extension,
'subtitles': video_subtitles,
'thumbnail': info['thumbnail_url']
}]
def _get_available_subtitles(self, video_id):
try:
sub_list = self._download_webpage(
'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
return {}
info = json.loads(sub_list)
if (info['total'] > 0):
sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
return sub_lang_list
self._downloader.report_warning(u'video doesn\'t have subtitles')
return {}
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
IE_NAME = u'dailymotion:playlist'
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
_MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/playlist/.+?".*?>.*?</a>.*?</div>'
_PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
def _extract_entries(self, id):
video_ids = []
for pagenum in itertools.count(1):
request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
webpage = self._download_webpage(request,
id, u'Downloading page %s' % pagenum)
playlist_el = get_element_by_attribute(u'class', u'video_list', webpage)
video_ids.extend(re.findall(r'data-id="(.+?)" data-ext-id', playlist_el))
if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
break
return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
for video_id in video_ids]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)
return {'_type': 'playlist',
'id': playlist_id,
'title': get_element_by_id(u'playlist_name', webpage),
'entries': self._extract_entries(playlist_id),
}
class DailymotionUserIE(DailymotionPlaylistIE):
IE_NAME = u'dailymotion:user'
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
_MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/user/.+?".*?>.*?</a>.*?</div>'
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user = mobj.group('user')
webpage = self._download_webpage(url, user)
full_user = self._html_search_regex(
r'<a class="label" href="/%s".*?>(.*?)</' % re.escape(user),
webpage, u'user', flags=re.DOTALL)
return {
'_type': 'playlist',
'id': user,
'title': full_user,
'entries': self._extract_entries(user),
}
|
daviddupont69/CouchPotatoServer
|
couchpotato/core/providers/trailer/vftrailers/youtube_dl/extractor/dailymotion.py
|
Python
|
gpl-3.0
| 7,924
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='''ckanext-resourcemeta''',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.0.1',
description='''Resource Metadata extension for CKAN''',
long_description=long_description,
# The project's main homepage.
url='https://github.com//ckanext-resourcemeta',
# Author details
author='''Tanzania Open Data Initiative''',
author_email='''''',
# Choose your license
license='AGPL',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='''CKAN''',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
namespace_packages=['ckanext'],
install_requires=[
# CKAN extensions should not list dependencies here, but in a separate
# ``requirements.txt`` file.
#
# http://docs.ckan.org/en/latest/extensions/best-practices.html#add-third-party-libraries-to-requirements-txt
],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
include_package_data=True,
package_data={
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points='''
[ckan.plugins]
resourcemeta=ckanext.resourcemeta.plugin:ResourcemetaPlugin
[babel.extractors]
ckan = ckan.lib.extract:extract_ckan
''',
# If you are changing from the default layout of your extension, you may
# have to change the message extractors, you can read more about babel
# message extraction at
# http://babel.pocoo.org/docs/messages/#extraction-method-mapping-and-configuration
message_extractors={
'ckanext': [
('**.py', 'python', None),
('**.js', 'javascript', None),
('**/templates/**.html', 'ckan', None),
],
}
)
|
WorldBank-Transport/ckanext-resourcemeta
|
setup.py
|
Python
|
agpl-3.0
| 3,649
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
from oslo_log import log as logging
from oslo_utils.excutils import save_and_reraise_exception
from blazar.db import api as db_api
from blazar import exceptions
LOG = logging.getLogger(__name__)
class BaseStatus(object):
"""Base class of status."""
# All statuses
ALL = ()
# Valid status transitions
NEXT_STATUSES = {}
@classmethod
def is_valid_transition(cls, current_status, next_status, **kwargs):
"""Check validity of a status transition.
:param current_status: Current status
:param next_status: Next status
:return: True if the transition is valid
"""
if next_status not in cls.NEXT_STATUSES[current_status]:
LOG.warning('Invalid transition from %s to %s.',
current_status, next_status)
return False
return True
class EventStatus(BaseStatus):
"""Event status class."""
# Statuses of an event
UNDONE = 'UNDONE'
IN_PROGRESS = 'IN_PROGRESS'
DONE = 'DONE'
ERROR = 'ERROR'
ALL = (UNDONE, IN_PROGRESS, DONE, ERROR)
# Valid status transitions
NEXT_STATUSES = {
UNDONE: (IN_PROGRESS,),
IN_PROGRESS: (DONE, ERROR),
DONE: (),
ERROR: ()
}
class ReservationStatus(BaseStatus):
"""Reservation status class."""
# Statuses of a reservation
PENDING = 'pending'
ACTIVE = 'active'
DELETED = 'deleted'
ERROR = 'error'
ALL = (PENDING, ACTIVE, DELETED, ERROR)
# Valid status transitions
NEXT_STATUSES = {
PENDING: (ACTIVE, DELETED, ERROR),
ACTIVE: (DELETED, ERROR),
DELETED: (),
ERROR: (DELETED,)
}
class LeaseStatus(BaseStatus):
"""Lease status class."""
# Stable statuses of a lease
PENDING = 'PENDING'
ACTIVE = 'ACTIVE'
TERMINATED = 'TERMINATED'
ERROR = 'ERROR'
STABLE = (PENDING, ACTIVE, TERMINATED, ERROR)
# Transitional statuses of a lease
CREATING = 'CREATING'
STARTING = 'STARTING'
UPDATING = 'UPDATING'
TERMINATING = 'TERMINATING'
DELETING = 'DELETING'
TRANSITIONAL = (CREATING, STARTING, UPDATING, TERMINATING, DELETING)
# All statuses
ALL = STABLE + TRANSITIONAL
# Valid status transitions
NEXT_STATUSES = {
PENDING: (STARTING, UPDATING, DELETING),
ACTIVE: (TERMINATING, UPDATING, DELETING),
TERMINATED: (UPDATING, DELETING),
ERROR: (TERMINATING, UPDATING, DELETING),
CREATING: (PENDING, DELETING),
STARTING: (ACTIVE, ERROR, DELETING),
UPDATING: STABLE + (DELETING,),
TERMINATING: (TERMINATED, ERROR, DELETING),
DELETING: (ERROR,)
}
@classmethod
def is_valid_transition(cls, current, next, **kwargs):
"""Check validity of a status transition.
:param current: Current status
:param next: Next status
:return: True if the transition is valid
"""
if super(LeaseStatus, cls).is_valid_transition(current,
next, **kwargs):
if cls.is_valid_combination(kwargs['lease_id'], next):
return True
else:
LOG.warning('Invalid combination of statuses.')
return False
@classmethod
def is_valid_combination(cls, lease_id, status):
"""Validator for the combination of statuses.
Check if the combination of statuses of lease, reservations and events
is valid
:param lease_id: Lease ID
:param status: Lease status
:return: True if the combination is valid
"""
# Validate reservation statuses
reservations = db_api.reservation_get_all_by_lease_id(lease_id)
if any([r['status'] not in COMBINATIONS[status]['reservation']
for r in reservations]):
return False
# Validate event statuses
for event_type in ('start_lease', 'end_lease'):
event = db_api.event_get_first_sorted_by_filters(
'lease_id', 'asc',
{'lease_id': lease_id, 'event_type': event_type}
)
if event['status'] not in COMBINATIONS[status][event_type]:
return False
return True
@classmethod
def is_stable(cls, lease_id):
"""Check if the lease status is stable
:param lease_id: Lease ID
:return: True if the status is in (PENDING, ACTIVE, TERMINATED, ERROR)
"""
lease = db_api.lease_get(lease_id)
return (lease['status'] in cls.STABLE)
@classmethod
def lease_status(cls, transition, result_in, non_fatal_exceptions=[]):
"""Decorator for managing a lease status.
This checks and updates a lease status before and after executing a
decorated function.
:param transition: A status which is set while executing the
decorated function.
:param result_in: A tuple of statuses to which a lease transits after
executing the decorated function.
:param non_fatal_exceptions: A list of exceptions that are non fatal.
If one is raised during execution, the lease status
will be restored.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
# Update a lease status
lease_id = kwargs['lease_id']
lease = db_api.lease_get(lease_id)
original_status = lease['status']
if cls.is_valid_transition(original_status,
transition,
lease_id=lease_id):
db_api.lease_update(lease_id,
{'status': transition})
LOG.debug('Status of lease %s changed from %s to %s.',
lease_id, original_status, transition)
else:
LOG.warning('Aborting %s. '
'Invalid lease status transition '
'from %s to %s.',
func.__name__, original_status,
transition)
raise exceptions.InvalidStatus
# Executing the wrapped function
try:
result = func(*args, **kwargs)
except Exception as e:
with save_and_reraise_exception():
if type(e) in non_fatal_exceptions:
LOG.exception(
'Non-fatal exception during transition '
'of lease %s', lease_id)
db_api.lease_update(lease_id,
{'status': original_status})
else:
LOG.exception(
'Lease %s went into ERROR status. %s',
lease_id, str(e))
db_api.lease_update(lease_id,
{'status': cls.ERROR})
# Update a lease status if it exists
if db_api.lease_get(lease_id):
next_status = cls.derive_stable_status(lease_id)
if (next_status in result_in
and cls.is_valid_transition(transition,
next_status,
lease_id=lease_id)):
db_api.lease_update(lease_id,
{'status': next_status})
LOG.debug('Status of lease %s changed from %s to %s.',
lease_id, transition, next_status)
else:
LOG.error('Lease %s went into ERROR status.',
lease_id)
db_api.lease_update(lease_id, {'status': cls.ERROR})
raise exceptions.InvalidStatus
return result
return wrapper
return decorator
@classmethod
def derive_stable_status(cls, lease_id):
"""Derive stable lease status.
This derives a lease status from statuses of reservations and events.
:param lease_id: Lease ID
:return: Derived lease status
"""
# Possible lease statuses. Key is a tuple of (lease_start event
# status, lease_end event status)
possible_statuses = {
(EventStatus.UNDONE, EventStatus.UNDONE): cls.PENDING,
(EventStatus.DONE, EventStatus.UNDONE): cls.ACTIVE,
(EventStatus.DONE, EventStatus.DONE): cls.TERMINATED
}
# Derive a lease status from event statuses
event_statuses = {}
for event_type in ('start_lease', 'end_lease'):
event = db_api.event_get_first_sorted_by_filters(
'lease_id', 'asc',
{'lease_id': lease_id, 'event_type': event_type}
)
event_statuses[event_type] = event['status']
try:
status = possible_statuses[(event_statuses['start_lease'],
event_statuses['end_lease'])]
except KeyError:
status = cls.ERROR
# Check the combination of statuses.
if cls.is_valid_combination(lease_id, status):
return status
else:
return cls.ERROR
COMBINATIONS = {
LeaseStatus.CREATING: {
'reservation': (ReservationStatus.PENDING,),
'start_lease': (EventStatus.UNDONE,),
'end_lease': (EventStatus.UNDONE,)
},
LeaseStatus.PENDING: {
'reservation': (ReservationStatus.PENDING,),
'start_lease': (EventStatus.UNDONE,),
'end_lease': (EventStatus.UNDONE,)
},
LeaseStatus.STARTING: {
'reservation': (ReservationStatus.PENDING,
ReservationStatus.ACTIVE,
ReservationStatus.ERROR),
'start_lease': (EventStatus.IN_PROGRESS,),
'end_lease': (EventStatus.UNDONE,)
},
LeaseStatus.ACTIVE: {
'reservation': (ReservationStatus.ACTIVE,),
'start_lease': (EventStatus.DONE,),
'end_lease': (EventStatus.UNDONE,)
},
LeaseStatus.TERMINATING: {
'reservation': (ReservationStatus.ACTIVE,
ReservationStatus.DELETED,
ReservationStatus.ERROR),
'start_lease': (EventStatus.DONE,
EventStatus.ERROR),
'end_lease': (EventStatus.IN_PROGRESS,)
},
LeaseStatus.TERMINATED: {
'reservation': (ReservationStatus.DELETED,),
'start_lease': (EventStatus.DONE,),
'end_lease': (EventStatus.DONE,)
},
LeaseStatus.DELETING: {
'reservation': ReservationStatus.ALL,
'start_lease': (EventStatus.UNDONE,
EventStatus.DONE,
EventStatus.ERROR),
'end_lease': (EventStatus.UNDONE,
EventStatus.DONE,
EventStatus.ERROR)
},
LeaseStatus.UPDATING: {
'reservation': ReservationStatus.ALL,
'start_lease': (EventStatus.UNDONE,
EventStatus.DONE,
EventStatus.ERROR),
'end_lease': (EventStatus.UNDONE,
EventStatus.DONE,
EventStatus.ERROR)
},
LeaseStatus.ERROR: {
'reservation': ReservationStatus.ERROR,
'start_lease': (EventStatus.DONE,
EventStatus.ERROR),
'end_lease': (EventStatus.UNDONE,
EventStatus.ERROR)
}
}
event = EventStatus
reservation = ReservationStatus
lease = LeaseStatus
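# Illustrative sketch (not part of blazar): minimal use of the lease_status
# decorator defined above. The function below is hypothetical; only the
# decorator's signature and the status constants come from this module.
@LeaseStatus.lease_status(transition=LeaseStatus.STARTING,
                          result_in=(LeaseStatus.ACTIVE, LeaseStatus.ERROR))
def _example_start_lease(lease_id=None):
    # While this body runs the lease is set to STARTING; afterwards the
    # decorator derives the stable status from the lease's reservations and
    # events and checks that it is one of ``result_in``.
    pass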
|
openstack/blazar
|
blazar/status.py
|
Python
|
apache-2.0
| 12,588
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-07-21 08:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0031_appversion_last'),
]
operations = [
migrations.RenameField(
model_name='appversion',
old_name='last',
new_name='is_last',
),
]
|
Vespapp/vespapp-web
|
api/migrations/0032_auto_20160721_1004.py
|
Python
|
gpl-3.0
| 423
|
from dal import autocomplete
from django import forms
from django.utils.translation import gettext_lazy as _
from ajapaik.ajapaik.models import Photo, Album
from ajapaik.ajapaik_face_recognition.models import FaceRecognitionRectangle, \
FaceRecognitionUserSuggestion, FaceRecognitionRectangleFeedback
class FaceRecognitionAddPersonForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(FaceRecognitionAddPersonForm, self).__init__(*args, **kwargs)
self.fields['gender'].widget = forms.RadioSelect(choices=[(1, _('Male')), (0, _('Female'))])
class Meta:
model = Album
fields = ('name', 'date_of_birth', 'gender', 'is_public_figure', 'description')
labels = {
'name': _('Person name'),
'date_of_birth': _('Date of birth'),
'gender': _('Gender'),
'is_public_figure': _('Is public figure'),
'description': _('Description')
}
widgets = {
'name': forms.TextInput(attrs={'placeholder': _('Firstname Lastname')}),
'date_of_birth': forms.DateInput(attrs={'type': 'date'}),
'description': forms.Textarea(attrs={'rows': 1, 'cols': 40, 'placeholder': _(
'Additional remarks about the person (other names etc)')}),
}
class FaceRecognitionSuggestionForm(forms.ModelForm):
subject_album = forms.ModelChoiceField(
queryset=Album.objects.all(),
required=True,
widget=autocomplete.ModelSelect2(url='subject-album-autocomplete')
)
rectangle = forms.ModelChoiceField(queryset=FaceRecognitionRectangle.objects.all(), widget=forms.HiddenInput())
class Meta:
model = FaceRecognitionUserSuggestion
fields = ('subject_album', 'rectangle')
class FaceRecognitionRectangleFeedbackForm(forms.ModelForm):
class Meta:
model = FaceRecognitionRectangleFeedback
fields = ('rectangle', 'is_correct')
class FaceRecognitionRectangleSubmitForm(forms.Form):
photo = forms.ModelChoiceField(queryset=Photo.objects.filter(rephoto_of_id__isnull=True))
x1 = forms.IntegerField(min_value=1)
y1 = forms.IntegerField(min_value=1)
x2 = forms.IntegerField(min_value=1)
y2 = forms.IntegerField(min_value=1)
seen_width = forms.IntegerField(min_value=1)
seen_height = forms.IntegerField(min_value=1)
|
Ajapaik/ajapaik-web
|
ajapaik/ajapaik_face_recognition/forms.py
|
Python
|
gpl-3.0
| 2,362
|
# DrawingThought.py
# This file is part of Labyrinth
#
# Copyright (C) 2006 - Don Scorgie <Don@Scorgieorg>
#
# Labyrinth is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Labyrinth is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Labyrinth; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
#
from gi.repository import Gtk, Gdk
import xml.dom.minidom as dom
import xml.dom
import gettext
_ = gettext.gettext
import math
import logging
import cairo
from BaseThought import *
import utils
import UndoManager
STYLE_CONTINUE=0
STYLE_END=1
STYLE_BEGIN=2
ndraw =0
SMOOTH = 5
class DrawingThought (ResizableThought):
class DrawingPoint (object):
def __init__ (self, coords, style=STYLE_CONTINUE, color = Gdk.Color(0,0,0), width = 2):
self.x, self.y = coords
self.style = style
if color == None:
color = Gdk.Color(0,0,0)
self.color = color
self.width = 1
def move_by (self, x, y):
self.x += x
self.y += y
def __init__ (self, coords, pango_context, thought_number, save, undo, loading, background_color, foreground_color):
global ndraw
super (DrawingThought, self).__init__(coords, save, "drawing_thought", undo, background_color, foreground_color)
ndraw+=1
self.identity = thought_number
self.points = []
self.text = _("Drawing #%d" % ndraw)
self.drawing = 0
self.all_okay = True
self.coords_smooth = []
def draw (self, context):
ResizableThought.draw(self, context)
cwidth = context.get_line_width ()
context.set_line_width (2)
context.set_line_join(cairo.LINE_JOIN_BEVEL)
context.set_line_cap(cairo.LINE_CAP_ROUND)
if len (self.points) > 0:
for p in self.points:
if p.style == STYLE_BEGIN:
context.move_to (p.x, p.y)
r,g,b = utils.gtk_to_cairo_color(self.foreground_color)
context.set_source_rgb (r, g, b)
elif p.style == STYLE_END:
context.line_to (p.x, p.y)
context.stroke()
else:
context.line_to (p.x, p.y)
context.set_line_width (cwidth)
context.stroke ()
return
def recalc_edges (self):
self.lr = (self.ul[0]+self.width, self.ul[1]+self.height)
def undo_drawing (self, action, mode):
self.undo.block ()
if mode == UndoManager.UNDO:
choose = 1
for p in action.args[0]:
self.points.remove (p)
else:
choose = 2
for p in action.args[0]:
self.points.append (p)
self.ul = action.args[choose][0]
self.width = action.args[choose][1]
self.height = action.args[choose][2]
self.recalc_edges ()
self.emit ("update_links")
self.emit ("update_view")
self.undo.unblock ()
def process_button_down (self, event, coords):
if ResizableThought.process_button_down(self, event, coords):
return True
if event.button == 1:
self.button_down = True
self.drawing = 2
if not event.get_state() & Gdk.ModifierType.SHIFT_MASK:
self.drawing = 1
self.orig_size = (self.ul, self.width, self.height)
self.ins_points = []
self.del_points = []
return True
return False
def process_button_release (self, event, transformed):
if len(self.points) > 0:
self.points[-1].style=STYLE_END
if self.orig_size:
if self.drawing == 0:
# correct sizes after creation
if self.creating:
orig_size = self.width >= MIN_SIZE or self.height >= MIN_SIZE
self.width = orig_size and max(MIN_SIZE, self.width) or DEFAULT_WIDTH
self.height = orig_size and max(MIN_SIZE, self.height) or DEFAULT_HEIGHT
self.recalc_edges()
self.creating = False
else:
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_RESIZE, \
self.undo_resize, self.orig_size, (self.ul, self.width, self.height)))
elif self.drawing == 1:
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_DRAW, \
self.undo_drawing, self.ins_points, self.orig_size, \
(self.ul, self.width, self.height)))
elif self.drawing == 2:
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_ERASE, \
self.undo_erase, self.ins_points))
self.drawing = 0
return ResizableThought.process_button_release(self, event, transformed)
def leave(self):
ResizableThought.leave(self)
self.drawing = 0
def undo_erase (self, action, mode):
self.undo.block ()
action.args[0].reverse ()
if mode == UndoManager.UNDO:
for x in action.args[0]:
if x[0] == 0:
self.points.remove (x[2])
else:
self.points.insert (x[1],x[2])
else:
for x in action.args[0]:
if x[0] == 0:
self.points.insert (x[1], x[2])
else:
self.points.remove (x[2])
self.undo.unblock ()
self.emit ("update_view")
def handle_motion (self, event, coords):
if ResizableThought.handle_motion(self, event, coords):
return True
if not self.editing:
return False
# Smooth drawing and reduce number of points
self.coords_smooth.append(coords)
if len(self.coords_smooth) < SMOOTH:
return False
else:
coords = (float(sum([i[0] for i in self.coords_smooth])) / SMOOTH,
float(sum([i[1] for i in self.coords_smooth])) / SMOOTH)
self.coords_smooth = []
if self.drawing == 1:
if coords[0] < self.ul[0]+5:
self.ul = (coords[0]-5, self.ul[1])
elif coords[0] > self.lr[0]-5:
self.lr = (coords[0]+5, self.lr[1])
if coords[1] < self.ul[1]+5:
self.ul = (self.ul[0], coords[1]-5)
elif coords[1] > self.lr[1]-5:
self.lr = (self.lr[0], coords[1]+5)
if self.min_x is None or coords[0] < self.min_x:
self.min_x = coords[0]-10
elif self.max_x is None or coords[0] > self.max_x:
self.max_x = coords[0]+5
if self.min_y is None or coords[1] < self.min_y:
self.min_y = coords[1]-10
elif self.max_y is None or coords[1] > self.max_y:
self.max_y = coords[1]+5
self.width = self.lr[0] - self.ul[0]
self.height = self.lr[1] - self.ul[1]
if len(self.points) == 0 or self.points[-1].style == STYLE_END:
p = self.DrawingPoint (coords, STYLE_BEGIN, self.foreground_color)
else:
p = self.DrawingPoint (coords, STYLE_CONTINUE)
self.points.append (p)
self.ins_points.append (p)
return True
elif self.drawing == 2 and len (self.points) > 0:
out = self.points[0]
loc = []
handle = []
ins_point = -1
for x in self.points:
ins_point += 1
dist = (x.x - coords[0])**2 + (x.y - coords[1])**2
if dist < 16:
if x == self.points[0]:
out = None
loc.append ((ins_point, x, dist))
else:
if len(loc) != 0:
handle.append ((loc, out, x))
loc = []
elif x.style != STYLE_BEGIN:
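                    # Circle/segment intersection test: the eraser is a circle
                    # of radius 4 (hence the 16 = 4**2 below and in the
                    # "dist < 16" check above) centred on ``coords``. ``d`` is
                    # the cross product of the two point vectors relative to
                    # the eraser centre, so ``det`` is the discriminant
                    # r**2 * |segment|**2 - d**2 of the standard circle-line
                    # intersection formula; when it is positive, (xt, yt) and
                    # (xalt, yalt) are the two points where the line through
                    # ``out`` and ``x`` crosses the eraser circle.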
x1 = x.x - out.x
y1 = x.y - out.y
d_rsqr = x1**2 + y1 **2
d = ((out.x-coords[0])*(x.y-coords[1]) - (x.x-coords[0])*(out.y-coords[1]))
det = (d_rsqr*16) - d**2
if det > 0:
xt = -99999
yt = -99999
xalt = -99999
yalt = -99999
if y1 < 0:
sgn = -1
else:
sgn = 1
xt = (((d*y1) + sgn*x1 * math.sqrt (det)) / d_rsqr) +coords[0]
xalt = (((d*y1) - sgn*x1 * math.sqrt (det)) / d_rsqr) +coords[0]
yt = (((-d*x1) + abs(y1)*math.sqrt(det)) / d_rsqr) + coords[1]
yalt = (((-d*x1) - abs(y1)*math.sqrt(det)) / d_rsqr) +coords[1]
x1_inside = (xt > x.x and xt < out.x) or (xt > out.x and xt < x.x)
x2_inside = (xalt > x.x and xalt < out.x) or (xalt > out.x and xalt < x.x)
y1_inside = (yt > x.y and yt < out.y) or (yt > out.y and yt < x.y)
y2_inside = (yalt > x.y and yalt < out.y) or (yalt > out.y and yalt < x.y)
if (x1_inside and x2_inside and y1_inside and y2_inside):
if abs (xalt - x.x) < abs (xt - x.x):
handle.append ((None, out, x, ins_point, xt, xalt, yt, yalt))
else:
handle.append ((None, out, x, ins_point, xalt, xt, yalt, yt))
elif x.x == out.x and y1_inside and y2_inside:
if abs (yalt - x.y) < abs (yt - x.y):
handle.append ((None, out, x, ins_point, xt, xalt, yt, yalt))
else:
handle.append ((None, out, x, ins_point, xalt, xt, yalt, yt))
elif x.y == out.y and x1_inside and x2_inside:
if abs (xalt - x.x) < abs (xt - x.x):
handle.append ((None, out, x, ins_point, xt, xalt, yt, yalt))
else:
handle.append ((None, out, x, ins_point, xalt, xt, yalt, yt))
out = x
if loc:
handle.append ((loc, out, None))
appends = []
dels = []
for l in handle:
inside = l[0]
prev = l[1]
next = l[2]
if not inside:
ins = l[3]
x1 = l[4]
x2 = l[5]
y1 = l[6]
y2 = l[7]
p1 = self.DrawingPoint ((x1,y1), STYLE_END)
p2 = self.DrawingPoint ((x2,y2), STYLE_BEGIN)
appends.append ((p1, ins))
appends.append ((p2, ins))
else:
first = inside[0][1]
last = inside[-1][1]
done_ins = 0
if last.style != STYLE_END:
end_dist = math.sqrt (inside[-1][2]) - 4
alpha = math.atan2 ((last.y-next.y), (last.x-next.x))
new_x = end_dist * math.cos(alpha) + last.x
new_y = end_dist * math.sin(alpha) + last.y
p = self.DrawingPoint ((new_x, new_y), STYLE_BEGIN)
appends.append ((p, inside[-1][0]))
done_ins = 1
if first.style != STYLE_BEGIN:
start_dist = math.sqrt (inside[0][2]) - 4
alpha = math.atan2 ((first.y-prev.y),(first.x-prev.x))
new_x = start_dist * math.cos (alpha) + first.x
new_y = start_dist * math.sin (alpha) + first.y
p = self.DrawingPoint ((new_x, new_y), STYLE_END)
appends.append ((p, inside[0][0]-done_ins))
for i in inside:
dels.append (i[1])
inserts = 0
for x in appends:
self.points.insert (x[1]+inserts, x[0])
self.ins_points.append ((0, x[1]+inserts, x[0]))
inserts+=1
for x in dels:
self.ins_points.append ((1, self.points.index (x), x))
self.points.remove (x)
return True
return False
def move_content_by(self, x, y):
map(lambda p : p.move_by(x,y), self.points)
ResizableThought.move_content_by(self, x, y)
def update_save (self):
next = self.element.firstChild
while next:
m = next.nextSibling
if next.nodeName == "point":
self.element.removeChild (next)
next.unlink ()
next = m
text = self.extended_buffer.get_text ()
if text:
self.extended_buffer.update_save()
else:
try:
self.element.removeChild(self.extended_buffer.element)
except xml.dom.NotFoundErr:
pass
self.element.setAttribute ("ul-coords", str(self.ul))
self.element.setAttribute ("lr-coords", str(self.lr))
self.element.setAttribute ("identity", str(self.identity))
self.element.setAttribute ("background-color", self.background_color.to_string())
self.element.setAttribute ("foreground-color", self.foreground_color.to_string())
self.element.setAttribute ("min_x", str(self.min_x))
self.element.setAttribute ("min_y", str(self.min_y))
self.element.setAttribute ("max_x", str(self.max_x))
self.element.setAttribute ("max_y", str(self.max_y))
if self.am_selected:
self.element.setAttribute ("current_root", "true")
else:
try:
self.element.removeAttribute ("current_root")
except xml.dom.NotFoundErr:
pass
if self.am_primary:
self.element.setAttribute ("primary_root", "true");
else:
try:
self.element.removeAttribute ("primary_root")
except xml.dom.NotFoundErr:
pass
doc = self.element.ownerDocument
for p in self.points:
elem = doc.createElement ("point")
self.element.appendChild (elem)
elem.setAttribute ("coords", str((p.x,p.y)))
elem.setAttribute ("type", str(p.style))
elem.setAttribute ("color", p.color.to_string())
return
def load (self, node, tar):
tmp = node.getAttribute ("ul-coords")
self.ul = utils.parse_coords (tmp)
tmp = node.getAttribute ("lr-coords")
self.lr = utils.parse_coords (tmp)
self.identity = int (node.getAttribute ("identity"))
try:
tmp = node.getAttribute ("background-color")
self.background_color = Gdk.color_parse(tmp)
tmp = node.getAttribute ("foreground-color")
self.foreground_color = Gdk.color_parse(tmp)
except ValueError:
pass
def get_min_max(node, name):
attr = node.getAttribute(name)
if attr == 'None':
return None
else:
return float(attr)
self.min_x = get_min_max(node, 'min_x')
self.min_y = get_min_max(node, 'min_y')
self.max_x = get_min_max(node, 'max_x')
self.max_y = get_min_max(node, 'max_y')
self.width = self.lr[0] - self.ul[0]
self.height = self.lr[1] - self.ul[1]
self.am_selected = node.hasAttribute ("current_root")
self.am_primary = node.hasAttribute ("primary_root")
for n in node.childNodes:
if n.nodeName == "Extended":
self.extended_buffer.load(n)
elif n.nodeName == "point":
style = int (n.getAttribute ("type"))
tmp = n.getAttribute ("coords")
c = utils.parse_coords (tmp)
col = None
try:
tmp = n.getAttribute ("color")
col = Gdk.color_parse (tmp)
except ValueError:
pass
self.points.append (self.DrawingPoint (c, style, col))
else:
print "Unknown node type: "+str(n.nodeName)
def export (self, context, move_x, move_y):
utils.export_thought_outline (context, self.ul, self.lr, self.background_color, self.am_selected, self.am_primary, utils.STYLE_NORMAL,
(move_x, move_y))
cwidth = context.get_line_width ()
context.set_line_width (1)
if len (self.points) > 0:
for p in self.points:
if p.style == STYLE_BEGIN:
context.move_to (p.x+move_x, p.y+move_y)
else:
context.line_to (p.x+move_x,p.y+move_y)
context.set_line_width (cwidth)
r,g,b = utils.gtk_to_cairo_color(self.foreground_color)
context.set_source_rgb (r, g, b)
context.stroke ()
return
def inside(self, inside):
if self.editing:
self.emit ("change_mouse_cursor", Gdk.PENCIL)
else:
ResizableThought.inside(self, inside)
def enter (self):
self.editing = True
|
Boquete/activity-labyrinth
|
src/DrawingThought.py
|
Python
|
gpl-2.0
| 14,244
|
#!/usr/bin/env python
'''
make_dummy.py - this file is part of S3QL (http://s3ql.googlecode.com)
Creates a dummy copy of an entire bucket. The dummy will appear to contain
all the data of the original bucket. However, in fact only the metadata
will be copied and all files contain just \0 bytes.
---
Copyright (C) 2010 Nikolaus Rath <Nikolaus@rath.org>
This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
import sys
import os
import logging
import tempfile
# We are running from the S3QL source directory, make sure
# that we use modules from this directory
basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
if (os.path.exists(os.path.join(basedir, 'setup.py')) and
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
sys.path = [os.path.join(basedir, 'src')] + sys.path
from s3ql.common import (setup_logging, QuietError,
unlock_bucket, get_backend)
from s3ql.backends.common import ChecksumError
from s3ql.parse_args import ArgumentParser, storage_url_type
log = logging.getLogger('make_dummy')
def parse_args(args):
'''Parse command line'''
parser = ArgumentParser(
description="Create a dummy-copy of the source bucket. The target will "
                    'contain a file system with the same structure, but all files '
'will just contain \\0 bytes.')
parser.add_authfile()
parser.add_quiet()
parser.add_debug_modules()
parser.add_version()
parser.add_ssl()
parser.add_argument("src", metavar='<source storage-url>',
type=storage_url_type,
help='Source storage URL')
parser.add_argument("dest", metavar='<dest storage-url>',
type=storage_url_type,
help='Destination storage URL')
return parser.parse_args(args)
def main(args=None):
if args is None:
args = sys.argv[1:]
options = parse_args(args)
setup_logging(options)
with get_backend(options.src, options.authfile,
options.ssl) as (src_conn, src_name):
if not src_name in src_conn:
raise QuietError("Source bucket does not exist.")
src_bucket = src_conn.get_bucket(src_name)
try:
unlock_bucket(options.authfile, options.src, src_bucket)
except ChecksumError:
raise QuietError('Checksum error - incorrect password?')
with get_backend(options.dest, options.authfile,
options.ssl) as (dest_conn, dest_name):
if dest_name in dest_conn:
raise QuietError("Bucket already exists!\n"
"(you can delete an existing bucket with s3qladm --delete)\n")
dest_bucket = dest_conn.create_bucket(dest_name, compression=None)
copy_objects(src_bucket, dest_bucket)
def copy_objects(src_bucket, dest_bucket):
log.info('Copying...')
for (no, key) in enumerate(src_bucket):
if no != 0 and no % 5000 == 0:
log.info('Copied %d objects so far..', no)
if key.startswith('s3ql_data_'):
dest_bucket[key] = key
elif key == 's3ql_passphrase' or key.startswith('s3ql_metadata_bak'):
pass
else:
log.info('Copying %s..', key)
fh = tempfile.TemporaryFile()
meta = src_bucket.fetch_fh(key, fh, plain=True)
fh.seek(0)
dest_bucket.store_fh(key, fh, meta)
fh.close()
log.info('Done.')
if __name__ == '__main__':
main(sys.argv[1:])
|
drewlu/ossql
|
contrib/make_dummy.py
|
Python
|
gpl-3.0
| 3,830
|
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
import Orange
from Orange.clustering.dbscan import DBSCAN
class TestDBSCAN(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.iris = Orange.data.Table('iris')
def test_dbscan_parameters(self):
dbscan = DBSCAN(eps=0.1, min_samples=7, metric='euclidean',
algorithm='auto', leaf_size=12, p=None)
c = dbscan(self.iris)
def test_predict_table(self):
dbscan = DBSCAN()
c = dbscan(self.iris)
table = self.iris[:20]
p = c(table)
def test_predict_numpy(self):
dbscan = DBSCAN()
c = dbscan(self.iris)
X = self.iris.X[::20]
p = c(X)
|
cheral/orange3
|
Orange/tests/test_clustering_dbscan.py
|
Python
|
bsd-2-clause
| 784
|
"""Unittest for repo_specific_model. """
import logging
from unittest import mock
import pytest
from label_microservice import mlp
from label_microservice import repo_specific_model
from label_microservice import test_util
def test_predict_labels():
"""A unittest for predict labels.
This function mocks out the embedding service and the MLPWorker.
"""
model = repo_specific_model.RepoSpecificLabelModel()
model._mlp_predictor = mock.MagicMock(spec=mlp.MLPWrapper)
model._mlp_predictor.predict_probabilities.return_value = [[.2, .9]]
model._label_names = ["label1", "label2"]
model._label_thresholds = {
"label1": .5 ,
"label2": .5
}
model._get_issue_embedding = mock.MagicMock()
model._get_issue_embedding.return_value = [(10, 10)]
results = model.predict_issue_labels("some title", "some text")
expected = {
"label2": .9,
}
test_util.assert_dict_equal(expected, results)
@mock.patch("repo_specific_model.requests.post")
def test_get_issue_embedding_not_found(mock_post):
    """Testing _get_issue_embedding when the embedding service returns 404."""
model = repo_specific_model.RepoSpecificLabelModel()
model._embedding_api_key = "1234abcd"
mock_post.return_value.status_code = 404
issue_embedding = model._get_issue_embedding("title", "text")
# issue_embedding should be None
assert not issue_embedding
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
pytest.main()
|
kubeflow/code-intelligence
|
py/label_microservice/repo_specific_model_test.py
|
Python
|
mit
| 1,647
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('branch', '0028_auto_20141203_0800'),
]
operations = [
migrations.AddField(
model_name='branch',
name='banned',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, null=True, blank=True, verbose_name='Membres bannis', related_name='banned_users'),
preserve_default=True,
),
migrations.AlterField(
model_name='branch',
name='name',
field=models.CharField(max_length=255, verbose_name="Branch's name"),
preserve_default=True,
),
migrations.AlterField(
model_name='demand',
name='donor',
field=models.ForeignKey(blank=True, null=True, related_name='demand_donor', verbose_name='Sender', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterField(
model_name='demand',
name='receive_help_from_who',
field=models.IntegerField(choices=[(5, 'All'), (3, 'Verified member'), (6, 'Mes membres favoris')], default=5, verbose_name='Who can see and respond to demand/offer'),
preserve_default=True,
),
migrations.AlterField(
model_name='demand',
name='receiver',
field=models.ForeignKey(blank=True, null=True, related_name='demand_receiver', verbose_name='Receiver', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterField(
model_name='offer',
name='donor',
field=models.ForeignKey(blank=True, null=True, related_name='offer_donor', verbose_name='Sender', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterField(
model_name='offer',
name='receive_help_from_who',
field=models.IntegerField(choices=[(5, 'All'), (3, 'Verified member'), (6, 'Mes membres favoris')], default=5, verbose_name='Who can see and respond to demand/offer'),
preserve_default=True,
),
migrations.AlterField(
model_name='offer',
name='receiver',
field=models.ForeignKey(blank=True, null=True, related_name='offer_receiver', verbose_name='Receiver', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
MaximeBiset/care4care
|
branch/migrations/0029_auto_20141203_1434.py
|
Python
|
agpl-3.0
| 2,637
|
import sys
sys.setrecursionlimit(2*10**5)
def dfs(v, graph, memo):
if memo[v] != -1:
return memo[v]
ret = 0
for c in graph[v]:
ret = max(ret, dfs(c, graph, memo) + 1)
memo[v] = ret
return ret
def main():
N, M = map(int, input().split())
graph = [[] for _ in range(N)]
for _ in range(M):
x, y = map(int, input().split())
x, y = x - 1, y - 1
graph[x].append(y)
memo = [-1] * N
ans = 0
for i in range(N):
ans = max(ans, dfs(i, graph, memo))
print(ans)
if __name__ == '__main__':
main()
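def _demo():
    """Illustrative sketch, not part of the original solution: longest path
    (in edges) in a small hand-made DAG, using the memoised dfs() above.
    Edges 0->1, 1->2 and 0->2 give a longest path of length 2."""
    graph = [[1, 2], [2], []]
    memo = [-1] * len(graph)
    assert max(dfs(v, graph, memo) for v in range(len(graph))) == 2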
|
knuu/competitive-programming
|
atcoder/dp/edu_dp_g.py
|
Python
|
mit
| 589
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple statistical algorithms that are
straightforwardly implemented as a single python function (or family of
functions).
This module should generally not be used directly. Everything in
`__all__` is imported into `astropy.stats`, and hence that package
should be used for access.
"""
import math
import numpy as np
from astropy.utils import isiterable
from . import _stats
__all__ = ['gaussian_fwhm_to_sigma', 'gaussian_sigma_to_fwhm',
'binom_conf_interval', 'binned_binom_proportion',
'poisson_conf_interval', 'median_absolute_deviation', 'mad_std',
'signal_to_noise_oir_ccd', 'bootstrap', 'kuiper', 'kuiper_two',
'kuiper_false_positive_probability', 'cdf_from_intervals',
'interval_overlap_length', 'histogram_intervals', 'fold_intervals']
__doctest_skip__ = ['binned_binom_proportion']
__doctest_requires__ = {'binom_conf_interval': ['scipy.special'],
'poisson_conf_interval': ['scipy.special',
'scipy.optimize',
'scipy.integrate']}
gaussian_sigma_to_fwhm = 2.0 * math.sqrt(2.0 * math.log(2.0))
"""
Factor with which to multiply Gaussian 1-sigma standard deviation to
convert it to full width at half maximum (FWHM).
"""
gaussian_fwhm_to_sigma = 1. / gaussian_sigma_to_fwhm
"""
Factor with which to multiply Gaussian full width at half maximum (FWHM)
to convert it to 1-sigma standard deviation.
"""
# TODO Note scipy dependency
def binom_conf_interval(k, n, conf=0.68269, interval='wilson'):
r"""Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
conf : float in [0, 1], optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : numpy.ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (N) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/N)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
        CI_{\rm Wilson} = \frac{k + \kappa^2/2}{N + \kappa^2}
        \pm \frac{\kappa N^{1/2}}{N + \kappa^2}
        \left(\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4N)\right)^{1/2}
where :math:`\hat{\epsilon} = k / N` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, N - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = N the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, N - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{N}}
The Wald interval gives acceptable results in some limiting
cases. Particularly, when N is very large, and the true proportion
:math:`\epsilon` is not "too close" to 0 or 1. However, as the
later is not verifiable when trying to estimate :math:`\epsilon`,
this is not very helpful. Its use is not recommended, but it is
provided here for comparison purposes due to its prevalence in
everyday practical statistics.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson')
array([ 0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, N:
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='wilson')
array([[ 0. , 0.07921741, 0.21597328, 0.83333304],
[ 0.16666696, 0.42078276, 0.61736012, 1. ]])
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='jeffreys')
array([[ 0. , 0.0842525 , 0.21789949, 0.82788246],
[ 0.17211754, 0.42218001, 0.61753691, 1. ]])
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='flat')
array([[ 0. , 0.12139799, 0.24309021, 0.73577037],
[ 0.26422963, 0.45401727, 0.61535699, 1. ]])
In contrast, the Wald interval gives poor results for small k, N.
For k = 0 or k = N, the interval always has zero length.
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='wald')
array([[ 0. , 0.02111437, 0.18091075, 1. ],
[ 0. , 0.37888563, 0.61908925, 1. ]])
For confidence intervals approaching 1, the Wald interval for
0 < k < N can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='wald', conf=0.99)
array([[ 0. , -0.26077835, -0.16433593, 1. ],
[ 0. , 0.66077835, 0.96433593, 1. ]])
"""
if conf < 0. or conf > 1.:
raise ValueError('conf must be between 0. and 1.')
alpha = 1. - conf
k = np.asarray(k).astype(int)
n = np.asarray(n).astype(int)
if (n <= 0).any():
raise ValueError('n must be positive')
if (k < 0).any() or (k > n).any():
raise ValueError('k must be in {0, 1, .., n}')
if interval == 'wilson' or interval == 'wald':
from scipy.special import erfinv
kappa = np.sqrt(2.) * min(erfinv(conf), 1.e10) # Avoid overflows.
k = k.astype(float)
n = n.astype(float)
p = k / n
if interval == 'wilson':
midpoint = (k + kappa ** 2 / 2.) / (n + kappa ** 2)
halflength = (kappa * np.sqrt(n)) / (n + kappa ** 2) * \
np.sqrt(p * (1 - p) + kappa ** 2 / (4 * n))
conf_interval = np.array([midpoint - halflength,
midpoint + halflength])
# Correct intervals out of range due to floating point errors.
conf_interval[conf_interval < 0.] = 0.
conf_interval[conf_interval > 1.] = 1.
else:
midpoint = p
halflength = kappa * np.sqrt(p * (1. - p) / n)
conf_interval = np.array([midpoint - halflength,
midpoint + halflength])
elif interval == 'jeffreys' or interval == 'flat':
from scipy.special import betaincinv
if interval == 'jeffreys':
lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha)
upperbound = betaincinv(k + 0.5, n - k + 0.5, 1. - 0.5 * alpha)
else:
lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha)
upperbound = betaincinv(k + 1, n - k + 1, 1. - 0.5 * alpha)
# Set lower or upper bound to k/n when k/n = 0 or 1
# We have to treat the special case of k/n being scalars,
# which is an ugly kludge
if lowerbound.ndim == 0:
if k == 0:
lowerbound = 0.
elif k == n:
upperbound = 1.
else:
lowerbound[k == 0] = 0
upperbound[k == n] = 1
conf_interval = np.array([lowerbound, upperbound])
else:
raise ValueError(f'Unrecognized interval: {interval:s}')
return conf_interval
# TODO Note scipy dependency (needed in binom_conf_interval)
def binned_binom_proportion(x, success, bins=10, range=None, conf=0.68269,
interval='wilson'):
"""Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : list_like
Values.
success : list_like (bool)
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalars, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
conf : float in [0, 1], optional
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : numpy.ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : numpy.ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
        ``bin_ctr + bin_halfwidth`` give the left and right side of each bin,
respectively.
p : numpy.ndarray
Efficiency in each bin.
perr : numpy.ndarray
2-d array of shape (2, len(p)) representing the upper and lower
uncertainty on p in each bin.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
>>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
"""
x = np.ravel(x)
success = np.ravel(success).astype(bool)
if x.shape != success.shape:
raise ValueError('sizes of x and success must match')
# Put values into a histogram (`n`). Put "successful" values
# into a second histogram (`k`) with identical binning.
n, bin_edges = np.histogram(x, bins=bins, range=range)
k, bin_edges = np.histogram(x[success], bins=bin_edges)
bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.
bin_halfwidth = bin_ctr - bin_edges[:-1]
# Remove bins with zero entries.
valid = n > 0
bin_ctr = bin_ctr[valid]
bin_halfwidth = bin_halfwidth[valid]
n = n[valid]
k = k[valid]
p = k / n
bounds = binom_conf_interval(k, n, conf=conf, interval=interval)
perr = np.abs(bounds - p)
return bin_ctr, bin_halfwidth, p, perr
def _check_poisson_conf_inputs(sigma, background, conflevel, name):
if sigma != 1:
raise ValueError("Only sigma=1 supported for interval {}"
.format(name))
if background != 0:
raise ValueError("background not supported for interval {}"
.format(name))
if conflevel is not None:
raise ValueError("conflevel not supported for interval {}"
.format(name))
def poisson_conf_interval(n, interval='root-n', sigma=1, background=0,
conflevel=None):
r"""Poisson parameter confidence interval given observed counts
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
conflevel : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : numpy.ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group [recommends][pois_eb]
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also [discusses][ErrorBars] several possibilities but
concludes that no single representation is suitable for all cases.
The suggestion has also been [floated][ac12] that error bars should be
attached to theoretical predictions instead of observed data,
which this function will not help with (but it's easy; then you
really should use the square root of the theoretical prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as [explained][pois_eb] by
the CDF working group). It also has the nice feature that
if your theory curve touches an endpoint of the interval, then your
data point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The [documentation][sherpa_gehrels] claims it is
based on a numerical approximation published in
[Gehrels 1986][gehrels86] but it does not actually appear there.
It is symmetrical, and while the upper limits
are within about 1% of those given by 'frequentist-confidence', the
lower limits can be badly wrong. The interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
[Maxwell 2011][maxw11] for further details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See [KraftBurrowsNousek][kbn1991] for further details.
These formulas implement a positive, uniform prior.
[KraftBurrowsNousek][kbn1991] discuss this choice in more detail and show
that the problem is relatively insensitive to the choice of prior.
    This function has an optional dependency: either scipy or
    `mpmath <http://mpmath.org/>`_ needs to be available. (Scipy only works for
N < 100).
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(np.arange(10),
... interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(7,
... interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
>>> poisson_conf_interval(10, background=1.5, conflevel=0.95,
... interval='kraft-burrows-nousek').T
array([ 3.47894005, 16.113329533]) # doctest: +FLOAT_CMP
[pois_eb]: http://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt
[ErrorBars]: http://www.pp.rhul.ac.uk/~cowan/atlas/ErrorBars.pdf
[ac12]: http://adsabs.harvard.edu/abs/2012EPJP..127...24A
[maxw11]: http://adsabs.harvard.edu/abs/2011arXiv1102.0822M
[gehrels86]: http://adsabs.harvard.edu/abs/1986ApJ...303..336G
[sherpa_gehrels]: http://cxc.harvard.edu/sherpa4.4/statistics/#chigehrels
[kbn1991]: http://adsabs.harvard.edu/abs/1991ApJ...374..344K
"""
if not np.isscalar(n):
n = np.asanyarray(n)
if interval == 'root-n':
_check_poisson_conf_inputs(sigma, background, conflevel, interval)
conf_interval = np.array([n - np.sqrt(n),
n + np.sqrt(n)])
elif interval == 'root-n-0':
_check_poisson_conf_inputs(sigma, background, conflevel, interval)
conf_interval = np.array([n - np.sqrt(n),
n + np.sqrt(n)])
if np.isscalar(n):
if n == 0:
conf_interval[1] = 1
else:
conf_interval[1, n == 0] = 1
elif interval == 'pearson':
_check_poisson_conf_inputs(sigma, background, conflevel, interval)
conf_interval = np.array([n + 0.5 - np.sqrt(n + 0.25),
n + 0.5 + np.sqrt(n + 0.25)])
elif interval == 'sherpagehrels':
_check_poisson_conf_inputs(sigma, background, conflevel, interval)
conf_interval = np.array([n - 1 - np.sqrt(n + 0.75),
n + 1 + np.sqrt(n + 0.75)])
elif interval == 'frequentist-confidence':
_check_poisson_conf_inputs(1., background, conflevel, interval)
import scipy.stats
alpha = scipy.stats.norm.sf(sigma)
conf_interval = np.array([0.5 * scipy.stats.chi2(2 * n).ppf(alpha),
0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha)])
if np.isscalar(n):
if n == 0:
conf_interval[0] = 0
else:
conf_interval[0, n == 0] = 0
elif interval == 'kraft-burrows-nousek':
if conflevel is None:
raise ValueError('Set conflevel for method {}. (sigma is '
'ignored.)'.format(interval))
conflevel = np.asanyarray(conflevel)
if np.any(conflevel <= 0) or np.any(conflevel >= 1):
raise ValueError('Conflevel must be a number between 0 and 1.')
background = np.asanyarray(background)
if np.any(background < 0):
raise ValueError('Background must be >= 0.')
conf_interval = np.vectorize(_kraft_burrows_nousek,
cache=True)(n, background, conflevel)
conf_interval = np.vstack(conf_interval)
else:
raise ValueError("Invalid method for Poisson confidence intervals: "
"{}".format(interval))
return conf_interval
def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False):
"""
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(a - median(a)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis along which the MADs are computed. The default (`None`) is
to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis==None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.RandomState(12345)
>>> from numpy.random import randn
>>> mad = median_absolute_deviation(rand.randn(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.65244241428454486
See Also
--------
mad_std
"""
if func is None:
# Check if the array has a mask and if so use np.ma.median
        # See https://github.com/numpy/numpy/issues/7330 for why using
        # np.ma.median for normal arrays should not be done (summary:
        # np.ma.median always returns a masked array even if the result
        # should be scalar). (#4658)
if isinstance(data, np.ma.MaskedArray):
is_masked = True
func = np.ma.median
if ignore_nan:
data = np.ma.masked_invalid(data)
elif ignore_nan:
is_masked = False
func = np.nanmedian
else:
is_masked = False
func = np.median
else:
is_masked = None
data = np.asanyarray(data)
# np.nanmedian has `keepdims`, which is a good option if we're not allowing
# user-passed functions here
data_median = func(data, axis=axis)
# broadcast the median array before subtraction
if axis is not None:
if isiterable(axis):
for ax in sorted(list(axis)):
data_median = np.expand_dims(data_median, axis=ax)
else:
data_median = np.expand_dims(data_median, axis=axis)
result = func(np.abs(data - data_median), axis=axis, overwrite_input=True)
if axis is None and np.ma.isMaskedArray(result):
# return scalar version
result = result.item()
elif np.ma.isMaskedArray(result) and not is_masked:
# if the input array was not a masked array, we don't want to return a
# masked array
result = result.filled(fill_value=np.nan)
return result
def mad_std(data, axis=None, func=None, ignore_nan=False):
r"""
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis along which the robust standard deviations are computed.
The default (`None`) is to compute the robust standard deviation
of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.RandomState(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
2.0232764659422626
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation
"""
# NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602
MAD = median_absolute_deviation(
data, axis=axis, func=func, ignore_nan=ignore_nan)
return MAD * 1.482602218505602
def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix,
gain=1.0):
"""Computes the signal to noise ratio for source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
    -------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs
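    Examples
    --------
    A minimal illustrative call (arbitrary numbers; the expected value below
    was worked out by hand from the signal and noise expressions used in the
    function body):
    >>> snr = signal_to_noise_oir_ccd(1000., 1.0, 0.2, 0.01, 5., 25.)
    >>> print(round(float(snr), 2))
    12.06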
"""
signal = t * source_eps * gain
noise = np.sqrt(t * (source_eps * gain + npix *
(sky_eps * gain + dark_eps)) + npix * rd ** 2)
return signal / noise
def bootstrap(data, bootnum=100, samples=None, bootfunc=None):
"""Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : numpy.ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : numpy.ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x:test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,)
"""
if samples is None:
samples = data.shape[0]
# make sure the input is sane
if samples < 1 or bootnum < 1:
raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.")
if bootfunc is None:
resultdims = (bootnum,) + (samples,) + data.shape[1:]
else:
# test number of outputs from bootfunc, avoid single outputs which are
# array-like
try:
resultdims = (bootnum, len(bootfunc(data)))
except TypeError:
resultdims = (bootnum,)
# create empty boot array
boot = np.empty(resultdims)
for i in range(bootnum):
bootarr = np.random.randint(low=0, high=data.shape[0], size=samples)
if bootfunc is None:
boot[i] = data[bootarr]
else:
boot[i] = bootfunc(data[bootarr])
return boot
def _scipy_kraft_burrows_nousek(N, B, CL):
'''Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <http://adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int
Total observed count number
B : float
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires `scipy`. This implementation will cause Overflow Errors for about
N > 100 (the exact limit depends on details of how scipy was compiled).
See `~astropy.stats.mpmath_poisson_upper_limit` for an implementation that
is slower, but can deal with arbitrarily high numbers since it is based on
the `mpmath <http://mpmath.org/>`_ library.
'''
from scipy.optimize import brentq
from scipy.integrate import quad
from math import exp
def eqn8(N, B):
n = np.arange(N + 1, dtype=np.float64)
# Create an array containing the factorials. scipy.special.factorial
# requires SciPy 0.14 (#5064) therefore this is calculated by using
# numpy.cumprod. This could be replaced by factorial again as soon as
# older SciPy are not supported anymore but the cumprod alternative
# might also be a bit faster.
factorial_n = np.ones(n.shape, dtype=np.float64)
np.cumprod(n[1:], out=factorial_n[1:])
return 1. / (exp(-B) * np.sum(np.power(B, n) / factorial_n))
# The parameters of eqn8 do not vary between calls so we can calculate the
    # result once and reuse it. The same is true for the factorial of N.
    # eqn7 is called hundreds of times, so "caching" these values yields a
# significant speedup (factor 10).
eqn8_res = eqn8(N, B)
factorial_N = float(math.factorial(N))
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
return quad(eqn7, S_min, S_max, args=(N, B), limit=500)
def find_s_min(S_max, N, B):
'''
        Kraft, Burrows and Nousek suggest integrating from N-B in both
        directions at once, so that S_min and S_max move similarly (see
        the article for details). Here, this is implemented differently:
        treat S_max as the optimization parameter in func and then
        calculate the matching S_min that satisfies eqn7(S_max) =
        eqn7(S_min).
'''
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.
else:
return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out[0] - CL
S_max = brentq(func, N - B, 100)
S_min = find_s_min(S_max, N, B)
return S_min, S_max
def _mpmath_kraft_burrows_nousek(N, B, CL):
'''Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <http://adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int
Total observed count number
B : float
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires the `mpmath <http://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100.
'''
from mpmath import mpf, factorial, findroot, fsum, power, exp, quad
N = mpf(N)
B = mpf(B)
CL = mpf(CL)
def eqn8(N, B):
sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
return 1. / (exp(-B) * fsum(sumterms))
eqn8_res = eqn8(N, B)
factorial_N = factorial(N)
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
def eqn7NB(S):
return eqn7(S, N, B)
return quad(eqn7NB, [S_min, S_max])
def find_s_min(S_max, N, B):
'''
        Kraft, Burrows and Nousek suggest integrating from N-B in both
        directions at once, so that S_min and S_max move similarly (see
        the article for details). Here, this is implemented differently:
        treat S_max as the optimization parameter in func and then
        calculate the matching S_min that satisfies eqn7(S_max) =
        eqn7(S_min).
'''
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.
else:
def eqn7ysmax(x):
return eqn7(x, N, B) - y_S_max
return findroot(eqn7ysmax, (N - B) / 2.)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out - CL
S_max = findroot(func, N - B, tol=1e-4)
S_min = find_s_min(S_max, N, B)
return float(S_min), float(S_max)
def _kraft_burrows_nousek(N, B, CL):
'''Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <http://adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int
Total observed count number
B : float
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
    This function has an optional dependency: either `scipy` or `mpmath
    <http://mpmath.org/>`_ needs to be available. (Scipy only works for
N < 100).
'''
try:
import scipy # noqa
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
try:
import mpmath # noqa
HAS_MPMATH = True
except ImportError:
HAS_MPMATH = False
if HAS_SCIPY and N <= 100:
try:
return _scipy_kraft_burrows_nousek(N, B, CL)
except OverflowError:
if not HAS_MPMATH:
raise ValueError('Need mpmath package for input numbers this '
'large.')
if HAS_MPMATH:
return _mpmath_kraft_burrows_nousek(N, B, CL)
raise ImportError('Either scipy or mpmath are required.')
def kuiper_false_positive_probability(D, N):
"""Compute the false positive probability for the Kuiper statistic.
Uses the set of four formulas described in Paltani 2004; they report
the resulting function never underestimates the false positive
probability but can be a bit high in the N=40..50 range.
(They quote a factor 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
Notes
-----
Eq 7 of Paltani 2004 appears to incorrectly quote the original formula
(Stephens 1965). This function implements the original formula, as it
produces a result closer to Monte Carlo simulations.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
Astronomy and Astrophysics, v.240, p.789-790, 2004.
.. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution
and significance points", Biometrika, v.52, p.309, 1965.
"""
try:
from scipy.special import factorial, comb
except ImportError:
# Retained for backwards compatibility with older versions of scipy
# (factorial appears to have moved here in 0.14)
from scipy.misc import factorial, comb
if D < 0. or D > 2.:
raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test")
if D < 2. / N:
return 1. - factorial(N) * (D - 1. / N)**(N - 1)
elif D < 3. / N:
k = -(N * D - 1.) / 2.
r = np.sqrt(k**2 - (N * D - 2.)**2 / 2.)
a, b = -k + r, -k - r
return 1 - (factorial(N - 1) * (b**(N - 1) * (1 - a) - a**(N - 1) * (1 - b))
/ N**(N - 2) / (b - a))
elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.) / (2. * N) and N % 2 == 1):
# NOTE: the upper limit of this sum is taken from Stephens 1965
t = np.arange(np.floor(N * (1 - D)) + 1)
y = D + t / N
Tt = y**(t - 3) * (y**3 * N
- y**2 * t * (3 - 2 / N)
+ y * t * (t - 1) * (3 - 2 / N) / N
- t * (t - 1) * (t - 2) / N**2)
term = Tt * comb(N, t) * (1 - D - t / N)**(N - t - 1)
return term.sum()
else:
z = D * np.sqrt(N)
# When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2)
# underflows. Cutting off just before avoids triggering a (pointless)
# underflow warning if `under="warn"`.
ms = np.arange(1, 18.82 / z)
S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum()
S2 = (ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2)).sum()
return S1 - 8 * D / 3 * S2
def kuiper(data, cdf=lambda x: x, args=()):
"""Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
Returns (D, fpp), where D is the Kuiper D number and fpp is the
probability that a value as large as D would occur if data was
drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122.
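    Examples
    --------
    A small hand-worked case against the default uniform CDF (values rounded;
    here D = 7/15 and the fpp follows from the first branch of
    `kuiper_false_positive_probability`):
    >>> D, fpp = kuiper([0.1, 0.5, 0.9])
    >>> print(round(float(D), 4), round(float(fpp), 4))
    0.4667 0.8933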
"""
data = np.sort(data)
cdfv = cdf(data, *args)
N = len(data)
D = (np.amax(cdfv - np.arange(N) / float(N)) +
np.amax((np.arange(N) + 1) / float(N) - cdfv))
return D, kuiper_false_positive_probability(D, N)
def kuiper_two(data1, data2):
"""Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples.
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1, = data1.shape
n2, = data2.shape
common_type = np.find_common_type([], [data1.dtype, data2.dtype])
if not (np.issubdtype(common_type, np.number)
and not np.issubdtype(common_type, np.complexfloating)):
raise ValueError('kuiper_two only accepts real inputs')
# nans, if any, are at the end after sorting.
if np.isnan(data1[-1]) or np.isnan(data2[-1]):
raise ValueError('kuiper_two only accepts non-nan inputs')
D = _stats.ks_2samp(np.asarray(data1, common_type),
np.asarray(data2, common_type))
Ne = len(data1) * len(data2) / float(len(data1) + len(data2))
return D, kuiper_false_positive_probability(D, Ne)
def fold_intervals(intervals):
"""Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of three-element tuples (ai,bi,wi)
The intervals to fold; ai and bi are the limits of the interval, and
wi is the weight to apply to the interval.
Returns
-------
    breaks : array of floats of length N
        The endpoints of a set of intervals covering [0,1]; breaks[0] = 0 and
        breaks[-1] = 1.
weights : array of floats of length N-1
        The ith element is the sum, over all the input intervals, of the
        number of times the subinterval (breaks[i], breaks[i+1]) is covered,
        weighted by each covering interval's weight.
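    Examples
    --------
    A small hand-worked case: one observing block covering phases 0.2 to 0.7
    with weight 1 folds to weight 1 inside (0.2, 0.7) and 0 outside:
    >>> breaks, totals = fold_intervals([(0.2, 0.7, 1.)])
    >>> print(breaks.tolist())
    [0.0, 0.2, 0.7, 1.0]
    >>> print(totals.tolist())
    [0.0, 1.0, 0.0]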
"""
r = []
breaks = set()
tot = 0
for (a, b, wt) in intervals:
tot += (np.ceil(b) - np.floor(a)) * wt
fa = a % 1
breaks.add(fa)
r.append((0, fa, -wt))
fb = b % 1
breaks.add(fb)
r.append((fb, 1, -wt))
breaks.add(0.)
breaks.add(1.)
breaks = sorted(breaks)
breaks_map = dict([(f, i) for (i, f) in enumerate(breaks)])
totals = np.zeros(len(breaks) - 1)
totals += tot
for (a, b, wt) in r:
totals[breaks_map[a]:breaks_map[b]] += wt
return np.array(breaks), totals
def cdf_from_intervals(breaks, totals):
"""Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : array of floats of length N
The boundaries of successive intervals.
totals : array of floats of length N-1
The weight for each interval.
Returns
-------
f : callable
        A cumulative distribution function corresponding to the
        piecewise-constant probability distribution given by breaks and totals.
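    Examples
    --------
    A sketch building on the ``fold_intervals`` example (hand-worked values;
    the CDF rises only across the covered subinterval):
    >>> breaks, totals = fold_intervals([(0.2, 0.7, 1.)])
    >>> f = cdf_from_intervals(breaks, totals)
    >>> print(round(float(f(0.2)), 3), round(float(f(0.7)), 3))
    0.0 1.0
    >>> print(round(float(f(0.45)), 3))
    0.5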
"""
if breaks[0] != 0 or breaks[-1] != 1:
raise ValueError("Intervals must be restricted to [0,1]")
if np.any(np.diff(breaks) <= 0):
raise ValueError("Breaks must be strictly increasing")
if np.any(totals < 0):
raise ValueError(
"Total weights in each subinterval must be nonnegative")
if np.all(totals == 0):
raise ValueError("At least one interval must have positive exposure")
b = breaks.copy()
c = np.concatenate(((0,), np.cumsum(totals * np.diff(b))))
c /= c[-1]
return lambda x: np.interp(x, b, c, 0, 1)
def interval_overlap_length(i1, i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : pairs of two floats
The two intervals.
Returns
-------
l : float
The length of the overlap between the two intervals.
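    Examples
    --------
    Two hand-checked cases, one overlapping pair and one disjoint pair:
    >>> print(interval_overlap_length((0., 0.5), (0.25, 1.)))
    0.25
    >>> print(interval_overlap_length((0., 0.25), (0.5, 1.)))
    0.0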
"""
(a, b) = i1
(c, d) = i2
if a < c:
if b < c:
return 0.
elif b < d:
return b - c
else:
return d - c
elif a < d:
if b < d:
return b - a
else:
return d - a
else:
return 0
def histogram_intervals(n, breaks, totals):
"""Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : array of floats of length N
Endpoints of the intervals in the PDF
totals : array of floats of length N-1
Probability densities in each bin
Returns
-------
h : array of floats
The average weight for each bin
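    Examples
    --------
    A hand-worked case in which the two histogram bins line up exactly with
    the piecewise-constant weights, so the bin averages equal the weights:
    >>> h = histogram_intervals(2, np.array([0., 0.5, 1.]), np.array([1., 3.]))
    >>> print(h.tolist())
    [1.0, 3.0]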
"""
h = np.zeros(n)
start = breaks[0]
for i in range(len(totals)):
end = breaks[i + 1]
for j in range(n):
ol = interval_overlap_length((float(j) / n,
float(j + 1) / n), (start, end))
h[j] += ol / (1. / n) * totals[i]
start = end
return h
|
bsipocz/astropy
|
astropy/stats/funcs.py
|
Python
|
bsd-3-clause
| 58,579
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Openssh(AutotoolsPackage):
"""OpenSSH is the premier connectivity tool for remote login with the
SSH protocol. It encrypts all traffic to eliminate
eavesdropping, connection hijacking, and other attacks. In
addition, OpenSSH provides a large suite of secure tunneling
capabilities, several authentication methods, and sophisticated
configuration options.
"""
homepage = "https://www.openssh.com/"
url = "https://mirrors.sonic.net/pub/OpenBSD/OpenSSH/portable/openssh-7.5p1.tar.gz"
version('7.5p1', '652fdc7d8392f112bef11cacf7e69e23')
version('7.4p1', 'b2db2a83caf66a208bb78d6d287cdaa3')
version('7.3p1', 'dfadd9f035d38ce5d58a3bf130b86d08')
version('7.2p2', '13009a9156510d8f27e752659075cced')
version('7.1p2', '4d8547670e2a220d5ef805ad9e47acf2')
version('7.0p1', '831883f251ac34f0ab9c812acc24ee69')
version('6.9p1', '0b161c44fc31fbc6b76a6f8ae639f16f')
version('6.8p1', '08f72de6751acfbd0892b5f003922701')
version('6.7p1', '3246aa79317b1d23cae783a3bf8275d6')
version('6.6p1', '3e9800e6bca1fbac0eea4d41baa7f239')
depends_on('openssl')
depends_on('libedit')
depends_on('ncurses')
depends_on('zlib')
|
skosukhin/spack
|
var/spack/repos/builtin/packages/openssh/package.py
|
Python
|
lgpl-2.1
| 2,494
|
"""Tests for Oscapcontent
:Requirement: Oscapcontent
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: SCAPPlugin
:Assignee: jpathan
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import os
import pytest
from nailgun import entities
from robottelo import ssh
from robottelo.config import settings
from robottelo.constants import ANY_CONTEXT
from robottelo.datafactory import gen_string
@pytest.fixture(scope='module')
def oscap_content_path():
_, file_name = os.path.split(settings.oscap.content_path)
local_file = f"/tmp/{file_name}"
ssh.download_file(settings.oscap.content_path, local_file)
return local_file
@pytest.mark.tier1
@pytest.mark.upgrade
def test_positive_end_to_end(session, oscap_content_path):
"""Perform end to end testing for openscap content component
:id: 9870555d-0b60-41ab-a481-81d4d3f78fec
:Steps:
1. Create an openscap content.
2. Read values from created entity.
3. Update the openscap content with new name.
4. Delete openscap content
:expectedresults: All expected CRUD actions finished successfully
:CaseLevel: Integration
"""
title = gen_string('alpha')
new_title = gen_string('alpha')
org = entities.Organization().create()
loc = entities.Location().create()
with session:
session.oscapcontent.create(
{
'file_upload.title': title,
'file_upload.scap_file': oscap_content_path,
'organizations.resources.assigned': [org.name],
'locations.resources.assigned': [loc.name],
}
)
oscap_values = session.oscapcontent.read(title)
assert oscap_values['file_upload']['title'] == title
assert (
oscap_values['file_upload']['uploaded_scap_file']
== oscap_content_path.rsplit('/', 1)[-1]
)
assert org.name in oscap_values['organizations']['resources']['assigned']
assert loc.name in oscap_values['locations']['resources']['assigned']
session.oscapcontent.update(title, {'file_upload.title': new_title})
session.location.search('abc') # workaround for issue SatelliteQE/airgun#382.
assert session.oscapcontent.search(new_title)[0]['Title'] == new_title
session.location.search('abc')
assert not session.oscapcontent.search(title)
session.location.search('abc')
session.oscapcontent.delete(new_title)
session.location.search('abc')
assert not session.oscapcontent.search(new_title)
@pytest.mark.tier1
def test_negative_create_with_same_name(session, oscap_content_path):
"""Create OpenScap content with same name
:id: f5c6491d-b83c-4ca2-afdf-4bb93e6dd92b
:Steps:
1. Create an openscap content.
2. Provide all the appropriate parameters.
3. Create openscap content with same name
:expectedresults: Creating content for OpenScap is not successful.
:BZ: 1474172
:customerscenario: true
:CaseImportance: Critical
"""
content_name = gen_string('alpha')
with session:
session.organization.select(org_name=ANY_CONTEXT['org'])
session.location.select(loc_name=ANY_CONTEXT['location'])
session.oscapcontent.create(
{'file_upload.title': content_name, 'file_upload.scap_file': oscap_content_path}
)
assert session.oscapcontent.search(content_name)[0]['Title'] == content_name
with pytest.raises(AssertionError) as context:
session.oscapcontent.create(
{'file_upload.title': content_name, 'file_upload.scap_file': oscap_content_path}
)
assert 'has already been taken' in str(context.value)
|
jyejare/robottelo
|
tests/foreman/ui/test_oscapcontent.py
|
Python
|
gpl-3.0
| 3,766
|
import codecs
import os
import os.path
from os.path import join
from setuptools import setup
import cffi_build.cffi_build as cffi_build
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
# Temp fix for CoppeliaSim 4.1
if 'COPPELIASIM_ROOT' not in os.environ:
raise RuntimeError('COPPELIASIM_ROOT not defined.')
usrset_file = os.path.join(os.environ['COPPELIASIM_ROOT'], 'system', 'usrset.txt')
usrset = ''
if os.path.isfile(usrset_file):
with open(usrset_file, 'r') as f:
usrset = f.read()
if 'allowOldEduRelease' not in usrset:
with open(usrset_file, 'a+') as f:
f.write('\nallowOldEduRelease=7501\n')
setup(name='PyRep',
# Version A.B.C.D.
# A.B.C info corresponds to the CoppeliaSim version needed.
# D info corresponds to the PyRep version.
version=get_version("pyrep/__init__.py"),
description='Python CoppeliaSim wrapper',
author='Stephen James',
author_email='slj12@ic.ac.uk',
url='https://www.doc.ic.ac.uk/~slj12',
packages=['pyrep',
'pyrep.backend',
'pyrep.objects',
'pyrep.sensors',
'pyrep.robots',
'pyrep.robots.arms',
'pyrep.robots.end_effectors',
'pyrep.robots.mobiles',
'pyrep.robots.configuration_paths',
'pyrep.textures',
'pyrep.misc',
],
ext_modules=[cffi_build.ffibuilder.distutils_extension(
join('build', 'pyrep', 'backend'))],
)
|
stepjam/PyRep
|
setup.py
|
Python
|
mit
| 1,927
|
'''
Author: Giggle Leo
Date : 8 September 2014
Description : physics library
'''
from numpy import *
from numpy.linalg import *
from matplotlib.pyplot import *
import scipy.sparse as sps
import pdb,time
import cPickle as pickle
__all__=['sx','sy','sz','s','s1x','s1y','s1z','s1','fermi','s2vec','vec2s',
'ind2c','c2ind','perm_parity','bcast_dot','quicksave','quickload',
'inherit_docstring_from']
############################ DEFINITIONS ##############################
# pauli spin
sx = array([[0, 1],[ 1, 0]])
sy = array([[0, -1j],[1j, 0]])
sz = array([[1, 0],[0, -1]])
s=[identity(2),sx,sy,sz]
# spin 1 matrices.
s1x=array([[0,1,0],[1,0,1],[0,1,0]])/sqrt(2)
s1y=array([[0,-1j,0],[1j,0,-1j],[0,1j,0]])/sqrt(2)
s1z=array([[1,0,0],[0,0,0],[0,0,-1]])
s1=[identity(3),s1x,s1y,s1z]
############################ FUNCTIONS ##############################
def bcast_dot(A,B):
'''
dot product broadcast version.
'''
return einsum('...jk,...kl->...jl', A, B)
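# Illustration (hand-checked shapes): for A of shape (5, 2, 3) and B of shape
# (5, 3, 4), bcast_dot(A, B) performs the five matrix products in one einsum
# call and returns an array of shape (5, 2, 4).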
def fermi(elist,T=0):
'''
    Fermi statistics, python implementation.
Parameters:
:elist: float/ndarray, the energy.
:T: float, the temperature.
Return:
        float/ndarray, Fermionic distribution.
'''
elist=asarray(elist)
if T<0.:
raise ValueError('Negative temperature is not allowed!')
elif T==0:
if ndim(elist)!=0:
f=zeros(elist.shape,dtype='float64')
f[elist<0]=1.
f[elist==0]=0.5
return f
else:
if elist>0:
return 0.
elif elist==0:
return 0.5
else:
return 1.
else:
f=1./(1.+exp(-abs(elist)/T))
if ndim(elist)!=0:
posmask=elist>0
f[posmask]=1.-f[posmask]
elif elist>0:
f=1.-f
return f
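# Hand-checked values for the function above: at T=0 the occupation is a step
# function, fermi(-1.)==1. and fermi(0.)==0.5; at finite T the step is
# smoothed, e.g. fermi(0.,T=1.)==0.5.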
def s2vec(s):
'''
Transform a spin to a 4 dimensional vector, corresponding to s0,sx,sy,sz component.
s:
the spin.
'''
res=array([trace(s),trace(dot(sx,s)),trace(dot(sy,s)),trace(dot(sz,s))])/2
return res
def vec2s(n):
'''
Transform a vector of length 3 or 4 to a pauli matrix.
n:
a 1-D array of length 3 or 4 to specify the `direction` of spin.
*return*:
2 x 2 matrix.
'''
if len(n)<=3:
res=zeros([2,2],dtype='complex128')
for i in xrange(len(n)):
res+=s[i+1]*n[i]
return res
elif len(n)==4:
return identity(2)*n[0]+sx*n[1]+sy*n[2]+sz*n[3]
else:
raise Exception('length of vector %s too large.'%len(n))
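# Note: s2vec and vec2s are inverse maps between 2x2 matrices and their Pauli
# components; e.g. s2vec(sz) gives [0, 0, 0, 1] (as a complex array) and
# vec2s([0, 0, 1]) rebuilds sz.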
def c2ind(c,N):
'''
    Get the index of the total space N from the index of the subspace expression (n1 x n2 x n3...)
Parameters:
:c: 1D array/2D array, a list of indexes like [i,j,k,...]
:N: 1D array, the space config [n1,n2,n3...].
Return:
integer/1D array, indices.
'''
assert(shape(c)[-1]==len(N))
c=array(c)
n=c.shape[-1]
cc=c[...,0]
for i in xrange(n-1):
cc=cc*N[i+1]+c[...,i+1]
return cc
def ind2c(ind,N):
'''
    Turn a global index into sub-indices.
Parameters:
:ind: integer, the index of total space.
:N: 1D array, the space config [n1,n2,n3...].
Return:
1D array, the subindices.
'''
dim=len(N)
indl=ndarray(list(shape(ind))+[dim],dtype='int32')
for i in xrange(dim):
indl[...,-1-i]=ind%N[-1-i]
ind=ind/N[-1-i]
return indl
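# Note: c2ind and ind2c are inverses. For the space config N=[2,3,4], the
# sub-indices [1,2,3] map to the flat index 1*(3*4) + 2*4 + 3 = 23 via c2ind,
# and ind2c(23, [2,3,4]) recovers [1,2,3].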
def perm_parity(perm):
"""
    Returns the parity of the permutation perm (0 or 1).
"""
size=len(perm)
unchecked=ones(size,dtype='bool')
    # c counts the number of cycles in the perm, including 1-cycles
c=0
for j in xrange(size):
if unchecked[j]:
c=c+1
unchecked[j]=False
i=j
while perm[i]!=j:
i=perm[i]
unchecked[i]=False
return (size-c)%2
def quicksave(filename,obj):
'''Save an instance.'''
f=open(filename,'wb')
pickle.dump(obj,f,2)
f.close()
def quickload(filename):
'''Load an instance.'''
f=open(filename,'rb')
obj=pickle.load(f)
f.close()
return obj
def inherit_docstring_from(cls):
def docstring_inheriting_decorator(fn):
fn.__doc__ = getattr(cls, fn.__name__).__doc__
return fn
return docstring_inheriting_decorator
|
GiggleLiu/tba
|
hgen/utils.py
|
Python
|
gpl-2.0
| 4,377
|
# -*- coding:utf-8 -*-
# Define the functions
def add(a, b):
print "ADDING %d + %d" % (a, b)
return a + b
def subtract(a, b):
print "SUBTRACTING %d - %d" % (a, b)
return a - b
def multiply(a, b):
print "MULTIPLYING %d * %d" % (a, b)
return a * b
def divide(a, b):
print "DIVIDING %d / %d\n" % (a, b)
return a / b
print "Let's do some math with just functions!i\n"
# Compute and return the final results
age = add(30, 5)
height = subtract(78, 4)
weight = multiply(90, 2)
iq = divide(100, 2)
# Print the final computed results
print "Age: %d, Height: %d, Weight: %d, IQ: %d\n" % (age, height, weight, iq)
# A puzzle for the extra credit, type it in anyway.
print "Here is a puzzle."
# Nested function calls
what = add(age, subtract(height, multiply(weight, divide(iq, 2))))
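# Worked by hand: age=35, height=74, weight=180, iq=50, so the line above is
# add(35, subtract(74, multiply(180, divide(50, 2)))) = add(35, 74 - 4500) = -4391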
print "That becomes: ", what, "Can you do it by hand?"
|
Mbdn/Python-Beginner
|
ex21.py
|
Python
|
mit
| 841
|
"""Helper functions for tests using REST API."""
from collections import namedtuple
import pytest
from cfme.exceptions import OptionNotAvailable
from cfme.utils.wait import wait_for
def assert_response(
rest_obj, success=None, http_status=None, results_num=None, task_wait=600):
"""
Asserts that the response HTTP status code and content is as expected.
If specific http_status is not given, we simply check that the status was a
successful response code via requests.Response.__bool__()
If response status code is '204', ensures there is no content.
Example of verifying a success response:
.. code-block:: python
assert_response(appliance)
Example of verifying a failure response:
.. code-block:: python
with error.expected('ActiveRecord::RecordNotFound'):
collection.action.delete(some_stuff)
assert_response(appliance, http_status=404)
Note: For below args, 'results' refers to rest_obj.last_response.json()['results']
Args:
rest_obj -- instance of cfme.utils.Appliance (with rest_api attr defined)
or cfme.utils.appliance.MiqApi
success -- if defined, checks each result in results to ensure that result['success']
is equal to the value defined here
http_status (int or tuple of int) -- expected http response status codes, if None,
we simply verify that the response was a success
results_num (int) -- specifies expected number of results
        task_wait (int) -- if any result in results contains a 'task_id', this method will poll
the API to ensure that task has moved to 'finished' and wait 'task_wait' seconds for
that state change to occur
"""
# check if `rest_obj` is an object with attribute referencing rest_api instance
rest_api = rest_obj.rest_api if hasattr(rest_obj, 'rest_api') else rest_obj
last_response = rest_api.response
if http_status:
# Convert single int to tuple if needed
if isinstance(http_status, int):
http_status = (http_status,)
# Check the response codes
assert last_response.status_code in http_status,\
'The status code {} doesn\'t match the expected status code {}'.format(
last_response.status_code, http_status)
else:
# No specific status_code specified, simply check if response was a success
assert last_response, 'The request failed with {}'.format(last_response.status_code)
try:
content = last_response.json()
except Exception:
if last_response.status_code == 204:
# 204 == No Content: check that message-body is empty and return
assert not last_response.text.strip(), 'No content expected'
return
else:
raise AssertionError('No JSON content returned')
def _check_result(result):
# check that result contains data to catch bugs like BZ 1414845
assert result, 'The result should not be empty'
if success is not None:
assert 'success' in result
assert result['success'] is success
elif 'success' in result and last_response:
# expect True if 'success' is present and HTTP status is success
assert result['success'], 'The response "success" is {}'.format(result['success'])
# if the request succeeded and there is a 'task_id' present in the response,
# check the corresponding resource in /api/task/:task_id
if task_wait and 'task_id' in result and result.get('success') and last_response:
task = rest_api.get_entity('tasks', result['task_id'])
task.wait_exists(num_sec=5)
wait_for(
lambda: task.state.lower() == 'finished',
fail_func=task.reload,
num_sec=task_wait,
message='task state finished',
)
task_message = getattr(task, 'message', '')
assert task.status.lower() == 'ok', (
'Task failed with status "{}", message "{}"'.format(task.status, task_message))
if 'results' in content:
results = content['results']
results_len = len(results)
if results_num is not None:
assert results_len == results_num,\
'The number of results {} doesn\'t match the expected number {}'.format(
results_len, results_num)
for result in results:
_check_result(result)
else:
_check_result(content)
# preserve the original response
rest_api.response = last_response
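# Illustrative sketch, not part of the original module: verifying that a bulk
# "start" action on a few VMs returned the expected number of results and that
# any spawned tasks finish. The 'appliance' object and the 'start' action are
# assumptions made for this example.
def _example_bulk_start_vms(appliance):
    vms = appliance.rest_api.collections.vms.all[:3]
    appliance.rest_api.collections.vms.action.start(*vms)
    assert_response(appliance, results_num=3, task_wait=300)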
def get_vms_in_service(service):
"""Gets list of vm entities associated with the service."""
rest_api = service.collection._api
service.vms.reload()
# return entities under /api/vms, not under /api/services/:id/vms subcollection
# where "actions" are not available
return [rest_api.get_entity('vms', vm['id']) for vm in service.vms.all]
def create_resource(rest_api, col_name, col_data, col_action='create', substr_search=False):
"""Creates new resource in collection."""
collection = getattr(rest_api.collections, col_name)
try:
action = getattr(collection.action, col_action)
except AttributeError:
raise OptionNotAvailable(
"Action `{}` for {} is not implemented in this version".format(col_action, col_name))
entities = action(*col_data)
action_response = rest_api.response
search_str = '%{}%' if substr_search else '{}'
for entity in col_data:
if entity.get('name'):
wait_for(lambda: collection.find_by(
name=search_str.format(entity.get('name'))) or False, num_sec=180, delay=10)
elif entity.get('description'):
wait_for(lambda: collection.find_by(
description=search_str.format(entity.get('description'))) or False,
num_sec=180, delay=10)
else:
raise NotImplementedError
# make sure action response is preserved
rest_api.response = action_response
return entities
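# Illustrative sketch, not part of the original module: a typical call to
# create_resource. The 'service_catalogs' collection name and the payload
# fields are assumptions made for this example.
def _example_create_service_catalog(rest_api):
    payload = [{'name': 'example_catalog', 'description': 'created by example'}]
    return create_resource(rest_api, 'service_catalogs', payload)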
def delete_resources_from_collection(
resources, collection=None, not_found=None, num_sec=10, delay=2, check_response=True):
"""Checks that delete from collection works as expected."""
collection = collection or resources[0].collection
def _assert_response(*args, **kwargs):
if check_response:
assert_response(collection._api, *args, **kwargs)
collection.action.delete(*resources)
_assert_response()
for resource in resources:
resource.wait_not_exists(num_sec=num_sec, delay=delay)
if not_found:
with pytest.raises(Exception, match='ActiveRecord::RecordNotFound'):
collection.action.delete(*resources)
_assert_response(http_status=404)
else:
collection.action.delete(*resources)
_assert_response(success=False)
def delete_resources_from_detail(
resources, method='POST', num_sec=10, delay=2, check_response=True):
"""Checks that delete from detail works as expected."""
method = method.upper()
rest_api = resources[0].collection._api
def _assert_response(*args, **kwargs):
if check_response:
assert_response(rest_api, *args, **kwargs)
for resource in resources:
getattr(resource.action.delete, method)()
_assert_response()
# Wait for resource non-existence in separate loop so the delete actions are
# not delayed by waiting for the previously deleted resource to disappear.
# This way the combined wait time is likely to be much shorter.
for resource in resources:
resource.wait_not_exists(num_sec=num_sec, delay=delay)
with pytest.raises(Exception, match='ActiveRecord::RecordNotFound'):
getattr(resource.action.delete, method)()
_assert_response(http_status=404)
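# Illustrative sketch, not part of the original module: exercising delete from
# the detail endpoint for a couple of users. The 'appliance' object and the
# choice of the 'users' collection are assumptions made for this example.
def _example_delete_users_from_detail(appliance):
    users = appliance.rest_api.collections.users.all[-2:]
    delete_resources_from_detail(users, method='DELETE')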
def query_resource_attributes(resource, soft_assert=None):
"""Checks that all available attributes/subcollections are really accessible."""
collection = resource.collection
rest_api = collection._api
options = rest_api.options(collection._href)
attrs_to_query = options['virtual_attributes'] + options['relationships']
subcolls_to_check = options['subcollections']
FailedRecord = namedtuple('FailedRecord', ['name', 'type', 'error', 'response'])
service_href = resource.href
failed = []
missing = []
for attr in attrs_to_query:
try:
response = rest_api.get('{}?attributes={}'.format(service_href, attr))
assert rest_api.response, 'Failed response'
except Exception as err:
failed.append(FailedRecord(attr, 'attribute', err, rest_api.response))
continue
if attr not in response:
missing.append(attr)
for subcol in subcolls_to_check:
try:
if subcol == 'metric_rollups':
response = rest_api.get(
"{}/{}?capture_interval=hourly&start_date=2019-01-01".format(
service_href, subcol))
assert rest_api.response, "Failed response"
continue
subcol_rest = getattr(resource, subcol)
subcol_rest.reload()
except Exception as err:
failed.append(FailedRecord(subcol, 'subcollection', err, rest_api.response))
outcome = namedtuple('AttrCheck', ['failed', 'missing'])(failed, missing)
if soft_assert:
for failure in outcome.failed:
soft_assert(False, '{0} "{1}": status: {2}, error: `{3}`'.format(
failure.type, failure.name, failure.response.status_code, failure.error))
return outcome
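# Illustrative sketch, not part of the original module: checking that a
# provider exposes all advertised attributes and subcollections, failing
# softly on errors. The 'provider_rest' entity and 'soft_assert' fixture are
# assumptions made for this example.
def _example_query_provider_attributes(provider_rest, soft_assert):
    outcome = query_resource_attributes(provider_rest, soft_assert=soft_assert)
    assert not outcome.missing, 'Missing attributes: {}'.format(outcome.missing)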
|
izapolsk/integration_tests
|
cfme/utils/rest.py
|
Python
|
gpl-2.0
| 9,765
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "README.txt")) as f:
README = f.read()
with open(os.path.join(here, "CHANGES.txt")) as f:
CHANGES = f.read()
requires = []
tests_require = [
"WebTest >= 1.3.1", # py3 compat
"pytest",
"pytest-cov",
]
setup(
name="pcaplugin",
version="0.0",
description="PCA Plugin example",
long_description=README + "\n\n" + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author="",
author_email="",
url="",
keywords="web pyramid pylons",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
extras_require={"testing": tests_require,},
install_requires=requires,
entry_points={
"pcaexample.plugins": ["examplePlugin = pcaplugin.plugin:PCAExamplePlugin",],
},
)
|
qlands/pyramid-pca-jinja2
|
pcaplugin/setup.py
|
Python
|
agpl-3.0
| 1,054
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This tests the scafacos p2nfft dipolar calculations by matching against
# reference data from direct summation. In 2d, reference data from the mdlc
# test case is used
import numpy as np
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.magnetostatics as magnetostatics
from tests_common import abspath
@utx.skipIfMissingFeatures(["SCAFACOS_DIPOLES"])
class Scafacos1d2d(ut.TestCase):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.time_step = 0.01
system.cell_system.skin = 0.5
system.periodicity = [1, 1, 1]
def tearDown(self):
self.system.part.clear()
self.system.actors.clear()
self.system.periodicity = [1, 1, 1]
def vector_error(self, a, b):
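        # Sum of per-particle difference magnitudes, normalized by the square
        # root of the number of particles.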
return np.sum(np.linalg.norm(a - b, axis=1)) / np.sqrt(a.shape[0])
def test_scafacos(self):
s = self.system
rho = 0.3
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 100
particle_radius = 0.5
box_l = np.cbrt(4 * n_particle * np.pi / (3 * rho)) * particle_radius
s.box_l = 3 * [box_l]
for dim in (2, 1):
print("Dimension", dim)
# Read reference data
if dim == 2:
file_prefix = "data/mdlc"
s.periodicity = [1, 1, 0]
else:
s.periodicity = [1, 0, 0]
file_prefix = "data/scafacos_dipoles_1d"
ref_E_path = abspath(file_prefix + "_reference_data_energy.dat")
ref_E = float(np.genfromtxt(ref_E_path))
# Particles
data = np.genfromtxt(abspath(
file_prefix + "_reference_data_forces_torques.dat"))
s.part.add(pos=data[:, 1:4], dip=data[:, 4:7])
s.part[:].rotation = (1, 1, 1)
if dim == 2:
scafacos = magnetostatics.Scafacos(
prefactor=1,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 0,
"pnfft_N": "80,80,160",
"pnfft_window_name": "bspline",
"pnfft_m": "4",
"p2nfft_ignore_tolerance": "1",
"pnfft_diff_ik": "0",
"p2nfft_r_cut": "6",
"p2nfft_alpha": "0.8",
"p2nfft_epsB": "0.05"})
s.actors.add(scafacos)
# change box geometry in x,y direction to ensure that
# scafacos survives it
s.box_l = np.array((1, 1, 1.3)) * box_l
else:
# 1d periodic in x
scafacos = magnetostatics.Scafacos(
prefactor=1,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 1,
"pnfft_N": "32,128,128",
"pnfft_direct": 0,
"p2nfft_r_cut": 2.855,
"p2nfft_alpha": "1.5",
"p2nfft_intpol_order": "-1",
"p2nfft_reg_kernel_name": "ewald",
"p2nfft_p": "16",
"p2nfft_ignore_tolerance": "1",
"pnfft_window_name": "bspline",
"pnfft_m": "8",
"pnfft_diff_ik": "1",
"p2nfft_epsB": "0.125"})
s.box_l = np.array((1, 1, 1)) * box_l
s.actors.add(scafacos)
s.integrator.run(0)
# Calculate errors
err_f = self.vector_error(s.part[:].f, data[:, 7:10])
err_t = self.vector_error(s.part[:].torque_lab, data[:, 10:13])
err_e = s.analysis.energy()["dipolar"] - ref_E
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(
abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(
abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(
abs(err_f), tol_f, "Force difference too large")
s.part.clear()
s.actors.clear()
if __name__ == "__main__":
ut.main()
|
fweik/espresso
|
testsuite/python/scafacos_dipoles_1d_2d.py
|
Python
|
gpl-3.0
| 5,148
|
from Exercise import *
class Metric( Exercise ):
"""Metric is just another sort of exercise"""
def __init__( self, description_dict ):
super( Metric, self ).__init__( description_dict )
@classmethod
def init_from_json( cls, dict_from_json ):
metric = cls( dict_from_json )
return metric
|
noooway/exj
|
Metric.py
|
Python
|
mit
| 329
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Account
"""
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import (random_gen,
cleanup_resources,
validateList)
from marvin.cloudstackAPI import *
from marvin.lib.base import (Domain,
Account,
ServiceOffering,
VirtualMachine,
Network,
User,
NATRule,
Template,
PublicIPAddress, Role)
from marvin.lib.common import (get_domain,
get_zone,
get_test_template,
list_accounts,
list_virtual_machines,
list_service_offering,
list_templates,
list_users,
get_builtin_template_info,
wait_for_cleanup)
from nose.plugins.attrib import attr
from marvin.cloudstackException import CloudstackAPIException
from marvin.codes import PASS
import time
from pyVmomi.VmomiSupport import GetVersionFromVersionUri
class Services:
"""Test Account Services
"""
def __init__(self):
self.services = {
"domain": {
"name": "Domain",
},
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "fr3sca",
},
"role": {
"name": "MarvinFake Role",
"type": "User",
"description": "Fake Role created by Marvin test"
},
"user": {
"email": "user@test.com",
"firstname": "User",
"lastname": "User",
"username": "User",
# Random characters are appended for unique
# username
"password": "fr3sca",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
# in MHz
"memory": 128,
# In MBs
},
"virtual_machine": {
"displayname": "Test VM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
# Hypervisor type should be same as
# hypervisor type of cluster
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"template": {
"displaytext": "Public Template",
"name": "Public template",
"ostype": 'CentOS 5.6 (64-bit)',
"url": "",
"hypervisor": '',
"format": '',
"isfeatured": True,
"ispublic": True,
"isextractable": True,
"templatefilter": "self"
},
"natrule": {
"publicport": 22,
"privateport": 22,
"protocol": 'TCP',
},
"ostype": 'CentOS 5.6 (64-bit)',
"sleep": 60,
"timeout": 10,
}
class TestAccounts(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestAccounts, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.services['mode'] = cls.zone.networktype
cls.template = get_test_template(
cls.api_client,
cls.zone.id,
cls.hypervisor
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [cls.service_offering]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, reversed(cls._cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, reversed(self.cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(
tags=[
"advanced",
"basic",
"eip",
"advancedns",
"sg"],
required_hardware="false")
def test_01_create_account(self):
"""Test Create Account and user for that account
"""
# Validate the following
# 1. Create an Account. Verify the account is created.
# 2. Create User associated with that account. Verify the created user
# Create an account
account = Account.create(
self.apiclient,
self.services["account"]
)
self.debug("Created account: %s" % account.name)
self.cleanup.append(account)
list_accounts_response = list_accounts(
self.apiclient,
id=account.id
)
self.assertEqual(
isinstance(list_accounts_response, list),
True,
"Check list accounts for valid data"
)
self.assertNotEqual(
len(list_accounts_response),
0,
"Check List Account response"
)
account_response = list_accounts_response[0]
self.assertEqual(
account.accounttype,
account_response.accounttype,
"Check Account Type of Created account"
)
self.assertEqual(
account.name,
account_response.name,
"Check Account Name of Created account"
)
# Create an User associated with account
user = User.create(
self.apiclient,
self.services["user"],
account=account.name,
domainid=account.domainid
)
self.debug("Created user: %s" % user.id)
list_users_response = list_users(
self.apiclient,
id=user.id
)
self.assertEqual(
isinstance(list_users_response, list),
True,
"Check list users for valid data"
)
self.assertNotEqual(
len(list_users_response),
0,
"Check List User response"
)
user_response = list_users_response[0]
self.assertEqual(
user.username,
user_response.username,
"Check username of Created user"
)
self.assertEqual(
user.state,
user_response.state,
"Check state of created user"
)
self.assertEqual(
"native",
user_response.usersource,
"Check user source of created user"
)
return
@attr(tags=["advanced", "basic", "eip", "advancedns", "sg"],
required_hardware="false")
def test_02_update_account(self):
"""
Tests that accounts can be updated with new name, network domain, dynamic role
:return:
"""
dynamic_roles_active = self.apiclient.listCapabilities(listCapabilities.listCapabilitiesCmd()).dynamicrolesenabled
if not dynamic_roles_active:
self.skipTest("Dynamic Role-Based API checker not enabled, skipping test")
ts = str(time.time())
network_domain = 'mycloud.com'
        # The role goes into the cleanup list first so it is not deleted before the account that uses it.
role = Role.create(self.apiclient, self.services['role'])
self.cleanup.append(role)
account = Account.create(self.apiclient, self.services['account'])
self.cleanup.append(account)
account.update(self.apiclient, newname=account.name + ts)
account.update(self.apiclient, roleid=role.id)
account.update(self.apiclient, networkdomain=network_domain)
list_accounts_response = list_accounts(self.apiclient, id=account.id)
test_account = list_accounts_response[0]
self.assertEqual(
test_account.roleid, role.id,
"Check the role for the account is changed")
self.assertEqual(
test_account.networkdomain, network_domain,
"Check the domain for the account is changed")
self.assertEqual(
test_account.name, account.name + ts,
"Check the name for the account is changed")
try:
account.update(self.apiclient, newname="")
self.fail("Account name change to empty name succeeded. Must be error.")
except CloudstackAPIException:
pass
class TestRemoveUserFromAccount(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
TestRemoveUserFromAccount,
cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.services['mode'] = cls.zone.networktype
cls.template = get_test_template(
cls.api_client,
cls.zone.id,
cls.hypervisor
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
# Create an account
cls.account = Account.create(
cls.api_client,
cls.services["account"]
)
cls._cleanup = [cls.account,
cls.service_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, reversed(cls._cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created instance, users etc
cleanup_resources(self.apiclient, reversed(self.cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(
tags=[
"advanced",
"basic",
"eip",
"advancedns",
"sg"],
required_hardware="false")
def test_01_user_remove_VM_running(self):
"""Test Remove one user from the account
"""
# Validate the following
# 1. Create an account with 2 users.
# 2. Start 2 VMs; one for each user of the account
# 3. Remove one user from the account. Verify that account
# still exists.
        # 4. Verify that VMs started by the removed user are still running
# Create an User associated with account and VMs
user_1 = User.create(
self.apiclient,
self.services["user"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Created user: %s" % user_1.id)
user_2 = User.create(
self.apiclient,
self.services["user"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Created user: %s" % user_2.id)
self.cleanup.append(user_2)
vm_1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id
)
self.debug("Deployed VM in account: %s, ID: %s" % (
self.account.name,
vm_1.id
))
self.cleanup.append(vm_1)
vm_2 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id
)
self.debug("Deployed VM in account: %s, ID: %s" % (
self.account.name,
vm_2.id
))
self.cleanup.append(vm_2)
# Remove one of the user
self.debug("Deleting user: %s" % user_1.id)
user_1.delete(self.apiclient)
# Account should exist after deleting user
accounts_response = list_accounts(
self.apiclient,
id=self.account.id
)
self.assertEqual(
isinstance(accounts_response, list),
True,
"Check for valid list accounts response"
)
self.assertNotEqual(
len(accounts_response),
0,
"Check List Account response"
)
vm_response = list_virtual_machines(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(vm_response, list),
True,
"Check for valid list VM response"
)
self.assertNotEqual(
len(vm_response),
0,
"Check List VM response"
)
# VMs associated with that account should be running
for vm in vm_response:
self.assertEqual(
vm.state,
'Running',
"Check state of VMs associated with account"
)
return
class TestNonRootAdminsPrivileges(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
TestNonRootAdminsPrivileges,
cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls._cleanup = []
# Create an account, domain etc
cls.domain = Domain.create(
cls.api_client,
cls.services["domain"],
)
cls._cleanup.append(cls.domain)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup.append(cls.account)
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, reversed(cls._cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created accounts
cleanup_resources(self.apiclient, reversed(self.cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(
tags=[
"advanced",
"basic",
"eip",
"advancedns",
"sg"],
required_hardware="false")
def test_01_non_root_admin_Privileges(self):
"""Test to verify Non Root admin previleges"""
# Validate the following
# 1. Create few accounts/users in ROOT domain
# 2. Verify listAccounts API gives only accounts associated with new
# domain.
# Create accounts for ROOT domain
account_1 = Account.create(
self.apiclient,
self.services["account"]
)
self.debug("Created account: %s" % account_1.name)
self.cleanup.append(account_1)
account_2 = Account.create(
self.apiclient,
self.services["account"]
)
self.debug("Created account: %s" % account_2.name)
self.cleanup.append(account_2)
accounts_response = list_accounts(
self.apiclient,
domainid=self.domain.id,
listall=True
)
self.assertEqual(
isinstance(accounts_response, list),
True,
"Check list accounts response for valid data"
)
self.assertEqual(
len(accounts_response),
1,
"Check List accounts response"
)
# Verify only account associated with domain is listed
for account in accounts_response:
self.assertEqual(
account.domainid,
self.domain.id,
"Check domain ID of account"
)
return
class TestServiceOfferingSiblings(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestServiceOfferingSiblings,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
cls._cleanup = []
# Create Domains, accounts etc
cls.domain_1 = Domain.create(
cls.api_client,
cls.services["domain"]
)
cls._cleanup.append(cls.domain_1)
cls.domain_2 = Domain.create(
cls.api_client,
cls.services["domain"]
)
cls._cleanup.append(cls.domain_2)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
domainid=cls.domain_1.id
)
cls._cleanup.append(cls.service_offering)
        # Create account for domain_1
cls.account_1 = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain_1.id
)
cls._cleanup.append(cls.account_1)
# Create an account for domain_2
cls.account_2 = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain_2.id
)
cls._cleanup.append(cls.account_2)
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, reversed(cls._cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created domains, accounts
cleanup_resources(self.apiclient, reversed(self.cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(
tags=[
"advanced",
"basic",
"eip",
"advancedns",
"sg"],
required_hardware="false")
def test_01_service_offering_siblings(self):
"""Test to verify service offerings at same level in hierarchy"""
# Validate the following
# 1. Verify service offering is visible for domain_1
# 2. Verify service offering is not visible for domain_2
service_offerings = list_service_offering(
self.apiclient,
domainid=self.domain_1.id
)
self.assertEqual(
isinstance(service_offerings, list),
True,
"Check if valid list service offerings response"
)
self.assertNotEqual(
len(service_offerings),
0,
"Check List Service Offerings response"
)
for service_offering in service_offerings:
self.debug("Validating service offering: %s" % service_offering.id)
self.assertEqual(
service_offering.id,
self.service_offering.id,
"Check Service offering ID for domain" +
str(self.domain_1.name)
)
# Verify private service offering is not visible to other domain
service_offerings = list_service_offering(
self.apiclient,
domainid=self.domain_2.id
)
self.assertEqual(
service_offerings,
None,
"Check List Service Offerings response for other domain"
)
return
class TestServiceOfferingHierarchy(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestServiceOfferingHierarchy,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
cls._cleanup = []
# Create domain, service offerings etc
cls.domain_1 = Domain.create(
cls.api_client,
cls.services["domain"]
)
cls._cleanup.append(cls.domain_1)
cls.domain_2 = Domain.create(
cls.api_client,
cls.services["domain"],
parentdomainid=cls.domain_1.id
)
cls._cleanup.append(cls.domain_2)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
domainid=cls.domain_1.id
)
cls._cleanup.append(cls.service_offering)
        # Create account for domain_1
cls.account_1 = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain_1.id
)
cls._cleanup.append(cls.account_1)
# Create an account for domain_2
cls.account_2 = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain_2.id
)
cls._cleanup.append(cls.account_2)
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, reversed(cls._cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, reversed(self.cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(
tags=[
"advanced",
"basic",
"eip",
"advancedns",
"sg"],
required_hardware="false")
def test_01_service_offering_hierarchy(self):
"""Test to verify service offerings at same level in hierarchy"""
# Validate the following
# 1. Verify service offering is visible for domain_1
# 2. Verify service offering is also visible for domain_2
service_offerings = list_service_offering(
self.apiclient,
domainid=self.domain_1.id
)
self.assertEqual(
isinstance(service_offerings, list),
True,
"Check List Service Offerings for a valid response"
)
self.assertNotEqual(
len(service_offerings),
0,
"Check List Service Offerings response"
)
for service_offering in service_offerings:
self.assertEqual(
service_offering.id,
self.service_offering.id,
"Check Service offering ID for domain" +
str(self.domain_1.name)
)
# Verify private service offering is not visible to other domain
service_offerings = list_service_offering(
self.apiclient,
domainid=self.domain_2.id
)
self.assertEqual(
service_offerings,
None,
"Check List Service Offerings for a valid response"
)
return
class TestTemplateHierarchy(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestTemplateHierarchy, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.services = Services().services
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls._cleanup = []
# Create domains, accounts and template
cls.domain_1 = Domain.create(
cls.api_client,
cls.services["domain"]
)
cls._cleanup.append(cls.domain_1)
cls.domain_2 = Domain.create(
cls.api_client,
cls.services["domain"],
parentdomainid=cls.domain_1.id
)
cls._cleanup.append(cls.domain_2)
        # Create account for domain_1
cls.account_1 = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain_1.id
)
cls._cleanup.append(cls.account_1)
# Create an account for domain_2
cls.account_2 = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain_2.id
)
cls._cleanup.append(cls.account_2)
builtin_info = get_builtin_template_info(cls.api_client, cls.zone.id)
cls.services["template"]["url"] = builtin_info[0]
cls.services["template"]["hypervisor"] = builtin_info[1]
cls.services["template"]["format"] = builtin_info[2]
# Register new template
cls.template = Template.register(
cls.api_client,
cls.services["template"],
zoneid=cls.zone.id,
account=cls.account_1.name,
domainid=cls.domain_1.id,
hypervisor=cls.hypervisor
)
cls._cleanup.append(cls.template)
# Wait for template to download
cls.template.download(cls.api_client)
# Wait for template status to be changed across
time.sleep(60)
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, reversed(cls._cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, reversed(self.cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "basic", "eip", "advancedns", "sg"], required_hardware="true")
def test_01_template_hierarchy(self):
"""Test to verify template at same level in hierarchy"""
# Validate the following
# 1. Verify template is visible for domain_1
# 2. Verify template is also visible for domain_2
# Sleep to ensure that template state is reflected across
templates = list_templates(
self.apiclient,
templatefilter='self',
account=self.account_1.name,
domainid=self.domain_1.id
)
self.assertEqual(
isinstance(templates, list),
True,
"Template response %s is not a list" % templates
)
self.assertNotEqual(
len(templates),
0,
"No templates found"
)
for template in templates:
self.assertEqual(
template.id,
self.template.id,
"Check Template ID for domain" + str(self.domain_1.name)
)
        # Verify the template registered in the parent domain is also visible to the sub-domain
templates = list_templates(
self.apiclient,
id=self.template.id,
templatefilter='all',
account=self.account_2.name,
domainid=self.domain_2.id
)
self.assertEqual(
isinstance(templates, list),
True,
"Template response %s is not a list" % templates
)
self.assertNotEqual(
len(templates),
0,
"No templates found"
)
for template in templates:
self.assertEqual(
template.id,
self.template.id,
"Check Template ID for domain" + str(self.domain_2.name)
)
return
class TestAddVmToSubDomain(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestAddVmToSubDomain, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.services['mode'] = cls.zone.networktype
cls._cleanup = []
cls.sub_domain = Domain.create(
cls.api_client,
cls.services["domain"],
parentdomainid=cls.domain.id
)
cls._cleanup.append(cls.sub_domain)
        # Create an account in the parent domain
cls.account_1 = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup.append(cls.account_1)
        # Create an account in the sub-domain
cls.account_2 = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.sub_domain.id
)
cls._cleanup.append(cls.account_2)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
domainid=cls.domain.id
)
cls._cleanup.append(cls.service_offering)
cls.template = get_test_template(
cls.api_client,
cls.zone.id,
cls.hypervisor
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.vm_1 = VirtualMachine.create(
cls.api_client,
cls.services["virtual_machine"],
templateid=cls.template.id,
accountid=cls.account_1.name,
domainid=cls.account_1.domainid,
serviceofferingid=cls.service_offering.id
)
cls.vm_2 = VirtualMachine.create(
cls.api_client,
cls.services["virtual_machine"],
templateid=cls.template.id,
accountid=cls.account_2.name,
domainid=cls.account_2.domainid,
serviceofferingid=cls.service_offering.id
)
return
@classmethod
def tearDownClass(cls):
try:
# Clean up, terminate the created resources
cleanup_resources(cls.api_client, reversed(cls._cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created resources
cleanup_resources(self.apiclient, reversed(self.cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(
tags=[
"advanced",
"basic",
"eip",
"advancedns",
"sg"],
required_hardware="false")
def test_01_add_vm_to_subdomain(self):
""" Test Sub domain allowed to launch VM when a Domain
level zone is created"""
# Validate the following
# 1. Verify VM created by Account_1 is in Running state
# 2. Verify VM created by Account_2 is in Running state
vm_response = list_virtual_machines(
self.apiclient,
id=self.vm_1.id
)
self.assertEqual(
isinstance(vm_response, list),
True,
"Check List VM for a valid response"
)
self.assertNotEqual(
len(vm_response),
0,
"Check List Template response"
)
for vm in vm_response:
self.debug("VM ID: %s and state: %s" % (vm.id, vm.state))
self.assertEqual(
vm.state,
'Running',
"Check State of Virtual machine"
)
vm_response = list_virtual_machines(
self.apiclient,
id=self.vm_2.id
)
self.assertNotEqual(
len(vm_response),
0,
"Check List Template response"
)
for vm in vm_response:
self.debug("VM ID: %s and state: %s" % (vm.id, vm.state))
self.assertEqual(
vm.state,
'Running',
"Check State of Virtual machine"
)
return
class TestUserDetails(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestUserDetails, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls._cleanup = []
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, reversed(cls._cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created network offerings
cleanup_resources(self.apiclient, reversed(self.cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=[
"role",
"accounts",
"simulator",
"advanced",
"advancedns",
"basic",
"eip",
"sg"
])
def test_updateUserDetails(self):
"""Test user update API
"""
# Steps for test scenario
# 1. create a user account
# 2. update the user details (firstname, lastname, user) with
# updateUser API
# 3. listUsers in the account
# 4. delete the account
# Validate the following
# 1. listAccounts should show account created successfully
# 2. updateUser API should return valid response
# 3. user should be updated with new details
self.debug("Creating an user account..")
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup.append(self.account)
# Fetching the user details of account
self.debug(
"Fetching user details for account: %s" %
self.account.name)
users = User.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(users, list),
True,
"List users should return a valid list for account"
)
user_1 = users[0]
self.debug("Updating the details of user: %s" % user_1.name)
firstname = random_gen()
lastname = random_gen()
self.debug("New firstname: %s, lastname: %s" % (firstname, lastname))
User.update(
self.apiclient,
user_1.id,
firstname=firstname,
lastname=lastname
)
# Fetching the user details of account
self.debug(
"Fetching user details for user: %s" % user_1.name)
users = User.list(
self.apiclient,
id=user_1.id,
listall=True
)
self.assertEqual(
isinstance(users, list),
True,
"List users should return a valid list for account"
)
user_1 = users[0]
self.assertEqual(
user_1.firstname,
firstname,
"User's first name should be updated with new one"
)
self.assertEqual(
user_1.lastname,
lastname,
"User's last name should be updated with new one"
)
return
@attr(tags=[
"role",
"accounts",
"simulator",
"advanced",
"advancedns",
"basic",
"eip",
"sg"
])
def test_updateAdminDetails(self):
"""Test update admin details
"""
# Steps for test scenario
        # 1. create an admin account
# 2. update the user details (firstname, lastname, user) with
# updateUser API
# 3. listUsers in the account
# 4. delete the account
# Validate the following
# 1. listAccounts should show account created successfully
# 2. updateUser API should return valid response
# 3. user should be updated with new details
self.debug("Creating a ROOT admin account")
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
)
self.cleanup.append(self.account)
# Fetching the user details of account
self.debug(
"Fetching user details for account: %s" %
self.account.name)
users = User.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(users, list),
True,
"List users should return a valid list for account"
)
user_1 = users[0]
self.debug("Updating the details of user: %s" % user_1.name)
firstname = random_gen()
lastname = random_gen()
self.debug("New firstname: %s, lastname: %s" % (firstname, lastname))
User.update(
self.apiclient,
user_1.id,
firstname=firstname,
lastname=lastname
)
# Fetching the user details of account
self.debug(
"Fetching user details for user: %s" % user_1.name)
users = User.list(
self.apiclient,
id=user_1.id,
listall=True
)
self.assertEqual(
isinstance(users, list),
True,
"List users should return a valid list for account"
)
user_1 = users[0]
self.assertEqual(
user_1.firstname,
firstname,
"User's first name should be updated with new one"
)
self.assertEqual(
user_1.lastname,
lastname,
"User's last name should be updated with new one"
)
return
@attr(tags=[
"role",
"accounts",
"simulator",
"advanced",
"advancedns",
"basic",
"eip",
"sg"
])
def test_updateDomainAdminDetails(self):
"""Test update domain admin details
"""
        # Steps for test scenario
        # 1. create a domain admin account
        # 2. update the user details (firstname, lastname, user) with
# updateUser API
# 3. listUsers in the account
# 4. delete the account
# Validate the following
# 1. listAccounts should show account created successfully
# 2. updateUser API should return valid response
# 3. user should be updated with new details
self.debug("Creating a domain admin account")
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup.append(self.account)
# Fetching the user details of account
self.debug(
"Fetching user details for account: %s" %
self.account.name)
users = User.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(users, list),
True,
"List users should return a valid list for account"
)
user_1 = users[0]
self.debug("Updating the details of user: %s" % user_1.name)
firstname = random_gen()
lastname = random_gen()
self.debug("New firstname: %s, lastname: %s" % (firstname, lastname))
User.update(
self.apiclient,
user_1.id,
firstname=firstname,
lastname=lastname
)
# Fetching the user details of account
self.debug(
"Fetching user details for user: %s" % user_1.name)
users = User.list(
self.apiclient,
id=user_1.id,
listall=True
)
self.assertEqual(
isinstance(users, list),
True,
"List users should return a valid list for account"
)
user_1 = users[0]
self.assertEqual(
user_1.firstname,
firstname,
"User's first name should be updated with new one"
)
self.assertEqual(
user_1.lastname,
lastname,
"User's last name should be updated with new one"
)
return
class TestUserLogin(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestUserLogin, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls._cleanup = []
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, reversed(cls._cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created network offerings
cleanup_resources(self.apiclient, reversed(self.cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["login", "accounts", "simulator", "advanced",
"advancedns", "basic", "eip", "sg"])
def test_LoginApiUuidResponse(self):
"""Test if Login API does not return UUID's
"""
# Steps for test scenario
# 1. create a user account
# 2. login to the user account with given credentials (loginCmd)
# 3. delete the user account
# Validate the following
# 1. listAccounts should return account created
        # 2. loginResponse should have UUID only in response. Assert by
# checking database id is not same as response id
# Login also succeeds with non NULL sessionId in response
self.debug("Creating an user account..")
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup.append(self.account)
self.debug("Logging into the cloudstack with login API")
        response = User.login(
            self.apiclient,
            username=self.account.name,
            password=self.services["account"]["password"]
        )
        self.debug("Login API response: %s" % response)
        self.assertNotEqual(
            response.sessionkey,
            None,
            "Login to the CloudStack should be successful; " +
            "response shall have a non-null session key"
        )
return
@attr(tags=["simulator", "advanced",
"advancedns", "basic", "eip", "sg"])
def test_ApiListDomain(self):
"""Test case to check the correctness of List domain API, to make sure that no field is missed in the output.
"""
# Steps for test scenario
# 1. create a domain
# 2. Create a sub-domain with domain created in step 1 as parent.
# Validate the following
# 1. listDomains returns created domain and sub-domain
# 2. The list Domain response has all the expected 44 elements/fields in it.
listDomainResponseElements = ["id", "name", "level", "parentdomainid", "parentdomainname", "haschild", "path",
"state",
"vmlimit", "vmtotal", "vmavailable", "iplimit", "iptotal", "ipavailable",
"volumelimit",
"volumetotal", "volumeavailable", "snapshotlimit", "snapshottotal",
"snapshotavailable",
"templatelimit", "templatetotal", "templateavailable", "projectlimit",
"projecttotal", "projectavailable",
"networklimit", "networktotal", "networkavailable", "vpclimit", "vpctotal",
"vpcavailable",
"cpulimit", "cputotal", "cpuavailable", "memorylimit", "memorytotal",
"memoryavailable", "primarystoragelimit",
"primarystoragetotal", "primarystorageavailable", "secondarystoragelimit",
"secondarystoragetotal", "secondarystorageavailable"
]
self.debug("Creating a domain for testing list domain reponse")
domain = Domain.create(
self.apiclient,
self.services["domain"],
parentdomainid=self.domain.id
)
self.cleanup.append(domain)
self.debug("Domain: %s is created successfully." % domain.name)
self.debug("Validating the created domain")
list_domain = Domain.list(self.api_client, id=domain.id)
domain_list_validation_result = validateList(list_domain)
self.assertEqual(domain_list_validation_result[0], PASS,
"Domain list validation failed due to %s" %
domain_list_validation_result[2])
subDomain = Domain.create(
self.apiclient,
self.services["domain"],
parentdomainid=domain.id
)
self.cleanup.append(subDomain)
self.debug("Sub-Domain: %s is created successfully." % subDomain.name)
self.debug("Validating the created sub-domain")
list_sub_domain = Domain.list(self.api_client, id=subDomain.id)
subdomain_list_validation_result = validateList(list_sub_domain)
self.assertEqual(subdomain_list_validation_result[0], PASS,
"Sub-Domain list validation failed due to %s" %
subdomain_list_validation_result[2])
self.debug("Checking that the listDomain response has all the elements.")
domainOutputString = list_domain[0].__dict__
for element in listDomainResponseElements:
            self.assertTrue((element.lower() in domainOutputString), element + " field is missing in list domain response.")
self.debug("Verified that the listDomain response has all the elements.")
self.debug("Checking that the list sub-domain response has all the elements.")
subdomainOutputString = list_sub_domain[0].__dict__
for element in listDomainResponseElements:
            self.assertTrue((element.lower() in subdomainOutputString), element + " field is missing in list domain response.")
self.debug("Verified that the list sub-Domain response has all the elements.")
return
@attr(tags=["login", "accounts", "simulator", "advanced",
"advancedns", "basic", "eip", "sg"])
def test_LoginApiDomain(self):
"""Test login API with domain
"""
# Steps for test scenario
# 1. create a domain
# 2. create user in the domain
# 3. login to the user account above using UUID domain/user
# 4. delete the user account
# Validate the following
# 1. listDomains returns created domain
# 2. listAccounts returns created user
# 3. loginResponse should have UUID only in responses
# Login also succeeds with non NULL sessionId in response
self.debug("Creating a domain for login with API domain test")
domain = Domain.create(
self.apiclient,
self.services["domain"],
parentdomainid=self.domain.id
)
self.cleanup.append(domain)
self.debug("Domain: %s is created succesfully." % domain.name)
self.debug(
"Checking if the created domain is listed in list domains API")
domains = Domain.list(self.apiclient, id=domain.id, listall=True)
self.assertEqual(
isinstance(domains, list),
True,
"List domains shall return a valid response"
)
self.debug("Creating an user account in domain: %s" % domain.name)
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=domain.id
)
self.cleanup.append(self.account)
accounts = Account.list(
self.apiclient,
name=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
isinstance(accounts, list),
True,
"List accounts should return a valid response"
)
self.debug("Logging into the cloudstack with login API")
        response = User.login(
            self.apiclient,
            username=self.account.name,
            password=self.services["account"]["password"],
            domainid=domain.id)
        self.debug("Login API response: %s" % response)
        self.assertNotEqual(
            response.sessionkey,
            None,
            "Login to the CloudStack should be successful; " +
            "response shall have a non-null session key"
        )
return
class TestUserAPIKeys(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestUserAPIKeys, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls._cleanup = []
# Create an account, domain etc
cls.domain = Domain.create(
cls.api_client,
cls.services["domain"],
)
cls._cleanup.append(cls.domain)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=False,
domainid=cls.domain.id
)
cls._cleanup.append(cls.account)
cls.domain_2 = Domain.create(
cls.api_client,
cls.services["domain"],
)
cls._cleanup.append(cls.domain_2)
cls.account_2 = Account.create(
cls.api_client,
cls.services["account"],
admin=False,
domainid=cls.domain_2.id
)
cls._cleanup.append(cls.account_2)
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, reversed(cls._cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created network offerings
cleanup_resources(self.apiclient, reversed(self.cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=[
"role",
"accounts",
"simulator",
"advanced",
"advancedns",
"basic",
"eip",
"sg"
])
def test_user_key_renew_same_account(self):
# Create an User associated with the account
user_1 = User.create(
self.apiclient,
self.services["user"],
account=self.account.name,
domainid=self.domain.id
)
self.cleanup.append(user_1)
account_response = list_accounts(
self.apiclient,
id=self.account.id
)[0]
self.assertEqual(
hasattr(account_response, 'user'),
True,
"Users are included in account response")
account_users = account_response.user
self.assertEqual(
isinstance(account_users, list),
True,
"Check account for valid data"
)
self.assertNotEqual(
len(account_users),
0,
"Check number of User in Account")
[user] = [u for u in account_users if u.username == user_1.username]
self.assertEqual(
user.apikey,
None,
"Check that the user don't have an API key yet")
self.debug("Register API keys for user")
userkeys = User.registerUserKeys(self.apiclient, user_1.id)
users = list_accounts(
self.apiclient,
id=self.account.id
)[0].user
[user] = [u for u in users if u.id == user_1.id]
self.assertEqual(
user.apikey,
userkeys.apikey,
"Check User api key")
user.secretkey = self.get_secret_key(user.id)
self.assertEqual(
user.secretkey,
userkeys.secretkey,
"Check User having secret key")
self.debug("Get test client with user keys")
cs_api = self.testClient.getUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
self.debug("Renew API keys for user using current keys")
new_keys = User.registerUserKeys(cs_api, user_1.id)
self.assertNotEqual(
userkeys.apikey,
new_keys.apikey,
"Check API key is different")
new_keys.secretkey = self.get_secret_key(user_1.id)
self.assertNotEqual(
userkeys.secretkey,
new_keys.secretkey,
"Check secret key is different")
def get_secret_key(self, id):
cmd = getUserKeys.getUserKeysCmd()
cmd.id = id
keypair = self.apiclient.getUserKeys(cmd)
return keypair.secretkey
@attr(tags=[
"role",
"accounts",
"simulator",
"advanced",
"advancedns",
"basic",
"eip",
"sg"
])
def test_user_cannot_renew_other_keys(self):
cs_api = self.testClient.getUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
self.debug("Try to change API key of an account in another domain")
users = list_accounts(
self.apiclient,
id=self.account_2.id
)[0].user
with self.assertRaises(CloudstackAPIException) as e:
User.registerUserKeys(cs_api, users[0].id)
class TestDomainForceRemove(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDomainForceRemove, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.services['mode'] = cls.zone.networktype
cls.template = get_test_template(
cls.api_client,
cls.zone.id,
cls.hypervisor
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls._cleanup = []
return
@classmethod
def tearDownClass(cls):
try:
# Clean up, terminate the created resources
cleanup_resources(cls.api_client, reversed(cls._cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created resources
cleanup_resources(self.apiclient, reversed(self.cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(
tags=[
"domains",
"advanced",
"advancedns",
"simulator",
"dvs"],
required_hardware="false")
def test_forceDeleteDomain(self):
""" Test delete domain with force option"""
# Steps for validations
# 1. create a domain DOM
# 2. create 2 users under this domain
# 3. deploy 1 VM into each of these user accounts
# 4. create PF / FW rules for port 22 on these VMs for their
# respective accounts
# 5. delete the domain with force=true option
# Validate the following
# 1. listDomains should list the created domain
# 2. listAccounts should list the created accounts
# 3. listvirtualmachines should show the Running VMs
# 4. PF and FW rules should be shown in listFirewallRules
# 5. domain should delete successfully and above three list calls
# should show all the resources now deleted. listRouters should
# not return any routers in the deleted accounts/domains
self.debug("Creating a domain for login with API domain test")
domain = Domain.create(
self.apiclient,
self.services["domain"],
parentdomainid=self.domain.id
)
self.debug("Domain is created succesfully.")
self.debug(
"Checking if the created domain is listed in list domains API")
domains = Domain.list(self.apiclient, id=domain.id, listall=True)
self.assertEqual(
isinstance(domains, list),
True,
"List domains shall return a valid response"
)
self.debug("Creating 2 user accounts in domain: %s" % domain.name)
self.account_1 = Account.create(
self.apiclient,
self.services["account"],
domainid=domain.id
)
self.account_2 = Account.create(
self.apiclient,
self.services["account"],
domainid=domain.id
)
try:
self.debug("Creating a tiny service offering for VM deployment")
self.service_offering = ServiceOffering.create(
self.apiclient,
self.services["service_offering"],
domainid=self.domain.id
)
self.debug("Deploying virtual machine in account 1: %s" %
self.account_1.name)
vm_1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account_1.name,
domainid=self.account_1.domainid,
serviceofferingid=self.service_offering.id
)
self.debug("Deploying virtual machine in account 2: %s" %
self.account_2.name)
VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account_2.name,
domainid=self.account_2.domainid,
serviceofferingid=self.service_offering.id
)
networks = Network.list(
self.apiclient,
account=self.account_1.name,
domainid=self.account_1.domainid,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response"
)
network_1 = networks[0]
self.debug("Default network in account 1: %s is %s" % (
self.account_1.name,
network_1.name))
src_nat_list = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network_1.id,
account=self.account_1.name,
domainid=self.account_1.domainid,
listall=True,
issourcenat=True,
)
self.assertEqual(
isinstance(src_nat_list, list),
True,
"List Public IP should return a valid source NAT"
)
self.assertNotEqual(
len(src_nat_list),
0,
"Length of response from listPublicIp should not be 0"
)
src_nat = src_nat_list[0]
self.debug(
"Trying to create a port forwarding rule in source NAT: %s" %
src_nat.ipaddress)
# Create NAT rule
nat_rule = NATRule.create(
self.apiclient,
vm_1,
self.services["natrule"],
ipaddressid=src_nat.id
)
self.debug("Created PF rule on source NAT: %s" % src_nat.ipaddress)
nat_rules = NATRule.list(self.apiclient, id=nat_rule.id)
self.assertEqual(
isinstance(nat_rules, list),
True,
"List NAT should return a valid port forwarding rules"
)
self.assertNotEqual(
len(nat_rules),
0,
"Length of response from listLbRules should not be 0"
)
except Exception as e:
self.cleanup.append(self.domain)
self.cleanup.append(self.account_1)
self.cleanup.append(self.account_2)
self.cleanup.append(self.service_offering)
self.fail(e)
self.debug("Deleting domain with force option")
try:
domain.delete(self.apiclient, cleanup=True)
except Exception as e:
self.debug("Waiting for account.cleanup.interval" +
" to cleanup any remaining resouces")
# Sleep 3*account.gc to ensure that all resources are deleted
wait_for_cleanup(self.apiclient, ["account.cleanup.interval"] * 3)
with self.assertRaises(CloudstackAPIException):
Domain.list(
self.apiclient,
id=domain.id,
listall=True
)
self.debug("Checking if the resources in domain are deleted")
with self.assertRaises(CloudstackAPIException):
Account.list(
self.apiclient,
name=self.account_1.name,
domainid=self.account_1.domainid,
listall=True
)
return
@attr(
tags=[
"domains",
"advanced",
"advancedns",
"simulator"],
required_hardware="false")
def test_DeleteDomain(self):
""" Test delete domain without force option"""
# Steps for validations
# 1. create a domain DOM
# 2. create 2 users under this domain
# 3. deploy 1 VM into each of these user accounts
# 4. create PF / FW rules for port 22 on these VMs for their
# respective accounts
# 5. delete the domain with force=false option
# Validate the following
# 1. listDomains should list the created domain
# 2. listAccounts should list the created accounts
# 3. listvirtualmachines should show the Running VMs
# 4. PF and FW rules should be shown in listFirewallRules
# 5. domain deletion should fail saying there are resources under use
self.debug("Creating a domain for login with API domain test")
domain = Domain.create(
self.apiclient,
self.services["domain"],
parentdomainid=self.domain.id
)
# in this test delete domain *should* fail so we need to housekeep:
self.cleanup.append(domain)
self.debug("Domain: %s is created successfully." % domain.name)
self.debug(
"Checking if the created domain is listed in list domains API")
domains = Domain.list(self.apiclient, id=domain.id, listall=True)
self.assertEqual(
isinstance(domains, list),
True,
"List domains shall return a valid response"
)
self.debug("Creating 2 user accounts in domain: %s" % domain.name)
self.account_1 = Account.create(
self.apiclient,
self.services["account"],
domainid=domain.id
)
self.cleanup.append(self.account_1)
self.account_2 = Account.create(
self.apiclient,
self.services["account"],
domainid=domain.id
)
self.cleanup.append(self.account_2)
self.debug("Creating a tiny service offering for VM deployment")
self.service_offering = ServiceOffering.create(
self.apiclient,
self.services["service_offering"],
domainid=self.domain.id
)
self.cleanup.append(self.service_offering)
self.debug("Deploying virtual machine in account 1: %s" %
self.account_1.name)
vm_1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account_1.name,
domainid=self.account_1.domainid,
serviceofferingid=self.service_offering.id
)
self.debug("Deploying virtual machine in account 2: %s" %
self.account_2.name)
VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account_2.name,
domainid=self.account_2.domainid,
serviceofferingid=self.service_offering.id
)
networks = Network.list(
self.apiclient,
account=self.account_1.name,
domainid=self.account_1.domainid,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response"
)
network_1 = networks[0]
self.debug("Default network in account 1: %s is %s" % (
self.account_1.name,
network_1.name))
src_nat_list = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network_1.id,
account=self.account_1.name,
domainid=self.account_1.domainid,
listall=True,
issourcenat=True,
)
self.assertEqual(
isinstance(src_nat_list, list),
True,
"List Public IP should return a valid source NAT"
)
self.assertNotEqual(
len(src_nat_list),
0,
"Length of response from listPublicIp should not be 0"
)
src_nat = src_nat_list[0]
self.debug(
"Trying to create a port forwarding rule in source NAT: %s" %
src_nat.ipaddress)
# Create NAT rule
nat_rule = NATRule.create(
self.apiclient,
vm_1,
self.services["natrule"],
ipaddressid=src_nat.id
)
self.debug("Created PF rule on source NAT: %s" % src_nat.ipaddress)
nat_rules = NATRule.list(self.apiclient, id=nat_rule.id)
self.assertEqual(
isinstance(nat_rules, list),
True,
"List NAT should return a valid port forwarding rules"
)
self.assertNotEqual(
len(nat_rules),
0,
"Length of response from listLbRules should not be 0"
)
self.debug("Deleting domain without force option")
with self.assertRaises(Exception):
domain.delete(self.apiclient, cleanup=False)
return
class TestMoveUser(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestMoveUser, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.testdata['mode'] = cls.zone.networktype
cls.template = get_test_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
cls._cleanup = []
return
@classmethod
def tearDownClass(cls):
try:
# Clean up, terminate the created resources
cleanup_resources(cls.api_client, reversed(cls._cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
self.testdata = self.testClient.getParsedTestDataConfig()
self.account1 = Account.create(
self.apiclient,
self.testdata["acl"]["accountD1"],
domainid=self.domain.id
)
self.cleanup.append(self.account1)
self.account2 = Account.create(
self.apiclient,
self.testdata["acl"]["accountD1A"],
domainid=self.domain.id
)
self.cleanup.append(self.account2)
self.user = User.create(
self.apiclient,
self.testdata["user"],
account=self.account1.name,
domainid=self.account1.domainid
)
return
def tearDown(self):
try:
# Clean up, terminate the created resources
cleanup_resources(self.apiclient, reversed(self.cleanup))
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["domains", "advanced", "advancedns", "simulator","dvs"], required_hardware="false")
def test_move_user_to_accountID(self):
self.user.move(self.api_client, dest_accountid=self.account2.id)
self.assertEqual(
self.account2.name,
self.user.list(self.apiclient, id=self.user.id)[0].account,
"Check user source of created user"
)
return
@attr(tags=["domains", "advanced", "advancedns", "simulator","dvs"], required_hardware="false")
def test_move_user_to_account_name(self):
self.user.move(self.api_client, dest_account=self.account2.name)
self.assertEqual(
self.account2.name,
self.user.list(self.apiclient, id=self.user.id)[0].account,
"Check user source of created user"
)
return
@attr(tags=["domains", "advanced", "advancedns", "simulator","dvs"], required_hardware="false")
def test_move_user_to_different_domain(self):
domain2 = Domain.create(self.api_client,
self.testdata["domain"],
parentdomainid=self.domain.id
)
self.cleanup.append(domain2)
account_different_domain = Account.create(
self.apiclient,
self.testdata["acl"]["accountD1B"],
domainid=domain2.id
)
self.cleanup.append(account_different_domain)
try:
self.user.move(self.api_client, dest_account=account_different_domain.name)
except Exception:
pass
else:
self.fail("It should not be allowed to move users across accounts in different domains, failing")
return
@attr(tags=["domains", "advanced", "advancedns", "simulator","dvs"], required_hardware="false")
def test_move_user_incorrect_account_id(self):
try:
self.user.move(self.api_client, dest_accountid='incorrect-account-id')
except Exception:
pass
else:
self.fail("moving to non-existing account should not be possible, failing")
return
@attr(tags=["domains", "advanced", "advancedns", "simulator","dvs"], required_hardware="false")
def test_move_user_incorrect_account_name(self):
try:
self.user.move(self.api_client, dest_account='incorrect-account-name')
except Exception:
pass
else:
self.fail("moving to non-existing account should not be possible, failing")
return
|
DaanHoogland/cloudstack
|
test/integration/smoke/test_accounts.py
|
Python
|
apache-2.0
| 77,523
|
# Copyright 2020 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the behaviour of the Call classes under a secure channel."""
import logging
import unittest
import grpc
from grpc.experimental import aio
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2_grpc
from tests.unit import resources
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
_SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
_NUM_STREAM_RESPONSES = 5
_RESPONSE_PAYLOAD_SIZE = 42
class _SecureCallMixin:
"""A Mixin to run the call tests over a secure channel."""
async def setUp(self):
server_credentials = grpc.ssl_server_credentials([
(resources.private_key(), resources.certificate_chain())
])
channel_credentials = grpc.ssl_channel_credentials(
resources.test_root_certificates())
self._server_address, self._server = await start_test_server(
secure=True, server_credentials=server_credentials)
channel_options = ((
'grpc.ssl_target_name_override',
_SERVER_HOST_OVERRIDE,
),)
self._channel = aio.secure_channel(self._server_address,
channel_credentials, channel_options)
self._stub = test_pb2_grpc.TestServiceStub(self._channel)
async def tearDown(self):
await self._channel.close()
await self._server.stop(None)
class TestUnaryUnarySecureCall(_SecureCallMixin, AioTestBase):
"""unary_unary Calls made over a secure channel."""
async def test_call_ok_over_secure_channel(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
response = await call
self.assertIsInstance(response, messages_pb2.SimpleResponse)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_call_with_credentials(self):
call_credentials = grpc.composite_call_credentials(
grpc.access_token_call_credentials("abc"),
grpc.access_token_call_credentials("def"),
)
call = self._stub.UnaryCall(messages_pb2.SimpleRequest(),
credentials=call_credentials)
response = await call
self.assertIsInstance(response, messages_pb2.SimpleResponse)
class TestUnaryStreamSecureCall(_SecureCallMixin, AioTestBase):
"""unary_stream calls over a secure channel"""
async def test_unary_stream_async_generator_secure(self):
request = messages_pb2.StreamingOutputCallRequest()
request.response_parameters.extend(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE,)
for _ in range(_NUM_STREAM_RESPONSES))
call_credentials = grpc.composite_call_credentials(
grpc.access_token_call_credentials("abc"),
grpc.access_token_call_credentials("def"),
)
call = self._stub.StreamingOutputCall(request,
credentials=call_credentials)
async for response in call:
self.assertIsInstance(response,
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(len(response.payload.body), _RESPONSE_PAYLOAD_SIZE)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
# Prepares the request that streams in a ping-pong manner.
_STREAM_OUTPUT_REQUEST_ONE_RESPONSE = messages_pb2.StreamingOutputCallRequest()
_STREAM_OUTPUT_REQUEST_ONE_RESPONSE.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE))
class TestStreamStreamSecureCall(_SecureCallMixin, AioTestBase):
_STREAM_ITERATIONS = 2
async def test_async_generator_secure_channel(self):
async def request_generator():
for _ in range(self._STREAM_ITERATIONS):
yield _STREAM_OUTPUT_REQUEST_ONE_RESPONSE
call_credentials = grpc.composite_call_credentials(
grpc.access_token_call_credentials("abc"),
grpc.access_token_call_credentials("def"),
)
call = self._stub.FullDuplexCall(request_generator(),
credentials=call_credentials)
async for response in call:
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertEqual(await call.code(), grpc.StatusCode.OK)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
|
ejona86/grpc
|
src/python/grpcio_tests/tests_aio/unit/secure_call_test.py
|
Python
|
apache-2.0
| 5,070
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nsx_router_mappings
Revision ID: 4ca36cfc898c
Revises: 3d3cb89d84ee
Create Date: 2014-01-08 10:41:43.373031
"""
# revision identifiers, used by Alembic.
revision = '4ca36cfc898c'
down_revision = '3d3cb89d84ee'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
'neutron.plugins.vmware.plugin.NsxPlugin',
'neutron.plugins.vmware.plugin.NsxServicePlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
# Create table for router/lrouter mappings
op.create_table(
'neutron_nsx_router_mappings',
sa.Column('neutron_id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['neutron_id'], ['routers.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('neutron_id'),
)
    # Execute statement to add a record in nsx_router_mappings for
# each record in routers
op.execute("INSERT INTO neutron_nsx_router_mappings SELECT id,id "
"from routers")
def downgrade(active_plugins=None, options=None):
pass
|
shakamunyi/neutron-vrrp
|
neutron/db/migration/alembic_migrations/versions/4ca36cfc898c_nsx_router_mappings.py
|
Python
|
apache-2.0
| 1,994
|
# -*- coding: utf-8 -*-
########################################################################
# #
# python-OBD: A python OBD-II serial module derived from pyobd #
# #
# Copyright 2004 Donour Sizemore (donour@uchicago.edu) #
# Copyright 2009 Secons Ltd. (www.obdtester.com) #
# Copyright 2009 Peter J. Creath #
# Copyright 2015 Brendan Whitfield (bcw7044@rit.edu) #
# #
########################################################################
# #
# utils.py #
# #
# This file is part of python-OBD (a derivative of pyOBD) #
# #
# python-OBD is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 2 of the License, or #
# (at your option) any later version. #
# #
# python-OBD is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with python-OBD. If not, see <http://www.gnu.org/licenses/>. #
# #
########################################################################
import errno
import glob
import logging
import string
import sys
import serial
logger = logging.getLogger(__name__)
class OBDStatus:
""" Values for the connection status flags """
NOT_CONNECTED = "Not Connected"
ELM_CONNECTED = "ELM Connected"
OBD_CONNECTED = "OBD Connected"
CAR_CONNECTED = "Car Connected"
class BitArray:
"""
Class for representing bitarrays (inefficiently)
There's a nice C-optimized lib for this: https://github.com/ilanschnell/bitarray
but python-OBD doesn't use it enough to be worth adding the dependency.
But, if this class starts getting used too much, we should switch to that lib.
"""
def __init__(self, _bytearray):
self.bits = ""
for b in _bytearray:
v = bin(b)[2:]
self.bits += ("0" * (8 - len(v))) + v # pad it with zeros
def __getitem__(self, key):
if isinstance(key, int):
if key >= 0 and key < len(self.bits):
return self.bits[key] == "1"
else:
return False
elif isinstance(key, slice):
bits = self.bits[key]
if bits:
return [b == "1" for b in bits]
else:
return []
def num_set(self):
return self.bits.count("1")
def num_cleared(self):
return self.bits.count("0")
def value(self, start, stop):
bits = self.bits[start:stop]
if bits:
return int(bits, 2)
else:
return 0
def __len__(self):
return len(self.bits)
def __str__(self):
return self.bits
def __iter__(self):
return [b == "1" for b in self.bits].__iter__()
def bytes_to_int(bs):
""" converts a big-endian byte array into a single integer """
v = 0
p = 0
for b in reversed(bs):
v += b * (2 ** p)
p += 8
return v
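# Worked example: bytes_to_int([0x01, 0x02]) reads the list big-endian,
# giving 0x0102 == 258.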
def bytes_to_hex(bs):
h = ""
for b in bs:
bh = hex(b)[2:]
h += ("0" * (2 - len(bh))) + bh
return h
def twos_comp(val, num_bits):
"""compute the 2's compliment of int value val"""
if ((val & (1 << (num_bits - 1))) != 0):
val = val - (1 << num_bits)
return val
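# Worked examples: twos_comp(0xFF, 8) == -1 and twos_comp(0x7F, 8) == 127,
# since only values with the top bit set are reinterpreted as negative.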
def isHex(_hex):
return all([c in string.hexdigits for c in _hex])
def contiguous(l, start, end):
""" checks that a list of integers are consequtive """
if not l:
return False
if l[0] != start:
return False
if l[-1] != end:
return False
    # for consecutiveness, look at the integers in pairs
pairs = zip(l, l[1:])
if not all([p[0] + 1 == p[1] for p in pairs]):
return False
return True
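# For example, contiguous([1, 2, 3, 4], 1, 4) is True, while
# contiguous([1, 2, 4], 1, 4) is False because 3 is missing.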
def try_port(portStr):
"""returns boolean for port availability"""
try:
s = serial.Serial(portStr)
s.close() # explicit close 'cause of delayed GC in java
return True
except serial.SerialException:
pass
except OSError as e:
if e.errno != errno.ENOENT: # permit "no such file or directory" errors
raise e
return False
def scan_serial():
"""scan for available ports. return a list of serial names"""
available = []
possible_ports = []
if sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
possible_ports += glob.glob("/dev/rfcomm[0-9]*")
possible_ports += glob.glob("/dev/ttyUSB[0-9]*")
elif sys.platform.startswith('win'):
possible_ports += ["\\.\COM%d" % i for i in range(256)]
elif sys.platform.startswith('darwin'):
exclude = [
'/dev/tty.Bluetooth-Incoming-Port',
'/dev/tty.Bluetooth-Modem'
]
possible_ports += [port for port in glob.glob('/dev/tty.*') if port not in exclude]
# possible_ports += glob.glob('/dev/pts/[0-9]*') # for obdsim
for port in possible_ports:
if try_port(port):
available.append(port)
return available
|
brendan-w/python-OBD
|
obd/utils.py
|
Python
|
gpl-2.0
| 6,075
|
from matplotlib import use, cm
use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import axes3d
from sklearn import linear_model
from gradientDescent import gradientDescent
from computeCost import computeCost
from warmUpExercise import warmUpExercise
from plotData import plotData
from show import show
## Machine Learning Online Class - Exercise 1: Linear Regression
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# linear exercise. You will need to complete the following modules
# in this exercise:
#
# warmUpExercise.py
# plotData.py
# gradientDescent.py
# computeCost.py
# gradientDescentMulti.py
# computeCostMulti.py
# featureNormalize.py
# normalEqn.py
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
#
# x refers to the population size in 10,000s
# y refers to the profit in $10,000s
# ==================== Part 1: Basic Function ====================
# Complete warmUpExercise.py
print("Running warmUpExercise ...")
print('5x5 Identity Matrix:')
warmup = warmUpExercise()
print(warmup)
input("Program paused. Press Enter to continue...")
# ======================= Part 2: Plotting =======================
data = np.loadtxt('ex1data1.txt', delimiter=',')
m = data.shape[0]
X = np.vstack(list(zip(np.ones(m), data[:, 0])))  # list() keeps this working on Python 3, where zip() is lazy
y = data[:, 1]
# Plot Data
# Note: You have to complete the code in plotData.py
print('Plotting Data ...')
plt.figure(figsize=(10, 10))
plotData(data)
input("Program paused. Press Enter to continue...")
# =================== Part 3: Gradient descent ===================
print('Running Gradient Descent ...')
theta = np.zeros(2)
# compute and display initial cost
J = computeCost(X, y, theta)
print('cost: %0.4f ' % J)
# Some gradient descent settings
iterations = 5000
alpha = 0.01
# run gradient descent
theta, J_history = gradientDescent(X, y, theta, alpha, iterations)
# print theta to screen
print('Theta found by gradient descent: ')
print('%s %s \n' % (theta[0], theta[1]))
# Plot the linear fit
plt.clf()
plotData(data, show=False)
plt.plot(X[:, 1], X.dot(theta), '-', label='Linear regression')
plt.legend(loc='upper right', shadow=True, fontsize='x-large', numpoints=1)
plt.show()
input("Program paused. Press Enter to continue...")
# Predict values for population sizes of 35,000 and 70,000
predict1 = np.array([1, 3.5]).dot(theta)
predict2 = np.array([1, 7]).dot(theta)
print('For population = 35,000, we predict a profit of {:.4f}'.format(predict1*10000))
print('For population = 70,000, we predict a profit of {:.4f}'.format(predict2*10000))
# ============= Part 4: Visualizing J(theta_0, theta_1) =============
print('Visualizing J(theta_0, theta_1) ...')
# Grid over which we will calculate J
theta0_vals = np.linspace(-10, 10, X.shape[0])
theta1_vals = np.linspace(-1, 4, X.shape[0])
# initialize J_vals to a matrix of 0's
J_vals=np.array(np.zeros(X.shape[0]).T)
for i in range(theta0_vals.size):
col = []
for j in range(theta1_vals.size):
t = np.array([theta0_vals[i],theta1_vals[j]])
col.append(computeCost(X, y, t.T))
J_vals=np.column_stack((J_vals,col))
# Because of the way meshgrids work in the surf command, we need to
# transpose J_vals before calling surf, or else the axes will be flipped
J_vals = J_vals[:,1:].T
theta0_vals, theta1_vals = np.meshgrid(theta0_vals, theta1_vals)
# Surface plot
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
ax.plot_surface(theta0_vals, theta1_vals, J_vals, rstride=8, cstride=8, alpha=0.3,
cmap=cm.coolwarm, linewidth=0, antialiased=False)
ax.set_xlabel(r'$\theta_0$')
ax.set_ylabel(r'$\theta_1$')
ax.set_zlabel(r'J($\theta$)')
plt.show()
input("Program paused. Press Enter to continue...")
# Contour plot
plt.clf()
# Plot J_vals as 15 contours spaced logarithmically between 0.01 and 100
ax = plt.contour(theta0_vals, theta1_vals, J_vals, np.logspace(-2, 3, 20))
plt.clabel(ax, inline=1, fontsize=10)
plt.xlabel(r'$\theta_0$')
plt.ylabel(r'$\theta_1$')
plt.plot(0.0, 0.0, 'rx', linewidth=2, markersize=10)
plt.show()
input("Program paused. Press Enter to continue...")
# =============Use Scikit-learn =============
regr = linear_model.LinearRegression(fit_intercept=False, normalize=True)
regr.fit(X, y)
print('Theta found by scikit: ')
print('%s %s \n' % (regr.coef_[0], regr.coef_[1]))
predict1 = np.array([1, 3.5]).dot(regr.coef_)
predict2 = np.array([1, 7]).dot(regr.coef_)
print('For population = 35,000, we predict a profit of {:.4f}'.format(predict1*10000))
print('For population = 70,000, we predict a profit of {:.4f}'.format(predict2*10000))
plt.clf()
plotData(data, show=False)
plt.plot(X[:, 1], X.dot(regr.coef_), '-', color='black', label='Linear regression with scikit')
plt.legend(loc='upper right', shadow=True, fontsize='x-large', numpoints=1)
plt.show()
input("Program paused. Press Enter to continue...")
|
robotenique/mlAlgorithms
|
supervised/gradDescent/ex1.py
|
Python
|
unlicense
| 5,002
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.sale.tests.test_sale_common import TestSale
class TestSaleExpense(TestSale):
def test_sale_expense(self):
""" Test the behaviour of sales orders when managing expenses """
# create a so with a product invoiced on delivery
prod = self.env.ref('product.product_product_1')
so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': prod.name, 'product_id': prod.id, 'product_uom_qty': 2, 'product_uom': prod.uom_id.id, 'price_unit': prod.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
})
so._compute_tax_id()
so.action_confirm()
so._create_analytic_account() # normally created at so confirmation when you use the right products
init_price = so.amount_total
# create some expense and validate it (expense at cost)
prod_exp_1 = self.env.ref('hr_expense.air_ticket')
company = self.env.ref('base.main_company')
journal = self.env['account.journal'].create({'name': 'Purchase Journal - Test', 'code': 'HRTPJ', 'type': 'purchase', 'company_id': company.id})
account_payable = self.env['account.account'].create({'code': 'X1111', 'name': 'HR Expense - Test Payable Account', 'user_type_id': self.env.ref('account.data_account_type_payable').id, 'reconcile': True})
employee = self.env['hr.employee'].create({'name': 'Test employee', 'user_id': self.user.id, 'address_home_id': self.user.partner_id.id})
self.user.partner_id.property_account_payable_id = account_payable.id
# Submit to Manager
sheet = self.env['hr.expense.sheet'].create({
'name': 'Expense for John Smith',
'employee_id': employee.id,
'journal_id': journal.id,
})
exp = self.env['hr.expense'].create({
'name': 'Air Travel',
'product_id': prod_exp_1.id,
'analytic_account_id': so.project_id.id,
'unit_amount': 621.54,
'employee_id': employee.id,
'sheet_id': sheet.id
})
# Approve
sheet.approve_expense_sheets()
# Create Expense Entries
sheet.action_sheet_move_create()
# expense should now be in sales order
self.assertTrue(prod_exp_1 in map(lambda so: so.product_id, so.order_line), 'Sale Expense: expense product should be in so')
sol = so.order_line.filtered(lambda sol: sol.product_id.id == prod_exp_1.id)
self.assertEqual((sol.price_unit, sol.qty_delivered), (621.54, 1.0), 'Sale Expense: error when invoicing an expense at cost')
self.assertEqual(so.amount_total, init_price, 'Sale Expense: price of so not updated after adding expense')
# create some expense and validate it (expense at sales price)
init_price = so.amount_total
prod_exp_2 = self.env.ref('hr_expense.car_travel')
# Submit to Manager
sheet = self.env['hr.expense.sheet'].create({
'name': 'Expense for John Smith',
'employee_id': employee.id,
'journal_id': journal.id,
})
exp = self.env['hr.expense'].create({
'name': 'Car Travel',
'product_id': prod_exp_2.id,
'analytic_account_id': so.project_id.id,
'product_uom_id': self.env.ref('product.product_uom_km').id,
'unit_amount': 0.15,
'quantity': 100,
'employee_id': employee.id,
'sheet_id': sheet.id
})
# Approve
sheet.approve_expense_sheets()
# Create Expense Entries
sheet.action_sheet_move_create()
# expense should now be in sales order
self.assertTrue(prod_exp_2 in map(lambda so: so.product_id, so.order_line), 'Sale Expense: expense product should be in so')
sol = so.order_line.filtered(lambda sol: sol.product_id.id == prod_exp_2.id)
        self.assertEqual((sol.price_unit, sol.qty_delivered), (prod_exp_2.list_price, 100.0), 'Sale Expense: error when invoicing an expense at sales price')
self.assertEqual(so.amount_total, init_price, 'Sale Expense: price of so not updated after adding expense')
# self.assertTrue(so.invoice_status, 'no', 'Sale Expense: expenses should not impact the invoice_status of the so')
# both expenses should be invoiced
inv_id = so.action_invoice_create()
inv = self.env['account.invoice'].browse(inv_id)
self.assertEqual(inv.amount_untaxed, 621.54 + (prod_exp_2.list_price * 100.0), 'Sale Expense: invoicing of expense is wrong')
|
chienlieu2017/it_management
|
odoo/addons/sale_expense/tests/test_sale_expense.py
|
Python
|
gpl-3.0
| 4,816
|
import logging
from datetime import datetime
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.crypto import get_random_string
from django.views import View
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView
from docs_manager.mixins import DocsMixin
from git_manager.helpers.github_helper import GitHubHelper, get_github_helper
from git_manager.mixins.repo_file_list import (get_files_for_step,
get_files_for_steps,)
from pylint_manager.helper import return_results_for_file
from quality_manager.mixins import get_most_recent_measurement
from recommendations.utils import get_recommendations
from ..forms import ExperimentEditForm
from ..helper import get_readme_of_experiment, verify_and_get_experiment
from ..mixins import ExperimentContextMixin
from ..models import *
from ..tables import ExperimentTable
logger = logging.getLogger(__name__)
class ExperimentDetailView(DocsMixin, ExperimentPackageTypeMixin, DetailView):
"""Detail view of an experiment, showing the experiment steps and
for each experiment step the files in that folder"""
model = Experiment
template_name = "experiments_manager/experiment_detail/experiment_detail.html"
def dispatch(self, request, *args, **kwargs):
experiment = verify_and_get_experiment(self.request, self.kwargs['pk'])
active_step = experiment.get_active_step()
if not active_step and not experiment.completed:
return redirect(to=reverse('experimentsteps_choose', kwargs={'experiment_id': experiment.id}))
return super(ExperimentDetailView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
experiment = verify_and_get_experiment(self.request, self.kwargs['pk'])
self.object_type = self.get_requirement_type(experiment)
context = super(ExperimentDetailView, self).get_context_data(**kwargs)
context['steps'] = experiment.chosenexperimentsteps_set.all()
context['object_type'] = self.object_type
if not experiment.completed:
active_step = experiment.get_active_step()
context['active_step_id'] = active_step.id
context['final_step'] = experiment.get_active_step().step_nr == experiment.chosenexperimentsteps_set.count()
context['recommendations'] = get_recommendations(active_step)
context['index_active'] = True
logger.info('%s viewed index for %s', self.request.user, experiment)
return context
class FileListForStep(View):
"""Shows a list of files for a specific experiment step
Expects in request.GET the experiment_id and step_id
Checks if user requesting is owner of experiment"""
def get(self, request):
assert 'experiment_id' in request.GET
assert 'step_id' in request.GET
step_id = request.GET['step_id']
experiment_id = request.GET['experiment_id']
experiment = verify_and_get_experiment(request, experiment_id)
step = get_object_or_404(ChosenExperimentSteps, pk=step_id)
github_helper = get_github_helper(request, experiment)
file_list = get_files_for_step(step, experiment, github_helper)
return_dict = []
for content_file in file_list:
file_dict = {'file_name': content_file.name,
'file_url': reverse('file_detail', kwargs={'experiment_id': experiment.id})
+ '?file_name=' + content_file.path,
'type': content_file.type,
'file_path': content_file.path,
'slug': content_file.slug}
if hasattr(content_file, 'pylint_results'):
file_dict['static_code_analysis'] = content_file.pylint_results
return_dict.append(file_dict)
return JsonResponse({'files': return_dict})
class FileViewGitRepository(ExperimentContextMixin, View):
"""Shows the source code of a file
Attaches pylint results, if available, and incorporates them at the right line"""
def get(self, request, experiment_id):
context = super(FileViewGitRepository, self).get(request, experiment_id)
file_name = request.GET['file_name']
context['file_name'] = file_name
experiment = verify_and_get_experiment(request, experiment_id)
github_helper = GitHubHelper(request.user, experiment.git_repo.name)
content_file = github_helper.view_file(file_name)
pylint_results = return_results_for_file(experiment, file_name)
context['content_file'] = self.add_pylint_results_to_content(pylint_results, content_file)
return render(request, 'experiments_manager/file_detail.html', context)
def add_pylint_results_to_content(self, pylint_results, content_file):
counter = 0
new_content_file_str = ''
for line in content_file.split('\n'):
pylint_for_line = pylint_results.filter(line_nr=counter)
if pylint_for_line:
new_content_file_str += "{0}\n".format(line)
for pylint_line in pylint_for_line:
new_content_file_str += '<span class="nocode" id="{0}style">{1}</span>'.format(pylint_line.pylint_type,
pylint_line.message)
else:
new_content_file_str += line + '\n'
counter += 1
return new_content_file_str
@login_required
def index(request):
"""Experiment table showing all current and previous experiments of owner"""
owner = WorkbenchUser.objects.get(user=request.user)
experiments = Experiment.objects.filter(owner=owner).order_by('-created')
table = ExperimentTable(experiments)
return render(request, 'experiments_manager/experiments_table.html', {'table': table})
@login_required
def complete_step_and_go_to_next(request, experiment_id, create_package):
""""Completes an experiment step and moves on to the next.
If no next step is available, redirect to publication page
In request.GET, if create_package is 1, user is redirected to pacakge create page"""
experiment = verify_and_get_experiment(request, experiment_id)
active_step = ChosenExperimentSteps.objects.filter(experiment=experiment, active=True)
if active_step:
active_step = active_step[0]
active_step.active = False
active_step.completed = True
active_step.completed_at = datetime.now()
active_step.save()
completed_step_id = active_step.pk
next_step_nr = active_step.step_nr + 1
next_step = ChosenExperimentSteps.objects.filter(experiment=experiment, step_nr=next_step_nr)
if next_step.count() != 0: # if there is a next step
next_step = next_step[0] # set this step as active
next_step.active = True
next_step.save() # and save it
logger.info('%s completed the step %s and moved on to %s for experiment %s', request.user, active_step,
next_step, experiment)
if int(create_package) == 1: # if a package is to be created, redirect
return redirect(to=reverse('internalpackage_create_fromexperiment',
kwargs={'experiment_id': experiment_id, 'step_id': completed_step_id}))
if next_step: # if a next step exists, redirect back to experiment detail page
return redirect(to=reverse('experiment_detail', kwargs={'pk': experiment_id, 'slug': experiment.slug()}))
# if no next step or active step exists, redirect to publish page
return redirect(to=reverse('experiment_publish', kwargs={'pk': experiment_id, 'slug': experiment.slug()}))
@login_required
def experiment_publish(request, pk, slug):
"""Experiment publish view: sets experiment to completed, saves it and shows publish view"""
experiment = verify_and_get_experiment(request, pk)
experiment.completed = True
experiment.save()
return render(request, 'experiments_manager/experiment_publish.html', {'experiment': experiment})
@login_required
def experiment_generate_uuid_and_make_public(request, pk, slug):
"""Publishes an experiment by generating a unique ID of length 32,
setting the experiment public, creating a Git release and saving the GitHub release URL"""
experiment = verify_and_get_experiment(request, pk)
if not experiment.unique_id:
experiment.unique_id = get_random_string(32)
experiment.public = True
experiment.save()
# create github release
github_helper = GitHubHelper(experiment.owner, experiment.git_repo.name)
tag_name = 'release_{0}'.format(experiment.unique_id)
git_release = github_helper.create_release(tag_name=tag_name, name=experiment.title,
body='This is an automatically created release by the MOOC workbench',
pre_release=False)
experiment.publish_url_zip = git_release.html_url
# https://github.com/PyGithub/PyGithub/pull/522#pullrequestreview-22171287
# once merged and released, change this to zipball_url pls kthxby
experiment.save()
return redirect(to=reverse('experiment_readonly', kwargs={'unique_id': experiment.unique_id}))
class ExperimentReadOnlyView(DocsMixin, ExperimentPackageTypeMixin, DetailView):
"""Read-only view of an experiment, provides a complete overview of the experiment work done.
This view is excluded from required sign in."""
model = Experiment
template_name = "experiments_manager/experiment_detail/experiment_readonly.html"
def get_object(self, queryset=None):
return Experiment.objects.get(unique_id=self.kwargs['unique_id'])
def get_context_data(self, **kwargs):
experiment = self.object
self.object_type = self.get_requirement_type(experiment)
github_helper = GitHubHelper(experiment.owner, experiment.git_repo.name)
context = super(ExperimentReadOnlyView, self).get_context_data(**kwargs)
context['steps'] = get_files_for_steps(experiment, github_helper)
context['object_type'] = self.object_type
context['object_id'] = experiment.pk
context['completed'] = True
context['object_list'] = self.object.requirements.all()
context['data_schema_list'] = self.object.schema.fields.all()
context['readme'] = get_readme_of_experiment(self.object)
context['experiment_id'] = self.object.id
return context
class ExperimentEditView(UpdateView):
model = Experiment
form_class = ExperimentEditForm
template_name = 'experiments_manager/experiment_edit.html'
def get_form(self, form_class=None):
form_instance = super(ExperimentEditView, self).get_form(form_class)
form_instance.initial = {'title': self.object.title,
'description': self.object.description,
'github_url': self.object.git_repo.github_url}
return form_instance
def get_context_data(self, **kwargs):
context = super(ExperimentEditView, self).get_context_data(**kwargs)
context['settings_active'] = True
return context
def form_valid(self, form):
git_repo = self.object.git_repo
git_repo.github_url = form.cleaned_data['github_url']
git_repo.save()
messages.add_message(self.request, messages.SUCCESS, 'Changes saved successfully.')
return super(ExperimentEditView, self).form_valid(form)
def get_success_url(self):
return reverse('experiment_edit', kwargs={'pk': self.object.id})
@login_required
def experimentstep_scorecard(request, pk, slug):
"""Shows the score card for the relevant experiment/ experiment step
To do: clean up this code"""
experiment = verify_and_get_experiment(request, pk)
completed_step = experiment.get_active_step()
context = {}
context['completed_step'] = completed_step
context['experiment'] = experiment
testing_results = get_most_recent_measurement(completed_step, 'Testing')
context['testing'] = testing_results
docs_results = get_most_recent_measurement(completed_step, 'Documentation')
context['docs'] = docs_results
ci_results = get_most_recent_measurement(completed_step, 'Use of CI')
context['ci'] = ci_results
vcs_results = get_most_recent_measurement(completed_step, 'Version control use')
context['vcs'] = vcs_results
dependency_results = get_most_recent_measurement(completed_step, 'Dependencies defined')
context['dependency'] = dependency_results
pylint_results = get_most_recent_measurement(completed_step, 'Static code analysis')
context['pylint'] = pylint_results
context['final_step'] = experiment.chosenexperimentsteps_set.count() == experiment.get_active_step().step_nr
return render(request, 'experiments_manager/experiment_scorecard.html', context)
@login_required
def readme_of_experiment(request, experiment_id):
"""Retrieves read me from GitHub repository of experiment"""
experiment = verify_and_get_experiment(request, experiment_id)
content_file = get_readme_of_experiment(experiment)
return render(request, 'experiments_manager/experiment_detail/experiment_readme.html', {'readme': content_file,
'object': experiment,
'readme_active': True})
@login_required
def experiment_issues(request, experiment_id):
"""Retrieves the GitHub issues of experiment
To do: this view is currently not used."""
experiment = verify_and_get_experiment(request, experiment_id)
github_helper = GitHubHelper(request.user, experiment.git_repo.name)
context = {'object': experiment,
'object_type': experiment.get_object_type(),
'issues_active': True,
'issues': github_helper.get_issues()}
return render(request, 'experiments_manager/experiment_detail/experiment_issues.html', context)
@login_required
def experiment_single_issue(request, experiment_id, issue_nr):
"""View single GitHub issue from experiment
Todo: this view is currently not used"""
experiment = verify_and_get_experiment(request, experiment_id)
github_helper = GitHubHelper(request.user, experiment.git_repo.name)
issue = github_helper.get_single_issue(int(issue_nr))
return JsonResponse({'contents': issue.body, 'title': issue.title})
|
MOOCworkbench/MOOCworkbench
|
experiments_manager/views/views.py
|
Python
|
mit
| 14,934
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-26 13:33
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('annotations', '0007_auto_20170826_1446'),
]
def forward_func(apps, schema_editor):
Annotation = apps.get_model("annotations", "Annotation")
db_alias = schema_editor.connection.alias
# Set vector NULL where not_in_image
for annotation in Annotation.objects.using(db_alias).all():
if annotation.not_in_image:
annotation.vector = None
annotation.save()
def backward_func(apps, schema_editor):
raise NotImplementedError('not completely reversible in one transaction!')
Annotation = apps.get_model("annotations", "Annotation")
db_alias = schema_editor.connection.alias
# Set not_in_image and vector to a not-NULL value where vector is NULL
for annotation in Annotation.objects.using(db_alias).all():
if annotation.vector is None:
annotation.vector = ''
annotation.not_in_image = True
annotation.save()
operations = [
migrations.AlterField(
model_name='annotation',
name='vector',
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
migrations.RunPython(forward_func, backward_func, atomic=True),
migrations.RemoveField(
model_name='annotation',
name='not_in_image',
),
]
|
bit-bots/imagetagger
|
src/imagetagger/annotations/migrations/0008_auto_20170826_1533.py
|
Python
|
mit
| 1,648
|
# coding: utf-8
"""
MailMojo API
v1 of the MailMojo API # noqa: E501
OpenAPI spec version: 1.1.0
Contact: hjelp@mailmojo.no
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import io
import json
import logging
import re
import ssl
import certifi
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import urlencode
try:
import urllib3
except ImportError:
raise ImportError('Swagger python client requires urllib3.')
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if configuration.ssl_ca_cert:
ca_certs = configuration.ssl_ca_cert
else:
# if not set certificate file, use Mozilla's root certificates.
ca_certs = certifi.where()
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy:
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = '{}'
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
# must del headers['Content-Type'], or the correct
# Content-Type which generated by urllib3 will be
# overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is
# provided in serialized form
elif isinstance(body, str):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# In the python 3, the response.data is bytes.
# we need to decode it to string.
if six.PY3:
r.data = r.data.decode('utf8')
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
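# Minimal sketch of driving this client directly (illustrative only; the SDK
# normally constructs RESTClientObject through ApiClient, and the Configuration
# import path and URL below are assumptions, not taken from this file):
#
#   from mailmojo_sdk.configuration import Configuration
#   client = RESTClientObject(Configuration())
#   resp = client.GET('https://example.invalid/v1/ping',
#                     headers={'Accept': 'application/json'})
#   print(resp.status, resp.data)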
class ApiException(Exception):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""Custom error messages for exception"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(
self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
|
eliksir/mailmojo-python-sdk
|
mailmojo_sdk/rest.py
|
Python
|
apache-2.0
| 13,147
|
"""
Reimport of fabric.api.
Provides some useful wrappers of stuff.
"""
from fabric.api import *
from fabric import api as _api
from braid import info as _info
def sudo(*args, **kwargs):
"""
Only calls sudo if not already root.
FIXME: Should handle the case where the desired user isn't root.
"""
func = _api.run if _info.isRoot() else _api.sudo
func(*args, **kwargs)
|
alex/braid
|
braid/api.py
|
Python
|
mit
| 396
|
# io.py 21/08/2015 D.J.Whale
#
# Handle the input/output streams to a running program.
# This decides how to read and write data to and from a running program,
# or to and from a stored program file.
#
# Depending on the configuration, it can use decimal, binary or hexadecimal
# of any width in characters/bytes. You can also set a default base and width
# that will be used if not supplied.
import decimal
import binary
import hexadecimal
DECIMAL = 10
BINARY = 2
HEXADECIMAL = 16
thebase = DECIMAL
def configure(base):
global thebase
thebase = base
def read(base=None, width=None, file=None):
if base == None:
base = thebase
if base == DECIMAL:
return decimal.read(width=width, file=file)
elif base == BINARY:
return binary.read(width=width, file=file)
elif base == HEXADECIMAL:
return hexadecimal.read(width=width, file=file)
else:
raise ValueError("Unsupported base:" + str(base))
def write(number, base=None, width=None, file=None):
if base == None:
base = thebase
if base == DECIMAL:
decimal.write(number, width=width, file=file)
elif base == BINARY:
binary.write(number, width=width, file=file)
elif base == HEXADECIMAL:
hexadecimal.write(number, width=width, file=file)
else:
raise ValueError("Unsupported base:" + str(base))
#def writeln(number, base=None, width= None, file=file):
# write(number, base=base, width=width, file=file)
# if file == None:
# END
|
whaleygeek/MyLittleComputer
|
src/python/io.py
|
Python
|
mit
| 1,524
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Neil Nutt <neilnutt@googlemail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Current package imports
import floodestimation,wx,wx.adv
import os
import FrontPage,CatchmentDescriptors,QMED
import config as c
#from floodestimation.loaders import load_catchment
from floodestimation import db
from floodestimation.collections import CatchmentCollections
from floodestimation.analysis import QmedAnalysis
from floodestimation.analysis import GrowthCurveAnalysis
from floodestimation.entities import Catchment
from project_file import save_project, load_project
class Analysis(object):
    def __init__(self):
        self.name = None
        self.catchment = Catchment("River Town", "River Burn")
        self.db_session = db.Session()
        self.gauged_catchments = CatchmentCollections(self.db_session)
        self.qmed = None

    def finish(self):
        self.db_session.close()

    def run_qmed_analysis(self):
        self.qmed_analysis = QmedAnalysis(self.catchment, self.gauged_catchments)
        self.results = self.qmed_analysis.results_log
        self.results['qmed_all_methods'] = self.qmed_analysis.qmed_all_methods()

    def run_growthcurve(self):
        results = {}
        analysis = GrowthCurveAnalysis(self.catchment, self.gauged_catchments, results_log=results)
        gc = analysis.growth_curve()
        aeps = [0.5, 0.2, 0.1, 0.05, 0.03333, 0.02, 0.01333, 0.01, 0.005, 0.002, 0.001]
        growth_factors = gc(aeps)
        flows = growth_factors * self.qmed
        results['aeps'] = aeps
        results['growth_factors'] = growth_factors
        results['flows'] = flows
        self.results['gc'] = results


class MainFrame(wx.Frame):
    def __init__(self, parent):
        super(MainFrame, self).__init__(parent, title="Statistical Flood Estimation Tool", size=(600, 600))

        # --- initialize other settings
        self.dirName = ""
        self.fileName = ""
        self.windowName = 'Main Window'
        self.SetName(self.windowName)

        c.analysis = Analysis()

        self.InitUI()
        self.Centre()
        self.Show()

    def InitUI(self):
        self.panel = wx.Panel(self, -1)
        menubar = wx.MenuBar()

        # Defining the file menu
        fileMenu = wx.Menu()
        #mN = wx.MenuItem(fileMenu, wx.ID_NEW, '&New\tCtrl+N')
        mO = wx.MenuItem(fileMenu, wx.ID_OPEN, '&Open\tCtrl+O')
        mSA = wx.MenuItem(fileMenu, wx.ID_SAVEAS, '&Save as\tCtrl+ALT+S')
        mS = wx.MenuItem(fileMenu, wx.ID_SAVE, '&Save\tCtrl+S')
        #fileMenu.Append(mN)
        fileMenu.Append(mO)
        fileMenu.Append(mS)
        fileMenu.Append(mSA)
        fileMenu.AppendSeparator()
        mQ = wx.MenuItem(fileMenu, wx.ID_EXIT, '&Quit\tCtrl+Q')
        fileMenu.Append(mQ)

        #self.Bind(wx.EVT_MENU, self.OnNew, mN)
        self.Bind(wx.EVT_MENU, self.OnFileOpen, mO)
        self.Bind(wx.EVT_MENU, self.OnFileSave, mS)
        self.Bind(wx.EVT_MENU, self.OnFileSaveAs, mSA)
        self.Bind(wx.EVT_MENU, self.OnQuit, mQ)

        # Defining the help menu
        helpMenu = wx.Menu()
        mAbout = wx.MenuItem(helpMenu, wx.ID_ABOUT, '&About')
        helpMenu.Append(mAbout)
        self.Bind(wx.EVT_MENU, self.OnAbout, mAbout)

        # Applying menus to the menu bar
        menubar.Append(fileMenu, '&File')
        menubar.Append(helpMenu, '&Help')
        self.SetMenuBar(menubar)

        # Here we create a notebook on the panel
        nb = wx.Notebook(self.panel)

        # create the page windows as children of the notebook
        self.page1 = FrontPage.Fpanel(nb, self)
        self.page2 = CatchmentDescriptors.Fpanel(nb, self.page1)
        self.page3 = QMED.Fpanel(nb, self.page2)
        #self.page4 = GrowthCurve.PoolingPanel(nb, self.page2, self.page3)
        #self.page4 = GrowthCurve.MainPanel(nb, self.page2, self.page3)
        #self.page5 = Summary.Fpanel(nb)

        # add the pages to the notebook with the label to show on the tab
        nb.AddPage(self.page1, "Overview")
        nb.AddPage(self.page2, "CDS")
        nb.AddPage(self.page3, "QMED")
        #nb.AddPage(self.page4, "FGC")
        #nb.AddPage(self.page5, "Summary")

        # finally, put the notebook in a sizer for the panel to manage
        # the layout
        sizer = wx.BoxSizer()
        sizer.Add(nb, 1, wx.EXPAND)
        self.panel.SetSizer(sizer)

        nb.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGING, self.OnPageChanging)
        nb.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged)

        self.panel.Layout()
        self.Layout()
        self.Refresh()

    def OnPageChanging(self, e):
        #self.Refresh()
        #self.Update()
        e.Skip()

    def OnPageChanged(self, e):
        #self.page2.title_label.SetLabel(str(self.page1.title.GetValue()))
        #self.Refresh()
        #self.Update()
        e.Skip()

    def OnAbout(self, e):
        description = """The Statistical Flood Estimation Tool implements current statistical
procedures for estimating the magnitude of flood flows in the United Kingdom, using the methods
detailed in the Flood Estimation Handbook and subsequent updates. It has been developed by the
not-for-profit Open Hydrology (OH) community of software developers. The software makes extensive
use of the floodestimation library, which is also developed by OH.
This is an early development version; additional features are intended to be implemented in
the coming months and years.
"""
        licence = """The Statistical Flood Estimation Tool is free software; you can redistribute
it and/or modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
The Statistical Flood Estimation Tool is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details. You should have
received a copy of the GNU General Public License along with File Hunter;
if not, write to the Free Software Foundation, Inc., 59 Temple Place,
Suite 330, Boston, MA 02111-1307 USA
EXCEPTION CLAUSE:
A. Organisations (commercial, academic, educational, private individual or
any other body) must publicly state via this software project's website
that they have undertaken a validation process of this software prior to
its use. In submitting their public declaration, organisations should
provide details of the findings of their review including any caveats or
exclusions of use. Organisations must record errors or bugs they find within
the project's online issue tracking system within its GitHub repository.
This exclusion of use permits reasonable use of the software by organisations
for testing and validation.
Software project website:
https://github.com/OpenHydrology/StatisticalFloodEstimationTool/wiki
"""
        info = wx.adv.AboutDialogInfo()
        info.SetIcon(wx.Icon('..\\art\\OH.darkgrey.250x250.png', wx.BITMAP_TYPE_PNG))
        info.SetName('Statistical Flood Estimation Tool')
        info.SetVersion('Pre-release 0.0.2')
        info.SetDescription(description)
        info.SetCopyright('(C) 2015 Open Hydrology developer community')
        info.SetWebSite('https://github.com/OpenHydrology/StatisticalFloodEstimationTool')
        info.SetLicence(licence)
        info.AddDeveloper('Neil Nutt - Project Founder - neilnutt[at]googlemail[dot]com')
        info.AddDeveloper('\nFlorenz Hollebrandse - Developer - f.a.p.hollebrandse[at]protonmail[dot]ch')
        info.AddDeveloper('\nMichael Spencer - Communications - spencer.mike.r[at]gmail[dot]com')

        wx.adv.AboutBox(info)

    def OnPreferences(self, e):
        '''
        Load up preferences screen
        '''
        import Preferences
        Preferences.PreferencesFrame(self).Show()
        self.Refresh()
        self.Update()

    def OnFileOpen(self, e):
        """ File|Open event - Open dialog box. """
        dlg = wx.FileDialog(self, "Open", self.dirName, self.fileName,
                            "Project directory (*.ini)|*.ini;*.hyd|Project archive (*.hyd)|*.ini;*.hyd", wx.FD_OPEN)
        if (dlg.ShowModal() == wx.ID_OK):
            self.fileName = dlg.GetFilename()
            self.dirName = dlg.GetDirectory()
            filePath = os.path.join(self.dirName, self.fileName)
            load_project(filePath, self)
        dlg.Destroy()

    #---------------------------------------
    def OnFileSave(self, e):
        """ File|Save event - Just Save it if it's got a name. """
        if (self.fileName != "") and (self.dirName != ""):
            saveFile = os.path.join(self.dirName, self.fileName)
            save_project(self, c.analysis.catchment, saveFile)
            return True  # report success so OnFileSaveAs can update the window title
        else:
            ### - If no name yet, then use the OnFileSaveAs to get name/directory
            return self.OnFileSaveAs(e)

    #---------------------------------------
    def OnFileSaveAs(self, e):
        """ File|SaveAs event - Prompt for File Name. """
        ret = False
        dlg = wx.FileDialog(self, "Save As", self.dirName, self.fileName,
                            "Project directory (*.ini)|*.ini;*.hyd|Project archive (*.hyd)|*.ini;*.hyd", wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        if (dlg.ShowModal() == wx.ID_OK):
            self.fileName = dlg.GetFilename()
            self.dirName = dlg.GetDirectory()
            ### - Use the OnFileSave to save the file
            if self.OnFileSave(e):
                self.SetTitle(self.fileName)
                ret = True
        dlg.Destroy()
        return ret

    def OnQuit(self, event):
        dlg = wx.MessageDialog(self,
                               "Do you really want to close this application?",
                               "Confirm Exit", wx.OK|wx.CANCEL|wx.ICON_QUESTION)
        result = dlg.ShowModal()
        dlg.Destroy()
        if result == wx.ID_OK:
            self.Destroy()


if __name__ == "__main__":
    app = wx.App(redirect=False)
    #app = wx.App(redirect=True,filename='error_log.txt')
    MainFrame(None).Show()
    app.MainLoop()
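
# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original application): a minimal, headless
# way the Analysis class above could be exercised without the wx GUI. The
# choice of QMED estimate is a placeholder; the keys returned by
# qmed_all_methods() depend on the floodestimation library.
def _example_headless_analysis():
    analysis = Analysis()
    analysis.run_qmed_analysis()
    estimates = analysis.results['qmed_all_methods']
    # pick any available estimate as the index flood (illustrative only)
    analysis.qmed = next(v for v in estimates.values() if v is not None)
    analysis.run_growthcurve()
    flows = analysis.results['gc']['flows']
    analysis.finish()
    return flows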
|
OpenHydrology/StatisticalFloodEstimationTool
|
floodestimationgui/gui.py
|
Python
|
gpl-3.0
| 10,902
|
__author__ = 'Brandon'
from helper import greeting
greeting("This is back in the main branch")
|
bmblank/cs3240-labdemo
|
mainTest.py
|
Python
|
mit
| 96
|
#!/usr/bin/env python
import fileinput
plain = open('plaintext.txt','rb')
key = open('key.txt','rb')
cipher = open('ciphertext.txt','wb')
plain = bytearray(plain.read())
key = bytearray(key.read())
for i,b in enumerate(plain):
    if (i + 1) % 16 == 0:
        cipher.write('0x%0.2x\n' % (b ^ key[i]))
    else:
        cipher.write('0x%0.2x ' % (b ^ key[i]))
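
# Editorial sketch (not part of the original script): XOR with the same key is
# its own inverse, so decryption is the identical operation once the hex text
# has been parsed back into bytes. The bytearray arguments below are assumed.
def xor_decrypt(cipherbytes, keybytes):
    """Recover the plaintext by XOR-ing each ciphertext byte with the key byte."""
    return bytearray(c ^ k for c, k in zip(cipherbytes, keybytes))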
|
theonewolf/Spoon-Quest-2012
|
answers/challenge3/cipher.py
|
Python
|
mit
| 364
|
from tests.package.test_python import TestPythonPackageBase
class TestPythonPy2Can(TestPythonPackageBase):
    __test__ = True
    config = TestPythonPackageBase.config + \
        """
        BR2_PACKAGE_PYTHON=y
        BR2_PACKAGE_PYTHON_CAN=y
        """
    sample_scripts = ["tests/package/sample_python_can.py"]
    timeout = 40


class TestPythonPy3Can(TestPythonPackageBase):
    __test__ = True
    config = TestPythonPackageBase.config + \
        """
        BR2_PACKAGE_PYTHON3=y
        BR2_PACKAGE_PYTHON_CAN=y
        """
    sample_scripts = ["tests/package/sample_python_can.py"]
    timeout = 40
|
masahir0y/buildroot-yamada
|
support/testing/tests/package/test_python_can.py
|
Python
|
gpl-2.0
| 617
|
from apps.api.utils import SharedAPIRootRouter
from apps.slack import views
urlpatterns = []
router = SharedAPIRootRouter()
router.register('slack', views.InviteViewSet, base_name='slack')
|
dotKom/onlineweb4
|
apps/slack/urls.py
|
Python
|
mit
| 191
|
import astropy.io.fits
import numpy as np
import matplotlib.pyplot as plt
# Create an empty numpy array. 2D; spectra with 4 data elements.
filtered = np.zeros((2040,4))
combined_extracted_1d_spectra_ = astropy.io.fits.open("xtfbrsnN20160705S0025.fits")
exptime = float(combined_extracted_1d_spectra_[0].header['EXPTIME'])
wstart = combined_extracted_1d_spectra_[1].header['CRVAL1']
wdelt = combined_extracted_1d_spectra_[1].header['CD1_1']
for i in range(len(filtered)):
    filtered[i][0] = wstart + (i*wdelt)
print "Wavelength array: \n", filtered
f = open("hk.txt")
lines = f.readlines()
f.close()
lines = [lines[i].strip().split() for i in range(len(lines))]
for i in range(len(lines)):
    lines[i][0] = float(lines[i][0])*10**4
for i in range(len(filtered)):
    mindif = min(lines, key=lambda x:abs(x[0]-filtered[i][0]))
    filtered[i][1] = mindif[2]
calibspec = np.load("calibspec.npy")
"""
effspec = np.load("effspec.npy")
print "Effspec:\n", effspec
calibspec = np.zeros((2040))
for i in range(len(effspec)):
    if effspec[i] != 0:
        calibspec[i] = combined_extracted_1d_spectra_[1].data[i]/exptime/effspec[i]
    else:
        calibspec[i] = 0
"""
filter_weighted_flux = []
temp_percentages = []
for i in range(len(calibspec)):
    filtered[i][2] = calibspec[i]
    filtered[i][3] = filtered[i][1] * filtered[i][2] * 0.01
    filter_weighted_flux.append(filtered[i][3])
    temp_percentages.append(filtered[i][1]*0.01)
print "\nIntegral of filter_weighted_flux:"
print np.trapz(filter_weighted_flux)
print "\nIntegral of percentages:"
print np.trapz(temp_percentages)
print "Integral of filter_weighted_flux divided by integral of percentages:"
print np.trapz(filter_weighted_flux)/np.trapz(temp_percentages)
plt.figure(1)
plt.plot(calibspec)
plt.plot(filter_weighted_flux, "r--")
plt.figure(2)
plt.plot(temp_percentages)
plt.show()
|
mrlb05/Nifty
|
tests/generate_response_curve.py
|
Python
|
mit
| 1,827
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
from nets import vgg
from sys import argv
from util import run_model
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python vgg16.py
    """
    height, width = 224, 224
    inputs = tf.Variable(tf.random_uniform((1, height, width, 3)), name='input')
    net, end_points = vgg.vgg_16(inputs, is_training = False)
    print("nodes in the graph")
    for n in end_points:
        print(n + " => " + str(end_points[n]))
    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split())
    run_model(net_outputs, argv[1])


if __name__ == "__main__":
    main()
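
# Editorial note (not part of the original script): as the argv usage above
# suggests, the script appears to expect the dump destination first and the
# output tensor names second, e.g.
#
#   python vgg16.py /tmp/vgg16_dump "<output_tensor:0>"
#
# Both the path and the tensor name are placeholders.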
|
122689305/BigDL
|
spark/dl/src/test/resources/tf/models/vgg16.py
|
Python
|
apache-2.0
| 1,359
|
"""
Transition
====================
"""
import spirit.spiritlib as spiritlib
import ctypes
### Load Library
_spirit = spiritlib.load_spirit_library()
_Homogeneous = _spirit.Transition_Homogeneous
_Homogeneous.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_Homogeneous.restype = None
def homogeneous(p_state, idx_1, idx_2, idx_chain=-1):
    """Generate homogeneous transition between two images of a chain."""
    _Homogeneous(ctypes.c_void_p(p_state), ctypes.c_int(idx_1), ctypes.c_int(idx_2),
                 ctypes.c_int(idx_chain))


_Homogeneous_Interpolate = _spirit.Transition_Homogeneous_Insert_Interpolated
_Homogeneous_Interpolate.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_Homogeneous_Interpolate.restype = None
def homogeneous_insert_interpolated(p_state, n_interpolate, idx_chain=-1):
    """Make chain denser by inserting n_interpolate images between all images."""
    _Homogeneous_Interpolate(ctypes.c_void_p(p_state), ctypes.c_int(n_interpolate), ctypes.c_int(idx_chain))


_Add_Noise_Temperature = _spirit.Transition_Add_Noise_Temperature
_Add_Noise_Temperature.argtypes = [ctypes.c_void_p, ctypes.c_float, ctypes.c_int,
                                   ctypes.c_int, ctypes.c_int]
_Add_Noise_Temperature.restype = None
def add_noise(p_state, temperature, idx_1, idx_2, idx_chain=-1):
    """Add some temperature-scaled noise to a transition between two images of a chain."""
    _Add_Noise_Temperature(ctypes.c_void_p(p_state), ctypes.c_float(temperature),
                           ctypes.c_int(idx_1), ctypes.c_int(idx_2), ctypes.c_int(idx_chain))
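
def _example_transition_usage(cfgfile="input.cfg"):
    # Editorial sketch, not part of the original module: requires the compiled
    # Spirit core library. The config file name and image indices are
    # placeholders, and spirit.state.State is assumed from the wider Spirit
    # Python API.
    from spirit import state
    with state.State(cfgfile) as p_state:
        homogeneous(p_state, 0, 5)
        add_noise(p_state, temperature=5.0, idx_1=0, idx_2=5)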
|
spirit-code/spirit
|
core/python/spirit/transition.py
|
Python
|
mit
| 1,673
|
# coding:utf8
class Plugin(object):
    # The docstring below is in Russian; in English it reads roughly:
    # "This plugin stops the bot. Using it requires access level {protection}
    #  or higher. Keywords: [{keywords}]. Usage: {keyword}. Example: {keyword}"
    __doc__ = '''Плагин предназначен для остановки бота.
Для использования необходимо иметь уровень доступа {protection} или выше
Ключевые слова: [{keywords}]
Использование: {keyword}
Пример: {keyword}'''

    name = 'stop'
    keywords = (u'стоп', name, '!')
    protection = 3
    argument_required = False

    def respond(self, msg, rsp, utils, *args, **kwargs):
        utils.stop_bot()
        # Reply text in Russian: "Shutting down. Have a good day!"
        rsp.text = u'Завершаю работу. Удачного времени суток!'
        return rsp
|
Fogapod/VKBot
|
bot/plugins/plugin_stop.py
|
Python
|
mit
| 684
|
""" Automation of IPA subid feature bugs
:requirement: IDM-IPA-REQ: ipa subid range
:casecomponent: sssd
:subsystemteam: sst_idm_sssd
:upstream: yes
"""
import pytest
import subprocess
import time
import os
from sssd.testlib.common.utils import SSHClient
test_password = "Secret123"
user = 'admin'
def execute_cmd(multihost, command):
    """ Execute command on client """
    cmd = multihost.client[0].run_command(command)
    return cmd


def ipa_subid_find(multihost):
    ssh1 = SSHClient(multihost.client[0].ip,
                     username=user, password=test_password)
    (result, result1, exit_status) = ssh1.exec_command(f"ipa "
                                                       f"subid-find"
                                                       f" --owner "
                                                       f"{user}")
    user_details = result1.readlines()
    global uid_start, uid_range, gid_start, gid_range
    uid_start = int(user_details[5].split(': ')[1].split('\n')[0])
    uid_range = int(user_details[6].split(': ')[1].split('\n')[0])
    gid_start = int(user_details[7].split(': ')[1].split('\n')[0])
    gid_range = int(user_details[8].split(': ')[1].split('\n')[0])
    ssh1.close()


@pytest.mark.usefixtures('environment_setup',
                         'subid_generate',
                         'bkp_cnfig_for_subid_files')
@pytest.mark.tier1
class TestSubid(object):
    """
    This is for ipa bugs automation
    """
    def test_podmanmap_feature(self, multihost):
        """
        :Title: Podman supports subid ranges managed by FreeIPA
        :id: 0e86df9c-50f1-11ec-82f3-845cf3eff344
        :customerscenario: true
        :bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1803943
        :steps:
          1. Test podman finds proper uid_map
          2. Test podman finds proper gid_map
        :expectedresults:
          1. Should succeed
          2. Should succeed
        """
        ipa_subid_find(multihost)
        ssh1 = SSHClient(multihost.client[0].ip,
                         username=user,
                         password=test_password)
        map1 = "/proc/self/uid_map"
        (results1, results2, results3) = ssh1.exec_command(f"podman "
                                                           f"unshare "
                                                           f"cat "
                                                           f"{map1}")
        actual_result = results2.readlines()
        assert str(uid_start) == actual_result[1].split()[1]
        assert str(uid_range) == actual_result[1].split()[2]
        map2 = "/proc/self/gid_map"
        (results1, results2, results3) = ssh1.exec_command(f"podman "
                                                           f"unshare "
                                                           f"cat "
                                                           f"{map2}")
        actual_result = results2.readlines()
        assert str(gid_start) == actual_result[1].split()[1]
        assert str(gid_range) == actual_result[1].split()[2]
        ssh1.close()

    def test_subid_feature(self, multihost):
        """
        :Title: support subid ranges managed by FreeIPA
        :id: 50bcdc28-00c8-11ec-bef4-845cf3eff344
        :customerscenario: true
        :bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1803943
        :steps:
          1. Test newuidmap command
          2. Test newgidmap command
        :expectedresults:
          1. Should succeed
          2. Should succeed
        """
        ipa_subid_find(multihost)
        ssh1 = SSHClient(multihost.client[0].ip,
                         username=user, password=test_password)
        (results1, results2, results3) = ssh1.exec_command("unshare"
                                                           " -U bash"
                                                           " -c 'echo $$"
                                                           ">/tmp/unshare.pid;"
                                                           "sleep 1000'")
        time.sleep(2)
        proces_id = int(execute_cmd(multihost,
                                    "cat "
                                    "/tmp/unshare.pid").stdout_text.strip())
        uid = 0
        gid = 1000
        count = 1
        (std_out, std_err, exit_status) = ssh1.exec_command(f"newuidmap "
                                                            f"{proces_id}"
                                                            f" {uid}"
                                                            f" {uid_start}"
                                                            f" {count}")
        for i in exit_status.readlines():
            assert "write to uid_map failed" not in i
        (result, result1, exit_status) = ssh1.exec_command(f"newgidmap "
                                                           f"{proces_id} "
                                                           f"{gid} "
                                                           f"{gid_start} "
                                                           f"{count}")
        for i in exit_status.readlines():
            assert "write to gid_map failed" not in i
        result = execute_cmd(multihost, f"cat /proc/{proces_id}/uid_map")
        assert str(uid) == result.stdout_text.split()[0]
        assert str(uid_start) == result.stdout_text.split()[1]
        assert str(count) == result.stdout_text.split()[2]
        result = execute_cmd(multihost, f"cat /proc/{proces_id}/gid_map")
        assert str(gid) == result.stdout_text.split()[0]
        assert str(gid_start) == result.stdout_text.split()[1]
        assert str(count) == result.stdout_text.split()[2]
        multihost.client[0].run_command(f'kill -9 {proces_id}')
        multihost.client[0].run_command("rm -vf "
                                        "/tmp/unshare.pid")
        ssh1.close()

    def test_list_subid_ranges(self, multihost):
        """
        :Title: support subid ranges managed by FreeIPA
        :id: 4ab33f84-00c8-11ec-ad91-845cf3eff344
        :customerscenario: true
        :bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1803943
        :steps:
          1. Test list_subid_ranges command
          2. Test list_subid_ranges -g command
        :expectedresults:
          1. Should succeed
          2. Should succeed
        """
        ipa_subid_find(multihost)
        ssh1 = SSHClient(multihost.client[0].ip,
                         username=user, password=test_password)
        cmd = multihost.client[0].run_command(f"cd /tmp/; "
                                              f"./list_subid_ranges "
                                              f"{user}")
        assert str(user) == cmd.stdout_text.split()[1]
        assert str(uid_start) == cmd.stdout_text.split()[2]
        assert str(uid_range) == cmd.stdout_text.split()[3]
        cmd = multihost.client[0].run_command(f"cd /tmp/;"
                                              f" ./list_subid_ranges"
                                              f" -g {user}")
        assert str(user) == cmd.stdout_text.split()[1]
        assert str(gid_start) == cmd.stdout_text.split()[2]
        assert str(gid_range) == cmd.stdout_text.split()[3]
        ssh1.close()
|
SSSD/sssd
|
src/tests/multihost/ipa/test_subid_ranges.py
|
Python
|
gpl-3.0
| 7,326
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Martine Lenders <m.lenders@fu-berlin.de>
#
# Distributed under terms of the MIT license.
from __future__ import print_function
import re
import sys
def generate_changelog(template_filename, changelog_filename, output_filename):
    with open(template_filename) as template, \
            open(changelog_filename) as changelog, \
            open(output_filename, "w") as output:
        changelog_lines = []
        release_title = re.compile(r"((RIOT-\d{4}\.\d{2}(\.\d+)? - "
                                   r"Release Notes)|(Release 2013\.08))")
        notes_template = re.compile(r"\[Notes\]")
        title = 0
        prev_newline = False
        # Traverse changelog file line-wise
        for line in changelog:
            # Remove duplicate newlines
            if line == "\n" and not prev_newline:
                prev_newline = True
            elif line == "\n" and prev_newline:
                continue
            else:
                prev_newline = False
            if title:   # if a release title was previously detected
                changelog_lines.append("\n")    # Remove the underline
                title = False
                prev_newline = True             # this introduces a newline, so count it
            elif release_title.match(line):
                # if line contains a release title
                release_match = re.search(r"(\d{4}\.\d{2}(\.\d+)?)", line)
                assert(release_match is not None)
                # parse out release number
                release = release_match.group(1)
                title = "Release %s" % release
                tag = "release-%s" % release.replace('.', '-')
                # append as level 1 title with reference anchor tag
                changelog_lines.append("# %s {#%s}\n" % (title, tag))
                title = True
            else:
                # append non-title log lines as verbatim (so notation is kept)
                changelog_lines.append("    %s" % line)
        for line in template:
            # Traverse template file line-wise
            if notes_template.match(line):
                # if template string is matched: start adding changelog lines
                for line_log in changelog_lines:
                    print(line_log, file=output, end="")
            else:
                # just print the template line
                print(line, file=output, end="")


if __name__ == "__main__":
    if len(sys.argv) < 4:
        print("usage %s <md template> <changelog> <output md>" % sys.argv[0], file=sys.stderr)
        sys.exit(1)
    generate_changelog(sys.argv[1], sys.argv[2], sys.argv[3])
|
jia200x/RIOT
|
doc/doxygen/generate-changelog.py
|
Python
|
lgpl-2.1
| 2,736
|
# -*- coding: utf-8 -*-
from cms_sitemap import *
|
team-xue/xue
|
xue/cms/sitemaps/__init__.py
|
Python
|
bsd-3-clause
| 49
|
"""
# Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
"""
from __future__ import absolute_import
from ..msg.Field import *
from ..msg.ImportExportHelper import *
from ..msg.StructValue import *
from ..msg.ValueFactory import *
from ..support.Class2TypeMap import *
from ..support.Validator_object import *
class SetSerializer(ImportExportHelper):
    """
    Serializer for generic Set.
    """
    FIELD_NAME = "keys"

    @classmethod
    def init(cls, typ, class2type):
        field = typ.getField(cls.FIELD_NAME)
        class2type.put(set, typ)
        typ.setComponentType(set)
        typ.setImportExportHelper(SetSerializer(typ, field))
        typ.putValidator(field, Validator_object.get(1))
        typ.lock()

    def __init__(self, typ, field):
        self.__type = typ
        self.__field = field

    def importValue(self, struct):
        struct.checkType(self.__type)
        return set(struct.get(self.__field))

    def exportValue(self, vf, value):
        struct = StructValue(self.__type, vf)
        struct.put(self.__field, list(value))
        return struct
|
OBIGOGIT/etch
|
binding-python/runtime/src/main/python/etch/binding/util/SetSerializer.py
|
Python
|
apache-2.0
| 2,079
|