Dataset schema (29 columns; ⌀ marks columns that can contain null values):

| column | dtype | range |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 3 to 1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 to 972 |
| max_stars_repo_name | stringlengths | 6 to 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 78 |
| max_stars_repo_licenses | listlengths | 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_path | stringlengths | 3 to 972 |
| max_issues_repo_name | stringlengths | 6 to 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 78 |
| max_issues_repo_licenses | listlengths | 1 to 10 |
| max_issues_count | int64 | 1 to 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_path | stringlengths | 3 to 972 |
| max_forks_repo_name | stringlengths | 6 to 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 78 |
| max_forks_repo_licenses | listlengths | 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| content | stringlengths | 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |

hexsha: ed7122cf74b8267894bbe1566c04da46ac268882 | size: 906 | ext: py | lang: Python
max_stars: path=src/djanban/apps/work_hours_packages/migrations/0007_auto_20170529_1414.py | repo=diegojromerolopez/djanban | head=6451688d49cf235d03c604b19a6a8480b33eed87 | licenses=["MIT"] | count=33 | min=2017-06-14T18:04:25.000Z | max=2021-06-15T07:07:56.000Z
max_issues: path=src/djanban/apps/work_hours_packages/migrations/0007_auto_20170529_1414.py | repo=diegojromerolopez/djanban | head=6451688d49cf235d03c604b19a6a8480b33eed87 | licenses=["MIT"] | count=1 | min=2017-05-10T08:45:55.000Z | max=2017-05-10T08:45:55.000Z
max_forks: path=src/djanban/apps/work_hours_packages/migrations/0007_auto_20170529_1414.py | repo=diegojromerolopez/djanban | head=6451688d49cf235d03c604b19a6a8480b33eed87 | licenses=["MIT"] | count=8 | min=2017-08-27T11:14:25.000Z | max=2021-03-03T12:11:16.000Z
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-29 12:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('work_hours_packages', '0006_auto_20170529_1406'),
]
operations = [
migrations.AlterField(
model_name='workhourspackage',
name='number_of_hours',
field=models.DecimalField(decimal_places=2, help_text='Number of hours of this package.', max_digits=10, verbose_name='Number of hours'),
),
migrations.AlterField(
model_name='workhourspackage',
name='offset_hours',
field=models.IntegerField(blank=True, default=0, help_text='This hours will be added as an initial offset of the spent time measurements gotten in the date interval', verbose_name='Offset hours'),
),
]
avg_line_length: 34.846154 | max_line_length: 208 | alphanum_fraction: 0.668874

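For reference, a sketch (not from the dataset) of the two WorkHoursPackage fields as this migration leaves them, reconstructed from the AlterField operations above; any other model fields are omitted:

```python
# Hypothetical reconstruction: only the two fields touched by the migration.
from django.db import models

class WorkHoursPackage(models.Model):
    number_of_hours = models.DecimalField(
        decimal_places=2, max_digits=10,
        help_text='Number of hours of this package.',
        verbose_name='Number of hours',
    )
    offset_hours = models.IntegerField(
        blank=True, default=0,
        help_text='This hours will be added as an initial offset of the spent '
                  'time measurements gotten in the date interval',
        verbose_name='Offset hours',
    )
```
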
hexsha: ade4627deecaf734eb4e3e379ca04534bc7c452c | size: 2,557 | ext: py | lang: Python
max_stars: path=examples/information_extraction/parse_subtrees.py | repo=cmgreivel/spaCy | head=a31506e06060c559abfeda043503935691af2e98 | licenses=["MIT"] | count=88 | min=2018-05-06T17:28:23.000Z | max=2022-03-06T20:19:16.000Z
max_issues: path=examples/information_extraction/parse_subtrees.py | repo=cmgreivel/spaCy | head=a31506e06060c559abfeda043503935691af2e98 | licenses=["MIT"] | count=12 | min=2018-07-19T15:11:57.000Z | max=2021-08-05T11:58:29.000Z
max_forks: path=examples/information_extraction/parse_subtrees.py | repo=cmgreivel/spaCy | head=a31506e06060c559abfeda043503935691af2e98 | licenses=["MIT"] | count=10 | min=2018-07-28T22:43:04.000Z | max=2020-11-22T22:58:21.000Z
content:
#!/usr/bin/env python
# coding: utf8
"""This example shows how to navigate the parse tree including subtrees
attached to a word.
Based on issue #252:
"In the documents and tutorials the main thing I haven't found is
examples on how to break sentences down into small sub thoughts/chunks. The
noun_chunks is handy, but having examples on using the token.head to find small
(near-complete) sentence chunks would be neat. Lets take the example sentence:
"displaCy uses CSS and JavaScript to show you how computers understand language"
This sentence has two main parts (XCOMP & CCOMP) according to the breakdown:
[displaCy] uses CSS and Javascript [to + show]
show you how computers understand [language]
I'm assuming that we can use the token.head to build these groups."
Compatible with: spaCy v2.0.0+
"""
from __future__ import unicode_literals, print_function
import plac
import spacy
@plac.annotations(
model=("Model to load", "positional", None, str))
def main(model='en_core_web_sm'):
nlp = spacy.load(model)
print("Loaded model '%s'" % model)
doc = nlp("displaCy uses CSS and JavaScript to show you how computers "
"understand language")
# The easiest way is to find the head of the subtree you want, and then use
# the `.subtree`, `.children`, `.lefts` and `.rights` iterators. `.subtree`
# is the one that does what you're asking for most directly:
for word in doc:
if word.dep_ in ('xcomp', 'ccomp'):
print(''.join(w.text_with_ws for w in word.subtree))
# It'd probably be better for `word.subtree` to return a `Span` object
# instead of a generator over the tokens. If you want the `Span` you can
# get it via the `.right_edge` and `.left_edge` properties. The `Span`
# object is nice because you can easily get a vector, merge it, etc.
for word in doc:
if word.dep_ in ('xcomp', 'ccomp'):
subtree_span = doc[word.left_edge.i : word.right_edge.i + 1]
print(subtree_span.text, '|', subtree_span.root.text)
# You might also want to select a head, and then select a start and end
# position by walking along its children. You could then take the
# `.left_edge` and `.right_edge` of those tokens, and use it to calculate
# a span.
if __name__ == '__main__':
plac.call(main)
# Expected output:
# to show you how computers understand language
# how computers understand language
# to show you how computers understand language | show
# how computers understand language | understand
avg_line_length: 39.338462 | max_line_length: 80 | alphanum_fraction: 0.702777

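A condensed sketch of the same subtree-to-Span idea without the plac wrapper, assuming the en_core_web_sm model is installed:

```python
import spacy

nlp = spacy.load('en_core_web_sm')
doc = nlp("displaCy uses CSS and JavaScript to show you how computers "
          "understand language")
# Convert each xcomp/ccomp subtree into a contiguous Span via its edge tokens.
for word in doc:
    if word.dep_ in ('xcomp', 'ccomp'):
        span = doc[word.left_edge.i : word.right_edge.i + 1]
        print(span.text, '|', span.root.text)
```
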
hexsha: 018c11f48fe73b1e03815454a32e9d7efee9430c | size: 3,763 | ext: py | lang: Python
max_stars: path=travis_pypi_setup.py | repo=lettherebe-test/live_demo | head=605392e521a51b6e97475ef03f297709bd7ea957 | licenses=["BSD-3-Clause"] | count=null | min=null | max=null
max_issues: path=travis_pypi_setup.py | repo=lettherebe-test/live_demo | head=605392e521a51b6e97475ef03f297709bd7ea957 | licenses=["BSD-3-Clause"] | count=null | min=null | max=null
max_forks: path=travis_pypi_setup.py | repo=lettherebe-test/live_demo | head=605392e521a51b6e97475ef03f297709bd7ea957 | licenses=["BSD-3-Clause"] | count=null | min=null | max=null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except:
from urllib.request import urlopen
GITHUB_REPO = 'lettherebe-test/live_demo'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
return yaml.load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
avg_line_length: 30.593496 | max_line_length: 79 | alphanum_fraction: 0.700771

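A sketch exercising the script's encrypt() helper with a throwaway, locally generated RSA key instead of the real Travis key; the key generation and the password value are illustrative, and encrypt() from the file above is assumed to be in scope:

```python
import base64
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa, padding

# Throwaway key pair standing in for the repository's Travis public key.
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
pem = private_key.public_key().public_bytes(
    serialization.Encoding.PEM,
    serialization.PublicFormat.SubjectPublicKeyInfo,
).decode()

token = encrypt(pem, b'my-pypi-password')   # base64-encoded ciphertext (bytes)
plain = private_key.decrypt(base64.b64decode(token), padding.PKCS1v15())
assert plain == b'my-pypi-password'
```
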
hexsha: 64b38b01c675fbe80396bc5fc0e4c4f40a497be8 | size: 23 | ext: py | lang: Python
max_stars: path=auth0/__init__.py | repo=GDGSNF/auth0-python | head=2714a3a5d6acac6c6589b01c8d94d5d5cd6deb0d | licenses=["MIT"] | count=null | min=null | max=null
max_issues: path=auth0/__init__.py | repo=GDGSNF/auth0-python | head=2714a3a5d6acac6c6589b01c8d94d5d5cd6deb0d | licenses=["MIT"] | count=null | min=null | max=null
max_forks: path=auth0/__init__.py | repo=GDGSNF/auth0-python | head=2714a3a5d6acac6c6589b01c8d94d5d5cd6deb0d | licenses=["MIT"] | count=null | min=null | max=null
content:
__version__ = '3.21.0'
avg_line_length: 11.5 | max_line_length: 22 | alphanum_fraction: 0.652174

hexsha: 67bed3d69752092d0b87f7ef3b3588a86aef3dfd | size: 29,036 | ext: py | lang: Python
max_stars: path=tests/modeladmin/tests.py | repo=beniwohli/django | head=514b2c989a948e3c59bda0da0c9427acf643cf5b | licenses=["PSF-2.0", "BSD-3-Clause"] | count=12 | min=2018-06-30T15:20:10.000Z | max=2020-10-20T02:15:00.000Z
max_issues: path=tests/modeladmin/tests.py | repo=beniwohli/django | head=514b2c989a948e3c59bda0da0c9427acf643cf5b | licenses=["PSF-2.0", "BSD-3-Clause"] | count=null | min=null | max=null
max_forks: path=tests/modeladmin/tests.py | repo=beniwohli/django | head=514b2c989a948e3c59bda0da0c9427acf643cf5b | licenses=["PSF-2.0", "BSD-3-Clause"] | count=5 | min=2018-07-17T05:41:04.000Z | max=2020-07-31T12:30:46.000Z
content:
from datetime import date
from django import forms
from django.contrib.admin.models import ADDITION, CHANGE, DELETION, LogEntry
from django.contrib.admin.options import (
HORIZONTAL, VERTICAL, ModelAdmin, TabularInline,
get_content_type_for_model,
)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.widgets import (
AdminDateWidget, AdminRadioSelect, AutocompleteSelect,
AutocompleteSelectMultiple,
)
from django.contrib.auth.models import User
from django.db import models
from django.forms.widgets import Select
from django.test import SimpleTestCase, TestCase
from django.test.utils import isolate_apps
from .models import Band, Concert, Song
class MockRequest:
pass
class MockSuperUser:
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
def test_modeladmin_str(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(str(ma), 'modeladmin.ModelAdmin')
# form/fields/fieldsets interaction ##############################
def test_default_fields(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request)), ['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name', 'bio', 'sign_date'])
self.assertIsNone(ma.get_exclude(request, self.band))
def test_default_fieldsets(self):
# fieldsets_add and fieldsets_change should return a special data structure that
# is used in the templates. They should generate the "right thing" whether we
# have specified a custom form, the fields argument, or nothing at all.
#
# Here's the default case. There are no custom form_add/form_change methods,
# no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_get_fieldsets(self):
# get_fieldsets() is called when figuring out form fields (#18681).
class BandAdmin(ModelAdmin):
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['name', 'bio']})]
ma = BandAdmin(Band, self.site)
form = ma.get_form(None)
self.assertEqual(form._meta.fields, ['name', 'bio'])
class InlineBandAdmin(TabularInline):
model = Concert
fk_name = 'main_band'
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['day', 'transport']})]
ma = InlineBandAdmin(Band, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['day', 'transport'])
def test_lookup_allowed_allows_nonexistent_lookup(self):
"""
A lookup_allowed allows a parameter whose field lookup doesn't exist.
(#21129).
"""
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertTrue(ma.lookup_allowed('name__nonexistent', 'test_value'))
@isolate_apps('modeladmin')
def test_lookup_allowed_onetoone(self):
class Department(models.Model):
code = models.CharField(max_length=4, unique=True)
class Employee(models.Model):
department = models.ForeignKey(Department, models.CASCADE, to_field="code")
class EmployeeProfile(models.Model):
employee = models.OneToOneField(Employee, models.CASCADE)
class EmployeeInfo(models.Model):
employee = models.OneToOneField(Employee, models.CASCADE)
description = models.CharField(max_length=100)
class EmployeeProfileAdmin(ModelAdmin):
list_filter = [
'employee__employeeinfo__description',
'employee__department__code',
]
ma = EmployeeProfileAdmin(EmployeeProfile, self.site)
# Reverse OneToOneField
self.assertIs(ma.lookup_allowed('employee__employeeinfo__description', 'test_value'), True)
# OneToOneField and ForeignKey
self.assertIs(ma.lookup_allowed('employee__department__code', 'test_value'), True)
def test_field_arguments(self):
# If fields is specified, fieldsets_add and fieldsets_change should
# just stick the fields into a formsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_fields(request)), ['name'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name'])
self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
# If fields or fieldsets is specified, it should exclude fields on the
# Form class to the fields specified. This may cause errors to be
# raised in the db layer if required model fields aren't in fields/
# fieldsets, but that's preferable to ghost errors where a field in the
# Form class isn't being displayed because it's not in fields/fieldsets.
# Using `fields`.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields), ['name'])
# Using `fieldsets`.
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields), ['name'])
# Using `exclude`.
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
# You can also pass a tuple to `exclude`.
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
# Using `fields` and `exclude`.
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
def test_custom_form_meta_exclude_with_readonly(self):
"""
The custom ModelForm's `Meta.exclude` is respected when used in
conjunction with `ModelAdmin.readonly_fields` and when no
`ModelAdmin.exclude` is defined (#14496).
"""
# With ModelAdmin
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
readonly_fields = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['sign_date'])
# With InlineModelAdmin
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
readonly_fields = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'id', 'DELETE'])
def test_custom_formfield_override_readonly(self):
class AdminBandForm(forms.ModelForm):
name = forms.CharField()
class Meta:
exclude = ()
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
readonly_fields = ['name']
ma = BandAdmin(Band, self.site)
# `name` shouldn't appear in base_fields because it's part of
# readonly_fields.
self.assertEqual(
list(ma.get_form(request).base_fields),
['bio', 'sign_date']
)
# But it should appear in get_fields()/fieldsets() so it can be
# displayed as read-only.
self.assertEqual(
list(ma.get_fields(request)),
['bio', 'sign_date', 'name']
)
self.assertEqual(
list(ma.get_fieldsets(request)),
[(None, {'fields': ['bio', 'sign_date', 'name']})]
)
def test_custom_form_meta_exclude(self):
"""
The custom ModelForm's `Meta.exclude` is overridden if
`ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined (#14496).
"""
# With ModelAdmin
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
exclude = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['bio', 'sign_date'])
# With InlineModelAdmin
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'day', 'id', 'DELETE']
)
def test_overriding_get_exclude(self):
class BandAdmin(ModelAdmin):
def get_exclude(self, request, obj=None):
return ['name']
self.assertEqual(
list(BandAdmin(Band, self.site).get_form(request).base_fields),
['bio', 'sign_date']
)
def test_get_exclude_overrides_exclude(self):
class BandAdmin(ModelAdmin):
exclude = ['bio']
def get_exclude(self, request, obj=None):
return ['name']
self.assertEqual(
list(BandAdmin(Band, self.site).get_form(request).base_fields),
['bio', 'sign_date']
)
def test_get_exclude_takes_obj(self):
class BandAdmin(ModelAdmin):
def get_exclude(self, request, obj=None):
if obj:
return ['sign_date']
return ['name']
self.assertEqual(
list(BandAdmin(Band, self.site).get_form(request, self.band).base_fields),
['name', 'bio']
)
def test_custom_form_validation(self):
# If a form is specified, it should use it allowing custom validation
# to work properly. This won't break any of the admin widgets or media.
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(type(ma.get_form(request).base_fields['sign_date'].widget), AdminDateWidget)
def test_form_exclude_kwarg_override(self):
"""
The `exclude` kwarg passed to `ModelAdmin.get_form()` overrides all
other declarations (#8999).
"""
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['name']
class BandAdmin(ModelAdmin):
exclude = ['sign_date']
form = AdminBandForm
def get_form(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['bio']
return super().get_form(request, obj, **kwargs)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
def test_formset_exclude_kwarg_override(self):
"""
The `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
overrides all other declarations (#8999).
"""
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_formset(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['opening_band']
return super().get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE']
)
def test_formset_overriding_get_exclude_with_form_fields(self):
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['main_band', 'opening_band', 'day', 'transport']
class ConcertInline(TabularInline):
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_exclude(self, request, obj=None):
return ['opening_band']
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE']
)
def test_formset_overriding_get_exclude_with_form_exclude(self):
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_exclude(self, request, obj=None):
return ['opening_band']
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE']
)
def test_queryset_override(self):
# If the queryset of a ModelChoiceField in a custom form is overridden,
# RelatedFieldWidgetWrapper doesn't mess that up.
band2 = Band.objects.create(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
ma = ModelAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(
str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band" required>'
'<option value="" selected>---------</option>'
'<option value="%d">The Beatles</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % (band2.id, self.band.id)
)
class AdminConcertForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdminWithForm(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdminWithForm(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(
str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band" required>'
'<option value="" selected>---------</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % self.band.id
)
def test_regression_for_ticket_15820(self):
"""
`obj` is passed from `InlineModelAdmin.get_fieldsets()` to
`InlineModelAdmin.get_formset()`.
"""
class CustomConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['day']
class ConcertInline(TabularInline):
model = Concert
fk_name = 'main_band'
def get_formset(self, request, obj=None, **kwargs):
if obj:
kwargs['form'] = CustomConcertForm
return super().get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
ma = BandAdmin(Band, self.site)
inline_instances = ma.get_inline_instances(request)
fieldsets = list(inline_instances[0].get_fieldsets(request))
self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
self.assertEqual(fieldsets[0][1]['fields'], ['day'])
# radio_fields behavior ###########################################
def test_default_foreign_key_widget(self):
# First, without any radio_fields specified, the widgets for ForeignKey
# and fields with choices specified ought to be a basic Select widget.
# ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
# they need to be handled properly when type checking. For Select fields, all of
# the choices lists have a first entry of dashes.
cma = ModelAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['opening_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
self.assertEqual(
list(cmafa.base_fields['day'].widget.choices),
[('', '---------'), (1, 'Fri'), (2, 'Sat')]
)
self.assertEqual(type(cmafa.base_fields['transport'].widget), Select)
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
# Now specify all the fields as radio_fields. Widgets should now be
# RadioSelect, and the choices list should have a first entry of 'None' if
# blank=True for the model field. Finally, the widget should have the
# 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs, {'class': 'radiolist inline'})
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['opening_band'].widget.widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs, {'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', 'None'), (self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['day'].widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs, {'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices), [(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs, {'class': 'radiolist inline'})
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]
)
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['extra', 'transport', 'id', 'DELETE', 'main_band']
)
def test_log_actions(self):
ma = ModelAdmin(Band, self.site)
mock_request = MockRequest()
mock_request.user = User.objects.create(username='bill')
content_type = get_content_type_for_model(self.band)
tests = (
(ma.log_addition, ADDITION, {'added': {}}),
(ma.log_change, CHANGE, {'changed': {'fields': ['name', 'bio']}}),
(ma.log_deletion, DELETION, str(self.band)),
)
for method, flag, message in tests:
with self.subTest(name=method.__name__):
created = method(mock_request, self.band, message)
fetched = LogEntry.objects.filter(action_flag=flag).latest('id')
self.assertEqual(created, fetched)
self.assertEqual(fetched.action_flag, flag)
self.assertEqual(fetched.content_type, content_type)
self.assertEqual(fetched.object_id, str(self.band.pk))
self.assertEqual(fetched.user, mock_request.user)
if flag == DELETION:
self.assertEqual(fetched.change_message, '')
self.assertEqual(fetched.object_repr, message)
else:
self.assertEqual(fetched.change_message, str(message))
self.assertEqual(fetched.object_repr, str(self.band))
def test_get_autocomplete_fields(self):
class NameAdmin(ModelAdmin):
search_fields = ['name']
class SongAdmin(ModelAdmin):
autocomplete_fields = ['featuring']
fields = ['featuring', 'band']
class OtherSongAdmin(SongAdmin):
def get_autocomplete_fields(self, request):
return ['band']
self.site.register(Band, NameAdmin)
try:
# Uses autocomplete_fields if not overridden.
model_admin = SongAdmin(Song, self.site)
form = model_admin.get_form(request)()
self.assertIsInstance(form.fields['featuring'].widget.widget, AutocompleteSelectMultiple)
# Uses overridden get_autocomplete_fields
model_admin = OtherSongAdmin(Song, self.site)
form = model_admin.get_form(request)()
self.assertIsInstance(form.fields['band'].widget.widget, AutocompleteSelect)
finally:
self.site.unregister(Band)
class ModelAdminPermissionTests(SimpleTestCase):
class MockUser:
def has_module_perms(self, app_label):
if app_label == "modeladmin":
return True
return False
class MockAddUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.add_band":
return True
return False
class MockChangeUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.change_band":
return True
return False
class MockDeleteUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.delete_band":
return True
return False
def test_has_add_permission(self):
"""
has_add_permission returns True for users who can add objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_add_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_add_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_add_permission(request))
def test_has_change_permission(self):
"""
has_change_permission returns True for users who can edit objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_change_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_change_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_change_permission(request))
def test_has_delete_permission(self):
"""
has_delete_permission returns True for users who can delete objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_delete_permission(request))
def test_has_module_permission(self):
"""
        has_module_permission returns True for users who have any permission
for the module and False for users who don't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_module_permission(request))
original_app_label = ma.opts.app_label
ma.opts.app_label = 'anotherapp'
try:
request.user = self.MockAddUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_module_permission(request))
finally:
ma.opts.app_label = original_app_label
avg_line_length: 38.055046 | max_line_length: 114 | alphanum_fraction: 0.602666

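A compact sketch of the pattern most of these tests exercise, reusing Band, request and the admin imports defined above: fields and readonly_fields on a ModelAdmin shape the generated form. This is illustrative, not part of the test suite:

```python
class SketchBandAdmin(ModelAdmin):
    fields = ['name', 'bio']
    readonly_fields = ['bio']

ma = SketchBandAdmin(Band, AdminSite())
# Read-only fields are dropped from the editable form fields...
assert list(ma.get_form(request).base_fields) == ['name']
# ...but still reported by get_fields() so they can be rendered read-only.
assert list(ma.get_fields(request)) == ['name', 'bio']
```
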
hexsha: dfb184c04843ec9fc24709c674330d9f57b1e3f7 | size: 4,241 | ext: py | lang: Python
max_stars: path=pybrain/datasets/supervised.py | repo=zygmuntz/pybrain | head=049c7f0dc8896f149813e14e4d2c0ab0c272cde4 | licenses=["BSD-3-Clause"] | count=2 | min=2015-11-12T16:04:33.000Z | max=2016-01-25T21:48:44.000Z
max_issues: path=pybrain/datasets/supervised.py | repo=zygmuntz/pybrain | head=049c7f0dc8896f149813e14e4d2c0ab0c272cde4 | licenses=["BSD-3-Clause"] | count=null | min=null | max=null
max_forks: path=pybrain/datasets/supervised.py | repo=zygmuntz/pybrain | head=049c7f0dc8896f149813e14e4d2c0ab0c272cde4 | licenses=["BSD-3-Clause"] | count=7 | min=2015-05-27T22:10:03.000Z | max=2019-09-20T13:25:05.000Z
content:
from __future__ import print_function
__author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
from random import sample
from scipy import isscalar
from pybrain.datasets.dataset import DataSet
from pybrain.utilities import fListToString
class SupervisedDataSet(DataSet):
"""SupervisedDataSets have two fields, one for input and one for the target.
"""
def __init__(self, inp, target):
"""Initialize an empty supervised dataset.
Pass `inp` and `target` to specify the dimensions of the input and
target vectors."""
DataSet.__init__(self)
if isscalar(inp):
# add input and target fields and link them
self.addField('input', inp)
self.addField('target', target)
else:
self.setField('input', inp)
self.setField('target', target)
self.linkFields(['input', 'target'])
# reset the index marker
self.index = 0
# the input and target dimensions
self.indim = self.getDimension('input')
self.outdim = self.getDimension('target')
def __reduce__(self):
_, _, state, _, _ = super(SupervisedDataSet, self).__reduce__()
creator = self.__class__
args = self.indim, self.outdim
return creator, args, state, iter([]), iter({})
def addSample(self, inp, target):
"""Add a new sample consisting of `input` and `target`."""
self.appendLinked(inp, target)
def getSample(self, index=None):
"""Return a sample at `index` or the current sample."""
return self.getLinked(index)
def setField(self, label, arr, **kwargs):
"""Set the given array `arr` as the new array of the field specfied by
`label`."""
DataSet.setField(self, label, arr, **kwargs)
# refresh dimensions, in case any of these fields were modified
if label == 'input':
self.indim = self.getDimension('input')
elif label == 'target':
self.outdim = self.getDimension('target')
def _provideSequences(self):
"""Return an iterator over sequence lists, although the dataset contains
only single samples."""
return iter([[x] for x in iter(self)])
def evaluateMSE(self, f, **args):
"""Evaluate the predictions of a function on the dataset and return the
Mean Squared Error, incorporating importance."""
ponderation = 0.
totalError = 0
for seq in self._provideSequences():
e, p = self._evaluateSequence(f, seq, **args)
totalError += e
ponderation += p
assert ponderation > 0
return totalError/ponderation
def _evaluateSequence(self, f, seq, verbose = False):
"""Return the ponderated MSE over one sequence."""
totalError = 0.
ponderation = 0.
for input, target in seq:
res = f(input)
e = 0.5 * sum((target-res).flatten()**2)
totalError += e
ponderation += len(target)
if verbose:
print(( 'out: ', fListToString( list( res ) )))
print(( 'correct:', fListToString( target )))
print(( 'error: % .8f' % e))
return totalError, ponderation
def evaluateModuleMSE(self, module, averageOver = 1, **args):
"""Evaluate the predictions of a module on a dataset and return the MSE
(potentially average over a number of epochs)."""
res = 0.
for dummy in range(averageOver):
module.reset()
res += self.evaluateMSE(module.activate, **args)
return res/averageOver
def splitWithProportion(self, proportion = 0.5):
"""Produce two new datasets, the first one containing the fraction given
by `proportion` of the samples."""
leftIndices = set(sample(list(range(len(self))), int(len(self)*proportion)))
leftDs = self.copy()
leftDs.clear()
rightDs = leftDs.copy()
index = 0
for sp in self:
if index in leftIndices:
leftDs.addSample(*sp)
else:
rightDs.addSample(*sp)
index += 1
return leftDs, rightDs
avg_line_length: 35.341667 | max_line_length: 84 | alphanum_fraction: 0.594435

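A short usage sketch for the class above: a 2-input, 1-target dataset with a handful of samples and a proportional split:

```python
from pybrain.datasets.supervised import SupervisedDataSet

ds = SupervisedDataSet(2, 1)            # input dim 2, target dim 1
ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
ds.addSample((1, 1), (0,))
print(len(ds), ds.indim, ds.outdim)     # 4 2 1
train, test = ds.splitWithProportion(0.75)
```
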
hexsha: fdbc103278c41cf42171f452d500606d25e3c8f7 | size: 1,791 | ext: py | lang: Python
max_stars: path=var/spack/repos/builtin/packages/neko/package.py | repo=jeanbez/spack | head=f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | licenses=["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | count=null | min=null | max=null
max_issues: path=var/spack/repos/builtin/packages/neko/package.py | repo=jeanbez/spack | head=f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | licenses=["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | count=8 | min=2021-11-09T20:28:40.000Z | max=2022-03-15T03:26:33.000Z
max_forks: path=var/spack/repos/builtin/packages/neko/package.py | repo=jeanbez/spack | head=f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | licenses=["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | count=2 | min=2019-02-08T20:37:20.000Z | max=2019-03-31T15:19:26.000Z
content:
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Neko(AutotoolsPackage, CudaPackage, ROCmPackage):
"""Neko: A modern, portable, and scalable framework
for high-fidelity computational fluid dynamics
"""
homepage = "https://github.com/ExtremeFLOW/neko"
git = "https://github.com/ExtremeFLOW/neko.git"
url = "https://github.com/ExtremeFLOW/neko/releases/download/v0.3.2/neko-0.3.2.tar.gz"
maintainers = ['njansson']
version('0.3.2', sha256='0628910aa9838a414f2f27d09ea9474d1b3d7dcb5a7715556049a2fdf81a71ae')
version('0.3.0', sha256='e46bef72f694e59945514ab8b1ad7d74f87ec9dca2ba2b230e2148662baefdc8')
version('develop', branch='develop')
variant('parmetis', default=False, description='Build with support for parmetis')
variant('xsmm', default=False, description='Build with support for libxsmm')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('pkgconfig', type='build')
depends_on('parmetis', when='+parmetis')
depends_on('libxsmm', when='+xsmm')
depends_on('mpi')
depends_on('blas')
depends_on('lapack')
def configure_args(self):
args = []
args += self.with_or_without('parmetis')
args += self.with_or_without('libxsmm', variant='xsmm')
args += self.with_or_without('cuda', activation_value='prefix')
        rocm_fn = lambda x: self.spec['hip'].prefix
args += self.with_or_without('hip', variant='rocm', activation_value=rocm_fn)
return args
avg_line_length: 39.8 | max_line_length: 95 | alphanum_fraction: 0.692351

hexsha: cf068f0406fe41221f43239cabf77a0f144236e1 | size: 8,826 | ext: py | lang: Python
max_stars: path=tensor2tensor/data_generators/celeba.py | repo=repoloper/tensor2tensor | head=2fd91d34b8e6d79599c0612e446175174e838b9d | licenses=["Apache-2.0"] | count=4 | min=2019-04-20T23:28:41.000Z | max=2021-01-03T03:21:43.000Z
max_issues: path=tensor2tensor/data_generators/celeba.py | repo=repoloper/tensor2tensor | head=2fd91d34b8e6d79599c0612e446175174e838b9d | licenses=["Apache-2.0"] | count=null | min=null | max=null
max_forks: path=tensor2tensor/data_generators/celeba.py | repo=repoloper/tensor2tensor | head=2fd91d34b8e6d79599c0612e446175174e838b9d | licenses=["Apache-2.0"] | count=1 | min=2019-01-29T18:44:17.000Z | max=2019-01-29T18:44:17.000Z
content:
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CelebA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import zipfile
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import image_utils
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_problem
class ImageCeleba(image_utils.ImageProblem):
"""CelebA dataset, aligned and cropped images."""
IMG_DATA = ("img_align_celeba.zip",
"https://drive.google.com/uc?export=download&"
"id=0B7EVK8r0v71pZjFTYXZWM3FlRnM")
LANDMARKS_DATA = ("celeba_landmarks_align",
"https://drive.google.com/uc?export=download&"
"id=0B7EVK8r0v71pd0FJY3Blby1HUTQ")
ATTR_DATA = ("celeba_attr", "https://drive.google.com/uc?export=download&"
"id=0B7EVK8r0v71pblRyaVFSWGxPY0U")
LANDMARK_HEADINGS = ("lefteye_x lefteye_y righteye_x righteye_y "
"nose_x nose_y leftmouth_x leftmouth_y rightmouth_x "
"rightmouth_y").split()
ATTR_HEADINGS = (
"5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs "
"Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair "
"Bushy_Eyebrows Chubby Double_Chin Eyeglasses Goatee Gray_Hair "
"Heavy_Makeup High_Cheekbones Male Mouth_Slightly_Open Mustache "
"Narrow_Eyes No_Beard Oval_Face Pale_Skin Pointy_Nose Receding_Hairline "
"Rosy_Cheeks Sideburns Smiling Straight_Hair Wavy_Hair Wearing_Earrings "
"Wearing_Hat Wearing_Lipstick Wearing_Necklace Wearing_Necktie Young"
).split()
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.input_modality = {"inputs": ("image:identity", 256)}
p.target_modality = ("image:identity", 256)
p.batch_size_multiplier = 256
p.input_space_id = 1
p.target_space_id = 1
def generator(self, tmp_dir, how_many, start_from=0):
"""Image generator for CELEBA dataset.
Args:
tmp_dir: path to temporary storage directory.
how_many: how many images and labels to generate.
start_from: from which image to start.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as JPEG,
* image/format: the string "jpeg" representing image format,
"""
out_paths = []
for fname, url in [self.IMG_DATA, self.LANDMARKS_DATA, self.ATTR_DATA]:
path = generator_utils.maybe_download_from_drive(tmp_dir, fname, url)
out_paths.append(path)
img_path, landmarks_path, attr_path = out_paths # pylint: disable=unbalanced-tuple-unpacking
unzipped_folder = img_path[:-4]
if not tf.gfile.Exists(unzipped_folder):
zipfile.ZipFile(img_path, "r").extractall(tmp_dir)
with tf.gfile.Open(landmarks_path) as f:
landmarks_raw = f.read()
with tf.gfile.Open(attr_path) as f:
attr_raw = f.read()
def process_landmarks(raw_data):
landmarks = {}
lines = raw_data.split("\n")
headings = lines[1].strip().split()
for line in lines[2:-1]:
values = line.strip().split()
img_name = values[0]
landmark_values = [int(v) for v in values[1:]]
landmarks[img_name] = landmark_values
return landmarks, headings
def process_attrs(raw_data):
attrs = {}
lines = raw_data.split("\n")
headings = lines[1].strip().split()
for line in lines[2:-1]:
values = line.strip().split()
img_name = values[0]
attr_values = [int(v) for v in values[1:]]
attrs[img_name] = attr_values
return attrs, headings
img_landmarks, _ = process_landmarks(landmarks_raw)
img_attrs, _ = process_attrs(attr_raw)
image_files = list(sorted(tf.gfile.Glob(unzipped_folder + "/*.jpg")))
for filename in image_files[start_from:start_from + how_many]:
img_name = os.path.basename(filename)
landmarks = img_landmarks[img_name]
attrs = img_attrs[img_name]
with tf.gfile.Open(filename, "rb") as f:
encoded_image_data = f.read()
yield {
"image/encoded": [encoded_image_data],
"image/format": ["jpeg"],
"attributes": attrs,
"landmarks": landmarks,
}
@property
def train_shards(self):
return 100
@property
def dev_shards(self):
return 10
@property
def test_shards(self):
return 10
def generate_data(self, data_dir, tmp_dir, task_id=-1):
train_gen = self.generator(tmp_dir, 162770)
train_paths = self.training_filepaths(
data_dir, self.train_shards, shuffled=False)
generator_utils.generate_files(train_gen, train_paths)
dev_gen = self.generator(tmp_dir, 19867, 162770)
dev_paths = self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)
generator_utils.generate_files(dev_gen, dev_paths)
test_gen = self.generator(tmp_dir, 19962, 162770+19867)
test_paths = self.test_filepaths(data_dir, self.test_shards, shuffled=False)
generator_utils.generate_files(test_gen, test_paths)
generator_utils.shuffle_dataset(train_paths + dev_paths + test_paths)
@registry.register_problem
class ImageCelebaMultiResolution(ImageCeleba):
"""CelebA at multiple resolutions.
The resolutions are specified as a hyperparameter during preprocessing.
"""
def dataset_filename(self):
return "image_celeba"
def preprocess_example(self, example, mode, hparams):
image = example["inputs"]
# Get resize method. Include a default if not specified, or if it's not in
# TensorFlow's collection of pre-implemented resize methods.
resize_method = getattr(hparams, "resize_method", "BICUBIC")
resize_method = getattr(tf.image.ResizeMethod, resize_method, resize_method)
# Remove boundaries in CelebA images. Remove 40 pixels each side
# vertically and 20 pixels each side horizontally.
image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)
highest_res = hparams.resolutions[-1]
if resize_method == "DILATED":
# Resize image so that dilated subsampling is properly divisible.
scaled_image = image_utils.resize_by_area(image, highest_res)
scaled_images = image_utils.make_multiscale_dilated(
scaled_image, hparams.resolutions, num_channels=self.num_channels)
else:
scaled_images = image_utils.make_multiscale(
image, hparams.resolutions,
resize_method=resize_method, num_channels=self.num_channels)
# Pack tuple of scaled images into one tensor. We do this by enforcing the
# columns to match for every resolution.
example["inputs"] = image
example["targets"] = tf.concat([
tf.reshape(scaled_image,
[res**2 // highest_res, highest_res, self.num_channels])
for scaled_image, res in zip(scaled_images, hparams.resolutions)],
axis=0)
return example
@registry.register_problem
class Img2imgCeleba(ImageCeleba):
"""8px to 32px problem."""
def dataset_filename(self):
return "image_celeba"
def preprocess_example(self, example, unused_mode, unused_hparams):
image = example["inputs"]
# Remove boundaries in CelebA images. Remove 40 pixels each side
# vertically and 20 pixels each side horizontally.
image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)
image_8 = image_utils.resize_by_area(image, 8)
image_32 = image_utils.resize_by_area(image, 32)
example["inputs"] = image_8
example["targets"] = image_32
return example
@registry.register_problem
class Img2imgCeleba64(Img2imgCeleba):
"""8px to 64px problem."""
def preprocess_example(self, example, unused_mode, unused_hparams):
image = example["inputs"]
# Remove boundaries in CelebA images. Remove 40 pixels each side
# vertically and 20 pixels each side horizontally.
image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)
image_8 = image_utils.resize_by_area(image, 8)
image_64 = image_utils.resize_by_area(image, 64)
example["inputs"] = image_8
example["targets"] = image_64
return example
avg_line_length: 36.92887 | max_line_length: 97 | alphanum_fraction: 0.704736

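A sketch of the crop-and-resize step used by Img2imgCeleba above, applied to a dummy tensor of CelebA's aligned 218x178 shape (TF 1.x graph style, matching the file's imports):

```python
import tensorflow as tf
from tensor2tensor.data_generators import image_utils

image = tf.zeros([218, 178, 3], dtype=tf.uint8)
# Trim 40 px top/bottom and 20 px left/right, as in preprocess_example().
cropped = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)
inputs = image_utils.resize_by_area(cropped, 8)    # low-resolution "inputs"
targets = image_utils.resize_by_area(cropped, 32)  # higher-resolution "targets"
```
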
hexsha: 784639434dd8d153d1ac72cfc6d9fcbbcca5eba4 | size: 544 | ext: py | lang: Python
max_stars: path=contento/processors.py | repo=inmagik/contento | head=b7430e42aedd80809a57d599a5841c60fd16c21f | licenses=["MIT"] | count=1 | min=2016-10-11T17:00:30.000Z | max=2016-10-11T17:00:30.000Z
max_issues: path=contento/processors.py | repo=inmagik/contento | head=b7430e42aedd80809a57d599a5841c60fd16c21f | licenses=["MIT"] | count=23 | min=2016-08-05T17:07:31.000Z | max=2021-06-10T23:30:06.000Z
max_forks: path=contento/processors.py | repo=inmagik/contento | head=b7430e42aedd80809a57d599a5841c60fd16c21f | licenses=["MIT"] | count=1 | min=2016-09-04T23:39:40.000Z | max=2016-09-04T23:39:40.000Z
content:
import re
from django.core.urlresolvers import reverse
contento_link = "contento://(?P<url>[\w-]+)"
contento_link_re = re.compile(contento_link)
class InternalLinks(object):
def process(self, text):
def f(matchf):
match = matchf.groups()
url = match[0] or "_root"
try:
link = reverse('contento-cms', kwargs={"page_url":url})
return link
except:
return "#cms:notfound"
text = contento_link_re.sub(f, text)
return text
avg_line_length: 27.2 | max_line_length: 71 | alphanum_fraction: 0.568015

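A usage sketch for the processor above; it assumes a configured Django project with a URL pattern named 'contento-cms' that takes a page_url kwarg, as the reverse() call requires:

```python
processor = InternalLinks()
html = 'Read more on <a href="contento://about-us">this page</a>.'
# Each contento://<slug> link is replaced by reverse('contento-cms', page_url=<slug>),
# or by "#cms:notfound" when the route cannot be resolved.
print(processor.process(html))
```
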
hexsha: 682e695bd98be1372df4814b2220fcb45f8c800c | size: 153 | ext: py | lang: Python
max_stars: path=Chapter 08/ch8_22.py | repo=bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | head=f6a4194684515495d00aa38347a725dd08f39a0c | licenses=["MIT"] | count=null | min=null | max=null
max_issues: path=Chapter 08/ch8_22.py | repo=bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | head=f6a4194684515495d00aa38347a725dd08f39a0c | licenses=["MIT"] | count=null | min=null | max=null
max_forks: path=Chapter 08/ch8_22.py | repo=bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | head=f6a4194684515495d00aa38347a725dd08f39a0c | licenses=["MIT"] | count=null | min=null | max=null
content:
cities = ['Mumbai', 'London', 'Paris', 'New York']
pos = cities.index('Bangkok')
print('The index of Bangkok is: ', pos)
# error Bangkok not in list
avg_line_length: 30.6 | max_line_length: 51 | alphanum_fraction: 0.647059

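The snippet above raises ValueError because 'Bangkok' is not in the list; a guarded variant avoids the error:

```python
cities = ['Mumbai', 'London', 'Paris', 'New York']
if 'Bangkok' in cities:
    print('The index of Bangkok is: ', cities.index('Bangkok'))
else:
    print('Bangkok is not in the list')
```
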
hexsha: b2e9f70468ad0034eecda9d18963e7bf3e3fa45a | size: 4,932 | ext: py | lang: Python
max_stars: path=gym_malware/envs/malware_env.py | repo=portalnir/malware-adversarial | head=296a42f9d11ebfc8271cde469916c90f98b80338 | licenses=["MIT"] | count=1 | min=2021-12-15T01:23:11.000Z | max=2021-12-15T01:23:11.000Z
max_issues: path=gym_malware/envs/malware_env.py | repo=portalnir/malware-adversarial | head=296a42f9d11ebfc8271cde469916c90f98b80338 | licenses=["MIT"] | count=4 | min=2020-11-13T18:37:24.000Z | max=2022-02-10T01:26:52.000Z
max_forks: path=gym_malware/envs/malware_env.py | repo=portalnir/malware-adversarial | head=296a42f9d11ebfc8271cde469916c90f98b80338 | licenses=["MIT"] | count=null | min=null | max=null
content:
import random
import gym
from collections import OrderedDict
from gym import error, spaces, utils
from gym.utils import seeding
import hashlib
import os
import numpy as np
from gym_malware.envs.utils import interface, pefeatures
from gym_malware.envs.controls import manipulate2 as manipulate
ACTION_LOOKUP = {i: act for i, act in enumerate(
manipulate.ACTION_TABLE.keys())}
# change this to function to the AV engine to attack
# function should be of the form
# def label_function( bytez ):
# # returns 0.0 if benign else 1.0 if malware
label_function = interface.get_label_local
class MalwareEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, sha256list, random_sample=True, maxturns=3, output_path='evaded/blackbox/', cache=False):
self.cache = cache
self.available_sha256 = sha256list
self.action_space = spaces.Discrete(len(ACTION_LOOKUP))
self.maxturns = maxturns
self.feature_extractor = pefeatures.PEFeatureExtractor()
self.random_sample = random_sample
self.sample_iteration_index = 0
self.output_path = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)))), output_path)
        if not os.path.exists(self.output_path):
            os.makedirs(self.output_path)
self.history = OrderedDict()
self.samples = {}
if self.cache:
for sha256 in self.available_sha256:
try:
                    self.samples[sha256] = interface.fetch_file(sha256)
except interface.FileRetrievalFailure:
print("failed fetching file")
continue # try a new sha256...this one can't be retrieved from storage
self.reset()
def step(self, action_index):
self.turns += 1
self._take_action(action_index) # update self.bytez
# get reward
try:
self.label = label_function(self.bytez)
except interface.ClassificationFailure:
print("Failed to classify file")
episode_over = True
else:
self.observation_space = self.feature_extractor.extract(self.bytez)
if self.label == 0:
# we win!
reward = 10.0 # !! a strong reward
episode_over = True
self.history[self.sha256]['evaded'] = True
# store sample to output directory
m = hashlib.sha256()
m.update( self.bytez )
sha256 = m.hexdigest()
self.history[self.sha256]['evaded_sha256'] = sha256
with open( os.path.join( self.output_path, sha256), 'wb') as outfile:
outfile.write( self.bytez )
elif self.turns >= self.maxturns:
# out of turns :(
reward = 0.0
episode_over = True
else:
reward = 0.0
episode_over = False
if episode_over:
print("episode is over: reward = {}!".format(reward))
return self.observation_space, reward, episode_over, {}
def _take_action(self, action_index):
assert action_index < len(ACTION_LOOKUP)
action = ACTION_LOOKUP[action_index]
print(action)
self.history[self.sha256]['actions'].append(action)
self.bytez = bytes( manipulate.modify_without_breaking(self.bytez, [action]))
def reset(self):
self.turns = 0
while True:
# get the new environment
if self.random_sample:
self.sha256 = random.choice(self.available_sha256)
            else:  # otherwise iterate through the list in order
self.sha256 = self.available_sha256[ self.sample_iteration_index % len(self.available_sha256) ]
self.sample_iteration_index += 1
self.history[self.sha256] = {'actions': [], 'evaded': False}
if self.cache:
self.bytez = self.samples[self.sha256]
else:
try:
self.bytez = interface.fetch_file(self.sha256)
except interface.FileRetrievalFailure:
print("failed fetching file")
continue # try a new sha256...this one can't be retrieved from storage
original_label = label_function(self.bytez)
if original_label == 0:
# skip this one, it's already benign, and the agent will learn nothing
continue
print("new sha256: {}".format(self.sha256))
self.observation_space = self.feature_extractor.extract(self.bytez)
break # we're done here
return np.asarray(self.observation_space)
def render(self, mode='human', close=False):
pass
avg_line_length: 36.264706 | max_line_length: 112 | alphanum_fraction: 0.587388

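A sketch of driving the environment above with random actions; available_sha256 stands in for a caller-supplied list of sample hashes, and the local label oracle behind interface.get_label_local is assumed to be set up:

```python
env = MalwareEnv(sha256list=available_sha256, maxturns=3)
observation = env.reset()
done = False
while not done:
    action = env.action_space.sample()          # pick a random manipulation
    observation, reward, done, info = env.step(action)
print('episode reward:', reward)
```
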
hexsha: f642133ce9997be66942479c81a5dccf93f5a1ca | size: 2,993 | ext: py | lang: Python
max_stars: path=my_mourse/api_views.py | repo=DarishkaAMS/Django_Projects-Mourse_catalog | head=d11606bc75b6887a260236a6546a4ea3ce050088 | licenses=["MIT"] | count=null | min=null | max=null
max_issues: path=my_mourse/api_views.py | repo=DarishkaAMS/Django_Projects-Mourse_catalog | head=d11606bc75b6887a260236a6546a4ea3ce050088 | licenses=["MIT"] | count=null | min=null | max=null
max_forks: path=my_mourse/api_views.py | repo=DarishkaAMS/Django_Projects-Mourse_catalog | head=d11606bc75b6887a260236a6546a4ea3ce050088 | licenses=["MIT"] | count=null | min=null | max=null
content:
import json
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import JsonResponse
from django.shortcuts import redirect, render, get_object_or_404
from django.template.loader import get_template
from django.views.decorators.csrf import csrf_exempt
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from .forms import MourseForm, MourseModelForm
from .models import Mourse
from .serializers import MourseSerializer
@api_view(["GET", ])
@csrf_exempt
# @permission_classes([IsAuthenticated])
def mourse_list_view(request):
try:
mourse = Mourse.objects.all()
except Mourse.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = MourseSerializer(mourse, many=True)
return Response(serializer.data)
@api_view(["POST", ])
@csrf_exempt
def mourse_create_view(request):
mourse = Mourse()
if request.method == "POST":
serializer = MourseSerializer(mourse, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET", ])
@csrf_exempt
def mourse_detail_view(request, slug):
try:
mourse = Mourse.objects.get(slug=slug)
except Mourse.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = MourseSerializer(mourse)
return Response(serializer.data)
@api_view(["PUT", ])
@staff_member_required
def mourse_update_view(request, slug):
try:
mourse = Mourse.objects.get(slug=slug)
except Mourse.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'PUT':
serializer = MourseSerializer(mourse, data=request.data)
data = {}
if serializer.is_valid():
serializer.save()
data['success'] = 'Your Mouse has been changed successfully!'
return Response(data=data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["DELETE", ])
@staff_member_required
def mourse_delete_view(request, slug):
try:
mourse = Mourse.objects.get(slug=slug)
except Mourse.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'DELETE':
operation = mourse.delete()
data = {}
if operation:
data['success'] = 'Your Mouse has been deleted!'
return redirect('/my_mourse')
else:
data['failure'] = 'Your Mouse has NOT been deleted!'
return Response(data=data)
| 31.505263
| 78
| 0.710324
|
2d51ca302a761ab48ca0a98c199bcbe2dc9dd45b
| 2,166
|
py
|
Python
|
trade_remedies_api/documents/migrations/0005_auto_20190502_0912.py
|
uktrade/trade-remedies-api
|
fbe2d142ef099c7244788a0f72dd1003eaa7edce
|
[
"MIT"
] | 1
|
2020-08-13T10:37:15.000Z
|
2020-08-13T10:37:15.000Z
|
trade_remedies_api/documents/migrations/0005_auto_20190502_0912.py
|
uktrade/trade-remedies-api
|
fbe2d142ef099c7244788a0f72dd1003eaa7edce
|
[
"MIT"
] | 4
|
2020-09-10T13:41:52.000Z
|
2020-12-16T09:00:21.000Z
|
trade_remedies_api/documents/migrations/0005_auto_20190502_0912.py
|
uktrade/trade-remedies-api
|
fbe2d142ef099c7244788a0f72dd1003eaa7edce
|
[
"MIT"
] | null | null | null |
import logging
from django.db import migrations
from documents.models import DocumentBundle
logger = logging.getLogger(__name__)
def migrate_case_documents(apps, schema_editor):
try:
from cases.models import CaseDocument, SubmissionType
from cases.constants import SUBMISSION_TYPE_REGISTER_INTEREST
case_documents = CaseDocument.objects.all()
reg_interest = SubmissionType.objects.get(id=SUBMISSION_TYPE_REGISTER_INTEREST)
case_index = {}
logger.info("Indexing old case documents")
for case_doc in case_documents:
case_id = str(case_doc.case.id)
case_index.setdefault(
case_id,
{
"case": case_doc.case,
"created_by": case_doc.created_by,
"created_at": case_doc.created_at,
"documents": [],
},
)
case_index[case_id]["documents"].append(case_doc.document)
logger.info(f"Indexed {len(case_index)} cases")
for _, params in case_index.items():
bundle, created = DocumentBundle.objects.get_or_create(
case=params["case"], submission_type=reg_interest
)
bundle.status = "LIVE"
bundle.created_by = params["created_by"]
bundle.created_at = params["created_at"]
bundle.finalised_by = params["created_by"]
bundle.finalised_at = params["created_at"]
bundle.save()
logger.info(f"Bundle created for {bundle.case} (created ?= {created})")
bundle.documents.clear()
logger.info(f"Adding {len(params['documents'])} documents to bundle")
for doc in params["documents"]:
bundle.documents.add(doc)
except Exception as exc:
logger.error(
f"Failed. Are we in test? If so that's ok... (reason in exception)", exc_info=True
)
class Migration(migrations.Migration):
dependencies = [
("documents", "0004_auto_20190424_1409"),
]
operations = [
# migrations.RunPython(migrate_case_documents)
]
| 35.508197
| 94
| 0.604801
|
416603bac453c4c7e73891d53bdfe21ffbc54d80
| 1,466
|
py
|
Python
|
_Tracking/MLStartPoint.py
|
horribleheffalump/AUVResearch
|
f9bf37da4a74662fcd1df88a8177a654fccf0728
|
[
"MIT"
] | 4
|
2020-06-23T07:24:40.000Z
|
2021-05-24T04:57:52.000Z
|
_Tracking/MLStartPoint.py
|
horribleheffalump/AUVResearch
|
f9bf37da4a74662fcd1df88a8177a654fccf0728
|
[
"MIT"
] | null | null | null |
_Tracking/MLStartPoint.py
|
horribleheffalump/AUVResearch
|
f9bf37da4a74662fcd1df88a8177a654fccf0728
|
[
"MIT"
] | 3
|
2020-09-23T07:14:26.000Z
|
2021-07-10T12:50:28.000Z
|
from StaticEstimates.ConditionalMean import ConditionalMean
from StaticEstimates.PriorLearn import PriorLearn
from _Tracking.TrackingModel import *
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import MultiTaskLassoCV
from sklearn.pipeline import Pipeline
from joblib import dump, load
# from sklearn.model_selection import train_test_split
Mtrain = 10000 # number of sample paths for CMNF parameters estimation (train set)
ml_train_part = 2000/(Mtrain)
pipe_file_name = "Z:/Наука - Data/2019 - Sensors - Tracking/data/pipe.joblib"
do_save = True
do_load = False
def extract_features_and_variables(States, Observations):
X = Observations[:, :, :2 * Xb.shape[0]].reshape(Observations.shape[0] * Observations.shape[1], -1)
Y = States[:, :, :3].reshape(States.shape[0] * States.shape[1], -1)
return X, Y
states, observations = generate_sample_paths(Mtrain, 0)
x_train, y_train = extract_features_and_variables(states, observations)
if do_load:
# load previous
pipe_lasso = load(pipe_file_name)
else:
pipe_lasso = Pipeline(steps=[
('polynomial', PolynomialFeatures(degree=2, interaction_only=False)),
('lasso', MultiTaskLassoCV(eps=0.001, n_alphas=20, normalize=True, cv=5, n_jobs=-1, max_iter=10000))
])
estimator_ml = PriorLearn(pipe_lasso, train_size=ml_train_part, already_fit=do_load)
estimator_ml.fit(x_train, y_train)
if do_save:
dump(pipe_lasso, pipe_file_name)
| 31.869565
| 108
| 0.761255
|
e9da23eef781444e4a638897d5533137cb613ee4
| 17,634
|
py
|
Python
|
conans/client/remote_manager.py
|
brianembry/conan
|
562e5b080cda1f2644fecdd40ae30faef1cf6640
|
[
"MIT"
] | null | null | null |
conans/client/remote_manager.py
|
brianembry/conan
|
562e5b080cda1f2644fecdd40ae30faef1cf6640
|
[
"MIT"
] | null | null | null |
conans/client/remote_manager.py
|
brianembry/conan
|
562e5b080cda1f2644fecdd40ae30faef1cf6640
|
[
"MIT"
] | null | null | null |
import os
import stat
import tarfile
import traceback
import shutil
import time
from requests.exceptions import ConnectionError
from conans.errors import ConanException, ConanConnectionError, NotFoundException
from conans.model.manifest import gather_files
from conans.paths import PACKAGE_TGZ_NAME, CONANINFO, CONAN_MANIFEST, CONANFILE, EXPORT_TGZ_NAME, \
rm_conandir, EXPORT_SOURCES_TGZ_NAME, EXPORT_SOURCES_DIR_OLD
from conans.util.files import gzopen_without_timestamps, is_dirty,\
make_read_only, set_dirty, clean_dirty
from conans.util.files import tar_extract, rmdir, exception_message_safe, mkdir
from conans.util.files import touch_folder
from conans.util.log import logger
# FIXME: Eventually, when all output is done, tracer functions should be moved to the recorder class
from conans.util.tracer import (log_package_upload, log_recipe_upload,
log_recipe_sources_download,
log_uncompressed_file, log_compressed_files, log_recipe_download,
log_package_download)
from conans.client.source import merge_directories
from conans.util.env_reader import get_env
from conans.search.search import filter_packages
class RemoteManager(object):
""" Will handle the remotes to get recipes, packages etc """
def __init__(self, client_cache, auth_manager, output):
self._client_cache = client_cache
self._output = output
self._auth_manager = auth_manager
def upload_recipe(self, conan_reference, remote, retry, retry_wait, ignore_deleted_file,
skip_upload=False, no_overwrite=None):
"""Will upload the conans to the first remote"""
t1 = time.time()
export_folder = self._client_cache.export(conan_reference)
for f in (EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME):
tgz_path = os.path.join(export_folder, f)
if is_dirty(tgz_path):
self._output.warn("%s: Removing %s, marked as dirty" % (str(conan_reference), f))
os.remove(tgz_path)
clean_dirty(tgz_path)
files, symlinks = gather_files(export_folder)
if CONANFILE not in files or CONAN_MANIFEST not in files:
raise ConanException("Cannot upload corrupted recipe '%s'" % str(conan_reference))
export_src_folder = self._client_cache.export_sources(conan_reference, short_paths=None)
src_files, src_symlinks = gather_files(export_src_folder)
the_files = _compress_recipe_files(files, symlinks, src_files, src_symlinks, export_folder,
self._output)
if skip_upload:
return None
ret, new_ref = self._call_remote(remote, "upload_recipe", conan_reference, the_files, retry, retry_wait,
ignore_deleted_file, no_overwrite)
duration = time.time() - t1
log_recipe_upload(new_ref, duration, the_files, remote)
if ret:
msg = "Uploaded conan recipe '%s' to '%s'" % (str(new_ref), remote.name)
url = remote.url.replace("https://api.bintray.com/conan", "https://bintray.com")
msg += ": %s" % url
else:
msg = "Recipe is up to date, upload skipped"
self._output.info(msg)
return new_ref
def _package_integrity_check(self, package_reference, files, package_folder):
# If package has been modified remove tgz to regenerate it
self._output.rewrite_line("Checking package integrity...")
read_manifest, expected_manifest = self._client_cache.package_manifests(package_reference)
if read_manifest != expected_manifest:
self._output.writeln("")
diff = read_manifest.difference(expected_manifest)
for fname, (h1, h2) in diff.items():
self._output.warn("Mismatched checksum '%s' (manifest: %s, file: %s)"
% (fname, h1, h2))
if PACKAGE_TGZ_NAME in files:
try:
tgz_path = os.path.join(package_folder, PACKAGE_TGZ_NAME)
os.unlink(tgz_path)
except Exception:
pass
error_msg = os.linesep.join("Mismatched checksum '%s' (manifest: %s, file: %s)"
% (fname, h1, h2) for fname, (h1, h2) in diff.items())
logger.error("Manifests doesn't match!\n%s" % error_msg)
raise ConanException("Cannot upload corrupted package '%s'" % str(package_reference))
else:
self._output.rewrite_line("Package integrity OK!")
self._output.writeln("")
def upload_package(self, package_reference, remote, retry, retry_wait, skip_upload=False,
integrity_check=False, no_overwrite=None):
"""Will upload the package to the first remote"""
t1 = time.time()
# existing package, will use short paths if defined
package_folder = self._client_cache.package(package_reference, short_paths=None)
if is_dirty(package_folder):
raise ConanException("Package %s is corrupted, aborting upload.\n"
"Remove it with 'conan remove %s -p=%s'" % (package_reference,
package_reference.conan,
package_reference.package_id))
tgz_path = os.path.join(package_folder, PACKAGE_TGZ_NAME)
if is_dirty(tgz_path):
self._output.warn("%s: Removing %s, marked as dirty" % (str(package_reference), PACKAGE_TGZ_NAME))
os.remove(tgz_path)
clean_dirty(tgz_path)
# Get all the files in that directory
files, symlinks = gather_files(package_folder)
if CONANINFO not in files or CONAN_MANIFEST not in files:
logger.error("Missing info or manifest in uploading files: %s" % (str(files)))
raise ConanException("Cannot upload corrupted package '%s'" % str(package_reference))
logger.debug("====> Time remote_manager build_files_set : %f" % (time.time() - t1))
if integrity_check:
self._package_integrity_check(package_reference, files, package_folder)
logger.debug("====> Time remote_manager check package integrity : %f"
% (time.time() - t1))
the_files = compress_package_files(files, symlinks, package_folder, self._output)
if skip_upload:
return None
tmp = self._call_remote(remote, "upload_package", package_reference, the_files,
retry, retry_wait, no_overwrite)
duration = time.time() - t1
log_package_upload(package_reference, duration, the_files, remote)
logger.debug("====> Time remote_manager upload_package: %f" % duration)
if not tmp:
self._output.rewrite_line("Package is up to date, upload skipped")
self._output.writeln("")
return tmp
def get_conan_manifest(self, conan_reference, remote):
"""
Read ConanDigest from remotes
Will iterate the remotes to find the conans unless remote was specified
returns (ConanDigest, remote_name)"""
return self._call_remote(remote, "get_conan_manifest", conan_reference)
def get_package_manifest(self, package_reference, remote):
"""
Read ConanDigest from remotes
Will iterate the remotes to find the conans unless remote was specified
returns (ConanDigest, remote_name)"""
return self._call_remote(remote, "get_package_manifest", package_reference)
def get_package_info(self, package_reference, remote):
"""
Read a package ConanInfo from remotes
Will iterate the remotes to find the conans unless remote was specified
returns (ConanInfo, remote_name)"""
return self._call_remote(remote, "get_package_info", package_reference)
def get_recipe(self, conan_reference, remote):
"""
Read the conans from remotes
Will iterate the remotes to find the conans unless remote was specified
returns (dict relative_filepath:abs_path , remote_name)"""
dest_folder = self._client_cache.export(conan_reference)
rmdir(dest_folder)
t1 = time.time()
zipped_files, conan_reference = self._call_remote(remote, "get_recipe", conan_reference,
dest_folder)
duration = time.time() - t1
log_recipe_download(conan_reference, duration, remote, zipped_files)
unzip_and_get_files(zipped_files, dest_folder, EXPORT_TGZ_NAME)
# Make sure that the source dir is deleted
rm_conandir(self._client_cache.source(conan_reference))
touch_folder(dest_folder)
return conan_reference
def get_recipe_sources(self, conan_reference, export_folder, export_sources_folder, remote):
t1 = time.time()
zipped_files = self._call_remote(remote, "get_recipe_sources", conan_reference,
export_folder)
if not zipped_files:
mkdir(export_sources_folder) # create the folder even if no source files
return conan_reference
duration = time.time() - t1
log_recipe_sources_download(conan_reference, duration, remote, zipped_files)
unzip_and_get_files(zipped_files, export_sources_folder, EXPORT_SOURCES_TGZ_NAME)
c_src_path = os.path.join(export_sources_folder, EXPORT_SOURCES_DIR_OLD)
if os.path.exists(c_src_path):
merge_directories(c_src_path, export_sources_folder)
rmdir(c_src_path)
touch_folder(export_sources_folder)
return conan_reference
def get_package(self, package_reference, dest_folder, remote, output, recorder):
package_id = package_reference.package_id
output.info("Retrieving package %s from remote '%s' " % (package_id, remote.name))
rm_conandir(dest_folder) # Remove first the destination folder
t1 = time.time()
try:
zipped_files = self._call_remote(remote, "get_package", package_reference, dest_folder)
duration = time.time() - t1
log_package_download(package_reference, duration, remote, zipped_files)
unzip_and_get_files(zipped_files, dest_folder, PACKAGE_TGZ_NAME)
# Issue #214 https://github.com/conan-io/conan/issues/214
touch_folder(dest_folder)
if get_env("CONAN_READ_ONLY_CACHE", False):
make_read_only(dest_folder)
recorder.package_downloaded(package_reference, remote.url)
output.success('Package installed %s' % package_id)
except NotFoundException:
raise NotFoundException("Package binary '%s' not found in '%s'" % (package_reference, remote.name))
except BaseException as e:
output.error("Exception while getting package: %s" % str(package_reference.package_id))
output.error("Exception: %s %s" % (type(e), str(e)))
try:
output.warn("Trying to remove package folder: %s" % dest_folder)
rmdir(dest_folder)
except OSError as e:
raise ConanException("%s\n\nCouldn't remove folder '%s', might be busy or open. Close any app "
"using it, and retry" % (str(e), dest_folder))
raise
def search_recipes(self, remote, pattern=None, ignorecase=True):
"""
Search exported conans information from remotes
returns (dict str(conan_ref): {packages_info}"""
return self._call_remote(remote, "search", pattern, ignorecase)
def search_packages(self, remote, reference, query):
packages = self._call_remote(remote, "search_packages", reference, query)
packages = filter_packages(query, packages)
return packages
def remove(self, conan_ref, remote):
"""
Removed conans or packages from remote
"""
return self._call_remote(remote, "remove", conan_ref)
def remove_packages(self, conan_ref, remove_ids, remote):
"""
Removed conans or packages from remote
"""
return self._call_remote(remote, "remove_packages", conan_ref, remove_ids)
def get_path(self, conan_ref, package_id, path, remote):
return self._call_remote(remote, "get_path", conan_ref, package_id, path)
def authenticate(self, remote, name, password):
return self._call_remote(remote, 'authenticate', name, password)
def _call_remote(self, remote, method, *argc, **argv):
self._auth_manager.remote = remote
try:
return getattr(self._auth_manager, method)(*argc, **argv)
except ConnectionError as exc:
raise ConanConnectionError("%s\n\nUnable to connect to %s=%s"
% (str(exc), remote.name, remote.url))
except ConanException as exc:
raise exc.__class__("%s. [Remote: %s]" % (exception_message_safe(exc), remote.name))
except Exception as exc:
logger.error(traceback.format_exc())
raise ConanException(exc)
def _compress_recipe_files(files, symlinks, src_files, src_symlinks, dest_folder, output):
# This is the minimum recipe
result = {CONANFILE: files.pop(CONANFILE),
CONAN_MANIFEST: files.pop(CONAN_MANIFEST)}
export_tgz_path = files.pop(EXPORT_TGZ_NAME, None)
sources_tgz_path = files.pop(EXPORT_SOURCES_TGZ_NAME, None)
def add_tgz(tgz_name, tgz_path, tgz_files, tgz_symlinks, msg):
if tgz_path:
result[tgz_name] = tgz_path
elif tgz_files:
output.rewrite_line(msg)
tgz_path = compress_files(tgz_files, tgz_symlinks, tgz_name, dest_folder)
result[tgz_name] = tgz_path
add_tgz(EXPORT_TGZ_NAME, export_tgz_path, files, symlinks, "Compressing recipe...")
add_tgz(EXPORT_SOURCES_TGZ_NAME, sources_tgz_path, src_files, src_symlinks,
"Compressing recipe sources...")
return result
def compress_package_files(files, symlinks, dest_folder, output):
tgz_path = files.get(PACKAGE_TGZ_NAME)
if not tgz_path:
output.rewrite_line("Compressing package...")
tgz_files = {f: path for f, path in files.items() if f not in [CONANINFO, CONAN_MANIFEST]}
tgz_path = compress_files(tgz_files, symlinks, PACKAGE_TGZ_NAME, dest_dir=dest_folder)
return {PACKAGE_TGZ_NAME: tgz_path,
CONANINFO: files[CONANINFO],
CONAN_MANIFEST: files[CONAN_MANIFEST]}
def compress_files(files, symlinks, name, dest_dir):
t1 = time.time()
# FIXME, better write to disk sequentially and not keep tgz contents in memory
tgz_path = os.path.join(dest_dir, name)
set_dirty(tgz_path)
with open(tgz_path, "wb") as tgz_handle:
# tgz_contents = BytesIO()
tgz = gzopen_without_timestamps(name, mode="w", fileobj=tgz_handle)
for filename, dest in sorted(symlinks.items()):
info = tarfile.TarInfo(name=filename)
info.type = tarfile.SYMTYPE
info.linkname = dest
tgz.addfile(tarinfo=info)
mask = ~(stat.S_IWOTH | stat.S_IWGRP)
for filename, abs_path in sorted(files.items()):
info = tarfile.TarInfo(name=filename)
info.size = os.stat(abs_path).st_size
info.mode = os.stat(abs_path).st_mode & mask
if os.path.islink(abs_path):
info.type = tarfile.SYMTYPE
info.linkname = os.readlink(abs_path) # @UndefinedVariable
tgz.addfile(tarinfo=info)
else:
with open(abs_path, 'rb') as file_handler:
tgz.addfile(tarinfo=info, fileobj=file_handler)
tgz.close()
clean_dirty(tgz_path)
duration = time.time() - t1
log_compressed_files(files, duration, tgz_path)
return tgz_path
def check_compressed_files(tgz_name, files):
bare_name = os.path.splitext(tgz_name)[0]
for f in files:
if bare_name == os.path.splitext(f)[0] and f != tgz_name:
raise ConanException("This Conan version is not prepared to handle '%s' file format. "
"Please upgrade conan client." % f)
def unzip_and_get_files(files, destination_dir, tgz_name):
"""Moves all files from package_files, {relative_name: tmp_abs_path}
to destination_dir, unzipping the "tgz_name" if found"""
tgz_file = files.pop(tgz_name, None)
check_compressed_files(tgz_name, files)
if tgz_file:
uncompress_file(tgz_file, destination_dir)
os.remove(tgz_file)
def uncompress_file(src_path, dest_folder):
t1 = time.time()
try:
with open(src_path, 'rb') as file_handler:
tar_extract(file_handler, dest_folder)
except Exception as e:
error_msg = "Error while downloading/extracting files to %s\n%s\n" % (dest_folder, str(e))
# try to remove the files
try:
if os.path.exists(dest_folder):
shutil.rmtree(dest_folder)
error_msg += "Folder removed"
except Exception:
error_msg += "Folder not removed, files/package might be damaged, remove manually"
raise ConanException(error_msg)
duration = time.time() - t1
log_uncompressed_file(src_path, duration, dest_folder)
| 44.643038
| 112
| 0.647216
|
a778f3811b2428a76a12b4d23eb82ae1eb8334e7
| 20,246
|
py
|
Python
|
exchanges/quickswap.py
|
0x1d00ffff/0xbtc-discord-price-bot
|
afa33821bb77b63fa528a784b5ef9b913eaac115
|
[
"MIT"
] | 10
|
2018-05-01T14:54:55.000Z
|
2022-01-24T09:37:31.000Z
|
exchanges/quickswap.py
|
0x1d00ffff/0xbtc-discord-price-bot
|
afa33821bb77b63fa528a784b5ef9b913eaac115
|
[
"MIT"
] | 1
|
2019-04-10T16:44:52.000Z
|
2019-04-10T16:44:52.000Z
|
exchanges/quickswap.py
|
0x1d00ffff/0xbtc-discord-price-bot
|
afa33821bb77b63fa528a784b5ef9b913eaac115
|
[
"MIT"
] | 16
|
2018-06-06T21:44:18.000Z
|
2021-09-22T09:35:59.000Z
|
"""
API for the QuickSwap distributed exchange (a Uniswap v2 fork on Polygon/Matic)
Price info is pulled from the pair smart contracts
"""
import logging
from web3 import Web3
import time
import requests
from .base_exchange import Daily24hChangeTrackedAPI, NoLiquidityException
from .uniswap_v2_abi import exchange_abi
from .uniswap_v2_router_abi import router_abi
from secret_info import MATIC_NODE_URL
from constants import SECONDS_PER_MATIC_BLOCK
from token_class import MaticToken, NoTokenMatchError
from weighted_average import WeightedAverage
# list of exchange contract addresses. each pair has a unique address.
# token0 name, token1 name, uniswap exchange address
exchanges = (
# WETH pairs
("USDC", "WETH", "0x853Ee4b2A13f8a742d64C8F088bE7bA2131f670d"),
("WETH", "DAI", "0x4A35582a710E1F4b2030A3F826DA20BfB6703C09"),
("WETH", "USDT", "0xF6422B997c7F54D1c6a6e103bcb1499EeA0a7046"),
("WMATIC", "WETH", "0xadbF1854e5883eB8aa7BAf50705338739e558E5b"),
("maWETH", "WETH", "0x587381961298A6019926329468f2dB73C414cf68"),
("WETH", "SWAM", "0xe3aD20db6f1B061024F4dF761DEB80bCd3e3E2a7"),
# 0xBTC pairs
#("maWETH", "0xBTC", "0x83Eaa0dD0146fb2494eDb1b260eC7C830d356AF7"), # removed 5/26/21; no liquidity
("WMATIC", "0xBTC", "0x74FE2ea44ACe1AEee9937A2FDc7554CFC9288964"),
("0xBTC", "WETH", "0x58BBC687Ad7113e46D35314776FAd9c4B73e200C"),
#("USDC", "0xBTC", "0x19FcFD016a5Fa35286C1FBb3F96Fe9b3fF44530e"), # removed 5/26/21; no liquidity
#("0xBTC", "USDT", "0xa3F3b3ad33C233633242bd1236072355a8af6f52"), # removed 5/26/21; no liquidity
#("KIWI", "0xBTC", "0xf115308E8347E816D23566EAafB4C0BCb1349432"), # removed 5/26/21; no liquidity
#("0xBTC", "DAI", "0xc5e5208A9544Bd0589063D4670E9747535127E16"), # removed 5/26/21; no liquidity
# KIWI pairs
("KIWI", "SWAM", "0x0cD19Fb530D0ff9caB6F233d61dE6240E7f4660F"),
("WMATIC", "KIWI", "0xb97759d3b6210F2b7Af081E023Db972856523A5a"),
("KIWI", "SWAM", "0x6233132c03DAC2Af6495A9dAB02DF18b2A9DA892"),
)
_TIME_BETWEEN_VOLUME_UPDATES = 60 * 60 # 1 hour
# if less than this many tokens in pair, don't use it for price
_MINIMUM_ALLOWED_LIQUIDITY_IN_TOKENS = 0.1
# if less than this many tokens in pair, don't check its volume
_MINIMUM_ALLOWED_LIQUIDITY_TOKENS_TO_CHECK_VOLUME = 10
class PairNotDefinedError(Exception):
pass
def getExchangeAddressesForToken(name):
return [i[2] for i in exchanges if i[0].lower() == name.lower() or i[1].lower() == name.lower()]
def getTokensFromExchangeAddress(exchange_address):
return [(i[0], i[1]) for i in exchanges if i[2].lower() == exchange_address.lower()][0]
def getExchangeAddressForTokenPair(first_token_name, second_token_name):
token_addresses = sorted([MaticToken().from_symbol(first_token_name).address.lower(),
MaticToken().from_symbol(second_token_name).address.lower()])
for token1_name, token2_name, address in exchanges:
if (token1_name in [first_token_name, second_token_name]
and token2_name in [first_token_name, second_token_name]):
return (address,
MaticToken().from_address(token_addresses[0]).symbol,
MaticToken().from_address(token_addresses[1]).symbol)
raise PairNotDefinedError(f"No pair {first_token_name}-{second_token_name} found")
def wei_to_ether(amount_in_wei):
return int(amount_in_wei) / 1000000000000000000.0
def ether_to_wei(amount_in_ether):
return int(amount_in_ether * 1000000000000000000.0)
# HACK
# python implementation of uniswap router contract's getAmountOut function. Once web3.py
# supports solidity >= 0.6, we should be able to use the real getAmountOut function.
#
# function getAmountOut(uint amountIn, uint reserveIn, uint reserveOut) internal pure returns (uint amountOut) {
# require(amountIn > 0, 'UniswapV2Library: INSUFFICIENT_INPUT_AMOUNT');
# require(reserveIn > 0 && reserveOut > 0, 'UniswapV2Library: INSUFFICIENT_LIQUIDITY');
# uint amountInWithFee = amountIn.mul(997);
# uint numerator = amountInWithFee.mul(reserveOut);
# uint denominator = reserveIn.mul(1000).add(amountInWithFee);
# amountOut = numerator / denominator;
# }
def get_amount_out__uniswap_router(amountIn, reserveIn, reserveOut):
amountIn = int(amountIn)
reserveIn = int(reserveIn)
reserveOut = int(reserveOut)
if amountIn <= 0 or reserveIn <= 0 or reserveOut <= 0:
return None
amountInWithFee = amountIn * 997
numerator = amountInWithFee * reserveOut
denominator = (reserveIn * 1000) + amountInWithFee
return numerator / denominator
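# Worked example with illustrative round numbers (not taken from any real pool):
# amountIn=1000, reserveIn=100000, reserveOut=200000 gives
#   amountInWithFee = 997000
#   numerator = 997000 * 200000 = 199,400,000,000
#   denominator = 100000 * 1000 + 997000 = 100,997,000
#   amountOut = numerator / denominator, about 1974.3
# slightly below the fee-free, zero-slippage figure of 2000 (= 1000 * 200000 / 100000)
# because of the 0.3% fee and the price impact of the trade itself.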
def get_swap_amount(web3, amount, token0_name, token1_name):
"""Returns the number of token1 tokens you can buy for a given number of
token0 tokens"""
exchange_address, first_token_name, second_token_name = getExchangeAddressForTokenPair(token0_name, token1_name)
exchange = web3.eth.contract(address=exchange_address, abi=exchange_abi)
reserves = exchange.functions.getReserves().call()
if token0_name == second_token_name:
reserves[0], reserves[1] = reserves[1], reserves[0]
if reserves[0] == 0 or reserves[1] == 0:
return 0
# TODO: replace this with the real function (commented below) once web3.py
# supports solidity >= 0.6
amount_out = get_amount_out__uniswap_router(
amount * 10**MaticToken().from_symbol(token0_name).decimals,
reserves[0],
reserves[1])
# amount_out = self._router.functions.getAmountOut(
# amount * 10**token0_decimals,
# reserves[0],
# reserves[1]).call()
return amount_out / 10**MaticToken().from_symbol(token1_name).decimals
def get_pooled_balance_for_address(web3, token0_name, token1_name, owner_address):
"""get the balance of a particular address in a uniswap v2 pool"""
exchange_address, _, _ = getExchangeAddressForTokenPair(token0_name, token1_name)
exchange = web3.eth.contract(address=exchange_address, abi=exchange_abi)
all_ownership_tokens = exchange.functions.totalSupply().call()
    if all_ownership_tokens == 0:
        # empty pool; avoid dividing by zero
        ownership_tokens_in_address = 0
        ownership_percentage = 0
    else:
        ownership_tokens_in_address = exchange.functions.balanceOf(owner_address).call()
        ownership_percentage = ownership_tokens_in_address / all_ownership_tokens
reserves = get_reserves(web3, token0_name, token1_name)
return reserves[0] * ownership_percentage, reserves[1] * ownership_percentage
def get_reserves(web3, token0_name, token1_name):
"""get the reserves, in tokens, of a particular uniswap v2 pool"""
exchange_address, first_token_name, second_token_name = getExchangeAddressForTokenPair(token0_name, token1_name)
exchange = web3.eth.contract(address=exchange_address, abi=exchange_abi)
reserves = exchange.functions.getReserves().call()
reserves[0] = reserves[0] / 10**MaticToken().from_symbol(first_token_name).decimals
reserves[1] = reserves[1] / 10**MaticToken().from_symbol(second_token_name).decimals
if token0_name == second_token_name:
reserves[0], reserves[1] = reserves[1], reserves[0]
return reserves[0], reserves[1]
def get_price(web3, token0_name, token1_name):
"""Get the price at a particular uniswap v2 pool, in terms of token0 / token1"""
reserves = get_reserves(web3, token0_name, token1_name)
if reserves[1] == 0:
return 0
else:
return reserves[0] / reserves[1]
class QuickSwapAPI(Daily24hChangeTrackedAPI):
def __init__(self, currency_symbol, timeout=10.0):
super().__init__()
try:
self._exchange_addresses = getExchangeAddressesForToken(currency_symbol)
self._decimals = MaticToken().from_symbol(currency_symbol).decimals
except IndexError:
raise RuntimeError("Unknown currency_symbol {}, need to add address to token_class.py".format(currency_symbol))
self.currency_symbol = currency_symbol
self.exchange_name = "QuickSwap"
self.command_names = ["quickswap"]
self.short_url = "https://bit.ly/2R42MbO" # main quickswap pre-selected to 0xbtc
self.volume_eth = 0
self._time_volume_last_updated = 0
self._w3 = Web3(Web3.HTTPProvider(MATIC_NODE_URL, request_kwargs={'timeout': timeout}))
self._exchanges = [self._w3.eth.contract(address=a, abi=exchange_abi) for a in self._exchange_addresses]
def _is_time_to_update_volume(self):
return time.time() - self._time_volume_last_updated > _TIME_BETWEEN_VOLUME_UPDATES
def _mark_volume_as_updated(self):
self._time_volume_last_updated = time.time()
async def _get_volume_at_exchange_contract(self, exchange_contract, current_eth_block=None, timeout=10.0):
volume_tokens = 0 # volume in units of <self.currency_symbol> tokens
volume_pair = 0 # volume in units of the paired token
swap_topic = "0xd78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"
sync_topic = "0x1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"
burn_topic = "0xdccd412f0b1252819cb1fd330b93224ca42612892bb3f4f789976e6d81936496"
transfer_topic = "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"
approval_topic = "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925"
mint_topic = "0x4c209b5fc8ad50758f13e2e1088ba56a560dff690a1c6fef26394f4c03821c4f"
token0_address = exchange_contract.functions.token0().call()
token1_address = exchange_contract.functions.token1().call()
if current_eth_block is None:
current_eth_block = self._w3.eth.blockNumber
for event in self._w3.eth.getLogs({
'fromBlock': current_eth_block - (int(60*60*24 / SECONDS_PER_MATIC_BLOCK)),
'toBlock': current_eth_block - 1,
'address': exchange_contract.address}):
topic0 = self._w3.toHex(event['topics'][0])
if topic0 == swap_topic:
#print('swap in tx', self._w3.toHex(event['transactionHash']))
receipt = self._w3.eth.getTransactionReceipt(event['transactionHash'])
parsed_logs = exchange_contract.events.Swap().processReceipt(receipt)
correct_log = None
for log in parsed_logs:
if log.address.lower() == exchange_contract.address.lower():
correct_log = log
if correct_log is None:
logging.warning('bad swap transaction {}'.format(self._w3.toHex(event['transactionHash'])))
continue
#sender_address = correct_log.args.sender
#to_address = correct_log.args.to
amount0In = correct_log.args.amount0In
amount1In = correct_log.args.amount1In
amount0Out = correct_log.args.amount0Out
amount1Out = correct_log.args.amount1Out
#block_number = correct_log.blockNumber
if MaticToken().from_address(token0_address).symbol.lower() == self.currency_symbol.lower():
# token0 is the tracked currency symbol
volume_tokens += abs((amount0In - amount0Out) / 10**MaticToken().from_address(token0_address).decimals)
volume_pair += abs((amount1In - amount1Out) / 10**MaticToken().from_address(token1_address).decimals)
elif MaticToken().from_address(token1_address).symbol.lower() == self.currency_symbol.lower():
# token1 is the tracked currency symbol
volume_tokens += abs((amount1In - amount1Out) / 10**MaticToken().from_address(token1_address).decimals)
volume_pair += abs((amount0In - amount0Out) / 10**MaticToken().from_address(token0_address).decimals)
# print(' token', getTokenNameFromAddress(token0_address), 'send to exchange', (amount0In - amount0Out) / 10**getTokenDecimalsFromAddress(token0_address), getTokenNameFromAddress(token0_address))
# print(' token', getTokenNameFromAddress(token1_address), 'send to exchange', (amount1In - amount1Out) / 10**getTokenDecimalsFromAddress(token1_address), getTokenNameFromAddress(token1_address))
continue
elif topic0 == mint_topic:
# skip liquidity deposits/withdrawals
continue
elif topic0 == sync_topic:
continue
elif topic0 == burn_topic:
continue
elif topic0 == transfer_topic:
continue
elif topic0 == approval_topic:
continue
else:
logging.debug('unknown topic txhash {}'.format(self._w3.toHex(event['transactionHash'])))
logging.debug('unknown topic topic0 {}'.format(topic0))
return volume_tokens, volume_pair
async def _get_price_and_liquidity_at_exchange_contract(self, exchange_contract):
token0_address = exchange_contract.functions.token0().call().lower()
token1_address = exchange_contract.functions.token1().call().lower()
paired_token_address = token0_address if token1_address.lower() == MaticToken().from_symbol(self.currency_symbol).address.lower() else token1_address
paired_token_symbol = MaticToken().from_address(paired_token_address).symbol
liquidity_tokens, liquidity_pair = get_reserves(self._w3, self.currency_symbol, paired_token_symbol)
# bail early if the number of tokens LPd is very small
# TODO: this should probably be configurable. Or generated automatically
# based on some USD value, not token value
if liquidity_tokens < _MINIMUM_ALLOWED_LIQUIDITY_IN_TOKENS:
raise NoLiquidityException(f"Less than {_MINIMUM_ALLOWED_LIQUIDITY_IN_TOKENS} tokens LP'd for exchange contract.")
# get price of paired token (in USD) to determine price of
# <self.currency_symbol> in USD. Strategy changes depending on pair
price_in_paired_token = get_price(self._w3, paired_token_symbol, self.currency_symbol)
if paired_token_symbol == "WETH":
paired_token_price_in_usd = self.eth_price_usd
else:
# get the paired token's price in Eth. If there is less than $500 in
# liquidity to determine this, then skip this pair when determining price.
liquidity_eth_of_paired_token, _ = get_reserves(self._w3, "WETH", paired_token_symbol)
if liquidity_eth_of_paired_token < 500 / self.eth_price_usd:
raise NoLiquidityException(f"Less than {500} USD LP'd for paired token {paired_token_symbol}, pair token price not considered accurate. Skipping pair.")
else:
paired_token_price_in_eth = get_price(self._w3, "WETH", paired_token_symbol)
paired_token_price_in_usd = paired_token_price_in_eth * self.eth_price_usd
price_in_usd = price_in_paired_token * paired_token_price_in_usd
return price_in_usd, liquidity_tokens
async def _update_all_values(self, should_update_volume=False, timeout=10):
# TODO: switch to rolling 24-hour volume by loading 1 hour at a time to
# allow re-enable volume updates
# currently alchemyapi errors because 24h of events is too many for 1 call
should_update_volume = False
# END TODO
if should_update_volume:
current_eth_block = self._w3.eth.blockNumber
# get price of eth
eth_prices = [
get_price(self._w3, "DAI", "WETH"),
get_price(self._w3, "USDT", "WETH"),
get_price(self._w3, "USDC", "WETH"),
]
self.eth_price_usd = sum(eth_prices) / len(eth_prices) # TODO: should be weighted average
# get token price (in USD), liquidity (in tokens), and volume (in tokens) for
        # each pair. Note that if liquidity is low for a pair, its volume is not checked.
price_usd_weighted_average = WeightedAverage()
total_liquidity_tokens = 0
total_volume_tokens = 0
for exchange_contract in self._exchanges:
try:
price_usd, liquidity_tokens = await self._get_price_and_liquidity_at_exchange_contract(exchange_contract)
except (NoTokenMatchError, PairNotDefinedError) as e:
logging.warning(f"Failed to update quickswap exchange: {str(e)}")
continue
except NoLiquidityException:
# no liquidity is not an error; simply skip this exchange
continue
else:
price_usd_weighted_average.add(price_usd, liquidity_tokens)
total_liquidity_tokens += liquidity_tokens
if should_update_volume and liquidity_tokens > _MINIMUM_ALLOWED_LIQUIDITY_TOKENS_TO_CHECK_VOLUME:
try:
volume_tokens, volume_pair = await self._get_volume_at_exchange_contract(exchange_contract, current_eth_block=current_eth_block, timeout=timeout)
total_volume_tokens += volume_tokens
except requests.exceptions.ReadTimeout:
logging.warning(f"Failed to update QuickSwapAPI volume: ReadTimeout")
self.price_usd = price_usd_weighted_average.average()
self.price_eth = self.price_usd / self.eth_price_usd
self.liquidity_tokens = total_liquidity_tokens
self.liquidity_eth = self.liquidity_tokens * self.price_eth
if should_update_volume:
self.volume_tokens = total_volume_tokens
self.volume_eth = self.volume_tokens * self.price_eth
# NOTE: this sets _time_volume_last_updated even if all volume updates
# failed. This is OK for now, it throttles struggling APIs (matic) but
# may not be the ideal behavior.
self._mark_volume_as_updated()
async def _update(self, timeout=10.0):
if self._is_time_to_update_volume():
await self._update_all_values(timeout=timeout, should_update_volume=True)
else:
await self._update_all_values(timeout=timeout, should_update_volume=False)
if __name__ == "__main__":
# run some generic uniswap v2 functions
web3 = Web3(Web3.HTTPProvider(MATIC_NODE_URL))
print('$1 in USDC will swap for {} 0xBTC tokens'.format(get_swap_amount(web3, 1, "USDC", "0xBTC")))
print('$1 in DAI will swap for {} 0xBTC tokens'.format(get_swap_amount(web3, 1, "DAI", "0xBTC")))
print('1 0xBTC token will swap for {} DAI'.format(get_swap_amount(web3, 1, "0xBTC", "DAI")))
print('100 0xBTC tokens will swap for {} DAI'.format(get_swap_amount(web3, 100, "0xBTC", "DAI")))
print('1 ETH will swap for {} DAI'.format(get_swap_amount(web3, 1, "WETH", "DAI")))
print('230 DAI will swap for {} ETH'.format(get_swap_amount(web3, 230, "DAI", "WETH")))
print('0xbtc and ETH balances:', get_reserves(web3, "0xBTC", "WETH"))
# print('0xbtc and ETH price:', e.get_price("0xBTC", "WETH"), "0xBTC per ETH")
# print('0xbtc and ETH price:', e.get_price("WETH", "0xBTC"), "ETH per 0xBTC")
print()
print('eth usdc reserves ', get_reserves(web3, "WETH", "USDC"))
print('1 in ETH will swap for {} USDC '.format(get_swap_amount(web3, 1, "WETH", "USDC")))
print('1 in ETH will swap for {} USDT '.format(get_swap_amount(web3, 1, "WETH", "USDT")))
print('1 in ETH will swap for {} DAI '.format(get_swap_amount(web3, 1, "WETH", "DAI")))
print()
# get some data from 0xBTC pool via QuickSwapAPI
e = QuickSwapAPI('0xBTC')
e.load_once_and_print_values()
print()
try:
print('0xBTC-WETH liquidity in eth', e.liquidity_eth)
except AttributeError:
pass
print('0xBTC-WETH liquidity in tokens', e.liquidity_tokens)
# get some data from KIWI pool via QuickSwapAPI
# e = QuickSwapAPI('KIWI')
# e.load_once_and_print_values()
# print()
# try:
# print('KIWI-WETH liquidity in eth', e.liquidity_eth)
# except AttributeError:
# pass
# print('KIWI-WETH liquidity in tokens', e.liquidity_tokens)
# e = QuickSwapAPI('DAI')
# e.load_once_and_print_values()
| 51.126263
| 214
| 0.69347
|
93452d01af873613cc36e7d24d70472104cda038
| 416
|
py
|
Python
|
tasks/UDEMY/100_days/L005/day-5-day-5-end.py
|
AleksNeStu/projects
|
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
|
[
"Apache-2.0"
] | 2
|
2022-01-19T18:01:35.000Z
|
2022-02-06T06:54:38.000Z
|
tasks/UDEMY/100_days/L005/day-5-day-5-end.py
|
AleksNeStu/projects
|
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
|
[
"Apache-2.0"
] | null | null | null |
tasks/UDEMY/100_days/L005/day-5-day-5-end.py
|
AleksNeStu/projects
|
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
|
[
"Apache-2.0"
] | null | null | null |
#For Loop with Lists
fruits = ["Apple", "Peach", "Pear"]
for fruit in fruits:
print(fruit)
print(fruit + " Pie")
#For Loop with Range
for number in range(1, 100):
print(number)
for number in range(1, 101):
print(number)
for number in range(1, 11, 3):
print(number)
#Calculating the sum of all the numbers from 1 to 100.
total = 0
for number in range(1, 101):
total += number
print(total)
| 18.909091
| 54
| 0.65625
|
d6330de053af2086188e2a8adbf5f69b40451656
| 3,390
|
py
|
Python
|
engine/__init__.py
|
laashub/LaasOps
|
4eb088f99cc3542ff24c2fa5898d1c682dca0498
|
[
"Apache-2.0"
] | 7
|
2020-03-24T08:13:19.000Z
|
2020-04-18T20:17:43.000Z
|
engine/__init__.py
|
laashub/LaasOps
|
4eb088f99cc3542ff24c2fa5898d1c682dca0498
|
[
"Apache-2.0"
] | null | null | null |
engine/__init__.py
|
laashub/LaasOps
|
4eb088f99cc3542ff24c2fa5898d1c682dca0498
|
[
"Apache-2.0"
] | 5
|
2020-03-24T08:13:27.000Z
|
2020-04-24T03:27:51.000Z
|
import importlib
import os
from distribution.component import mymysql
ENGINE_LOGIC_DIR_STR = "engine_logic_dir"
ENGINE_LOGIC_STR = "engine_logic"
class Runtime(object):
@staticmethod
def define_dependencies(dependencies):
for item in dependencies:
try:
importlib.import_module(item)
            except Exception:
                # try to install the missing dependency, then import it again
                os.system("pip install %s" % item)
                importlib.import_module(item)
@staticmethod
def require(module_name):
return importlib.import_module(module_name)
@staticmethod
def execute_logic(engine_data):
try:
Data.log(engine_data, "TRACE", "execute_logic: %s:%s for data: %s:%s:%s" % (
engine_data["logic_id"], engine_data["func_name"], engine_data["data_id"],
engine_data["data_data_id"], engine_data["logic_id"],))
Data.status(engine_data, "RUNNING")
module_name = ENGINE_LOGIC_DIR_STR + "." + ENGINE_LOGIC_STR + "_" + str(
engine_data["logic_id"])
module = importlib.import_module(module_name)
target_func = getattr(module, engine_data["func_name"])
target_func(engine_data)
Data.status(engine_data, "FINISH")
except Exception as e:
Data.log(engine_data, "TRACE", str(e))
Data.status(engine_data, "EXCEPTION")
class Data(object):
@staticmethod
def set(sql, parameters={}):
print("Data: ", "sql: ", sql, "parameters: ", parameters)
return mymysql.execute(sql, parameters)
@staticmethod
def get(sql, parameters={}):
print("Data: ", "sql: ", sql, "parameters: ", parameters)
return mymysql.execute(sql, parameters)
@staticmethod
def status(engine_data, cur_status):
print("Data: ", "status: ", "engine_data: ", str(engine_data), "cur_status: ", cur_status)
mymysql.execute("""
insert into engine_data_logic_trigger_data_status(data_id, data_data_id, data_event_type, logic_id, func_name, status)
values(%(data_id)s, %(data_data_id)s, %(data_event_type)s, %(logic_id)s, %(func_name)s, %(status)s)
""", {
"data_id": engine_data["data_id"],
"data_data_id": engine_data["data_data_id"],
"data_event_type": engine_data["data_event_type"],
"logic_id": engine_data["logic_id"],
"func_name": engine_data["func_name"],
"status": cur_status,
})
@staticmethod
def log(engine_data, log_level="TRACE", log_content=""):
print("log: ", "engine_data: ", str(engine_data), "log_level: ", log_level, "log_content: ", log_content)
mymysql.execute("""
insert into engine_data_logic_trigger_data_log(data_id, data_data_id, data_event_type, logic_id, func_name, log_level, log)
values(%(data_id)s, %(data_data_id)s, %(data_event_type)s, %(logic_id)s, %(func_name)s, %(log_level)s, %(log)s)
""", {
"data_id": engine_data["data_id"],
"data_data_id": engine_data["data_data_id"],
"data_event_type": engine_data["data_event_type"],
"logic_id": engine_data["logic_id"],
"func_name": engine_data["func_name"],
"log_level": log_level,
"log": log_content,
})
# TODO refactor the code to increase the engine project, now is too weak
| 39.418605
| 131
| 0.618584
|
4170bcd5f40fc4b753f10abb4f9949819b669599
| 9,887
|
py
|
Python
|
modules/convenient_universal.py
|
DamienIrving/climate-analysis
|
4b9911101f85bde30ac6e7764119e6410d7f633d
|
[
"MIT"
] | 37
|
2016-04-24T09:05:06.000Z
|
2021-11-08T08:47:40.000Z
|
modules/convenient_universal.py
|
YonSci/climate-analysis
|
4b9911101f85bde30ac6e7764119e6410d7f633d
|
[
"MIT"
] | 35
|
2015-02-18T23:42:34.000Z
|
2017-03-12T01:33:48.000Z
|
modules/convenient_universal.py
|
YonSci/climate-analysis
|
4b9911101f85bde30ac6e7764119e6410d7f633d
|
[
"MIT"
] | 16
|
2015-02-18T23:38:01.000Z
|
2022-02-17T08:39:29.000Z
|
"""Collection of convenient functions that will work with my anaconda or uvcdat install.
Functions:
adjust_lon_range -- Express longitude values in desired 360 degree interval
apply_lon_filter -- Set values outside of specified longitude range to zero
broadcast_array -- Broadcast an array to a target shape
calc_significance -- Perform significance test
  coordinate_pairs -- Generate lat/lon pairs
dict_filter -- Filter dictionary according to specified keys
find_nearest -- Find the closest array item to value
find_duplicates -- Return list of duplicates in a list
fix_label -- Fix formatting of an axis label taken from the command line
get_threshold -- Turn the user input threshold into a numeric threshold
hi_lo -- Determine the new highest and lowest value.
list_kwargs -- List keyword arguments of a function
match_dates -- Take list of dates and match with the corresponding times
in a detailed time axis
single2list -- Check if item is a list, then convert if not
units_info -- Make the units taken from a file LaTeX math compliant
"""
import numpy
from scipy import stats
import pdb, re
import inspect
from collections import defaultdict
def adjust_lon_range(lons, radians=True, start=0.0):
"""Express longitude values in a 360 degree (or 2*pi radians) interval.
Args:
lons (list/tuple): Longitude axis values (monotonically increasing)
radians (bool): Specify whether input data are in radians (True) or
degrees (False). Output will be the same units.
start (float, optional): Start value for the output interval (add 360 degrees or 2*pi
radians to get the end point)
"""
lons = single2list(lons, numpy_array=True)
interval360 = 2.0*numpy.pi if radians else 360.0
end = start + interval360
less_than_start = numpy.ones([len(lons),])
while numpy.sum(less_than_start) != 0:
lons = numpy.where(lons < start, lons + interval360, lons)
less_than_start = lons < start
more_than_end = numpy.ones([len(lons),])
while numpy.sum(more_than_end) != 0:
lons = numpy.where(lons >= end, lons - interval360, lons)
more_than_end = lons >= end
return lons
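# Illustrative examples (computed from the logic above):
#   adjust_lon_range([-30, 370], radians=False) -> array([330, 10])
#   adjust_lon_range([190, 300], radians=False, start=-180) -> array([-170, -60])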
def apply_lon_filter(data, lon_bounds):
"""Set values outside of specified longitude range to zero.
Args:
data (numpy.ndarray): Array of longitude values.
lon_bounds (list/tuple): Specified longitude range (min, max)
"""
# Convert to common bounds (0, 360)
lon_min = adjust_lon_range(lon_bounds[0], radians=False, start=0.0)
lon_max = adjust_lon_range(lon_bounds[1], radians=False, start=0.0)
lon_axis = adjust_lon_range(data.getLongitude()[:], radians=False, start=0.0)
# Make required values zero
ntimes, nlats, nlons = data.shape
lon_axis_tiled = numpy.tile(lon_axis, (ntimes, nlats, 1))
new_data = numpy.where(lon_axis_tiled < lon_min, 0.0, data)
return numpy.where(lon_axis_tiled > lon_max, 0.0, new_data)
def broadcast_array(array, axis_index, shape):
"""Broadcast an array to a target shape.
Args:
array (numpy.ndarray)
axis_index (int or tuple): Postion in the target shape that the
axis/axes of the array corresponds to
e.g. if array corresponds to (depth, lat, lon) in (time, depth, lat, lon)
then axis_index = [1, 3]
e.g. if array corresponds to (lat) in (time, depth, lat, lon)
then axis_index = 2
shape (tuple): shape to broadcast to
For a one dimensional array, make start_axis_index = end_axis_index
"""
if type(axis_index) in [float, int]:
start_axis_index = end_axis_index = axis_index
else:
assert len(axis_index) == 2
start_axis_index, end_axis_index = axis_index
dim = start_axis_index - 1
while dim >= 0:
array = array[numpy.newaxis, ...]
array = numpy.repeat(array, shape[dim], axis=0)
dim = dim - 1
dim = end_axis_index + 1
while dim < len(shape):
array = array[..., numpy.newaxis]
array = numpy.repeat(array, shape[dim], axis=-1)
dim = dim + 1
return array
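# Illustrative example: broadcasting a (3,)-element latitude array into a
# (time, depth, lat, lon) grid of shape (2, 4, 3, 5):
#   broadcast_array(numpy.arange(3), 2, (2, 4, 3, 5)).shape -> (2, 4, 3, 5)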
def calc_significance(data_subset, data_all, standard_name):
"""Perform significance test.
One sample t-test, with sample size adjusted for autocorrelation.
Reference:
Zieba (2010). doi:10.2478/v10178-010-0001-0
"""
from statsmodels.tsa.stattools import acf
# Data must be three dimensional, with time first
assert len(data_subset.shape) == 3, "Input data must be 3 dimensional"
# Define autocorrelation function
n = data_subset.shape[0]
autocorr_func = numpy.apply_along_axis(acf, 0, data_subset, nlags=n - 2)
# Calculate effective sample size (formula from Zieba2010, eq 12)
k = numpy.arange(1, n - 1)
r_k_sum = ((n - k[:, None, None]) / float(n)) * autocorr_func[1:]
n_eff = float(n) / (1 + 2 * r_k_sum.sum(axis=0))
# Calculate significance
var_x = data_subset.var(axis=0) / n_eff
tvals = (data_subset.mean(axis=0) - data_all.mean(axis=0)) / numpy.sqrt(var_x)
pvals = stats.t.sf(numpy.abs(tvals), n - 1) * 2 # two-sided pvalue = Prob(abs(t)>tt)
notes = "One sample t-test, with sample size adjusted for autocorrelation (Zieba2010, eq 12)"
pval_atts = {'standard_name': standard_name,
'long_name': standard_name,
'units': ' ',
'notes': notes,}
return pvals, pval_atts
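# Minimal usage sketch (synthetic data, purely illustrative; requires statsmodels):
#   subset = numpy.random.rand(120, 10, 20)   # (time, lat, lon)
#   full = numpy.random.rand(600, 10, 20)
#   pvals, atts = calc_significance(subset, full, 'p_value')
#   pvals.shape -> (10, 20)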
def coordinate_pairs(lat_axis, lon_axis):
"""Take the latitude and longitude values from given grid axes
and produce a flattened lat and lon array, with element-wise pairs
corresponding to every grid point."""
lon_mesh, lat_mesh = numpy.meshgrid(lon_axis, lat_axis) # This is the correct order
return lat_mesh.flatten(), lon_mesh.flatten()
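# Illustrative example:
#   coordinate_pairs([10, 20], [100, 110, 120]) ->
#   (array([10, 10, 10, 20, 20, 20]), array([100, 110, 120, 100, 110, 120]))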
def dict_filter(indict, key_list):
"""Filter dictionary according to specified keys."""
    return dict((key, value) for key, value in indict.items() if key in key_list)
def find_duplicates(inlist):
    """Return a dict mapping each duplicated item to the indices where it occurs."""
    D = defaultdict(list)
    for i, item in enumerate(inlist):
        D[item].append(i)
    D = {k: v for k, v in D.items() if len(v) > 1}
    return D
def find_nearest(array, value):
"""Find the closest array item to value."""
idx = (numpy.abs(numpy.array(array) - value)).argmin()
return array[idx]
def fix_label(label):
"""Fix axis label taken from the command line."""
replace_dict = {'_': ' ',
'degE': '$^{\circ}$E',
'ms-1': '$m s^{-1}$',
'm.s-1': '$m s^{-1}$',
'1000000 m2.s-1': '$10^6$m$^2$s$^{-1}$',
'kgkg-1': '$kg \: kg^{-1}$'
}
    for value, replacement in replace_dict.items():
label = label.replace(value, replacement)
return label
def get_threshold(data, threshold_str, axis=None):
"""Turn the user input threshold into a numeric threshold."""
if 'pct' in threshold_str:
value = float(re.sub('pct', '', threshold_str))
threshold_float = numpy.percentile(data, value, axis=axis)
else:
threshold_float = float(threshold_str)
return threshold_float
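# Illustrative examples:
#   get_threshold(numpy.arange(1, 11), '90pct') -> 9.1 (the 90th percentile)
#   get_threshold(numpy.arange(1, 11), '2.5') -> 2.5 (plain float threshold)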
def hi_lo(data_series, current_max, current_min):
"""Determine the new highest and lowest value."""
try:
highest = numpy.max(data_series)
except:
highest = max(data_series)
if highest > current_max:
new_max = highest
else:
new_max = current_max
try:
lowest = numpy.min(data_series)
except:
lowest = min(data_series)
if lowest < current_min:
new_min = lowest
else:
new_min = current_min
return new_max, new_min
def list_kwargs(func):
"""List keyword arguments of a function."""
details = inspect.getargspec(func)
nopt = len(details.defaults)
return details.args[-nopt:]
def match_dates(datetimes, datetime_axis):
"""Take list of datetimes and match with the corresponding datetimes in a time axis.
Args:
datetimes (list/tuple)
datetime_axis (list/tuple)
"""
    dates = list(map(split_dt, datetimes))
    date_axis = list(map(split_dt, datetime_axis[:]))
match_datetimes = []
miss_datetimes = []
for i in range(0, len(datetime_axis)):
if date_axis[i] in dates:
match_datetimes.append(datetime_axis[i])
else:
miss_datetimes.append(datetime_axis[i])
return match_datetimes, miss_datetimes
def split_dt(dt):
"""Split a numpy.datetime64 value so as to just keep the date part."""
return str(dt).split('T')[0]
def single2list(item, numpy_array=False):
"""Check if item is a list, then convert if not."""
if type(item) == list or type(item) == tuple or type(item) == numpy.ndarray:
output = item
elif type(item) == str:
output = [item,]
    else:
        try:
            len(item)
        except TypeError:
            output = [item,]
        else:
            output = list(item)
if numpy_array and not isinstance(output, numpy.ndarray):
return numpy.array(output)
else:
return output
def units_info(units):
"""Make the units taken from a file LaTeX math compliant.
This function particularly deals with powers:
e.g. 10^22 J
"""
index = units.find('^')
units = units[:index + 1] + '{' + units[index + 1:]
index = units.find('J')
units = units[:index - 1] + '}' + units[index - 1:]
tex_units = '$'+units+'$'
exponent = tex_units.split('}')[0].split('{')[1]
return tex_units, exponent
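# Illustrative example: units_info('10^22 J') -> ('$10^{22} J$', '22')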
| 30.515432
| 98
| 0.627794
|
3e6752a29d673de35a276a8caff96345f0c60760
| 2,590
|
py
|
Python
|
baseclasses/pyAero_geometry.py
|
akleb/baseclasses
|
8e8b9ee4b9e7d84e4e3f8fb28c81e80b8b459338
|
[
"Apache-2.0"
] | null | null | null |
baseclasses/pyAero_geometry.py
|
akleb/baseclasses
|
8e8b9ee4b9e7d84e4e3f8fb28c81e80b8b459338
|
[
"Apache-2.0"
] | null | null | null |
baseclasses/pyAero_geometry.py
|
akleb/baseclasses
|
8e8b9ee4b9e7d84e4e3f8fb28c81e80b8b459338
|
[
"Apache-2.0"
] | null | null | null |
"""
pyAero_geometry
Holds the Python Aerodynamic Analysis Classes (base and inherited).
"""
# =============================================================================
# Geometry Class
# =============================================================================
class Geometry(object):
"""
Abstract Class for Geometry Object
"""
def __init__(
self,
name={},
CGPercent=0.25,
ForeSparPercent=0.25,
RearSparPercent=0.75,
StaticMarginPercent=0.05,
ForeThickCon=0.01,
RearThickCon=0.99,
rootOffset=0.01,
tipOffset=0.01,
xRootec=0.0,
yRootec=0.0,
zRootec=0.0,
*args,
**kwargs,
):
"""
        Geometry Class Initialization
Keyword Arguments:
------------------
name -> STRING: Geometry Instance Name
Attributes:
-----------
"""
#
self.name = name
self.CGPercent = CGPercent
self.ForeSparPercent = ForeSparPercent
self.RearSparPercent = RearSparPercent
self.StaticMarginPercent = StaticMarginPercent
self.ForeThickCon = ForeThickCon
self.RearThickCon = RearThickCon
self.tipOffset = tipOffset
self.rootOffset = rootOffset
self.xRootec = xRootec
self.yRootec = yRootec
self.zRootec = zRootec
def ListAttributes(self):
"""
Print Structured Attributes List
"""
ListAttributes(self)
def __str__(self):
"""
Print Structured List of Variable
"""
return "name \n" + " " + str(self.name).center(9)
# ==============================================================================
#
# ==============================================================================
def ListAttributes(self):
"""
Print Structured Attributes List
"""
print("\n")
print("Attributes List of: " + repr(self.__dict__["name"]) + " - " + self.__class__.__name__ + " Instance\n")
    self_keys = sorted(self.__dict__.keys())
for key in self_keys:
if key != "name":
print(str(key) + " : " + repr(self.__dict__[key]))
# end
# end
print("\n")
# ==============================================================================
# Geometry Test
# ==============================================================================
if __name__ == "__main__":
print("Testing ...")
# Test Variable
geo = Geometry(name="test")
geo.ListAttributes()
print(geo)
| 23.981481
| 113
| 0.447876
|
8c5a04ffbe07fb95d8ad2f7cc9b8582166c756ac
| 4,077
|
py
|
Python
|
datasets/cals/__init__.py
|
LinguList/lexibank-data-old
|
7bf886597afa26863de8527dfd8529d9eb99fcd6
|
[
"Apache-2.0"
] | null | null | null |
datasets/cals/__init__.py
|
LinguList/lexibank-data-old
|
7bf886597afa26863de8527dfd8529d9eb99fcd6
|
[
"Apache-2.0"
] | null | null | null |
datasets/cals/__init__.py
|
LinguList/lexibank-data-old
|
7bf886597afa26863de8527dfd8529d9eb99fcd6
|
[
"Apache-2.0"
] | 1
|
2018-10-19T11:58:00.000Z
|
2018-10-19T11:58:00.000Z
|
# coding=utf-8
"""
Note: We run libreoffice to convert from doc to docx after download.
"""
from __future__ import unicode_literals, print_function
from subprocess import check_call
import re
from collections import defaultdict
from docx import Document
from clldutils.dsv import UnicodeWriter, reader
from clldutils.misc import slug
from clldutils.path import Path
from pylexibank.util import download_and_unpack_zipfiles
from pylexibank.dataset import CldfDataset
from pylexibank.lingpy_util import segmentize, iter_alignments
URL = 'https://ndownloader.figshare.com/articles/3443090/versions/1'
FNAME = 'Table_S2_Supplementary_Mennecier_et_al..doc'
COLOR_PATTERN = re.compile('fill="(?P<color>[^"]+)"')
def text_and_color(cell):
color = None
for line in cell._tc.tcPr.xml.split('\n'):
if 'w:shd' in line:
m = COLOR_PATTERN.search(line)
if m:
color = m.group('color')
break
if color == 'auto':
color = None
if color:
color = '#' + color + ' '
return '%s%s' % (color if color else '', cell.paragraphs[0].text)
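# Illustrative example: a cell shaded with <w:shd w:fill="FFFF00"/> whose paragraph
# text is "apple" yields "#FFFF00 apple"; an unshaded cell yields just "apple".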
def download(dataset, **kw):
def rp(*names):
return dataset.raw.joinpath(*names).as_posix()
download_and_unpack_zipfiles(URL, dataset, FNAME)
check_call(
'libreoffice --headless --convert-to docx %s --outdir %s' % (rp(FNAME), rp()),
shell=True)
doc = Document(rp(Path(FNAME).stem + '.docx'))
for i, table in enumerate(doc.tables):
with UnicodeWriter(rp('%s.csv' % (i + 1,))) as writer:
for row in table.rows:
writer.writerow(map(text_and_color, row.cells))
def get_loan_and_form(c):
if c.startswith('#'):
return c.split(' ', 1)
return None, c
def read_csv(fname, data):
concepts = None
for i, row in enumerate(reader(fname)):
if i == 0:
concepts = {j: c for j, c in enumerate(row[1:])}
else:
for j, c in enumerate(row[1:]):
                if j % 2 == 0:  # even columns hold the form (odd columns hold the cognate set)
loan, form = get_loan_and_form(c)
else:
if form.strip():
data[row[0]][concepts[j]] = (form, loan, c)
return data
def cldf(dataset, concepticon, **kw):
gcode = {x['ID']: x['GLOTTOCODE'] for x in dataset.languages}
ccode = {x.english: x.concepticon_id for x in
dataset.conceptlist.concepts.values()}
data = defaultdict(dict)
for fname in dataset.raw.glob('*.csv'):
read_csv(fname, data)
cognatesets = []
with CldfDataset(
(
'ID',
'Language_ID',
'Language_name',
'Parameter_ID',
'Parameter_name',
'Value',
'Segments'),
dataset) as ds:
for doculect, wl in data.items():
for concept, (form, loan, cogset) in wl.items():
wid = '%s-%s' % (slug(doculect), slug(concept))
if concept in ccode:
csid = ccode[concept]
elif concept.startswith('to ') and concept[3:] in ccode:
csid = ccode[concept[3:]]
else:
csid = None
ds.add_row([
wid,
gcode[doculect.split('-')[0]],
doculect,
csid,
concept,
form,
'',
])
if cogset:
cognatesets.append([
wid,
ds.name,
form,
'%s-%s' % (slug(concept), cogset),
False,
'expert',
'',
'',
'',
'',
])
segmentize(ds, clean=lambda s: s.split(' ~ ')[0])
dataset.cognates.extend(iter_alignments(ds, cognatesets, column='Segments'))
| 31.122137
| 86
| 0.509198
|
f99eea7aaf0bbb62146ddfb98078e1f23bb34bb0
| 1,160
|
py
|
Python
|
iriusrisk-python-client-lib/test/test_productsrefcomponentscomponent_reftestscwe_source.py
|
iriusrisk/iriusrisk-python-client-lib
|
4912706cd1e5c0bc555dbc7da02fb64cbeab3b18
|
[
"Apache-2.0"
] | null | null | null |
iriusrisk-python-client-lib/test/test_productsrefcomponentscomponent_reftestscwe_source.py
|
iriusrisk/iriusrisk-python-client-lib
|
4912706cd1e5c0bc555dbc7da02fb64cbeab3b18
|
[
"Apache-2.0"
] | null | null | null |
iriusrisk-python-client-lib/test/test_productsrefcomponentscomponent_reftestscwe_source.py
|
iriusrisk/iriusrisk-python-client-lib
|
4912706cd1e5c0bc555dbc7da02fb64cbeab3b18
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
IriusRisk API
Products API # noqa: E501
OpenAPI spec version: 1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import iriusrisk_python_client_lib
from iriusrisk_python_client_lib.models.productsrefcomponentscomponent_reftestscwe_source import ProductsrefcomponentscomponentReftestscweSource # noqa: E501
from iriusrisk_python_client_lib.rest import ApiException
class TestProductsrefcomponentscomponentReftestscweSource(unittest.TestCase):
"""ProductsrefcomponentscomponentReftestscweSource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testProductsrefcomponentscomponentReftestscweSource(self):
"""Test ProductsrefcomponentscomponentReftestscweSource"""
# FIXME: construct object with mandatory attributes with example values
# model = iriusrisk_python_client_lib.models.productsrefcomponentscomponent_reftestscwe_source.ProductsrefcomponentscomponentReftestscweSource() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 28.292683
| 166
| 0.781034
|
505d06958908e030648ea3aafe1169db48ac4d74
| 3,448
|
py
|
Python
|
tradingBot/tests/integration/helpers.py
|
yamf0/TradingBot
|
9e5b7fb8f537f33ece71c9caecc4c19f13e0fc02
|
[
"MIT"
] | 1
|
2020-11-10T17:17:23.000Z
|
2020-11-10T17:17:23.000Z
|
tradingBot/tests/integration/helpers.py
|
yamf0/TradingBot
|
9e5b7fb8f537f33ece71c9caecc4c19f13e0fc02
|
[
"MIT"
] | 1
|
2020-12-15T10:55:38.000Z
|
2020-12-15T10:55:38.000Z
|
tradingBot/tests/integration/helpers.py
|
yamf0/TradingBot
|
9e5b7fb8f537f33ece71c9caecc4c19f13e0fc02
|
[
"MIT"
] | null | null | null |
from tradingBot.src.utils.helpers import counter
from tradingBot.src.utils.helpers import mqttClientBase
import json
import subprocess
import signal
import psutil
import time
import os
import sys
sys.path.insert(0, r'')
class mqttTestclientClass(mqttClientBase):
##
# @class mqttTestclientClass
# @brief fixture for integration testing with MQTT broker
def __init__(self, name, mq):
super().__init__(name, mq)
self.var = None
self.msgFlag = False
self.killProc = False
def on_connect(self, client, userdata, flags, rc):
# @fn on_connect
        # @brief After broker connection
# @param client The name of the client class (e.g. "Counter")
# @param userdata TODO (e.g. None)
# @param flags TODO (e.g. {'session present': 0})
        # @param rc Broker error code (e.g. 0)
# TODO CHANGE PRINT
print("Broker returned error code: ", rc, self.name)
if rc == 0:
self.connected = True
def connect2Broker(self, port=51454):
# @fn connect2Broker
# @brief Connects to broker
print("\n<Connecting to broker>")
# connect to broker
self.client.connect(self.broker_address, port=port)
# start the loop and callbacks
self.client.loop_start()
while self.connected == False:
print("\nWaiting for connection to stablish")
time.sleep(0.01)
    # Patch for coinbots; does not affect other client funcs
def on_message(self, client, userdata, msg):
payload = msg.payload
topic = msg.topic
print("\n<<<Message from Server to {} on topic {}>>>".format(self.name, topic))
# create the list for the queue
task = [topic, payload]
if "OCHL" in topic:
self.var = json.loads(payload)
self.msgFlag = True
return
if "killProc" in topic:
print("MSG KILL RECEIVED")
self.killProc = True
return
self.mq.put(task)
class startMosquittoBroker():
##
# @class startMosquittoBroker
# @brief creates a mosquitto broker at specified port for testing
def __init__(self):
if "linux" in sys.platform:
self.mosquittoInstance = subprocess.Popen(
['sudo', 'mosquitto', '-p', '51454', '-d'], close_fds=True)
else:
self.mosquittoInstance = subprocess.Popen(
['mosquitto', '-p', '51454', '-v'], shell=True)
def killBroker(self):
##
# @fn killBroker
# @brief stops broker for terminating tests
if "linux" in sys.platform:
pass
else:
for proc in psutil.process_iter():
for conns in proc.connections(kind='inet'):
if conns.laddr.port == 51454 and conns.status == 'LISTEN' and proc.name() == 'mosquitto.exe':
proc.send_signal(signal.SIGTERM)
return
def __del__(self):
if not self.mosquittoInstance.poll():
self.mosquittoInstance.terminate()
print(self.mosquittoInstance.poll())
class MyOpen():
def open(self, path, mode):
f = open(os.path.join(".", "fixture_" + path), mode)
f.write("test")
return f
def close(self, file):
file.close()
if __name__ == "__main__":
MyOpen().open("test.txt", "w")
| 25.731343
| 113
| 0.582367
|
9988b230f99278a1dbcb299fcd8718fb63b3469f
| 6,066
|
py
|
Python
|
tests/functional/test_custom_log_plugin.py
|
AKhodus/adcm
|
98dbf22af3f1c6afa94505e9acaff0ac4088a602
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_custom_log_plugin.py
|
AKhodus/adcm
|
98dbf22af3f1c6afa94505e9acaff0ac4088a602
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_custom_log_plugin.py
|
AKhodus/adcm
|
98dbf22af3f1c6afa94505e9acaff0ac4088a602
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import allure
import pytest
from adcm_client.objects import ADCMClient
from adcm_pytest_plugin import utils
FORMAT_STORAGE = ["json_path", "json_content", 'txt_path', "txt_content"]
FIELD = ['name', 'format', 'storage_type']
@pytest.mark.parametrize("bundle", FIELD)
def test_required_fields(sdk_client_fs: ADCMClient, bundle):
"""Task should be failed if required field not presented"""
stack_dir = utils.get_data_dir(__file__, "required_fields", "no_{}".format(bundle))
bundle = sdk_client_fs.upload_from_fs(stack_dir)
cluster = bundle.cluster_create(utils.random_string())
task = cluster.action(name='custom_log').run()
task.wait()
with allure.step('Check job state'):
assert task.status == 'failed', "Current job status {}. Expected: failed".format(task.status)
with allure.step('Check if logs are equal 2'):
job = task.job()
logs = job.log_list()
assert len(logs) == 2, "Logs count not equal 2, current log count {}".format(len(logs))
@pytest.mark.parametrize("bundle", FORMAT_STORAGE)
def test_different_storage_types_with_format(sdk_client_fs: ADCMClient, bundle):
"""Check different combinations of storage and format"""
log_format = bundle.split("_")[0]
stack_dir = utils.get_data_dir(__file__, bundle)
bundle = sdk_client_fs.upload_from_fs(stack_dir)
cluster = bundle.cluster_create(utils.random_string())
task = cluster.action(name='custom_log').run()
task.wait()
with allure.step('Check if logs are equal 3, job state and logs'):
job = task.job()
logs = job.log_list()
log = job.log(job_id=job.id, log_id=logs[2].id)
assert len(logs) == 3, "Logs count {}. Expected 3".format(len(logs))
assert job.status == 'success', "Current job status {}. Expected: success".format(job.status)
err_msg = "Expected log format {}. Actual log format {}".format(log_format, log.format)
assert log.format == log_format, err_msg
assert log.type == 'custom'
def test_path_and_content(sdk_client_fs: ADCMClient):
"""If path and content presented we need to get path, not content"""
stack_dir = utils.get_data_dir(__file__, "path_and_content")
bundle = sdk_client_fs.upload_from_fs(stack_dir)
cluster = bundle.cluster_create(utils.random_string())
task = cluster.action(name='custom_log').run()
task.wait()
with allure.step('Check logs content and format'):
job = task.job()
logs = job.log_list()
log = job.log(job_id=job.id, log_id=logs[2].id)
assert log.content == '{\n "key": "value"\n}'
assert log.format == 'json'
@pytest.mark.parametrize("bundle", ['equal_pathes', 'equal_names', 'equal_pathes_and_names'])
def test_multiple_tasks(sdk_client_fs: ADCMClient, bundle):
"""Check situation when we have multiple tasks"""
stack_dir = utils.get_data_dir(__file__, bundle)
bundle = sdk_client_fs.upload_from_fs(stack_dir)
cluster = bundle.cluster_create(utils.random_string())
task = cluster.action(name='custom_log').run()
task.wait()
with allure.step('Check 4 logs entries'):
job = task.job()
logs = job.log_list()
assert len(logs) == 4, "Expected 4 logs entries, because 2 tasks in playbook"
def test_check_text_file_content(sdk_client_fs: ADCMClient):
"""Check that text content from file correct"""
stack_dir = utils.get_data_dir(__file__, "txt_path")
bundle = sdk_client_fs.upload_from_fs(stack_dir)
cluster = bundle.cluster_create(utils.random_string())
task = cluster.action(name='custom_log').run()
task.wait()
with allure.step('Check logs content and format'):
job = task.job()
logs = job.log_list()
log = job.log(job_id=job.id, log_id=logs[2].id)
assert log.content == 'Hello world!\n'
assert log.format == 'txt'
def test_check_text_content(sdk_client_fs: ADCMClient):
"""Check that text content correct"""
stack_dir = utils.get_data_dir(__file__, "txt_content")
bundle = sdk_client_fs.upload_from_fs(stack_dir)
cluster = bundle.cluster_create(utils.random_string())
task = cluster.action(name='custom_log').run()
task.wait()
with allure.step('Check logs content'):
job = task.job()
logs = job.log_list()
log = job.log(job_id=job.id, log_id=logs[2].id)
assert log.content == 'shalala'
def test_check_json_content(sdk_client_fs: ADCMClient):
"""Check that json content correct"""
stack_dir = utils.get_data_dir(__file__, "json_content")
bundle = sdk_client_fs.upload_from_fs(stack_dir)
cluster = bundle.cluster_create(utils.random_string())
task = cluster.action(name='custom_log').run()
task.wait()
with allure.step('Check logs content'):
job = task.job()
logs = job.log_list()
log = job.log(job_id=job.id, log_id=logs[2].id)
assert log.content == '{\n "hello": "world"\n}'
def test_incorrect_syntax_for_fields(sdk_client_fs: ADCMClient):
"""Check if we have not json in content"""
stack_dir = utils.get_data_dir(__file__, "syntax_for_fields")
bundle = sdk_client_fs.upload_from_fs(stack_dir)
cluster = bundle.cluster_create(utils.random_string())
task = cluster.action(name='custom_log').run()
task.wait()
with allure.step('Check logs content'):
job = task.job()
logs = job.log_list()
log = job.log(job_id=job.id, log_id=logs[2].id)
assert log.content == '{1: "world"}'
| 43.021277
| 101
| 0.688922
|
d7c7bb67a78821caa0c24368826735d09195f713
| 1,367
|
py
|
Python
|
reminder.py
|
CrEaTiiOn187/tg-management-bot
|
554dbdd2134c14466baff3f3d448ac66f54ced03
|
[
"MIT"
] | 3
|
2021-10-21T18:16:37.000Z
|
2022-03-10T18:24:55.000Z
|
reminder.py
|
NoodleSoup/tg-management-bot
|
554dbdd2134c14466baff3f3d448ac66f54ced03
|
[
"MIT"
] | null | null | null |
reminder.py
|
NoodleSoup/tg-management-bot
|
554dbdd2134c14466baff3f3d448ac66f54ced03
|
[
"MIT"
] | 1
|
2020-07-15T07:43:55.000Z
|
2020-07-15T07:43:55.000Z
|
def reminder(bot, update, job_queue):
#from telegram.ext import Updater
#updater = Updater(token="698907268:AAGQE2j1nGV1vWzYeiANn8x_G7U6IWiilZY")
#j = updater.job_queue
msg = update.message
chat_id = msg.chat_id
user = bot.get_chat_member(chat_id = chat_id , user_id = msg.from_user.id)['status']
if user in ['administrator', 'creator']:
text = msg.text.split(" ",1)
if len(text)>1:
text = text[1].split(" ",1)
if len(text)>1:
time = int(text[0])
text = text[1]
bot.send_message(chat_id = chat_id, text = f"Set a reminder for {time} seconds", reply_to_message_id = msg.message_id)
def send_reminder(bot , job_queue):
bot.send_message(chat_id = chat_id, text = text)
job = job_queue.run_once(send_reminder, time)
else:
bot.send_message(chat_id = msg.chat_id,
text = "*Format:*\n_/reminder time (in seconds) text_",
reply_to_message_id = msg.message_id,
parse_mode = 'Markdown')
else:
bot.send_message(chat_id = msg.chat_id,
text = "*Format:*\n_/reminder time (in seconds) text_",
reply_to_message_id = msg.message_id,
parse_mode = 'Markdown')
else:
bot.send_message(chat_id = msg.chat_id,
text = "Fuck off, you aren't admin.",
reply_to_message_id = msg.message_id,
parse_mode = 'Markdown')
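# Added usage note (illustrative): an admin sending "/reminder 60 Stand up!"
# gets the confirmation "Set a reminder for 60 seconds", and the bot posts
# "Stand up!" in the same chat once the 60-second job fires.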
| 35.973684
| 123
| 0.648135
|
2b75497001575f3ef3aabf4047d9edb51e621378
| 2,209
|
py
|
Python
|
.venv/lib/python3.6/site-packages/sphinx/environment/collectors/dependencies.py
|
danielltm/sample
|
9f8dc92653bf26ae08451a2bfc1491b71cc31502
|
[
"MIT"
] | 1
|
2019-03-26T03:07:13.000Z
|
2019-03-26T03:07:13.000Z
|
.venv/lib/python3.6/site-packages/sphinx/environment/collectors/dependencies.py
|
danielltm/sample
|
9f8dc92653bf26ae08451a2bfc1491b71cc31502
|
[
"MIT"
] | 301
|
2020-10-03T10:46:31.000Z
|
2022-03-27T23:46:23.000Z
|
.venv/lib/python3.6/site-packages/sphinx/environment/collectors/dependencies.py
|
danielltm/sample
|
9f8dc92653bf26ae08451a2bfc1491b71cc31502
|
[
"MIT"
] | 1
|
2020-07-24T17:53:25.000Z
|
2020-07-24T17:53:25.000Z
|
# -*- coding: utf-8 -*-
"""
sphinx.environment.collectors.dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The dependencies collector components for sphinx.environment.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from docutils.utils import relative_path
from sphinx.environment.collectors import EnvironmentCollector
from sphinx.util.osutil import getcwd, fs_encoding
if False:
# For type annotation
from typing import Dict, Set # NOQA
from docutils import nodes # NOQA
    from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
class DependenciesCollector(EnvironmentCollector):
"""dependencies collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
# type: (Sphinx, BuildEnvironment, unicode) -> None
env.dependencies.pop(docname, None)
def merge_other(self, app, env, docnames, other):
# type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
for docname in docnames:
if docname in other.dependencies:
env.dependencies[docname] = other.dependencies[docname]
def process_doc(self, app, doctree):
# type: (Sphinx, nodes.Node) -> None
"""Process docutils-generated dependency info."""
cwd = getcwd()
frompath = path.join(path.normpath(app.srcdir), 'dummy')
deps = doctree.settings.record_dependencies
if not deps:
return
for dep in deps.list:
# the dependency path is relative to the working dir, so get
# one relative to the srcdir
if isinstance(dep, bytes):
dep = dep.decode(fs_encoding)
relpath = relative_path(frompath,
path.normpath(path.join(cwd, dep)))
app.env.dependencies[app.env.docname].add(relpath)
def setup(app):
# type: (Sphinx) -> Dict
app.add_env_collector(DependenciesCollector)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| 32.970149
| 82
| 0.637845
|
01e7bc37e742fcb0bac0879f5d353ba87fd80390
| 2,933
|
py
|
Python
|
authapp/views.py
|
dmitricus/robotshop
|
4c89a70e9191a8073efc1f2709bea4b79c0a6532
|
[
"MIT"
] | null | null | null |
authapp/views.py
|
dmitricus/robotshop
|
4c89a70e9191a8073efc1f2709bea4b79c0a6532
|
[
"MIT"
] | null | null | null |
authapp/views.py
|
dmitricus/robotshop
|
4c89a70e9191a8073efc1f2709bea4b79c0a6532
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, HttpResponseRedirect
from authapp.forms import ShopUserLoginForm
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from basketapp.models import Basket
from authapp.forms import ShopUserRegisterForm
from authapp.forms import ShopUserEditForm
import os, json
JSON_PATH = 'mainapp/json'
def loadFromJSON(file_name):
with open(os.path.join(JSON_PATH, file_name + '.json'), 'r', encoding='utf-8') as infile:
return json.load(infile)
def getBasket(user):
if user.is_authenticated:
return Basket.objects.filter(user=user)
else:
return []
def login(request):
    title = 'вход в систему'  # "sign in"
links_menu = loadFromJSON("links_menu")
login_form = ShopUserLoginForm(data=request.POST or None)
next = request.GET['next'] if 'next' in request.GET.keys() else ''
if request.method == 'POST' and login_form.is_valid():
username = request.POST['username']
password = request.POST['password']
user = auth.authenticate(username=username, password=password)
if user and user.is_active:
auth.login(request, user)
if 'next' in request.POST.keys():
return HttpResponseRedirect(request.POST['next'])
else:
return HttpResponseRedirect(reverse('main'))
content = {
'title': title,
'login_form': login_form,
'links_menu': links_menu,
'next': next,
}
return render(request, 'authapp/login.html', content)
def logout(request):
auth.logout(request)
return HttpResponseRedirect(reverse('main'))
def register(request):
    title = 'регистрация'  # "registration"
links_menu = loadFromJSON("links_menu")
if request.method == 'POST':
register_form = ShopUserRegisterForm(request.POST, request.FILES)
if register_form.is_valid():
register_form.save()
return HttpResponseRedirect(reverse('auth:login'))
else:
register_form = ShopUserRegisterForm()
content = {
'title': title,
'register_form': register_form,
'links_menu': links_menu,
}
return render(request, 'authapp/register.html', content)
def edit(request):
    title = 'редактирование'  # "editing"
basket = getBasket(request.user)
links_menu = loadFromJSON("links_menu")
if request.method == 'POST':
edit_form = ShopUserEditForm(request.POST, request.FILES, instance=request.user)
if edit_form.is_valid():
edit_form.save()
return HttpResponseRedirect(reverse('auth:edit'))
else:
edit_form = ShopUserEditForm(instance=request.user)
content = {
'title': title,
'edit_form': edit_form,
'links_menu': links_menu,
'basket': basket,
}
return render(request, 'authapp/edit.html', content)
| 27.411215
| 93
| 0.666553
|
3e8d819394c76d55d1abbc440e7ac91f0f95427a
| 5,088
|
py
|
Python
|
python/ray/autoscaler/local/node_provider.py
|
andyljones/ray
|
52dfde1cbb7131fd62ebcb00f5a2b22ced7321ad
|
[
"Apache-2.0"
] | 1
|
2019-11-04T05:08:47.000Z
|
2019-11-04T05:08:47.000Z
|
python/ray/autoscaler/local/node_provider.py
|
collinswei/ray
|
2e30f7ba386e716bf80f019dcd473b67d83abb95
|
[
"Apache-2.0"
] | null | null | null |
python/ray/autoscaler/local/node_provider.py
|
collinswei/ray
|
2e30f7ba386e716bf80f019dcd473b67d83abb95
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from filelock import FileLock
from threading import RLock
import json
import os
import socket
import logging
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import TAG_RAY_NODE_TYPE
logger = logging.getLogger(__name__)
filelock_logger = logging.getLogger("filelock")
filelock_logger.setLevel(logging.WARNING)
class ClusterState(object):
def __init__(self, lock_path, save_path, provider_config):
self.lock = RLock()
self.file_lock = FileLock(lock_path)
self.save_path = save_path
with self.lock:
with self.file_lock:
if os.path.exists(self.save_path):
workers = json.loads(open(self.save_path).read())
else:
workers = {}
logger.info("ClusterState: "
"Loaded cluster state: {}".format(workers))
for worker_ip in provider_config["worker_ips"]:
if worker_ip not in workers:
workers[worker_ip] = {
"tags": {
TAG_RAY_NODE_TYPE: "worker"
},
"state": "terminated",
}
else:
assert workers[worker_ip]["tags"][
TAG_RAY_NODE_TYPE] == "worker"
if provider_config["head_ip"] not in workers:
workers[provider_config["head_ip"]] = {
"tags": {
TAG_RAY_NODE_TYPE: "head"
},
"state": "terminated",
}
else:
assert workers[provider_config["head_ip"]]["tags"][
TAG_RAY_NODE_TYPE] == "head"
assert len(workers) == len(provider_config["worker_ips"]) + 1
with open(self.save_path, "w") as f:
logger.info("ClusterState: "
"Writing cluster state: {}".format(workers))
f.write(json.dumps(workers))
def get(self):
with self.lock:
with self.file_lock:
workers = json.loads(open(self.save_path).read())
return workers
def put(self, worker_id, info):
assert "tags" in info
assert "state" in info
with self.lock:
with self.file_lock:
workers = self.get()
workers[worker_id] = info
with open(self.save_path, "w") as f:
logger.info("ClusterState: "
"Writing cluster state: {}".format(workers))
f.write(json.dumps(workers))
class LocalNodeProvider(NodeProvider):
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
self.state = ClusterState("/tmp/cluster-{}.lock".format(cluster_name),
"/tmp/cluster-{}.state".format(cluster_name),
provider_config)
def nodes(self, tag_filters):
workers = self.state.get()
matching_ips = []
for worker_ip, info in workers.items():
if info["state"] == "terminated":
continue
ok = True
for k, v in tag_filters.items():
if info["tags"].get(k) != v:
ok = False
break
if ok:
matching_ips.append(worker_ip)
return matching_ips
def is_running(self, node_id):
return self.state.get()[node_id]["state"] == "running"
def is_terminated(self, node_id):
return not self.is_running(node_id)
def node_tags(self, node_id):
return self.state.get()[node_id]["tags"]
def external_ip(self, node_id):
return socket.gethostbyname(node_id)
def internal_ip(self, node_id):
return socket.gethostbyname(node_id)
def set_node_tags(self, node_id, tags):
with self.state.file_lock:
info = self.state.get()[node_id]
info["tags"].update(tags)
self.state.put(node_id, info)
def create_node(self, node_config, tags, count):
node_type = tags[TAG_RAY_NODE_TYPE]
with self.state.file_lock:
workers = self.state.get()
for node_id, info in workers.items():
if (info["state"] == "terminated"
and info["tags"][TAG_RAY_NODE_TYPE] == node_type):
info["tags"] = tags
info["state"] = "running"
self.state.put(node_id, info)
return
def terminate_node(self, node_id):
workers = self.state.get()
info = workers[node_id]
info["state"] = "terminated"
self.state.put(node_id, info)
| 36.085106
| 79
| 0.527516
|
c8b81cdf6477edf579fa365ca104420e87361e39
| 9,478
|
py
|
Python
|
tools/codegen/utils.py
|
ljhOfGithub/pytorch
|
c568f7b16f2a98d72ff5b7c6c6161b67b2c27514
|
[
"Intel"
] | 1
|
2022-03-29T00:44:31.000Z
|
2022-03-29T00:44:31.000Z
|
tools/codegen/utils.py
|
ljhOfGithub/pytorch
|
c568f7b16f2a98d72ff5b7c6c6161b67b2c27514
|
[
"Intel"
] | null | null | null |
tools/codegen/utils.py
|
ljhOfGithub/pytorch
|
c568f7b16f2a98d72ff5b7c6c6161b67b2c27514
|
[
"Intel"
] | 1
|
2022-03-28T21:49:41.000Z
|
2022-03-28T21:49:41.000Z
|
import contextlib
import functools
import hashlib
import os
import re
import textwrap
from argparse import Namespace
from typing import Tuple, List, Iterable, Iterator, Callable, Sequence, TypeVar, Optional, Dict, Any, Union, Set, NoReturn
from enum import Enum
from tools.codegen.code_template import CodeTemplate
# Safely load fast C Yaml loader/dumper if they are available
try:
from yaml import CSafeLoader as Loader
except ImportError:
from yaml import SafeLoader as Loader # type: ignore[misc]
try:
from yaml import CSafeDumper as Dumper
except ImportError:
from yaml import SafeDumper as Dumper # type: ignore[misc]
YamlDumper = Dumper
# A custom loader for YAML that errors on duplicate keys.
# This doesn't happen by default: see https://github.com/yaml/pyyaml/issues/165
class YamlLoader(Loader):
def construct_mapping(self, node, deep=False): # type: ignore[no-untyped-def]
mapping = []
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep) # type: ignore[no-untyped-call]
assert key not in mapping, f"Found a duplicate key in the yaml. key={key}, line={node.start_mark.line}"
mapping.append(key)
mapping = super().construct_mapping(node, deep=deep) # type: ignore[no-untyped-call]
return mapping
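# Added example (illustrative, not part of the original file):
#     yaml.load("a: 1\na: 2", Loader=YamlLoader)
# raises an AssertionError naming the duplicated key "a", whereas the stock
# SafeLoader would silently keep only the last value.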
# Many of these functions share logic for defining both the definition
# and declaration (for example, the function signature is the same), so
# we organize them into one function that takes a Target to say which
# code we want.
#
# This is an OPEN enum (we may add more cases to it in the future), so be sure
# to explicitly specify with Union[Literal[Target.XXX]] what targets are valid
# for your use.
Target = Enum('Target', (
# top level namespace (not including at)
'DEFINITION',
'DECLARATION',
# TORCH_LIBRARY(...) { ... }
'REGISTRATION',
# namespace { ... }
'ANONYMOUS_DEFINITION',
# namespace cpu { ... }
'NAMESPACED_DEFINITION',
'NAMESPACED_DECLARATION',
))
# Matches "foo" in "foo, bar" but not "foobar". Used to search for the
# occurrence of a parameter in the derivative formula
IDENT_REGEX = r'(^|\W){}($|\W)'
# TODO: Use a real parser here; this will get bamboozled
def split_name_params(schema: str) -> Tuple[str, List[str]]:
m = re.match(r'(\w+)(\.\w+)?\((.*)\)', schema)
if m is None:
raise RuntimeError(f'Unsupported function schema: {schema}')
name, _, params = m.groups()
return name, params.split(', ')
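# Added example (illustrative): for a schema string such as
#     split_name_params('add.out(Tensor self, Tensor other)')
# the regex yields ('add', ['Tensor self', 'Tensor other']); the overload
# suffix ('.out') is matched but not returned as part of the name.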
T = TypeVar('T')
S = TypeVar('S')
# These two functions purposely return generators in analogy to map()
# so that you don't mix up when you need to list() them
# Map over function that may return None; omit Nones from output sequence
def mapMaybe(func: Callable[[T], Optional[S]], xs: Iterable[T]) -> Iterator[S]:
for x in xs:
r = func(x)
if r is not None:
yield r
# Map over function that returns sequences and cat them all together
def concatMap(func: Callable[[T], Sequence[S]], xs: Iterable[T]) -> Iterator[S]:
for x in xs:
for r in func(x):
yield r
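# Added examples (illustrative):
#     list(mapMaybe(lambda x: x * 2 if x > 0 else None, [-1, 2, 3])) -> [4, 6]
#     list(concatMap(lambda x: [x, x], [1, 2])) -> [1, 1, 2, 2]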
# Conveniently add error context to exceptions raised. Lets us
# easily say that an error occurred while processing a specific
# context.
@contextlib.contextmanager
def context(msg_fn: Callable[[], str]) -> Iterator[None]:
try:
yield
except Exception as e:
# TODO: this does the wrong thing with KeyError
msg = msg_fn()
msg = textwrap.indent(msg, ' ')
msg = f'{e.args[0]}\n{msg}' if e.args else msg
e.args = (msg,) + e.args[1:]
raise
# A little trick from https://github.com/python/mypy/issues/6366
# for getting mypy to do exhaustiveness checking
# TODO: put this somewhere else, maybe
def assert_never(x: NoReturn) -> NoReturn:
raise AssertionError("Unhandled type: {}".format(type(x).__name__))
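# Added note (illustrative): in an if/elif chain that handles every member of
# an enum (such as Target above), calling assert_never(x) in the final else
# branch makes mypy report an error as soon as a new member is added but left
# unhandled, while at runtime it simply raises AssertionError.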
@functools.lru_cache(maxsize=None)
def _read_template(template_fn: str) -> CodeTemplate:
return CodeTemplate.from_file(template_fn)
# String hash that's stable across different executions, unlike builtin hash
def string_stable_hash(s: str) -> int:
sha1 = hashlib.sha1(s.encode('latin1')).digest()
return int.from_bytes(sha1, byteorder='little')
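# Added note (illustrative): unlike the builtin hash(), the result is identical
# across interpreter runs, so e.g. string_stable_hash('aten::add') % num_shards
# always picks the same shard, which is what write_sharded() below relies on.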
# A small abstraction for writing out generated files and keeping track
# of what files have been written (so you can write out a list of output
# files)
class FileManager:
install_dir: str
template_dir: str
dry_run: bool
filenames: Set[str]
def __init__(self, install_dir: str, template_dir: str, dry_run: bool) -> None:
self.install_dir = install_dir
self.template_dir = template_dir
self.filenames = set()
self.dry_run = dry_run
def _write_if_changed(self, filename: str, contents: str) -> None:
old_contents: Optional[str]
try:
with open(filename, 'r') as f:
old_contents = f.read()
except IOError:
old_contents = None
if contents != old_contents:
# Create output directory if it doesn't exist
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as f:
f.write(contents)
def write_with_template(self, filename: str, template_fn: str,
env_callable: Callable[[], Union[str, Dict[str, Any]]]) -> None:
filename = '{}/{}'.format(self.install_dir, filename)
        assert filename not in self.filenames, f"duplicate file write {filename}"
self.filenames.add(filename)
if not self.dry_run:
env = env_callable()
if isinstance(env, dict):
# TODO: Update the comment reference to the correct location
if 'generated_comment' not in env:
comment = "@" + "generated by tools/codegen/gen.py"
comment += " from {}".format(os.path.basename(template_fn))
env['generated_comment'] = comment
template = _read_template(os.path.join(self.template_dir, template_fn))
self._write_if_changed(filename, template.substitute(env))
elif isinstance(env, str):
self._write_if_changed(filename, env)
else:
assert_never(env)
    def write(self, filename: str, env_callable: Callable[[], Union[str, Dict[str, Any]]]) -> None:
self.write_with_template(filename, filename, env_callable)
def write_sharded(
self,
filename: str,
items: Iterable[T],
*,
key_fn: Callable[[T], str],
env_callable: Callable[[T], Dict[str, List[str]]],
num_shards: int,
base_env: Optional[Dict[str, Any]] = None,
sharded_keys: Set[str]
) -> None:
everything: Dict[str, Any] = {'shard_id': 'Everything'}
shards: List[Dict[str, Any]] = [{'shard_id': f'_{i}'} for i in range(num_shards)]
all_shards = [everything] + shards
if base_env is not None:
for shard in all_shards:
shard.update(base_env)
for key in sharded_keys:
for shard in all_shards:
if key in shard:
assert isinstance(shard[key], list), "sharded keys in base_env must be a list"
shard[key] = shard[key].copy()
else:
shard[key] = []
def merge_env(into: Dict[str, List[str]], from_: Dict[str, List[str]]) -> None:
for k, v in from_.items():
assert k in sharded_keys, f"undeclared sharded key {k}"
into[k] += v
if self.dry_run:
# Dry runs don't write any templates, so incomplete environments are fine
items = ()
for item in items:
key = key_fn(item)
sid = string_stable_hash(key) % num_shards
env = env_callable(item)
merge_env(shards[sid], env)
merge_env(everything, env)
dot_pos = filename.rfind('.')
if dot_pos == -1:
dot_pos = len(filename)
base_filename = filename[:dot_pos]
extension = filename[dot_pos:]
for shard in all_shards:
shard_id = shard['shard_id']
self.write_with_template(f"{base_filename}{shard_id}{extension}",
filename,
lambda: shard)
# filenames is used to track compiled files, but FooEverything.cpp isn't meant to be compiled
self.filenames.discard(
f"{self.install_dir}/{base_filename}Everything{extension}")
def write_outputs(self, variable_name: str, filename: str) -> None:
"""Write a file containing the list of all outputs which are
generated by this script."""
content = 'set({}\n {})'.format(
variable_name, '\n '.join('"' + name + '"' for name in sorted(self.filenames)))
self._write_if_changed(filename, content)
# Helper function to generate file manager
def make_file_manager(options: Namespace, install_dir: Optional[str] = None) -> FileManager:
template_dir = os.path.join(options.source_path, "templates")
install_dir = install_dir if install_dir else options.install_dir
return FileManager(install_dir=install_dir, template_dir=template_dir, dry_run=options.dry_run)
| 38.528455
| 122
| 0.631779
|
e50203a4e9a5d188e8e4bd71563446b8604bb534
| 8,561
|
py
|
Python
|
caf/scripts/RsaPersCom.py
|
sduprey/open_data_platform
|
a20b5ab6a1ca99ae8aef11ac74a314114144d291
|
[
"MIT"
] | 2
|
2019-12-03T13:35:31.000Z
|
2019-12-03T13:35:37.000Z
|
caf/scripts/RsaPersCom.py
|
sduprey/open_data_platform
|
a20b5ab6a1ca99ae8aef11ac74a314114144d291
|
[
"MIT"
] | null | null | null |
caf/scripts/RsaPersCom.py
|
sduprey/open_data_platform
|
a20b5ab6a1ca99ae8aef11ac74a314114144d291
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pandas as pd
import glob
#Variable Type Label
#Communes Char NAME OF THE MUNICIPALITY
#Codes_Insee Char MUNICIPALITY NUMBER, INSEE CODE
#NB_Pers_par_Foyer_Alloc Num NUMBER OF PERSONS COVERED BY A FAMILY-BRANCH BENEFIT
#NB_Pers_couv_RSA Num NUMBER OF PERSONS COVERED BY THE RSA
#RSA_SOCLE_non_Majore_Pers_couv Num NUMBER OF PERSONS COVERED BY THE PAYABLE RSA SOCLE WITHOUT INCREASE
#RSA_SOCLE_Majore_Pers_couv Num NUMBER OF PERSONS COVERED BY THE PAYABLE RSA SOCLE WITH INCREASE
#RSA_activite_Pers_couv Num NUMBER OF PERSONS COVERED BY THE PAYABLE RSA ACTIVITE
#
#***********DESCRIPTION OF THE BENEFIT***********
#
#The RSA is a means-tested benefit paid monthly on the basis of the previous quarter's resources. In force since 1 June 2009
#in metropolitan France and since 1 January 2011 in the overseas departments, it replaces the minimum integration income (RMI)
#and the single-parent allowance (API) for people without employment. It gives a financial incentive to people without
#resources who return to work (the RSA guarantees that someone who takes up a job again sees their income rise).
#Finally, it tops up the resources of people whose professional activity provides only limited income.
#
#The RSA is paid without time limit, as long as the beneficiary's income stays below the maximum RSA amount. The amount paid
#may change if the household's family situation, employment situation or resources change. The RSA has three components: the
#"RSA socle", the "RSA socle et activité" and the "RSA activité". The RSA therefore covers a broad population, from households
#with no resources at all to people whose earned income is close to the minimum wage (Smic). A household receiving the
#"RSA socle seul" has no earned income (however, on returning to work, the beneficiary may combine wages and allowance for
#three months). Beneficiaries of the "RSA socle et activité" have low earned income and total resources below the flat-rate
#amount. Those of the "RSA activité seul" have low earned income and total resources above the flat-rate amount.
#Two categories of recipients can combine with the RSA components:
#
#-people who are pregnant or raising at least one child alone receive the "RSA majoré" (referred to as "socle majoré",
#"socle et activité majoré" or "activité majoré")
#
#-RSA beneficiaries who do not belong to the "RSA majoré" category fall under the "non majoré" RSA. Within the "non majoré"
#category one can distinguish the "RSA jeune" group, which concerns young people under 25 who are single and without dependent
#children, paid subject to a prior-activity condition (two years of work over the last three years).
#
#***********REMARKS***********
#
#1) The beneficiary household is made up of the file holder (the person who receives at least one benefit given their family
#and/or financial situation) and all other entitled persons within the meaning of the regulations in force (spouse, child(ren)
#and other dependent person(s)).
#Several beneficiary households may live at the same place; they then form a single household in the sense of the Insee
#statistical definition. This is the case, for example, when a couple receives housing allowances and houses their child who
#holds a Caf minimum social benefit (RSO, RSA, AAH).
#In practice, the term "allocataire" is often used instead of "beneficiary household".
#
#2) A payable entitlement means that the beneficiary household meets all the conditions to actually be paid for the month of
#observation. In particular, this excludes beneficiaries who have not supplied all of their supporting documents, and those
#whose benefit amount is below the payment threshold.
#
#3) From 2014 onwards, the geographical scope of the dataset corresponds to the household's municipality of residence as
#recorded in the statistical file of beneficiaries extracted at the start of year N+1, whatever the managing Caf. The first
#two lines of the file cover beneficiaries living abroad (code 99999) and beneficiaries whose municipality
#of residence is unknown (code XXXXX).
#In 2012 and 2013, residents abroad were not counted.
#In 2009, 2010 and 2011, the first line (code 99999) covers, in addition to residents abroad, all beneficiaries living in a
#municipality outside the territory of their managing Caf.
#
#4) A blank ' ' appears in two cases: either the information is missing, or statistical disclosure control was applied.
#Statistical secrecy is applied to every value below 5. In addition, to prevent some missing values from being deduced from
#other values by cross-checking (for example, by difference from the total when a single value is missing), statistical
#secrecy is also applied to other values.
#
#5) In the resources associated with this description, the expression "RSA SOCLE" refers to the classic "RSA socle seul" and
#"RSA socle et activité".
#
#
#***********File names***********
#
#RSA_Perscouv_Com_XXXX.csv
#where XXXX is the reference year
#
#***********Additional information***********
#
#Source: Cnaf, FILEAS file and BASE COMMUNALE ALLOCATAIRES (BCA)
#Release frequency: Annual
#Temporal granularity: Month
#Unit: Persons covered
#Scope: France, general scheme + agricultural scheme in the overseas departments (Dom)
#Geographical level: Municipality
#
#
#***********LINKS***********
#
#More information is available on the family branch website: http://www.caf.fr/aides-et-services/s-informer-sur-les-aides/solidarite-et-insertion/
#and in the Cahier des données sociales: http://www.caf.fr/etudes-et-statistiques/publications/cahier-des-donnees-sociales
df = pd.read_csv('source/RsaPersCom2009.csv', sep=";")
df.columns = ['Communes', 'Codes_Insee', 'NB_Pers_par_Foyer_Alloc_2009',
'NB_Pers_couv_RSA_2009', 'RSA_SOCLE_non_Majore_Pers_couv_2009',
'RSA_SOCLE_Majore_Pers_couv_2009', 'RSA_activite_Pers_couv_2009']
files = glob.glob('source/RsaPersCom*')
for path_file in files:
year = str(path_file[-8:-4])
if (year != '2009'):
df_temp = pd.read_csv(path_file, sep=';')
# Rename Col with year
year_col = ['Communes', 'Codes_Insee']
features_col = []
for col in df_temp.columns[2:]:
year_col.append(col +"_"+ year)
features_col.append(col +"_"+ year)
        # Add key for merging
features_col.append('Codes_Insee')
df_temp.columns = year_col
df = pd.merge(df, df_temp[features_col], how='inner', on='Codes_Insee')
# Rename columns to have unique names in the future merge
list_col = []
for col in df.columns:
if col[0:15] in 'NB_Pers_par_Foyer':
list_col.append(col+"_RPC") # RPC = RsaPersCom
else:
list_col.append(col)
df.columns = list_col
df.to_csv('data/full_RsaPersCom.csv', encoding='utf-8', index=False)
## Features
#u'NB_Pers_par_Foyer_Alloc_2009_RPC',
# u'NB_Pers_couv_RSA_2009', u'RSA_SOCLE_non_Majore_Pers_couv_2009',
# u'RSA_SOCLE_Majore_Pers_couv_2009', u'RSA_activite_Pers_couv_2009',
# u'NB_Pers_par_Foyer_Alloc_2010_RPC', u'NB_Pers_couv_RSA_2010',
# u'RSA_SOCLE_non_Majore_Pers_couv_2010',
# u'RSA_SOCLE_Majore_Pers_couv_2010', u'RSA_activite_Pers_couv_2010',
# u'NB_Pers_par_Foyer_Alloc_2011_RPC', u'NB_Pers_couv_RSA_2011',
# u'RSA_SOCLE_non_Majore_Pers_couv_2011',
# u'RSA_SOCLE_Majore_Pers_couv_2011', u'RSA_activite_Pers_couv_2011',
# u'NB_Pers_par_Foyer_Alloc_2012_RPC', u'NB_Pers_couv_RSA_2012',
# u'RSA_SOCLE_non_Majore_Pers_couv_2012',
# u'RSA_SOCLE_Majore_Pers_couv_2012', u'RSA_activite_Pers_couv_2012',
# u'NB_Pers_par_Foyer_Alloc_2013_RPC', u'NB_Pers_couv_RSA_2013',
# u'RSA_SOCLE_non_Majore_Pers_couv_2013',
# u'RSA_SOCLE_Majore_Pers_couv_2013', u'RSA_activite_Pers_couv_2013',
# u'NB_Pers_par_Foyer_Alloc_2014_RPC', u'NB_Pers_couv_RSA_2014',
# u'RSA_SOCLE_non_Majore_Pers_couv_2014',
# u'RSA_SOCLE_Majore_Pers_couv_2014', u'RSA_activite_Pers_couv_2014'
| 58.238095
| 151
| 0.759257
|
d0fb32ba199fcc42c37311042d9572cf8c1a397e
| 227
|
py
|
Python
|
server/constRPC.py
|
CostaDiego/aws-server-client-py
|
8dca81fd2a629c4ec8ee6448d3a4dbb31ef16234
|
[
"MIT"
] | null | null | null |
server/constRPC.py
|
CostaDiego/aws-server-client-py
|
8dca81fd2a629c4ec8ee6448d3a4dbb31ef16234
|
[
"MIT"
] | null | null | null |
server/constRPC.py
|
CostaDiego/aws-server-client-py
|
8dca81fd2a629c4ec8ee6448d3a4dbb31ef16234
|
[
"MIT"
] | null | null | null |
OK = '1'
ADD = '2'
APPEND = '3'
GETVALUE = '4'
CREATE = '5'
STOP = '6'
HOST = '172.31.88.77'
PORT = 12304
CLIENT1 = 12305
CLIENT2 = 12306
HOSTCL1 = '172.31.89.146'
HOSTCL2 = '172.31.87.222'
| 17.461538
| 26
| 0.506608
|
109c01297b35e5e88118b4b36dfd87ee82bf2393
| 2,964
|
py
|
Python
|
omega_converter/converter_functions/road/structural_object.py
|
lu-w/criticality-recognition
|
5ad2e12699ad4bf2d7f60ce9e30f26110adce436
|
[
"MIT"
] | 4
|
2022-03-13T19:33:43.000Z
|
2022-03-15T22:20:36.000Z
|
omega_converter/converter_functions/road/structural_object.py
|
lu-w/criticality-recognition
|
5ad2e12699ad4bf2d7f60ce9e30f26110adce436
|
[
"MIT"
] | null | null | null |
omega_converter/converter_functions/road/structural_object.py
|
lu-w/criticality-recognition
|
5ad2e12699ad4bf2d7f60ce9e30f26110adce436
|
[
"MIT"
] | null | null | null |
from ..utils import *
@monkeypatch(omega_format.StructuralObject)
def to_auto(cls, world: owlready2.World, scenes, identifier=None, parent_identifier=None):
# Fetches ontologies
ph = auto.get_ontology(auto.Ontology.Physics, world)
l2_core = auto.get_ontology(auto.Ontology.L2_Core, world)
l2_de = auto.get_ontology(auto.Ontology.L2_DE, world)
l3_de = auto.get_ontology(auto.Ontology.L3_DE, world)
# Creates structural object instance
structural_object = ph.Spatial_Object()
structural_object.identifier = str(parent_identifier) + "_" + str(identifier)
for scene in scenes:
scene.has_traffic_entity.append(structural_object)
if cls.type == omega_format.ReferenceTypes.StructuralObjectType.VEGETATION:
structural_object.is_a.append(l2_core.Road_Side_Vegetation)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.BUILDING:
structural_object.is_a.append(l2_de.Building)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.BUS_SHELTER:
structural_object.is_a.append(l2_de.Bus_Stop)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.TUNNEL:
structural_object.is_a.append(l2_de.Tunnel)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.BRIDGE:
structural_object.is_a.append(l2_de.Bridge)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.FENCE:
structural_object.is_a.append(l2_de.Fence)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.BENCH:
structural_object.is_a.append(l2_de.Bench)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.ROAD_WORK:
structural_object.is_a.append(l3_de.Construction_Site)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.BODY_OF_WATER:
structural_object.is_a.append(l2_core.Water_Body)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.GARAGE:
structural_object.is_a.append(l2_de.Garage)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.BILLBOARD:
structural_object.is_a.append(l2_de.Billboard)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.ADVERTISING_PILLAR:
structural_object.is_a.append(l2_de.Advertising_Pillar)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.PHONE_BOX:
structural_object.is_a.append(l2_de.Phone_Box)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.POST_BOX:
structural_object.is_a.append(l2_de.Post_Box)
elif cls.type == omega_format.ReferenceTypes.StructuralObjectType.OVERHEAD_STRUCTURE:
structural_object.is_a.append(l2_de.Overhead_Traffic_Structure)
structural_object.has_height = float(cls.height)
add_geometry_from_polygon(cls, structural_object, world)
add_layer_3_information(cls, structural_object, world)
return [(cls, [structural_object])]
| 52
| 90
| 0.779352
|
a0ea3c474b5869b4cfab332d06af9f7cd009ddd7
| 907
|
py
|
Python
|
ashic/misc/mat2hicrep.py
|
wmalab/ASHIC
|
f9dbee64ef13c7f10c25bc266209fb7fc430d39e
|
[
"MIT"
] | 5
|
2021-01-28T21:51:55.000Z
|
2022-03-04T17:35:59.000Z
|
ashic/misc/mat2hicrep.py
|
wmalab/ASHIC
|
f9dbee64ef13c7f10c25bc266209fb7fc430d39e
|
[
"MIT"
] | 4
|
2020-09-01T06:23:49.000Z
|
2022-01-11T03:58:04.000Z
|
ashic/misc/mat2hicrep.py
|
wmalab/ASHIC
|
f9dbee64ef13c7f10c25bc266209fb7fc430d39e
|
[
"MIT"
] | 1
|
2020-12-06T07:03:50.000Z
|
2020-12-06T07:03:50.000Z
|
import os
import click
import numpy as np
def convert(mat, ch, r, fh):
n = mat.shape[0]
start = 0
for i in range(n):
end = start + r
fh.write("{}\t{}\t{}\t".format(ch, start, end) + '\t'.join(map(str, mat[i])) + "\n")
start = end
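# Added example (hypothetical 2x2 matrix, resolution 500000, chromosome chrX):
# convert() would emit two tab-separated lines such as
#     chrX    0       500000  1.0     2.0
#     chrX    500000  1000000 3.0     4.0
# i.e. one bed-like row per matrix row followed by that row's contact values.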
@click.command()
@click.argument('matfile')
@click.argument('outdir')
@click.option('-r', '--resolution', type=int)
@click.option('-p', '--prefix', default='')
@click.option('-c', '--ch', default='chrX')
def cli(matfile, outdir, resolution, prefix, ch):
if not os.path.exists(outdir):
os.makedirs(outdir)
contactfile = os.path.join(outdir, prefix + "_hicrep.txt")
cfw = open(contactfile, 'w')
if matfile.endswith('.npy'):
mat = np.load(matfile)
else:
mat = np.loadtxt(matfile)
mat[np.isnan(mat)] = 0
convert(mat, ch, resolution, cfw)
cfw.close()
if __name__ == '__main__':
cli()
| 27.484848
| 92
| 0.588754
|
81fd8d5ff99dac0ea700184d0817a4a885871b75
| 1,832
|
py
|
Python
|
python/src/nnabla/backward_function/clip_grad_by_value.py
|
sdonatti/nnabla
|
ac4a42e62dd358f16bd79c08a9a9f3d83c0100c9
|
[
"Apache-2.0"
] | 1
|
2020-08-03T12:49:19.000Z
|
2020-08-03T12:49:19.000Z
|
python/src/nnabla/backward_function/clip_grad_by_value.py
|
sdonatti/nnabla
|
ac4a42e62dd358f16bd79c08a9a9f3d83c0100c9
|
[
"Apache-2.0"
] | 1
|
2020-11-09T07:33:29.000Z
|
2020-11-09T07:33:29.000Z
|
python/src/nnabla/backward_function/clip_grad_by_value.py
|
sdonatti/nnabla
|
ac4a42e62dd358f16bd79c08a9a9f3d83c0100c9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
from .backward_function import BackwardFunction
class ClipGradByValueBackward(BackwardFunction):
@property
def name(self):
return 'ClipGradByValueBackward'
def _create_forward_inputs_and_outputs(self, inputs, outputs):
# Inputs on the forward graph
inputs_fwd = []
for i in range(self._num_inputs_fwd):
need_grad = self.forward_func.inputs[i].need_grad
v = nn.Variable(inputs[i].shape, need_grad=need_grad)
v.data = inputs[i].data
v.grad = outputs[i].data
inputs_fwd += [v]
# Outputs on the forward graph
outputs_fwd = []
for i in range(self._num_outputs_fwd):
inp = inputs[self._num_inputs_fwd + i]
v = nn.Variable(inp.shape)
v.grad = inp.data
outputs_fwd += [v]
return inputs_fwd, outputs_fwd
def backward_impl(self, inputs, outputs, prop_down, accum):
# inputs: [inputs_fwd_graph] + [inputs_bwd_graph] or
# [inputs_fwd_graph] + [outputs_fwd_graph] + [inputs_bwd_graph]
raise NotImplementedError(
"The backward method of ClipGradByValueBackward class is not implemented.")
| 37.387755
| 87
| 0.679039
|
563096a84e268e7d2a31dd466f7dbdc649a54fca
| 18,452
|
py
|
Python
|
custom_step/_version.py
|
MolSSI/custom_step
|
fb0a1f6a07f93d2d72f497e44a9f05e57b2df8c3
|
[
"BSD-3-Clause"
] | null | null | null |
custom_step/_version.py
|
MolSSI/custom_step
|
fb0a1f6a07f93d2d72f497e44a9f05e57b2df8c3
|
[
"BSD-3-Clause"
] | 35
|
2019-12-17T19:48:36.000Z
|
2021-11-27T20:15:05.000Z
|
custom_step/_version.py
|
MolSSI/custom_step
|
fb0a1f6a07f93d2d72f497e44a9f05e57b2df8c3
|
[
"BSD-3-Clause"
] | 1
|
2019-11-04T20:34:45.000Z
|
2019-11-04T20:34:45.000Z
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by GitHub's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "custom_step/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
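# A sketch of the "pieces" dict built above for a hypothetical checkout whose
# `git describe` output was "v1.2.0-3-gabc1234-dirty" (tag_prefix "v"):
#   {"long": "<full 40-char sha>", "short": "abc1234", "closest-tag": "1.2.0",
#    "distance": 3, "dirty": True, "error": None, "date": "<ISO-8601 commit date>"}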
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| 35.416507 | 79 | 0.584598 |
c39c338aa1ac980e43251d3a3b806ff155640842 | 9,865 | py | Python
src/visualize.py | gruberto/DepthBenchmark | 63abdca085600bbbd601496986a0580e7bc39e23 | ["MIT"] | 14 | 2019-09-16T07:42:10.000Z | 2021-11-29T01:08:13.000Z
src/visualize.py | gruberto/DepthBenchmark | 63abdca085600bbbd601496986a0580e7bc39e23 | ["MIT"] | 5 | 2020-05-02T12:55:43.000Z | 2021-12-15T20:26:19.000Z
src/visualize.py | gruberto/DepthBenchmark | 63abdca085600bbbd601496986a0580e7bc39e23 | ["MIT"] | 8 | 2019-09-27T08:41:05.000Z | 2021-06-11T12:46:30.000Z
from Evaluator import Evaluator
from results import colorize_pointcloud, colorize_depth
from Dataset import Dataset
import cv2
import os
import numpy as np
def visualize(data_root, result_root, scenes, daytimes, approaches, evaluations, weathers, visibilities, rainfall_rates):
d = Dataset(data_root)
e = Evaluator(data_root)
for scene in scenes:
if 'intermetric' in evaluations:
depth = e.load_depth_groundtruth(scene, frame='rgb_left', gt_type='intermetric')
depth_color = colorize_depth(depth, min_distance=e.clip_min, max_distance=e.clip_max)
intermetric_path = os.path.join(result_root, 'intermetric', '{}_{}.jpg'.format('intermetric', scene))
if not os.path.exists(os.path.split(intermetric_path)[0]):
os.makedirs(os.path.split(intermetric_path)[0])
cv2.imwrite(intermetric_path, depth_color)
# top_view, top_view_color = e.create_top_view(e.load_depth_groundtruth, scene)
top_view, top_view_color = e.create_top_view(scene, 'intermetric')
intermetric_top_view_file = os.path.join(result_root, 'intermetric',
'{}_{}_topview.jpg'.format('intermetric', scene))
cv2.imwrite(intermetric_top_view_file, top_view_color)
for daytime in daytimes:
for weather in weathers:
samples = []
if weather == 'fog':
for visibility in visibilities:
samples.append(d.get_fog_sequence(scene, daytime, visibility)[0])
if weather == 'rain':
for rainfall_rate in rainfall_rates:
samples.append(d.get_rain_sequence(scene, daytime, rainfall_rate)[0])
if weather == 'clear':
samples.append(d.get_clear_sequence(scene, daytime)[0])
for i, sample in enumerate(samples):
print(sample)
if 'rgb' in evaluations:
rgb = e.load_rgb(sample)
if weather == 'fog':
rgb_path = os.path.join(result_root, 'rgb',
'{}_{}_{}_{}_{}'.format('rgb', scene, daytime, weather,
visibilities[i]))
elif weather == 'rain':
rgb_path = os.path.join(result_root, 'rgb',
'{}_{}_{}_{}_{}'.format('rgb', scene, daytime, weather,
rainfall_rates[i]))
elif weather == 'clear':
rgb_path = os.path.join(result_root, 'rgb',
'{}_{}_{}_{}'.format('rgb', scene, daytime, weather))
if not os.path.exists(os.path.split(rgb_path)[0]):
os.makedirs(os.path.split(rgb_path)[0])
cv2.imwrite(rgb_path + '.jpg', rgb)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
rgb[:, :, 0] = clahe.apply(rgb[:, :, 0])
rgb[:, :, 1] = clahe.apply(rgb[:, :, 1])
rgb[:, :, 2] = clahe.apply(rgb[:, :, 2])
cv2.imwrite(rgb_path + '_clahe.jpg', rgb)
if 'lidar_raw' in evaluations:
depth = e.load_depth(sample, 'lidar_hdl64_rgb_left', interpolate=False)
depth_color = colorize_pointcloud(depth, min_distance=e.clip_min, max_distance=e.clip_max,
radius=5)
if weather == 'fog':
lidar_path = os.path.join(result_root, 'lidar_raw',
'{}_{}_{}_{}_{}'.format('lidar_raw', scene, daytime, weather,
visibilities[i]))
elif weather == 'rain':
lidar_path = os.path.join(result_root, 'lidar_raw',
'{}_{}_{}_{}_{}'.format('lidar_raw', scene, daytime, weather,
rainfall_rates[i]))
elif weather == 'clear':
lidar_path = os.path.join(result_root, 'lidar_raw',
'{}_{}_{}_{}'.format('lidar_raw', scene, daytime, weather))
if not os.path.exists(os.path.split(lidar_path)[0]):
os.makedirs(os.path.split(lidar_path)[0])
cv2.imwrite(lidar_path + '.jpg', depth_color)
if 'gated' in evaluations:
for t in [0,17,31]:
gated_img = e.load_gated(sample, t)
if weather == 'fog':
gated_path = os.path.join(result_root, 'gated{}'.format(t),
'{}_{}_{}_{}_{}'.format('gated{}'.format(t), scene, daytime, weather,
visibilities[i]))
elif weather == 'rain':
gated_path = os.path.join(result_root, 'gated{}'.format(t),
'{}_{}_{}_{}_{}'.format('gated{}'.format(t), scene, daytime, weather,
rainfall_rates[i]))
elif weather == 'clear':
gated_path = os.path.join(result_root, 'gated{}'.format(t),
'{}_{}_{}_{}'.format('gated{}'.format(t), scene, daytime, weather))
if not os.path.exists(os.path.split(gated_path)[0]):
os.makedirs(os.path.split(gated_path)[0])
cv2.imwrite(gated_path + '.jpg', gated_img)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
gated_img[:, :, 0] = clahe.apply(gated_img[:, :, 0])
gated_img[:, :, 1] = clahe.apply(gated_img[:, :, 1])
gated_img[:, :, 2] = clahe.apply(gated_img[:, :, 2])
cv2.imwrite(gated_path + '_clahe.jpg', gated_img)
for approach in approaches:
if weather == 'fog':
sample_path = os.path.join(result_root, approach,
'{}_{}_{}_{}_{}'.format(approach, scene, daytime, weather,
visibilities[i]))
elif weather == 'rain':
sample_path = os.path.join(result_root, approach,
'{}_{}_{}_{}_{}'.format(approach, scene, daytime, weather,
rainfall_rates[i]))
elif weather == 'clear':
sample_path = os.path.join(result_root, approach,
'{}_{}_{}_{}'.format(approach, scene, daytime, weather))
if not os.path.exists(os.path.split(sample_path)[0]):
os.makedirs(os.path.split(sample_path)[0])
if 'depth_map' in evaluations:
depth = e.load_depth(sample, approach)
depth_color = colorize_depth(depth, min_distance=e.clip_min, max_distance=e.clip_max)
depth_map_path = sample_path + '_depth_map.jpg'
cv2.imwrite(depth_map_path, depth_color)
if 'error_image' in evaluations:
error_image = e.error_image(sample, approach, gt_type='intermetric')
error_image_path = sample_path + '_error_image.jpg'
cv2.imwrite(error_image_path, error_image)
if 'top_view' in evaluations:
top_view, top_view_color = e.create_top_view(sample, approach)
top_view_path = sample_path + '_top_view.jpg'
cv2.imwrite(top_view_path, top_view_color)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Visualize depth estimation results")
parser.add_argument('--data_root', type=str, default='data', help='Path to data')
parser.add_argument('--results_dir', type=str, default='results', help='Folder for evaluation results')
parser.add_argument('--daytime', type=str, default='day', help='day or night')
parser.add_argument('--approach', type=str, default='depth', help='Selected folder for evaluation')
args = parser.parse_args()
scenes = ['scene1', 'scene2', 'scene3', 'scene4']
daytimes = ['day', 'night']
evaluations = ['depth_map', 'rgb', 'lidar_raw', 'intermetric', 'top_view']
weathers = ['clear', 'fog', 'rain']
visibilities = [20, 40, 30, 50, 70, 100]
rainfall_rates = [0, 15, 55]
visualize(args.data_root, args.results_dir, scenes, daytimes, [args.approach], evaluations, weathers, visibilities, rainfall_rates)
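    # Example invocation (paths and the approach folder name are hypothetical):
    #   python src/visualize.py --data_root /data/benchmark --results_dir results --approach sgm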
| 54.203297 | 135 | 0.461733 |
21cef99ef3760082fbe7c6910b5cacc6ef60579d | 2,199 | py | Python
scripts/zMayaTools/pick_walk.py | zewt/zMayaTools | 9f7f43ab015e58cf25fc82f4ae1cdd424b5a52a1 | ["MIT"] | 73 | 2017-12-08T03:33:50.000Z | 2022-03-21T15:44:12.000Z
scripts/zMayaTools/pick_walk.py | fsanges/zMayaTools | 795168d497459b43439e03a55233320f90d8d11c | ["MIT"] | 4 | 2019-03-17T05:25:23.000Z | 2021-03-25T04:22:18.000Z
scripts/zMayaTools/pick_walk.py | fsanges/zMayaTools | 795168d497459b43439e03a55233320f90d8d11c | ["MIT"] | 10 | 2018-12-19T04:38:10.000Z | 2022-01-28T06:24:18.000Z
from zMayaTools import maya_helpers
from pymel import core as pm
def pick_walk_add(direction):
"""
Pick walk in a direction, adding the picked nodes to the selection instead of
replacing the selection.
"""
# pm.pickWalk is the basic pick walk command, but PickWalkUp, etc. have special
# cases for certain types of selections. These are four copied-and-pasted commands
# instead of just one with an argument, so we need to map to the corresponding
# command.
assert direction in ('up', 'down', 'left', 'right')
pick_walk_commands = {
'up': 'PickWalkUp',
'down': 'PickWalkDown',
'left': 'PickWalkLeft',
'right': 'PickWalkRight',
}
# Store the current selection.
selection = pm.ls(sl=True)
# Execute the pick walk. This will replace the selection.
pm.mel.eval(pick_walk_commands[direction])
new_selection = pm.ls(sl=True)
# Select the original selection, then add the new selection after it.
pm.select(selection, ne=True)
pm.select(new_selection, add=True)
def setup_runtime_commands():
maya_helpers.create_or_replace_runtime_command('zPickWalkAddUp', category='zMayaTools.Miscellaneous',
annotation='zMayaTools: Pick walk up, adding to the selection',
command='from zMayaTools import pick_walk; pick_walk.pick_walk_add("up")')
maya_helpers.create_or_replace_runtime_command('zPickWalkAddDown', category='zMayaTools.Miscellaneous',
annotation='zMayaTools: Pick walk down, adding to the selection',
command='from zMayaTools import pick_walk; pick_walk.pick_walk_add("down")')
maya_helpers.create_or_replace_runtime_command('zPickWalkAddLeft', category='zMayaTools.Miscellaneous',
annotation='zMayaTools: Pick walk left, adding to the selection',
command='from zMayaTools import pick_walk; pick_walk.pick_walk_add("left")')
maya_helpers.create_or_replace_runtime_command('zPickWalkAddRight', category='zMayaTools.Miscellaneous',
annotation='zMayaTools: Pick walk right, adding to the selection',
command='from zMayaTools import pick_walk; pick_walk.pick_walk_add("right")')
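# A usage sketch (run inside Maya's script editor; assumes the zMayaTools scripts are
# on Maya's Python path):
#   from zMayaTools import pick_walk
#   pick_walk.setup_runtime_commands()
#   # ...then bind zPickWalkAddUp/Down/Left/Right to hotkeys, or call directly:
#   pick_walk.pick_walk_add('up')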
| 46.787234 | 108 | 0.71487 |
b230dcb37cc20f0d23018807b5d2f32cefea5230 | 5,773 | py | Python
tools/memory_inspector/memory_inspector/core/backends.py | iplo/Chain | 8bc8943d66285d5258fffc41bed7c840516c4422 | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | 231 | 2015-01-08T09:04:44.000Z | 2021-12-30T03:03:10.000Z
tools/memory_inspector/memory_inspector/core/backends.py | JasonEric/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | 1 | 2017-02-14T21:55:58.000Z | 2017-02-14T21:55:58.000Z
tools/memory_inspector/memory_inspector/core/backends.py | JasonEric/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | 268 | 2015-01-21T05:53:28.000Z | 2022-03-25T22:09:01.000Z
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
_backends = {} # Maps a string (backend name) to a |Backend| instance.
def Register(backend):
"""Called by each backend module to register upon initialization."""
assert(isinstance(backend, Backend))
_backends[backend.name] = backend
def ListDevices():
"""Enumerates all the devices from all the registered backends."""
for backend in _backends.itervalues():
for device in backend.EnumerateDevices():
assert(isinstance(device, Device))
yield device
def GetDevice(backend_name, device_id):
"""Retrieves a specific device given its backend name and device id."""
for backend in _backends.itervalues():
if backend.name != backend_name:
continue
for device in backend.EnumerateDevices():
if device.id != device_id:
continue
return device
return None
# The classes below model the contract interfaces exposed to the frontends and
# implemented by each concrete backend.
class Backend(object):
"""Base class for backends.
This is the extension point for the OS-specific profiler implementations.
"""
def __init__(self, settings=None):
# Initialize with empty settings if not required by the overriding backend.
self.settings = settings or Settings()
def EnumerateDevices(self):
"""Enumeates the devices discovered and supported by the backend.
Returns:
A sequence of |Device| instances.
"""
raise NotImplementedError()
@property
def name(self):
"""A unique name which identifies the backend.
Typically this will just return the target OS name, e.g., 'Android'."""
raise NotImplementedError()
class Device(object):
"""Interface contract for devices enumerated by a backend."""
def __init__(self, backend, settings=None):
self.backend = backend
# Initialize with empty settings if not required by the overriding device.
self.settings = settings or Settings()
def Initialize(self):
"""Called before anything else, for initial provisioning."""
raise NotImplementedError()
def IsNativeAllocTracingEnabled(self):
"""Check if the device is ready to capture native allocation traces."""
raise NotImplementedError()
def EnableNativeAllocTracing(self, enabled):
"""Provision the device and make it ready to trace native allocations."""
raise NotImplementedError()
def IsMmapTracingEnabled(self):
"""Check if the device is ready to capture memory map traces."""
raise NotImplementedError()
def EnableMmapTracing(self, enabled):
"""Provision the device and make it ready to trace memory maps."""
raise NotImplementedError()
def ListProcesses(self):
"""Returns a sequence of |Process|."""
raise NotImplementedError()
def GetProcess(self, pid):
"""Returns an instance of |Process| or None (if not found)."""
raise NotImplementedError()
def GetStats(self):
"""Returns an instance of |DeviceStats|."""
raise NotImplementedError()
@property
def name(self):
"""Friendly name of the target device (e.g., phone model)."""
raise NotImplementedError()
@property
def id(self):
"""Unique identifier (within the backend) of the device (e.g., S/N)."""
raise NotImplementedError()
class Process(object):
"""Interface contract for each running process."""
def __init__(self, device, pid, name):
assert(isinstance(device, Device))
assert(isinstance(pid, int))
self.device = device
self.pid = pid
self.name = name
def DumpMemoryMaps(self):
"""Returns an instance of |memory_map.Map|."""
raise NotImplementedError()
def DumpNativeHeap(self):
"""Returns an instance of |native_heap.NativeHeap|."""
raise NotImplementedError()
def GetStats(self):
"""Returns an instance of |ProcessStats|."""
raise NotImplementedError()
def __str__(self):
return '[%d] %s' % (self.pid, self.name)
class DeviceStats(object):
"""CPU/Memory stats for a |Device|."""
def __init__(self, uptime, cpu_times, memory_stats):
"""Args:
uptime: uptime in seconds.
cpu_times: array (CPUs) of dicts (cpu times since last call).
e.g., [{'User': 10, 'System': 80, 'Idle': 10}, ... ]
memory_stats: Dictionary of memory stats. e.g., {'Free': 1, 'Cached': 10}
"""
assert(isinstance(cpu_times, list) and isinstance(cpu_times[0], dict))
assert(isinstance(memory_stats, dict))
self.uptime = uptime
self.cpu_times = cpu_times
self.memory_stats = memory_stats
class ProcessStats(object):
"""CPU/Memory stats for a |Process|."""
def __init__(self, threads, run_time, cpu_usage, vm_rss, page_faults):
"""Args:
threads: Number of threads.
run_time: Total process uptime in seconds.
cpu_usage: CPU usage [0-100] since the last GetStats call.
vm_rss_kb: Resident Memory Set in Kb.
page_faults: Number of VM page faults (hard + soft).
"""
self.threads = threads
self.run_time = run_time
self.cpu_usage = cpu_usage
self.vm_rss = vm_rss
self.page_faults = page_faults
class Settings(object):
"""Models user-definable settings for backends and devices."""
def __init__(self, expected_keys=None):
"""Args:
expected_keys: A dict. (key-name -> description) of expected settings
"""
self.expected_keys = expected_keys or {}
self._settings = dict((k, '') for k in self.expected_keys.iterkeys())
def __getitem__(self, key):
assert(key in self.expected_keys)
return self._settings.get(key)
def __setitem__(self, key, value):
assert(key in self.expected_keys)
self._settings[key] = value
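# A minimal sketch of a hypothetical backend plugging into this contract (the real
# OS-specific backends elsewhere in the package are considerably more involved):
#
#   class FakeBackend(Backend):
#     @property
#     def name(self):
#       return 'Fake'
#
#     def EnumerateDevices(self):
#       return []  # a real backend yields Device subclasses here
#
#   Register(FakeBackend())  # each backend module calls Register() at import time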
| 30.067708 | 79 | 0.692881 |
99d38992e9153f346b041a36320b63170e517294 | 24,988 | py | Python
LICDataWarehouseSync.py | WestonSF/ArcGISDataToolkit | 106c814a022a96029052320add57f8d26d6d3a4c | ["Apache-2.0"] | 15 | 2015-05-29T00:18:50.000Z | 2022-03-02T07:34:03.000Z
LICDataWarehouseSync.py | WestonSF/ArcGISDataToolkit | 106c814a022a96029052320add57f8d26d6d3a4c | ["Apache-2.0"] | 4 | 2015-03-11T20:45:55.000Z | 2017-09-14T18:14:59.000Z
LICDataWarehouseSync.py | WestonSF/ArcGISDataToolkit | 106c814a022a96029052320add57f8d26d6d3a4c | ["Apache-2.0"] | 8 | 2016-03-02T14:13:52.000Z | 2019-11-18T22:47:48.000Z
#-------------------------------------------------------------
# Name: Livestock Improvement Corporation Datawarehouse Sync
# Purpose:      Synchronises data between the LIC data warehouse and GIS database, producing error and change reports.
# Author: Shaun Weston (shaun_weston@eagle.co.nz)
# Date Created: 18/09/2015
# Last Updated: 10/11/2015 (TWH)
# Copyright: (c) Eagle Technology
# ArcGIS Version: ArcGIS for Desktop 10.3+
# Python Version: 2.7
#--------------------------------
# Import modules
import os
import sys
import logging
import smtplib
import arcpy
import string
import datetime
import time
import urllib2  # used by the optional proxy setup in the __main__ block below
# Set global variables
enableLogging = "true" # Use logger.info("Example..."), logger.warning("Example..."), logger.error("Example...")
logFile = os.path.join(os.path.dirname(__file__), "Logs\LICDataWarehouseSync.log") # os.path.join(os.path.dirname(__file__), "Example.log")
sendErrorEmail = "true"
emailTo = "cgriffin@lic.co.nz, araj@lic.co.nz"
emailUser = "svcaGIS@lic.co.nz"
emailPassword = ""
emailSubject = "EDW/GIS Sync"
emailMessage = ""
enableProxy = "false"
requestProtocol = "http" # http or https
proxyURL = ""
output = None
if arcpy.CheckProduct("ArcEditor") == "Available":
print "License OK"
else:
print "License not available"
sendEmail("Quitting","Cant get a desktop GIS license.")
sys.exit(-99999)
# Enable data to be overwritten
arcpy.env.overwriteOutput = True
# Setup up reporting dictionary
gisReportDict = {}
gisReportCount = 0
# Start of main function
def mainFunction(gisProperty,gisShed,gisEntrance,gisPropertyEntranceRelate,gisPropertyShedRelate,gisshedEntranceRelate,dwProperty,dwShed,gisDataSyncReport,dwLoadStatus): # Get parameters from ArcGIS Desktop tool by seperating by comma e.g. (var1 is 1st parameter,var2 is 2nd parameter,var3 is 3rd parameter)
try:
# Check the DW has finished its job
dwLoadNotComplete = True
while dwLoadNotComplete:
#Check if there is a row with both start and end times from today
dwLoadStatusSearchCursor = arcpy.da.SearchCursor(dwLoadStatus, ["start_datetime","end_datetime"], "start_date = CAST(GETDATE() AS DATE) AND end_date = CAST(GETDATE() AS DATE)")
for row in dwLoadStatusSearchCursor:
dwLoadNotComplete = False
if dwLoadNotComplete:
# Insert row to say we are waiting for DW to complete
gisReportInsertCursor = arcpy.da.InsertCursor(gisDataSyncReport,["ID","Date","LogType","Description"])
gisReportInsertCursor.insertRow([0,datetime.datetime.now(),"WAIT","The DW ETL is not complete, waiting 5 minutes."])
del gisReportInsertCursor
time.sleep(300) #sleep for 5 minutes
#If after 11am then quit
if datetime.datetime.now().hour >= 11:
# Insert row to say we are quitting
gisReportInsertCursor = arcpy.da.InsertCursor(gisDataSyncReport,["ID","Date","LogType","Description"])
gisReportInsertCursor.insertRow([0,datetime.datetime.now(),"QUIT","The GIS Data Sync is quitting as its after 11am."])
del gisReportInsertCursor
sendEmail("Quitting","The GIS Data Sync is quitting as its after 11am.")
sys.exit(-99999)
# Insert row to say we are starting
gisReportInsertCursor = arcpy.da.InsertCursor(gisDataSyncReport,["ID","Date","LogType","Description"])
gisReportInsertCursor.insertRow([0,datetime.datetime.now(),"START","The DW GIS Data Sync is starting"])
del gisReportInsertCursor
# --------------------------------------- Start of code --------------------------------------- #
global gisReportDict
global gisReportCount
# Set up search cursors
# GIS Property
gisPropertySearchCursor = arcpy.da.SearchCursor(gisProperty, ["Id","GlobalID","SHAPE@X","SHAPE@Y"], "RecordStatus <> 'I'")
# GIS Shed
gisPropertyShedSearchCursor = arcpy.da.SearchCursor(gisShed, ["Id"], "RecordStatus <> 'I'")
# GIS Property/Entrance Relationship
gisPropertyEntranceRelateSearchCursor = arcpy.da.SearchCursor(gisPropertyEntranceRelate, ["PropertyGlobalID","EntranceGlobalID"])
# GIS Property/Shed Relationship
gisPropertyShedRelateSearchCursor = arcpy.da.SearchCursor(gisPropertyShedRelate, ["ShedID","PropertyID"])
dwPropertyShedSearchCursor = arcpy.da.SearchCursor(dwShed, ["shed_bsns_partner_num"])
dwPropertySearchCursor = arcpy.da.SearchCursor(dwProperty, ["property_bsns_partner_num"])
# ---------------- Log the differing IDs - Property not in Data Warehouse ----------------
# Add GIS property into array and dictionary
gisPropertyIDsArray = []
gisPropertyDict = {}
for row in gisPropertySearchCursor:
# Add IDs as strings to an array
gisPropertyIDsArray.append(str(row[0]).strip().rstrip().upper())
# Add ID, global ID and XY coordinate into a dictionary
gisPropertyDict[str(row[0]).strip().rstrip().upper()] = [str(row[1]).strip().rstrip(),row[2],row[3]]
# Add Data Warehouse property into array
dwPropertyIDsArray = []
for row in dwPropertySearchCursor:
# Add IDs as strings to an array
dwPropertyIDsArray.append(str(row[0]).strip().rstrip().upper())
# Setup array containing IDs that are in GIS and the Data Warehouse
gisdwPropertyIDsArray = []
for gisPropertyID in gisPropertyIDsArray:
# If GIS property not in Data Warehouse property
if gisPropertyID not in dwPropertyIDsArray:
# Info message
describeDataset = arcpy.Describe(dwProperty)
descriptionString = "Property not in Data Warehouse - " + gisPropertyID + ": " + describeDataset.name
arcpy.AddMessage(descriptionString)
# Add to logs dictionary
gisReportDict[gisReportCount] = [gisPropertyID,datetime.datetime.now(),"ERROR",descriptionString]
gisReportCount = gisReportCount + 1
else:
# Add to property (GIS and Data Warehouse) array
gisdwPropertyIDsArray.append(gisPropertyID)
# ---------------- Log the differing IDs - Property not in GIS ----------------
for dwPropertyID in dwPropertyIDsArray:
# If Data Warehouse property not in GIS property
if dwPropertyID not in gisPropertyIDsArray:
# Info message
describeDataset = arcpy.Describe(gisProperty)
descriptionString = "Property not in GIS - " + dwPropertyID + ": " + describeDataset.name
arcpy.AddMessage(descriptionString)
# Add to logs dictionary
gisReportDict[gisReportCount] = [dwPropertyID,datetime.datetime.now(),"ERROR",descriptionString]
gisReportCount = gisReportCount + 1
# Remove out ID from property (GIS and Data Warehouse) array if it's in there
if dwPropertyID in gisdwPropertyIDsArray:
gisdwPropertyIDsArray.remove(dwPropertyID)
# Add GIS property entrance relate into array and dictionary
gisPropertyEntranceArray = []
gisPropertyEntranceDict = {}
for row in gisPropertyEntranceRelateSearchCursor:
# Add property global IDs to an array
gisPropertyEntranceArray.append(str(row[0]).strip().rstrip())
# Add both global IDs into a dictionary
gisPropertyEntranceDict[str(row[0]).strip().rstrip()] = str(row[1]).strip().rstrip()
# Delete cursor objects
del gisPropertySearchCursor
del dwPropertySearchCursor
del gisPropertyEntranceRelateSearchCursor
# ---------------- Create Property Entrance Point - If Property not in Property Entrance Relate ----------------
gisPropertyGlobalIDsArray = []
gisPropertyEntranceRelatesToAddDict = {}
for gisdwPropertyID in gisdwPropertyIDsArray:
# Get the property global ID
propertyGlobalID = gisPropertyDict[gisdwPropertyID][0]
# Check if property not in property entrance relate
if propertyGlobalID not in gisPropertyEntranceArray:
# Info message
describeDataset = arcpy.Describe(gisPropertyEntranceRelate)
descriptionString = "Property not in Property to Entrance Relationship - " + gisdwPropertyID + ", " + propertyGlobalID + ": " + describeDataset.name
arcpy.AddMessage(descriptionString)
# Create property entrance point at same location as the property point
propertyXPoint = gisPropertyDict[gisdwPropertyID][1]
propertyYPoint = gisPropertyDict[gisdwPropertyID][2]
propertyPoint = arcpy.Point(propertyXPoint,propertyYPoint)
# Insert new record
propertyEntranceOID = -1
with arcpy.da.InsertCursor(gisEntrance,["SHAPE@XY","RecordStatus","SpatialAccuracy","CreatedUser","CreatedDate","LastEditedUser","LastEditedDate","EntranceNumber"]) as gisPropertyEntranceInsertCursor:
propertyEntranceOID = gisPropertyEntranceInsertCursor.insertRow([propertyPoint,"M","MIG","SCRIPT",datetime.datetime.now(),"SCRIPT",datetime.datetime.now(),1])
# Get the global ID for the record just created
#gisEntranceRows = [row for row in arcpy.da.SearchCursor(gisEntrance, "GlobalID", sql_clause=(None, "ORDER BY OBJECTID ASC"))]
gisEntranceRows = [row for row in arcpy.da.SearchCursor(gisEntrance, "GlobalID", "OBJECTID = " + str(propertyEntranceOID))]
propertyEntranceGlobalID = gisEntranceRows[-1][0]
# Info message
describeDataset = arcpy.Describe(gisEntrance)
descriptionString = "New feature record created - " + propertyEntranceGlobalID + ": " + describeDataset.name
arcpy.AddMessage(descriptionString)
# Add to logs dictionary
gisReportDict[gisReportCount] = [gisdwPropertyID,datetime.datetime.now(),"CHANGE",descriptionString]
gisReportCount = gisReportCount + 1
# Add entry to property entrance relates to add dictionary
gisPropertyEntranceRelatesToAddDict[propertyGlobalID] = propertyEntranceGlobalID
# ---------------- Create Property to Property Entrance Relationship - From dictionary created above ----------------
# Setup up cursor for property entrance relationship
gisPropertyEntranceRelateInsertCursor = arcpy.da.InsertCursor(gisPropertyEntranceRelate, ["PropertyGlobalID","EntranceGlobalID"])
for key, value in gisPropertyEntranceRelatesToAddDict.iteritems():
# Create record for property entrance to property relate
newOID = gisPropertyEntranceRelateInsertCursor.insertRow([key,value])
# Update property entrance to property relate dictionary
gisPropertyEntranceDict[key] = value
# Get the property ID
for keyProp, valueProp in gisPropertyDict.iteritems():
if (str(key) == str(valueProp[0])):
# Info message
describeDataset = arcpy.Describe(gisPropertyEntranceRelate)
descriptionString = "New relationship record created - " + keyProp + ": " + describeDataset.name
arcpy.AddMessage(descriptionString)
# Add to logs dictionary
gisReportDict[gisReportCount] = [keyProp,datetime.datetime.now(),"CHANGE",descriptionString]
gisReportCount = gisReportCount + 1
# Delete cursor object
del gisPropertyEntranceRelateInsertCursor
# ---------------- Create Property Shed - If shed in property to shed relationship ----------------
# Add property shed relate into array and dictionary
gisPropertyShedRelateArray = []
gisPropertyShedRelateDict = {}
for row in gisPropertyShedRelateSearchCursor:
# Add property IDs to an array
gisPropertyShedRelateArray.append(str(row[1]).strip().rstrip().upper())
# Add shed ID and Property IDs into a dictionary
gisPropertyShedRelateDict[str(row[0]).strip().rstrip().upper()] = str(row[1]).strip().rstrip().upper()
# Add GIS property shed into array
gisPropertyShedArray = []
for row in gisPropertyShedSearchCursor:
# Add property shed IDs to an array
gisPropertyShedArray.append(str(row[0]).strip().rstrip().upper())
# For each property
for gisdwPropertyID in gisdwPropertyIDsArray:
# Check if property in property shed relate
if gisdwPropertyID in gisPropertyShedRelateArray:
# FUNCTION - Sync property shed and shed relationships
gisPropertyShedSync(gisdwPropertyID,gisPropertyDict,gisShed,gisPropertyShedRelate,gisshedEntranceRelate,gisPropertyShedArray,gisPropertyShedRelateDict,gisPropertyEntranceDict)
# If property not in property shed relate
else:
# Info message
describeDataset = arcpy.Describe(gisPropertyShedRelate)
descriptionString = "Property not in Property to Shed Relationship - " + gisdwPropertyID + ": " + describeDataset.name
arcpy.AddMessage(descriptionString)
del gisPropertyShedSearchCursor
del gisPropertyShedRelateSearchCursor
# ---------------- Log the differing IDs - Shed not in Data Warehouse ----------------
# Add GIS property shed into array
gisPropertyShedIDsArray = []
gisPropertyShedSearchCursor = arcpy.da.SearchCursor(gisShed, ["Id"], "RecordStatus <> 'I'")
for row in gisPropertyShedSearchCursor:
# Add property shed IDs to an array
gisPropertyShedIDsArray.append(str(row[0]).strip().rstrip().upper())
# Add Data Warehouse property shed into array
dwPropertyShedIDsArray = []
for row in dwPropertyShedSearchCursor:
# Add IDs as strings to an array
dwPropertyShedIDsArray.append(str(row[0]).strip().rstrip().upper())
for gisPropertyShedID in gisPropertyShedIDsArray:
# If GIS shed not in Data Warehouse sheds
if gisPropertyShedID not in dwPropertyShedIDsArray:
# Info message
describeDataset = arcpy.Describe(dwShed)
descriptionString = "Shed not in Data Warehouse - " + gisPropertyShedID + ": " + describeDataset.name
arcpy.AddMessage(descriptionString)
# Add to logs dictionary
gisReportDict[gisReportCount] = [gisPropertyShedID,datetime.datetime.now(),"ERROR",descriptionString]
gisReportCount = gisReportCount + 1
# ---------------- Log the differing IDs - Shed not in GIS ----------------
for dwPropertyShedID in dwPropertyShedIDsArray:
# If data warehouse shed not in GIS sheds
if dwPropertyShedID not in gisPropertyShedIDsArray:
# Info message
describeDataset = arcpy.Describe(dwShed)
descriptionString = "Shed not in GIS - " + dwPropertyShedID + ": " + describeDataset.name
arcpy.AddMessage(descriptionString)
# Add to logs dictionary
gisReportDict[gisReportCount] = [dwPropertyShedID,datetime.datetime.now(),"ERROR",descriptionString]
gisReportCount = gisReportCount + 1
del gisPropertyShedSearchCursor
# ---------------- Create Change and Error Report - From dictionary being logged to ----------------
# Setup up cursor for report
gisReportInsertCursor = arcpy.da.InsertCursor(gisDataSyncReport,["ID","Date","LogType","Description"])
for key, value in gisReportDict.iteritems():
# Write to log
gisReportInsertCursor.insertRow([value[0],value[1],value[2],value[3]])
# Delete cursor object
del gisReportInsertCursor
# Insert row to say we are stopping
gisReportInsertCursor = arcpy.da.InsertCursor(gisDataSyncReport,["ID","Date","LogType","Description"])
gisReportInsertCursor.insertRow([0,datetime.datetime.now(),"STOP","The DW GIS Data Sync is stopping"])
del gisReportInsertCursor
# --------------------------------------- End of code --------------------------------------- #
# If called from gp tool return the arcpy parameter
if __name__ == '__main__':
# Return the output if there is any
if output:
arcpy.SetParameterAsText(1, output)
# Otherwise return the result
else:
# Return the output if there is any
if output:
return output
# Logging
if (enableLogging == "true"):
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
# If arcpy error
except arcpy.ExecuteError:
# Build and show the error message
errorMessage = arcpy.GetMessages(2)
arcpy.AddError(errorMessage)
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
if (sendErrorEmail == "true"):
# Send email
sendEmail("Error",errorMessage)
# If python error
except Exception as e:
errorMessage = ""
# Build and show the error message
for i in range(len(e.args)):
if (i == 0):
errorMessage = unicode(e.args[i]).encode('utf-8')
else:
errorMessage = errorMessage + " " + unicode(e.args[i]).encode('utf-8')
arcpy.AddError(errorMessage)
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
if (sendErrorEmail == "true"):
# Send email
sendEmail("Error",errorMessage)
# End of main function
# Start of GIS property shed sync function
def gisPropertyShedSync(gisdwPropertyID,gisPropertyDict,gisShed,gisPropertyShedRelate,gisshedEntranceRelate,gisPropertyShedArray,gisPropertyShedRelateDict,gisPropertyEntranceDict):
global gisReportDict
global gisReportCount
# Get the shed IDs related to the property from Data Warehouse
gisShedIDs = []
for key, value in gisPropertyShedRelateDict.iteritems():
if (value == gisdwPropertyID):
gisShedIDs.append(key)
gisShedIDs.sort()
# Get the number of sheds related to this property
gisShedNumber = len(gisShedIDs)
# For each shed
count = 1
for gisShedID in gisShedIDs:
# Check if shed is not in GIS sheds
if gisShedID not in gisPropertyShedArray:
# Create property shed point(s) with offset from property
shedXPoint = gisPropertyDict[gisdwPropertyID][1] + (10 * count)
shedYPoint = gisPropertyDict[gisdwPropertyID][2] + (10 * count)
shedPoint = arcpy.Point(shedXPoint,shedYPoint)
# Insert new record into GIS property shed
with arcpy.da.InsertCursor(gisShed,["SHAPE@XY","Id","RecordStatus","SpatialAccuracy","CreatedUser","CreatedDate","LastEditedUser","LastEditedDate"]) as gisPropertyShedInsertCursor:
gisPropertyShedInsertCursor.insertRow([shedPoint,gisShedID,"M","MIG","SCRIPT",datetime.datetime.now(),"SCRIPT",datetime.datetime.now()])
del gisPropertyShedInsertCursor
# Info message
describeDataset = arcpy.Describe(gisShed)
descriptionString = "New feature record created - " + gisShedID + ": " + describeDataset.name
arcpy.AddMessage(descriptionString)
# Add to logs dictionary
gisReportDict[gisReportCount] = [gisdwPropertyID,datetime.datetime.now(),"CHANGE",descriptionString]
gisReportCount = gisReportCount + 1
# ---------------- Create Shed to Entrance Relationship - When a new shed is created ----------------
# Get the global ID for the record just created
gisShedRows = [row for row in arcpy.da.SearchCursor(gisShed, "GlobalID", sql_clause=(None, "ORDER BY OBJECTID ASC"))]
propertyShedGlobalID = gisShedRows[-1][0]
# Get the property global ID
propertyGlobalID = gisPropertyDict[gisdwPropertyID][0]
# Get the property entrance related to this property
propertyEntranceGlobalID = gisPropertyEntranceDict[propertyGlobalID]
# Insert new record into GIS shed entrance relate
with arcpy.da.InsertCursor(gisshedEntranceRelate,["ShedGlobalID","EntranceGlobalID"]) as gisEntranceShedRelateInsertCursor:
gisEntranceShedRelateInsertCursor.insertRow([propertyShedGlobalID,propertyEntranceGlobalID])
# Info message
describeDataset = arcpy.Describe(gisshedEntranceRelate)
descriptionString = "New relationship record created - " + gisShedID + ": " + describeDataset.name
arcpy.AddMessage(descriptionString)
# Add to logs dictionary
gisReportDict[gisReportCount] = [gisdwPropertyID,datetime.datetime.now(),"CHANGE",descriptionString]
gisReportCount = gisReportCount + 1
count = count + 1
# End of GIS property shed sync function
# Start of set logging function
def setLogging(logFile):
# Create a logger
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.DEBUG)
# Setup log message handler
logMessage = logging.FileHandler(logFile)
# Setup the log formatting
logFormat = logging.Formatter("%(asctime)s: %(levelname)s - %(message)s", "%d/%m/%Y - %H:%M:%S")
# Add formatter to log message handler
logMessage.setFormatter(logFormat)
# Add log message handler to logger
logger.addHandler(logMessage)
return logger, logMessage
# End of set logging function
# Start of send email function
def sendEmail(subject, message):
# Send an email
arcpy.AddMessage("Sending email...")
# Server and port information
smtpServer = smtplib.SMTP("relay.livestock.org.nz",25)
smtpServer.ehlo
# Email content
header = 'To:' + emailTo + '\n' + 'From: ' + emailUser + '\n' + 'Subject:' + emailSubject + ': ' + subject + '\n'
body = header + '\n' + emailMessage + '\n' + '\n' + message
# Send the email and close the connection
smtpServer.sendmail(emailUser, emailTo, body)
# End of send email function
# This test allows the script to be used from the operating
# system command prompt (stand-alone), in a Python IDE,
# as a geoprocessing script tool, or as a module imported in
# another script
if __name__ == '__main__':
# Arguments are optional - If running from ArcGIS Desktop tool, parameters will be loaded into *argv
argv = tuple(arcpy.GetParameterAsText(i)
for i in range(arcpy.GetArgumentCount()))
# Logging
if (enableLogging == "true"):
# Setup logging
logger, logMessage = setLogging(logFile)
# Log start of process
logger.info("Process started.")
# Setup the use of a proxy for requests
if (enableProxy == "true"):
# Setup the proxy
proxy = urllib2.ProxyHandler({requestProtocol : proxyURL})
openURL = urllib2.build_opener(proxy)
# Install the proxy
urllib2.install_opener(openURL)
mainFunction(*argv)
| 49.976 | 309 | 0.623259 |
ba2996fc934a829e71024efe128b717115ad2b84 | 485 | py | Python
rest/sending-messages/example-1/example-1.6.x.py | shaileshn/api-snippets | 08826972154634335378fed0edd2707d7f62b03b | ["MIT"] | 2 | 2017-11-23T11:31:20.000Z | 2018-01-22T04:14:02.000Z
rest/sending-messages/example-1/example-1.6.x.py | berkus/twilio-api-snippets | beaa4e211044cb06daf9b73fb05ad6a7a948f879 | ["MIT"] | null | null | null
rest/sending-messages/example-1/example-1.6.x.py | berkus/twilio-api-snippets | beaa4e211044cb06daf9b73fb05ad6a7a948f879 | ["MIT"] | 1 | 2019-10-02T14:36:36.000Z | 2019-10-02T14:36:36.000Z
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
message = client.messages.create(
"+15558675309",
body="Jenny please?! I love you <3",
from_="+14158141829",
media_url="http://www.example.com/hearts.png"
)
print(message.sid)
| 28.529412 | 72 | 0.756701 |
95e1a4403054558661b3784bde4bf7312e74cabc | 2,735 | py | Python
examples/track.py | fatestcar/demo | 5baa338a1d600c816fc20d23006910d4f609a1b9 | ["MIT"] | null | null | null
examples/track.py | fatestcar/demo | 5baa338a1d600c816fc20d23006910d4f609a1b9 | ["MIT"] | null | null | null
examples/track.py | fatestcar/demo | 5baa338a1d600c816fc20d23006910d4f609a1b9 | ["MIT"] | 1 | 2018-10-11T05:55:33.000Z | 2018-10-11T05:55:33.000Z
import cv2
import numpy as np
from scipy.linalg import block_diag
class LaneTracker:
def __init__(self, n_lanes, proc_noise_scale, meas_noise_scale, process_cov_parallel=0, proc_noise_type='white'):
self.n_lanes = n_lanes
# 总共多少线
self.meas_size = 4 * self.n_lanes
self.state_size = self.meas_size * 2
self.contr_size = 0
self.kf = cv2.KalmanFilter(self.state_size, self.meas_size, self.contr_size)
self.kf.transitionMatrix = np.eye(self.state_size, dtype=np.float32)
self.kf.measurementMatrix = np.zeros((self.meas_size, self.state_size), np.float32)
for i in range(self.meas_size):
self.kf.measurementMatrix[i, i * 2] = 1
if proc_noise_type == 'white': # 白噪声
block = np.matrix([[0.25, 0.5],
[0.5, 1.]], dtype=np.float32)
self.kf.processNoiseCov = block_diag(*([block] * self.meas_size)) * proc_noise_scale
# block_diag是方块矩阵对角拼接
if proc_noise_type == 'identity':
self.kf.processNoiseCov = np.eye(self.state_size, dtype=np.float32) * proc_noise_scale
for i in range(0, self.meas_size, 2):
for j in range(1, self.n_lanes):
self.kf.processNoiseCov[i, i + (j * 8)] = process_cov_parallel
self.kf.processNoiseCov[i + (j * 8), i] = process_cov_parallel
self.kf.measurementNoiseCov = np.eye(self.meas_size, dtype=np.float32) * meas_noise_scale
self.kf.errorCovPre = np.eye(self.state_size)
self.meas = np.zeros((self.meas_size, 1), np.float32)
self.state = np.zeros((self.state_size, 1), np.float32)
self.first_detected = False
def _update_dt(self, dt):
for i in range(0, self.state_size, 2):
self.kf.transitionMatrix[i, i + 1] = dt
def _first_detect(self, lanes):
for l, i in zip(lanes, range(0, self.state_size, 8)):
self.state[i:i + 8:2, 0] = l
self.kf.statePost = self.state
self.first_detected = True
def update(self, lanes):
if self.first_detected:
for l, i in zip(lanes, range(0, self.meas_size, 4)):
if l is not None:
self.meas[i:i + 4, 0] = l
self.kf.correct(self.meas)
else:
if lanes.count(None) == 0:
self._first_detect(lanes)
def predict(self, dt):
if self.first_detected:
self._update_dt(dt)
state = self.kf.predict()
lanes = []
for i in range(0, len(state), 8):
lanes.append((state[i], state[i + 2], state[i + 4], state[i + 6]))
return lanes
else:
return None
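# A minimal usage sketch (hypothetical lane endpoints; a real pipeline feeds per-frame
# detections from a lane detector):
#   tracker = LaneTracker(n_lanes=2, proc_noise_scale=0.01, meas_noise_scale=10)
#   left, right = (100, 720, 550, 450), (1180, 720, 740, 450)  # (x1, y1, x2, y2)
#   tracker.update([left, right])     # the first complete detection initialises the filter
#   lanes = tracker.predict(dt=0.04)  # smoothed (x1, y1, x2, y2) per lane, or None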
| 38.521127 | 117 | 0.583181 |
f1f6ed3793e5f37becf6c9cf1c4a4301c537f849 | 18,267 | py | Python
airflow/providers/amazon/aws/hooks/batch_client.py | donnut/airflow | ce66bc944d246aa3b51cce6e2fc13cd25da08d6e | ["Apache-2.0"] | 1 | 2020-09-15T02:32:55.000Z | 2020-09-15T02:32:55.000Z
airflow/providers/amazon/aws/hooks/batch_client.py | donnut/airflow | ce66bc944d246aa3b51cce6e2fc13cd25da08d6e | ["Apache-2.0"] | 14 | 2019-12-03T02:54:42.000Z | 2020-02-27T16:08:10.000Z
airflow/providers/amazon/aws/hooks/batch_client.py | donnut/airflow | ce66bc944d246aa3b51cce6e2fc13cd25da08d6e | ["Apache-2.0"] | 1 | 2020-11-04T03:10:24.000Z | 2020-11-04T03:10:24.000Z
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A client for AWS batch services
.. seealso::
- http://boto3.readthedocs.io/en/latest/guide/configuration.html
- http://boto3.readthedocs.io/en/latest/reference/services/batch.html
- https://docs.aws.amazon.com/batch/latest/APIReference/Welcome.html
"""
from random import uniform
from time import sleep
from typing import Dict, List, Optional, Union
import botocore.client
import botocore.exceptions
import botocore.waiter
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.typing_compat import Protocol, runtime_checkable
# Add exceptions to pylint for the boto3 protocol only; ideally the boto3 library
# could provide
# protocols for all their dynamically generated classes (try to migrate this to a PR on botocore).
# Note that the use of invalid-name parameters should be restricted to the boto3 mappings only;
# all the Airflow wrappers of boto3 clients should not adopt invalid-names to match boto3.
# pylint: disable=invalid-name, unused-argument
@runtime_checkable
class AwsBatchProtocol(Protocol):
"""
A structured Protocol for ``boto3.client('batch') -> botocore.client.Batch``.
This is used for type hints on :py:meth:`.AwsBatchClient.client`; it covers
only the subset of client methods required.
.. seealso::
- https://mypy.readthedocs.io/en/latest/protocols.html
- http://boto3.readthedocs.io/en/latest/reference/services/batch.html
"""
def describe_jobs(self, jobs: List[str]) -> Dict:
"""
Get job descriptions from AWS batch
:param jobs: a list of JobId to describe
:type jobs: List[str]
:return: an API response to describe jobs
:rtype: Dict
"""
...
def get_waiter(self, waiterName: str) -> botocore.waiter.Waiter:
"""
Get an AWS Batch service waiter
:param waiterName: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing).
:type waiterName: str
:return: a waiter object for the named AWS batch service
:rtype: botocore.waiter.Waiter
.. note::
AWS batch might not have any waiters (until botocore PR-1307 is released).
.. code-block:: python
import boto3
boto3.client('batch').waiter_names == []
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/guide/clients.html#waiters
- https://github.com/boto/botocore/pull/1307
"""
...
def submit_job(
self,
jobName: str,
jobQueue: str,
jobDefinition: str,
arrayProperties: Dict,
parameters: Dict,
containerOverrides: Dict,
) -> Dict:
"""
Submit a batch job
:param jobName: the name for the AWS batch job
:type jobName: str
:param jobQueue: the queue name on AWS Batch
:type jobQueue: str
:param jobDefinition: the job definition name on AWS Batch
:type jobDefinition: str
:param arrayProperties: the same parameter that boto3 will receive
:type arrayProperties: Dict
:param parameters: the same parameter that boto3 will receive
:type parameters: Dict
:param containerOverrides: the same parameter that boto3 will receive
:type containerOverrides: Dict
:return: an API response
:rtype: Dict
"""
...
def terminate_job(self, jobId: str, reason: str) -> Dict:
"""
Terminate a batch job
:param jobId: a job ID to terminate
:type jobId: str
:param reason: a reason to terminate job ID
:type reason: str
:return: an API response
:rtype: Dict
"""
...
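# Because the protocol is @runtime_checkable, a structural isinstance() check against a
# live client is possible (a sketch, assuming AWS credentials and a region are configured):
#
#   import boto3
#   isinstance(boto3.client("batch"), AwsBatchProtocol)  # expected to be True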
# Note that the use of invalid-name parameters should be restricted to the boto3 mappings only;
# all the Airflow wrappers of boto3 clients should not adopt invalid-names to match boto3.
# pylint: enable=invalid-name, unused-argument
class AwsBatchClientHook(AwsBaseHook):
"""
A client for AWS batch services.
:param max_retries: exponential back-off retries, 4200 = 48 hours;
polling is only used when waiters is None
:type max_retries: Optional[int]
:param status_retries: number of HTTP retries to get job status, 10;
polling is only used when waiters is None
:type status_retries: Optional[int]
.. note::
Several methods use a default random delay to check or poll for job status, i.e.
``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``
Using a random interval helps to avoid AWS API throttle limits
when many concurrent tasks request job-descriptions.
To modify the global defaults for the range of jitter allowed when a
random delay is used to check batch job status, modify these defaults, e.g.:
.. code-block::
AwsBatchClient.DEFAULT_DELAY_MIN = 0
AwsBatchClient.DEFAULT_DELAY_MAX = 5
    When explicit delay values are used, a 1 second random jitter is applied to the
    delay (e.g. a delay of 0 sec will be a ``random.uniform(0, 1)`` delay). It is
generally recommended that random jitter is added to API requests. A
convenience method is provided for this, e.g. to get a random delay of
10 sec +/- 5 sec: ``delay = AwsBatchClient.add_jitter(10, width=5, minima=0)``
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html
- https://docs.aws.amazon.com/general/latest/gr/api-retries.html
- https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
MAX_RETRIES = 4200
STATUS_RETRIES = 10
# delays are in seconds
DEFAULT_DELAY_MIN = 1
DEFAULT_DELAY_MAX = 10
def __init__(
self, *args, max_retries: Optional[int] = None, status_retries: Optional[int] = None, **kwargs
):
# https://github.com/python/mypy/issues/6799 hence type: ignore
super().__init__(client_type='batch', *args, **kwargs) # type: ignore
self.max_retries = max_retries or self.MAX_RETRIES
self.status_retries = status_retries or self.STATUS_RETRIES
@property
def client(self) -> Union[AwsBatchProtocol, botocore.client.BaseClient]: # noqa: D402
"""
An AWS API client for batch services, like ``boto3.client('batch')``
:return: a boto3 'batch' client for the ``.region_name``
:rtype: Union[AwsBatchProtocol, botocore.client.BaseClient]
"""
return self.conn
def terminate_job(self, job_id: str, reason: str) -> Dict:
"""
Terminate a batch job
:param job_id: a job ID to terminate
:type job_id: str
:param reason: a reason to terminate job ID
:type reason: str
:return: an API response
:rtype: Dict
"""
response = self.get_conn().terminate_job(jobId=job_id, reason=reason)
self.log.info(response)
return response
def check_job_success(self, job_id: str) -> bool:
"""
Check the final status of the batch job; return True if the job
'SUCCEEDED', else raise an AirflowException
:param job_id: a batch job ID
:type job_id: str
:rtype: bool
:raises: AirflowException
"""
job = self.get_job_description(job_id)
job_status = job.get("status")
if job_status == "SUCCEEDED":
self.log.info("AWS batch job (%s) succeeded: %s", job_id, job)
return True
if job_status == "FAILED":
raise AirflowException("AWS Batch job ({}) failed: {}".format(job_id, job))
if job_status in ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING", "RUNNING"]:
raise AirflowException("AWS Batch job ({}) is not complete: {}".format(job_id, job))
raise AirflowException("AWS Batch job ({}) has unknown status: {}".format(job_id, job))
def wait_for_job(self, job_id: str, delay: Union[int, float, None] = None):
"""
Wait for batch job to complete
:param job_id: a batch job ID
:type job_id: str
:param delay: a delay before polling for job status
:type delay: Optional[Union[int, float]]
:raises: AirflowException
"""
self.delay(delay)
self.poll_for_job_running(job_id, delay)
self.poll_for_job_complete(job_id, delay)
self.log.info("AWS Batch job (%s) has completed", job_id)
def poll_for_job_running(self, job_id: str, delay: Union[int, float, None] = None):
"""
Poll for job running. The status that indicates a job is running or
already complete are: 'RUNNING'|'SUCCEEDED'|'FAILED'.
So the status options that this will wait for are the transitions from:
'SUBMITTED'>'PENDING'>'RUNNABLE'>'STARTING'>'RUNNING'|'SUCCEEDED'|'FAILED'
The completed status options are included for cases where the status
changes too quickly for polling to detect a RUNNING status that moves
quickly from STARTING to RUNNING to completed (often a failure).
:param job_id: a batch job ID
:type job_id: str
:param delay: a delay before polling for job status
:type delay: Optional[Union[int, float]]
:raises: AirflowException
"""
self.delay(delay)
running_status = ["RUNNING", "SUCCEEDED", "FAILED"]
self.poll_job_status(job_id, running_status)
def poll_for_job_complete(self, job_id: str, delay: Union[int, float, None] = None):
"""
Poll for job completion. The status that indicates job completion
are: 'SUCCEEDED'|'FAILED'.
So the status options that this will wait for are the transitions from:
'SUBMITTED'>'PENDING'>'RUNNABLE'>'STARTING'>'RUNNING'>'SUCCEEDED'|'FAILED'
:param job_id: a batch job ID
:type job_id: str
:param delay: a delay before polling for job status
:type delay: Optional[Union[int, float]]
:raises: AirflowException
"""
self.delay(delay)
complete_status = ["SUCCEEDED", "FAILED"]
self.poll_job_status(job_id, complete_status)
def poll_job_status(self, job_id: str, match_status: List[str]) -> bool:
"""
Poll for job status using an exponential back-off strategy (with max_retries).
:param job_id: a batch job ID
:type job_id: str
:param match_status: a list of job statuses to match; the batch job statuses are:
'SUBMITTED'|'PENDING'|'RUNNABLE'|'STARTING'|'RUNNING'|'SUCCEEDED'|'FAILED'
:type match_status: List[str]
:rtype: bool
:raises: AirflowException
"""
retries = 0
while True:
job = self.get_job_description(job_id)
job_status = job.get("status")
self.log.info(
"AWS Batch job (%s) check status (%s) in %s",
job_id,
job_status,
match_status,
)
if job_status in match_status:
return True
if retries >= self.max_retries:
raise AirflowException("AWS Batch job ({}) status checks exceed max_retries".format(job_id))
retries += 1
pause = self.exponential_delay(retries)
self.log.info(
"AWS Batch job (%s) status check (%d of %d) in the next %.2f seconds",
job_id,
retries,
self.max_retries,
pause,
)
self.delay(pause)
def get_job_description(self, job_id: str) -> Dict:
"""
Get job description (using status_retries).
:param job_id: a batch job ID
:type job_id: str
:return: an API response for describe jobs
:rtype: Dict
:raises: AirflowException
"""
retries = 0
while True:
try:
response = self.get_conn().describe_jobs(jobs=[job_id])
return self.parse_job_description(job_id, response)
except botocore.exceptions.ClientError as err:
error = err.response.get("Error", {})
if error.get("Code") == "TooManyRequestsException":
pass # allow it to retry, if possible
else:
raise AirflowException("AWS Batch job ({}) description error: {}".format(job_id, err))
retries += 1
if retries >= self.status_retries:
raise AirflowException(
"AWS Batch job ({}) description error: exceeded "
"status_retries ({})".format(job_id, self.status_retries)
)
pause = self.exponential_delay(retries)
self.log.info(
"AWS Batch job (%s) description retry (%d of %d) in the next %.2f seconds",
job_id,
retries,
self.status_retries,
pause,
)
self.delay(pause)
@staticmethod
def parse_job_description(job_id: str, response: Dict) -> Dict:
"""
Parse job description to extract description for job_id
:param job_id: a batch job ID
:type job_id: str
:param response: an API response for describe jobs
:type response: Dict
:return: an API response to describe job_id
:rtype: Dict
:raises: AirflowException
"""
jobs = response.get("jobs", [])
matching_jobs = [job for job in jobs if job.get("jobId") == job_id]
if len(matching_jobs) != 1:
raise AirflowException(
"AWS Batch job ({}) description error: response: {}".format(job_id, response)
)
return matching_jobs[0]
@staticmethod
def add_jitter(
delay: Union[int, float], width: Union[int, float] = 1, minima: Union[int, float] = 0
) -> float:
"""
Use delay +/- width for random jitter
Adding jitter to status polling can help to avoid
AWS batch API limits for monitoring batch jobs with
a high concurrency in Airflow tasks.
:param delay: number of seconds to pause;
delay is assumed to be a positive number
:type delay: Union[int, float]
:param width: delay +/- width for random jitter;
width is assumed to be a positive number
:type width: Union[int, float]
:param minima: minimum delay allowed;
minima is assumed to be a non-negative number
:type minima: Union[int, float]
:return: uniform(delay - width, delay + width) jitter
and it is a non-negative number
:rtype: float
"""
delay = abs(delay)
width = abs(width)
minima = abs(minima)
lower = max(minima, delay - width)
upper = delay + width
return uniform(lower, upper)
@staticmethod
def delay(delay: Union[int, float, None] = None):
"""
Pause execution for ``delay`` seconds.
:param delay: a delay to pause execution using ``time.sleep(delay)``;
a small 1 second jitter is applied to the delay.
:type delay: Optional[Union[int, float]]
.. note::
This method uses a default random delay, i.e.
``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``;
using a random interval helps to avoid AWS API throttle limits
when many concurrent tasks request job-descriptions.
"""
if delay is None:
delay = uniform(AwsBatchClientHook.DEFAULT_DELAY_MIN, AwsBatchClientHook.DEFAULT_DELAY_MAX)
else:
delay = AwsBatchClientHook.add_jitter(delay)
sleep(delay)
@staticmethod
def exponential_delay(tries: int) -> float:
"""
An exponential back-off delay, with random jitter. There is a maximum
interval of 10 minutes (with random jitter between 3 and 10 minutes).
This is used in the :py:meth:`.poll_for_job_status` method.
:param tries: Number of tries
:type tries: int
:rtype: float
Examples of behavior:
.. code-block:: python
def exp(tries):
max_interval = 600.0 # 10 minutes in seconds
delay = 1 + pow(tries * 0.6, 2)
delay = min(max_interval, delay)
print(delay / 3, delay)
for tries in range(10):
exp(tries)
# 0.33 1.0
# 0.45 1.35
# 0.81 2.44
# 1.41 4.23
# 2.25 6.76
# 3.33 10.00
# 4.65 13.95
# 6.21 18.64
# 8.01 24.04
# 10.05 30.15
.. seealso::
- https://docs.aws.amazon.com/general/latest/gr/api-retries.html
- https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
max_interval = 600.0 # results in 3 to 10 minute delay
delay = 1 + pow(tries * 0.6, 2)
delay = min(max_interval, delay)
return uniform(delay / 3, delay)
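# --- Illustrative sketch (not part of the original hook) ---
# A self-contained reproduction of the back-off/jitter timing used by
# poll_job_status above, so the retry pacing can be inspected without AWS or an
# Airflow installation. The formula mirrors exponential_delay; the helper name
# and the demo loop are assumptions made only for this sketch.
from random import uniform as _uniform

def _sketch_exponential_delay(tries: int) -> float:
    # quadratic growth, capped at 10 minutes, then jittered into [delay / 3, delay]
    max_interval = 600.0
    delay = min(max_interval, 1 + pow(tries * 0.6, 2))
    return _uniform(delay / 3, delay)

if __name__ == "__main__":
    # print the jittered pause each successive retry would sleep for
    for _attempt in range(1, 6):
        print(_attempt, round(_sketch_exponential_delay(_attempt), 2))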
| 34.143925
| 108
| 0.613018
|
e4b30eb8ae32d32ba5dbcbc4f9f44a3881b5f973
| 668
|
py
|
Python
|
python/features/vlsift_load_matlab.py
|
S-o-T/vlb
|
78495570e002d0ed6badd3df62f86e416839b0af
|
[
"BSD-2-Clause"
] | 11
|
2017-09-08T16:32:46.000Z
|
2022-02-02T15:28:22.000Z
|
python/features/vlsift_load_matlab.py
|
albutko/vlb
|
437245c0991948eeb36a277937a7e67d389041e4
|
[
"BSD-2-Clause"
] | 9
|
2017-09-13T20:22:51.000Z
|
2019-03-13T02:38:25.000Z
|
python/features/vlsift_load_matlab.py
|
albutko/vlb
|
437245c0991948eeb36a277937a7e67d389041e4
|
[
"BSD-2-Clause"
] | 3
|
2017-09-08T21:07:14.000Z
|
2021-02-17T17:42:43.000Z
|
"""
vlsift matlab version python wrapper
Author: Xu Zhang
"""
import numpy as np
import features.feature_utils
from features.DetectorDescriptorTemplate import DetectorAndDescriptor
class vlsift_load_matlab(DetectorAndDescriptor):
def __init__(self, csv_flag=True):
super(
vlsift_load_matlab,
self).__init__(
name='vlsift_load_matlab',
is_detector=True,
is_descriptor=True,
is_both=True,
csv_flag=csv_flag)
def detect_feature(self, image):
pass
def extract_descriptor(self, image, feature):
pass
def extract_all(self, image):
pass
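# --- Illustrative sketch (not part of the original wrapper) ---
# How the template above is expected to be constructed; detect_feature /
# extract_descriptor / extract_all are stubs here, so only construction is
# shown. This assumes DetectorAndDescriptor stores the keyword arguments it
# receives (name, is_detector, ...) as attributes.
if __name__ == '__main__':
    wrapper = vlsift_load_matlab(csv_flag=True)
    print(wrapper.name, wrapper.is_detector, wrapper.is_descriptor)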
| 22.266667
| 69
| 0.654192
|
fe742088f456e867f57ce5595630c721f1dbff30
| 335
|
py
|
Python
|
h/api/groups/__init__.py
|
noscripter/h
|
a7a4095a46683ea08dae62335bbcd53f7ab313e2
|
[
"MIT"
] | null | null | null |
h/api/groups/__init__.py
|
noscripter/h
|
a7a4095a46683ea08dae62335bbcd53f7ab313e2
|
[
"MIT"
] | null | null | null |
h/api/groups/__init__.py
|
noscripter/h
|
a7a4095a46683ea08dae62335bbcd53f7ab313e2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from h.api.groups.auth import group_principals
from h.api.groups.auth import set_permissions
from h.api.groups.logic import set_group_if_reply
from h.api.groups.logic import insert_group_if_none
__all__ = (
'group_principals',
'set_permissions',
'set_group_if_reply',
'insert_group_if_none'
)
| 23.928571
| 51
| 0.755224
|
b854a3d195b78efbeb2c4719c42a4abefe566de5
| 34,894
|
py
|
Python
|
discord/flags.py
|
PlasticStrawActivist/pycord
|
6ec6c27c99009b36ce3c9c1d80f09dde48fe0ce6
|
[
"MIT"
] | null | null | null |
discord/flags.py
|
PlasticStrawActivist/pycord
|
6ec6c27c99009b36ce3c9c1d80f09dde48fe0ce6
|
[
"MIT"
] | null | null | null |
discord/flags.py
|
PlasticStrawActivist/pycord
|
6ec6c27c99009b36ce3c9c1d80f09dde48fe0ce6
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Any, Callable, ClassVar, Dict, Generic, Iterator, List, Optional, Tuple, Type, TypeVar, overload
from .enums import UserFlags
__all__ = (
'SystemChannelFlags',
'MessageFlags',
'PublicUserFlags',
'Intents',
'MemberCacheFlags',
'ApplicationFlags',
)
FV = TypeVar('FV', bound='flag_value')
BF = TypeVar('BF', bound='BaseFlags')
class flag_value:
def __init__(self, func: Callable[[Any], int]):
self.flag = func(None)
self.__doc__ = func.__doc__
@overload
def __get__(self: FV, instance: None, owner: Type[BF]) -> FV:
...
@overload
def __get__(self, instance: BF, owner: Type[BF]) -> bool:
...
def __get__(self, instance: Optional[BF], owner: Type[BF]) -> Any:
if instance is None:
return self
return instance._has_flag(self.flag)
def __set__(self, instance: BF, value: bool) -> None:
instance._set_flag(self.flag, value)
def __repr__(self):
return f'<flag_value flag={self.flag!r}>'
class alias_flag_value(flag_value):
pass
def fill_with_flags(*, inverted: bool = False):
def decorator(cls: Type[BF]):
# fmt: off
cls.VALID_FLAGS = {
name: value.flag
for name, value in cls.__dict__.items()
if isinstance(value, flag_value)
}
# fmt: on
if inverted:
max_bits = max(cls.VALID_FLAGS.values()).bit_length()
cls.DEFAULT_VALUE = -1 + (2 ** max_bits)
else:
cls.DEFAULT_VALUE = 0
return cls
return decorator
# n.b. flags must inherit from this and use the decorator above
class BaseFlags:
VALID_FLAGS: ClassVar[Dict[str, int]]
DEFAULT_VALUE: ClassVar[int]
value: int
__slots__ = ('value',)
def __init__(self, **kwargs: bool):
self.value = self.DEFAULT_VALUE
for key, value in kwargs.items():
if key not in self.VALID_FLAGS:
raise TypeError(f'{key!r} is not a valid flag name.')
setattr(self, key, value)
@classmethod
def _from_value(cls, value):
self = cls.__new__(cls)
self.value = value
return self
def __eq__(self, other: Any) -> bool:
return isinstance(other, self.__class__) and self.value == other.value
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
return hash(self.value)
def __repr__(self) -> str:
return f'<{self.__class__.__name__} value={self.value}>'
def __iter__(self) -> Iterator[Tuple[str, bool]]:
for name, value in self.__class__.__dict__.items():
if isinstance(value, alias_flag_value):
continue
if isinstance(value, flag_value):
yield (name, self._has_flag(value.flag))
def _has_flag(self, o: int) -> bool:
return (self.value & o) == o
def _set_flag(self, o: int, toggle: bool) -> None:
if toggle is True:
self.value |= o
elif toggle is False:
self.value &= ~o
else:
raise TypeError(f'Value to set for {self.__class__.__name__} must be a bool.')
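# --- Illustrative sketch (not part of the original module) ---
# How flag_value and fill_with_flags compose: each decorated method returns its
# bit, the decorator collects them into VALID_FLAGS, and the descriptor turns
# attribute access into bit tests on ``value``. The class and flag names below
# are invented for the demonstration only.
@fill_with_flags()
class _DemoFlags(BaseFlags):
    __slots__ = ()

    @flag_value
    def red(self):
        return 1 << 0

    @flag_value
    def blue(self):
        return 1 << 1

# _DemoFlags(red=True).value == 1; setting ``blue`` afterwards flips bit 1:
#     f = _DemoFlags(red=True); f.blue = True; assert f.value == 3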
@fill_with_flags(inverted=True)
class SystemChannelFlags(BaseFlags):
r"""Wraps up a Discord system channel flag value.
Similar to :class:`Permissions`\, the properties provided are two way.
You can set and retrieve individual bits using the properties as if they
were regular bools. This allows you to edit the system flags easily.
To construct an object you can pass keyword arguments denoting the flags
to enable or disable.
.. container:: operations
.. describe:: x == y
Checks if two flags are equal.
.. describe:: x != y
Checks if two flags are not equal.
.. describe:: hash(x)
Return the flag's hash.
.. describe:: iter(x)
Returns an iterator of ``(name, value)`` pairs. This allows it
to be, for example, constructed as a dict or a list of pairs.
Attributes
-----------
value: :class:`int`
The raw value. This value is a bit array field of a 53-bit integer
representing the currently available flags. You should query
flags via the properties rather than using this raw value.
"""
__slots__ = ()
# For some reason the flags for system channels are "inverted"
# ergo, if they're set then it means "suppress" (off in the GUI toggle)
# Since this is counter-intuitive from an API perspective and annoying
# these will be inverted automatically
def _has_flag(self, o: int) -> bool:
return (self.value & o) != o
def _set_flag(self, o: int, toggle: bool) -> None:
if toggle is True:
self.value &= ~o
elif toggle is False:
self.value |= o
else:
raise TypeError('Value to set for SystemChannelFlags must be a bool.')
@flag_value
def join_notifications(self):
""":class:`bool`: Returns ``True`` if the system channel is used for member join notifications."""
return 1
@flag_value
def premium_subscriptions(self):
""":class:`bool`: Returns ``True`` if the system channel is used for "Nitro boosting" notifications."""
return 2
@flag_value
def guild_reminder_notifications(self):
""":class:`bool`: Returns ``True`` if the system channel is used for server setup helpful tips notifications.
.. versionadded:: 2.0
"""
return 4
@fill_with_flags()
class MessageFlags(BaseFlags):
r"""Wraps up a Discord Message flag value.
See :class:`SystemChannelFlags`.
.. container:: operations
.. describe:: x == y
Checks if two flags are equal.
.. describe:: x != y
Checks if two flags are not equal.
.. describe:: hash(x)
Return the flag's hash.
.. describe:: iter(x)
Returns an iterator of ``(name, value)`` pairs. This allows it
to be, for example, constructed as a dict or a list of pairs.
.. versionadded:: 1.3
Attributes
-----------
value: :class:`int`
The raw value. This value is a bit array field of a 53-bit integer
representing the currently available flags. You should query
flags via the properties rather than using this raw value.
"""
__slots__ = ()
@flag_value
def crossposted(self):
""":class:`bool`: Returns ``True`` if the message is the original crossposted message."""
return 1
@flag_value
def is_crossposted(self):
""":class:`bool`: Returns ``True`` if the message was crossposted from another channel."""
return 2
@flag_value
def suppress_embeds(self):
""":class:`bool`: Returns ``True`` if the message's embeds have been suppressed."""
return 4
@flag_value
def source_message_deleted(self):
""":class:`bool`: Returns ``True`` if the source message for this crosspost has been deleted."""
return 8
@flag_value
def urgent(self):
""":class:`bool`: Returns ``True`` if the source message is an urgent message.
An urgent message is one sent by Discord Trust and Safety.
"""
return 16
@flag_value
def has_thread(self):
""":class:`bool`: Returns ``True`` if the source message is associated with a thread.
.. versionadded:: 2.0
"""
return 32
@flag_value
def ephemeral(self):
""":class:`bool`: Returns ``True`` if the source message is ephemeral.
.. versionadded:: 2.0
"""
return 64
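# --- Illustrative sketch (not part of the original module) ---
# Decoding a raw flag integer with MessageFlags: bit 2 (value 4) is
# suppress_embeds and bit 5 (value 32) is has_thread, so the made-up raw value
# 36 decodes as both of those and nothing else.
def _demo_message_flags() -> None:
    flags = MessageFlags._from_value(36)
    assert flags.suppress_embeds and flags.has_thread
    assert not flags.crossposted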
@fill_with_flags()
class PublicUserFlags(BaseFlags):
r"""Wraps up the Discord User Public flags.
.. container:: operations
.. describe:: x == y
Checks if two PublicUserFlags are equal.
.. describe:: x != y
Checks if two PublicUserFlags are not equal.
.. describe:: hash(x)
Return the flag's hash.
.. describe:: iter(x)
Returns an iterator of ``(name, value)`` pairs. This allows it
to be, for example, constructed as a dict or a list of pairs.
Note that aliases are not shown.
.. versionadded:: 1.4
Attributes
-----------
value: :class:`int`
The raw value. This value is a bit array field of a 53-bit integer
representing the currently available flags. You should query
flags via the properties rather than using this raw value.
"""
__slots__ = ()
@flag_value
def staff(self):
""":class:`bool`: Returns ``True`` if the user is a Discord Employee."""
return UserFlags.staff.value
@flag_value
def partner(self):
""":class:`bool`: Returns ``True`` if the user is a Discord Partner."""
return UserFlags.partner.value
@flag_value
def hypesquad(self):
""":class:`bool`: Returns ``True`` if the user is a HypeSquad Events member."""
return UserFlags.hypesquad.value
@flag_value
def bug_hunter(self):
""":class:`bool`: Returns ``True`` if the user is a Bug Hunter"""
return UserFlags.bug_hunter.value
@flag_value
def hypesquad_bravery(self):
""":class:`bool`: Returns ``True`` if the user is a HypeSquad Bravery member."""
return UserFlags.hypesquad_bravery.value
@flag_value
def hypesquad_brilliance(self):
""":class:`bool`: Returns ``True`` if the user is a HypeSquad Brilliance member."""
return UserFlags.hypesquad_brilliance.value
@flag_value
def hypesquad_balance(self):
""":class:`bool`: Returns ``True`` if the user is a HypeSquad Balance member."""
return UserFlags.hypesquad_balance.value
@flag_value
def early_supporter(self):
""":class:`bool`: Returns ``True`` if the user is an Early Supporter."""
return UserFlags.early_supporter.value
@flag_value
def team_user(self):
""":class:`bool`: Returns ``True`` if the user is a Team User."""
return UserFlags.team_user.value
@flag_value
def system(self):
""":class:`bool`: Returns ``True`` if the user is a system user (i.e. represents Discord officially)."""
return UserFlags.system.value
@flag_value
def bug_hunter_level_2(self):
""":class:`bool`: Returns ``True`` if the user is a Bug Hunter Level 2"""
return UserFlags.bug_hunter_level_2.value
@flag_value
def verified_bot(self):
""":class:`bool`: Returns ``True`` if the user is a Verified Bot."""
return UserFlags.verified_bot.value
@flag_value
def verified_bot_developer(self):
""":class:`bool`: Returns ``True`` if the user is an Early Verified Bot Developer."""
return UserFlags.verified_bot_developer.value
@alias_flag_value
def early_verified_bot_developer(self):
""":class:`bool`: An alias for :attr:`verified_bot_developer`.
.. versionadded:: 1.5
"""
return UserFlags.verified_bot_developer.value
@flag_value
def discord_certified_moderator(self):
""":class:`bool`: Returns ``True`` if the user is a Discord Certified Moderator.
.. versionadded:: 2.0
"""
return UserFlags.discord_certified_moderator.value
def all(self) -> List[UserFlags]:
"""List[:class:`UserFlags`]: Returns all public flags the user has."""
return [public_flag for public_flag in UserFlags if self._has_flag(public_flag.value)]
@fill_with_flags()
class Intents(BaseFlags):
r"""Wraps up a Discord gateway intent flag.
Similar to :class:`Permissions`\, the properties provided are two way.
You can set and retrieve individual bits using the properties as if they
were regular bools.
To construct an object you can pass keyword arguments denoting the flags
to enable or disable.
This is used to disable certain gateway features that are unnecessary to
run your bot. To make use of this, it is passed to the ``intents`` keyword
argument of :class:`Client`.
.. versionadded:: 1.5
.. container:: operations
.. describe:: x == y
Checks if two flags are equal.
.. describe:: x != y
Checks if two flags are not equal.
.. describe:: hash(x)
Return the flag's hash.
.. describe:: iter(x)
Returns an iterator of ``(name, value)`` pairs. This allows it
to be, for example, constructed as a dict or a list of pairs.
Attributes
-----------
value: :class:`int`
The raw value. You should query flags via the properties
rather than using this raw value.
"""
__slots__ = ()
def __init__(self, **kwargs: bool):
self.value = self.DEFAULT_VALUE
for key, value in kwargs.items():
if key not in self.VALID_FLAGS:
raise TypeError(f'{key!r} is not a valid flag name.')
setattr(self, key, value)
@classmethod
def all(cls: Type[Intents]) -> Intents:
"""A factory method that creates a :class:`Intents` with everything enabled."""
bits = max(cls.VALID_FLAGS.values()).bit_length()
value = (1 << bits) - 1
self = cls.__new__(cls)
self.value = value
return self
@classmethod
def none(cls: Type[Intents]) -> Intents:
"""A factory method that creates a :class:`Intents` with everything disabled."""
self = cls.__new__(cls)
self.value = self.DEFAULT_VALUE
return self
@classmethod
def default(cls: Type[Intents]) -> Intents:
"""A factory method that creates a :class:`Intents` with everything enabled
except :attr:`presences` and :attr:`members`.
"""
self = cls.all()
self.presences = False
self.members = False
return self
@flag_value
def guilds(self):
""":class:`bool`: Whether guild related events are enabled.
This corresponds to the following events:
- :func:`on_guild_join`
- :func:`on_guild_remove`
- :func:`on_guild_available`
- :func:`on_guild_unavailable`
- :func:`on_guild_channel_update`
- :func:`on_guild_channel_create`
- :func:`on_guild_channel_delete`
- :func:`on_guild_channel_pins_update`
This also corresponds to the following attributes and classes in terms of cache:
- :attr:`Client.guilds`
- :class:`Guild` and all its attributes.
- :meth:`Client.get_channel`
- :meth:`Client.get_all_channels`
It is highly advisable to leave this intent enabled for your bot to function.
"""
return 1 << 0
@flag_value
def members(self):
""":class:`bool`: Whether guild member related events are enabled.
This corresponds to the following events:
- :func:`on_member_join`
- :func:`on_member_remove`
- :func:`on_member_update`
- :func:`on_user_update`
This also corresponds to the following attributes and classes in terms of cache:
- :meth:`Client.get_all_members`
- :meth:`Client.get_user`
- :meth:`Guild.chunk`
- :meth:`Guild.fetch_members`
- :meth:`Guild.get_member`
- :attr:`Guild.members`
- :attr:`Member.roles`
- :attr:`Member.nick`
- :attr:`Member.premium_since`
- :attr:`User.name`
- :attr:`User.avatar`
- :attr:`User.discriminator`
For more information go to the :ref:`member intent documentation <need_members_intent>`.
.. note::
Currently, this requires opting in explicitly via the developer portal as well.
Bots in over 100 guilds will need to apply to Discord for verification.
"""
return 1 << 1
@flag_value
def bans(self):
""":class:`bool`: Whether guild ban related events are enabled.
This corresponds to the following events:
- :func:`on_member_ban`
- :func:`on_member_unban`
This does not correspond to any attributes or classes in the library in terms of cache.
"""
return 1 << 2
@flag_value
def emojis(self):
""":class:`bool`: Alias of :attr:`.emojis_and_stickers`.
.. versionchanged:: 2.0
Changed to an alias.
"""
return 1 << 3
@alias_flag_value
def emojis_and_stickers(self):
""":class:`bool`: Whether guild emoji and sticker related events are enabled.
.. versionadded:: 2.0
This corresponds to the following events:
- :func:`on_guild_emojis_update`
- :func:`on_guild_stickers_update`
This also corresponds to the following attributes and classes in terms of cache:
- :class:`Emoji`
- :class:`GuildSticker`
- :meth:`Client.get_emoji`
- :meth:`Client.get_sticker`
- :meth:`Client.emojis`
- :meth:`Client.stickers`
- :attr:`Guild.emojis`
- :attr:`Guild.stickers`
"""
return 1 << 3
@flag_value
def integrations(self):
""":class:`bool`: Whether guild integration related events are enabled.
This corresponds to the following events:
- :func:`on_guild_integrations_update`
- :func:`on_integration_create`
- :func:`on_integration_update`
- :func:`on_raw_integration_delete`
This does not correspond to any attributes or classes in the library in terms of cache.
"""
return 1 << 4
@flag_value
def webhooks(self):
""":class:`bool`: Whether guild webhook related events are enabled.
This corresponds to the following events:
- :func:`on_webhooks_update`
This does not correspond to any attributes or classes in the library in terms of cache.
"""
return 1 << 5
@flag_value
def invites(self):
""":class:`bool`: Whether guild invite related events are enabled.
This corresponds to the following events:
- :func:`on_invite_create`
- :func:`on_invite_delete`
This does not correspond to any attributes or classes in the library in terms of cache.
"""
return 1 << 6
@flag_value
def voice_states(self):
""":class:`bool`: Whether guild voice state related events are enabled.
This corresponds to the following events:
- :func:`on_voice_state_update`
This also corresponds to the following attributes and classes in terms of cache:
- :attr:`VoiceChannel.members`
- :attr:`VoiceChannel.voice_states`
- :attr:`Member.voice`
.. note::
This intent is required to connect to voice.
"""
return 1 << 7
@flag_value
def presences(self):
""":class:`bool`: Whether guild presence related events are enabled.
This corresponds to the following events:
- :func:`on_presence_update`
This also corresponds to the following attributes and classes in terms of cache:
- :attr:`Member.activities`
- :attr:`Member.status`
- :attr:`Member.raw_status`
For more information go to the :ref:`presence intent documentation <need_presence_intent>`.
.. note::
Currently, this requires opting in explicitly via the developer portal as well.
Bots in over 100 guilds will need to apply to Discord for verification.
"""
return 1 << 8
@alias_flag_value
def messages(self):
""":class:`bool`: Whether guild and direct message related events are enabled.
This is a shortcut to set or get both :attr:`guild_messages` and :attr:`dm_messages`.
This corresponds to the following events:
- :func:`on_message` (both guilds and DMs)
- :func:`on_message_edit` (both guilds and DMs)
- :func:`on_message_delete` (both guilds and DMs)
- :func:`on_raw_message_delete` (both guilds and DMs)
- :func:`on_raw_message_edit` (both guilds and DMs)
This also corresponds to the following attributes and classes in terms of cache:
- :class:`Message`
- :attr:`Client.cached_messages`
Note that due to an implicit relationship this also corresponds to the following events:
- :func:`on_reaction_add` (both guilds and DMs)
- :func:`on_reaction_remove` (both guilds and DMs)
- :func:`on_reaction_clear` (both guilds and DMs)
"""
return (1 << 9) | (1 << 12)
@flag_value
def guild_messages(self):
""":class:`bool`: Whether guild message related events are enabled.
See also :attr:`dm_messages` for DMs or :attr:`messages` for both.
This corresponds to the following events:
- :func:`on_message` (only for guilds)
- :func:`on_message_edit` (only for guilds)
- :func:`on_message_delete` (only for guilds)
- :func:`on_raw_message_delete` (only for guilds)
- :func:`on_raw_message_edit` (only for guilds)
This also corresponds to the following attributes and classes in terms of cache:
- :class:`Message`
- :attr:`Client.cached_messages` (only for guilds)
Note that due to an implicit relationship this also corresponds to the following events:
- :func:`on_reaction_add` (only for guilds)
- :func:`on_reaction_remove` (only for guilds)
- :func:`on_reaction_clear` (only for guilds)
"""
return 1 << 9
@flag_value
def dm_messages(self):
""":class:`bool`: Whether direct message related events are enabled.
See also :attr:`guild_messages` for guilds or :attr:`messages` for both.
This corresponds to the following events:
- :func:`on_message` (only for DMs)
- :func:`on_message_edit` (only for DMs)
- :func:`on_message_delete` (only for DMs)
- :func:`on_raw_message_delete` (only for DMs)
- :func:`on_raw_message_edit` (only for DMs)
This also corresponds to the following attributes and classes in terms of cache:
- :class:`Message`
- :attr:`Client.cached_messages` (only for DMs)
Note that due to an implicit relationship this also corresponds to the following events:
- :func:`on_reaction_add` (only for DMs)
- :func:`on_reaction_remove` (only for DMs)
- :func:`on_reaction_clear` (only for DMs)
"""
return 1 << 12
@alias_flag_value
def reactions(self):
""":class:`bool`: Whether guild and direct message reaction related events are enabled.
This is a shortcut to set or get both :attr:`guild_reactions` and :attr:`dm_reactions`.
This corresponds to the following events:
- :func:`on_reaction_add` (both guilds and DMs)
- :func:`on_reaction_remove` (both guilds and DMs)
- :func:`on_reaction_clear` (both guilds and DMs)
- :func:`on_raw_reaction_add` (both guilds and DMs)
- :func:`on_raw_reaction_remove` (both guilds and DMs)
- :func:`on_raw_reaction_clear` (both guilds and DMs)
This also corresponds to the following attributes and classes in terms of cache:
- :attr:`Message.reactions` (both guild and DM messages)
"""
return (1 << 10) | (1 << 13)
@flag_value
def guild_reactions(self):
""":class:`bool`: Whether guild message reaction related events are enabled.
See also :attr:`dm_reactions` for DMs or :attr:`reactions` for both.
This corresponds to the following events:
- :func:`on_reaction_add` (only for guilds)
- :func:`on_reaction_remove` (only for guilds)
- :func:`on_reaction_clear` (only for guilds)
- :func:`on_raw_reaction_add` (only for guilds)
- :func:`on_raw_reaction_remove` (only for guilds)
- :func:`on_raw_reaction_clear` (only for guilds)
This also corresponds to the following attributes and classes in terms of cache:
- :attr:`Message.reactions` (only for guild messages)
"""
return 1 << 10
@flag_value
def dm_reactions(self):
""":class:`bool`: Whether direct message reaction related events are enabled.
See also :attr:`guild_reactions` for guilds or :attr:`reactions` for both.
This corresponds to the following events:
- :func:`on_reaction_add` (only for DMs)
- :func:`on_reaction_remove` (only for DMs)
- :func:`on_reaction_clear` (only for DMs)
- :func:`on_raw_reaction_add` (only for DMs)
- :func:`on_raw_reaction_remove` (only for DMs)
- :func:`on_raw_reaction_clear` (only for DMs)
This also corresponds to the following attributes and classes in terms of cache:
- :attr:`Message.reactions` (only for DM messages)
"""
return 1 << 13
@alias_flag_value
def typing(self):
""":class:`bool`: Whether guild and direct message typing related events are enabled.
This is a shortcut to set or get both :attr:`guild_typing` and :attr:`dm_typing`.
This corresponds to the following events:
- :func:`on_typing` (both guilds and DMs)
This does not correspond to any attributes or classes in the library in terms of cache.
"""
return (1 << 11) | (1 << 14)
@flag_value
def guild_typing(self):
""":class:`bool`: Whether guild typing related events are enabled.
See also :attr:`dm_typing` for DMs or :attr:`typing` for both.
This corresponds to the following events:
- :func:`on_typing` (only for guilds)
This does not correspond to any attributes or classes in the library in terms of cache.
"""
return 1 << 11
@flag_value
def dm_typing(self):
""":class:`bool`: Whether direct message typing related events are enabled.
See also :attr:`guild_typing` for guilds or :attr:`typing` for both.
This corresponds to the following events:
- :func:`on_typing` (only for DMs)
This does not correspond to any attributes or classes in the library in terms of cache.
"""
return 1 << 14
@fill_with_flags()
class MemberCacheFlags(BaseFlags):
"""Controls the library's cache policy when it comes to members.
This allows for finer grained control over what members are cached.
Note that the bot's own member is always cached. This class is passed
to the ``member_cache_flags`` parameter in :class:`Client`.
Due to a quirk in how Discord works, in order to ensure proper cleanup
of cache resources it is recommended to have :attr:`Intents.members`
enabled. Otherwise the library cannot know when a member leaves a guild and
is thus unable to clean up after itself.
To construct an object you can pass keyword arguments denoting the flags
to enable or disable.
The default value is all flags enabled.
.. versionadded:: 1.5
.. container:: operations
.. describe:: x == y
Checks if two flags are equal.
.. describe:: x != y
Checks if two flags are not equal.
.. describe:: hash(x)
Return the flag's hash.
.. describe:: iter(x)
Returns an iterator of ``(name, value)`` pairs. This allows it
to be, for example, constructed as a dict or a list of pairs.
Attributes
-----------
value: :class:`int`
The raw value. You should query flags via the properties
rather than using this raw value.
"""
__slots__ = ()
def __init__(self, **kwargs: bool):
bits = max(self.VALID_FLAGS.values()).bit_length()
self.value = (1 << bits) - 1
for key, value in kwargs.items():
if key not in self.VALID_FLAGS:
raise TypeError(f'{key!r} is not a valid flag name.')
setattr(self, key, value)
@classmethod
def all(cls: Type[MemberCacheFlags]) -> MemberCacheFlags:
"""A factory method that creates a :class:`MemberCacheFlags` with everything enabled."""
bits = max(cls.VALID_FLAGS.values()).bit_length()
value = (1 << bits) - 1
self = cls.__new__(cls)
self.value = value
return self
@classmethod
def none(cls: Type[MemberCacheFlags]) -> MemberCacheFlags:
"""A factory method that creates a :class:`MemberCacheFlags` with everything disabled."""
self = cls.__new__(cls)
self.value = self.DEFAULT_VALUE
return self
@property
def _empty(self):
return self.value == self.DEFAULT_VALUE
@flag_value
def voice(self):
""":class:`bool`: Whether to cache members that are in voice.
This requires :attr:`Intents.voice_states`.
Members that leave voice are no longer cached.
"""
return 1
@flag_value
def joined(self):
""":class:`bool`: Whether to cache members that joined the guild
or are chunked as part of the initial log in flow.
This requires :attr:`Intents.members`.
Members that leave the guild are no longer cached.
"""
return 2
@classmethod
def from_intents(cls: Type[MemberCacheFlags], intents: Intents) -> MemberCacheFlags:
"""A factory method that creates a :class:`MemberCacheFlags` based on
the currently selected :class:`Intents`.
Parameters
------------
intents: :class:`Intents`
The intents to select from.
Returns
---------
:class:`MemberCacheFlags`
The resulting member cache flags.
"""
self = cls.none()
if intents.members:
self.joined = True
if intents.voice_states:
self.voice = True
return self
def _verify_intents(self, intents: Intents):
if self.voice and not intents.voice_states:
raise ValueError('MemberCacheFlags.voice requires Intents.voice_states')
if self.joined and not intents.members:
raise ValueError('MemberCacheFlags.joined requires Intents.members')
@property
def _voice_only(self):
return self.value == 1
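# --- Illustrative sketch (not part of the original module) ---
# from_intents derives the member cache policy from the enabled gateway
# intents, exactly as documented above: members -> joined, voice_states -> voice.
def _demo_member_cache_flags() -> None:
    intents = Intents.none()
    intents.members = True
    flags = MemberCacheFlags.from_intents(intents)
    assert flags.joined and not flags.voice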
@fill_with_flags()
class ApplicationFlags(BaseFlags):
r"""Wraps up the Discord Application flags.
.. container:: operations
.. describe:: x == y
Checks if two ApplicationFlags are equal.
.. describe:: x != y
Checks if two ApplicationFlags are not equal.
.. describe:: hash(x)
Return the flag's hash.
.. describe:: iter(x)
Returns an iterator of ``(name, value)`` pairs. This allows it
to be, for example, constructed as a dict or a list of pairs.
Note that aliases are not shown.
.. versionadded:: 2.0
Attributes
-----------
value: :class:`int`
The raw value. You should query flags via the properties
rather than using this raw value.
"""
@flag_value
def gateway_presence(self):
""":class:`bool`: Returns ``True`` if the application is verified and is allowed to
receive presence information over the gateway.
"""
return 1 << 12
@flag_value
def gateway_presence_limited(self):
""":class:`bool`: Returns ``True`` if the application is allowed to receive limited
presence information over the gateway.
"""
return 1 << 13
@flag_value
def gateway_guild_members(self):
""":class:`bool`: Returns ``True`` if the application is verified and is allowed to
receive guild members information over the gateway.
"""
return 1 << 14
@flag_value
def gateway_guild_members_limited(self):
""":class:`bool`: Returns ``True`` if the application is allowed to receive limited
guild members information over the gateway.
"""
return 1 << 15
@flag_value
def verification_pending_guild_limit(self):
""":class:`bool`: Returns ``True`` if the application is currently pending verification
and has hit the guild limit.
"""
return 1 << 16
@flag_value
def embedded(self):
""":class:`bool`: Returns ``True`` if the application is embedded within the Discord client."""
return 1 << 17
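# --- Illustrative sketch (not part of the original module) ---
# The usual client-side pattern for the Intents class defined above: start from
# the defaults (everything except members and presences) and opt in to the
# privileged member intent explicitly.
def _demo_intents() -> None:
    intents = Intents.default()
    intents.members = True
    assert intents.guilds and intents.members and not intents.presences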
| 32.641721
| 118
| 0.605319
|
0c7d5ed9aaf5bd3d21e5c7d15d019aab3204a8a2
| 2,669
|
py
|
Python
|
pyvista-examples/wolf-creek.py
|
RichardScottOZ/banesullivan
|
8b6a530fc7ea36a91f6aa6a5dc3d4d5557128d04
|
[
"MIT"
] | 2
|
2019-11-14T10:44:59.000Z
|
2021-05-26T05:47:48.000Z
|
pyvista-examples/wolf-creek.py
|
RichardScottOZ/banesullivan
|
8b6a530fc7ea36a91f6aa6a5dc3d4d5557128d04
|
[
"MIT"
] | 2
|
2019-11-14T16:53:40.000Z
|
2021-09-28T23:05:42.000Z
|
pyvista-examples/wolf-creek.py
|
RichardScottOZ/banesullivan
|
8b6a530fc7ea36a91f6aa6a5dc3d4d5557128d04
|
[
"MIT"
] | 3
|
2019-11-14T10:20:30.000Z
|
2021-09-28T18:01:38.000Z
|
"""
Load OMF Project
~~~~~~~~~~~~~~~~
Load and visualize an OMF project file
Originally from: https://opengeovis.github.io/omfvista/examples/load-project.html
"""
import omfvista
# sphinx_gallery_thumbnail_number = 3
import pyvista as pv
from pyvista import examples
###############################################################################
# Load the project into an :class:`pyvista.MultiBlock` dataset
path, _ = examples.downloads._download_file("test_file.omf")
project = omfvista.load_project(path)
print(project)
###############################################################################
# Once the data is loaded as a :class:`pyvista.MultiBlock` dataset from
# ``omfvista``, then that object can be directly used for interactive 3D
# visualization from ``pyvista``:
project.plot()
###############################################################################
# Or an interactive scene can be created and manipulated to create a compelling
# figure directly in a Jupyter notebook. First, grab the elements from the
# project:
# Grab a few elements of interest and plot em up!
vol = project["Block Model"]
assay = project["wolfpass_WP_assay"]
topo = project["Topography"]
dacite = project["Dacite"]
###############################################################################
assay.set_active_scalars("DENSITY")
p = pv.Plotter()
p.add_mesh(assay.tube(radius=3))
p.add_mesh(topo, opacity=0.5)
p.camera_position = [
(445542.1943310096, 491993.83439313783, 2319.4833541935445),
(445279.0538059701, 493496.6896061105, 2751.562316285356),
(-0.03677380086746433, -0.2820672798388477, 0.9586895937758338),
]
p.show()
###############################################################################
# Then apply a filtering tool from ``pyvista`` to the volumetric data:
# Threshold the volumetric data
thresh_vol = vol.threshold([1.09, 4.20])
print(thresh_vol)
###############################################################################
# Then you can put it all in one environment!
# Create a plotting window
p = pv.Plotter()
# Add the bounds axis
p.show_grid()
p.add_bounding_box()
# Add our datasets
p.add_mesh(topo, opacity=0.5)
p.add_mesh(
dacite,
color="orange",
opacity=0.6,
)
p.add_mesh(thresh_vol, cmap="coolwarm", clim=vol.get_data_range())
# Add the assay logs: use a tube filter that varies the radius by an attribute
p.add_mesh(assay.tube(radius=3), cmap="viridis")
p.camera_position = [
(446842.54037898243, 492089.0563631193, 3229.5037597889404),
(445265.2503466077, 493747.3230470255, 2799.8853219866005),
(-0.10728419235836695, 0.1524885965210015, 0.9824649255831316),
]
p.show()
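###############################################################################
# Illustrative addition (not part of the original example): the same
# thresholded scene can be rendered off screen and written straight to a static
# image. ``off_screen=True`` and the ``screenshot`` argument are standard
# ``pyvista.Plotter`` options; the output filename is made up.
p2 = pv.Plotter(off_screen=True)
p2.add_mesh(topo, opacity=0.5)
p2.add_mesh(thresh_vol, cmap="coolwarm", clim=vol.get_data_range())
p2.show(screenshot="wolf_creek_thresholded.png")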
| 29.988764
| 81
| 0.608842
|
51287e09bb2301a055c70c1427b736a14890f24b
| 300
|
py
|
Python
|
stdeb/__init__.py
|
saraso-la/stdeb
|
a788f74ff9951df73fbd8da7665f2d1d8eb67d99
|
[
"MIT"
] | null | null | null |
stdeb/__init__.py
|
saraso-la/stdeb
|
a788f74ff9951df73fbd8da7665f2d1d8eb67d99
|
[
"MIT"
] | null | null | null |
stdeb/__init__.py
|
saraso-la/stdeb
|
a788f74ff9951df73fbd8da7665f2d1d8eb67d99
|
[
"MIT"
] | null | null | null |
import logging
__version__ = '0.6.0+git' # keep in sync with ../setup.py
log = logging.getLogger('stdeb')
log.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
| 27.272727
| 57
| 0.77
|
eb5e314017ac041165cc363685a28ed2a4d50691
| 520
|
py
|
Python
|
tests/int_tests/test_lt.py
|
lycantropos/rithm
|
61ae1614411ab0ce7feb403fdf93b71f49231ec1
|
[
"MIT"
] | null | null | null |
tests/int_tests/test_lt.py
|
lycantropos/rithm
|
61ae1614411ab0ce7feb403fdf93b71f49231ec1
|
[
"MIT"
] | null | null | null |
tests/int_tests/test_lt.py
|
lycantropos/rithm
|
61ae1614411ab0ce7feb403fdf93b71f49231ec1
|
[
"MIT"
] | null | null | null |
from hypothesis import given
from tests.utils import (IntWithBuiltin,
equivalence)
from . import strategies
@given(strategies.ints_with_builtins, strategies.ints_with_builtins)
def test_connection_with_builtin(first_with_builtin: IntWithBuiltin,
second_with_builtin: IntWithBuiltin) -> None:
first, first_builtin = first_with_builtin
second, second_builtin = second_with_builtin
assert equivalence(first < second, first_builtin < second_builtin)
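# --- Illustrative sketch (not part of the original test module) ---
# The same "agree with the builtin operator" pattern, written without the
# project's fixtures: hypothesis draws plain ints and the property checks that
# a comparison computed another way matches the builtin ``<``. The helper and
# test names below are local to this sketch.
from hypothesis import strategies as _st

def _lt_via_subtraction(a: int, b: int) -> bool:
    # a < b exactly when a - b is negative
    return (a - b) < 0

@given(_st.integers(), _st.integers())
def test_lt_sketch_matches_builtin(a: int, b: int) -> None:
    assert _lt_via_subtraction(a, b) is (a < b)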
| 34.666667
| 78
| 0.732692
|
64dc25101eae6934bc1342bd1925494c3cbe3286
| 12,450
|
py
|
Python
|
tests/core/test_datamodules.py
|
aribornstein/pytorch-lightning
|
ca68cac57ad8eefc9b477ee126eb42a483f27a39
|
[
"Apache-2.0"
] | 1
|
2020-11-24T17:49:42.000Z
|
2020-11-24T17:49:42.000Z
|
tests/core/test_datamodules.py
|
aribornstein/pytorch-lightning
|
ca68cac57ad8eefc9b477ee126eb42a483f27a39
|
[
"Apache-2.0"
] | 8
|
2020-10-27T22:39:24.000Z
|
2021-01-24T16:41:34.000Z
|
tests/core/test_datamodules.py
|
tarepan/pytorch-lightning
|
0b7f5a88a0f4691ec228c4708295a10d403fd592
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from argparse import ArgumentParser
from typing import Optional
from unittest.mock import MagicMock
import pytest
import torch
from torch.utils.data import DataLoader, random_split
from pytorch_lightning import LightningDataModule, seed_everything, Trainer
from pytorch_lightning.accelerators.gpu_accelerator import GPUAccelerator
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.utilities.model_utils import is_overridden
from tests.base import EvalModelTemplate
from tests.base.datamodules import TrialMNISTDataModule
from tests.base.datasets import TrialMNIST
from tests.base.develop_utils import reset_seed
def test_can_prepare_data(tmpdir):
dm = TrialMNISTDataModule()
trainer = Trainer()
trainer.datamodule = dm
# 1 no DM
# prepare_data_per_node = True
# local rank = 0 (True)
trainer.prepare_data_per_node = True
trainer.local_rank = 0
assert trainer.data_connector.can_prepare_data()
# local rank = 1 (False)
trainer.local_rank = 1
assert not trainer.data_connector.can_prepare_data()
# prepare_data_per_node = False (prepare across all nodes)
# global rank = 0 (True)
trainer.prepare_data_per_node = False
trainer.node_rank = 0
trainer.local_rank = 0
assert trainer.data_connector.can_prepare_data()
# global rank = 1 (False)
trainer.node_rank = 1
trainer.local_rank = 0
assert not trainer.data_connector.can_prepare_data()
trainer.node_rank = 0
trainer.local_rank = 1
assert not trainer.data_connector.can_prepare_data()
# 2 dm
# prepare_data_per_node = True
# local rank = 0 (True)
trainer.prepare_data_per_node = True
trainer.local_rank = 0
# is_overridden prepare data = True
# has been called
# False
dm._has_prepared_data = True
assert not trainer.data_connector.can_prepare_data()
# has not been called
# True
dm._has_prepared_data = False
assert trainer.data_connector.can_prepare_data()
# is_overridden prepare data = False
# True
dm.prepare_data = None
assert trainer.data_connector.can_prepare_data()
def test_hooks_no_recursion_error(tmpdir):
# hooks were appended in cascade every time a new data module was instantiated, leading to a recursion error.
# See https://github.com/PyTorchLightning/pytorch-lightning/issues/3652
class DummyDM(LightningDataModule):
def setup(self, *args, **kwargs):
pass
def prepare_data(self, *args, **kwargs):
pass
for i in range(1005):
dm = DummyDM()
dm.setup()
dm.prepare_data()
def test_base_datamodule(tmpdir):
dm = TrialMNISTDataModule()
dm.prepare_data()
dm.setup()
def test_base_datamodule_with_verbose_setup(tmpdir):
dm = TrialMNISTDataModule()
dm.prepare_data()
dm.setup('fit')
dm.setup('test')
def test_data_hooks_called(tmpdir):
dm = TrialMNISTDataModule()
assert dm.has_prepared_data is False
assert dm.has_setup_fit is False
assert dm.has_setup_test is False
dm.prepare_data()
assert dm.has_prepared_data is True
assert dm.has_setup_fit is False
assert dm.has_setup_test is False
dm.setup()
assert dm.has_prepared_data is True
assert dm.has_setup_fit is True
assert dm.has_setup_test is True
def test_data_hooks_called_verbose(tmpdir):
dm = TrialMNISTDataModule()
assert dm.has_prepared_data is False
assert dm.has_setup_fit is False
assert dm.has_setup_test is False
dm.prepare_data()
assert dm.has_prepared_data is True
assert dm.has_setup_fit is False
assert dm.has_setup_test is False
dm.setup('fit')
assert dm.has_prepared_data is True
assert dm.has_setup_fit is True
assert dm.has_setup_test is False
dm.setup('test')
assert dm.has_prepared_data is True
assert dm.has_setup_fit is True
assert dm.has_setup_test is True
def test_data_hooks_called_with_stage_kwarg(tmpdir):
dm = TrialMNISTDataModule()
dm.prepare_data()
assert dm.has_prepared_data is True
dm.setup(stage='fit')
assert dm.has_setup_fit is True
assert dm.has_setup_test is False
dm.setup(stage='test')
assert dm.has_setup_fit is True
assert dm.has_setup_test is True
def test_dm_add_argparse_args(tmpdir):
parser = ArgumentParser()
parser = TrialMNISTDataModule.add_argparse_args(parser)
args = parser.parse_args(['--data_dir', str(tmpdir)])
assert args.data_dir == str(tmpdir)
def test_dm_init_from_argparse_args(tmpdir):
parser = ArgumentParser()
parser = TrialMNISTDataModule.add_argparse_args(parser)
args = parser.parse_args(['--data_dir', str(tmpdir)])
dm = TrialMNISTDataModule.from_argparse_args(args)
dm.prepare_data()
dm.setup()
def test_dm_pickle_after_init(tmpdir):
dm = TrialMNISTDataModule()
pickle.dumps(dm)
def test_train_loop_only(tmpdir):
dm = TrialMNISTDataModule(tmpdir)
model = EvalModelTemplate()
model.validation_step = None
model.validation_step_end = None
model.validation_epoch_end = None
model.test_step = None
model.test_step_end = None
model.test_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
weights_summary=None,
)
# fit model
result = trainer.fit(model, dm)
assert result == 1
assert trainer.logger_connector.callback_metrics['loss'] < 0.6
def test_train_val_loop_only(tmpdir):
reset_seed()
dm = TrialMNISTDataModule(tmpdir)
model = EvalModelTemplate()
model.validation_step = None
model.validation_step_end = None
model.validation_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
weights_summary=None,
)
# fit model
result = trainer.fit(model, dm)
assert result == 1
assert trainer.logger_connector.callback_metrics['loss'] < 0.6
def test_dm_checkpoint_save(tmpdir):
reset_seed()
dm = TrialMNISTDataModule(tmpdir)
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
weights_summary=None,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor='early_stop_on')],
)
# fit model
result = trainer.fit(model, dm)
checkpoint_path = list(trainer.checkpoint_callback.best_k_models.keys())[0]
checkpoint = torch.load(checkpoint_path)
assert dm.__class__.__name__ in checkpoint
assert checkpoint[dm.__class__.__name__] == dm.__class__.__name__
def test_test_loop_only(tmpdir):
reset_seed()
dm = TrialMNISTDataModule(tmpdir)
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
weights_summary=None,
)
trainer.test(model, datamodule=dm)
def test_full_loop(tmpdir):
reset_seed()
dm = TrialMNISTDataModule(tmpdir)
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
weights_summary=None,
deterministic=True,
)
# fit model
result = trainer.fit(model, dm)
assert result == 1
# test
result = trainer.test(datamodule=dm)
result = result[0]
assert result['test_acc'] > 0.8
def test_trainer_attached_to_dm(tmpdir):
reset_seed()
dm = TrialMNISTDataModule(tmpdir)
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
weights_summary=None,
deterministic=True,
)
# fit model
result = trainer.fit(model, dm)
assert result == 1
assert dm.trainer is not None
# test
result = trainer.test(datamodule=dm)
result = result[0]
assert dm.trainer is not None
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="test requires multi-GPU machine")
def test_full_loop_single_gpu(tmpdir):
reset_seed()
dm = TrialMNISTDataModule(tmpdir)
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
weights_summary=None,
gpus=1,
deterministic=True,
)
# fit model
result = trainer.fit(model, dm)
assert result == 1
# test
result = trainer.test(datamodule=dm)
result = result[0]
assert result['test_acc'] > 0.8
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_full_loop_dp(tmpdir):
reset_seed()
dm = TrialMNISTDataModule(tmpdir)
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
weights_summary=None,
accelerator='dp',
gpus=2,
deterministic=True,
)
# fit model
result = trainer.fit(model, dm)
assert result == 1
# test
result = trainer.test(datamodule=dm)
result = result[0]
assert result['test_acc'] > 0.8
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="test requires multi-GPU machine")
def test_dm_transfer_batch_to_device(tmpdir):
class CustomBatch:
def __init__(self, data):
self.samples = data[0]
self.targets = data[1]
class CurrentTestDM(LightningDataModule):
hook_called = False
def transfer_batch_to_device(self, data, device):
self.hook_called = True
if isinstance(data, CustomBatch):
data.samples = data.samples.to(device)
data.targets = data.targets.to(device)
else:
data = super().transfer_batch_to_device(data, device)
return data
model = EvalModelTemplate()
dm = CurrentTestDM()
batch = CustomBatch((torch.zeros(5, 28), torch.ones(5, 1, dtype=torch.long)))
trainer = Trainer(gpus=1)
# running .fit() would require us to implement custom data loaders, we mock the model reference instead
trainer.get_model = MagicMock(return_value=model)
if is_overridden('transfer_batch_to_device', dm):
model.transfer_batch_to_device = dm.transfer_batch_to_device
trainer.accelerator_backend = GPUAccelerator(trainer)
batch_gpu = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
expected = torch.device('cuda', 0)
assert dm.hook_called
assert batch_gpu.samples.device == batch_gpu.targets.device == expected
class CustomMNISTDataModule(LightningDataModule):
def __init__(self, data_dir: str = "./"):
super().__init__()
self.data_dir = data_dir
self._epochs_called_for = []
def prepare_data(self):
TrialMNIST(self.data_dir, train=True, download=True)
def setup(self, stage: Optional[str] = None):
mnist_full = TrialMNIST(
root=self.data_dir, train=True, num_samples=64, download=True
)
self.mnist_train, self.mnist_val = random_split(mnist_full, [128, 64])
self.dims = self.mnist_train[0][0].shape
def train_dataloader(self):
assert self.trainer.current_epoch not in self._epochs_called_for
self._epochs_called_for.append(self.trainer.current_epoch)
return DataLoader(self.mnist_train, batch_size=4)
def test_dm_reload_dataloaders_every_epoch(tmpdir):
"""Test datamodule, where trainer argument
reload_dataloaders_every_epoch is set to True/False"""
dm = CustomMNISTDataModule(tmpdir)
model = EvalModelTemplate()
model.validation_step = None
model.validation_step_end = None
model.validation_epoch_end = None
model.test_step = None
model.test_step_end = None
model.test_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_train_batches=0.01,
reload_dataloaders_every_epoch=True,
)
trainer.fit(model, dm)
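# --- Illustrative sketch (not part of the original test module) ---
# The smallest LightningDataModule shape exercised by the tests above; the hook
# order is prepare_data() -> setup() -> *_dataloader(). The random tensors are
# made-up stand-ins for a real dataset.
class _SketchDataModule(LightningDataModule):
    def prepare_data(self):
        # one-time, per-node work (downloads, writing to disk) goes here
        pass

    def setup(self, stage: Optional[str] = None):
        # per-process work: assign the splits consumed by the dataloaders
        self.train_set = torch.utils.data.TensorDataset(torch.randn(32, 4))

    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=8)

# dm = _SketchDataModule(); dm.prepare_data(); dm.setup('fit')
# batch = next(iter(dm.train_dataloader()))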
| 27.183406
| 112
| 0.697751
|
b922dbf11af57661fa50d6bba30784d7471c73a3
| 36,534
|
py
|
Python
|
delfin/drivers/hpe/hpe_msa/ssh_handler.py
|
joseph-v/SIM
|
61fedb261aa745d715b8a30c0945a6244fb807e2
|
[
"Apache-2.0"
] | 4
|
2020-05-07T07:43:43.000Z
|
2020-06-21T15:25:42.000Z
|
delfin/drivers/hpe/hpe_msa/ssh_handler.py
|
joseph-v/SIM
|
61fedb261aa745d715b8a30c0945a6244fb807e2
|
[
"Apache-2.0"
] | 210
|
2020-05-08T04:06:49.000Z
|
2020-06-22T12:59:02.000Z
|
delfin/drivers/hpe/hpe_msa/ssh_handler.py
|
joseph-v/SIM
|
61fedb261aa745d715b8a30c0945a6244fb807e2
|
[
"Apache-2.0"
] | 10
|
2020-04-11T07:09:55.000Z
|
2020-04-28T09:50:13.000Z
|
import hashlib
import time
import six
from oslo_log import log as logging
from operator import itemgetter
from itertools import groupby
from delfin import exception
from delfin.common import constants, alert_util
from delfin.drivers.utils.ssh_client import SSHPool
from delfin.drivers.utils.tools import Tools
from delfin.drivers.hpe.hpe_msa import consts
try:
import defusedxml.cElementTree as Et
except ImportError:
import defusedxml.ElementTree as Et
LOG = logging.getLogger(__name__)
class SSHHandler(object):
def __init__(self, **kwargs):
self.ssh_pool = SSHPool(**kwargs)
def login(self):
try:
self.ssh_pool.do_exec('show pools')
except Exception as e:
LOG.error("Failed to login msa %s" %
(six.text_type(e)))
raise e
def get_storage(self, storage_id):
try:
system_info = self.ssh_pool.do_exec('show system')
system_data = self.handle_xml_to_dict(system_info, 'system')
version_info = self.ssh_pool.do_exec('show version')
version_arr = self.handle_xml_to_json(version_info, 'versions')
version_id = ""
if version_arr:
version_id = version_arr[0].get('bundle-version')
if system_data:
pools_list = self.list_storage_pools(storage_id)
total_capacity = 0
if pools_list:
for pool in pools_list:
total_capacity += int(pool.get('total_capacity'))
disks_list = self.list_storage_disks(storage_id)
raw_capacity = 0
if disks_list:
for disk in disks_list:
raw_capacity += int(disk.get('capacity'))
volumes_list = self.list_storage_volume(storage_id)
volume_all_size = 0
if volumes_list:
for volume in volumes_list:
volume_all_size += int(volume.get('total_capacity'))
health = system_data.get('health')
status = constants.StorageStatus.OFFLINE
if health == 'OK':
status = constants.StorageStatus.NORMAL
elif health == 'Degraded':
status = constants.StorageStatus.DEGRADED
serial_num = system_data.get('midplane-serial-number')
storage_map = {
'name': system_data.get('system-name'),
'vendor': consts.StorageVendor.HPE_MSA_VENDOR,
'model': system_data.get('product-id'),
'status': status,
'serial_number': serial_num,
'firmware_version': version_id,
'location': system_data.get('system-location'),
'raw_capacity': int(raw_capacity),
'total_capacity': int(total_capacity),
'used_capacity': int(volume_all_size),
'free_capacity': int(total_capacity - volume_all_size)
}
return storage_map
except Exception as e:
err_msg = "Failed to get system info : %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
def list_storage_disks(self, storage_id):
try:
disk_info = self.ssh_pool.do_exec('show disks')
disk_detail = self.handle_xml_to_json(disk_info, 'drives')
disks_arr = []
for data in disk_detail:
health = data.get('health')
status = constants.StoragePoolStatus.OFFLINE
if health == 'OK':
status = constants.StoragePoolStatus.NORMAL
size = self.parse_string_to_bytes(data.get('size'))
physical_type = consts.DiskPhysicalType.\
DISK_PHYSICAL_TYPE.get(data.get('description'),
constants.DiskPhysicalType.
UNKNOWN)
rpm = data.get('rpm')
if rpm:
rpm = int(rpm) * consts.RpmSpeed.RPM_SPEED
data_map = {
'native_disk_id': data.get('location'),
'name': data.get('location'),
'physical_type': physical_type,
'status': status,
'storage_id': storage_id,
'native_disk_group_id': data.get('disk-group'),
'serial_number': data.get('serial-number'),
'manufacturer': data.get('vendor'),
'model': data.get('model'),
'speed': rpm,
'capacity': int(size),
'health_score': status
}
disks_arr.append(data_map)
return disks_arr
except Exception as e:
err_msg = "Failed to get storage disk: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
def list_storage_ports(self, storage_id):
try:
ports_info = self.ssh_pool.do_exec('show ports')
ports_split = ports_info.split('\n')
ports_array = ports_split[1:len(ports_split) - 1]
ports_xml_data = ''.join(ports_array)
xml_element = Et.fromstring(ports_xml_data)
ports_json = []
for element_data in xml_element.iter('OBJECT'):
property_name = element_data.get('basetype')
if property_name != 'status':
msg = {}
for child in element_data.iter('PROPERTY'):
msg[child.get('name')] = child.text
ports_json.append(msg)
ports_elements_info = []
for i in range(0, len(ports_json) - 1, 2):
port_element = ports_json[i].copy()
port_element.update(ports_json[i + 1])
ports_elements_info.append(port_element)
list_ports = []
for data in ports_elements_info:
status = constants.PortHealthStatus.NORMAL
conn_status = constants.PortConnectionStatus.CONNECTED
if data.get('health') != 'OK':
status = constants.PortHealthStatus.ABNORMAL
conn_status = constants.PortConnectionStatus.\
DISCONNECTED
wwn = None
port_type = constants.PortType.FC
location_port_type = data.get('port-type')
if location_port_type:
location_port_type = location_port_type.upper()
if location_port_type == 'ISCSI':
port_type = constants.PortType.ETH
else:
target_id = data.get('target-id')
if target_id:
wwn = target_id
location = '%s_%s' % (data.get('port'),
location_port_type)
speed = data.get('configured-speed', None)
max_speed = 0
if speed != 'Auto' and speed is not None:
max_speed = self.parse_string_to_bytes(speed)
data_map = {
'native_port_id': data.get('durable-id'),
'name': data.get('port'),
'type': port_type,
'connection_status': conn_status,
'health_status': status,
'location': location,
'storage_id': storage_id,
'speed': max_speed,
'max_speed': max_speed,
'mac_address': data.get('mac-address'),
'ipv4': data.get('ip-address'),
'wwn': wwn
}
list_ports.append(data_map)
return list_ports
except Exception as e:
err_msg = "Failed to get storage ports: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
def list_storage_controller(self, storage_id):
try:
controller_info = self.ssh_pool.do_exec('show controllers')
controller_detail = self.handle_xml_to_json(
controller_info, 'controllers')
controller_arr = []
for data in controller_detail:
health = data.get('health')
status = constants.StoragePoolStatus.OFFLINE
if health == 'OK':
status = constants.StoragePoolStatus.NORMAL
cpu_info = data.get('sc-cpu-type')
memory_size = data.get('system-memory-size')
if memory_size is not None:
memory_size += "MB"
system_memory_size = self.parse_string_to_bytes(
memory_size)
data_map = {
'native_controller_id': data.get('controller-id'),
'name': data.get('durable-id'),
'storage_id': storage_id,
'status': status,
'location': data.get('position'),
'soft_version': data.get('sc-fw'),
'cpu_info': cpu_info,
'memory_size': int(system_memory_size)
}
controller_arr.append(data_map)
return controller_arr
except Exception as e:
err_msg = "Failed to get storage controllers: %s"\
% (six.text_type(e))
LOG.error(err_msg)
raise e
def list_storage_volume(self, storage_id):
try:
volume_infos = self.ssh_pool.do_exec('show volumes')
volume_detail = self.handle_xml_to_json(volume_infos, 'volumes')
pools_info = self.ssh_pool.do_exec('show pools')
pool_detail = self.handle_xml_to_json(pools_info, 'pools')
list_volumes = []
for data in volume_detail:
health = data.get('health')
status = constants.StoragePoolStatus.OFFLINE
if health == 'OK':
status = constants.StoragePoolStatus.NORMAL
total_size = self.parse_string_to_bytes(data.get('total-size'))
total_avail = self.parse_string_to_bytes(
data.get('allocated-size'))
native_storage_pool_id = ''
if pool_detail:
native_storage_pool_id = pool_detail[0]. \
get('serial-number')
for pools in pool_detail:
if data.get('virtual-disk-name') == pools.\
get('name'):
native_storage_pool_id = pools.\
get('serial-number')
                blocks = data.get('blocks')
                blocks = int(blocks) if blocks is not None else 0
volume_map = {
'name': data.get('volume-name'),
'storage_id': storage_id,
'description': data.get('volume-name'),
'status': status,
'native_volume_id': str(data.get('durable-id')),
'native_storage_pool_id': native_storage_pool_id,
'wwn': str(data.get('wwn')),
'type': data.get('volume-type'),
'total_capacity': int(total_size),
                    'free_capacity': int(total_size - total_avail),
                    'used_capacity': int(total_avail),
                    'blocks': blocks,
'compressed': True,
'deduplicated': True
}
list_volumes.append(volume_map)
return list_volumes
except Exception as e:
err_msg = "Failed to get storage volume: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
def list_storage_pools(self, storage_id):
try:
pool_infos = self.ssh_pool.do_exec('show pools')
pool_detail = self.handle_xml_to_json(pool_infos, 'pools')
volume_list = self.list_storage_volume(storage_id)
pools_list = []
for data in pool_detail:
volume_size = 0
blocks = 0
if volume_list:
for volume in volume_list:
if volume.get('native_storage_pool_id') == data.\
get('serial-number'):
volume_size += volume.get('total_capacity')
blocks += volume.get('blocks')
health = data.get('health')
status = constants.StoragePoolStatus.OFFLINE
if health == 'OK':
status = constants.StoragePoolStatus.NORMAL
total_size = self.parse_string_to_bytes(
data.get('total-size'))
pool_map = {
'name': data.get('name'),
'storage_id': storage_id,
'native_storage_pool_id': data.get('serial-number'),
'status': status,
'storage_type': constants.StorageType.BLOCK,
'total_capacity': int(total_size),
'subscribed_capacity': int(blocks),
'used_capacity': volume_size,
'free_capacity': int(total_size - volume_size)
}
pools_list.append(pool_map)
return pools_list
except Exception as e:
err_msg = "Failed to get storage pool: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
@staticmethod
def parse_string_to_bytes(value):
capacity = 0
if value:
if value.isdigit():
capacity = float(value)
else:
if value == '0B':
capacity = 0
else:
unit = value[-2:]
capacity = float(value[:-2]) * int(
Tools.change_capacity_to_bytes(unit))
return capacity
@staticmethod
def handle_xml_to_json(detail_info, element):
detail_arr = []
detail_data = detail_info.split('\n')
detail = detail_data[1:len(detail_data) - 1]
detail_xml = ''.join(detail)
xml_element = Et.fromstring(detail_xml)
for children in xml_element.iter('OBJECT'):
property_name = children.get('basetype')
if element == property_name:
msg = {}
for child in children.iter('PROPERTY'):
msg[child.get('name')] = child.text
detail_arr.append(msg)
return detail_arr
def list_alerts(self, query_para):
alert_list = []
try:
alert_infos = self.ssh_pool.do_exec('show events error')
alert_json = self.handle_xml_to_json(alert_infos, 'events')
for alert_map in alert_json:
now = time.time()
occur_time = int(round(now * consts.SecondsNumber
.SECONDS_TO_MS))
time_stamp = alert_map.get('time-stamp-numeric')
if time_stamp is not None:
occur_time = int(time_stamp) * consts.SecondsNumber\
.SECONDS_TO_MS
if not alert_util.is_alert_in_time_range(query_para,
occur_time):
continue
event_code = alert_map.get('event-code')
event_id = alert_map.get('event-id')
location = alert_map.get('message')
resource_type = alert_map.get('event-code')
severity = alert_map.get('severity')
additional_info = str(alert_map.get('additional-information'))
match_key = None
if event_code:
match_key = event_code
if severity:
match_key += severity
if location:
match_key += location
description = None
if additional_info:
description = additional_info
if severity == 'Informational' or severity == 'RESOLVED':
continue
alert_model = {
'alert_id': event_id,
'alert_name': event_code,
'severity': severity,
'category': constants.Category.FAULT,
'type': 'EquipmentAlarm',
'sequence_number': event_id,
'occur_time': occur_time,
'description': description,
'resource_type': resource_type,
'location': location,
'match_key': hashlib.md5(match_key.encode()).hexdigest()
}
alert_list.append(alert_model)
alert_list_data = SSHHandler.get_last_alert_data(alert_list)
return alert_list_data
except Exception as e:
err_msg = "Failed to get storage alert: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
@staticmethod
def get_last_alert_data(alert_json):
alert_list = []
alert_json.sort(key=itemgetter('alert_name', 'location', 'severity'))
for key, item in groupby(alert_json, key=itemgetter(
'alert_name', 'location', 'severity')):
alert_last_index = 0
alert_list.append(list(item)[alert_last_index])
return alert_list
@staticmethod
def parse_alert(alert):
try:
alert_model = dict()
alert_id = None
description = None
severity = consts.TrapSeverity.TRAP_SEVERITY_MAP.get('8')
sequence_number = None
event_type = None
for alert_key, alert_value in alert.items():
if consts.AlertOIDNumber.OID_ERR_ID in alert_key:
alert_id = str(alert_value)
elif consts.AlertOIDNumber.OID_EVENT_TYPE in alert_key:
event_type = alert_value
elif consts.AlertOIDNumber.OID_EVENT_DESC in alert_key:
description = alert_value
elif consts.AlertOIDNumber.OID_SEVERITY in alert_key:
severity = consts.TrapSeverity.TRAP_SEVERITY_MAP\
.get(alert.get(consts.AlertOIDNumber.OID_SEVERITY),
constants.Severity.INFORMATIONAL)
elif consts.AlertOIDNumber.OID_EVENT_ID in alert_key:
sequence_number = alert_value
if description:
desc_arr = description.split(",")
if desc_arr:
alert_id = SSHHandler.split_by_char_and_number(
desc_arr[0], ":", 1)
alert_model['alert_id'] = str(alert_id)
alert_model['alert_name'] = event_type
alert_model['severity'] = severity
alert_model['category'] = constants.Category.FAULT
alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
alert_model['sequence_number'] = sequence_number
now = time.time()
alert_model['occur_time'] = int(round(now * consts.
SecondsNumber.SECONDS_TO_MS))
alert_model['description'] = description
alert_model['location'] = description
return alert_model
except Exception as e:
LOG.error(e)
msg = "Failed to build alert model: %s." % (six.text_type(e))
raise exception.InvalidResults(msg)
@staticmethod
def split_by_char_and_number(split_str, split_char, arr_number):
split_value = ''
if split_str:
tmp_value = split_str.split(split_char, 1)
if arr_number == 1 and len(tmp_value) > 1:
split_value = tmp_value[arr_number].strip()
elif arr_number == 0:
split_value = tmp_value[arr_number].strip()
return split_value
@staticmethod
def handle_xml_to_dict(xml_info, element):
msg = {}
xml_split = xml_info.split('\n')
xml_data = xml_split[1:len(xml_split) - 1]
detail_xml = ''.join(xml_data)
xml_element = Et.fromstring(detail_xml)
for children in xml_element.iter('OBJECT'):
property_name = children.get('basetype')
if element == property_name:
for child in children.iter('PROPERTY'):
msg[child.get('name')] = child.text
return msg
def list_storage_host_initiators(self, storage_id):
try:
initiator_list = []
host_groups_info = self.ssh_pool.do_exec("show initiators")
host_groups_json = self.handle_xml_to_json(host_groups_info,
"initiator")
type_switch = {
consts.InitiatorType.ISCSI_INITIATOR_TYPE:
consts.InitiatorType.ISCSI_INITIATOR_DESCRIPTION,
consts.InitiatorType.FC_INITIATOR_TYPE:
consts.InitiatorType.FC_INITIATOR_DESCRIPTION,
}
for initiator in host_groups_json:
description = type_switch.get(
initiator.get('host-bus-type-numeric'),
consts.InitiatorType.UNKNOWN_INITIATOR_DESCRIPTION)
initiator_item = {
"name": initiator.get('nickname'),
"type": description,
"alias": initiator.get('durable-id'),
"storage_id": storage_id,
"native_storage_host_initiator_id":
initiator.get('durable-id'),
"wwn": initiator.get('id'),
"status": constants.InitiatorStatus.ONLINE,
"native_storage_host_id": initiator.get('host-id')
}
initiator_list.append(initiator_item)
return initiator_list
except Exception as e:
LOG.error("Failed to get initiator "
"from msa storage_id: %s" % storage_id)
raise e
def list_storage_hosts(self, storage_id):
try:
hosts_info = self.ssh_pool.do_exec('show host-groups')
host_list = []
hosts = self.handle_xml_to_json(hosts_info, 'host')
host_set = set()
for host in hosts:
status = constants.HostStatus.NORMAL
os_type = constants.HostOSTypes.HP_UX
host_member_count = int(host.get('member-count'))
if host_member_count > 0:
serial_number = host.get('serial-number')
if serial_number not in host_set:
host_set.add(host.get('serial-number'))
host_dict = {
"name": host.get('name'),
"description": host.get('durable-id'),
"storage_id": storage_id,
"native_storage_host_id":
host.get('serial-number'),
"os_type": os_type,
"status": status
}
host_list.append(host_dict)
return host_list
except Exception as e:
LOG.error("Failed to get host "
"from msa storage_id: %s" % storage_id)
raise e
def list_storage_host_groups(self, storage_id):
try:
host_groups_info = self.ssh_pool.do_exec('show host-groups')
host_group_list = []
storage_host_grp_relation_list = []
host_groups = self.handle_xml_to_json(
host_groups_info, 'host-group')
host_info_list = self.handle_xml_to_json(host_groups_info, 'host')
for host_group in host_groups:
member_count = int(host_group.get('member-count'))
if member_count > 0:
hosts_list = []
storage_host_group_id = host_group.get('serial-number')
for host_info in host_info_list:
host_id = host_info.get('serial-number')
host_group_id = host_info.get('host-group')
if host_id != 'NOHOST' and \
host_group_id == storage_host_group_id:
hosts_list.append(host_id)
storage_host_group_relation = {
'storage_id': storage_id,
'native_storage_host_group_id':
storage_host_group_id,
'native_storage_host_id': host_id
}
storage_host_grp_relation_list.\
append(storage_host_group_relation)
host_group_map = {
"name": host_group.get('name'),
"description": host_group.get('durable-id'),
"storage_id": storage_id,
"native_storage_host_group_id": storage_host_group_id,
"storage_hosts": ','.join(hosts_list)
}
host_group_list.append(host_group_map)
storage_host_groups_result = {
'storage_host_groups': host_group_list,
'storage_host_grp_host_rels':
storage_host_grp_relation_list
}
return storage_host_groups_result
except Exception as e:
LOG.error("Failed to get host_group from msa "
"storage_id: %s" % storage_id)
raise e
def list_volume_groups(self, storage_id):
try:
volume_group_list = []
volume_group_relation_list = []
volume_groups_info = self.ssh_pool.do_exec('show volume-groups')
volume_groups_json = self.handle_xml_to_json(
volume_groups_info, 'volume-groups')
volumes_json = self.handle_xml_to_json(
volume_groups_info, 'volumes')
for volume_group in volume_groups_json:
volumes_list = []
durable_id = volume_group.get('durable-id')
if volumes_json:
for volume_info in volumes_json:
group_key = volume_info.get('group-key')
volume_id = volume_info.get('durable-id')
if group_key == durable_id:
volumes_list.append(volume_id)
volume_group_relation = {
'storage_id': storage_id,
'native_volume_group_id': durable_id,
'native_volume_id': volume_id
}
volume_group_relation_list.\
append(volume_group_relation)
volume_groups_map = {
"name": volume_group.get('group-name'),
"description": volume_group.get('durable-id'),
"storage_id": storage_id,
"native_volume_group_id": durable_id,
"volumes": ','.join(volumes_list)
}
volume_group_list.append(volume_groups_map)
volume_group_result = {
'volume_groups': volume_group_list,
'vol_grp_vol_rels': volume_group_relation_list
}
return volume_group_result
except Exception as e:
LOG.error("Failed to get volume_group"
" from msa storage_id: %s" % storage_id)
raise e
def list_port_groups(self, storage_id):
try:
port_group_list = []
port_group_relation_list = []
storage_view_info = self.ssh_pool.do_exec('show maps all ')
storage_port_list = self.list_storage_ports(storage_id)
storage_host_view = self.handle_xml_to_json(
storage_view_info, 'volume-view-mappings')
reduce_set = set()
for storage_view in storage_host_view:
port_number = storage_view.get('ports')
port_group_dict = self.get_port_group_id_and_name(
port_number, storage_port_list)
native_port_group_id = port_group_dict.get(
'native_port_group_id')
native_port_group_name = port_group_dict.get(
'native_port_group_name')
if native_port_group_name:
native_port_group_id = "port_group_" + \
native_port_group_id
if native_port_group_id in reduce_set:
continue
reduce_set.add(native_port_group_id)
port_group_map = {
'name': native_port_group_id,
'description': native_port_group_id,
'storage_id': storage_id,
'native_port_group_id': native_port_group_id,
'ports': native_port_group_name
}
port_ids = native_port_group_name.split(',')
for port_id in port_ids:
port_group_relation = {
'storage_id': storage_id,
'native_port_group_id': native_port_group_id,
'native_port_id': port_id
}
port_group_relation_list.append(
port_group_relation)
port_group_list.append(port_group_map)
result = {
'port_groups': port_group_list,
'port_grp_port_rels': port_group_relation_list
}
return result
except Exception as e:
LOG.error("Failed to get port_group"
" from msa storage_id: %s" % storage_id)
raise e
@staticmethod
def get_port_group_id_and_name(port_number, storage_port_list):
native_port_group_id = []
native_port_group_name = []
if port_number:
port_codes = port_number.split(',')
for port_code in port_codes:
for port in storage_port_list:
port_name = port.get('name')
durable_id = port.get('native_port_id')
if port_code in port_name:
native_port_group_id.append(port_name)
native_port_group_name.append(durable_id)
port_group_dict = {
'native_port_group_id': ''.join(native_port_group_id),
'native_port_group_name': ','.join(native_port_group_name)
}
return port_group_dict
def list_masking_views(self, storage_id):
try:
views_list = []
storage_view_info = self.ssh_pool.do_exec('show maps all ')
if storage_view_info:
storage_port_list = self.list_storage_ports(storage_id)
host_list = self.list_storage_hosts(storage_id)
initiators_list = self.list_storage_host_initiators(storage_id)
host_group_list = self.list_storage_host_groups(storage_id)
storage_host_group = host_group_list.get('storage_host_groups')
storage_host_view = self.handle_xml_to_json(
storage_view_info, 'volume-view-mappings')
views_list.extend(
self.get_storage_view_list(storage_host_view, 'volume',
storage_id, storage_port_list,
host_list, initiators_list,
storage_host_group))
storage_host_volume_groups_view = self.handle_xml_to_json(
storage_view_info, 'volume-group-view-mappings')
views_list.extend(self.get_storage_view_list(
storage_host_volume_groups_view, 'group',
storage_id, storage_port_list, host_list, initiators_list,
storage_host_group))
return views_list
except Exception as e:
LOG.error("Failed to get view "
"from msa storage_id: %s" % storage_id)
raise e
def get_storage_view_list(self, storage_view_list, vol_type, storage_id,
storage_port_list, host_list, initiators_list,
storage_host_groups):
views_list = []
if storage_view_list:
native_volume_group_name = 'native_volume_group_id'\
if vol_type == 'group' else 'native_volume_id'
for host_view in storage_view_list:
access = host_view.get('access')
if access != 'not-mapped':
mapped_id = host_view.get('mapped-id')
native_masking_view_id = host_view.get('durable-id')
volume_id = host_view.get('parent-id')
port_number = host_view.get('ports')
view_name = host_view.get('nickname')
host_group_name = 'native_storage_host_group_id'\
if '.*.*' in view_name else 'native_storage_host_id'
native_port_group_dict = \
self.get_port_group_id_and_name(port_number,
storage_port_list)
native_port_group_id = native_port_group_dict.get(
'native_port_group_id')
native_storage_host_id = self.get_storage_host_id(
host_list, mapped_id, initiators_list,
storage_host_groups, view_name)
view_map = {
"name": view_name,
"description": view_name,
"storage_id": storage_id,
"native_masking_view_id":
native_masking_view_id + volume_id,
native_volume_group_name: volume_id,
host_group_name: native_storage_host_id
}
if native_port_group_id:
view_map['native_port_group_id'] = \
"port_group_" + native_port_group_id
views_list.append(view_map)
return views_list
@staticmethod
def get_storage_host_id(host_list, mapped_id, initiators_list,
storage_host_groups, view_name):
for host_value in host_list:
host_durable_id = host_value.get('description')
if host_durable_id == mapped_id:
native_storage_host_id = \
host_value.get('native_storage_host_id')
return native_storage_host_id
for initiators in initiators_list:
initiators_durable_id = initiators.get(
'native_storage_host_initiator_id')
if initiators_durable_id == mapped_id:
native_storage_host_id = \
initiators.get('native_storage_host_id')
return native_storage_host_id
group_name = view_name.split('.')[0]
for host_group in storage_host_groups:
if group_name == host_group.get('name'):
native_storage_host_id = \
host_group.get('native_storage_host_group_id')
return native_storage_host_id
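

# Hedged, standalone illustration (not part of the original driver): it shows
# the XML shape that handle_xml_to_dict()/handle_xml_to_json() above expect --
# OBJECT elements selected by their 'basetype' attribute, each carrying
# PROPERTY children keyed by 'name'. The payload below is fabricated for
# demonstration; real MSA CLI output carries many more fields.
SAMPLE_SYSTEM_XML = (
    'first line is dropped by the parser\n'
    '<RESPONSE>'
    '<OBJECT basetype="system" name="system-information">'
    '<PROPERTY name="system-name">demo-msa</PROPERTY>'
    '<PROPERTY name="health">OK</PROPERTY>'
    '</OBJECT>'
    '</RESPONSE>\n'
    'last line is dropped by the parser'
)


def _parse_sample_system():
    # Calls the static parser directly, so no SSH session is required.
    # Expected result: {'system-name': 'demo-msa', 'health': 'OK'}
    return SSHHandler.handle_xml_to_dict(SAMPLE_SYSTEM_XML, 'system')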
| 45.954717
| 79
| 0.518969
|
d02957ed5ec95d808dd8648da6549f579dedddca
| 3,012
|
py
|
Python
|
test/Parallel/failed-build/fixture/teststate.py
|
jcassagnol-public/scons
|
8eaf585a893757e68c9e4a6e25d375021fa5eab7
|
[
"MIT"
] | 1,403
|
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
test/Parallel/failed-build/fixture/teststate.py
|
jcassagnol-public/scons
|
8eaf585a893757e68c9e4a6e25d375021fa5eab7
|
[
"MIT"
] | 3,708
|
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
test/Parallel/failed-build/fixture/teststate.py
|
jcassagnol-public/scons
|
8eaf585a893757e68c9e4a6e25d375021fa5eab7
|
[
"MIT"
] | 281
|
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
import http.server
import socketserver
import time
from urllib.parse import urlparse, parse_qs
from threading import Lock
from enum import Enum
import psutil
class TestState(Enum):
start_state = 0
mycopy_started = 1
myfail_done = 2
class Response(Enum):
OK = 200
WAIT = 201
DONE = 202
def server_thread(PORT):
class S(http.server.BaseHTTPRequestHandler):
current_state = TestState.start_state
mutex = Lock()
pid_killed_tries = 20
def do_GET(self):
gets = parse_qs(urlparse(self.path).query)
            # The two tasks communicate with the server through basic GET
            # requests, either updating or querying the state of the test to
            # know whether they should continue. The server regulates the state
            # and makes sure the right criteria are met by both tasks before
            # moving the test state forward.
if gets.get('set_mycopy_started'):
S.mutex.acquire()
if S.current_state == TestState.start_state:
S.current_state = TestState.mycopy_started
response = Response.OK
else:
response = Response.WAIT
S.mutex.release()
elif gets.get('get_mycopy_started'):
S.mutex.acquire()
if S.current_state == TestState.mycopy_started:
response = Response.OK
else:
response = Response.WAIT
S.mutex.release()
elif gets.get('set_myfail_done'):
S.mutex.acquire()
if S.current_state == TestState.mycopy_started:
count = 0
pid = int(gets.get('pid')[0])
while psutil.pid_exists(pid) and count < self.pid_killed_tries:
time.sleep(0.5)
count += 1
if not psutil.pid_exists(pid):
S.current_state = TestState.myfail_done
response = Response.DONE
else:
response = Response.WAIT
else:
response = Response.WAIT
S.mutex.release()
elif gets.get('get_myfail_done'):
S.mutex.acquire()
if S.current_state == TestState.myfail_done:
response = Response.OK
else:
response = Response.WAIT
S.mutex.release()
else:
response = Response.WAIT
self.send_response(response.value)
self.send_header('Content-type', 'text/html')
self.end_headers()
if response != Response.DONE:
self.wfile.write("".encode('utf-8'))
def log_message(self, format, *args):
return
httpd = socketserver.TCPServer(("127.0.0.1", PORT), S)
httpd.serve_forever()
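

# Hedged sketch (not part of the original fixture) of how a test task could
# talk to this server, using only the query parameters handled in do_GET
# above. The port number is supplied by the caller; the real SCons test tasks
# that drive this protocol are defined elsewhere.
def poll_until_copy_started(port, poll_interval=0.5):
    import urllib.request
    url = "http://127.0.0.1:%d/?get_mycopy_started=1" % port
    while True:
        # OK (200) means the copy task has registered itself; WAIT (201)
        # means we should try again shortly.
        with urllib.request.urlopen(url) as resp:
            if resp.getcode() == Response.OK.value:
                return
        time.sleep(poll_interval)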
| 33.466667
| 83
| 0.530544
|
f6097be57e8e49af83ade84d629cc760b0cdf205
| 2,348
|
py
|
Python
|
examples/asynccsaga_example.py
|
Nathaniel-Rodriguez/asyncevo
|
c6398327e8a9e2d9e582c986709480bb8114872a
|
[
"MIT"
] | null | null | null |
examples/asynccsaga_example.py
|
Nathaniel-Rodriguez/asyncevo
|
c6398327e8a9e2d9e582c986709480bb8114872a
|
[
"MIT"
] | null | null | null |
examples/asynccsaga_example.py
|
Nathaniel-Rodriguez/asyncevo
|
c6398327e8a9e2d9e582c986709480bb8114872a
|
[
"MIT"
] | null | null | null |
# run this example with:
# <mpi command> -n <number cores> python asynccsaga_example.py
# Initialize the MPI process
# all code following this code will be executed on rank 1
# rank 0 is dedicated to the scheduler
# ranks 2+ are dedicated to workers
from asyncevo import Scheduler
from asyncevo import AsyncCSAGa
from asyncevo import CSAMember
import numpy as np
from math import inf
from pathlib import Path
from time import sleep
# Below are available fitness functions
def elli(x):
"""ellipsoid-like test fitness function"""
n = len(x)
aratio = 1e3
return -sum(x[i]**2 * aratio**(2.*i/(n-1)) for i in range(n))
def sphere(x):
"""sphere-like, ``sum(x**2)``, test fitness function"""
return -sum(x[i]**2 for i in range(len(x)))
def rosenbrock(x):
"""Rosenbrock test fitness function"""
n = len(x)
if n < 2:
raise ValueError('dimension must be greater than one')
return -sum(100 * (x[i+1] - x[i]**2)**2 + (1 - x[i])**2
for i in range(n-1))
def rosenbrock2d(x):
"""
Best at f(1,1)=0
"""
return -((1 - x[0])**2 + 100 * (x[1] - x[0]**2)**2)
def rest(x):
"""sleeps for a time and then returns sphere."""
sleep(np.random.randint(1, 3))
return sphere(x)
def member_example(member):
return sphere(member.parameters)
def main():
# make the scheduler first
with Scheduler({'nanny': True, 'interface': 'lo'}) as mpi_scheduler:
# create and run GA
ga = AsyncCSAGa(initial_state=np.array([0.4, 0.3, -0.25, 0.01]),
initial_sigma=np.ones(4)*5,
population_size=5,
scheduler=mpi_scheduler,
global_seed=96879,
table_size=200000,
max_table_step=1,
member_type=CSAMember,
save_filename=Path("test.csaga"),
save_every=1000)
ga.run(sphere, 300, take_member=False)
# load pop from file and continue
ga = AsyncCSAGa.from_file("test.csaga",
scheduler=mpi_scheduler,
global_seed=432,
save_filename="test.csaga")
ga.run(sphere, 50, take_member=False)
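

# Hedged variant (illustrative only): member_example() above expects the whole
# CSAMember object, which the GA hands over when take_member=True. The keyword
# arguments simply mirror the calls in main(); the filename is an arbitrary
# choice made for this sketch.
def main_with_member_fitness():
    with Scheduler({'nanny': True, 'interface': 'lo'}) as mpi_scheduler:
        ga = AsyncCSAGa(initial_state=np.zeros(4),
                        initial_sigma=np.ones(4) * 5,
                        population_size=5,
                        scheduler=mpi_scheduler,
                        global_seed=12345,
                        table_size=200000,
                        max_table_step=1,
                        member_type=CSAMember,
                        save_filename=Path("member_demo.csaga"),
                        save_every=1000)
        ga.run(member_example, 100, take_member=True)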
if __name__ == "__main__":
main()
| 28.634146
| 72
| 0.567291
|
bafd6c4cb8e21f40b7b2ebcdb7d01a4f5279c9a3
| 3,247
|
py
|
Python
|
app/recipe/tests/test_tags_api.py
|
Brainless18/recipe-app-api
|
27bc5f0832ec65b155bcc383cf2659d7637454a7
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
Brainless18/recipe-app-api
|
27bc5f0832ec65b155bcc383cf2659d7637454a7
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
Brainless18/recipe-app-api
|
27bc5f0832ec65b155bcc383cf2659d7637454a7
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTest(TestCase):
    # Test the publicly available tags API
def setUp(self):
self.client = APIClient()
def test_login_required(self):
# Test that login is required for retrieving tags
response = self.client.get(TAGS_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
# Test authorized user tags api
def setUp(self):
self.user = get_user_model().objects.create_user(
'test@test.com',
'test123',
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
# Test retrieving tags
Tag.objects.create(user=self.user, name="Vegan")
Tag.objects.create(user=self.user, name="Desert")
response = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, serializer.data)
def test_tags_limited_to_user(self):
# Test that tags returned are for authenticated user
user2 = get_user_model().objects.create_user('other@other.com', 'test123')
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Comfort Food')
response = self.client.get(TAGS_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['name'], tag.name)
def test_create_tag(self):
# Test that tags created successfully
payload = {
'name': 'test tag'
}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_tag_invalid(self):
        # Test creating a tag with an invalid payload
payload = {'name': ''}
response = self.client.post(TAGS_URL, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
# Test filtering tags by those assigned to recipes
tag1 = Tag.objects.create(user=self.user, name="Breakfast")
tag2 = Tag.objects.create(user=self.user, name="Launch")
recipe = Recipe.objects.create(
title="Coriander eggs on toast",
time_minutes=10,
price=5.00,
user=self.user
)
recipe.tags.add(tag1)
response = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, response.data)
self.assertNotIn(serializer2.data, response.data)
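

# Hedged sketch (not taken from this project's view code) of the queryset
# filtering that test_retrieve_tags_assigned_to_recipes exercises: when the
# client passes assigned_only=1, only tags attached to at least one recipe are
# returned. The helper name and exact viewset wiring are assumptions made for
# illustration.
def _filter_tags_for_request(request):
    assigned_only = bool(int(request.query_params.get('assigned_only', 0)))
    queryset = Tag.objects.filter(user=request.user)
    if assigned_only:
        # Keep only tags referenced by at least one recipe.
        queryset = queryset.filter(recipe__isnull=False)
    return queryset.order_by('-name').distinct()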
| 36.483146
| 82
| 0.664613
|
46745f0d4157148dd2d326213e9c8fe46d5d2120
| 30,398
|
py
|
Python
|
mkt/lookup/tests/test_views.py
|
oremj/zamboni
|
a751dc6d22f7af947da327b0a091cbab0a999f49
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/lookup/tests/test_views.py
|
oremj/zamboni
|
a751dc6d22f7af947da327b0a091cbab0a999f49
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/lookup/tests/test_views.py
|
oremj/zamboni
|
a751dc6d22f7af947da327b0a091cbab0a999f49
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime, timedelta
from decimal import Decimal
import json
from babel import numbers
from pyquery import PyQuery as pq
from nose.exc import SkipTest
from nose.tools import eq_
import amo
from abuse.models import AbuseReport
from addons.cron import reindex_addons
from addons.models import Addon, AddonUser
from amo.helpers import urlparams
from amo.tests import addon_factory, app_factory, ESTestCase, TestCase
from amo.urlresolvers import reverse
from devhub.models import ActivityLog
from market.models import AddonPaymentData, AddonPremium, Price, Refund
from mkt.webapps.cron import update_weekly_downloads
from mkt.webapps.models import Installed, Webapp
from stats.models import Contribution, DownloadCount
from users.cron import reindex_users
from users.models import UserProfile
class TestAcctSummary(TestCase):
fixtures = ['base/users', 'base/addon_3615',
'webapps/337141-steamcube']
def setUp(self):
super(TestAcctSummary, self).setUp()
self.user = UserProfile.objects.get(username='31337') # steamcube
self.steamcube = Addon.objects.get(pk=337141)
self.otherapp = app_factory(app_slug='otherapp')
self.reg_user = UserProfile.objects.get(email='regular@mozilla.com')
self.summary_url = reverse('lookup.user_summary',
args=[self.user.pk])
assert self.client.login(username='support-staff@mozilla.com',
password='password')
def buy_stuff(self, contrib_type):
for i in range(3):
if i == 1:
curr = 'GBR'
else:
curr = 'USD'
amount = Decimal('2.00')
Contribution.objects.create(addon=self.steamcube,
type=contrib_type,
currency=curr,
amount=amount,
user_id=self.user.pk)
def summary(self, expected_status=200):
res = self.client.get(self.summary_url)
eq_(res.status_code, expected_status)
return res
def payment_data(self):
return {'full_name': 'Ed Peabody Jr.',
'business_name': 'Mr. Peabody',
'phone': '(1) 773-111-2222',
'address_one': '1111 W Leland Ave',
'address_two': 'Apt 1W',
'city': 'Chicago',
'post_code': '60640',
'country': 'USA',
'state': 'Illinois'}
def test_home_auth(self):
self.client.logout()
res = self.client.get(reverse('lookup.home'))
self.assertLoginRedirects(res, reverse('lookup.home'))
def test_summary_auth(self):
self.client.logout()
res = self.client.get(self.summary_url)
self.assertLoginRedirects(res, self.summary_url)
def test_home(self):
res = self.client.get(reverse('lookup.home'))
self.assertNoFormErrors(res)
eq_(res.status_code, 200)
def test_basic_summary(self):
res = self.summary()
eq_(res.context['account'].pk, self.user.pk)
def test_app_counts(self):
self.buy_stuff(amo.CONTRIB_PURCHASE)
sm = self.summary().context['app_summary']
eq_(sm['app_total'], 3)
eq_(sm['app_amount']['USD'], 4.0)
eq_(sm['app_amount']['GBR'], 2.0)
def test_inapp_counts(self):
self.buy_stuff(amo.CONTRIB_INAPP)
sm = self.summary().context['app_summary']
eq_(sm['inapp_total'], 3)
eq_(sm['inapp_amount']['USD'], 4.0)
eq_(sm['inapp_amount']['GBR'], 2.0)
def test_requested_refunds(self):
contrib = Contribution.objects.create(type=amo.CONTRIB_PURCHASE,
user_id=self.user.pk,
addon=self.steamcube,
currency='USD',
amount='0.99')
Refund.objects.create(contribution=contrib)
res = self.summary()
eq_(res.context['refund_summary']['requested'], 1)
eq_(res.context['refund_summary']['approved'], 0)
def test_approved_refunds(self):
contrib = Contribution.objects.create(type=amo.CONTRIB_PURCHASE,
user_id=self.user.pk,
addon=self.steamcube,
currency='USD',
amount='0.99')
Refund.objects.create(contribution=contrib,
status=amo.REFUND_APPROVED_INSTANT)
res = self.summary()
eq_(res.context['refund_summary']['requested'], 1)
eq_(res.context['refund_summary']['approved'], 1)
def test_app_created(self):
res = self.summary()
# Number of apps/add-ons belonging to this user.
eq_(len(res.context['user_addons']), 1)
def test_paypal_ids(self):
self.user.addons.update(paypal_id='somedev@app.com')
res = self.summary()
eq_(list(res.context['paypal_ids']), [u'somedev@app.com'])
def test_no_paypal(self):
self.user.addons.update(paypal_id='')
res = self.summary()
eq_(list(res.context['paypal_ids']), [])
def test_payment_data(self):
payment_data = self.payment_data()
AddonPaymentData.objects.create(addon=self.steamcube,
**payment_data)
res = self.summary()
pd = res.context['payment_data'][0]
for key, value in payment_data.iteritems():
eq_(pd[key], value)
def test_no_payment_data(self):
res = self.summary()
eq_(len(res.context['payment_data']), 0)
def test_no_duplicate_payment_data(self):
role = AddonUser.objects.create(user=self.user,
addon=self.otherapp,
role=amo.AUTHOR_ROLE_DEV)
self.otherapp.addonuser_set.add(role)
payment_data = self.payment_data()
AddonPaymentData.objects.create(addon=self.steamcube,
**payment_data)
AddonPaymentData.objects.create(addon=self.otherapp,
**payment_data)
res = self.summary()
eq_(len(res.context['payment_data']), 1)
pd = res.context['payment_data'][0]
for key, value in payment_data.iteritems():
eq_(pd[key], value)
class SearchTestMixin(object):
def search(self, expect_results=True, **data):
res = self.client.get(self.url, data)
data = json.loads(res.content)
if expect_results:
assert len(data['results']), 'should be more than 0 results'
return data
def test_auth_required(self):
self.client.logout()
res = self.client.get(self.url)
self.assertLoginRedirects(res, self.url)
def test_no_results(self):
data = self.search(q='__garbage__', expect_results=False)
eq_(data['results'], [])
class TestAcctSearch(ESTestCase, SearchTestMixin):
fixtures = ['base/users']
@classmethod
def setUpClass(cls):
super(TestAcctSearch, cls).setUpClass()
reindex_users()
def setUp(self):
super(TestAcctSearch, self).setUp()
self.url = reverse('lookup.user_search')
self.user = UserProfile.objects.get(username='clouserw')
assert self.client.login(username='support-staff@mozilla.com',
password='password')
def verify_result(self, data):
eq_(data['results'][0]['name'], self.user.username)
eq_(data['results'][0]['display_name'], self.user.display_name)
eq_(data['results'][0]['email'], self.user.email)
eq_(data['results'][0]['id'], self.user.pk)
eq_(data['results'][0]['url'], reverse('lookup.user_summary',
args=[self.user.pk]))
def test_by_username(self):
self.user.update(username='newusername')
self.refresh()
data = self.search(q='newus')
self.verify_result(data)
def test_by_username_with_dashes(self):
self.user.update(username='kr-raj')
self.refresh()
data = self.search(q='kr-raj')
self.verify_result(data)
def test_by_display_name(self):
self.user.update(display_name='Kumar McMillan')
self.refresh()
data = self.search(q='mcmill')
self.verify_result(data)
def test_by_id(self):
data = self.search(q=self.user.pk)
self.verify_result(data)
def test_by_email(self):
self.user.update(email='fonzi@happydays.com')
self.refresh()
data = self.search(q='fonzih')
self.verify_result(data)
class TestTransactionSearch(TestCase):
fixtures = ['base/users']
def setUp(self):
self.tx_id = 45
self.url = reverse('lookup.transaction_search')
self.client.login(username='support-staff@mozilla.com',
password='password')
def test_redirect(self):
r = self.client.get(self.url, {'q': self.tx_id})
self.assertRedirects(r, reverse('lookup.transaction_summary',
args=[self.tx_id]))
def test_no_perm(self):
self.client.login(username='regular@mozilla.com',
password='password')
r = self.client.get(self.url, {'q': self.tx_id})
eq_(r.status_code, 403)
class TestTransactionSummary(TestCase):
fixtures = ['base/users']
def setUp(self):
self.tx_id = 45
self.url = reverse('lookup.transaction_summary', args=[self.tx_id])
self.client.login(username='support-staff@mozilla.com',
password='password')
def test_200(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
def test_no_perm(self):
self.client.login(username='regular@mozilla.com',
password='password')
r = self.client.get(self.url)
eq_(r.status_code, 403)
class TestAppSearch(ESTestCase, SearchTestMixin):
fixtures = ['base/users', 'webapps/337141-steamcube',
'base/addon_3615']
@classmethod
def setUpClass(cls):
super(TestAppSearch, cls).setUpClass()
reindex_addons()
def setUp(self):
super(TestAppSearch, self).setUp()
self.url = reverse('lookup.app_search')
self.app = Addon.objects.get(pk=337141)
assert self.client.login(username='support-staff@mozilla.com',
password='password')
def verify_result(self, data):
eq_(data['results'][0]['name'], self.app.name.localized_string)
eq_(data['results'][0]['id'], self.app.pk)
eq_(data['results'][0]['url'], reverse('lookup.app_summary',
args=[self.app.pk]))
def test_by_name_part(self):
self.app.name = 'This is Steamcube'
self.app.save()
self.refresh()
data = self.search(q='steamcube')
self.verify_result(data)
def test_by_name_unreviewed(self):
# Just the same as the above test, but with an unreviewed app.
self.app.status = amo.STATUS_UNREVIEWED
self.test_by_name_part()
def test_multiword(self):
self.app.name = 'Firefox Marketplace'
self.app.save()
self.refresh()
data = self.search(q='Firefox Marketplace')
self.verify_result(data)
def test_by_stem_name(self):
self.app.name = 'Instigation'
self.app.save()
self.refresh()
data = self.search(q='instigate')
self.verify_result(data)
def test_by_guid(self):
self.app = Addon.objects.get(pk=3615)
assert self.app.guid, 'Expected this addon to have a guid'
self.app = Addon.objects.get(guid=self.app.guid)
data = self.search(q=self.app.guid, type=amo.ADDON_EXTENSION)
self.verify_result(data)
def test_by_random_guid(self):
self.app = Addon.objects.get(pk=3615)
self.app.update(guid='__bonanza__')
data = self.search(q=self.app.guid, type=amo.ADDON_EXTENSION)
self.verify_result(data)
def test_by_id(self):
data = self.search(q=self.app.pk)
self.verify_result(data)
class AppSummaryTest(TestCase):
fixtures = ['base/users', 'webapps/337141-steamcube',
'base/addon_3615', 'market/prices']
def _setUp(self):
self.app = Addon.objects.get(pk=337141)
self.url = reverse('lookup.app_summary',
args=[self.app.pk])
self.user = UserProfile.objects.get(username='31337')
assert self.client.login(username='support-staff@mozilla.com',
password='password')
def summary(self, expected_status=200):
res = self.client.get(self.url)
eq_(res.status_code, expected_status)
return res
class TestAppSummary(AppSummaryTest):
def setUp(self):
super(TestAppSummary, self).setUp()
self._setUp()
def test_search_matches_type(self):
res = self.summary()
eq_(pq(res.content)('#app-search-form select option[selected]').val(),
str(amo.ADDON_WEBAPP))
def test_authors(self):
user = UserProfile.objects.get(username='admin')
role = AddonUser.objects.create(user=user,
addon=self.app,
role=amo.AUTHOR_ROLE_DEV)
self.app.addonuser_set.add(role)
res = self.summary()
eq_(res.context['authors'][0].display_name,
user.display_name)
def test_visible_authors(self):
for role in (amo.AUTHOR_ROLE_DEV,
amo.AUTHOR_ROLE_OWNER,
amo.AUTHOR_ROLE_VIEWER,
amo.AUTHOR_ROLE_SUPPORT):
user = UserProfile.objects.create(username=role)
role = AddonUser.objects.create(user=user,
addon=self.app,
role=role)
self.app.addonuser_set.add(role)
res = self.summary()
eq_(sorted([u.username for u in res.context['authors']]),
[str(amo.AUTHOR_ROLE_DEV), str(amo.AUTHOR_ROLE_OWNER)])
def test_details(self):
res = self.summary()
eq_(res.context['app'].manifest_url, self.app.manifest_url)
eq_(res.context['app'].premium_type, amo.ADDON_FREE)
eq_(res.context['price'], None)
def test_price(self):
price = Price.objects.get(pk=1)
AddonPremium.objects.create(addon=self.app,
price=price)
res = self.summary()
eq_(res.context['price'], price)
def test_abuse_reports(self):
for i in range(2):
AbuseReport.objects.create(addon=self.app,
ip_address='10.0.0.1',
message='spam and porn everywhere')
res = self.summary()
eq_(res.context['abuse_reports'], 2)
def test_permissions(self):
raise SkipTest('we do not support permissions yet')
def test_version_history_non_packaged(self):
res = self.summary()
eq_(pq(res.content)('section.version-history').length, 0)
def test_version_history_packaged(self):
self.app.update(is_packaged=True)
res = self.summary()
eq_(pq(res.content)('section.version-history').length, 1)
class DownloadSummaryTest(AppSummaryTest):
def setUp(self):
super(DownloadSummaryTest, self).setUp()
self._setUp()
self.users = [UserProfile.objects.get(username='regularuser'),
UserProfile.objects.get(username='admin')]
class TestAppDownloadSummary(DownloadSummaryTest, TestCase):
def setUp(self):
super(TestAppDownloadSummary, self).setUp()
self.addon = Addon.objects.get(pk=3615)
def test_7_days(self):
self.app.update(weekly_downloads=0)
for user in self.users:
Installed.objects.create(addon=self.app, user=user)
update_weekly_downloads()
res = self.summary()
eq_(res.context['downloads']['last_7_days'], 2)
def test_ignore_older_than_7_days(self):
_8_days_ago = datetime.now() - timedelta(days=8)
self.app.update(weekly_downloads=0)
for user in self.users:
c = Installed.objects.create(addon=self.app, user=user)
c.update(created=_8_days_ago)
update_weekly_downloads()
res = self.summary()
eq_(res.context['downloads']['last_7_days'], 0)
def test_24_hours(self):
for user in self.users:
Installed.objects.create(addon=self.app, user=user)
res = self.summary()
eq_(res.context['downloads']['last_24_hours'], 2)
def test_ignore_older_than_24_hours(self):
_25_hr_ago = datetime.now() - timedelta(hours=25)
for user in self.users:
c = Installed.objects.create(addon=self.app, user=user)
c.update(created=_25_hr_ago)
res = self.summary()
eq_(res.context['downloads']['last_24_hours'], 0)
def test_alltime_dl(self):
for user in self.users:
Installed.objects.create(addon=self.app, user=user)
# Downloads for some other app that shouldn't be counted.
for user in self.users:
Installed.objects.create(addon=self.addon, user=user)
res = self.summary()
eq_(res.context['downloads']['alltime'], 2)
class TestAppSummaryPurchases(AppSummaryTest):
def setUp(self):
super(TestAppSummaryPurchases, self).setUp()
self._setUp()
def assert_totals(self, data):
eq_(data['total'], 6)
six_bucks = numbers.format_currency(6, 'USD',
locale=numbers.LC_NUMERIC)
three_euro = numbers.format_currency(3, 'EUR',
locale=numbers.LC_NUMERIC)
eq_(set(data['amounts']), set([six_bucks, three_euro]))
eq_(len(data['amounts']), 2)
def assert_empty(self, data):
eq_(data['total'], 0)
eq_(sorted(data['amounts']), [])
def purchase(self, created=None, typ=amo.CONTRIB_PURCHASE):
for curr, amount in (('USD', '2.00'), ('EUR', '1.00')):
for i in range(3):
c = Contribution.objects.create(addon=self.app,
user=self.user,
amount=Decimal(amount),
currency=curr,
type=typ)
if created:
c.update(created=created)
def test_24_hr(self):
self.purchase()
res = self.summary()
self.assert_totals(res.context['purchases']['last_24_hours'])
def test_ignore_older_than_24_hr(self):
self.purchase(created=datetime.now() - timedelta(days=1,
minutes=1))
res = self.summary()
self.assert_empty(res.context['purchases']['last_24_hours'])
def test_7_days(self):
self.purchase(created=datetime.now() - timedelta(days=6,
minutes=55))
res = self.summary()
self.assert_totals(res.context['purchases']['last_7_days'])
def test_ignore_older_than_7_days(self):
self.purchase(created=datetime.now() - timedelta(days=7,
minutes=1))
res = self.summary()
self.assert_empty(res.context['purchases']['last_7_days'])
def test_alltime(self):
self.purchase(created=datetime.now() - timedelta(days=31))
res = self.summary()
self.assert_totals(res.context['purchases']['alltime'])
def test_ignore_non_purchases(self):
for typ in [amo.CONTRIB_REFUND,
amo.CONTRIB_CHARGEBACK,
amo.CONTRIB_PENDING,
amo.CONTRIB_INAPP_PENDING]:
self.purchase(typ=typ)
res = self.summary()
self.assert_empty(res.context['purchases']['alltime'])
def test_pay_methods(self):
for paykey in ('AP-1234', # indicates PayPal
'AP-1235',
None): # indicates other
Contribution.objects.create(addon=self.app,
user=self.user,
amount=Decimal('0.99'),
currency='USD',
paykey=paykey,
type=amo.CONTRIB_PURCHASE)
res = self.summary()
eq_(sorted(res.context['payment_methods']),
[u'33.3% of purchases via Other',
u'66.7% of purchases via PayPal'])
def test_inapp_pay_methods(self):
Contribution.objects.create(addon=self.app,
user=self.user,
amount=Decimal('0.99'),
currency='USD',
paykey='AP-1235',
type=amo.CONTRIB_INAPP)
res = self.summary()
eq_(res.context['payment_methods'],
[u'100.0% of purchases via PayPal'])
class TestAppSummaryRefunds(AppSummaryTest):
def setUp(self):
super(TestAppSummaryRefunds, self).setUp()
self._setUp()
self.user = UserProfile.objects.get(username='regularuser')
self.contrib1 = self.purchase()
self.contrib2 = self.purchase()
self.contrib3 = self.purchase()
self.contrib4 = self.purchase()
def purchase(self):
return Contribution.objects.create(addon=self.app,
user=self.user,
amount=Decimal('0.99'),
currency='USD',
paykey='AP-1235',
type=amo.CONTRIB_PURCHASE)
def refund(self, refunds):
for contrib, status in refunds:
Refund.objects.create(contribution=contrib,
status=status)
def test_requested(self):
self.refund(((self.contrib1, amo.REFUND_APPROVED),
(self.contrib2, amo.REFUND_APPROVED),
(self.contrib3, amo.REFUND_DECLINED),
(self.contrib4, amo.REFUND_DECLINED)))
res = self.summary()
eq_(res.context['refunds']['requested'], 2)
eq_(res.context['refunds']['percent_of_purchases'], '50.0%')
def test_no_refunds(self):
res = self.summary()
eq_(res.context['refunds']['requested'], 0)
eq_(res.context['refunds']['percent_of_purchases'], '0.0%')
eq_(res.context['refunds']['auto-approved'], 0)
eq_(res.context['refunds']['approved'], 0)
eq_(res.context['refunds']['rejected'], 0)
def test_auto_approved(self):
self.refund(((self.contrib1, amo.REFUND_APPROVED),
(self.contrib2, amo.REFUND_APPROVED_INSTANT)))
res = self.summary()
eq_(res.context['refunds']['auto-approved'], 1)
def test_approved(self):
self.refund(((self.contrib1, amo.REFUND_APPROVED),
(self.contrib2, amo.REFUND_DECLINED)))
res = self.summary()
eq_(res.context['refunds']['approved'], 1)
def test_rejected(self):
self.refund(((self.contrib1, amo.REFUND_APPROVED),
(self.contrib2, amo.REFUND_DECLINED),
(self.contrib3, amo.REFUND_FAILED)))
res = self.summary()
eq_(res.context['refunds']['rejected'], 2)
class TestAddonDownloadSummary(DownloadSummaryTest, TestCase):
def setUp(self):
super(TestAddonDownloadSummary, self).setUp()
self.app = Addon.objects.get(pk=3615)
self.url = reverse('lookup.app_summary',
args=[self.app.pk])
def test_7_days(self):
for user in self.users:
DownloadCount.objects.create(addon=self.app, count=2,
date=datetime.now().date())
res = self.summary()
eq_(res.context['downloads']['last_7_days'], 4)
def test_ignore_older_than_7_days(self):
_8_days_ago = datetime.now() - timedelta(days=8)
for user in self.users:
c = DownloadCount.objects.create(addon=self.app, count=2,
date=datetime.now().date())
c.date = _8_days_ago.date()
c.save()
res = self.summary()
eq_(res.context['downloads']['last_7_days'], 0)
def test_24_hours(self):
for user in self.users:
DownloadCount.objects.create(addon=self.app, count=2,
date=datetime.now().date())
res = self.summary()
eq_(res.context['downloads']['last_24_hours'], 4)
def test_ignore_older_than_24_hours(self):
yesterday = datetime.now().date() - timedelta(days=1)
for user in self.users:
c = DownloadCount.objects.create(addon=self.app, count=2,
date=datetime.now().date())
c.date = yesterday
c.save()
res = self.summary()
eq_(res.context['downloads']['last_24_hours'], 0)
def test_alltime_dl(self):
for i in range(2):
DownloadCount.objects.create(addon=self.app, count=2,
date=datetime.now().date())
# Downloads for some other addon that shouldn't be counted.
addon = addon_factory()
for user in self.users:
DownloadCount.objects.create(addon=addon, count=2,
date=datetime.now().date())
res = self.summary()
eq_(res.context['downloads']['alltime'], 4)
def test_zero_alltime_dl(self):
# Downloads for some other addon that shouldn't be counted.
addon = addon_factory()
for user in self.users:
DownloadCount.objects.create(addon=addon, count=2,
date=datetime.now().date())
res = self.summary()
eq_(res.context['downloads']['alltime'], 0)
class TestPurchases(amo.tests.TestCase):
fixtures = ['base/users', 'webapps/337141-steamcube']
def setUp(self):
self.app = Webapp.objects.get(pk=337141)
self.reviewer = UserProfile.objects.get(username='admin')
self.user = UserProfile.objects.get(username='regularuser')
self.url = reverse('lookup.user_purchases', args=[self.user.pk])
def test_not_allowed(self):
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
def test_not_even_mine(self):
self.client.login(username=self.user.email, password='password')
eq_(self.client.get(self.url).status_code, 403)
def test_access(self):
self.client.login(username=self.reviewer.email, password='password')
res = self.client.get(self.url)
eq_(res.status_code, 200)
eq_(len(pq(res.content)('p.no-results')), 1)
def test_purchase_shows_up(self):
Contribution.objects.create(user=self.user, addon=self.app,
amount=1, type=amo.CONTRIB_PURCHASE)
self.client.login(username=self.reviewer.email, password='password')
res = self.client.get(self.url)
eq_(res.status_code, 200)
doc = pq(res.content)
eq_(doc('ol.listing a.mkt-tile').attr('href'),
urlparams(self.app.get_detail_url(), src=''))
def test_no_support_link(self):
for type_ in [amo.CONTRIB_PURCHASE, amo.CONTRIB_INAPP]:
Contribution.objects.create(user=self.user, addon=self.app,
amount=1, type=type_)
self.client.login(username=self.reviewer.email, password='password')
res = self.client.get(self.url)
eq_(res.status_code, 200)
doc = pq(res.content)
eq_(len(doc('.item a.request-support')), 0)
class TestActivity(amo.tests.TestCase):
fixtures = ['base/users', 'webapps/337141-steamcube']
def setUp(self):
self.app = Webapp.objects.get(pk=337141)
self.reviewer = UserProfile.objects.get(username='admin')
self.user = UserProfile.objects.get(username='regularuser')
self.url = reverse('lookup.user_activity', args=[self.user.pk])
def test_not_allowed(self):
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
def test_not_even_mine(self):
self.client.login(username=self.user.email, password='password')
eq_(self.client.get(self.url).status_code, 403)
def test_access(self):
self.client.login(username=self.reviewer.email, password='password')
res = self.client.get(self.url)
eq_(res.status_code, 200)
eq_(len(pq(res.content)('.simple-log div')), 0)
def test_log(self):
self.client.login(username=self.reviewer.email, password='password')
self.client.get(self.url)
log_item = ActivityLog.objects.get(action=amo.LOG.ADMIN_VIEWED_LOG.id)
eq_(len(log_item.arguments), 1)
eq_(log_item.arguments[0].id, self.reviewer.id)
eq_(log_item.user, self.user)
def test_display(self):
amo.log(amo.LOG.PURCHASE_ADDON, self.app, user=self.user)
amo.log(amo.LOG.ADMIN_USER_EDITED, self.user, 'spite', user=self.user)
self.client.login(username=self.reviewer.email, password='password')
res = self.client.get(self.url)
eq_(res.status_code, 200)
doc = pq(res.content)
assert 'purchased' in doc('li.item').eq(0).text()
assert 'edited' in doc('li.item').eq(1).text()
| 38.140527
| 78
| 0.575137
|
2c18b6179a12e598a81f2c899f8a359be5edbac5
| 6,600
|
py
|
Python
|
Snake/Snake-master/snake/base/snake.py
|
shubham-11700069/Python
|
9ea7dcbe6d9050453357a0c56815ce2371f9364e
|
[
"Apache-2.0"
] | null | null | null |
Snake/Snake-master/snake/base/snake.py
|
shubham-11700069/Python
|
9ea7dcbe6d9050453357a0c56815ce2371f9364e
|
[
"Apache-2.0"
] | null | null | null |
Snake/Snake-master/snake/base/snake.py
|
shubham-11700069/Python
|
9ea7dcbe6d9050453357a0c56815ce2371f9364e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=C0103,C0111,W0201,W0212
"""Definition of class Snake."""
import random
from collections import deque
import sys
sys.path.insert(0,'D:/Python/Snake/Snake-master/snake/')
from base.direc import Direc
from base.point import PointType
from base.pos import Pos
class Snake:
"""Snake of the game."""
def __init__(self, game_map, init_direc=None, init_bodies=None, init_types=None):
"""Initialize a Snake object.
Args:
game_map (base.map.Map): The map that the snake moves on.
init_direc (base.direc.Direc): Initial direction.
init_bodies (list of base.pos.Pos): Initial snake bodies positions.
init_types (list of base.point.PointType): Types of each position in init_bodies.
"""
self._map = game_map
self._init_direc = init_direc
self._init_bodies = init_bodies
self._init_types = init_types
self.reset(False)
def reset(self, reset_map=True):
rand_init = False
if self._init_direc is None: # Randomly initialize
rand_init = True
head_row = random.randrange(2, self._map.num_rows - 2)
head_col = random.randrange(2, self._map.num_cols - 2)
head = Pos(head_row, head_col)
self._init_direc = random.choice([Direc.LEFT, Direc.UP, Direc.RIGHT, Direc.DOWN])
self._init_bodies = [head, head.adj(Direc.opposite(self._init_direc))]
self._init_types = []
if self._init_direc == Direc.LEFT:
self._init_types.append(PointType.HEAD_L)
elif self._init_direc == Direc.UP:
self._init_types.append(PointType.HEAD_U)
elif self._init_direc == Direc.RIGHT:
self._init_types.append(PointType.HEAD_R)
elif self._init_direc == Direc.DOWN:
self._init_types.append(PointType.HEAD_D)
if self._init_direc == Direc.LEFT or self._init_direc == Direc.RIGHT:
self._init_types.append(PointType.BODY_HOR)
elif self._init_direc == Direc.UP or self._init_direc == Direc.DOWN:
self._init_types.append(PointType.BODY_VER)
self._steps = 0
self._dead = False
self._direc = self._init_direc
self._direc_next = Direc.NONE
self._bodies = deque(self._init_bodies)
if reset_map:
self._map.reset()
for i, pos in enumerate(self._init_bodies):
self._map.point(pos).type = self._init_types[i]
if rand_init:
self._init_direc = self._init_bodies = self._init_types = None
def copy(self):
m_copy = self._map.copy()
s_copy = Snake(m_copy, Direc.NONE, [], [])
s_copy._steps = self._steps
s_copy._dead = self._dead
s_copy._direc = self._direc
s_copy._direc_next = self._direc_next
s_copy._bodies = deque(self._bodies)
return s_copy, m_copy
@property
def map(self):
return self._map
@property
def steps(self):
return self._steps
@property
def dead(self):
return self._dead
@dead.setter
def dead(self, val):
self._dead = val
@property
def direc(self):
return self._direc
@property
def direc_next(self):
return self._direc_next
@direc_next.setter
def direc_next(self, val):
self._direc_next = val
@property
def bodies(self):
return self._bodies
def len(self):
return len(self._bodies)
def head(self):
if not self._bodies:
return None
return self._bodies[0]
def tail(self):
if not self._bodies:
return None
return self._bodies[-1]
def move_path(self, path):
for p in path:
self.move(p)
def move(self, new_direc=None):
if new_direc is not None:
self._direc_next = new_direc
if self._dead or \
self._direc_next == Direc.NONE or \
self._map.is_full() or \
self._direc_next == Direc.opposite(self._direc):
return
old_head_type, new_head_type = self._new_types()
self._map.point(self.head()).type = old_head_type
new_head = self.head().adj(self._direc_next)
self._bodies.appendleft(new_head)
if not self._map.is_safe(new_head):
self._dead = True
if self._map.point(new_head).type == PointType.FOOD:
self._map.rm_food()
else:
self._rm_tail()
self._map.point(new_head).type = new_head_type
self._direc = self._direc_next
self._steps += 1
def _rm_tail(self):
self._map.point(self.tail()).type = PointType.EMPTY
self._bodies.pop()
def _new_types(self):
old_head_type, new_head_type = None, None
# new_head_type
if self._direc_next == Direc.LEFT:
new_head_type = PointType.HEAD_L
elif self._direc_next == Direc.UP:
new_head_type = PointType.HEAD_U
elif self._direc_next == Direc.RIGHT:
new_head_type = PointType.HEAD_R
elif self._direc_next == Direc.DOWN:
new_head_type = PointType.HEAD_D
# old_head_type
if (self._direc == Direc.LEFT and self._direc_next == Direc.LEFT) or \
(self._direc == Direc.RIGHT and self._direc_next == Direc.RIGHT):
old_head_type = PointType.BODY_HOR
elif (self._direc == Direc.UP and self._direc_next == Direc.UP) or \
(self._direc == Direc.DOWN and self._direc_next == Direc.DOWN):
old_head_type = PointType.BODY_VER
elif (self._direc == Direc.RIGHT and self._direc_next == Direc.UP) or \
(self._direc == Direc.DOWN and self._direc_next == Direc.LEFT):
old_head_type = PointType.BODY_LU
elif (self._direc == Direc.LEFT and self._direc_next == Direc.UP) or \
(self._direc == Direc.DOWN and self._direc_next == Direc.RIGHT):
old_head_type = PointType.BODY_UR
elif (self._direc == Direc.LEFT and self._direc_next == Direc.DOWN) or \
(self._direc == Direc.UP and self._direc_next == Direc.RIGHT):
old_head_type = PointType.BODY_RD
elif (self._direc == Direc.RIGHT and self._direc_next == Direc.DOWN) or \
(self._direc == Direc.UP and self._direc_next == Direc.LEFT):
old_head_type = PointType.BODY_DL
return old_head_type, new_head_type
| 34.020619
| 93
| 0.610758
|
eb17f34c5f12ab3dc4f2133b9485f9e17a44bf7e
| 105
|
py
|
Python
|
tests/manage.py
|
dwatkinsweb/django-skin
|
925db5313f564e2edca0ea2419b5d7b752a7a518
|
[
"MIT"
] | 3
|
2015-10-07T17:59:01.000Z
|
2017-11-16T11:19:13.000Z
|
tests/manage.py
|
dwatkinsweb/django-skin
|
925db5313f564e2edca0ea2419b5d7b752a7a518
|
[
"MIT"
] | null | null | null |
tests/manage.py
|
dwatkinsweb/django-skin
|
925db5313f564e2edca0ea2419b5d7b752a7a518
|
[
"MIT"
] | 2
|
2017-11-15T01:25:56.000Z
|
2022-01-06T23:39:32.000Z
|
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 26.25
| 42
| 0.790476
|
841ce1871742faba322fb960dfdf941079e0b520
| 23,410
|
py
|
Python
|
wpoium/wda/__init__.py
|
elihe999/LAN-autotest-webium
|
355d3abfe3104690f8f6c574e64fd9927748c6e7
|
[
"Apache-2.0"
] | null | null | null |
wpoium/wda/__init__.py
|
elihe999/LAN-autotest-webium
|
355d3abfe3104690f8f6c574e64fd9927748c6e7
|
[
"Apache-2.0"
] | null | null | null |
wpoium/wda/__init__.py
|
elihe999/LAN-autotest-webium
|
355d3abfe3104690f8f6c574e64fd9927748c6e7
|
[
"Apache-2.0"
] | null | null | null |
import time
from wpoium.common import logging
from wpoium.settings import Setting
from wpoium.processing import processing, screenshots_name
from wpoium.common.assert_des import insert_assert
LOCATOR_LIST = [
"id",
"name",
"text",
"nameContains",
"label",
"xpath",
"labelContains",
"className",
"predicate",
"classChain"
]
class Page(object):
def __init__(self, dr):
self.driver = dr
def native_resolution(self):
"""
        Get the native screen resolution.
"""
multiple = self.driver.scale
w, h = self.driver.window_size()
return multiple * w, multiple * h
@staticmethod
def wait(sleep=2):
"""
        Sleep for the given number of seconds.
"""
time.sleep(sleep)
def close(self):
"""
        Close the app.
"""
self.driver.close()
def click(self, x: float=None, y: float=None, text: str=None, screenshots=Setting.click_screenshots):
"""
        Tap at a coordinate.
        Args:
            x(float): x coordinate
            y(float): y coordinate
            text(str): text to locate and tap
            screenshots(bool): if True, take a screenshot before tapping; off by default
"""
if not x and not y and not text:
raise ValueError
(x, y) = self.get_position(text=text) if text else (x, y)
self.screenshots(x, y, describe="点击坐标, {},{}".format(x, y)) if screenshots else \
(print("\n"), logging.info(msg=" 点击 ==> " + "点击坐标{},{}".format(x, y)))
self.driver.click(x, y)
def get_position(self, text=None, element=None):
"""
        Get the coordinates of an element or a piece of text.
        Args:
            text(str): text to locate
            element(object): native element object
"""
w, h = self.driver.window_size()
if text is not None:
rect = self.driver(name=text).bounds
elif element is not None:
rect = element.bounds
else:
raise NameError
x = rect.x + rect.width / 2
y = rect.y + rect.height / 2
return x / w, y / h
def swipe(self, fx: float, fy: float, tx: float, ty: float, duration=0, screenshots=True):
"""
        Swipe.
        Args:
            fx(float): start x coordinate
            fy(float): start y coordinate
            tx(float): end x coordinate
            ty(float): end y coordinate
            duration(float): duration of the swipe (seconds)
            screenshots(bool): take a screenshot after swiping
"""
self.driver.swipe(fx, fy, tx, ty, duration=duration)
if screenshots is True:
time.sleep(0.5)
self.screenshots()
def swipe_left(self, fx=0.3, fy=0.5, tx=0.7, ty=0.5, times=1, duration=0, screenshots=True):
"""
        Swipe to the left.
        Args:
            fx(float): start x coordinate
            fy(float): start y coordinate
            tx(float): end x coordinate
            ty(float): end y coordinate
            times(int): number of swipes
            duration(float): duration of each swipe (seconds)
            screenshots(bool): take a screenshot after swiping
"""
for i in range(times):
self.swipe(fx, fy, tx, ty, duration=duration, screenshots=screenshots)
def swipe_right(self, fx=0.7, fy=0.5, tx=0.3, ty=0.5, times=1, duration=0, screenshots=True):
"""
        Swipe to the right.
        Args:
            fx(float): start x coordinate
            fy(float): start y coordinate
            tx(float): end x coordinate
            ty(float): end y coordinate
            times(int): number of swipes
            duration(float): duration of each swipe (seconds)
            screenshots(bool): take a screenshot after swiping
"""
for i in range(times):
self.swipe(fx, fy, tx, ty, duration=duration, screenshots=screenshots)
def swipe_up(self, fx=0.5, fy=0.5, tx=0.5, ty=0.8, times=1, duration=0, screenshots=True):
"""
        Swipe upwards.
        Args:
            fx(float): start x coordinate
            fy(float): start y coordinate
            tx(float): end x coordinate
            ty(float): end y coordinate
            times(int): number of swipes
            duration(float): duration of each swipe (seconds)
            screenshots(bool): take a screenshot after swiping
"""
for i in range(times):
self.swipe(fx, fy, tx, ty, duration=duration, screenshots=screenshots)
def swipe_down(self, fx=0.5, fy=0.5, tx=0.5, ty=0.2, times=1, duration=0, screenshots=True):
"""
        Swipe downwards.
        Args:
            fx(float): start x coordinate
            fy(float): start y coordinate
            tx(float): end x coordinate
            ty(float): end y coordinate
            times(int): number of swipes
            duration(float): duration of each swipe (seconds)
            screenshots(bool): take a screenshot after swiping
"""
for i in range(times):
self.swipe(fx, fy, tx, ty, duration=duration, screenshots=screenshots)
def swipe_search(self, text, direction="down"):
"""
        Search for text by scrolling (not based on an element object).
        Args:
            text(str): text to search for
            direction(str): "down" or "up"
"""
for i in range(20):
if self.driver(text=text).exists and self.driver(text=text).displayed:
break
else:
                if direction == "down":
                    self.swipe_down()
                elif direction == "up":
                    self.swipe_up()
else:
raise NameError
else:
raise TimeoutError("Timeout, element not found")
def screenshots(self, w=None, h=None, describe=None):
"""
        Take a screenshot.
"""
if w is not None and h is not None:
if float(w) < 1 and float(h) < 1:
multiple = self.driver.scale
w, h = multiple * w, multiple * h
screenshots_dir = screenshots_name(describe)
self.driver.screenshot().save(screenshots_dir)
processing(screenshots_dir, w, h)
def who_exists(self, element=None, text=None):
"""
        Check several elements or texts from different pages to see which one appears first, in order to determine the page state.
        Args:
            element(list): list of element objects from different pages
            text(list): list of texts from different pages
        Return:
            element_child(object): the element that exists on the current page
            text_child(text): the text that exists on the current page
"""
for i in range(10):
if element is not None:
if type(element) is list:
for element_child in element:
if element_child.exists() is True:
return element_child
else:
raise TypeError("The element must be a list")
elif text is not None:
if type(text) is list:
for text_child in text:
                        if self.driver(name=text_child).exists is True:
return text_child
else:
raise TypeError("The text must be a list")
else:
raise ValueError("Must pass parameter")
time.sleep(1)
else:
raise TypeError("The text or element is not exists")
def alert(self, click=None, timeout=5) -> bool:
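        # Poll for a system alert for up to `timeout` seconds. If one shows up,
        # log its text and buttons, tap the requested button ("first", "second",
        # or a literal button title) and return True; otherwise return False.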
for i in range(timeout):
if "error" not in self.driver.alert.buttons():
_list = self.driver.alert.buttons()
text = self.driver.alert.text
logging.info(msg="弹窗,提示⚠{text},选项按钮{button}".format(text=text, button=_list))
if click == "first":
position = self.get_position(text=_list[0])
self.screenshots(position[0], position[1])
logging.info(msg="👆 ==> {}".format(_list[0]))
self.driver.alert.accept()
elif click == "second":
position = self.get_position(text=_list[1])
self.screenshots(position[0], position[1])
logging.info(msg="👆 ==> {}".format(_list[1]))
self.driver.alert.dismiss()
else:
position = self.get_position(text=click)
self.screenshots(position[0], position[1])
logging.info(msg="👆 ==> {}".format(click))
self.driver.alert.click(click)
return True
else:
time.sleep(1)
continue
else:
return False
def assert_text_exists(self, text: str, describe, sleep=0, timeout=10):
"""
Asserts that the text exists on the current page
Args:
sleep(int): sleep time
text(str): text
describe(str): Assertion description information
timeout(int): Maximum waiting time
"""
time.sleep(sleep)
logging.info("预期结果: " + describe + " 文案存在")
for i in range(timeout):
text_exists = self.driver(text=text).exists
if text_exists is True:
insert_assert(describe, True)
logging.info("实际结果: " + describe + " 文案存在")
break
else:
time.sleep(1)
continue
else:
insert_assert(describe, False)
logging.warn("实际结果: " + describe + " 文案不存在")
self.screenshots(describe="断言")
def assert_text_contains(self, text: str, describe, sleep=0, timeout=10):
"""
        Asserts that text containing the given substring exists on the current page
Args:
sleep(int): sleep time
text(str): text
describe(str): Assertion description information
timeout(int): Maximum waiting time
"""
time.sleep(sleep)
logging.info("预期结果: " + describe + " 文案存在")
for i in range(timeout):
text_exists = self.driver(nameContains=text).exists
if text_exists is True:
insert_assert(describe, True)
logging.info("实际结果: " + describe + " 文案存在")
break
else:
time.sleep(1)
continue
else:
insert_assert(describe, False)
logging.warn("实际结果: " + describe + " 文案不存在")
self.screenshots(describe="断言")
def assert_element_exists(self, element, describe, sleep=0, timeout=10):
"""
        Asserts that the element exists on the current page
Args:
sleep(int): sleep time
element: element
describe(str): Assertion description information
timeout(int): Maximum waiting time
"""
time.sleep(sleep)
logging.info("预期结果: " + describe + " 元素存在")
for i in range(timeout):
element_exists = element.exists()
if element_exists is True:
insert_assert(describe, True)
logging.info("实际结果: " + describe + " 元素存在")
break
else:
time.sleep(1)
continue
else:
insert_assert(describe, False)
logging.warn("实际结果: " + describe + " 元素不存在")
self.screenshots(describe="断言")
def assert_text_not_exists(self, text: str, describe, sleep=0, timeout=10):
"""
        Asserts that the text does not exist on the current page
Args:
sleep(int): sleep time
text(str): text
describe(str): Assertion description information
timeout(int): Maximum waiting time
"""
time.sleep(sleep)
logging.info("预期结果: " + describe + " 文案不存在")
for i in range(timeout):
text_exists = self.driver(text=text).exists
if text_exists is True:
insert_assert(describe, False)
logging.warn("实际结果: " + describe + " 文案存在")
break
else:
time.sleep(1)
continue
else:
insert_assert(describe, True)
logging.info("实际结果: " + describe + " 文案不存在")
self.screenshots(describe="断言")
def assert_element_not_exists(self, element, describe, sleep=0, timeout=10):
"""
        Asserts that the element does not exist on the current page
Args:
sleep(int): sleep time
element: element
describe(str): Assertion description information
timeout(int): Maximum waiting time
"""
time.sleep(sleep)
logging.info("预期结果: " + describe + " 元素不存在")
for i in range(timeout):
element_exists = element.exists()
if element_exists is True:
insert_assert(describe, False)
logging.warn("实际结果: " + describe + " 元素存在")
break
else:
time.sleep(1)
continue
else:
insert_assert(describe, True)
logging.info("实际结果: " + describe + " 元素不存在")
self.screenshots(describe="断言")
@staticmethod
def assert_text_equals(text_1, text_2, describe):
"""
Asserts that two texts are equal
Args:
            text_1(str): first text
            text_2(str): second text
            describe(str): assertion description
"""
logging.info("预期结果: " + text_1 + "," + text_2 + " 相等")
if text_1 == text_2:
result = [describe, True]
Setting.assert_result.append(result)
logging.info("预期结果: " + text_1 + "," + text_2 + " 相等")
else:
result = [describe, False]
Setting.assert_result.append(result)
logging.warn("预期结果: " + text_1 + "," + text_2 + " 不相等")
@staticmethod
def assert_text_not_equals(text_1, text_2, describe):
"""
Asserts that two texts are not equal
Args:
            text_1(str): first text
            text_2(str): second text
            describe(str): assertion description
"""
logging.info("预期结果: " + text_1 + "," + text_2 + " 不相等")
if text_1 == text_2:
result = [describe, False]
Setting.assert_result.append(result)
logging.warn("预期结果: " + text_1 + "," + text_2 + " 相等")
else:
result = [describe, True]
Setting.assert_result.append(result)
logging.info("预期结果: " + text_1 + "," + text_2 + " 不相等")
class Element(object):
driver = None
def __init__(self, timeout=10, describe=None, **kwargs):
self.time_out = timeout
self.describe = describe
if not kwargs:
raise ValueError("Please specify a locator")
self.kwargs = kwargs
self.k, self.v = next(iter(kwargs.items()))
if self.k not in LOCATOR_LIST:
raise KeyError("Element positioning of type '{}' is not supported.".format(self.k))
def __get__(self, instance, owner):
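        # Descriptor hook: when this element attribute is read from a page
        # object, stash that page's driver in a module-level global so the
        # element methods below can run queries against it.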
if instance is None:
return None
global driver
driver = instance.driver
return self
def click(self, focus=None, beyond=None, screenshots=Setting.click_screenshots):
"""
        Tap the element by its coordinates.
        Args:
            focus(list): position inside the element's area to tap; defaults to the element's centre
            beyond(list): tap a position outside the element, offset relative to it
            screenshots(bool): if True, take a screenshot before tapping; off by default
"""
global driver
        # Tap via coordinates
w, h = driver.window_size()
if self.k == "focus":
if type(self.v) is not list:
raise ValueError("The argument must be a list")
elif self.v[0] > 1 or self.v[1] > 1:
raise ValueError
x, y = self.v[0] * w, self.v[1] * h
self.screenshots(x, y, describe="点击, {}".format(self.describe)) if screenshots else \
(print("\n"), logging.info(msg=" 点击 ==> " + self.describe))
driver.click(self.v[0], self.v[1])
else:
if focus is not None:
x, y = self.focus(focus)
elif beyond is not None:
xx, yy = self.get_position(percentage=False)
x, y = xx + beyond[0] * w, yy + beyond[1] * h
else:
x, y = self.focus([0.5, 0.5])
self.screenshots(x, y, describe="点击, {}".format(self.describe)) if screenshots else \
(print("\n"), logging.info(msg=" 点击 ==> " + self.describe))
driver.click(x / w, y / h)
def click_exists(self, timeout=0):
"""
        Tap the element if it exists; otherwise do nothing.
        Args:
            timeout(int): maximum wait time
"""
global driver
return driver(**self.kwargs).click_exists(timeout)
def wait(self, timeout=10):
"""
        Wait for the element to appear.
        Args:
            timeout(int): wait time
"""
global driver
time.sleep(1)
driver(**self.kwargs).wait(timeout=timeout)
def get(self, timeout=10, raise_error=False):
"""
Args:
timeout (float): timeout for query element, unit seconds
Default 10s
raise_error (bool): whether to raise error if element not found
Returns:
Element: UI Element
Raises:
WDAElementNotFoundError if raise_error is True else None
"""
global driver
driver(**self.kwargs).get(timeout=timeout, raise_error=raise_error)
def wait_gone(self, timeout=10):
"""
        Wait for the element to disappear.
        Args:
            timeout(int): wait time
"""
global driver
driver(**self.kwargs).wait_gone(timeout=timeout)
def find_elements(self, text=False):
"""
        Find all matching elements.
        Args:
            text(bool): if True, return the text content of the elements instead
"""
global driver
text_list = []
data = driver(**self.kwargs).find_elements()
logging.info("查找到匹配数量有==> {}个".format(len(data)))
if text is True:
for text_data in data:
text_list.append(text_data.get_text())
return text_list
else:
return data
def instance(self, num=1):
"""
        Return the element at position `num` (0-based) among the matching elements.
"""
_list = []
self.wait()
data = len(self.find_elements())
for i in range(data):
_list.append(i)
if self.k == "xpath":
self.kwargs = {"xpath": self.v + "[{}]".format(_list[num] + 1)}
element = PageElement(**self.kwargs)
else:
element = PageElement(**self.kwargs, index=_list[num] + 1)
return element
def clear_text(self):
"""
        Clear the input field.
"""
global driver
driver(**self.kwargs).clear_text()
def set_text(self, text):
"""
        Type text into the element.
        Args:
            text(str): text to type into the input field
"""
global driver
text = str(text)
self.clear_text()
logging.info(msg=" 键盘输入 ==> " + text)
driver(**self.kwargs).set_text(text)
def get_text(self):
"""
        Get the text of the element.
"""
global driver
return driver(**self.kwargs).text
def swipe(self, direction, times=1, distance=1.0):
"""
        Swipe relative to the element.
        times(int): number of swipes
        distance(float): swipe distance
"""
global driver
assert direction in ("left", "right", "up", "down")
for i in range(times):
driver(**self.kwargs).scroll(direction=direction, distance=distance)
time.sleep(1)
def focus(self, position):
"""
        Locate a coordinate within the element's area.
        Args:
            position(list): relative coordinate inside the element's bounds
"""
global driver
self.get()
if type(position) is not list:
raise NameError("The argument must be a list")
elif position[0] > 1 or position[1] > 1:
raise NameError("Coordinates range from 0 to 1")
rect = driver(**self.kwargs).bounds
x = rect.x + rect.width * position[0]
y = rect.y + rect.height * position[1]
return x, y
def get_position(self, percentage=True):
"""
        Get the element's coordinates.
        Args:
            percentage(bool): if True, return the coordinates as fractions of the screen size; if False, return absolute coordinates
"""
global driver
self.get()
w, h = driver.window_size()
rect = driver(**self.kwargs).bounds
x = rect.x + rect.width / 2
y = rect.y + rect.height / 2
if percentage is True:
return round(x / w, 6), round(y / h, 6)
elif percentage is False:
return x, y
def exists(self):
"""
        Check whether the element exists.
"""
global driver
if "index" in self.kwargs:
return True if len(self.find_elements()) > 0 else False
else:
return True if driver(**self.kwargs).exists and driver(**self.kwargs).displayed else False
def scroll(self, direction='visible', distance=1.0):
"""
        Scroll to bring the element into view.
Args:
direction (str): one of "visible", "up", "down", "left", "right"
distance (float): swipe distance, only works when direction is not "visible"
Raises:
ValueError
        distance=1.0 means the swipe distance is the element's width or height multiplied by 1.0
"""
global driver
driver(**self.kwargs).scroll(direction=direction, distance=distance)
def scroll_search(self, click=False, direction="down"):
"""
        Scroll until the element is found.
        Args:
            click(bool): whether to tap the element once it is found
            direction(str): scroll direction, either 'down' or 'up'
"""
global driver
for i in range(20):
if self.exists() is True:
break
else:
                if direction == "down":
                    driver.swipe(0.5, 0.5, 0.5, 0.4)
                elif direction == "up":
driver.swipe(0.5, 0.5, 0.5, 0.6)
else:
raise ValueError("The direction parameter can only be 'down' or 'up'")
if click is True:
self.click(screenshots=True)
@staticmethod
def screenshots(w=None, h=None, describe=None):
"""
        Take a screenshot.
"""
global driver
screenshots_dir = screenshots_name(describe)
driver.screenshot().save(screenshots_dir)
multiple = driver.scale
w, h = multiple * w, multiple * h
processing(screenshots_dir, w, h)
def tap_hold(self, duration=1.0):
"""
        Long press the element.
Args:
duration (float): seconds of hold time
"""
global driver
driver(**self.kwargs).tap_hold(duration=duration)
def sliding(self, height=0.5, click=False, direction="down"):
"""
        Scroll the element to a desired position on the screen.
        Args:
            height(float): target vertical position for the element, in the range 0 ~ 1; defaults to the middle of the screen
            click(bool): if True, tap the element once it reaches the target position; by default only scroll
            direction(str): scroll direction, either 'down' or 'up'
"""
if 0 < height < 1:
height = height
height_max = height + 0.05
height_min = height - 0.05
            if direction == "down":
                self.scroll_search(direction="down")
            elif direction == "up":
self.scroll_search(direction="up")
else:
raise ValueError("The direction parameter can only be 'down' or 'up'")
x, y = self.get_position()
for i in range(20):
if height_min <= y <= height_max:
break
move_y = height - y
if move_y > 0:
if move_y >= 0.26:
driver.swipe(0.5, 0.5, 0.5, 0.6)
elif move_y < 0.26:
driver.swipe(0.5, 0.5, 0.5, 0.52, duration=0.5)
elif move_y < 0:
if move_y <= -0.26:
driver.swipe(0.5, 0.5, 0.5, 0.4)
elif move_y < 0.26:
driver.swipe(0.5, 0.5, 0.5, 0.48, duration=0.5)
x, y = self.get_position()
time.sleep(1)
if click is True:
self.click(screenshots=True)
else:
raise ValueError
| 31.507402
| 105
| 0.510508
|
8ba0d728786744163b80106ae3252d85466d2ebd
| 36,547
|
py
|
Python
|
orbitx/physics/engine.py
|
Spoopidy/orbitx
|
701250d06bfeab3f624fe4ab8b5feea19039e088
|
[
"MIT"
] | null | null | null |
orbitx/physics/engine.py
|
Spoopidy/orbitx
|
701250d06bfeab3f624fe4ab8b5feea19039e088
|
[
"MIT"
] | null | null | null |
orbitx/physics/engine.py
|
Spoopidy/orbitx
|
701250d06bfeab3f624fe4ab8b5feea19039e088
|
[
"MIT"
] | null | null | null |
"""This is the core of the OrbitX physics engine. Here, we simulate the state
of the solar system over time. To do this, we start a thread running in the
background to run the simulation, while our main thread handles all other
user interaction.
I've tried to provide a clean interface to this module, so that you don't have
to mess with the internals too much. Unfortunately, the internals are a bit
tied together, and can break in unforeseen ways if changed without
understanding how all the bits fit together.
Get in contact with me if you want to add new functionality! I'm more than
happy to help :)"""
import collections
import functools
import logging
import threading
import time
import warnings
from typing import Callable, List, Optional, Tuple, NamedTuple, Union
import numpy as np
import scipy.integrate
import scipy.spatial
import scipy.special
from google.protobuf.text_format import MessageToString
from orbitx.physics import calc
from orbitx import common
from orbitx.network import Request
from orbitx.orbitx_pb2 import PhysicalState
from orbitx.data_structures import protos, Entity, Navmode, PhysicsState, \
_FIELD_ORDERING
SOLUTION_CACHE_SIZE = 2
warnings.simplefilter('error') # Raise exception on numpy RuntimeWarning
scipy.special.seterr(all='raise')
log = logging.getLogger()
TIME_ACC_TO_BOUND = {time_acc.value: time_acc.accurate_bound
for time_acc in common.TIME_ACCS}
class TimeAccChange(NamedTuple):
"""Describes when the time acc of the simulation changes, and what to."""
time_acc: float
start_simtime: float
class PhysicsEngine:
"""Physics Engine class. Encapsulates simulating physical state.
    Methods beginning with an underscore are not part of the API and may change!
Example usage:
pe = PhysicsEngine(flight_savefile)
state = pe.get_state()
pe.handle_request(Request(ident=..., ...)) # Change some state.
# Simulates 20 seconds:
state = pe.get_state(requested_t=20)
This class will start a background thread to simulate physics when __init__
is called. This background thread may restart at arbitrary times.
    This class is designed to be accessed from the main thread by methods that
don't begin with an underscore, so thread synchronization between the main
thread and the background solutions thread is done with this assumption in
mind. If this assumption changes, change thread synchronization code very
deliberately and carefully! Specifically, if you're in spacesim, feel free
to hit me (Patrick) up, and I can help.
"""
# Increasing this constant results in the simulation being faster to
    # compute but a less accurate approximation. If Phobos starts crashing
# into Mars, tweak this downwards.
MAX_STEP_SIZE = 100
def __init__(self, physical_state: PhysicsState):
# Controls access to self._solutions. If anything changes that is
# related to self._solutions, this condition variable should be
# notified. Currently, that's just if self._solutions or
# self._last_simtime changes.
self._solutions_cond = threading.Condition()
self._solutions: collections.deque
self._simthread: Optional[threading.Thread] = None
self._simthread_exception: Optional[Exception] = None
self._last_physical_state: PhysicalState
self._last_monotime: float = time.monotonic()
self._last_simtime: float
self._time_acc_changes: collections.deque
self.set_state(physical_state)
def _simtime(self, requested_t=None):
"""Gets simulation time, accounting for time acc and elapsed time."""
# During runtime, strange things will happen if you mix calling
# this with None (like from orbitx.py) or with values (like in test.py)
if requested_t is None:
# "Alpha time" refers to time in the real world
# (just as the spacesim wiki defines it).
alpha_time_elapsed = max(
time.monotonic() - self._last_monotime,
0.0001
)
self._last_monotime = time.monotonic()
simtime = self._last_simtime
assert self._time_acc_changes
# This while loop will increment simtime and decrement
# time_elapsed correspondingly until the second time acc change
# starts farther in the future than we will increment simtime.
while len(self._time_acc_changes) > 1 and \
self._time_acc_changes[1].start_simtime < (
simtime + self._time_acc_changes[0].time_acc *
alpha_time_elapsed):
remaining_simtime = \
self._time_acc_changes[1].start_simtime - simtime
simtime = self._time_acc_changes[1].start_simtime
alpha_time_elapsed -= \
remaining_simtime / self._time_acc_changes[0].time_acc
# We've advanced past self._time_acc_changes[0],
# we can forget it now.
self._time_acc_changes.popleft()
# Now we will just advance partway into the span of time
            # between self._time_acc_changes[0].start_simtime and [1].start_simtime.
simtime += alpha_time_elapsed * self._time_acc_changes[0].time_acc
requested_t = simtime
with self._solutions_cond:
self._last_simtime = requested_t
self._solutions_cond.notify_all()
return requested_t
def _stop_simthread(self):
if self._simthread is not None:
with self._solutions_cond:
self._stopping_simthread = True
self._solutions_cond.notify_all()
self._simthread.join()
def _start_simthread(self, t0: float, y0: PhysicsState) -> None:
if round(y0.time_acc) == 0:
# We've paused the simulation. Don't start a new simthread
log.info('Pausing simulation')
return
# We don't need to synchronize self._last_simtime or
# self._solutions here, because we just stopped the background
# simulation thread only a few lines ago.
self._last_simtime = t0
# This double-ended queue should always have at least one element in
# it, and the first element should have a start_simtime less
# than self._last_simtime.
self._time_acc_changes = collections.deque(
[TimeAccChange(time_acc=y0.time_acc,
start_simtime=y0.timestamp)]
)
# Essentially just a cache of ODE solutions.
self._solutions = collections.deque(maxlen=SOLUTION_CACHE_SIZE)
self._simthread = threading.Thread(
target=self._simthread_target,
args=(t0, y0),
name=f'simthread t={round(t0)} acc={y0.time_acc}',
daemon=True
)
self._stopping_simthread = False
# Fork self._simthread into the background.
self._simthread.start()
def handle_requests(self, requests: List[Request], requested_t=None):
requested_t = self._simtime(requested_t)
if len(requests) == 0:
return
if len(requests) and requests[0].ident == Request.TIME_ACC_SET:
# Immediately change the time acceleration, don't wait for the
# simulation to catch up. This deals with the case where we're at
# 100,000x time acc, and the program seems frozen for the user and
# they try lowering time acc. We should immediately be able to
# restart simulation at a lower time acc without any waiting.
if len(self._solutions) == 0:
# We haven't even simulated any solutions yet.
requested_t = self._last_physical_state.timestamp
else:
requested_t = min(self._solutions[-1].t_max, requested_t)
if len(self._solutions) == 0:
y0 = PhysicsState(None, self._last_physical_state)
else:
y0 = self.get_state(requested_t)
for request in requests:
if request.ident == Request.NOOP:
# We don't care about these requests
continue
y0 = _one_request(request, y0)
if request.ident == Request.TIME_ACC_SET:
assert request.time_acc_set >= 0
self._time_acc_changes.append(
TimeAccChange(time_acc=y0.time_acc,
start_simtime=y0.timestamp)
)
self.set_state(y0)
def set_state(self, physical_state: PhysicsState):
self._stop_simthread()
physical_state = _reconcile_entity_dynamics(physical_state)
self._artificials = np.where(
np.array([
entity.artificial
for entity in physical_state]) >= 1)[0]
# We keep track of the PhysicalState because our simulation
# only simulates things that change like position and velocity,
# not things that stay constant like names and mass.
# self._last_physical_state contains these constants.
self._last_physical_state = physical_state.as_proto()
self.R = np.array([entity.r for entity in physical_state])
self.M = np.array([entity.mass for entity in physical_state])
self._start_simthread(physical_state.timestamp, physical_state)
def get_state(self, requested_t=None) -> PhysicsState:
"""Return the latest physical state of the simulation."""
requested_t = self._simtime(requested_t)
# Wait until there is a solution for our requested_t. The .wait_for()
# call will block until a new ODE solution is created.
with self._solutions_cond:
self._last_simtime = requested_t
self._solutions_cond.wait_for(
# Wait until we're paused, there's a solution, or an exception.
lambda:
self._last_physical_state.time_acc == 0 or
(len(self._solutions) != 0 and
self._solutions[-1].t_max >= requested_t) or
self._simthread_exception is not None
)
# Check if the simthread crashed
if self._simthread_exception is not None:
raise self._simthread_exception
if self._last_physical_state.time_acc == 0:
# We're paused, so there are no solutions being generated.
# Exit this 'with' block and release our _solutions_cond lock.
pass
else:
# We can't integrate backwards, so if integration has gone
# beyond what we need, fail early.
assert requested_t >= self._solutions[0].t_min, \
(self._solutions[0].t_min, self._solutions[-1].t_max)
for soln in self._solutions:
if soln.t_min <= requested_t <= soln.t_max:
solution = soln
if self._last_physical_state.time_acc == 0:
# We're paused, so return the only state we have.
return PhysicsState(None, self._last_physical_state)
else:
# We have a solution, return it.
newest_state = PhysicsState(
solution(requested_t), self._last_physical_state
)
newest_state.timestamp = requested_t
return newest_state
class RestartSimulationException(Exception):
"""A request to restart the simulation with new t and y."""
def __init__(self, t: float, y: PhysicsState):
self.t = t
self.y = y
def _simthread_target(self, t, y):
while True:
try:
self._run_simulation(t, y)
if self._stopping_simthread:
return
except PhysicsEngine.RestartSimulationException as e:
t = e.t
y = e.y
log.info(f'Simulation restarted itself at {t}.')
except Exception as e:
log.error(f'simthread got exception {repr(e)}.')
self._simthread_exception = e
with self._solutions_cond:
self._solutions_cond.notify_all()
return
def _derive(self, t: float, y_1d: np.ndarray,
pass_through_state: PhysicalState) -> np.ndarray:
"""
y_1d =
[X, Y, VX, VY, Heading, Spin, Fuel, Throttle, LandedOn, Broken] +
SRB_time_left + time_acc (these are both single values)
returns the derivative of y_1d, i.e.
[VX, VY, AX, AY, Spin, 0, Fuel consumption, 0, 0, 0] + -constant + 0
(zeroed-out fields are changed elsewhere)
!!!!!!!!!!! IMPORTANT !!!!!!!!!!!
        This function should return a DERIVATIVE. The scipy.integrate.solve_ivp function
will do the rest of the work of the simulation, this function just
describes how things _move_.
At its most basic level, this function takes in the _position_ of
everything (plus some random stuff), and returns the _velocity_ of
everything (plus some random stuff).
        Essentially, scipy.integrate.solve_ivp does this calculation:
new_positions_of_system = t_delta * _derive(
current_t_of_system,
current_y_of_system)
"""
# Note: we create this y as a PhysicsState for convenience, but if you
# set any values of y, the changes will be discarded! The only way they
# will be propagated out of this function is by numpy using the return
# value of this function as a derivative, as explained above.
# If you want to set values in y, look at _reconcile_entity_dynamics.
y = PhysicsState(y_1d, pass_through_state)
acc_matrix = calc.grav_acc(y.X, y.Y, self.M, y.Fuel)
zeros = np.zeros(y._n)
fuel_cons = np.zeros(y._n)
# Engine thrust and fuel consumption
for artif_index in self._artificials:
if y[artif_index].fuel > 0 and y[artif_index].throttle > 0:
# We have fuel remaining, calculate thrust
entity = y[artif_index]
capability = common.craft_capabilities[entity.name]
fuel_cons[artif_index] = \
-abs(capability.fuel_cons * entity.throttle)
eng_thrust = capability.thrust * entity.throttle * \
calc.heading_vector(entity.heading)
mass = entity.mass + entity.fuel
if entity.name == common.AYSE and \
y[common.HABITAT].landed_on == common.AYSE:
# It's bad that this is hardcoded, but it's also the only
# place that this comes up so IMO it's not too bad.
hab = y[common.HABITAT]
mass += hab.mass + hab.fuel
eng_acc = eng_thrust / mass
acc_matrix[artif_index] += eng_acc
# And SRB thrust
srb_usage = 0
try:
if y.srb_time >= 0:
hab_index = y._name_to_index(common.HABITAT)
hab = y[hab_index]
srb_acc = common.SRB_THRUST / (hab.mass + hab.fuel)
srb_acc_vector = srb_acc * calc.heading_vector(hab.heading)
acc_matrix[hab_index] += srb_acc_vector
srb_usage = -1
except PhysicsState.NoEntityError:
# The Habitat doesn't exist.
pass
# Drag effects
craft = y.craft
if craft is not None:
craft_index = y._name_to_index(y.craft)
drag_acc = calc.drag(y)
acc_matrix[craft_index] -= drag_acc
# Centripetal acceleration to keep landed entities glued to each other.
landed_on = y.LandedOn
for landed_i in landed_on:
lander = y[landed_i]
ground = y[landed_on[landed_i]]
centripetal_acc = (lander.pos - ground.pos) * ground.spin ** 2
acc_matrix[landed_i] = \
acc_matrix[landed_on[landed_i]] - centripetal_acc
# Sets velocity and spin of a couple more entities.
# If you want to set the acceleration of an entity, do it above and
# keep that logic in _derive. If you want to set the velocity and spin
# or any other fields that an Entity has, you should put that logic in
# this _reconcile_entity_dynamics helper.
y = _reconcile_entity_dynamics(y)
return np.concatenate((
y.VX, y.VY, np.hsplit(acc_matrix, 2), y.Spin,
zeros, fuel_cons, zeros, zeros, zeros, np.array([srb_usage, 0])
), axis=None)
def _run_simulation(self, t: float, y: PhysicsState) -> None:
# An overview of how time is managed:
#
# self._last_simtime is the main thread's latest idea of
        # what the current time is in the simulation. On every call to
        # get_state(), self._last_simtime is incremented by the
# amount of time that passed since the last call to get_state(),
# factoring in time_acc
#
# self._solutions is a fixed-size queue of ODE solutions.
# Each element has an attribute, t_max, which describes the largest
# time that the solution can be evaluated at and still be accurate.
# The highest such t_max should always be larger than the current
# simulation time, i.e. self._last_simtime
proto_state = y._proto_state
while not self._stopping_simthread:
derive_func = functools.partial(
self._derive, pass_through_state=proto_state)
events: List[Event] = [
CollisionEvent(y, self.R), HabFuelEvent(y), LiftoffEvent(y),
SrbFuelEvent()
]
if y.craft is not None:
events.append(HighAccEvent(
derive_func,
self._artificials,
TIME_ACC_TO_BOUND[round(y.time_acc)],
y.time_acc,
len(y)))
ivp_out = scipy.integrate.solve_ivp(
fun=derive_func,
t_span=[t, t + min(y.time_acc, 10 * self.MAX_STEP_SIZE)],
# solve_ivp requires a 1D y0 array
y0=y.y0(),
events=events,
dense_output=True,
max_step=self.MAX_STEP_SIZE
)
if not ivp_out.success:
# Integration error
raise Exception(ivp_out.message)
# When we create a new solution, let other people know.
with self._solutions_cond:
# If adding another solution to our max-sized deque would drop
# our oldest solution, and the main thread is still asking for
# state in the t interval of our oldest solution, take a break
# until the main thread has caught up.
self._solutions_cond.wait_for(
lambda:
len(self._solutions) < SOLUTION_CACHE_SIZE or
self._last_simtime > self._solutions[0].t_max or
self._stopping_simthread
)
if self._stopping_simthread:
break
# self._solutions contains ODE solutions for the interval
# [self._solutions[0].t_min, self._solutions[-1].t_max].
self._solutions.append(ivp_out.sol)
self._solutions_cond.notify_all()
y = PhysicsState(ivp_out.y[:, -1], proto_state)
t = ivp_out.t[-1]
if ivp_out.status > 0:
log.info(f'Got event: {ivp_out.t_events} at t={t}.')
for index, event_t in enumerate(ivp_out.t_events):
if len(event_t) == 0:
# If this event didn't occur, then event_t == []
continue
event = events[index]
if isinstance(event, CollisionEvent):
                    # Collision, simulation ended. Handle it and continue.
assert len(ivp_out.t_events[0]) == 1
assert len(ivp_out.t) >= 2
y = _collision_decision(t, y, events[0])
y = _reconcile_entity_dynamics(y)
if isinstance(event, HabFuelEvent):
# Something ran out of fuel.
for artificial_index in self._artificials:
artificial = y[artificial_index]
if round(artificial.fuel) != 0:
continue
log.info(f'{artificial.name} ran out of fuel.')
# This craft is out of fuel, the next iteration
# won't consume any fuel. Set throttle to zero.
artificial.throttle = 0
# Set fuel to a negative value, so it doesn't
# trigger the event function.
artificial.fuel = 0
if isinstance(event, LiftoffEvent):
# A craft has a TWR > 1
craft = y.craft_entity()
log.info(
'We have liftoff of the '
f'{craft.name} from {craft.landed_on} at {t}.')
craft.landed_on = ''
if isinstance(event, SrbFuelEvent):
# SRB fuel exhaustion.
log.info('SRB exhausted.')
y.srb_time = common.SRB_EMPTY
if isinstance(event, HighAccEvent):
# The acceleration acting on the craft is high, might
# result in inaccurate results. SLOOWWWW DOWWWWNNNN.
slower_time_acc_index = list(
TIME_ACC_TO_BOUND.keys()
).index(round(y.time_acc)) - 1
assert slower_time_acc_index >= 0
slower_time_acc = \
common.TIME_ACCS[slower_time_acc_index]
assert slower_time_acc.value > 0
log.info(
f'{y.time_acc} is too fast, '
f'slowing down to {slower_time_acc.value}')
# We should lower the time acc.
y.time_acc = slower_time_acc.value
raise PhysicsEngine.RestartSimulationException(t, y)
class Event:
"""Implements an event function. See numpy documentation for solve_ivp."""
# These two fields tell scipy to stop simulation when __call__ returns 0
terminal = True
direction = -1
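    # With terminal=True, solve_ivp stops integrating when the event function
    # crosses zero; direction=-1 means only crossings from positive to negative
    # values count as events.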
# Implement this in an event subclass
    def __call__(self, t: float, y_1d: np.ndarray) -> float:
...
class SrbFuelEvent(Event):
def __call__(self, t, y_1d) -> float:
"""Returns how much SRB burn time is left.
This will cause simulation to stop when SRB burn time reaches 0."""
return y_1d[PhysicsState.SRB_TIME_INDEX]
class HabFuelEvent(Event):
def __init__(self, initial_state: PhysicsState):
self.initial_state = initial_state
def __call__(self, t, y_1d) -> float:
"""Return a 0 only when throttle is nonzero."""
y = PhysicsState(y_1d, self.initial_state._proto_state)
for index, entity in enumerate(y._proto_state.entities):
if entity.artificial and y.Throttle[index] != 0:
return y.Fuel[index]
return np.inf
class CollisionEvent(Event):
def __init__(self, initial_state: PhysicsState, radii: np.ndarray):
self.initial_state = initial_state
self.radii = radii
def __call__(self, t, y_1d, return_pair=False
) -> Union[float, Tuple[int, int]]:
"""Returns a scalar, with 0 indicating a collision and a sign change
indicating a collision has happened."""
y = PhysicsState(y_1d, self.initial_state._proto_state)
n = len(self.initial_state)
# 2xN of (x, y) positions
posns = np.column_stack((y.X, y.Y))
# An n*n matrix of _altitudes_ between each entity
alt_matrix = (
scipy.spatial.distance.cdist(posns, posns) -
np.array([self.radii]) - np.array([self.radii]).T)
# To simplify calculations, an entity's altitude from itself is inf
np.fill_diagonal(alt_matrix, np.inf)
# For each pair of objects that have collisions disabled between
# them, also set their altitude to be inf
# If there are any entities landed on any other entities, ignore
# both the landed and the landee entity.
landed_on = y.LandedOn
for index in landed_on:
alt_matrix[index, landed_on[index]] = np.inf
alt_matrix[landed_on[index], index] = np.inf
if return_pair:
            # Returns the actual pair of indices instead of a scalar.
flattened_index = alt_matrix.argmin()
            # flattened_index is a value in the interval [0, n*n - 1].
# Turn it into a 2D index.
object_i = flattened_index // n
object_j = flattened_index % n
return object_i, object_j
else:
# solve_ivp invocation, return scalar
return np.min(alt_matrix)
class LiftoffEvent(Event):
def __init__(self, initial_state: PhysicsState):
self.initial_state = initial_state
def __call__(self, t, y_1d) -> float:
"""Return 0 when the craft is landed but thrusting enough to lift off,
and a positive value otherwise."""
y = PhysicsState(y_1d, self.initial_state._proto_state)
if y.craft is None:
# There is no craft, return early.
return np.inf
craft = y.craft_entity()
if not craft.landed():
# We don't have to lift off because we already lifted off.
return np.inf
planet = y[craft.landed_on]
if planet.artificial:
# If we're docked with another satellite, undocking is governed
# by other mechanisms. Ignore this.
return np.inf
thrust = common.craft_capabilities[craft.name].thrust * craft.throttle
if y.srb_time > 0 and y.craft == common.HABITAT:
thrust += common.SRB_THRUST
pos = craft.pos - planet.pos
# This is the G(m1*m2)/r^2 formula
weight = common.G * craft.mass * planet.mass / np.inner(pos, pos)
# This should be positive when the craft isn't thrusting enough, and
# zero when it is thrusting enough.
return max(0, common.LAUNCH_TWR - thrust / weight)
class HighAccEvent(Event):
def __init__(
self, derive: Callable[[float, np.ndarray], np.ndarray],
artificials: List[int], acc_bound: float, current_acc: float,
n_entities: int):
self.derive = derive
self.artificials = artificials
self.acc_bound = acc_bound
self.current_acc = round(current_acc)
self.ax_offset = n_entities * _FIELD_ORDERING['vx']
self.ay_offset = n_entities * _FIELD_ORDERING['vy']
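        # The flattened derivative vector is laid out field by field with
        # n_entities values per field, so these offsets mark where the x- and
        # y-acceleration blocks (the derivatives of vx and vy) begin.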
def __call__(self, t: float, y_1d: np.ndarray) -> float:
"""Return positive if the current time acceleration is accurate, zero
then negative otherwise."""
if self.current_acc == 1:
# If we can't lower the time acc, don't bother doing any work.
return np.inf
derive_result = self.derive(t, y_1d)
max_acc_mag = 0.0005 # A small nonzero value.
for artif_index in self.artificials:
            accel = (derive_result[self.ax_offset + artif_index],
                     derive_result[self.ay_offset + artif_index])
acc_mag = calc.fastnorm(accel)
if acc_mag > max_acc_mag:
max_acc_mag = acc_mag
        return max(self.acc_bound - max_acc_mag, 0)
def _reconcile_entity_dynamics(y: PhysicsState) -> PhysicsState:
"""Idempotent helper that sets velocities and spins of some entities.
This is in its own function because it has a couple calling points."""
# Navmode auto-rotation
if y.navmode != Navmode['Manual']:
craft = y.craft_entity()
craft.spin = calc.navmode_spin(y)
# Keep landed entities glued together
landed_on = y.LandedOn
for index in landed_on:
# If we're landed on something, make sure we move in lockstep.
lander = y[index]
ground = y[landed_on[index]]
if ground.name == common.AYSE and lander.name == common.HABITAT:
# Always put the Habitat at the docking port.
lander.pos = (
ground.pos -
calc.heading_vector(ground.heading) * (lander.r + ground.r))
else:
norm = lander.pos - ground.pos
unit_norm = norm / calc.fastnorm(norm)
lander.pos = ground.pos + unit_norm * (ground.r + lander.r)
lander.spin = ground.spin
lander.v = calc.rotational_speed(lander, ground)
return y
def _collision_decision(t, y, altitude_event):
e1_index, e2_index = altitude_event(
t, y.y0(), return_pair=True)
e1 = y[e1_index]
e2 = y[e2_index]
log.info(f'Collision at t={t} betwixt {e1.name} and {e2.name}')
if e1.artificial:
if e2.artificial:
if e2.dockable:
_docking(e1, e2, e2_index)
elif e1.dockable:
_docking(e2, e1, e1_index)
else:
_bounce(e1, e2)
else:
_land(e1, e2)
elif e2.artificial:
_land(e2, e1)
else:
_bounce(e1, e2)
return y
def _docking(e1, e2, e2_index):
    # e1 is an artificial object
    # e2 is the artificial, dockable object being docked onto (e.g. the space station)
norm = e1.pos - e2.pos
collision_angle = np.arctan2(norm[1], norm[0])
collision_angle = collision_angle % (2 * np.pi)
ANGLE_MIN = (e2.heading + 0.7 * np.pi) % (2 * np.pi)
ANGLE_MAX = (e2.heading + 1.3 * np.pi) % (2 * np.pi)
if collision_angle < ANGLE_MIN or collision_angle > ANGLE_MAX:
# add damage ?
_bounce(e1, e2)
return
log.info(f'Docking {e1.name} on {e2.name}')
e1.landed_on = e2.name
# Currently this flag has almost no effect.
e1.broken = bool(
calc.fastnorm(calc.rotational_speed(e1, e2) - e1.v) >
common.craft_capabilities[e1.name].hull_strength
)
# set right heading for future takeoff
e2_opposite = e2.heading + np.pi
e1.pos = e2.pos + (e1.r + e2.r) * calc.heading_vector(e2_opposite)
e1.heading = e2_opposite % (2 * np.pi)
e1.throttle = 0
e1.spin = e2.spin
e1.v = e2.v
def _bounce(e1, e2):
# Resolve a collision by:
# 1. calculating positions and velocities of the two entities
# 2. do a 1D collision calculation along the normal between the two
# 3. recombine the velocity vectors
log.info(f'Bouncing {e1.name} and {e2.name}')
norm = e1.pos - e2.pos
unit_norm = norm / calc.fastnorm(norm)
# The unit tangent is perpendicular to the unit normal vector
unit_tang = np.asarray([-unit_norm[1], unit_norm[0]])
# Calculate both normal and tangent velocities for both entities
v1n = np.dot(unit_norm, e1.v)
v1t = np.dot(unit_tang, e1.v)
v2n = np.dot(unit_norm, e2.v)
v2t = np.dot(unit_tang, e2.v)
# Use https://en.wikipedia.org/wiki/Elastic_collision
# to find the new normal velocities (a 1D collision)
new_v1n = ((v1n * (e1.mass - e2.mass) + 2 * e2.mass * v2n) /
(e1.mass + e2.mass))
new_v2n = ((v2n * (e2.mass - e1.mass) + 2 * e1.mass * v1n) /
(e1.mass + e2.mass))
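    # These are the standard 1D elastic-collision results; they conserve both
    # momentum and kinetic energy along the collision normal, while the
    # tangential components are left unchanged below.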
# Calculate new velocities
e1.v = new_v1n * unit_norm + v1t * unit_tang
e2.v = new_v2n * unit_norm + v2t * unit_tang
def _land(e1, e2):
# e1 is an artificial object
    # if 2 artificial objects collide (habitat, space station)
    # or a small asteroid collision (needs deletion), handle later
log.info(f'Landing {e1.name} on {e2.name}')
assert e2.artificial is False
e1.landed_on = e2.name
# Currently does nothing
e1.broken = bool(
calc.fastnorm(calc.rotational_speed(e1, e2) - e1.v) >
common.craft_capabilities[e1.name].hull_strength
)
# set right heading for future takeoff
norm = e1.pos - e2.pos
e1.heading = np.arctan2(norm[1], norm[0])
e1.throttle = 0
e1.spin = e2.spin
e1.v = calc.rotational_speed(e1, e2)
def _one_request(request: Request, y0: PhysicsState) \
-> PhysicsState:
"""Interface to set habitat controls.
Use an argument to change habitat throttle or spinning, and simulation
will restart with this new information."""
log.info(f'At simtime={y0.timestamp}, '
f'Got command {MessageToString(request, as_one_line=True)}')
if request.ident != Request.TIME_ACC_SET:
# Reveal the type of y0.craft as str (not None).
assert y0.craft is not None
if request.ident == Request.HAB_SPIN_CHANGE:
if y0.navmode != Navmode['Manual']:
# We're in autopilot, ignore this command
return y0
craft = y0.craft_entity()
if not craft.landed():
craft.spin += request.spin_change
elif request.ident == Request.HAB_THROTTLE_CHANGE:
y0.craft_entity().throttle += request.throttle_change
elif request.ident == Request.HAB_THROTTLE_SET:
y0.craft_entity().throttle = request.throttle_set
elif request.ident == Request.TIME_ACC_SET:
assert request.time_acc_set >= 0
y0.time_acc = request.time_acc_set
elif request.ident == Request.ENGINEERING_UPDATE:
# Multiply this value by 100, because OrbitV considers engines at
# 100% to be 100x the maximum thrust.
common.craft_capabilities[common.HABITAT] = \
common.craft_capabilities[common.HABITAT]._replace(
thrust=100 * request.engineering_update.max_thrust)
hab = y0[common.HABITAT]
ayse = y0[common.AYSE]
hab.fuel = request.engineering_update.hab_fuel
ayse.fuel = request.engineering_update.ayse_fuel
y0[common.HABITAT] = hab
y0[common.AYSE] = ayse
if request.engineering_update.module_state == \
Request.DETACHED_MODULE and \
common.MODULE not in y0._entity_names and \
not hab.landed():
# If the Habitat is freely floating and engineering asks us to
# detach the Module, spawn in the Module.
module = Entity(protos.Entity(
name=common.MODULE, mass=100, r=10, artificial=True))
module.pos = hab.pos - (module.r + hab.r) * \
calc.heading_vector(hab.heading)
module.v = calc.rotational_speed(module, hab)
y0_proto = y0.as_proto()
y0_proto.entities.extend([module.proto])
y0 = PhysicsState(None, y0_proto)
elif request.ident == Request.UNDOCK:
habitat = y0[common.HABITAT]
if habitat.landed_on == common.AYSE:
ayse = y0[common.AYSE]
habitat.landed_on = ''
norm = habitat.pos - ayse.pos
unit_norm = norm / calc.fastnorm(norm)
habitat.v += unit_norm * common.UNDOCK_PUSH
habitat.spin = ayse.spin
y0[common.HABITAT] = habitat
elif request.ident == Request.REFERENCE_UPDATE:
y0.reference = request.reference
elif request.ident == Request.TARGET_UPDATE:
y0.target = request.target
elif request.ident == Request.LOAD_SAVEFILE:
y0 = common.load_savefile(common.savefile(request.loadfile))
elif request.ident == Request.NAVMODE_SET:
y0.navmode = Navmode(request.navmode)
if y0.navmode == Navmode['Manual']:
y0.craft_entity().spin = 0
elif request.ident == Request.PARACHUTE:
y0.parachute_deployed = request.deploy_parachute
elif request.ident == Request.IGNITE_SRBS:
if round(y0.srb_time) == common.SRB_FULL:
y0.srb_time = common.SRB_BURNTIME
return y0
| 41.017957
| 79
| 0.601992
|
5d6cd939328054b68131bd25e13798279af76ff5
| 3,289
|
py
|
Python
|
flexs/utils/sequence_utils.py
|
jarridrb/FLEXS
|
d51ecf6a640229e68d9158e27b25aa81b57a9ce2
|
[
"Apache-2.0"
] | null | null | null |
flexs/utils/sequence_utils.py
|
jarridrb/FLEXS
|
d51ecf6a640229e68d9158e27b25aa81b57a9ce2
|
[
"Apache-2.0"
] | null | null | null |
flexs/utils/sequence_utils.py
|
jarridrb/FLEXS
|
d51ecf6a640229e68d9158e27b25aa81b57a9ce2
|
[
"Apache-2.0"
] | null | null | null |
"""Utility functions for manipulating sequences."""
import random
from typing import List, Union
import numpy as np
AAS = "ILVAGMFYWEDQNHCRKSTP"
"""str: Amino acid alphabet for proteins (length 20 - no stop codon)."""
DB_AAS = "ARNDCQEGHILKMFPSTWYV"
RNAA = "UGCA"
"""str: RNA alphabet (4 base pairs)."""
DNAA = "ACGT"
"""str: DNA alphabet (4 base pairs)."""
DNA_NUPACK = "ATCG"
BA = "01"
"""str: Binary alphabet '01'."""
def construct_mutant_from_sample(
pwm_sample: np.ndarray, one_hot_base: np.ndarray
) -> np.ndarray:
"""Return one hot mutant, a utility function for some explorers."""
one_hot = np.zeros(one_hot_base.shape)
one_hot += one_hot_base
i, j = np.nonzero(pwm_sample) # this can be problematic for non-positive fitnesses
one_hot[i, :] = 0
one_hot[i, j] = 1
return one_hot
def string_to_one_hot(sequence: str, alphabet: str, seq_len: int = None) -> np.ndarray:
"""
Return the one-hot representation of a sequence string according to an alphabet.
Args:
        sequence: Sequence string to convert to one_hot representation.
        alphabet: Alphabet string (assigns each character an index).
        seq_len: Optional output length; rows beyond len(sequence) are left as zeros (defaults to len(sequence)).
Returns:
One-hot numpy array of shape `(len(sequence), len(alphabet))`.
"""
out = np.zeros((seq_len or len(sequence), len(alphabet)))
for i in range(len(sequence)):
out[i, alphabet.index(sequence[i])] = 1
return out
def one_hot_to_string(
one_hot: Union[List[List[int]], np.ndarray], alphabet: str
) -> str:
"""
Return the sequence string representing a one-hot vector according to an alphabet.
Args:
one_hot: One-hot of shape `(len(sequence), len(alphabet)` representing
a sequence.
alphabet: Alphabet string (assigns each character an index).
Returns:
Sequence string representation of `one_hot`.
"""
residue_idxs = np.argmax(one_hot, axis=1)
chars = []
for idx in residue_idxs:
if idx < len(alphabet):
chars.append(alphabet[idx])
return "".join(chars)
def generate_single_mutants(wt: str, alphabet: str) -> List[str]:
"""Generate all single mutants of `wt`."""
sequences = [wt]
for i in range(len(wt)):
tmp = list(wt)
for j in range(len(alphabet)):
tmp[i] = alphabet[j]
sequences.append("".join(tmp))
return sequences
def generate_random_sequences(length: int, number: int, alphabet: str) -> List[str]:
"""Generate random sequences of particular length."""
return [
"".join([random.choice(alphabet) for _ in range(length)]) for _ in range(number)
]
def generate_random_mutant(sequence: str, mu: float, alphabet: str) -> str:
"""
Generate a mutant of `sequence` where each residue mutates with probability `mu`.
So the expected value of the total number of mutations is `len(sequence) * mu`.
Args:
sequence: Sequence that will be mutated from.
mu: Probability of mutation per residue.
alphabet: Alphabet string.
Returns:
Mutant sequence string.
"""
mutant = []
for s in sequence:
if random.random() < mu:
mutant.append(random.choice(alphabet))
else:
mutant.append(s)
return "".join(mutant)
| 27.872881
| 88
| 0.645485
|
dcf388070c864a23e5da224a3aafce207877d63c
| 3,689
|
py
|
Python
|
examples/intermediate/coupled_cluster.py
|
utkarshdeorah/sympy
|
dcdf59bbc6b13ddbc329431adf72fcee294b6389
|
[
"BSD-3-Clause"
] | 8,323
|
2015-01-02T15:51:43.000Z
|
2022-03-31T13:13:19.000Z
|
examples/intermediate/coupled_cluster.py
|
utkarshdeorah/sympy
|
dcdf59bbc6b13ddbc329431adf72fcee294b6389
|
[
"BSD-3-Clause"
] | 15,102
|
2015-01-01T01:33:17.000Z
|
2022-03-31T22:53:13.000Z
|
examples/intermediate/coupled_cluster.py
|
utkarshdeorah/sympy
|
dcdf59bbc6b13ddbc329431adf72fcee294b6389
|
[
"BSD-3-Clause"
] | 4,490
|
2015-01-01T17:48:07.000Z
|
2022-03-31T17:24:05.000Z
|
#!/usr/bin/env python
"""
Calculates the Coupled-Cluster energy- and amplitude equations
See 'An Introduction to Coupled Cluster Theory' by
T. Daniel Crawford and Henry F. Schaefer III.
Other Resource : http://vergil.chemistry.gatech.edu/notes/sahan-cc-2010.pdf
"""
from sympy.physics.secondquant import (AntiSymmetricTensor, wicks,
F, Fd, NO, evaluate_deltas, substitute_dummies, Commutator,
simplify_index_permutations, PermutationOperator)
from sympy import (
symbols, Rational, latex, Dummy
)
pretty_dummies_dict = {
'above': 'cdefgh',
'below': 'klmno',
'general': 'pqrstu'
}
def get_CC_operators():
"""
Returns a tuple (T1,T2) of unique operators.
"""
i = symbols('i', below_fermi=True, cls=Dummy)
a = symbols('a', above_fermi=True, cls=Dummy)
t_ai = AntiSymmetricTensor('t', (a,), (i,))
ai = NO(Fd(a)*F(i))
i, j = symbols('i,j', below_fermi=True, cls=Dummy)
a, b = symbols('a,b', above_fermi=True, cls=Dummy)
t_abij = AntiSymmetricTensor('t', (a, b), (i, j))
abji = NO(Fd(a)*Fd(b)*F(j)*F(i))
T1 = t_ai*ai
T2 = Rational(1, 4)*t_abij*abji
return (T1, T2)
def main():
print()
print("Calculates the Coupled-Cluster energy- and amplitude equations")
print("See 'An Introduction to Coupled Cluster Theory' by")
print("T. Daniel Crawford and Henry F. Schaefer III")
print("Reference to a Lecture Series: http://vergil.chemistry.gatech.edu/notes/sahan-cc-2010.pdf")
print()
# setup hamiltonian
p, q, r, s = symbols('p,q,r,s', cls=Dummy)
f = AntiSymmetricTensor('f', (p,), (q,))
pr = NO(Fd(p)*F(q))
v = AntiSymmetricTensor('v', (p, q), (r, s))
pqsr = NO(Fd(p)*Fd(q)*F(s)*F(r))
H = f*pr + Rational(1, 4)*v*pqsr
print("Using the hamiltonian:", latex(H))
print("Calculating 4 nested commutators")
C = Commutator
T1, T2 = get_CC_operators()
T = T1 + T2
print("commutator 1...")
comm1 = wicks(C(H, T))
comm1 = evaluate_deltas(comm1)
comm1 = substitute_dummies(comm1)
T1, T2 = get_CC_operators()
T = T1 + T2
print("commutator 2...")
comm2 = wicks(C(comm1, T))
comm2 = evaluate_deltas(comm2)
comm2 = substitute_dummies(comm2)
T1, T2 = get_CC_operators()
T = T1 + T2
print("commutator 3...")
comm3 = wicks(C(comm2, T))
comm3 = evaluate_deltas(comm3)
comm3 = substitute_dummies(comm3)
T1, T2 = get_CC_operators()
T = T1 + T2
print("commutator 4...")
comm4 = wicks(C(comm3, T))
comm4 = evaluate_deltas(comm4)
comm4 = substitute_dummies(comm4)
print("construct Hausdorff expansion...")
eq = H + comm1 + comm2/2 + comm3/6 + comm4/24
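    # This is the Baker-Campbell-Hausdorff expansion of the similarity-
    # transformed Hamiltonian exp(-T) H exp(T); for a Hamiltonian with at most
    # two-body terms it terminates exactly after the fourth nested commutator,
    # so truncating the sum here introduces no error.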
eq = eq.expand()
eq = evaluate_deltas(eq)
eq = substitute_dummies(eq, new_indices=True,
pretty_indices=pretty_dummies_dict)
print("*********************")
print()
print("extracting CC equations from full Hbar")
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
print()
print("CC Energy:")
print(latex(wicks(eq, simplify_dummies=True,
keep_only_fully_contracted=True)))
print()
print("CC T1:")
eqT1 = wicks(NO(Fd(i)*F(a))*eq, simplify_kronecker_deltas=True, keep_only_fully_contracted=True)
eqT1 = substitute_dummies(eqT1)
print(latex(eqT1))
print()
print("CC T2:")
eqT2 = wicks(NO(Fd(i)*Fd(j)*F(b)*F(a))*eq, simplify_dummies=True, keep_only_fully_contracted=True, simplify_kronecker_deltas=True)
P = PermutationOperator
eqT2 = simplify_index_permutations(eqT2, [P(a, b), P(i, j)])
print(latex(eqT2))
if __name__ == "__main__":
main()
| 30.237705
| 134
| 0.632963
|
69153a15a6e5ad93aae97d1df0e6b39a894fefec
| 1,175
|
py
|
Python
|
pyvisdk/do/vm_failed_to_reset_event.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/do/vm_failed_to_reset_event.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/do/vm_failed_to_reset_event.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VmFailedToResetEvent(vim, *args, **kwargs):
'''This event records a failure to reset a virtual machine.'''
obj = vim.client.factory.create('{urn:vim25}VmFailedToResetEvent')
# do some validation checking...
if (len(args) + len(kwargs)) < 6:
        raise IndexError('Expected at least 6 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'reason', 'template', 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
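# Illustrative call (argument values are hypothetical):
#   event = VmFailedToResetEvent(vim, 'some reason', False, 1,
#                                '2011-01-01T00:00:00Z', 1, 'root')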
| 34.558824
| 124
| 0.605957
|
dceb3ca8137f8d011eae2afb506037cb8a6c601c
| 2,286
|
py
|
Python
|
figures/Fig11_het_het.py
|
gstonge/influential-groups
|
d828adeada32ad354f1af864407c66066db20144
|
[
"MIT"
] | null | null | null |
figures/Fig11_het_het.py
|
gstonge/influential-groups
|
d828adeada32ad354f1af864407c66066db20144
|
[
"MIT"
] | null | null | null |
figures/Fig11_het_het.py
|
gstonge/influential-groups
|
d828adeada32ad354f1af864407c66066db20144
|
[
"MIT"
] | null | null | null |
import pickle
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import loggamma
from gcm import *
from matplotlib.colors import LinearSegmentedColormap
color_list = ["#c7e9b4","#7fcdbb","#41b6c4","#1d91c0","#225ea8", "#0c2c84"]
newcm = LinearSegmentedColormap.from_list('ColorMap',
list(reversed(color_list[:-1])))
blackcm = LinearSegmentedColormap.from_list('BlackColorMap',['#1a1a1a','#1a1a1a'])
#Poisson membership and group size
nmax = 20
mmax = 100
m = np.arange(mmax+1)
n = np.arange(nmax+1)
gm = np.zeros(mmax+1)
gm[2:] = (m[2:]*1.)**(-3.)
gm /= np.sum(gm)
pn = np.zeros(nmax+1)
pn[2:] = (n[2:]*1.)**(-3.)
pn /= np.sum(pn)
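# Descriptive note: gm and pn above are truncated power laws with exponent -3
# (entries 0 and 1 are kept at zero), each normalized so that it sums to one.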
state_meta = get_state_meta(mmax, nmax, gm, pn)
#infection
trate = 1  # results below are independent of this rate
beta = lambda n,i,trate,nu: trate*i**nu
nu_c = bistability_threshold_safe(beta, gm, pn, min_params=(10**(-14),1),
max_params=(1,8))
#parameters
nu_list = np.linspace(0.9,4.1,100)
epsilon_list = np.logspace(-5,-1,100)
#get limit zeta
zeta_lim = []
for nu in nu_list:
inf_mat = infection_matrix(beta, nmax, args=(trate,nu))
zeta_lim.append(zeta_limit(inf_mat,state_meta))
print(nu,zeta_lim[-1])
#get zeta arr
zeta_arr = np.zeros((len(epsilon_list),len(nu_list)))
zeta_arr[:] = np.nan
for i,epsilon in enumerate(epsilon_list):
for j,nu in enumerate(nu_list):
inf_mat = infection_matrix(beta, nmax, args=(trate,nu))
sm_S,fni_S = optimize_sm(epsilon,state_meta)
sm_F,fni_F,_ = optimize_fni(epsilon, inf_mat, state_meta)
Phi_F = objective_function(fni_F,inf_mat,state_meta)
Phi_S = objective_function(fni_S,inf_mat,state_meta)
if Phi_F > 0 and Phi_S > 0:
zeta_arr[i,j] = Phi_F/Phi_S
nu_arr, eps_arr = np.meshgrid(nu_list, epsilon_list)
plt.contourf(nu_arr, eps_arr, np.log10(zeta_arr), cmap=newcm, zorder=-1)
plt.yscale('log')
plt.colorbar()
plt.contour(nu_arr, eps_arr, np.log10(zeta_arr), cmap=blackcm, levels=[0.])
plt.show()
#save data
results = dict()
results['nu_arr'] = nu_arr
results['eps_arr'] = eps_arr
results['zeta_arr'] = zeta_arr
results['nu_list'] = nu_list
results['zeta_lim'] = zeta_lim
with open('./dat/Fig10_het_het.pk', 'wb') as filename:
pickle.dump(results,filename)
| 31.315068
| 82
| 0.680227
|
b93fd46247beb3860f0f09becc68001108efc70c
| 24,685
|
py
|
Python
|
mannequinchallenge/models/networks.py
|
plarr2020-team1/mannequinchallenge
|
4aff68aedad8619a2ec557f9162cc9692181318c
|
[
"Apache-2.0"
] | null | null | null |
mannequinchallenge/models/networks.py
|
plarr2020-team1/mannequinchallenge
|
4aff68aedad8619a2ec557f9162cc9692181318c
|
[
"Apache-2.0"
] | 1
|
2020-07-05T10:24:10.000Z
|
2020-07-06T10:30:17.000Z
|
mannequinchallenge/models/networks.py
|
plarr2020-team1/mannequinchallenge
|
4aff68aedad8619a2ec557f9162cc9692181318c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import numpy as np
import functools
###############################################################################
# Functions
###############################################################################
EPSILON = 1e-6
def gradient(input, do_normalize=False):
if input.dim() == 2:
D_ry = input[1:, :]
D_ly = input[:-1, :]
D_rx = input[:, 1:]
D_lx = input[:, :-1]
elif input.dim() == 3:
D_ry = input[:, 1:, :]
D_ly = input[:, :-1, :]
D_rx = input[:, :, 1:]
D_lx = input[:, :, :-1]
elif input.dim() == 4:
D_ry = input[:, :, 1:, :]
D_ly = input[:, :, :-1, :]
D_rx = input[:, :, :, 1:]
D_lx = input[:, :, :, :-1]
Dx = D_rx - D_lx
Dy = D_ry - D_ly
if do_normalize:
Dx = Dx / (D_rx + D_lx + EPSILON)
Dy = Dy / (D_ry + D_ly + EPSILON)
return Dx, Dy
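# Minimal usage sketch for `gradient` (hypothetical helper, not called anywhere in
# this module): forward differences shrink the differentiated axis by one, and
# `do_normalize=True` divides each difference by the local sum of its neighbours.
def _gradient_usage_example():
    depth = torch.rand(2, 1, 8, 8)                    # (batch, channel, H, W)
    dx, dy = gradient(depth)                          # shapes (2, 1, 8, 7) and (2, 1, 7, 8)
    ndx, ndy = gradient(depth, do_normalize=True)     # same shapes, normalized values
    return dx.shape, dy.shape, ndx.shape, ndy.shape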
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or
classname.find('Linear') != -1):
if init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError(
'initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
nn.init.normal_(m.weight.data, 1.0, gain)
nn.init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count -
opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = optim.lr_scheduler.StepLR(
optimizer, step_size=opt.lr_decay_epoch, gamma=0.5)
elif opt.lr_policy == 'plateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' %
                                  opt.lr_policy)
return scheduler
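# Illustrative sketch of how `get_scheduler` is driven by the options object; the
# `_Opt` fields below are assumptions that simply mirror the attribute names read
# by the 'lambda' branch above.
def _scheduler_usage_example(optimizer):
    class _Opt(object):
        lr_policy = 'lambda'
        epoch_count = 1
        niter = 100
        niter_decay = 100
    return get_scheduler(optimizer, _Opt())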
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(
nn.InstanceNorm2d, affine=False, track_running_stats=True)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' %
norm_type)
return norm_layer
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
if len(gpu_ids) > 0:
assert (torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids)
init_weights(net, init_type, gain=init_gain)
return net
def print_network(net_):
num_params = 0
for param in net_.parameters():
num_params += param.numel()
print(net_)
print('Total number of parameters: %d' % num_params)
##############################################################################
# Classes
##############################################################################
class LaplacianLayer(nn.Module):
def __init__(self):
super(LaplacianLayer, self).__init__()
w_nom = torch.FloatTensor([[0, -1, 0], [-1, 4, -1],
[0, -1, 0]]).view(1, 1, 3, 3)
w_den = torch.FloatTensor([[0, 1, 0], [1, 4, 1],
[0, 1, 0]]).view(1, 1, 3, 3)
if torch.cuda.is_available():
w_nom = w_nom.cuda()
w_den = w_den.cuda()
self.register_buffer('w_nom', w_nom)
self.register_buffer('w_den', w_den)
def forward(self, input, do_normalize=True):
assert (input.dim() == 2 or input.dim() == 3 or input.dim() == 4)
input_size = input.size()
if input.dim() == 4:
x = input.view(input_size[0] * input_size[1], 1, input_size[2],
input_size[3])
elif input.dim() == 3:
x = input.unsqueeze(1)
else:
x = input.unsqueeze(0).unsqueeze(0)
x_nom = torch.nn.functional.conv2d(
input=x, weight=autograd.Variable(self.w_nom), stride=1, padding=0)
if do_normalize:
x_den = torch.nn.functional.conv2d(
input=x, weight=autograd.Variable(self.w_den), stride=1, padding=0)
# x_den = x.std() + 1e-5
x = (x_nom.abs() / x_den)
else:
x = x_nom.abs()
if input.dim() == 4:
return x.view(input_size[0], input_size[1], input_size[2] - 2,
input_size[3] - 2)
elif input.dim() == 3:
return x.squeeze(1)
elif input.dim() == 2:
return x.squeeze(0).squeeze(0)
class JointLoss(nn.Module):
def __init__(self, opt):
super(JointLoss, self).__init__()
self.opt = opt
self.w_si_mse = 1.0
self.w_l1_rel = 1.0
self.w_confidence = 1.0
self.w_grad = 0.75
self.w_sm = 0.1
self.w_sm1 = 0.075
self.w_sm2 = 0.1
self.w_normal = 0.5
self.num_scales = 5
self.total_loss = None
self.laplacian_func = LaplacianLayer()
def LaplacianSmoothnessLoss(self, depth, img):
img_lap = self.laplacian_func(img, do_normalize=False)
depth_lap = self.laplacian_func(depth, do_normalize=False)
x = (-img_lap.mean(1)).exp() * (depth_lap)
return x.mean()
def compute_image_aware_2nd_smoothness_cost(self, depth, img):
depth_grad_x, depth_grad_y = gradient(depth, do_normalize=False)
depth_grad_x2, depth_grad_xy = gradient(
depth_grad_x, do_normalize=False)
depth_grad_yx, depth_grad_y2 = gradient(
depth_grad_y, do_normalize=False)
return depth_grad_x2.abs().mean() \
+ depth_grad_xy.abs().mean() + depth_grad_yx.abs().mean() + \
depth_grad_y2.abs().mean()
def compute_image_aware_1st_smoothness_cost(self, depth, img):
depth_grad_x, depth_grad_y = gradient(depth, do_normalize=False)
img_grad_x, img_grad_y = gradient(img, do_normalize=False)
if img.dim() == 3:
weight_x = torch.exp(-img_grad_x.abs().mean(0))
weight_y = torch.exp(-img_grad_y.abs().mean(0))
cost = ((depth_grad_x.abs() * weight_x)[:-1, :] +
(depth_grad_y.abs() * weight_y)[:, :-1]).mean()
else:
weight_x = torch.exp(-img_grad_x.abs().mean(1))
weight_y = torch.exp(-img_grad_y.abs().mean(1))
cost = ((depth_grad_x.abs() * weight_x)[:, :-1, :] +
(depth_grad_y.abs() * weight_y)[:, :, :-1]).mean()
return cost
def SecondOrderLoss(self, log_pred_d, mask, log_gt):
N = torch.sum(mask) + 1e-6
v_pred_lap = log_pred_d[:-2, :] - 2 * \
log_pred_d[1:-1, :] + log_pred_d[2:, :]
v_gt_lap = log_gt[:-2, :] - 2 * log_gt[1:-1, :] + log_gt[2:, :]
v_diff = torch.abs(v_pred_lap - v_gt_lap)
v_mask = torch.mul(torch.mul(mask[:-2, :], mask[2:, :]), mask[1:-1, :])
v_lap_term = torch.mul(v_diff, v_mask)
h_pred_lap = log_pred_d[:, :-2] - 2 * \
log_pred_d[:, 1:-1] + log_pred_d[:, 2:]
h_gt_lap = log_gt[:, :-2] - 2 * log_gt[:, 1:-1] + log_gt[:, 2:]
h_diff = torch.abs(h_pred_lap - h_gt_lap)
h_mask = torch.mul(torch.mul(mask[:, :-2], mask[:, 2:]), mask[:, 1:-1])
h_lap_term = torch.mul(h_diff, h_mask)
second_order_term = torch.sum(v_lap_term) + torch.sum(h_lap_term)
second_order_term = second_order_term / N
return second_order_term
def GradientLoss(self, log_prediction_d, mask, log_gt):
log_d_diff = log_prediction_d - log_gt
v_gradient = torch.abs(log_d_diff[:, :-2, :] - log_d_diff[:, 2:, :])
v_mask = torch.mul(mask[:, :-2, :], mask[:, 2:, :])
v_gradient = torch.mul(v_gradient, v_mask)
h_gradient = torch.abs(log_d_diff[:, :, :-2] - log_d_diff[:, :, 2:])
h_mask = torch.mul(mask[:, :, :-2], mask[:, :, 2:])
h_gradient = torch.mul(h_gradient, h_mask)
N = torch.sum(h_mask) + torch.sum(v_mask) + EPSILON
gradient_loss = torch.sum(h_gradient) + torch.sum(v_gradient)
gradient_loss = gradient_loss / N
return gradient_loss
def DeMonGradientLoss(self, prediction_d, mask, gt_d):
v_mask = torch.mul(mask[:, :-2, :], mask[:, 2:, :])
v_grad_pred = (prediction_d[:, :-2, :] - prediction_d[:, 2:, :]) / (
prediction_d[:, :-2, :] + prediction_d[:, 2:, :] + EPSILON)
v_grad_gt = (gt_d[:, :-2, :] - gt_d[:, 2:, :]) / (
gt_d[:, :-2, :] + gt_d[:, 2:, :] + EPSILON)
v_grad_term = v_mask * torch.abs(v_grad_pred - v_grad_gt)
h_mask = torch.mul(mask[:, :, :-2], mask[:, :, 2:])
h_grad_pred = (prediction_d[:, :, :-2] - prediction_d[:, :, 2:]) / (
prediction_d[:, :, :-2] + prediction_d[:, :, 2:] + EPSILON)
h_grad_gt = (gt_d[:, :, :-2] - gt_d[:, :, 2:]) / (
gt_d[:, :, :-2] + gt_d[:, :, 2:] + EPSILON)
h_grad_term = h_mask * torch.abs(h_grad_pred - h_grad_gt)
N = torch.sum(h_mask) + torch.sum(v_mask) + EPSILON
gradient_loss = torch.sum(v_grad_term) + torch.sum(h_grad_term)
gradient_loss = gradient_loss / N
return gradient_loss
def Data_Loss(self, log_prediction_d, mask, log_gt):
N = torch.sum(mask) + EPSILON
log_d_diff = log_prediction_d - log_gt
log_d_diff = torch.mul(log_d_diff, mask)
s1 = torch.sum(torch.pow(log_d_diff, 2)) / N
s2 = (torch.sum(log_d_diff) * torch.sum(log_d_diff)) / (N * N)
data_loss = s1 - s2
return data_loss
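    # Note on the expression above: with d_i = log_prediction_d - log_gt over the
    # N masked pixels, Data_Loss = (1/N) * sum_i d_i**2 - (1/N**2) * (sum_i d_i)**2.
    # Adding a constant to log_prediction_d (a global rescaling of depth) leaves the
    # value unchanged, i.e. this is a scale-invariant log-depth error.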
def Confidence_Loss(self, pred_confidence, mask, pred_d, gt_d):
        # align the prediction to the ground truth with a median-ratio scale factor
N = torch.sum(mask) + EPSILON
N = N.item()
if N > 0.5:
scale_factor = torch.median(
gt_d.data[mask.data > 0.1] /
(pred_d.data[mask.data > 0.1] + EPSILON)).item()
pred_d_aligned = pred_d * scale_factor
error = torch.abs(pred_d_aligned.data -
gt_d.data) / (gt_d.data + EPSILON)
error = torch.exp(-error * 2.0)
error_var = autograd.Variable(error, requires_grad=False)
u_loss = mask * torch.abs(pred_confidence - error_var)
confidence_term = torch.sum(u_loss) / N
else:
confidence_term = 0.0
return confidence_term
def Normal_Loss(self, pred_d, gt_d, mask, normalized_p_3d, p_3d_gt):
p_3d_pred = normalized_p_3d * pred_d.unsqueeze(1).repeat(1, 2, 1, 1)
x_mask = mask[:, 1:-1, :-2] * mask[:, 1:-1, 2:]
y_mask = mask[:, :-2, 1:-1] * mask[:, 2:, 1:-1]
final_mask = y_mask * x_mask
grad_x_pred = (pred_d[:, 1:-1, :-2] - pred_d[:, 1:-1, 2:]) / (
p_3d_pred[:, 0, 1:-1, :-2] - p_3d_pred[:, 0, 1:-1, 2:] + EPSILON)
grad_x_gt = (gt_d[:, 1:-1, :-2] - gt_d[:, 1:-1, 2:]) / (
p_3d_gt[:, 0, 1:-1, :-2] - p_3d_gt[:, 0, 1:-1, 2:] + EPSILON)
grad_y_pred = (pred_d[:, :-2, 1:-1] - pred_d[:, 2:, 1:-1]) / (
p_3d_pred[:, 1, :-2, 1:-1] - p_3d_pred[:, 1, 2:, 1:-1] + EPSILON)
grad_y_gt = (gt_d[:, :-2, 1:-1] - gt_d[:, 2:, 1:-1]) / (
p_3d_gt[:, 1, :-2, 1:-1] - p_3d_gt[:, 1, 2:, 1:-1] + EPSILON)
norm_pred = torch.sqrt(grad_x_pred * grad_x_pred +
grad_y_pred * grad_y_pred + 1.0) + EPSILON
norm_gt = torch.sqrt(grad_x_gt * grad_x_gt + grad_y_gt * grad_y_gt +
1.0) + EPSILON
dot_product = grad_x_gt * grad_x_pred + grad_y_gt * grad_y_pred + 1.0
dot_product = dot_product * final_mask
N = torch.sum(final_mask) + EPSILON
normal_term = dot_product / (norm_pred * norm_gt)
normal_term = 1.0 - torch.sum(normal_term) / N
return normal_term
def Weighted_Data_Loss(self, log_prediction_d, w_mask, log_gt):
log_d_diff = log_prediction_d - log_gt
wx_2 = torch.sum(w_mask * torch.pow(log_d_diff, 2))
wx = torch.sum(w_mask * log_d_diff)
w_sum = torch.sum(w_mask)
total_term = w_sum * wx_2 - torch.pow(wx, 2)
N = w_sum * w_sum + EPSILON
return total_term / N
def L1_inv_loss(self, pred_d_aligned, mask, depth_gt):
l1_inv_error = torch.abs(1. / (depth_gt + EPSILON) - 1. /
(EPSILON + pred_d_aligned))
l1_inv_error = l1_inv_error * mask
num_valid_pixels = torch.sum(mask) + EPSILON
return torch.sum(l1_inv_error) / num_valid_pixels
def L1RelLoss(self, pred_d_aligned, mask, depth_gt):
l1_rel_error = torch.abs(depth_gt - pred_d_aligned)
l1_rel_error = l1_rel_error * mask
l1_rel_error = l1_rel_error / \
torch.abs(depth_gt + pred_d_aligned + 1e-8)
num_valid_pixels = torch.sum(mask) + EPSILON
return torch.sum(l1_rel_error) / num_valid_pixels
def L1_rel_loss(self, pred_d_aligned, mask, depth_gt):
l1_rel_error = torch.abs(depth_gt - pred_d_aligned)
l1_rel_error = l1_rel_error * mask
l1_rel_error = l1_rel_error / torch.abs(depth_gt + 1e-8)
num_valid_pixels = torch.sum(mask) + EPSILON
return torch.sum(l1_rel_error) / num_valid_pixels
def compute_si_rmse(self, pred_log_d, targets):
gt_mask = targets['gt_mask']
log_d_gt = torch.log(targets['depth_gt'])
env_mask = targets['env_mask']
if torch.cuda.is_available():
gt_mask = gt_mask.cuda()
log_d_gt = log_d_gt.cuda()
env_mask = env_mask.cuda()
human_gt_mask = (1.0 - env_mask) * gt_mask
env_gt_mask = env_mask * gt_mask
si_rmse_full = 0.0
si_rmse_human = 0.0
si_rmse_env = 0.0
si_rmse_inter = 0.0
si_rmse_intra = 0.0
# compute full error
for i in range(0, gt_mask.size(0)):
mse_full = self.Data_Loss(pred_log_d[i, :, :], gt_mask[i, :, :],
log_d_gt[i, :, :])
mse_env = self.Data_Loss(pred_log_d[i, :, :], env_gt_mask[i, :, :],
log_d_gt[i, :, :])
mse_intra = self.Data_Loss(pred_log_d[i, :, :], human_gt_mask[i, :, :],
log_d_gt[i, :, :])
# compute human error
n_full = torch.sum(gt_mask[i, :, :])
n_human = torch.sum(human_gt_mask[i, :, :])
n_env = torch.sum(env_gt_mask[i, :, :])
log_diff = pred_log_d[i, :, :] - log_d_gt[i, :, :]
log_diff_mask = log_diff * gt_mask[i, :, :]
# full human error
sum_sq_log_diff = torch.sum(torch.pow(log_diff_mask, 2))
sum_log_diff = torch.sum(log_diff_mask)
per_pixel_error = n_full * torch.pow(
log_diff, 2) + sum_sq_log_diff - 2 * sum_log_diff * log_diff
per_pixel_error = per_pixel_error * human_gt_mask[i, :, :]
mse_human = torch.sum(per_pixel_error) / \
(n_human * n_full + EPSILON)
# inter class mse error
log_diff_env_mask = log_diff * env_gt_mask[i, :, :]
sum_sq_log_env_diff = torch.sum(torch.pow(log_diff_env_mask, 2))
sum_log_env_diff = torch.sum(log_diff_env_mask)
inter_error = n_env * torch.pow(
log_diff, 2) + sum_sq_log_env_diff - 2 * sum_log_env_diff * log_diff
inter_error = inter_error * human_gt_mask[i, :, :]
mse_inter = torch.sum(inter_error) / (n_human * n_env + EPSILON)
si_rmse_full += torch.sqrt(2.0 * mse_full)
si_rmse_human += torch.sqrt(mse_human)
si_rmse_env += torch.sqrt(2.0 * mse_env)
si_rmse_intra += torch.sqrt(2.0 * mse_intra)
si_rmse_inter += torch.sqrt(mse_inter)
return si_rmse_full, si_rmse_human, si_rmse_env, si_rmse_intra, si_rmse_inter
def compute_l1_rel_error(self, pred_d, targets):
gt_mask = targets['gt_mask']
d_gt = targets['depth_gt']
rel_full = 0.
for i in range(0, gt_mask.size(0)):
gt_d_np = d_gt[i, :, :].cpu().numpy()
pred_d_np = pred_d[i, :, :].cpu().numpy()
gt_mask_np = gt_mask[i, :, :].cpu().numpy()
scale_factor = np.linalg.lstsq(
np.expand_dims(pred_d_np[gt_mask_np > 1e-8], axis=-1),
gt_d_np[gt_mask_np > 1e-8])
scale_factor = scale_factor[0][0]
pred_d_aligned_np = pred_d_np * scale_factor
total_full_rel = np.sum(gt_mask_np * np.abs(gt_d_np - pred_d_aligned_np) /
(gt_d_np + EPSILON))
rel_full += total_full_rel / (np.sum(gt_mask_np) + EPSILON)
return rel_full
def compute_rmse_error(self, pred_d, targets):
gt_mask = targets['gt_mask']
d_gt = targets['depth_gt']
rmse_full = 0.
for i in range(0, gt_mask.size(0)):
gt_d_np = d_gt[i, :, :].cpu().numpy()
pred_d_np = pred_d[i, :, :].cpu().numpy()
gt_mask_np = gt_mask[i, :, :].cpu().numpy()
scale_factor = np.linalg.lstsq(
np.expand_dims(pred_d_np[gt_mask_np > 1e-8], axis=-1),
gt_d_np[gt_mask_np > 1e-8])
scale_factor = scale_factor[0][0]
pred_d_aligned_np = pred_d_np * scale_factor
total_full_rmse = np.sum(gt_mask_np *
np.square(gt_d_np - pred_d_aligned_np))
rmse_full += np.sqrt(total_full_rmse /
(np.sum(gt_mask_np) + EPSILON))
return rmse_full
def Data_Human_Loss(self, pred_log_d, gt_mask, human_gt_mask, log_d_gt):
n_full = torch.sum(gt_mask)
n_human = torch.sum(human_gt_mask)
log_diff = pred_log_d - log_d_gt
log_diff_mask = log_diff * gt_mask
sum_sq_log_diff = torch.sum(torch.pow(log_diff_mask, 2))
sum_log_diff = torch.sum(log_diff_mask)
inter_error = n_full * torch.pow(
log_diff, 2) + sum_sq_log_diff - 2 * sum_log_diff * log_diff
inter_error = inter_error * human_gt_mask
mse_human = torch.sum(inter_error) / (n_human * n_full + EPSILON)
mse_human = mse_human / 2.0
return mse_human
def __call__(self, input_images, log_pred_d_0, pred_confidence, targets):
log_pred_d_1 = log_pred_d_0[:, ::2, ::2]
log_pred_d_2 = log_pred_d_1[:, ::2, ::2]
log_pred_d_3 = log_pred_d_2[:, ::2, ::2]
log_pred_d_4 = log_pred_d_3[:, ::2, ::2]
input_0 = input_images
input_1 = input_0[:, :, ::2, ::2]
input_2 = input_1[:, :, ::2, ::2]
input_3 = input_2[:, :, ::2, ::2]
input_4 = input_3[:, :, ::2, ::2]
d_gt_0 = autograd.Variable(targets['depth_gt'].cuda(), requires_grad=False) if torch.cuda.is_available() else \
autograd.Variable(targets['depth_gt'], requires_grad=False)
log_d_gt_0 = torch.log(d_gt_0)
log_d_gt_1 = log_d_gt_0[:, ::2, ::2]
log_d_gt_2 = log_d_gt_1[:, ::2, ::2]
log_d_gt_3 = log_d_gt_2[:, ::2, ::2]
log_d_gt_4 = log_d_gt_3[:, ::2, ::2]
gt_mask = autograd.Variable(targets['gt_mask'].cuda(), requires_grad=False) if torch.cuda.is_available() else \
autograd.Variable(targets['gt_mask'], requires_grad=False)
human_mask = 1.0 - autograd.Variable(targets['env_mask'].cuda(), requires_grad=False) if \
torch.cuda.is_available() else 1.0 - autograd.Variable(targets['env_mask'], requires_grad=False)
human_gt_mask = human_mask * gt_mask
mask_0 = gt_mask
mask_1 = mask_0[:, ::2, ::2]
mask_2 = mask_1[:, ::2, ::2]
mask_3 = mask_2[:, ::2, ::2]
mask_4 = mask_3[:, ::2, ::2]
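        # The slicing above builds a five-level pyramid by keeping every other pixel,
        # so predictions, input images, ground-truth log depth and validity masks are
        # downsampled consistently for the multi-scale terms below.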
data_term = 0.0
grad_term = 0.0
sm_term = 0.0
confidence_term = 0.0
num_samples = mask_0.size(0)
for i in range(0, num_samples):
if self.opt.human_data_term > 0.1:
data_term += (self.w_si_mse / num_samples * self.Data_Loss(
log_pred_d_0[i, :, :], mask_0[i, :, :], log_d_gt_0[i, :, :]))
data_term += (self.w_si_mse / num_samples * 0.5 * self.Data_Human_Loss(
log_pred_d_0[i, :, :], mask_0[i,
:, :], human_gt_mask[i, :, :],
log_d_gt_0[i, :, :]))
else:
data_term += (self.w_si_mse / num_samples * 1.5 * self.Data_Loss(
log_pred_d_0[i, :, :], mask_0[i, :, :], log_d_gt_0[i, :, :]))
grad_term += self.w_grad * self.GradientLoss(log_pred_d_0, mask_0,
log_d_gt_0)
grad_term += self.w_grad * self.GradientLoss(log_pred_d_1, mask_1,
log_d_gt_1)
grad_term += self.w_grad * self.GradientLoss(log_pred_d_2, mask_2,
log_d_gt_2)
grad_term += self.w_grad * self.GradientLoss(log_pred_d_3, mask_3,
log_d_gt_3)
grad_term += self.w_grad * self.GradientLoss(log_pred_d_4, mask_4,
log_d_gt_4)
sm_term += self.w_sm1 * self.compute_image_aware_1st_smoothness_cost(
log_pred_d_0, input_0)
sm_term += (self.w_sm1 * 0.5 * self.compute_image_aware_1st_smoothness_cost(
log_pred_d_1, input_1))
sm_term += (self.w_sm1 * 0.25 * self.compute_image_aware_1st_smoothness_cost(
log_pred_d_2, input_2))
sm_term += (self.w_sm1 * 0.125 * self.compute_image_aware_1st_smoothness_cost(
log_pred_d_3, input_3))
sm_term += (self.w_sm1 * 0.0625 * self.compute_image_aware_1st_smoothness_cost(
log_pred_d_4, input_4))
sm_term += self.w_sm2 * \
self.LaplacianSmoothnessLoss(log_pred_d_0, input_0)
sm_term += self.w_sm2 * 0.5 * self.LaplacianSmoothnessLoss(
log_pred_d_1, input_1)
sm_term += self.w_sm2 * 0.25 * self.LaplacianSmoothnessLoss(
log_pred_d_2, input_2)
sm_term += self.w_sm2 * 0.125 * self.LaplacianSmoothnessLoss(
log_pred_d_3, input_3)
sm_term += self.w_sm2 * 0.0625 * self.LaplacianSmoothnessLoss(
log_pred_d_4, input_4)
print('data_term %f' % data_term.item())
print('grad_term %f' % grad_term.item())
print('sm_term %f' % sm_term.item())
total_loss = data_term + grad_term + sm_term + confidence_term
self.total_loss = total_loss
return total_loss.item()
def get_loss_var(self):
return self.total_loss
| 38.99684
| 119
| 0.553535
|
3246e9ee31f52ff280714f115025e7186b786ee1
| 68,346
|
py
|
Python
|
python/pyspark/sql/types.py
|
sslowikatpalantir/spark
|
f9dddebf019da89d8611e11e1f12bc8864c17419
|
[
"Apache-2.0"
] | null | null | null |
python/pyspark/sql/types.py
|
sslowikatpalantir/spark
|
f9dddebf019da89d8611e11e1f12bc8864c17419
|
[
"Apache-2.0"
] | null | null | null |
python/pyspark/sql/types.py
|
sslowikatpalantir/spark
|
f9dddebf019da89d8611e11e1f12bc8864c17419
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import decimal
import time
import datetime
import calendar
import json
import re
import base64
from array import array
import ctypes
if sys.version >= "3":
long = int
basestring = unicode = str
from py4j.protocol import register_input_converter
from py4j.java_gateway import JavaClass
from pyspark import SparkContext
from pyspark.serializers import CloudPickleSerializer
__all__ = [
"DataType", "NullType", "StringType", "BinaryType", "BooleanType", "DateType",
"TimestampType", "DecimalType", "DoubleType", "FloatType", "ByteType", "IntegerType",
"LongType", "ShortType", "ArrayType", "MapType", "StructField", "StructType"]
class DataType(object):
"""Base class for data types."""
def __repr__(self):
return self.__class__.__name__
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def typeName(cls):
return cls.__name__[:-4].lower()
def simpleString(self):
return self.typeName()
def jsonValue(self):
return self.typeName()
def json(self):
return json.dumps(self.jsonValue(),
separators=(',', ':'),
sort_keys=True)
def needConversion(self):
"""
        Does this type need conversion between a Python object and an internal SQL object?
This is used to avoid the unnecessary conversion for ArrayType/MapType/StructType.
"""
return False
def toInternal(self, obj):
"""
Converts a Python object into an internal SQL object.
"""
return obj
def fromInternal(self, obj):
"""
Converts an internal SQL object into a native Python object.
"""
return obj
# This singleton pattern does not work with pickle, you will get
# another object after pickle and unpickle
class DataTypeSingleton(type):
"""Metaclass for DataType"""
_instances = {}
def __call__(cls):
if cls not in cls._instances:
cls._instances[cls] = super(DataTypeSingleton, cls).__call__()
return cls._instances[cls]
class NullType(DataType):
"""Null type.
The data type representing None, used for the types that cannot be inferred.
"""
__metaclass__ = DataTypeSingleton
class AtomicType(DataType):
"""An internal type used to represent everything that is not
null, UDTs, arrays, structs, and maps."""
class NumericType(AtomicType):
"""Numeric data types.
"""
class IntegralType(NumericType):
"""Integral data types.
"""
__metaclass__ = DataTypeSingleton
class FractionalType(NumericType):
"""Fractional data types.
"""
class StringType(AtomicType):
"""String data type.
"""
__metaclass__ = DataTypeSingleton
class BinaryType(AtomicType):
"""Binary (byte array) data type.
"""
__metaclass__ = DataTypeSingleton
class BooleanType(AtomicType):
"""Boolean data type.
"""
__metaclass__ = DataTypeSingleton
class DateType(AtomicType):
"""Date (datetime.date) data type.
"""
__metaclass__ = DataTypeSingleton
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
def needConversion(self):
return True
def toInternal(self, d):
if d is not None:
return d.toordinal() - self.EPOCH_ORDINAL
def fromInternal(self, v):
if v is not None:
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
class TimestampType(AtomicType):
"""Timestamp (datetime.datetime) data type.
"""
__metaclass__ = DataTypeSingleton
def needConversion(self):
return True
def toInternal(self, dt):
if dt is not None:
seconds = (calendar.timegm(dt.utctimetuple()) if dt.tzinfo
else time.mktime(dt.timetuple()))
return int(seconds) * 1000000 + dt.microsecond
def fromInternal(self, ts):
if ts is not None:
# using int to avoid precision loss in float
return datetime.datetime.fromtimestamp(ts // 1000000).replace(microsecond=ts % 1000000)
class DecimalType(FractionalType):
"""Decimal (decimal.Decimal) data type.
The DecimalType must have fixed precision (the maximum total number of digits)
    and scale (the number of digits to the right of the decimal point). For example,
    (5, 2) can support values from -999.99 to 999.99.
    The precision can be up to 38; the scale must be less than or equal to the precision.
    When creating a DecimalType, the default precision and scale is (10, 0). When inferring
    schema from decimal.Decimal objects, it will be DecimalType(38, 18).
:param precision: the maximum total number of digits (default: 10)
:param scale: the number of digits on right side of dot. (default: 0)
"""
def __init__(self, precision=10, scale=0):
self.precision = precision
self.scale = scale
self.hasPrecisionInfo = True # this is public API
def simpleString(self):
return "decimal(%d,%d)" % (self.precision, self.scale)
def jsonValue(self):
return "decimal(%d,%d)" % (self.precision, self.scale)
def __repr__(self):
return "DecimalType(%d,%d)" % (self.precision, self.scale)
class DoubleType(FractionalType):
"""Double data type, representing double precision floats.
"""
__metaclass__ = DataTypeSingleton
class FloatType(FractionalType):
"""Float data type, representing single precision floats.
"""
__metaclass__ = DataTypeSingleton
class ByteType(IntegralType):
"""Byte data type, i.e. a signed integer in a single byte.
"""
def simpleString(self):
return 'tinyint'
class IntegerType(IntegralType):
"""Int data type, i.e. a signed 32-bit integer.
"""
def simpleString(self):
return 'int'
class LongType(IntegralType):
"""Long data type, i.e. a signed 64-bit integer.
If the values are beyond the range of [-9223372036854775808, 9223372036854775807],
please use :class:`DecimalType`.
"""
def simpleString(self):
return 'bigint'
class ShortType(IntegralType):
"""Short data type, i.e. a signed 16-bit integer.
"""
def simpleString(self):
return 'smallint'
class ArrayType(DataType):
"""Array data type.
:param elementType: :class:`DataType` of each element in the array.
:param containsNull: boolean, whether the array can contain null (None) values.
"""
def __init__(self, elementType, containsNull=True):
"""
>>> ArrayType(StringType()) == ArrayType(StringType(), True)
True
>>> ArrayType(StringType(), False) == ArrayType(StringType())
False
"""
assert isinstance(elementType, DataType),\
"elementType %s should be an instance of %s" % (elementType, DataType)
self.elementType = elementType
self.containsNull = containsNull
def simpleString(self):
return 'array<%s>' % self.elementType.simpleString()
def __repr__(self):
return "ArrayType(%s,%s)" % (self.elementType,
str(self.containsNull).lower())
def jsonValue(self):
return {"type": self.typeName(),
"elementType": self.elementType.jsonValue(),
"containsNull": self.containsNull}
@classmethod
def fromJson(cls, json):
return ArrayType(_parse_datatype_json_value(json["elementType"]),
json["containsNull"])
def needConversion(self):
return self.elementType.needConversion()
def toInternal(self, obj):
if not self.needConversion():
return obj
return obj and [self.elementType.toInternal(v) for v in obj]
def fromInternal(self, obj):
if not self.needConversion():
return obj
return obj and [self.elementType.fromInternal(v) for v in obj]
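# Small illustrative sketch (hypothetical helper, not part of the public API):
# element types that need conversion are mapped element-wise, e.g. dates are
# stored internally as days since the Unix epoch.
def _array_type_conversion_example():
    at = ArrayType(DateType())
    internal = at.toInternal([datetime.date(1970, 1, 2)])   # -> [1]
    return at.fromInternal(internal)                        # -> [datetime.date(1970, 1, 2)]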
class MapType(DataType):
"""Map data type.
:param keyType: :class:`DataType` of the keys in the map.
:param valueType: :class:`DataType` of the values in the map.
:param valueContainsNull: indicates whether values can contain null (None) values.
Keys in a map data type are not allowed to be null (None).
"""
def __init__(self, keyType, valueType, valueContainsNull=True):
"""
>>> (MapType(StringType(), IntegerType())
... == MapType(StringType(), IntegerType(), True))
True
>>> (MapType(StringType(), IntegerType(), False)
... == MapType(StringType(), FloatType()))
False
"""
assert isinstance(keyType, DataType),\
"keyType %s should be an instance of %s" % (keyType, DataType)
assert isinstance(valueType, DataType),\
"valueType %s should be an instance of %s" % (valueType, DataType)
self.keyType = keyType
self.valueType = valueType
self.valueContainsNull = valueContainsNull
def simpleString(self):
return 'map<%s,%s>' % (self.keyType.simpleString(), self.valueType.simpleString())
def __repr__(self):
return "MapType(%s,%s,%s)" % (self.keyType, self.valueType,
str(self.valueContainsNull).lower())
def jsonValue(self):
return {"type": self.typeName(),
"keyType": self.keyType.jsonValue(),
"valueType": self.valueType.jsonValue(),
"valueContainsNull": self.valueContainsNull}
@classmethod
def fromJson(cls, json):
return MapType(_parse_datatype_json_value(json["keyType"]),
_parse_datatype_json_value(json["valueType"]),
json["valueContainsNull"])
def needConversion(self):
return self.keyType.needConversion() or self.valueType.needConversion()
def toInternal(self, obj):
if not self.needConversion():
return obj
return obj and dict((self.keyType.toInternal(k), self.valueType.toInternal(v))
for k, v in obj.items())
def fromInternal(self, obj):
if not self.needConversion():
return obj
return obj and dict((self.keyType.fromInternal(k), self.valueType.fromInternal(v))
for k, v in obj.items())
class StructField(DataType):
"""A field in :class:`StructType`.
:param name: string, name of the field.
:param dataType: :class:`DataType` of the field.
:param nullable: boolean, whether the field can be null (None) or not.
    :param metadata: a dict from string to simple type that can be serialized to JSON automatically
"""
def __init__(self, name, dataType, nullable=True, metadata=None):
"""
>>> (StructField("f1", StringType(), True)
... == StructField("f1", StringType(), True))
True
>>> (StructField("f1", StringType(), True)
... == StructField("f2", StringType(), True))
False
"""
assert isinstance(dataType, DataType),\
"dataType %s should be an instance of %s" % (dataType, DataType)
assert isinstance(name, basestring), "field name %s should be string" % (name)
if not isinstance(name, str):
name = name.encode('utf-8')
self.name = name
self.dataType = dataType
self.nullable = nullable
self.metadata = metadata or {}
def simpleString(self):
return '%s:%s' % (self.name, self.dataType.simpleString())
def __repr__(self):
return "StructField(%s,%s,%s)" % (self.name, self.dataType,
str(self.nullable).lower())
def jsonValue(self):
return {"name": self.name,
"type": self.dataType.jsonValue(),
"nullable": self.nullable,
"metadata": self.metadata}
@classmethod
def fromJson(cls, json):
return StructField(json["name"],
_parse_datatype_json_value(json["type"]),
json["nullable"],
json["metadata"])
def needConversion(self):
return self.dataType.needConversion()
def toInternal(self, obj):
return self.dataType.toInternal(obj)
def fromInternal(self, obj):
return self.dataType.fromInternal(obj)
def typeName(self):
raise TypeError(
"StructField does not have typeName. "
"Use typeName on its type explicitly instead.")
class StructType(DataType):
"""Struct type, consisting of a list of :class:`StructField`.
This is the data type representing a :class:`Row`.
Iterating a :class:`StructType` will iterate its :class:`StructField`\\s.
A contained :class:`StructField` can be accessed by name or position.
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct1["f1"]
StructField(f1,StringType,true)
>>> struct1[0]
StructField(f1,StringType,true)
"""
def __init__(self, fields=None):
"""
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct2 = StructType([StructField("f1", StringType(), True),
... StructField("f2", IntegerType(), False)])
>>> struct1 == struct2
False
"""
if not fields:
self.fields = []
self.names = []
else:
self.fields = fields
self.names = [f.name for f in fields]
assert all(isinstance(f, StructField) for f in fields),\
"fields should be a list of StructField"
# Precalculated list of fields that need conversion with fromInternal/toInternal functions
self._needConversion = [f.needConversion() for f in self]
self._needSerializeAnyField = any(self._needConversion)
def add(self, field, data_type=None, nullable=True, metadata=None):
"""
Construct a StructType by adding new elements to it to define the schema. The method accepts
either:
a) A single parameter which is a StructField object.
b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
           metadata (optional)). The data_type parameter may be either a String or a
DataType object.
>>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
>>> struct2 = StructType([StructField("f1", StringType(), True), \\
... StructField("f2", StringType(), True, None)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add(StructField("f1", StringType(), True))
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add("f1", "string", True)
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
:param field: Either the name of the field or a StructField object
:param data_type: If present, the DataType of the StructField to create
:param nullable: Whether the field to add should be nullable (default True)
:param metadata: Any additional metadata (default None)
:return: a new updated StructType
"""
if isinstance(field, StructField):
self.fields.append(field)
self.names.append(field.name)
else:
if isinstance(field, str) and data_type is None:
raise ValueError("Must specify DataType if passing name of struct_field to create.")
if isinstance(data_type, str):
data_type_f = _parse_datatype_json_value(data_type)
else:
data_type_f = data_type
self.fields.append(StructField(field, data_type_f, nullable, metadata))
self.names.append(field)
# Precalculated list of fields that need conversion with fromInternal/toInternal functions
self._needConversion = [f.needConversion() for f in self]
self._needSerializeAnyField = any(self._needConversion)
return self
def __iter__(self):
"""Iterate the fields"""
return iter(self.fields)
def __len__(self):
"""Return the number of fields."""
return len(self.fields)
def __getitem__(self, key):
"""Access fields by name or slice."""
if isinstance(key, str):
for field in self:
if field.name == key:
return field
raise KeyError('No StructField named {0}'.format(key))
elif isinstance(key, int):
try:
return self.fields[key]
except IndexError:
raise IndexError('StructType index out of range')
elif isinstance(key, slice):
return StructType(self.fields[key])
else:
raise TypeError('StructType keys should be strings, integers or slices')
def simpleString(self):
return 'struct<%s>' % (','.join(f.simpleString() for f in self))
def __repr__(self):
return ("StructType(List(%s))" %
",".join(str(field) for field in self))
def jsonValue(self):
return {"type": self.typeName(),
"fields": [f.jsonValue() for f in self]}
@classmethod
def fromJson(cls, json):
return StructType([StructField.fromJson(f) for f in json["fields"]])
def fieldNames(self):
"""
Returns all field names in a list.
>>> struct = StructType([StructField("f1", StringType(), True)])
>>> struct.fieldNames()
['f1']
"""
return list(self.names)
def needConversion(self):
        # We need to convert Row()/namedtuple into tuple()
return True
def toInternal(self, obj):
if obj is None:
return
if self._needSerializeAnyField:
# Only calling toInternal function for fields that need conversion
if isinstance(obj, dict):
return tuple(f.toInternal(obj.get(n)) if c else obj.get(n)
for n, f, c in zip(self.names, self.fields, self._needConversion))
elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
return tuple(f.toInternal(obj[n]) if c else obj[n]
for n, f, c in zip(self.names, self.fields, self._needConversion))
elif isinstance(obj, (tuple, list)):
return tuple(f.toInternal(v) if c else v
for f, v, c in zip(self.fields, obj, self._needConversion))
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return tuple(f.toInternal(d.get(n)) if c else d.get(n)
for n, f, c in zip(self.names, self.fields, self._needConversion))
else:
raise ValueError("Unexpected tuple %r with StructType" % obj)
else:
if isinstance(obj, dict):
return tuple(obj.get(n) for n in self.names)
elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
return tuple(obj[n] for n in self.names)
elif isinstance(obj, (list, tuple)):
return tuple(obj)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return tuple(d.get(n) for n in self.names)
else:
raise ValueError("Unexpected tuple %r with StructType" % obj)
def fromInternal(self, obj):
if obj is None:
return
if isinstance(obj, Row):
# it's already converted by pickler
return obj
if self._needSerializeAnyField:
# Only calling fromInternal function for fields that need conversion
values = [f.fromInternal(v) if c else v
for f, v, c in zip(self.fields, obj, self._needConversion)]
else:
values = obj
return _create_row(self.names, values)
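# Illustrative sketch (hypothetical helper and field names): only fields whose type
# needs conversion go through toInternal/fromInternal, the rest pass through unchanged.
def _struct_type_conversion_example():
    schema = StructType([StructField("name", StringType(), True),
                         StructField("birthday", DateType(), True)])
    internal = schema.toInternal({"name": "x", "birthday": datetime.date(1970, 1, 2)})
    # internal == ('x', 1): the date is stored as days since the epoch
    return schema.fromInternal(internal)   # a Row with the original values restored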
class UserDefinedType(DataType):
"""User-defined type (UDT).
.. note:: WARN: Spark Internal Use Only
"""
@classmethod
def typeName(cls):
return cls.__name__.lower()
@classmethod
def sqlType(cls):
"""
Underlying SQL storage type for this UDT.
"""
raise NotImplementedError("UDT must implement sqlType().")
@classmethod
def module(cls):
"""
The Python module of the UDT.
"""
raise NotImplementedError("UDT must implement module().")
@classmethod
def scalaUDT(cls):
"""
The class name of the paired Scala UDT (could be '', if there
is no corresponding one).
"""
return ''
def needConversion(self):
return True
@classmethod
def _cachedSqlType(cls):
"""
        Cache the sqlType() on the class, because it is heavily used in `toInternal`.
"""
if not hasattr(cls, "_cached_sql_type"):
cls._cached_sql_type = cls.sqlType()
return cls._cached_sql_type
def toInternal(self, obj):
if obj is not None:
return self._cachedSqlType().toInternal(self.serialize(obj))
def fromInternal(self, obj):
v = self._cachedSqlType().fromInternal(obj)
if v is not None:
return self.deserialize(v)
def serialize(self, obj):
"""
        Converts a user-type object into a SQL datum.
"""
raise NotImplementedError("UDT must implement toInternal().")
def deserialize(self, datum):
"""
Converts a SQL datum into a user-type object.
"""
raise NotImplementedError("UDT must implement fromInternal().")
def simpleString(self):
return 'udt'
def json(self):
return json.dumps(self.jsonValue(), separators=(',', ':'), sort_keys=True)
def jsonValue(self):
if self.scalaUDT():
assert self.module() != '__main__', 'UDT in __main__ cannot work with ScalaUDT'
schema = {
"type": "udt",
"class": self.scalaUDT(),
"pyClass": "%s.%s" % (self.module(), type(self).__name__),
"sqlType": self.sqlType().jsonValue()
}
else:
ser = CloudPickleSerializer()
b = ser.dumps(type(self))
schema = {
"type": "udt",
"pyClass": "%s.%s" % (self.module(), type(self).__name__),
"serializedClass": base64.b64encode(b).decode('utf8'),
"sqlType": self.sqlType().jsonValue()
}
return schema
@classmethod
def fromJson(cls, json):
pyUDT = str(json["pyClass"]) # convert unicode to str
split = pyUDT.rfind(".")
pyModule = pyUDT[:split]
pyClass = pyUDT[split+1:]
m = __import__(pyModule, globals(), locals(), [pyClass])
if not hasattr(m, pyClass):
s = base64.b64decode(json['serializedClass'].encode('utf-8'))
UDT = CloudPickleSerializer().loads(s)
else:
UDT = getattr(m, pyClass)
return UDT()
def __eq__(self, other):
return type(self) == type(other)
_atomic_types = [StringType, BinaryType, BooleanType, DecimalType, FloatType, DoubleType,
ByteType, ShortType, IntegerType, LongType, DateType, TimestampType, NullType]
_all_atomic_types = dict((t.typeName(), t) for t in _atomic_types)
_all_complex_types = dict((v.typeName(), v)
for v in [ArrayType, MapType, StructType])
_FIXED_DECIMAL = re.compile(r"decimal\(\s*(\d+)\s*,\s*(-?\d+)\s*\)")
def _parse_datatype_string(s):
"""
    Parses the given data type string to a :class:`DataType`. The data type string format matches
    :class:`DataType.simpleString`, except that the top level struct type can omit
the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead
of ``tinyint`` for :class:`ByteType`. We can also use ``int`` as a short name
for :class:`IntegerType`. Since Spark 2.3, this also supports a schema in a DDL-formatted
string and case-insensitive strings.
>>> _parse_datatype_string("int ")
IntegerType
>>> _parse_datatype_string("INT ")
IntegerType
>>> _parse_datatype_string("a: byte, b: decimal( 16 , 8 ) ")
StructType(List(StructField(a,ByteType,true),StructField(b,DecimalType(16,8),true)))
>>> _parse_datatype_string("a DOUBLE, b STRING")
StructType(List(StructField(a,DoubleType,true),StructField(b,StringType,true)))
>>> _parse_datatype_string("a: array< short>")
StructType(List(StructField(a,ArrayType(ShortType,true),true)))
>>> _parse_datatype_string(" map<string , string > ")
MapType(StringType,StringType,true)
>>> # Error cases
>>> _parse_datatype_string("blabla") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("a: int,") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("array<int") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("map<int, boolean>>") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
"""
sc = SparkContext._active_spark_context
def from_ddl_schema(type_str):
return _parse_datatype_json_string(
sc._jvm.org.apache.spark.sql.types.StructType.fromDDL(type_str).json())
def from_ddl_datatype(type_str):
return _parse_datatype_json_string(
sc._jvm.org.apache.spark.sql.api.python.PythonSQLUtils.parseDataType(type_str).json())
try:
# DDL format, "fieldname datatype, fieldname datatype".
return from_ddl_schema(s)
except Exception as e:
try:
            # For backwards compatibility, "integer", "struct<fieldname: datatype>", etc.
return from_ddl_datatype(s)
except:
try:
# For backwards compatibility, "fieldname: datatype, fieldname: datatype" case.
return from_ddl_datatype("struct<%s>" % s.strip())
except:
raise e
def _parse_datatype_json_string(json_string):
"""Parses the given data type JSON string.
>>> import pickle
>>> def check_datatype(datatype):
... pickled = pickle.loads(pickle.dumps(datatype))
... assert datatype == pickled
... scala_datatype = spark._jsparkSession.parseDataType(datatype.json())
... python_datatype = _parse_datatype_json_string(scala_datatype.json())
... assert datatype == python_datatype
>>> for cls in _all_atomic_types.values():
... check_datatype(cls())
>>> # Simple ArrayType.
>>> simple_arraytype = ArrayType(StringType(), True)
>>> check_datatype(simple_arraytype)
>>> # Simple MapType.
>>> simple_maptype = MapType(StringType(), LongType())
>>> check_datatype(simple_maptype)
>>> # Simple StructType.
>>> simple_structtype = StructType([
... StructField("a", DecimalType(), False),
... StructField("b", BooleanType(), True),
... StructField("c", LongType(), True),
... StructField("d", BinaryType(), False)])
>>> check_datatype(simple_structtype)
>>> # Complex StructType.
>>> complex_structtype = StructType([
... StructField("simpleArray", simple_arraytype, True),
... StructField("simpleMap", simple_maptype, True),
... StructField("simpleStruct", simple_structtype, True),
... StructField("boolean", BooleanType(), False),
... StructField("withMeta", DoubleType(), False, {"name": "age"})])
>>> check_datatype(complex_structtype)
>>> # Complex ArrayType.
>>> complex_arraytype = ArrayType(complex_structtype, True)
>>> check_datatype(complex_arraytype)
>>> # Complex MapType.
>>> complex_maptype = MapType(complex_structtype,
... complex_arraytype, False)
>>> check_datatype(complex_maptype)
>>> # Decimal with negative scale.
>>> check_datatype(DecimalType(1,-1))
"""
return _parse_datatype_json_value(json.loads(json_string))
def _parse_datatype_json_value(json_value):
if not isinstance(json_value, dict):
if json_value in _all_atomic_types.keys():
return _all_atomic_types[json_value]()
elif json_value == 'decimal':
return DecimalType()
elif _FIXED_DECIMAL.match(json_value):
m = _FIXED_DECIMAL.match(json_value)
return DecimalType(int(m.group(1)), int(m.group(2)))
else:
raise ValueError("Could not parse datatype: %s" % json_value)
else:
tpe = json_value["type"]
if tpe in _all_complex_types:
return _all_complex_types[tpe].fromJson(json_value)
elif tpe == 'udt':
return UserDefinedType.fromJson(json_value)
else:
raise ValueError("not supported type: %s" % tpe)
# Mapping Python types to Spark SQL DataType
_type_mappings = {
type(None): NullType,
bool: BooleanType,
int: LongType,
float: DoubleType,
str: StringType,
bytearray: BinaryType,
decimal.Decimal: DecimalType,
datetime.date: DateType,
datetime.datetime: TimestampType,
datetime.time: TimestampType,
}
if sys.version < "3":
_type_mappings.update({
unicode: StringType,
long: LongType,
})
# Mapping Python array types to Spark SQL DataType
# We should be careful here. The size of these types in python depends on C
# implementation. We need to make sure that this conversion does not lose any
# precision. Also, the JVM only supports signed types; when converting unsigned types,
# keep in mind that they require 1 more bit when stored as signed types.
#
# Reference for C integer size, see:
# ISO/IEC 9899:201x specification, chapter 5.2.4.2.1 Sizes of integer types <limits.h>.
# Reference for python array typecode, see:
# https://docs.python.org/2/library/array.html
# https://docs.python.org/3.6/library/array.html
# Reference for JVM's supported integral types:
# http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-2.html#jvms-2.3.1
_array_signed_int_typecode_ctype_mappings = {
'b': ctypes.c_byte,
'h': ctypes.c_short,
'i': ctypes.c_int,
'l': ctypes.c_long,
}
_array_unsigned_int_typecode_ctype_mappings = {
'B': ctypes.c_ubyte,
'H': ctypes.c_ushort,
'I': ctypes.c_uint,
'L': ctypes.c_ulong
}
def _int_size_to_type(size):
"""
Return the Catalyst datatype from the size of integers.
"""
if size <= 8:
return ByteType
if size <= 16:
return ShortType
if size <= 32:
return IntegerType
if size <= 64:
return LongType
# The list of all supported array typecodes is stored here
_array_type_mappings = {
    # Warning: The actual properties of float and double are not specified by the C standard.
# On almost every system supported by both python and JVM, they are IEEE 754
# single-precision binary floating-point format and IEEE 754 double-precision
# binary floating-point format. And we do assume the same thing here for now.
'f': FloatType,
'd': DoubleType
}
# compute array typecode mappings for signed integer types
for _typecode in _array_signed_int_typecode_ctype_mappings.keys():
size = ctypes.sizeof(_array_signed_int_typecode_ctype_mappings[_typecode]) * 8
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
# compute array typecode mappings for unsigned integer types
for _typecode in _array_unsigned_int_typecode_ctype_mappings.keys():
    # The JVM does not have unsigned types, so use a signed type that is at least 1
    # bit larger to store the value
size = ctypes.sizeof(_array_unsigned_int_typecode_ctype_mappings[_typecode]) * 8 + 1
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
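# For example, on the common platforms where ctypes.sizeof(ctypes.c_int) == 4, the
# loops above map 'i' to IntegerType (32 signed bits fit exactly) and 'I' to
# LongType (32 unsigned bits need 33 bits once stored as a signed value).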
# Type code 'u' in Python's array is deprecated since version 3.3, and will be
# removed in version 4.0. See: https://docs.python.org/3/library/array.html
if sys.version_info[0] < 4:
_array_type_mappings['u'] = StringType
# Type code 'c' is only available in Python 2
if sys.version_info[0] < 3:
_array_type_mappings['c'] = StringType
# SPARK-21465:
# In Python 2, arrays of 'L' happened to be mistakenly partially supported. To
# avoid breaking user code, we keep this partial support. Below is a dirty hack
# that preserves it and keeps the unit tests passing
import platform
if sys.version_info[0] < 3 and platform.python_implementation() != 'PyPy':
if 'L' not in _array_type_mappings.keys():
_array_type_mappings['L'] = LongType
_array_unsigned_int_typecode_ctype_mappings['L'] = ctypes.c_uint
def _infer_type(obj):
"""Infer the DataType from obj
"""
if obj is None:
return NullType()
if hasattr(obj, '__UDT__'):
return obj.__UDT__
dataType = _type_mappings.get(type(obj))
if dataType is DecimalType:
# the precision and scale of `obj` may be different from row to row.
return DecimalType(38, 18)
elif dataType is not None:
return dataType()
if isinstance(obj, dict):
for key, value in obj.items():
if key is not None and value is not None:
return MapType(_infer_type(key), _infer_type(value), True)
return MapType(NullType(), NullType(), True)
elif isinstance(obj, list):
for v in obj:
if v is not None:
return ArrayType(_infer_type(obj[0]), True)
return ArrayType(NullType(), True)
elif isinstance(obj, array):
if obj.typecode in _array_type_mappings:
return ArrayType(_array_type_mappings[obj.typecode](), False)
else:
raise TypeError("not supported type: array(%s)" % obj.typecode)
else:
try:
return _infer_schema(obj)
except TypeError:
raise TypeError("not supported type: %s" % type(obj))
def _infer_schema(row, names=None):
"""Infer the schema from dict/namedtuple/object"""
if isinstance(row, dict):
items = sorted(row.items())
elif isinstance(row, (tuple, list)):
if hasattr(row, "__fields__"): # Row
items = zip(row.__fields__, tuple(row))
elif hasattr(row, "_fields"): # namedtuple
items = zip(row._fields, tuple(row))
else:
if names is None:
names = ['_%d' % i for i in range(1, len(row) + 1)]
elif len(names) < len(row):
names.extend('_%d' % i for i in range(len(names) + 1, len(row) + 1))
items = zip(names, row)
elif hasattr(row, "__dict__"): # object
items = sorted(row.__dict__.items())
else:
raise TypeError("Can not infer schema for type: %s" % type(row))
fields = [StructField(k, _infer_type(v), True) for k, v in items]
return StructType(fields)
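# Illustrative sketch (hypothetical helper; the field names are made up): plain
# dicts are inferred with their keys sorted, so the schema returned below is
# StructType(List(StructField(age,LongType,true),StructField(name,StringType,true))).
def _infer_schema_example():
    return _infer_schema({"age": 1, "name": "x"})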
def _has_nulltype(dt):
""" Return whether there is NullType in `dt` or not """
if isinstance(dt, StructType):
return any(_has_nulltype(f.dataType) for f in dt.fields)
elif isinstance(dt, ArrayType):
return _has_nulltype((dt.elementType))
elif isinstance(dt, MapType):
return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType)
else:
return isinstance(dt, NullType)
def _merge_type(a, b, name=None):
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
if isinstance(a, NullType):
return b
elif isinstance(b, NullType):
return a
elif type(a) is not type(b):
# TODO: type cast (such as int -> long)
raise TypeError(new_msg("Can not merge type %s and %s" % (type(a), type(b))))
# same type
if isinstance(a, StructType):
nfs = dict((f.name, f.dataType) for f in b.fields)
fields = [StructField(f.name, _merge_type(f.dataType, nfs.get(f.name, NullType()),
name=new_name(f.name)))
for f in a.fields]
names = set([f.name for f in fields])
for n in nfs:
if n not in names:
fields.append(StructField(n, nfs[n]))
return StructType(fields)
elif isinstance(a, ArrayType):
return ArrayType(_merge_type(a.elementType, b.elementType,
name='element in array %s' % name), True)
elif isinstance(a, MapType):
return MapType(_merge_type(a.keyType, b.keyType, name='key of map %s' % name),
_merge_type(a.valueType, b.valueType, name='value of map %s' % name),
True)
else:
return a
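# Illustrative sketch (hypothetical helper): NullType fields are upgraded to the
# other side's type, and fields present on only one side are kept.
def _merge_type_example():
    a = StructType([StructField("f1", NullType(), True)])
    b = StructType([StructField("f1", LongType(), True),
                    StructField("f2", StringType(), True)])
    return _merge_type(a, b)   # StructType with f1 as LongType and f2 as StringType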
def _need_converter(dataType):
if isinstance(dataType, StructType):
return True
elif isinstance(dataType, ArrayType):
return _need_converter(dataType.elementType)
elif isinstance(dataType, MapType):
return _need_converter(dataType.keyType) or _need_converter(dataType.valueType)
elif isinstance(dataType, NullType):
return True
else:
return False
def _create_converter(dataType):
"""Create a converter to drop the names of fields in obj """
if not _need_converter(dataType):
return lambda x: x
if isinstance(dataType, ArrayType):
conv = _create_converter(dataType.elementType)
return lambda row: [conv(v) for v in row]
elif isinstance(dataType, MapType):
kconv = _create_converter(dataType.keyType)
vconv = _create_converter(dataType.valueType)
return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items())
elif isinstance(dataType, NullType):
return lambda x: None
elif not isinstance(dataType, StructType):
return lambda x: x
# dataType must be StructType
names = [f.name for f in dataType.fields]
converters = [_create_converter(f.dataType) for f in dataType.fields]
convert_fields = any(_need_converter(f.dataType) for f in dataType.fields)
def convert_struct(obj):
if obj is None:
return
if isinstance(obj, (tuple, list)):
if convert_fields:
return tuple(conv(v) for v, conv in zip(obj, converters))
else:
return tuple(obj)
if isinstance(obj, dict):
d = obj
elif hasattr(obj, "__dict__"): # object
d = obj.__dict__
else:
raise TypeError("Unexpected obj type: %s" % type(obj))
if convert_fields:
return tuple([conv(d.get(name)) for name, conv in zip(names, converters)])
else:
return tuple([d.get(name) for name in names])
return convert_struct
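# Illustrative sketch (hypothetical helper and field names): the converter drops
# field names and maps NullType fields to None.
def _create_converter_example():
    conv = _create_converter(StructType([StructField("a", NullType(), True),
                                         StructField("b", LongType(), True)]))
    return conv({"b": 2, "a": 1})   # -> (None, 2)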
_acceptable_types = {
BooleanType: (bool,),
ByteType: (int, long),
ShortType: (int, long),
IntegerType: (int, long),
LongType: (int, long),
FloatType: (float,),
DoubleType: (float,),
DecimalType: (decimal.Decimal,),
StringType: (str, unicode),
BinaryType: (bytearray,),
DateType: (datetime.date, datetime.datetime),
TimestampType: (datetime.datetime,),
ArrayType: (list, tuple, array),
MapType: (dict,),
StructType: (tuple, list, dict),
}
def _make_type_verifier(dataType, nullable=True, name=None):
"""
Make a verifier that checks the type of obj against dataType and raises a TypeError if they do
not match.
This verifier also checks the value of obj against datatype and raises a ValueError if it's not
    within the allowed range, e.g. using 128 as ByteType will overflow. Note that Python floats are
    not checked, so they will become infinity when cast to Java float if they overflow.
>>> _make_type_verifier(StructType([]))(None)
>>> _make_type_verifier(StringType())("")
>>> _make_type_verifier(LongType())(0)
>>> _make_type_verifier(ArrayType(ShortType()))(list(range(3)))
>>> _make_type_verifier(ArrayType(StringType()))(set()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError:...
>>> _make_type_verifier(MapType(StringType(), IntegerType()))({})
>>> _make_type_verifier(StructType([]))(())
>>> _make_type_verifier(StructType([]))([])
>>> _make_type_verifier(StructType([]))([1]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> # Check if numeric values are within the allowed range.
>>> _make_type_verifier(ByteType())(12)
>>> _make_type_verifier(ByteType())(1234) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(ByteType(), False)(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(
... ArrayType(ShortType(), False))([1, None]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(MapType(StringType(), IntegerType()))({None: 1})
Traceback (most recent call last):
...
ValueError:...
>>> schema = StructType().add("a", IntegerType()).add("b", StringType(), False)
>>> _make_type_verifier(schema)((1, None)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
def verify_nullability(obj):
if obj is None:
if nullable:
return True
else:
raise ValueError(new_msg("This field is not nullable, but got None"))
else:
return False
_type = type(dataType)
def assert_acceptable_types(obj):
assert _type in _acceptable_types, \
new_msg("unknown datatype: %s for object %r" % (dataType, obj))
def verify_acceptable_types(obj):
        # subclasses of these types can not be handled by fromInternal in the JVM
if type(obj) not in _acceptable_types[_type]:
raise TypeError(new_msg("%s can not accept object %r in type %s"
% (dataType, obj, type(obj))))
if isinstance(dataType, StringType):
# StringType can work with any types
verify_value = lambda _: _
elif isinstance(dataType, UserDefinedType):
verifier = _make_type_verifier(dataType.sqlType(), name=name)
def verify_udf(obj):
if not (hasattr(obj, '__UDT__') and obj.__UDT__ == dataType):
raise ValueError(new_msg("%r is not an instance of type %r" % (obj, dataType)))
verifier(dataType.toInternal(obj))
verify_value = verify_udf
elif isinstance(dataType, ByteType):
def verify_byte(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -128 or obj > 127:
raise ValueError(new_msg("object of ByteType out of range, got: %s" % obj))
verify_value = verify_byte
elif isinstance(dataType, ShortType):
def verify_short(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -32768 or obj > 32767:
raise ValueError(new_msg("object of ShortType out of range, got: %s" % obj))
verify_value = verify_short
elif isinstance(dataType, IntegerType):
def verify_integer(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -2147483648 or obj > 2147483647:
raise ValueError(
new_msg("object of IntegerType out of range, got: %s" % obj))
verify_value = verify_integer
elif isinstance(dataType, ArrayType):
element_verifier = _make_type_verifier(
dataType.elementType, dataType.containsNull, name="element in array %s" % name)
def verify_array(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
for i in obj:
element_verifier(i)
verify_value = verify_array
elif isinstance(dataType, MapType):
key_verifier = _make_type_verifier(dataType.keyType, False, name="key of map %s" % name)
value_verifier = _make_type_verifier(
dataType.valueType, dataType.valueContainsNull, name="value of map %s" % name)
def verify_map(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
for k, v in obj.items():
key_verifier(k)
value_verifier(v)
verify_value = verify_map
elif isinstance(dataType, StructType):
verifiers = []
for f in dataType.fields:
verifier = _make_type_verifier(f.dataType, f.nullable, name=new_name(f.name))
verifiers.append((f.name, verifier))
def verify_struct(obj):
assert_acceptable_types(obj)
if isinstance(obj, dict):
for f, verifier in verifiers:
verifier(obj.get(f))
elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
# the order in obj could be different than dataType.fields
for f, verifier in verifiers:
verifier(obj[f])
elif isinstance(obj, (tuple, list)):
if len(obj) != len(verifiers):
raise ValueError(
new_msg("Length of object (%d) does not match with "
"length of fields (%d)" % (len(obj), len(verifiers))))
for v, (_, verifier) in zip(obj, verifiers):
verifier(v)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
for f, verifier in verifiers:
verifier(d.get(f))
else:
raise TypeError(new_msg("StructType can not accept object %r in type %s"
% (obj, type(obj))))
verify_value = verify_struct
else:
def verify_default(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
verify_value = verify_default
def verify(obj):
if not verify_nullability(obj):
verify_value(obj)
return verify
# This is used to unpickle a Row from JVM
def _create_row_inbound_converter(dataType):
return lambda *a: dataType.fromInternal(a)
def _create_row(fields, values):
row = Row(*values)
row.__fields__ = fields
return row
class Row(tuple):
"""
A row in L{DataFrame}.
The fields in it can be accessed:
* like attributes (``row.key``)
* like dictionary values (``row[key]``)
``key in row`` will search through row keys.
    Row can be used to create a row object by using named arguments;
    the fields will be sorted by names. It is not allowed to omit
    a named argument to represent that the value is None or missing;
    such a value should be explicitly set to None.
>>> row = Row(name="Alice", age=11)
>>> row
Row(age=11, name='Alice')
>>> row['name'], row['age']
('Alice', 11)
>>> row.name, row.age
('Alice', 11)
>>> 'name' in row
True
>>> 'wrong_key' in row
False
Row also can be used to create another Row like class, then it
could be used to create Row objects, such as
>>> Person = Row("name", "age")
>>> Person
<Row(name, age)>
>>> 'name' in Person
True
>>> 'wrong_key' in Person
False
>>> Person("Alice", 11)
Row(name='Alice', age=11)
"""
def __new__(self, *args, **kwargs):
if args and kwargs:
raise ValueError("Can not use both args "
"and kwargs to create Row")
if kwargs:
# create row objects
names = sorted(kwargs.keys())
row = tuple.__new__(self, [kwargs[n] for n in names])
row.__fields__ = names
row.__from_dict__ = True
return row
else:
# create row class or objects
return tuple.__new__(self, args)
def asDict(self, recursive=False):
"""
        Return as a dict
:param recursive: turns the nested Row as dict (default: False).
>>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
True
>>> row = Row(key=1, value=Row(name='a', age=2))
>>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
True
>>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
True
"""
if not hasattr(self, "__fields__"):
raise TypeError("Cannot convert a Row class into dict")
if recursive:
def conv(obj):
if isinstance(obj, Row):
return obj.asDict(True)
elif isinstance(obj, list):
return [conv(o) for o in obj]
elif isinstance(obj, dict):
return dict((k, conv(v)) for k, v in obj.items())
else:
return obj
return dict(zip(self.__fields__, (conv(o) for o in self)))
else:
return dict(zip(self.__fields__, self))
def __contains__(self, item):
if hasattr(self, "__fields__"):
return item in self.__fields__
else:
return super(Row, self).__contains__(item)
    # let the object act like a class
def __call__(self, *args):
"""create new Row object"""
if len(args) > len(self):
raise ValueError("Can not create Row with fields %s, expected %d values "
"but got %s" % (self, len(self), args))
return _create_row(self, args)
def __getitem__(self, item):
if isinstance(item, (int, slice)):
return super(Row, self).__getitem__(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self.__fields__.index(item)
return super(Row, self).__getitem__(idx)
except IndexError:
raise KeyError(item)
except ValueError:
raise ValueError(item)
def __getattr__(self, item):
if item.startswith("__"):
raise AttributeError(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self.__fields__.index(item)
return self[idx]
except IndexError:
raise AttributeError(item)
except ValueError:
raise AttributeError(item)
def __setattr__(self, key, value):
if key != '__fields__' and key != "__from_dict__":
raise Exception("Row is read-only")
self.__dict__[key] = value
def __reduce__(self):
"""Returns a tuple so Python knows how to pickle Row."""
if hasattr(self, "__fields__"):
return (_create_row, (self.__fields__, tuple(self)))
else:
return tuple.__reduce__(self)
def __repr__(self):
"""Printable representation of Row used in Python REPL."""
if hasattr(self, "__fields__"):
return "Row(%s)" % ", ".join("%s=%r" % (k, v)
for k, v in zip(self.__fields__, tuple(self)))
else:
return "<Row(%s)>" % ", ".join(self)
class DateConverter(object):
def can_convert(self, obj):
return isinstance(obj, datetime.date)
def convert(self, obj, gateway_client):
Date = JavaClass("java.sql.Date", gateway_client)
return Date.valueOf(obj.strftime("%Y-%m-%d"))
class DatetimeConverter(object):
def can_convert(self, obj):
return isinstance(obj, datetime.datetime)
def convert(self, obj, gateway_client):
Timestamp = JavaClass("java.sql.Timestamp", gateway_client)
seconds = (calendar.timegm(obj.utctimetuple()) if obj.tzinfo
else time.mktime(obj.timetuple()))
t = Timestamp(int(seconds) * 1000)
t.setNanos(obj.microsecond * 1000)
return t
# datetime is a subclass of date, we should register DatetimeConverter first
register_input_converter(DatetimeConverter())
register_input_converter(DateConverter())
def to_arrow_type(dt):
""" Convert Spark data type to pyarrow type
"""
from distutils.version import LooseVersion
import pyarrow as pa
if type(dt) == BooleanType:
arrow_type = pa.bool_()
elif type(dt) == ByteType:
arrow_type = pa.int8()
elif type(dt) == ShortType:
arrow_type = pa.int16()
elif type(dt) == IntegerType:
arrow_type = pa.int32()
elif type(dt) == LongType:
arrow_type = pa.int64()
elif type(dt) == FloatType:
arrow_type = pa.float32()
elif type(dt) == DoubleType:
arrow_type = pa.float64()
elif type(dt) == DecimalType:
arrow_type = pa.decimal128(dt.precision, dt.scale)
elif type(dt) == StringType:
arrow_type = pa.string()
elif type(dt) == BinaryType:
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt) +
"\nPlease install pyarrow >= 0.10.0 for BinaryType support.")
arrow_type = pa.binary()
elif type(dt) == DateType:
arrow_type = pa.date32()
elif type(dt) == TimestampType:
# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read
arrow_type = pa.timestamp('us', tz='UTC')
elif type(dt) == ArrayType:
if type(dt.elementType) in [StructType, TimestampType]:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
arrow_type = pa.list_(to_arrow_type(dt.elementType))
elif type(dt) == StructType:
if any(type(field.dataType) == StructType for field in dt):
raise TypeError("Nested StructType not supported in conversion to Arrow")
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in dt]
arrow_type = pa.struct(fields)
else:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
return arrow_type
def to_arrow_schema(schema):
""" Convert a schema from Spark to Arrow
"""
import pyarrow as pa
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in schema]
return pa.schema(fields)
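# Illustrative sketch (not part of the original module): converting a small Spark
# schema to its Arrow equivalent; the column names are hypothetical.
def _example_to_arrow_schema():
    schema = StructType([StructField("id", LongType()), StructField("name", StringType())])
    # LongType maps to pa.int64() and StringType to pa.string(), as in to_arrow_type above.
    return to_arrow_schema(schema)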
def from_arrow_type(at):
""" Convert pyarrow type to Spark data type.
"""
from distutils.version import LooseVersion
import pyarrow as pa
import pyarrow.types as types
if types.is_boolean(at):
spark_type = BooleanType()
elif types.is_int8(at):
spark_type = ByteType()
elif types.is_int16(at):
spark_type = ShortType()
elif types.is_int32(at):
spark_type = IntegerType()
elif types.is_int64(at):
spark_type = LongType()
elif types.is_float32(at):
spark_type = FloatType()
elif types.is_float64(at):
spark_type = DoubleType()
elif types.is_decimal(at):
spark_type = DecimalType(precision=at.precision, scale=at.scale)
elif types.is_string(at):
spark_type = StringType()
elif types.is_binary(at):
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at) +
"\nPlease install pyarrow >= 0.10.0 for BinaryType support.")
spark_type = BinaryType()
elif types.is_date32(at):
spark_type = DateType()
elif types.is_timestamp(at):
spark_type = TimestampType()
elif types.is_list(at):
if types.is_timestamp(at.value_type):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
spark_type = ArrayType(from_arrow_type(at.value_type))
elif types.is_struct(at):
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at) +
"\nPlease install pyarrow >= 0.10.0 for StructType support.")
if any(types.is_struct(field.type) for field in at):
raise TypeError("Nested StructType not supported in conversion from Arrow: " + str(at))
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in at])
else:
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
return spark_type
def from_arrow_schema(arrow_schema):
""" Convert schema from Arrow to Spark.
"""
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in arrow_schema])
def _arrow_column_to_pandas(column, data_type):
""" Convert Arrow Column to pandas Series.
    :param column: pyarrow.lib.Column
:param data_type: a Spark data type for the column
"""
import pandas as pd
import pyarrow as pa
from distutils.version import LooseVersion
# If the given column is a date type column, creates a series of datetime.date directly instead
# of creating datetime64[ns] as intermediate data to avoid overflow caused by datetime64[ns]
# type handling.
if LooseVersion(pa.__version__) < LooseVersion("0.11.0"):
if type(data_type) == DateType:
return pd.Series(column.to_pylist(), name=column.name)
else:
return column.to_pandas()
else:
# Since Arrow 0.11.0, support date_as_object to return datetime.date instead of
# np.datetime64.
return column.to_pandas(date_as_object=True)
def _arrow_table_to_pandas(table, schema):
""" Convert Arrow Table to pandas DataFrame.
Pandas DataFrame created from PyArrow uses datetime64[ns] for date type values, but we should
use datetime.date to match the behavior with when Arrow optimization is disabled.
:param table: pyarrow.lib.Table
:param schema: a Spark schema of the pyarrow.lib.Table
"""
import pandas as pd
import pyarrow as pa
from distutils.version import LooseVersion
# If the given table contains a date type column, use `_arrow_column_to_pandas` for pyarrow<0.11
# or use `date_as_object` option for pyarrow>=0.11 to avoid creating datetime64[ns] as
# intermediate data.
if LooseVersion(pa.__version__) < LooseVersion("0.11.0"):
if any(type(field.dataType) == DateType for field in schema):
return pd.concat([_arrow_column_to_pandas(column, field.dataType)
for column, field in zip(table.itercolumns(), schema)], axis=1)
else:
return table.to_pandas()
else:
return table.to_pandas(date_as_object=True)
def _get_local_timezone():
""" Get local timezone using pytz with environment variable, or dateutil.
If there is a 'TZ' environment variable, pass it to pandas to use pytz and use it as timezone
string, otherwise use the special word 'dateutil/:' which means that pandas uses dateutil and
it reads system configuration to know the system local timezone.
See also:
- https://github.com/pandas-dev/pandas/blob/0.19.x/pandas/tslib.pyx#L1753
- https://github.com/dateutil/dateutil/blob/2.6.1/dateutil/tz/tz.py#L1338
"""
import os
return os.environ.get('TZ', 'dateutil/:')
def _check_series_localize_timestamps(s, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
:param s: pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64tz_dtype
tz = timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(tz).dt.tz_localize(None)
else:
return s
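# Illustrative sketch (not part of the original module): a tz-aware series is converted
# to tz-naive values in the requested timezone; the sample timestamp is hypothetical.
def _example_localize_timestamps():
    import pandas as pd
    s = pd.Series(pd.to_datetime(["2015-06-01 12:00:00"]).tz_localize("UTC"))
    # Returns a tz-naive series holding 2015-06-01 08:00:00 (New York is UTC-4 in June).
    return _check_series_localize_timestamps(s, "America/New_York")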
def _check_dataframe_localize_timestamps(pdf, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone
:param pdf: pandas.DataFrame
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.DataFrame where any timezone aware columns have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
for column, series in pdf.iteritems():
pdf[column] = _check_series_localize_timestamps(series, timezone)
return pdf
def _check_series_convert_timestamps_internal(s, timezone):
"""
Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
Spark internal storage
:param s: a pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been UTC normalized without a time zone
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64_dtype(s.dtype):
# When tz_localize a tz-naive timestamp, the result is ambiguous if the tz-naive
        # timestamp is during the hour when the clock is adjusted backward due to
# daylight saving time (dst).
# E.g., for America/New_York, the clock is adjusted backward on 2015-11-01 2:00 to
# 2015-11-01 1:00 from dst-time to standard time, and therefore, when tz_localize
# a tz-naive timestamp 2015-11-01 1:30 with America/New_York timezone, it can be either
        # dst time (2015-11-01 1:30-0400) or standard time (2015-11-01 1:30-0500).
#
        # Here we explicitly choose to use standard time. This matches the default behavior of
# pytz.
#
        # Here is some code to help understand this behavior:
# >>> import datetime
# >>> import pandas as pd
# >>> import pytz
# >>>
# >>> t = datetime.datetime(2015, 11, 1, 1, 30)
# >>> ts = pd.Series([t])
# >>> tz = pytz.timezone('America/New_York')
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=True)
# 0 2015-11-01 01:30:00-04:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=False)
# 0 2015-11-01 01:30:00-05:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> str(tz.localize(t))
# '2015-11-01 01:30:00-05:00'
tz = timezone or _get_local_timezone()
return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC')
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert('UTC')
else:
return s
def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
from_tz = from_timezone or _get_local_timezone()
to_tz = to_timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(to_tz).dt.tz_localize(None)
elif is_datetime64_dtype(s.dtype) and from_tz != to_tz:
# `s.dt.tz_localize('tzlocal()')` doesn't work properly when including NaT.
return s.apply(
lambda ts: ts.tz_localize(from_tz, ambiguous=False).tz_convert(to_tz).tz_localize(None)
if ts is not pd.NaT else pd.NaT)
else:
return s
def _check_series_convert_timestamps_local_tz(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, None, timezone)
def _check_series_convert_timestamps_tz_local(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param timezone: the timezone to convert from. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, timezone, None)
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
globs = globals()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession.builder.getOrCreate()
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| 35.522869
| 100
| 0.624455
|
b6a3528da92797c15eba0de03edc47154d891b0c
| 15,275
|
py
|
Python
|
train_cifar.py
|
ekremerakin/ML-Reproducibility-Challenge-2021
|
005693a0c85c1022d3f475375e2c5de6e961e4ba
|
[
"MIT"
] | null | null | null |
train_cifar.py
|
ekremerakin/ML-Reproducibility-Challenge-2021
|
005693a0c85c1022d3f475375e2c5de6e961e4ba
|
[
"MIT"
] | null | null | null |
train_cifar.py
|
ekremerakin/ML-Reproducibility-Challenge-2021
|
005693a0c85c1022d3f475375e2c5de6e961e4ba
|
[
"MIT"
] | null | null | null |
from warnings import filterwarnings
filterwarnings("ignore")
import os
import random
import sys
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.mixture import GaussianMixture
from torch.utils.data import DataLoader, Dataset
import dataloader_cifar as dataloader
from PreResNet import *
from preset_parser import *
if __name__ == "__main__":
args = parse_args("./presets.json")
logs = open(os.path.join(args.checkpoint_path, "saved", "metrics.log"), "a")
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# Training
def train(epoch, net, net2, optimizer, labeled_trainloader, unlabeled_trainloader):
net.train()
net2.eval() # fix one network and train the other
unlabeled_train_iter = iter(unlabeled_trainloader)
num_iter = (len(labeled_trainloader.dataset) // args.batch_size) + 1
for (
batch_idx,
(
inputs_x,
inputs_x2,
inputs_x3,
inputs_x4,
labels_x,
w_x,
),
) in enumerate(labeled_trainloader):
try:
inputs_u, inputs_u2, inputs_u3, inputs_u4 = unlabeled_train_iter.next()
except:
unlabeled_train_iter = iter(unlabeled_trainloader)
inputs_u, inputs_u2, inputs_u3, inputs_u4 = unlabeled_train_iter.next()
batch_size = inputs_x.size(0)
# Transform label to one-hot
labels_x = torch.zeros(batch_size, args.num_class).scatter_(
1, labels_x.view(-1, 1), 1
)
w_x = w_x.view(-1, 1).type(torch.FloatTensor)
inputs_x, inputs_x2, inputs_x3, inputs_x4, labels_x, w_x = (
inputs_x.cuda(),
inputs_x2.cuda(),
inputs_x3.cuda(),
inputs_x4.cuda(),
labels_x.cuda(),
w_x.cuda(),
)
inputs_u, inputs_u2, inputs_u3, inputs_u4 = (
inputs_u.cuda(),
inputs_u2.cuda(),
inputs_u3.cuda(),
inputs_u4.cuda(),
)
# inputs u/u2
with torch.no_grad():
# label co-guessing of unlabeled samples
outputs_u_1 = net(inputs_u3)
outputs_u_2 = net(inputs_u4)
outputs_u_3 = net2(inputs_u3)
outputs_u_4 = net2(inputs_u4)
pu = (
torch.softmax(outputs_u_1, dim=1)
+ torch.softmax(outputs_u_2, dim=1)
+ torch.softmax(outputs_u_3, dim=1)
+ torch.softmax(outputs_u_4, dim=1)
) / 4
ptu = pu ** (1 / args.T) # temparature sharpening
targets_u = ptu / ptu.sum(dim=1, keepdim=True) # normalize
targets_u = targets_u.detach()
# label refinement of labeled samples
outputs_x_1 = net(inputs_x3)
outputs_x_2 = net(inputs_x4)
px = (
torch.softmax(outputs_x_1, dim=1)
+ torch.softmax(outputs_x_2, dim=1)
) / 2
px = w_x * labels_x + (1 - w_x) * px
ptx = px ** (1 / args.T) # temparature sharpening
targets_x = ptx / ptx.sum(dim=1, keepdim=True) # normalize
targets_x = targets_x.detach()
# mixmatch
l = np.random.beta(args.alpha, args.alpha)
l = max(l, 1 - l)
all_inputs = torch.cat([inputs_x, inputs_x2, inputs_u, inputs_u2], dim=0)
all_targets = torch.cat([targets_x, targets_x, targets_u, targets_u], dim=0)
idx = torch.randperm(all_inputs.size(0))
input_a, input_b = all_inputs, all_inputs[idx]
target_a, target_b = all_targets, all_targets[idx]
mixed_input = l * input_a + (1 - l) * input_b
mixed_target = l * target_a + (1 - l) * target_b
logits = net(mixed_input)
logits_x = logits[: batch_size * 2]
logits_u = logits[batch_size * 2 :]
Lx, Lu, lamb = criterion(
logits_x,
mixed_target[: batch_size * 2],
logits_u,
mixed_target[batch_size * 2 :],
epoch + batch_idx / num_iter,
args.warm_up,
)
# regularization
prior = torch.ones(args.num_class) / args.num_class
prior = prior.cuda()
pred_mean = torch.softmax(logits, dim=1).mean(0)
penalty = torch.sum(prior * torch.log(prior / pred_mean))
loss = Lx + lamb * Lu + penalty
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
sys.stdout.write("\r")
sys.stdout.write(
"%s: %.1f-%s | Epoch [%3d/%3d], Iter[%3d/%3d]\t Labeled loss: %.2f, Unlabeled loss: %.2f"
% (
args.dataset,
args.r,
args.noise_mode,
epoch,
args.num_epochs - 1,
batch_idx + 1,
num_iter,
Lx.item(),
Lu.item(),
)
)
sys.stdout.flush()
def warmup(epoch, net, optimizer, dataloader):
net.train()
num_iter = (len(dataloader.dataset) // dataloader.batch_size) + 1
for batch_idx, (inputs, labels, path) in enumerate(dataloader):
inputs, labels = inputs.cuda(), labels.cuda()
optimizer.zero_grad()
outputs = net(inputs)
loss = CEloss(outputs, labels)
if (
args.noise_mode == "asym"
): # penalize confident prediction for asymmetric noise
penalty = conf_penalty(outputs)
L = loss + penalty
elif args.noise_mode == "sym":
L = loss
L.backward()
optimizer.step()
sys.stdout.write("\r")
sys.stdout.write(
"%s: %.1f-%s | Epoch [%3d/%3d] Iter[%3d/%3d]\t CE-loss: %.4f"
% (
args.dataset,
args.r,
args.noise_mode,
epoch,
args.num_epochs - 1,
batch_idx + 1,
num_iter,
loss.item(),
)
)
sys.stdout.flush()
def test(epoch, net1, net2, size_l1, size_u1, size_l2, size_u2):
global logs
net1.eval()
net2.eval()
all_targets = []
all_predicted = []
import time
print('test dataset size:', len(test_loader))
st = time.time()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_loader):
inputs, targets = inputs.cuda(), targets.cuda()
outputs1 = net1(inputs)
outputs2 = net2(inputs)
outputs = outputs1 + outputs2
_, predicted = torch.max(outputs, 1)
all_targets += targets.tolist()
all_predicted += predicted.tolist()
print('time passed:', time.time()-st)
accuracy = accuracy_score(all_targets, all_predicted)
precision = precision_score(all_targets, all_predicted, average="weighted")
recall = recall_score(all_targets, all_predicted, average="weighted")
f1 = f1_score(all_targets, all_predicted, average="weighted")
results = "Test Epoch: %d, Accuracy: %.3f, Precision: %.3f, Recall: %.3f, F1: %.3f, L_1: %d, U_1: %d, L_2: %d, U_2: %d" % (
epoch,
accuracy * 100,
precision * 100,
recall * 100,
f1 * 100,
size_l1,
size_u1,
size_l2,
size_u2,
)
print("\n" + results + "\n")
logs.write(results + "\n")
logs.flush()
return accuracy
def eval_train(model, all_loss):
model.eval()
losses = torch.zeros(len(eval_loader.dataset))
with torch.no_grad():
for batch_idx, (inputs, targets, index) in enumerate(eval_loader):
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
loss = CE(outputs, targets)
for b in range(inputs.size(0)):
losses[index[b]] = loss[b]
losses = (losses - losses.min()) / (losses.max() - losses.min())
all_loss.append(losses)
if (
args.average_loss > 0
): # average loss over last 5 epochs to improve convergence stability
history = torch.stack(all_loss)
input_loss = history[-args.average_loss :].mean(0)
input_loss = input_loss.reshape(-1, 1)
else:
input_loss = losses.reshape(-1, 1)
# fit a two-component GMM to the loss
gmm = GaussianMixture(n_components=2, max_iter=10, tol=1e-2, reg_covar=5e-4)
gmm.fit(input_loss)
prob = gmm.predict_proba(input_loss)
prob = prob[:, gmm.means_.argmin()]
return prob, all_loss
def linear_rampup(current, warm_up, rampup_length=16):
current = np.clip((current - warm_up) / rampup_length, 0.0, 1.0)
return args.lambda_u * float(current)
class SemiLoss(object):
def __call__(
self, outputs_x_1, targets_x, outputs_u, targets_u, epoch, warm_up
):
probs_u = torch.softmax(outputs_u, dim=1)
Lx = -torch.mean(
torch.sum(F.log_softmax(outputs_x_1, dim=1) * targets_x, dim=1)
)
Lu = torch.mean((probs_u - targets_u) ** 2)
return Lx, Lu, linear_rampup(epoch, warm_up)
class NegEntropy(object):
def __call__(self, outputs):
probs = torch.softmax(outputs, dim=1)
return torch.mean(torch.sum(probs.log() * probs, dim=1))
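    # Illustrative note (not part of the original script): NegEntropy returns the mean of
    # sum(p * log p), which is close to 0 for confident (peaked) softmax outputs and about
    # -log(num_classes) for uniform outputs, so adding it as a penalty during the
    # asymmetric-noise warm-up discourages over-confident predictions.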
def create_model(devices=[0]):
model = ResNet18(num_classes=args.num_class)
model = model.cuda()
model = torch.nn.DataParallel(model, device_ids=devices).cuda()
return model
loader = dataloader.cifar_dataloader(
dataset=args.dataset,
r=args.r,
noise_mode=args.noise_mode,
batch_size=args.batch_size,
warmup_batch_size=args.warmup_batch_size,
num_workers=args.num_workers,
root_dir=args.data_path,
noise_file=f"{args.checkpoint_path}/saved/labels.json",
preaug_file=(
f"{args.checkpoint_path}/saved/{args.preset}_preaugdata.pth.tar"
if args.preaugment
else ""
),
augmentation_strategy=args,
)
print("| Building net")
devices = range(torch.cuda.device_count())
net1 = create_model(devices)
net2 = create_model(devices)
cudnn.benchmark = True
criterion = SemiLoss()
optimizer1 = optim.SGD(
net1.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4
)
optimizer2 = optim.SGD(
net2.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4
)
all_loss = [[], []] # save the history of losses from two networks
if args.pretrained_path != "":
with open(args.pretrained_path + f"/saved/{args.preset}.pth.tar", "rb") as p:
unpickled = torch.load(p)
net1.load_state_dict(unpickled["net1"])
net2.load_state_dict(unpickled["net2"])
optimizer1.load_state_dict(unpickled["optimizer1"])
optimizer2.load_state_dict(unpickled["optimizer2"])
all_loss = unpickled["all_loss"]
epoch = unpickled["epoch"] + 1
else:
epoch = 0
CE = nn.CrossEntropyLoss(reduction="none")
CEloss = nn.CrossEntropyLoss()
if args.noise_mode == "asym":
conf_penalty = NegEntropy()
warmup_trainloader = loader.run("warmup")
test_loader = loader.run("test")
eval_loader = loader.run("eval_train")
while epoch < args.num_epochs:
lr = args.learning_rate
if epoch >= args.lr_switch_epoch:
lr /= 10
for param_group in optimizer1.param_groups:
param_group["lr"] = lr
for param_group in optimizer2.param_groups:
param_group["lr"] = lr
size_l1, size_u1, size_l2, size_u2 = (
len(warmup_trainloader.dataset),
0,
len(warmup_trainloader.dataset),
0,
)
if epoch < args.warm_up:
print("Warmup Net1")
warmup(epoch, net1, optimizer1, warmup_trainloader)
print("\nWarmup Net2")
warmup(epoch, net2, optimizer2, warmup_trainloader)
else:
prob1, all_loss[0] = eval_train(net1, all_loss[0])
prob2, all_loss[1] = eval_train(net2, all_loss[1])
pred1 = prob1 > args.p_threshold
pred2 = prob2 > args.p_threshold
print("Train Net1")
labeled_trainloader, unlabeled_trainloader = loader.run(
"train", pred2, prob2
) # co-divide
size_l1, size_u1 = (
len(labeled_trainloader.dataset),
len(unlabeled_trainloader.dataset),
)
train(
epoch,
net1,
net2,
optimizer1,
labeled_trainloader,
unlabeled_trainloader,
) # train net1
print("\nTrain Net2")
labeled_trainloader, unlabeled_trainloader = loader.run(
"train", pred1, prob1
) # co-divide
size_l2, size_u2 = (
len(labeled_trainloader.dataset),
len(unlabeled_trainloader.dataset),
)
train(
epoch,
net2,
net1,
optimizer2,
labeled_trainloader,
unlabeled_trainloader,
) # train net2
acc = test(epoch, net1, net2, size_l1, size_u1, size_l2, size_u2)
data_dict = {
"epoch": epoch,
"net1": net1.state_dict(),
"net2": net2.state_dict(),
"optimizer1": optimizer1.state_dict(),
"optimizer2": optimizer2.state_dict(),
"all_loss": all_loss,
}
if (epoch + 1) % args.save_every == 0 or epoch == args.warm_up - 1:
checkpoint_model = os.path.join(
args.checkpoint_path, "all", f"{args.preset}_epoch{epoch}.pth.tar"
)
torch.save(data_dict, checkpoint_model)
saved_model = os.path.join(
args.checkpoint_path, "saved", f"{args.preset}.pth.tar"
)
torch.save(data_dict, saved_model)
epoch += 1
| 34.480813
| 131
| 0.535057
|
d40c7afbbadfacad8a6d4b64e5807c012db19e02
| 7,105
|
py
|
Python
|
eksupdate/src/self_managed.py
|
aws-samples/amazon-eks-one-click-cluster-upgrade
|
b9fcddfe0acb45a0d400177141603962a9ba322e
|
[
"MIT-0"
] | 26
|
2021-11-29T17:54:59.000Z
|
2022-03-28T10:00:11.000Z
|
eksupdate/src/self_managed.py
|
aws-samples/amazon-eks-one-click-cluster-upgrade
|
b9fcddfe0acb45a0d400177141603962a9ba322e
|
[
"MIT-0"
] | null | null | null |
eksupdate/src/self_managed.py
|
aws-samples/amazon-eks-one-click-cluster-upgrade
|
b9fcddfe0acb45a0d400177141603962a9ba322e
|
[
"MIT-0"
] | 2
|
2021-12-03T19:10:54.000Z
|
2022-02-08T09:34:57.000Z
|
from platform import node
import boto3
import time
from .latest_ami import get_latestami
from botocore.utils import instance_cache
from .ekslogs import logs_pusher
def status_of_cluster(Clustname,regionName):
client = boto3.client('eks',region_name=regionName)
''' Getting Self Managed Cluster Status'''
response = client.describe_cluster(
name=Clustname
)
logs_pusher(regionName=regionName,cluster_name=Clustname,msg="The Cluster Status = {stat} and Version = {ver}".format(stat=response["cluster"]["status"],ver=response["cluster"]["version"]))
return [response["cluster"]["status"], response["cluster"]["version"]]
def get_node_groups(Clustername,regionName):
client = boto3.client('eks',region_name=regionName)
''' Getting Node Group list'''
response =client.list_nodegroups(
clusterName=Clustername,
maxResults=100
)
return response['nodegroups']
def Desc_node_groups(Clustername,Nodegroup,regionName):
client = boto3.client('eks',region_name=regionName)
    ''' Getting description of a node group '''
response=client.describe_nodegroup(
clusterName=Clustername,
nodegroupName=Nodegroup)
# print(response)
# print(response.get('nodegroup')['version'])
logs_pusher(regionName=regionName,cluster_name=Clustername,msg="The NodeGroup = {ng} Status = {stat} and Version = {ver}".format(ng=Nodegroup,stat=response.get('nodegroup')['status'],ver=response.get('nodegroup')['version']))
return [response.get('nodegroup')['status'],response.get('nodegroup')['version']]
def get_asg_node_groups(Clustername,regionName):
client = boto3.client('eks',region_name=regionName)
''' Getting asg of self managed node groups '''
asg_groups=[]
node_groups=get_node_groups(Clustername,regionName)
if len(node_groups)==0:
return []
for ng in node_groups:
response=client.describe_nodegroup(clusterName=Clustername,nodegroupName=ng).get('nodegroup')["resources"]["autoScalingGroups"]
for asg_name in response:
asg_groups.append(asg_name["name"])
    logs_pusher(regionName=regionName,cluster_name=Clustername,msg="The ASGs of the node groups are {inst}".format(inst=asg_groups))
return asg_groups
def filter_node_groups(cluster_name,node_list,latest_version,regionName):
client = boto3.client('eks',region_name=regionName)
''' filtering Node groups '''
old_ng=[]
for ng in node_list:
print("filter node group ",ng)
status,version=Desc_node_groups(Clustername=cluster_name,Nodegroup=ng,regionName=regionName)
if (status=="ACTIVE" or status=="UPDATING") and not version ==latest_version:
old_ng.append(ng)
    logs_pusher(regionName=regionName,cluster_name=cluster_name,msg="The Old Managed Node Groups Found Are {inst} ".format(inst=old_ng))
return old_ng
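# Illustrative sketch (not part of the original module): how the helpers above are
# typically chained to find node groups still running an older Kubernetes version;
# the cluster name, region and version used here are hypothetical.
def _example_find_outdated_nodegroups():
    node_groups = get_node_groups("demo-cluster", "us-east-1")
    return filter_node_groups("demo-cluster", node_groups, "1.21", "us-east-1")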
def lt_id_func(Clustername,Nodegroup,Version,regionName):
client = boto3.client('eks',region_name=regionName)
ec2=boto3.client('ec2',region_name=regionName)
res=client.describe_nodegroup(
clusterName=Clustername,
nodegroupName=Nodegroup
)
Lt_id=""
version_no=""
AmiType=res['nodegroup']['amiType']
if res["nodegroup"].get("launchTemplate"):
Lt_id,version_no=res['nodegroup']['launchTemplate']['id'],res['nodegroup']['launchTemplate']['version']
os_lt=ec2.describe_launch_template_versions(
LaunchTemplateId=Lt_id,
Versions=[version_no])
latest_ami=""
if AmiType=="CUSTOM":
current_ami=os_lt['LaunchTemplateVersions'][0]['LaunchTemplateData']['ImageId']
os_type=ec2.describe_images(
ImageIds=[
current_ami,
],
)['Images'][0]['ImageLocation']
if isinstance(os_type,str) and "Windows_Server" in os_type:
os_type=os_type[:46]
latest_ami=get_latestami(clustVersion=Version,instancetype=os_type,image_to_search=os_type,region_Name=regionName)
return AmiType,Lt_id,version_no,latest_ami
def update_current_launch_template_ami(lt_id,latest_ami,regionName):
ec2 = boto3.client('ec2',region_name=regionName)
response = ec2.create_launch_template_version(
LaunchTemplateId=lt_id,
SourceVersion="$Latest",
VersionDescription="Latest-AMI",
LaunchTemplateData={
"ImageId": latest_ami
}
)
print(f"New launch template created with AMI {latest_ami}")
def Update_nodeGroup(Clustername,Nodegroup,Version,regionName):
client = boto3.client('eks',region_name=regionName)
start=time.time()
''' updating Node group '''
ami_type,lt_id,old_version,latest_ami=lt_id_func(Clustername,Nodegroup,Version,regionName)
if ami_type=="CUSTOM":
update_current_launch_template_ami(lt_id,latest_ami,regionName)
while True:
try:
if status_of_cluster(Clustername,regionName)[0]=="ACTIVE" and Desc_node_groups(Clustername,Nodegroup,regionName)[0]=="ACTIVE" and Desc_node_groups(Clustername,Nodegroup,regionName)[1]!=Version:
if ami_type=="CUSTOM":
client.update_nodegroup_version(
clusterName=Clustername,
nodegroupName=Nodegroup,
launchTemplate={
'version':"$Latest",
'id': lt_id
},
)
else:
client.update_nodegroup_version(
clusterName=Clustername,
nodegroupName=Nodegroup,
version=Version,
)
started = time.time()
print("Updating Node Group ",Nodegroup)
time.sleep(20)
if Desc_node_groups(Clustername,Nodegroup,regionName)[0]=="UPDATING":
end=time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
print("The {Ng}".format(Ng=Nodegroup)+" NodeGroup is Still Updating ","{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
time.sleep(20)
if Desc_node_groups(Clustername,Nodegroup,regionName)[0]=="DEGRADED":
raise Exception("NodeGroup has not started due to unavailability ")
if Desc_node_groups(Clustername,Nodegroup,regionName)[0]=="ACTIVE" and Desc_node_groups(Clustername,Nodegroup,regionName)[1]==Version:
end=time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
print("The Time Taken For the NodeGroup Upgrade "+str(Nodegroup),"{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
logs_pusher(regionName=regionName,cluster_name=Clustername,msg="The Taken For the NodeGroup Upgrade "+"{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
return True
except Exception as e:
print(e)
raise Exception(e)
| 39.472222
| 229
| 0.654187
|
62fd6df25205857497c33406c99d3b00b3fd3493
| 1,324
|
py
|
Python
|
gdsfactory/samples/pdk/test_fab_c.py
|
gdsfactory/gdsfactory
|
e53b1f3415a81862d465e0443fc09fb35d14d1e0
|
[
"MIT"
] | 42
|
2020-05-25T09:33:45.000Z
|
2022-03-29T03:41:19.000Z
|
gdsfactory/samples/pdk/test_fab_c.py
|
gdsfactory/gdsfactory
|
e53b1f3415a81862d465e0443fc09fb35d14d1e0
|
[
"MIT"
] | 133
|
2020-05-28T18:29:04.000Z
|
2022-03-31T22:21:42.000Z
|
gdsfactory/samples/pdk/test_fab_c.py
|
gdsfactory/gdsfactory
|
e53b1f3415a81862d465e0443fc09fb35d14d1e0
|
[
"MIT"
] | 17
|
2020-06-30T07:07:50.000Z
|
2022-03-17T15:45:27.000Z
|
"""Test all the components in fab_c."""
import pathlib
import pytest
from pytest_regressions.data_regression import DataRegressionFixture
from gdsfactory.difftest import difftest
from gdsfactory.samples.pdk.fab_c import factory
component_names = list(factory.keys())
dirpath = pathlib.Path(__file__).absolute().with_suffix(".gds")
@pytest.fixture(params=component_names, scope="function")
def component_name(request) -> str:
return request.param
def test_gds(component_name: str) -> None:
"""Avoid regressions in GDS names, shapes and layers.
Runs XOR and computes the area."""
component = factory[component_name]()
test_name = f"fabc_{component_name}"
difftest(component, test_name=test_name, dirpath=dirpath)
def test_settings(component_name: str, data_regression: DataRegressionFixture) -> None:
"""Avoid regressions in component settings and ports."""
component = factory[component_name]()
data_regression.check(component.to_dict())
def test_assert_ports_on_grid(component_name: str):
"""Ensures all ports are on grid to avoid 1nm gaps"""
component = factory[component_name]()
component.assert_ports_on_grid()
if __name__ == "__main__":
print(component_names)
c = factory[component_names[0]]()
difftest(c, test_name=f"fabc_{component_names[0]}")
| 30.090909
| 87
| 0.752266
|
4b4536df1817f0dd2c38e6f9780674e48838066e
| 65
|
py
|
Python
|
cron_translator/exception.py
|
pygaur/cron-translator
|
c018ef7c4720290b6cef32b09da0d24858ce20c2
|
[
"MIT"
] | null | null | null |
cron_translator/exception.py
|
pygaur/cron-translator
|
c018ef7c4720290b6cef32b09da0d24858ce20c2
|
[
"MIT"
] | null | null | null |
cron_translator/exception.py
|
pygaur/cron-translator
|
c018ef7c4720290b6cef32b09da0d24858ce20c2
|
[
"MIT"
] | null | null | null |
"""
"""
class CronParsingException(Exception):
"""
"""
| 8.125
| 38
| 0.523077
|
1b41128a2b68b1635cf1e35e1f8742a7737efc5f
| 1,901
|
py
|
Python
|
data/p3BR/R2/benchmark/startPyquil220.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startPyquil220.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startPyquil220.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=2
# total number=41
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += CNOT(0,2) # number=11
prog += CNOT(0,2) # number=31
prog += H(2) # number=38
prog += CZ(0,2) # number=39
prog += H(2) # number=40
prog += X(2) # number=36
prog += CNOT(0,2) # number=37
prog += CNOT(0,2) # number=33
prog += H(2) # number=25
prog += CZ(0,2) # number=26
prog += H(2) # number=27
prog += H(1) # number=7
prog += CZ(2,1) # number=8
prog += RX(0.17592918860102857,2) # number=34
prog += RX(-0.3989822670059037,1) # number=30
prog += H(1) # number=9
prog += H(1) # number=18
prog += CZ(2,1) # number=19
prog += H(1) # number=20
prog += Y(1) # number=14
prog += H(1) # number=22
prog += CZ(2,1) # number=23
prog += H(1) # number=24
prog += Z(2) # number=3
prog += X(1) # number=17
prog += Y(2) # number=5
prog += X(2) # number=21
prog += CNOT(1,0) # number=15
prog += CNOT(1,0) # number=16
prog += X(2) # number=28
prog += X(2) # number=29
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
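# Illustrative sketch (not part of the original script): summrise_results simply counts
# how many times each measured bitstring occurred.
def _example_summrise_results():
    return summrise_results(['00', '01', '00'])  # -> {'00': 2, '01': 1}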
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('1q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil220.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| 25.346667
| 64
| 0.570226
|
e31f48d69d94a0c7da76c917075323cec149903b
| 6,031
|
py
|
Python
|
electrum/gui/qt/update_checker.py
|
HyperPeek/electrum-raven
|
057484e957dd795286cf5822489953a6c8232ac2
|
[
"MIT"
] | null | null | null |
electrum/gui/qt/update_checker.py
|
HyperPeek/electrum-raven
|
057484e957dd795286cf5822489953a6c8232ac2
|
[
"MIT"
] | null | null | null |
electrum/gui/qt/update_checker.py
|
HyperPeek/electrum-raven
|
057484e957dd795286cf5822489953a6c8232ac2
|
[
"MIT"
] | 1
|
2021-03-18T19:14:16.000Z
|
2021-03-18T19:14:16.000Z
|
# Copyright (C) 2019 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import asyncio
import base64
from distutils.version import LooseVersion
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QLabel, QProgressBar,
QHBoxLayout, QPushButton)
from electrum import version
from electrum import constants
from electrum import ecc
from electrum.i18n import _
from electrum.util import make_aiohttp_session
from electrum.logging import Logger
class UpdateCheck(QWidget, Logger):
url = "https://raw.githubusercontent.com/kralverde/electrum-raven/master/check-version.json"
download_url = "https://github.com/kralverde/electrum-raven/releases"
VERSION_ANNOUNCEMENT_SIGNING_KEYS = (
"RPuQNvDVBC5Q4fXKyfYLjrunbyqiEYckP5",
)
def __init__(self, main_window, latest_version=None):
self.main_window = main_window
QWidget.__init__(self)
self.setWindowTitle('Electrum-RVN - ' + _('Update Check'))
self.content = QVBoxLayout()
self.content.setContentsMargins(*[10]*4)
self.heading_label = QLabel()
self.content.addWidget(self.heading_label)
self.detail_label = QLabel()
self.detail_label.setTextInteractionFlags(Qt.LinksAccessibleByMouse)
self.detail_label.setOpenExternalLinks(True)
self.content.addWidget(self.detail_label)
self.pb = QProgressBar()
self.pb.setMaximum(0)
self.pb.setMinimum(0)
self.content.addWidget(self.pb)
versions = QHBoxLayout()
versions.addWidget(QLabel(_("Current version: {}".format(version.ELECTRUM_VERSION))))
self.latest_version_label = QLabel(_("Latest version: {}".format(" ")))
versions.addWidget(self.latest_version_label)
self.content.addLayout(versions)
self.update_view(latest_version)
self.update_check_thread = UpdateCheckThread(self.main_window)
self.update_check_thread.checked.connect(self.on_version_retrieved)
self.update_check_thread.failed.connect(self.on_retrieval_failed)
self.update_check_thread.start()
close_button = QPushButton(_("Close"))
close_button.clicked.connect(self.close)
self.content.addWidget(close_button)
self.setLayout(self.content)
self.show()
def on_version_retrieved(self, version):
self.update_view(version)
def on_retrieval_failed(self):
self.heading_label.setText('<h2>' + _("Update check failed") + '</h2>')
self.detail_label.setText(_("Sorry, but we were unable to check for updates. Please try again later."))
self.pb.hide()
@staticmethod
def is_newer(latest_version):
return latest_version > LooseVersion(version.ELECTRUM_VERSION)
def update_view(self, latest_version=None):
if latest_version:
self.pb.hide()
self.latest_version_label.setText(_("Latest version: {}".format(latest_version)))
if self.is_newer(latest_version):
self.heading_label.setText('<h2>' + _("There is a new update available") + '</h2>')
url = "<a href='{u}'>{u}</a>".format(u=UpdateCheck.download_url)
self.detail_label.setText(_("You can download the new version from {}.").format(url))
else:
self.heading_label.setText('<h2>' + _("Already up to date") + '</h2>')
self.detail_label.setText(_("You are already on the latest version of Electrum."))
else:
self.heading_label.setText('<h2>' + _("Checking for updates...") + '</h2>')
self.detail_label.setText(_("Please wait while Electrum checks for available updates."))
class UpdateCheckThread(QThread, Logger):
checked = pyqtSignal(object)
failed = pyqtSignal()
def __init__(self, main_window):
QThread.__init__(self)
Logger.__init__(self)
self.main_window = main_window
async def get_update_info(self):
async with make_aiohttp_session(proxy=self.main_window.network.proxy) as session:
async with session.get(UpdateCheck.url) as result:
signed_version_dict = await result.json(content_type=None)
# example signed_version_dict:
# {
# "version": "3.9.9",
# "signatures": {
# "1Lqm1HphuhxKZQEawzPse8gJtgjm9kUKT4": "IA+2QG3xPRn4HAIFdpu9eeaCYC7S5wS/sDxn54LJx6BdUTBpse3ibtfq8C43M7M1VfpGkD5tsdwl5C6IfpZD/gQ="
# }
# }
version_num = signed_version_dict['version']
sigs = signed_version_dict['signatures']
for address, sig in sigs.items():
if address not in UpdateCheck.VERSION_ANNOUNCEMENT_SIGNING_KEYS:
continue
sig = base64.b64decode(sig)
msg = version_num.encode('utf-8')
if ecc.verify_message_with_address(address=address, sig65=sig, message=msg,
net=constants.BitcoinMainnet):
self.logger.info(f"valid sig for version announcement '{version_num}' from address '{address}'")
break
else:
raise Exception('no valid signature for version announcement')
return LooseVersion(version_num.strip())
def run(self):
network = self.main_window.network
if not network:
self.failed.emit()
return
try:
update_info = asyncio.run_coroutine_threadsafe(self.get_update_info(), network.asyncio_loop).result()
except Exception as e:
self.logger.info(f"got exception: '{repr(e)}'")
self.failed.emit()
else:
self.checked.emit(update_info)
| 42.174825
| 154
| 0.641187
|
d232665993c029b4d73f8afaa64453368654cce9
| 2,773
|
py
|
Python
|
cfgparser/targets/target.py
|
jejer/iptables-inspector
|
4d05ece83d8eacff7aeb43f8798d7cf6c6bc1d5c
|
[
"Apache-2.0"
] | null | null | null |
cfgparser/targets/target.py
|
jejer/iptables-inspector
|
4d05ece83d8eacff7aeb43f8798d7cf6c6bc1d5c
|
[
"Apache-2.0"
] | null | null | null |
cfgparser/targets/target.py
|
jejer/iptables-inspector
|
4d05ece83d8eacff7aeb43f8798d7cf6c6bc1d5c
|
[
"Apache-2.0"
] | null | null | null |
from cfgparser.targets.mark import Mark
from cfgparser.targets.dnat import DNAT
from cfgparser.targets.masquerade import Masquerade
from colorama import Fore
# http://ipset.netfilter.org/iptables.man.html
def NewTarget(target, raw):
unsupported = [
"AUDIT",
"CHECKSUM",
"CLASSIFY",
"CLUSTERIP",
"CONNMARK",
"CONNSECMARK",
"CT",
# "DNAT",
"DNPT",
"DSCP",
"ECN",
"HL",
"HMARK",
"IDLETIMER",
"LED",
"LOG",
# "MARK",
# "MASQUERADE",
"MIRROR",
"NETMAP",
"NFLOG",
"NFQUEUE",
"NOTRACK",
"RATEEST",
"REDIRECT",
"REJECT",
"REJECT",
"SAME",
"SECMARK",
"SET",
"SNAT",
"SNPT",
"TCPMSS",
"TCPOPTSTRIP",
"TEE",
"TOS",
"TPROXY",
"TRACE",
"TTL",
"ULOG",
]
if target == "ACCEPT":
return Accept()
elif target == "DROP":
return Drop()
elif target == "QUEUE":
return Trivial(target, raw)
elif target == "RETURN":
return Return()
elif target == "MARK":
return Mark(raw)
elif target == "DNAT":
return DNAT(raw)
elif target == "MASQUERADE":
return Masquerade(raw)
elif target in unsupported:
print("UNSUPPORTED TARGET: " + target + " [" + raw + "]")
return Trivial(target, raw)
else:
return Jump(target)
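# Illustrative sketch (not part of the original module): NewTarget maps built-in iptables
# targets to their handler classes and treats any unrecognized name as a jump to a
# user-defined chain; the chain name below is hypothetical.
def _example_new_target():
    accept = NewTarget("ACCEPT", "-j ACCEPT")                # Accept instance
    custom = NewTarget("KUBE-SERVICES", "-j KUBE-SERVICES")  # Jump to a custom chain
    return accept, custom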
def NewGoto(target, raw):
return Goto(target)
class Accept(object):
def process(self, packet, runner):
print(Fore.RED + "TARGET ACCEPT")
return "ACCEPT"
class Drop(object):
def process(self, packet, runner):
print(Fore.RED + "TARGET DROP")
return "DROP"
class Return(object):
def process(self, packet, runner):
packet.stack_pop().stack_next_rule()
print(Fore.RED + "TARGET RETURN")
return "CONTINUE"
class Jump(object):
def __init__(self, chain):
self.chain = chain
def process(self, packet, runner):
packet.stack_push(self.chain)
print(Fore.RED + "TARGET JUMP: " + self.chain)
return "CONTINUE"
class Goto(object):
def __init__(self, chain):
self.chain = chain
def process(self, packet, runner):
packet.stack_pop().stack_push(self.chain)
print(Fore.RED + "TARGET GOTO: " + self.chain)
return "CONTINUE"
class Trivial(object):
def __init__(self, name, raw):
self.name = name
self.raw = raw
def process(self, packet, runner):
packet.stack_next_rule()
print(Fore.RED + "UNSUPPORTED TARGET " + self.name + " : " + self.raw)
return "CONTINUE"
| 24.324561
| 78
| 0.539127
|
7c74fb37e231ec0e240f8900ac147e94a95c4fdd
| 1,733
|
py
|
Python
|
src/Compare.py
|
akhof/Python-Sorting-Algorithms
|
55e7c71aa3fe4f9bf68e17101b581ee1163ec770
|
[
"MIT"
] | 1
|
2020-08-06T06:19:59.000Z
|
2020-08-06T06:19:59.000Z
|
src/Compare.py
|
akhof/Python-Sorting-Algorithms
|
55e7c71aa3fe4f9bf68e17101b581ee1163ec770
|
[
"MIT"
] | null | null | null |
src/Compare.py
|
akhof/Python-Sorting-Algorithms
|
55e7c71aa3fe4f9bf68e17101b581ee1163ec770
|
[
"MIT"
] | 1
|
2020-09-04T12:55:46.000Z
|
2020-09-04T12:55:46.000Z
|
from PySortAlgos import BadAlgorithm, BubbleSort, SelectionSort, InsertSort, QuickSort, MergeSort, HeapSort, isSorted
import time, random
START_E = 2 # 2^2 = 4
MAX_E = 22 # 2^22 = 4.194.304
TRIES = 1
TOTEST = (
#(Class, Name, Max)
(BadAlgorithm, "BadAlgorithm", 3),
(BubbleSort, "BubbleSort", 12),
(SelectionSort, "SelectionSort", 13),
(InsertSort, "InsertSort", 14),
(HeapSort, "HeapSort", 19),
(MergeSort, "MergeSort", 22),
(QuickSort, "QuickSort", 22),
)
nameList = ""
for r in TOTEST:
nameList += ("\t\t" if len(nameList) > 0 else "") + r[1]
print("Elements\t\t" + nameList)
e = START_E
while e <= MAX_E:
res = []
max = -1
for r in TOTEST:
if e > r[2]:
res.append(None)
else:
sum = 0
for _ in range(TRIES):
l = list(range(0, 2**e))
random.shuffle(l)
t1 = time.time()
sortedlist = r[0]().sort(l)
sum += time.time() - t1
if not isSorted(sortedlist):
print("Error in " + r[1])
res.append(sum / TRIES)
if res[-1] > max:
max = res[-1]
tp = "2^" + str(e) + " = " + str(2**e) + " \t\t"
for v in res:
if v == None:
tp += " \t\t"
else:
p = "0"
if max > 0:
p = str(round(100*v/max, 2))
tp += str(round(v, 5)) + "s (" + p + "%)"
if v == 0: tp += " "
tp += " \t"
print(tp)
e += 1
print("\nFinish :)")
| 26.661538
| 118
| 0.415465
|
882da46a8d79bfee37d7ed66108feb809d4c631a
| 2,074
|
py
|
Python
|
software/instructions.py
|
fbcbarbosa/DigCam
|
ce72e18cd76fb0d24487d7b7c24e1ea3efa36071
|
[
"MIT"
] | null | null | null |
software/instructions.py
|
fbcbarbosa/DigCam
|
ce72e18cd76fb0d24487d7b7c24e1ea3efa36071
|
[
"MIT"
] | null | null | null |
software/instructions.py
|
fbcbarbosa/DigCam
|
ce72e18cd76fb0d24487d7b7c24e1ea3efa36071
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
"""
This module implements an instruction database.
"""
class Instruction:
"""
    This class implements an instruction.
"""
def __init__(self, number, name, description):
"""
Creates a new instruction.
PARAMETERS
number - int: an integer between 0 and 255
name - string: call name of the instruction
description - string: description of the instruction
"""
self.code = chr(number);
self.name = name;
self.description = description;
__database = {}
def initDatabase():
add(Instruction(1, 'on', 'Turns on the camera'))
add(Instruction(2, 'off', 'Turns off the camera'))
add(Instruction(3, 'photo', 'Take a picture'))
add(Instruction(4, 'reset', 'Executes software reset'))
add(Instruction(5, 'status', 'Prints camera status'))
add(Instruction(6, 'read0', 'Reads register table 0xFF = 0'))
add(Instruction(7, 'read1', 'Reads register table 0xFF = 1'))
def add(instruction):
"""
Adds a new instruction to the database.
"""
__database[instruction.name] = [instruction.code, instruction.description]
def listAll():
"""
Lists all instructions and their descriptions.
"""
for k in sorted(__database.keys()):
print '\t' + k + '\t\t' + __database[k][1]
def encode(command):
"""
Interpret the command according to the Instruction Database and return its code.
Handles errors.
Keyword arguments:
command - string: the command given
"""
words = command.split() # split the command in single words
n = len(words)
ch1 = chr(0); # PIC instruction code
try:
if n == 0:
print "Error: '" + command + "'" + " is not a valid command!"
return -1
elif n == 1:
ch1 = __database[command][0]
else:
raise KeyError
except KeyError:
print "Error: '" + command + "'" + " is not a valid command!"
return -1
return ch1
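# A minimal usage sketch (not part of the original module): populate the database and
# encode one of its commands into its one-byte code. The helper name is invented for
# this illustration; it is kept in the module's Python 2 print style.
def _demo_encode():
    initDatabase()
    code = encode('photo')
    if code != -1:
        print "'photo' encodes to byte %d" % ord(code)   # chr(3) -> 3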
| 28.027027
| 85
| 0.588717
|
5d6eabeb9755babf1db4f616fcd7be810d1036ba
| 4,434
|
py
|
Python
|
ncclient/capabilities.py
|
NorthLandTeam/ncclient
|
ff6bba74c3304f0a5053087449f5a51e8eb13ed4
|
[
"Apache-2.0"
] | 498
|
2015-10-21T18:43:23.000Z
|
2022-03-29T17:27:59.000Z
|
ncclient/capabilities.py
|
NorthLandTeam/ncclient
|
ff6bba74c3304f0a5053087449f5a51e8eb13ed4
|
[
"Apache-2.0"
] | 440
|
2015-09-07T23:43:01.000Z
|
2022-03-17T11:43:16.000Z
|
ncclient/capabilities.py
|
NorthLandTeam/ncclient
|
ff6bba74c3304f0a5053087449f5a51e8eb13ed4
|
[
"Apache-2.0"
] | 330
|
2015-09-10T16:53:50.000Z
|
2022-03-31T12:24:53.000Z
|
# Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import six
logger = logging.getLogger("ncclient.capabilities")
def _abbreviate(uri):
if uri.startswith("urn:ietf:params") and ":netconf:" in uri:
splitted = uri.split(":")
if ":capability:" in uri:
if uri.startswith("urn:ietf:params:xml:ns:netconf"):
name, version = splitted[7], splitted[8]
else:
name, version = splitted[5], splitted[6]
return [ ":" + name, ":" + name + ":" + version ]
elif ":base:" in uri:
if uri.startswith("urn:ietf:params:xml:ns:netconf"):
return [ ":base", ":base" + ":" + splitted[7] ]
else:
return [ ":base", ":base" + ":" + splitted[5] ]
return []
def schemes(url_uri):
"Given a URI that has a *scheme* query string (i.e. `:url` capability URI), will return a list of supported schemes."
return url_uri.partition("?scheme=")[2].split(",")
class Capabilities(object):
"Represents the set of capabilities available to a NETCONF client or server. It is initialized with a list of capability URI's."
def __init__(self, capabilities):
self._dict = {}
for uri in capabilities:
self.add(uri)
def __contains__(self, key):
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
def __getitem__(self, key):
try:
return self._dict[key]
except KeyError:
for capability in six.itervalues(self._dict):
if key in capability.get_abbreviations():
return capability
raise KeyError(key)
def __len__(self):
return len(self._dict)
# python 2 and 3 compatible
def __iter__(self):
return six.iterkeys(self._dict)
def __repr__(self):
return repr(six.iterkeys(self._dict))
def add(self, uri):
"Add a capability."
self._dict[uri] = Capability.from_uri(uri)
def remove(self, uri):
"Remove a capability."
if uri in self._dict:
del self._dict[uri]
class Capability(object):
"""Represents a single capability"""
def __init__(self, namespace_uri, parameters=None):
self.namespace_uri = namespace_uri
self.parameters = parameters or {}
@classmethod
def from_uri(cls, uri):
split_uri = uri.split("?")
namespace_uri = split_uri[0]
capability = cls(namespace_uri)
try:
param_string = split_uri[1]
except IndexError:
return capability
capability.parameters = {
param.key: param.value
for param in _parse_parameter_string(param_string, uri)
}
return capability
def __eq__(self, other):
return (
self.namespace_uri == other.namespace_uri and
self.parameters == other.parameters
)
def get_abbreviations(self):
return _abbreviate(self.namespace_uri)
def _parse_parameter_string(string, uri):
for param_string in string.split("&"):
try:
yield _Parameter.from_string(param_string)
except _InvalidParameter:
logger.error(
"Invalid parameter '{param}' in capability URI '{uri}'".format(
param=param_string,
uri=uri,
)
)
class _Parameter(object):
"""Represents a parameter to a capability"""
def __init__(self, key, value):
self.key = key
self.value = value
@classmethod
def from_string(cls, string):
try:
key, value = string.split("=")
except ValueError:
raise _InvalidParameter
return cls(key, value)
class _InvalidParameter(Exception):
pass
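# A minimal usage sketch (not part of the original module) of the abbreviation lookup
# above: a Capabilities object built from full NETCONF capability URIs can be queried
# with the shorthand keys produced by _abbreviate(). The two URIs are standard NETCONF
# capability URIs chosen for illustration; the helper name is invented.
def _demo_capability_lookup():
    caps = Capabilities([
        "urn:ietf:params:netconf:base:1.0",
        "urn:ietf:params:netconf:capability:startup:1.0",
    ])
    assert ":base" in caps and ":base:1.0" in caps
    assert ":startup" in caps and ":startup:1.0" in caps
    return caps[":base"].namespace_uri  # -> "urn:ietf:params:netconf:base:1.0"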
| 27.886792
| 132
| 0.601037
|
22987266824c40bf31ed6f225f93544f7e37223c
| 24,469
|
py
|
Python
|
ggplot/ggplot.py
|
mokkyl/ggpy
|
2e72907d6f62dbcf501027e6c9c2d27179bc6f5f
|
[
"BSD-2-Clause"
] | null | null | null |
ggplot/ggplot.py
|
mokkyl/ggpy
|
2e72907d6f62dbcf501027e6c9c2d27179bc6f5f
|
[
"BSD-2-Clause"
] | null | null | null |
ggplot/ggplot.py
|
mokkyl/ggpy
|
2e72907d6f62dbcf501027e6c9c2d27179bc6f5f
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.colors import LinearSegmentedColormap
import six
import numpy as np
import pandas as pd
import warnings
from .aes import aes
from .legend import make_legend
from .themes import theme_gray
from .themes import element_text
from . import discretemappers
from .utils import format_ticks, sorted_unique
import urllib
import base64
import os
if os.environ.get("GGPLOT_DEV"):
from PIL import Image
class ggplot(object):
"""
ggplot is the base layer or object that you use to define
the components of your chart (x and y axis, shapes, colors, etc.).
You can combine it with layers (or geoms) to make complex graphics
with minimal effort.
Parameters
-----------
aesthetics : aes (ggplot.components.aes.aes)
aesthetics of your plot
data : pandas DataFrame (pd.DataFrame)
a DataFrame with the data you want to plot
Examples
----------
>>> p = ggplot(aes(x='carat', y='price'), data=diamonds)
>>> print(p + geom_point())
"""
CONTINUOUS = ['x', 'y', 'size', 'alpha']
DISCRETE = ['color', 'shape', 'marker', 'alpha', 'linestyle']
def __init__(self, aesthetics, data):
# figure out which is which between data and aesthetics
if not isinstance(data, pd.DataFrame):
aesthetics, data = data, aesthetics
self._aes = aesthetics
self.data = data.copy()
self._handle_index()
self.data = self._aes._evaluate_expressions(self.data)
self.data = self._aes.handle_identity_values(self.data)
self.layers = []
# labels
self.title = None
self.xlab = None
self.ylab = None
# limits
self.xlimits = None
self.ylimits = None
# themes
self.theme = theme_gray()
# scales
self.scales = []
self.scale_identity = set()
# margins
self.margins = None
self.scale_x_log = None
self.scale_y_log = None
self.scale_x_reverse = None
self.scale_y_reverse = None
# generic axis text
self.axis_text = None
self.xbreaks = None
self.xtick_labels = None
self.xtick_formatter = None
self.x_axis_text = None
self.ybreaks = None
self.ytick_labels = None
self.ytick_formatter = None
self.y_axis_text = None
# faceting
self.grid = None
self.facets = None
# colors
self.colormap = None
self.manual_color_list = []
# fill (pretty much the same as colors)
self.manual_fill_list = []
# coordinate system
self.coords = None
def __repr__(self):
self.make()
# this is nice for dev but not the best for "real"
if os.environ.get("GGPLOT_DEV"):
self.fig.savefig('/tmp/ggplot-%d.png' % self.__hash__(), dpi=160)
img = Image.open('/tmp/ggplot-%d.png' % self.__hash__())
img.show()
return "<ggplot: (%d)>" % self.__hash__()
plt.show()
return "<ggplot: (%d)>" % self.__hash__()
def show(self):
"""
Builds and displays your plot.
"""
self.make()
plt.show()
def _handle_index(self):
if '__index__' in self._aes.values():
self.data['__index__'] = self.data.index
def add_labels(self):
labels = [(self.fig.suptitle, self.title)] #, (plt.xlabel, self.xlab), (plt.ylabel, self.ylab)]
for mpl_func, label in labels:
if label:
if isinstance(label, (str, six.text_type)):
label = element_text(label)
label.override(0.5, 0.95)
label.apply_to_fig(self.fig)
if not self.facets:
return
if self.facets.is_wrap:
return
if self.facets.rowvar:
for row, name in enumerate(sorted_unique(self.data[self.facets.rowvar])):
if self.facets.colvar:
ax = self.subplots[row][-1]
else:
ax = self.subplots[row]
ax.yaxis.set_label_position("right")
ax.yaxis.labelpad = 10
ax.set_ylabel(name, fontsize=10, rotation=-90)
if self.facets.colvar:
for col, name in enumerate(sorted_unique(self.data[self.facets.colvar])):
if len(self.subplots.shape) > 1:
col = col % self.facets.ncol
ax = self.subplots[0][col]
else:
ax = self.subplots[col]
ax.set_title(name, fontdict={'fontsize': 10})
def apply_limits(self):
for ax in self._iterate_subplots():
if self.xlimits:
ax.set_xlim(self.xlimits)
if self.ylimits:
ax.set_ylim(self.ylimits)
def apply_scales(self):
for scale in self.scales:
scale.apply()
def apply_theme(self):
if self.theme:
rcParams = self.theme.get_rcParams()
for key, val in rcParams.items():
# there is a bug in matplotlib which does not allow None directly
# https://github.com/matplotlib/matplotlib/issues/2543
try:
if key == 'text.dvipnghack' and val is None:
val = "none"
mpl.rcParams[key] = val
except Exception as e:
msg = """Setting "mpl.rcParams['%s']=%s" raised an Exception: %s""" % (key, str(val), str(e))
warnings.warn(msg, RuntimeWarning)
def apply_coords(self):
if self.coords=="equal":
for ax in self._iterate_subplots():
min_val = np.min([np.min(ax.get_yticks()), np.min(ax.get_xticks())])
max_val = np.max([np.max(ax.get_yticks()), np.max(ax.get_xticks())])
ax.set_xticks(np.linspace(min_val, max_val, 7))
ax.set_yticks(np.linspace(min_val, max_val, 7))
elif self.coords=="flip":
if 'x' in self._aes.data and 'y' in self._aes.data:
x = self._aes.data['x']
y = self._aes.data['y']
self._aes.data['x'] = y
self._aes.data['y'] = x
if self.margins:
plt.subplots_adjust(**self.margins)
def apply_axis_labels(self):
if self.xlab:
xlab = self.xlab
if not isinstance(xlab, six.string_types):
if xlab.args[2]=="":
xlab.args[2] = self._aes.get('x')
else:
xlab = self._aes.get('x')
if self.xbreaks:
for ax in self._iterate_subplots():
ax.xaxis.set_ticks(self.xbreaks)
if self.xtick_labels:
if isinstance(self.xtick_labels, list):
for ax in self._iterate_subplots():
ax.xaxis.set_ticklabels(self.xtick_labels)
if self.xtick_formatter:
for ax in self._iterate_subplots():
labels = []
for label_text in ax.get_xticks():
labels.append(self.xtick_formatter(label_text))
ax.xaxis.set_ticklabels(labels)
if self.ybreaks:
for ax in self._iterate_subplots():
ax.yaxis.set_ticks(self.ybreaks)
if self.ytick_labels:
if isinstance(self.ytick_labels, list):
for ax in self._iterate_subplots():
ax.yaxis.set_ticklabels(self.ytick_labels)
if self.ytick_formatter:
for ax in self._iterate_subplots():
labels = [self.ytick_formatter(label) for label in ax.get_yticks()]
ax.yaxis.set_ticklabels(labels)
if self.axis_text:
for ax in self._iterate_subplots():
xticks = format_ticks(ax.get_xticks())
ax.set_xticklabels(xticks, **self.axis_text.kwargs)
yticks = format_ticks(ax.get_yticks())
ax.set_yticklabels(yticks, **self.axis_text.kwargs)
if self.x_axis_text:
for ax in self._iterate_subplots():
xticks = format_ticks(ax.get_xticks())
ax.set_xticklabels(xticks, **self.x_axis_text.kwargs)
if self.y_axis_text:
for ax in self._iterate_subplots():
yticks = format_ticks(ax.get_yticks())
ax.set_yticklabels(yticks, **self.y_axis_text.kwargs)
if xlab:
if isinstance(xlab, (str, six.text_type)):
xlab = element_text(xlab)
# enforce it to be an x-label
xlab.override(0.5, 0.05)
xlab.apply_to_fig(self.fig)
if self.ylab:
ylab = self.ylab
if not isinstance(ylab, six.string_types):
if ylab.args[2]=="":
ylab.args[2] = self._aes.get('y')
else:
ylab = self._aes.get('y', '')
if isinstance(ylab, (str, six.text_type)):
ylab = element_text(ylab)
if ylab:
# enforce it to be a y-label
ylab.override(0.05, 0.5, dict(rotation='vertical'))
ylab.apply_to_fig(self.fig)
def _iterate_subplots(self):
"""
'Flat' iterator for subplots. Lets you do a for-loop over each subplot,
which can be very handy.
"""
try:
return self.subplots.flat
except Exception as e:
return [self.subplots]
def apply_axis_scales(self):
if self.scale_x_log:
for ax in self._iterate_subplots():
ax.set_xscale('log', basex=self.scale_x_log)
if self.scale_y_log:
for ax in self._iterate_subplots():
ax.set_yscale('log', basey=self.scale_y_log)
if self.scale_x_reverse:
for ax in self._iterate_subplots():
ax.invert_xaxis()
if self.scale_y_reverse:
for ax in self._iterate_subplots():
ax.invert_yaxis()
def add_legend(self, legend):
if legend:
plt.subplots_adjust(right=0.825)
if self.facets:
if len(self.subplots.shape) > 1:
i, j = self.subplots.shape
i, j = int((i - 1) / 2), int(j - 1)
ax = self.subplots[i][j]
make_legend(ax, legend)
elif self.facets.rowvar:
i, = self.subplots.shape
i = int((i - 1) / 2)
ax = self.subplots[i]
make_legend(ax, legend)
elif self.facets.colvar:
ax = self.subplots[-1]
make_legend(ax, legend)
else:
make_legend(self.subplots, legend)
def _get_mapping(self, aes_type, colname):
"""
Converts a discrete aesthetic to a value that will be displayed. For example
from "a" => "#4682B4".
"""
mapping = None
if aes_type=="color":
mapping = discretemappers.color_gen(self.data[colname].nunique(), colors=self.manual_color_list)
elif aes_type=="fill":
mapping = discretemappers.color_gen(self.data[colname].nunique(), colors=self.manual_fill_list)
elif aes_type=="shape":
mapping = discretemappers.shape_gen()
elif aes_type=="linetype":
mapping = discretemappers.linetype_gen()
elif aes_type=="size":
mapping = discretemappers.size_gen(self.data[colname].unique())
elif aes_type=="group":
mapping = discretemappers.identity_gen(self.data[colname].unique())
return mapping
def _construct_plot_data(self):
"Splits up the main data based on discrete aesthetics into sub-data frames"
data = self.data
discrete_aes = self._aes._get_discrete_aes(data)
mappers = {}
for aes_type, colname in discrete_aes:
mapper = {}
if aes_type in self.scale_identity:
for item in sorted(data[colname].unique()):
mapper[item] = item
else:
mapping = self._get_mapping(aes_type, colname)
if mapping is None:
continue
for item in sorted(data[colname].unique()):
mapper[item] = next(mapping)
mappers[aes_type] = { "name": colname, "lookup": mapper }
data[colname + "_" + aes_type] = self.data[colname].apply(lambda x: mapper[x])
self._aes.data[aes_type] = colname + "_" + aes_type
discrete_aes_types = [aes_type for aes_type, _ in discrete_aes]
# checks for continuous aesthetics that can also be discrete (color, alpha, fill, linewidth???)
if "color" in self._aes.data and "color" not in discrete_aes_types:
# This is approximate, going to roll with it
if self.colormap:
self._aes.data['colormap'] = cmap = self.colormap
else:
self._aes.data['colormap'] = cmap = LinearSegmentedColormap.from_list('gradient2n', ['#1f3347', '#469cef'])
colname = self._aes.data['color']
quantiles_actual = quantiles = data[colname].quantile([0., .2, 0.4, 0.5, 0.6, 0.75, 1.0])
# TODO: NOT SURE IF THIS ACTUALLY WORKS WELL. could get a divide by 0 error
quantiles = (quantiles - quantiles.min()) / (quantiles.max()) # will be bug if max is 0
mappers['color'] = { "name": colname, "lookup": {} }
colors = cmap(quantiles)
for i, q in enumerate(quantiles_actual):
mappers['color']['lookup'][q] = colors[i]
data[colname] = (data[colname] - data[colname].min()) / data[colname].max()
if "alpha" in self._aes.data and "alpha" not in discrete_aes_types:
colname = self._aes.data['alpha']
quantiles = data[colname].quantile([0., .2, 0.4, 0.5, 0.6, 0.75, 0.95])
# TODO: NOT SURE IF THIS ACTUALLY WORKS WELL. could get a divide by 0 error
quantiles_scaled = (quantiles - quantiles.min()) / (quantiles.max()) # will be bug if max is 0
mappers['alpha'] = {
"name": colname,
"lookup": dict(zip(quantiles.values, quantiles_scaled.values))
}
data[colname] = (data[colname] - data[colname].min()) / data[colname].max()
discrete_aes.append(('alpha', colname))
if "size" in self._aes.data and "size" not in discrete_aes_types:
colname = self._aes.data['size']
quantiles = data[colname].quantile([0., .2, 0.4, 0.5, 0.6, 0.75, 0.95])
# TODO: NOT SURE IF THIS ACTUALLY WORKS WELL. could get a divide by 0 error
quantiles_scaled = (quantiles - quantiles.min()) / (quantiles.max()) # will be bug if max is 0
mappers['size'] = {
"name": colname,
"lookup": dict(zip(quantiles.values, 100 * quantiles_scaled.values))
}
data[colname] = 100 * (data[colname] - data[colname].min()) / data[colname].max()
discrete_aes.append(('size', colname))
groups = [column for _, column in discrete_aes]
if groups:
return mappers, data.groupby(groups)
else:
return mappers, [(0, data)]
def make_facets(self):
"Creates figure and axes for m x n facet grid/wrap"
sharex, sharey = True, True
if self.facets:
if self.facets.scales=="free":
sharex, sharey = False, False
elif self.facets.scales=="free_x":
sharex, sharey = False, True
elif self.facets.scales=="free_y":
sharex, sharey = True, False
facet_params = dict(sharex=sharex, sharey=sharey)
nrow, ncol = self.facets.nrow, self.facets.ncol
facet_params['nrows'] = nrow
facet_params['ncols'] = ncol
if self.coords=="polar":
facet_params['subplot_kw'] = { "polar": True }
fig, axs = plt.subplots(**facet_params)
return fig, axs
def get_subplot(self, row, col):
"Fetches subplot corresponding to row/column"
if row is not None and col is not None:
return self.subplots[int(row)][col]
elif row is not None:
return self.subplots[int(row)]
elif col is not None:
return self.subplots[int(col)]
else:
raise Exception("row and col were none!" + str(row) + ", " + str(col))
def get_facet_groups(self, group):
"???"
if self.facets is None:
yield (self.subplots, group)
return
col_variable = self.facets.colvar
row_variable = self.facets.rowvar
if self.facets.is_wrap==True:
groups = [row_variable, col_variable]
groups = [g for g in groups if g]
for (i, (name, subgroup)) in enumerate(group.groupby(groups)):
# TODO: doesn't work when these get mapped to discrete values.
# this only happens when a field is being used both as a facet parameter AND as a discrete aesthetic (i.e. shape)
row, col = self.facets.facet_map[name]
if len(self.subplots.shape)==1:
ax = self.subplots[i]
else:
ax = self.get_subplot(row, col)
font = { 'fontsize': 10 }
yield (ax, subgroup)
for item in self.facets.generate_subplot_index(self.data, self.facets.rowvar, self.facets.colvar):
row, col = self.facets.facet_map[item]
ax = self.get_subplot(row, col)
if isinstance(item, tuple):
title = ", ".join([str(i) for i in item])
else:
title = str(item)
ax.set_title(title, fontdict=font)
# remove axes that aren't being used
for i in range(self.facets.ndim, self.facets.nrow * self.facets.ncol):
row = i / self.facets.ncol
col = i % self.facets.ncol
ax = self.get_subplot(row, col)
try:
self.fig.delaxes(ax)
except:
pass
elif col_variable and row_variable:
for (_, (colname, subgroup)) in enumerate(group.groupby(col_variable)):
for (_, (rowname, facetgroup)) in enumerate(subgroup.groupby(row_variable)):
row, col = self.facets.facet_map[(rowname, colname)]
ax = self.get_subplot(row, col)
yield (ax, facetgroup)
elif col_variable:
for (_, (colname, subgroup)) in enumerate(group.groupby(col_variable)):
row, col = self.facets.facet_map[colname]
ax = self.subplots[col]
if self.facets.is_wrap==True:
ax.set_title("%s=%s" % (col_variable, colname))
else:
ax.set_title(colname, fontdict={'fontsize': 10})
yield (ax, subgroup)
elif row_variable:
for (row, (rowname, subgroup)) in enumerate(group.groupby(row_variable)):
row, col = self.facets.facet_map[rowname]
if self.facets.is_wrap==True:
ax = self.subplots[row]
ax.set_title("%s=%s" % (row_variable, rowname))
else:
ax = self.subplots[row]
ax.yaxis.set_label_position("right")
ax.yaxis.labelpad = 10
ax.set_ylabel(rowname, fontsize=10, rotation=-90)
yield (ax, subgroup)
else:
yield (self.subplots, group)
def save(self, filename, width=None, height=None, dpi=180):
"""
Save ggplot to a .png file.
Parameters
----------
filename : string
filepath to save to
width: int, float
width of the plot in inches
height: int, float
height of the plot in inches
"""
self.make()
w, h = self.fig.get_size_inches()
if width:
w = width
if height:
h = height
self.fig.set_size_inches(w, h)
self.fig.savefig(filename)
def save_as_base64(self, as_tag=False, width=None, height=None, dpi=180):
"""
Save ggplot to a base64 encoded string.
Parameters
----------
as_tag: bool
if true, returns an <img /> tag with the image uri as the src attribute
width: int, float
width of the plot in inches
height: int, float
height of the plot in inches
"""
imgdata = six.StringIO()
self.save(imgdata, width=width, height=height, dpi=dpi)
imgdata.seek(0) # rewind the data
uri = 'data:image/png;base64,' + urllib.quote(base64.b64encode(imgdata.buf))
if as_tag==True:
return '<img src = "%s"/>' % uri
else:
return uri
def _prep_layer_for_plotting(self, layer, facetgroup):
"""
Some types of geoms (layer) need to be prepped before calling the plot
function on them. This function performs those perperations and then
returns a dictionary of **kwargs for the layer.plot function to use.
"""
if layer.__class__.__name__=="geom_bar":
mask = True
df = layer.setup_data(self.data, self._aes, facets=self.facets)
if df is None:
return False
if self.facets:
facet_filter = facetgroup[self.facets.facet_cols].iloc[0].to_dict()
for k, v in facet_filter.items():
mask = (mask) & (df[k]==v)
df = df[mask]
if 'fill' in self._aes:
fillcol_raw = self._aes['fill'][:-5]
fillcol = self._aes['fill']
try: # change in pandas v0.19
fill_levels = self.data[[fillcol_raw, fillcol]].sort_values(fillcol_raw)[fillcol].unique()
except: # before pandas v0.19
fill_levels = self.data[[fillcol_raw, fillcol]].sort(fillcol_raw)[fillcol].unique()
else:
fill_levels = None
return dict(x_levels=self.data[self._aes['x']].unique(), fill_levels=fill_levels, lookups=df)
elif layer.__class__.__name__ in ("geom_boxplot", "geom_violin", "geom_errorbar"):
x_levels = list(pd.Series(self.data[self._aes['x']].unique()).sort_values())
return dict(x_levels=x_levels)
else:
return dict()
def make(self):
"Constructs the plot using the methods. This is the 'main' for ggplot"
plt.close()
with mpl.rc_context():
self.apply_theme()
if self.facets:
self.fig, self.subplots = self.make_facets()
else:
subplot_kw = {}
if self.coords=="polar":
subplot_kw = { "polar": True }
self.fig, self.subplots = plt.subplots(subplot_kw=subplot_kw)
self.apply_scales()
legend, groups = self._construct_plot_data()
self._aes.legend = legend
for _, group in groups:
for ax, facetgroup in self.get_facet_groups(group):
for layer in self.layers:
kwargs = self._prep_layer_for_plotting(layer, facetgroup)
if kwargs==False:
continue
layer.plot(ax, facetgroup, self._aes, **kwargs)
self.apply_limits()
self.add_labels()
self.apply_axis_scales()
self.apply_axis_labels()
self.apply_coords()
self.add_legend(legend)
if self.theme:
for ax in self._iterate_subplots():
self.theme.apply_final_touches(ax)
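# A minimal standalone sketch (not part of ggplot itself) of the idea behind
# _get_mapping / _construct_plot_data above: each unique value of a discrete column is
# assigned the next value from a generator, and the resulting lookup is what feeds the
# legend. The extra hex colors in the palette are placeholders, not ggplot's defaults.
def _demo_discrete_mapping(values=("a", "b", "c", "a")):
    import itertools
    palette = itertools.cycle(["#4682B4", "#FF7F0E", "#2CA02C"])
    lookup = {value: next(palette) for value in sorted(set(values))}
    return lookup, [lookup[v] for v in values]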
| 37.58679
| 130
| 0.54755
|
47551fedc3633c08a317a7630536d0ee7b7e4348
| 351
|
py
|
Python
|
app/_index/models.py
|
valeriansaliou/waaave-web
|
8a0cde773563865a905af38f5a0b723a43b17341
|
[
"RSA-MD"
] | 1
|
2020-04-06T10:04:43.000Z
|
2020-04-06T10:04:43.000Z
|
app/_index/models.py
|
valeriansaliou/waaave-web
|
8a0cde773563865a905af38f5a0b723a43b17341
|
[
"RSA-MD"
] | null | null | null |
app/_index/models.py
|
valeriansaliou/waaave-web
|
8a0cde773563865a905af38f5a0b723a43b17341
|
[
"RSA-MD"
] | null | null | null |
from django.db import models
from _commons.fields import IdField
# IDs: stores content relationships (per ID)
class Ids(models.Model):
"""
Database [_index.ids]
"""
# System fields
item_id = IdField(db_index=True)
item_type = models.PositiveSmallIntegerField(db_index=True)
date = models.DateTimeField(auto_now_add=True)
| 25.071429
| 63
| 0.723647
|
b37e94dbbccac2618ce9cbb5e40ab08aa597bf11
| 3,187
|
py
|
Python
|
cms/tests/nonroot.py
|
romera-github/django-cms
|
8aca4c881c7de2c58b39bc94ad0c6285ca77f4a2
|
[
"BSD-3-Clause"
] | null | null | null |
cms/tests/nonroot.py
|
romera-github/django-cms
|
8aca4c881c7de2c58b39bc94ad0c6285ca77f4a2
|
[
"BSD-3-Clause"
] | 5
|
2021-03-19T15:49:27.000Z
|
2022-03-12T00:52:58.000Z
|
cms/tests/nonroot.py
|
rom3r4/django-cms
|
8aca4c881c7de2c58b39bc94ad0c6285ca77f4a2
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.api import create_page
from cms.models import Page
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from django.contrib.auth.models import User
from django.template import Template
from menus.base import NavigationNode
class NonRootCase(CMSTestCase):
urls = 'cms.test_utils.project.nonroot_urls'
def setUp(self):
with SettingsOverride(CMS_MODERATOR = False):
u = User(username="test", is_staff = True, is_active = True, is_superuser = True)
u.set_password("test")
u.save()
with self.login_user_context(u):
self.create_some_pages()
def create_some_pages(self):
"""
Creates the following structure:
+ P1
| + P2
| + P3
+ P4
"""
self.page1 = create_page("page1", "nav_playground.html", "en",
published=True, in_navigation=True)
self.page2 = create_page("page2", "nav_playground.html", "en",
parent=self.page1, published=True, in_navigation=True)
self.page3 = create_page("page3", "nav_playground.html", "en",
parent=self.page2, published=True, in_navigation=True)
self.page4 = create_page("page4", "nav_playground.html", "en",
published=True, in_navigation=True)
self.all_pages = [self.page1, self.page2, self.page3, self.page4]
self.top_level_pages = [self.page1, self.page4]
self.level1_pages = [self.page2]
self.level2_pages = [self.page3]
def test_get_page_root(self):
self.assertEqual(self.get_pages_root(), '/content/')
def test_basic_cms_menu(self):
with SettingsOverride(CMS_MODERATOR = False):
response = self.client.get(self.get_pages_root())
self.assertEquals(response.status_code, 200)
self.assertEquals(self.get_pages_root(), "/content/")
def test_show_menu(self):
with SettingsOverride(CMS_MODERATOR = False):
context = self.get_context()
tpl = Template("{% load menu_tags %}{% show_menu %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
self.assertEqual(nodes[0].get_absolute_url(), "/content/")
def test_show_breadcrumb(self):
with SettingsOverride(CMS_MODERATOR = False):
page2 = Page.objects.get(pk=self.page2.pk)
context = self.get_context(path=self.page2.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
self.assertEqual(nodes[0].get_absolute_url(), "/content/")
self.assertEqual(isinstance(nodes[0], NavigationNode), True)
self.assertEqual(nodes[1].get_absolute_url(), page2.get_absolute_url())
| 42.493333
| 93
| 0.624725
|
d7d345f7c9b0275b4a9411d312940a997abf44b3
| 9,215
|
py
|
Python
|
src/command_modules/azure-cli-batch/azure/cli/command_modules/batch/_help.py
|
saurabsa/azure-cli-old
|
f77477a98c9aa9cb55daf5b0d2f410d1455a9225
|
[
"MIT"
] | null | null | null |
src/command_modules/azure-cli-batch/azure/cli/command_modules/batch/_help.py
|
saurabsa/azure-cli-old
|
f77477a98c9aa9cb55daf5b0d2f410d1455a9225
|
[
"MIT"
] | 2
|
2021-03-25T21:38:56.000Z
|
2021-11-15T17:46:45.000Z
|
src/command_modules/azure-cli-batch/azure/cli/command_modules/batch/_help.py
|
Visual-Studio-China/azure-cli-int
|
48c7c7f371a0ecc4ebfd4dcfdc72764beddf5c31
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps
# pylint: disable=line-too-long
helps['batch'] = """
type: group
short-summary: Manage Azure Batch.
"""
helps['batch account'] = """
type: group
short-summary: Manage your Batch accounts.
"""
helps['batch account list'] = """
type: command
short-summary: List the Batch accounts associated with a subscription or resource group.
"""
helps['batch account create'] = """
type: command
short-summary: Create a Batch account with the specified parameters.
"""
helps['batch account set'] = """
type: command
short-summary: Update the properties of the specified Batch account. Properties that are not specified remain unchanged.
"""
helps['batch account autostorage-keys'] = """
type: group
short-summary: Manage the access keys for the auto storage account configured for your Batch account.
"""
helps['batch account keys'] = """
type: group
short-summary: Manage your Batch account keys.
"""
helps['batch account login'] = """
type: command
short-summary: Log in with specified Batch account through Azure Active Directory or Shared Key authentication.
"""
helps['batch application'] = """
type: group
short-summary: Manage your Batch applications.
"""
helps['batch application set'] = """
type: command
short-summary: Update the properties of the specified application. Properties that are not specified remain unchanged.
"""
helps['batch application package'] = """
type: group
short-summary: Manage your Batch application packages.
"""
helps['batch application package create'] = """
type: command
short-summary: Create an application package record and activate it.
"""
helps['batch application package activate'] = """
type: command
short-summary: Activates the specified application package. This step is unnecessary if the package has already been successfully activated by the 'create' command.
"""
helps['batch application summary'] = """
type: group
short-summary: View a summary of your Batch application packages.
"""
helps['batch location'] = """
type: group
short-summary: Manage Batch service options for a subscription at the region level.
"""
helps['batch location quotas'] = """
type: group
short-summary: Manage Batch service quotas at the region level.
"""
helps['batch certificate'] = """
type: group
short-summary: Manage your Batch certificates.
"""
helps['batch task file'] = """
type: group
short-summary: Manage your Batch task files.
"""
helps['batch task file download'] = """
type: command
short-summary: Download the content of the specified task file.
"""
helps['batch node file'] = """
type: group
short-summary: Manage your Batch compute node files.
"""
helps['batch node file download'] = """
type: command
short-summary: Download the content of the specified node file.
"""
helps['batch job'] = """
type: group
short-summary: Manage your Batch jobs.
"""
helps['batch job all-statistics'] = """
type: group
short-summary: View statistics of all the jobs under your Batch account.
"""
helps['batch job all-statistics show'] = """
type: command
short-summary: Get lifetime summary statistics for all of the jobs in the specified account. Statistics are aggregated across all jobs that have ever existed in the account, from account creation to the last update time of the statistics.
"""
helps['batch job prep-release-status'] = """
type: group
short-summary: View the status of your job preparation and release tasks.
"""
helps['batch job-schedule'] = """
type: group
short-summary: Manage your Batch job schedules.
"""
helps['batch node user'] = """
type: group
short-summary: Manage the user accounts of your Batch compute node.
"""
helps['batch node user create'] = """
type: command
short-summary: Add a user account to the specified compute node.
"""
helps['batch node user reset'] = """
type: command
short-summary: Update the properties of a user account on the specified compute node. All updatable properties are replaced with the values specified or reset if unspecified.
"""
helps['batch node'] = """
type: group
short-summary: Manage your Batch compute nodes.
"""
helps['batch node remote-login-settings'] = """
type: group
short-summary: Retrieve the remote login settings for a Batch compute node.
"""
helps['batch node remote-desktop'] = """
type: group
short-summary: Retrieve the remote desktop protocol for a Batch compute node.
"""
helps['batch node scheduling'] = """
type: group
short-summary: Manage task scheduling for a Batch compute node.
"""
helps['batch pool'] = """
type: group
short-summary: Manage your Batch pools.
"""
helps['batch pool os'] = """
type: group
short-summary: Manage the operating system of your Batch pools.
"""
helps['batch pool autoscale'] = """
type: group
short-summary: Manage automatic scaling of your Batch pools.
"""
helps['batch pool all-statistics'] = """
type: group
short-summary: View statistics of all pools under your Batch account.
"""
helps['batch pool all-statistics show'] = """
type: command
short-summary: Get lifetime summary statistics for all of the pools in the specified account. Statistics are aggregated across all pools that have ever existed in the account, from account creation to the last update time of the statistics.
"""
helps['batch pool usage-metrics'] = """
type: group
short-summary: View usage metrics of your Batch pools.
"""
helps['batch pool node-agent-skus'] = """
type: group
short-summary: Retrieve node agent SKUs of pools using a Virtual Machine Configuration.
"""
helps['batch task'] = """
type: group
short-summary: Manage your Batch tasks.
"""
helps['batch task subtask'] = """
type: group
short-summary: Manage subtask information of your Batch task.
"""
helps['batch certificate create'] = """
type: command
short-summary: Add a certificate.
"""
helps['batch certificate delete'] = """
type: command
short-summary: Delete the specified Batch certificate.
"""
helps['batch pool create'] = """
type: command
short-summary: Create a pool in the specified account. When creating a pool, choose arguments from either Cloud Services Configuration or Virtual Machine Configuration.
"""
helps['batch pool set'] = """
type: command
short-summary: Update the properties of the specified pool. Properties can be updated independently, but when a property is updated in a sub-group, for example 'start task', all properties of that group are reset.
"""
helps['batch pool reset'] = """
type: command
short-summary: Update the properties of the specified pool. All updatable properties are replaced with the values specified or reset to default values if unspecified.
"""
helps['batch pool resize'] = """
type: command
short-summary: Resize (or stop resizing) the Batch pool.
"""
helps['batch job create'] = """
type: command
short-summary: Add a job to the specified account.
"""
helps['batch job list'] = """
type: command
short-summary: List all of the jobs in the specified account or the specified job schedule.
"""
helps['batch job set'] = """
type: command
short-summary: Update the properties of a job. Properties can be updated independently, but when a property is updated in a sub-group, for example 'constraints' or 'pool info', all properties of that group are reset.
"""
helps['batch job reset'] = """
type: command
short-summary: Update the properties of a job. All updatable properties are replaced with the values specified or reset to default values if unspecified.
"""
helps['batch job-schedule create'] = """
type: command
short-summary: Add a job schedule to the specified account.
"""
helps['batch job-schedule set'] = """
type: command
short-summary: Update the properties of the specified job schedule. You can independently update the 'schedule' and the 'job specification', but any change to either of these entities will reset all properties in that entity.
"""
helps['batch job-schedule reset'] = """
type: command
short-summary: Update the properties of the specified job schedule. All updatable properties are replaced with the values specified or reset to default values if unspecified. An updated job specification only applies to new jobs.
"""
helps['batch task create'] = """
type: command
short-summary: Create a single Batch task or multiple Batch tasks.
"""
helps['batch task reset'] = """
type: command
short-summary: Update the properties of the specified task. All updatable properties are replaced with the values specified or reset if unspecified.
"""
| 31.343537
| 244
| 0.688985
|
62cc22e46de56b99cb0d5fd7b4d05b08143f1cd9
| 5,430
|
py
|
Python
|
conohadnsclient/tests/base.py
|
naototty/python-conohadns-client
|
04f360450d2e1a6020d2870272d8125cb112fa01
|
[
"Apache-2.0"
] | null | null | null |
conohadnsclient/tests/base.py
|
naototty/python-conohadns-client
|
04f360450d2e1a6020d2870272d8125cb112fa01
|
[
"Apache-2.0"
] | null | null | null |
conohadnsclient/tests/base.py
|
naototty/python-conohadns-client
|
04f360450d2e1a6020d2870272d8125cb112fa01
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json as json_
import os
import fixtures
from keystoneclient import session as keystone_session
from oslotest import base as test
from requests_mock.contrib import fixture as req_fixture
import six
from six.moves.urllib import parse as urlparse
from conohadnsclient import client
from conohadnsclient.utils import AdapterWithTimeout
_TRUE_VALUES = ('True', 'true', '1', 'yes')
class TestCase(test.BaseTestCase):
"""Test case base class for all unit tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.log_fixture = self.useFixture(fixtures.FakeLogger())
class APITestCase(TestCase):
"""Test case base class for all unit tests."""
TEST_URL = "http://127.0.0.1:9001/"
VERSION = None
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
self.log_fixture = self.useFixture(fixtures.FakeLogger())
self.requests = self.useFixture(req_fixture.Fixture())
self.client = self.get_client()
def get_base(self, base_url=None):
if not base_url:
base_url = '%sv%s' % (self.TEST_URL, self.VERSION)
return base_url
def stub_url(self, method, parts=None, base_url=None, json=None, **kwargs):
base_url = self.get_base(base_url)
if json:
kwargs['text'] = json_.dumps(json)
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
if parts:
url = '/'.join([p.strip('/') for p in [base_url] + parts])
else:
url = base_url
url = url.replace("/?", "?")
self.requests.register_uri(method, url, **kwargs)
def get_client(self, version=None, session=None):
version = version or self.VERSION
session = session or keystone_session.Session()
adapted = AdapterWithTimeout(
session=session, endpoint_override=self.get_base())
return client.Client(version, session=adapted)
def assertRequestBodyIs(self, body=None, json=None):
last_request_body = self.requests.last_request.body
if json:
val = json_.loads(last_request_body)
self.assertEqual(json, val)
elif body:
self.assertEqual(body, last_request_body)
def assertQueryStringIs(self, qs=''):
"""Verify the QueryString matches what is expected.
The qs parameter should be of the format \'foo=bar&abc=xyz\'
"""
expected = urlparse.parse_qs(qs, keep_blank_values=True)
parts = urlparse.urlparse(self.requests.last_request.url)
querystring = urlparse.parse_qs(parts.query, keep_blank_values=True)
self.assertEqual(expected, querystring)
def assertQueryStringContains(self, **kwargs):
"""Verify the query string contains the expected parameters.
This method is used to verify that the query string for the most recent
request made contains all the parameters provided as ``kwargs``, and
that the value of each parameter contains the value for the kwarg. If
the value for the kwarg is an empty string (''), then all that's
verified is that the parameter is present.
"""
parts = urlparse.urlparse(self.requests.last_request.url)
qs = urlparse.parse_qs(parts.query, keep_blank_values=True)
for k, v in six.iteritems(kwargs):
self.assertIn(k, qs)
self.assertIn(v, qs[k])
def assertRequestHeaderEqual(self, name, val):
"""Verify that the last request made contains a header and its value
The request must have already been made.
"""
headers = self.requests.last_request.headers
self.assertEqual(val, headers.get(name))
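# A minimal standalone sketch (not part of the original test module) of the parse_qs
# comparison that assertQueryStringIs and assertQueryStringContains rely on: both sides
# are parsed into dicts mapping each parameter to a list of values, so parameter order
# does not matter. The helper name is invented for illustration.
def _demo_query_string_comparison():
    from six.moves.urllib import parse as urlparse
    expected = urlparse.parse_qs("foo=bar&abc=xyz", keep_blank_values=True)
    actual = urlparse.parse_qs("abc=xyz&foo=bar", keep_blank_values=True)
    assert expected == actual == {"foo": ["bar"], "abc": ["xyz"]}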
| 36.938776
| 79
| 0.667956
|
b137b4d02cf265bfb204ed595d85918dc53f79e4
| 677
|
py
|
Python
|
manage_chat/migrations/0007_auto_20170424_0015.py
|
dduk-ddak/coding-night-live
|
be7b0b9a89a9a5332c0980dbc3698602266a1e8c
|
[
"MIT"
] | 73
|
2017-01-26T16:45:12.000Z
|
2021-07-05T20:27:38.000Z
|
manage_chat/migrations/0007_auto_20170424_0015.py
|
dduk-ddak/coding-night-live
|
be7b0b9a89a9a5332c0980dbc3698602266a1e8c
|
[
"MIT"
] | 93
|
2017-01-25T18:28:02.000Z
|
2019-06-10T22:11:38.000Z
|
manage_chat/migrations/0007_auto_20170424_0015.py
|
dduk-ddak/coding-night-live
|
be7b0b9a89a9a5332c0980dbc3698602266a1e8c
|
[
"MIT"
] | 22
|
2017-02-12T12:51:17.000Z
|
2020-09-08T02:38:20.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-24 00:15
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('manage_chat', '0006_poll_hash_value'),
]
operations = [
migrations.AlterField(
model_name='poll',
name='answer',
field=django.contrib.postgres.fields.jsonb.JSONField(),
),
migrations.AlterField(
model_name='poll',
name='answer_count',
field=django.contrib.postgres.fields.jsonb.JSONField(),
),
]
| 25.074074
| 67
| 0.618907
|
f82ad20d193ec8f00d4e7cb36e111ec454d0743a
| 638
|
py
|
Python
|
backend/src/feature_extraction/zerocrossing.py
|
AnXi-TieGuanYin-Tea/MusicGenreClassifiaction
|
a0b9f621b0a5d2451180b12af7681756c5abd138
|
[
"MIT"
] | 7
|
2018-05-01T19:39:17.000Z
|
2020-01-02T17:11:05.000Z
|
backend/src/feature_extraction/zerocrossing.py
|
AnXi-TieGuanYin-Tea/MusicGenreClassifiaction
|
a0b9f621b0a5d2451180b12af7681756c5abd138
|
[
"MIT"
] | 10
|
2018-12-10T22:16:43.000Z
|
2020-08-27T18:23:45.000Z
|
backend/src/feature_extraction/zerocrossing.py
|
AnXi-TieGuanYin-Tea/MusicGenreClassifiaction
|
a0b9f621b0a5d2451180b12af7681756c5abd138
|
[
"MIT"
] | 2
|
2021-04-16T08:20:17.000Z
|
2022-01-06T14:06:44.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 17 23:14:28 2018
@author: Akihiro Inui
"""
import numpy as np
def zerocrossing(input_windowed_signal:tuple or list) -> float:
"""
Zero Crossing Rate
:param input_windowed_signal: input audio signal after windowing
:return: zero crossing rate
"""
# Size of windowed signal
window_size = len(input_windowed_signal)
# Slided signal
xw2 = np.zeros(window_size)
xw2[1:] = input_windowed_signal[0:-1]
# Compute Zero-crossing Rate
return (1/(2*window_size)) * sum(abs(np.sign(input_windowed_signal)-np.sign(xw2)))
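# A minimal usage sketch (not part of the original module): a sign-alternating signal
# crosses zero on almost every sample (rate near 1), while a constant-sign signal barely
# registers (rate near 0; the zero-padded first sample adds a small offset). The helper
# name and the sample signals are invented for illustration.
def _demo_zerocrossing():
    alternating = np.array([1.0, -1.0] * 8)
    constant = np.ones(16)
    return zerocrossing(alternating), zerocrossing(constant)  # ~0.97, ~0.03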
| 24.538462
| 86
| 0.673981
|
6c29bfe7f6a1515ee88510c9ac5a3f73071694b7
| 137
|
py
|
Python
|
main/templates/generated/model_init.py
|
lipis/gae-init-magic
|
6b1e0b50f8e5200cb2dacebca9ac65e796b241a9
|
[
"MIT"
] | 1
|
2018-10-26T13:33:20.000Z
|
2018-10-26T13:33:20.000Z
|
main/templates/generated/model_init.py
|
lipis/gae-init-magic
|
6b1e0b50f8e5200cb2dacebca9ac65e796b241a9
|
[
"MIT"
] | 652
|
2018-10-26T12:28:08.000Z
|
2021-08-02T09:13:48.000Z
|
main/templates/generated/model_init.py
|
lipis/gae-init-magic
|
6b1e0b50f8e5200cb2dacebca9ac65e796b241a9
|
[
"MIT"
] | null | null | null |
# set model_dbs = model_dbs or [model_db]
# for model_db in model_dbs
from .{{model_db.variable_name}} import {{model_db.name}}
# endfor
| 27.4
| 57
| 0.744526
|
b30963254b23b68957b409dfe77dff10ccfaca9a
| 221
|
py
|
Python
|
test/ex3/ex3/profile_app/models.py
|
nrgxtra/web_framework
|
dd84968a77b84a03d66c5db190b28bffc479f05e
|
[
"MIT"
] | null | null | null |
test/ex3/ex3/profile_app/models.py
|
nrgxtra/web_framework
|
dd84968a77b84a03d66c5db190b28bffc479f05e
|
[
"MIT"
] | null | null | null |
test/ex3/ex3/profile_app/models.py
|
nrgxtra/web_framework
|
dd84968a77b84a03d66c5db190b28bffc479f05e
|
[
"MIT"
] | null | null | null |
from django.db import models
class Profile(models.Model):
first_name = models.CharField(
max_length=30,
)
last_name = models.CharField(
max_length=30,
)
image_url = models.URLField()
| 18.416667
| 34
| 0.647059
|
b9d1e46583249391d1ee5819825b7a9ab5613abb
| 22,227
|
py
|
Python
|
ejpcsvparser/parse.py
|
elifesciences/ejp-csv-parser
|
e3ed2815c0b20256a4ae555a51de3bfb7d40ae4e
|
[
"MIT"
] | null | null | null |
ejpcsvparser/parse.py
|
elifesciences/ejp-csv-parser
|
e3ed2815c0b20256a4ae555a51de3bfb7d40ae4e
|
[
"MIT"
] | 29
|
2017-12-09T01:16:50.000Z
|
2021-10-01T16:43:30.000Z
|
ejpcsvparser/parse.py
|
elifesciences/ejp-csv-parser
|
e3ed2815c0b20256a4ae555a51de3bfb7d40ae4e
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import logging
import time
from collections import OrderedDict
from xml.dom import minidom
from xml.parsers.expat import ExpatError
from elifearticle import article as ea
from elifearticle import utils as eautils
from elifetools import utils as etoolsutils
from ejpcsvparser import utils
import ejpcsvparser.csv_data as data
LOGGER = logging.getLogger("parse")
HDLR = logging.FileHandler("parse.log")
FORMATTER = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
HDLR.setFormatter(FORMATTER)
LOGGER.addHandler(HDLR)
LOGGER.setLevel(logging.INFO)
def instantiate_article(article_id):
LOGGER.info("in instantiate_article for %s", article_id)
doi = data.get_doi(article_id)
if doi is not None:
# Fallback if doi string is blank, default to eLife concatenated
if doi.strip() == "":
doi = utils.get_elife_doi(article_id)
article = ea.Article(doi, title=None)
return article
return None
def set_title(article, article_id):
LOGGER.info("in set_title")
title = data.get_title(article_id)
if title:
article.title = utils.convert_to_xml_string(title)
return True
LOGGER.error("could not set title ")
return False
def set_abstract(article, article_id):
LOGGER.info("in set_abstract")
raw_abstract = data.get_abstract(article_id)
if raw_abstract:
abstract = utils.decode_cp1252(raw_abstract)
article.abstract = utils.convert_to_xml_string(abstract)
article.manuscript = article_id
return True
LOGGER.error("could not set abstract ")
return False
def set_article_type(article, article_id):
LOGGER.info("in set_article_type")
article_type_id = data.get_article_type(article_id)
article_type_index = utils.article_type_indexes()
if article_type_id in article_type_index:
article_type = article_type_index[str(article_type_id)]
article.article_type = article_type["article_type"]
article.display_channel = article_type["display_channel"]
return True
return False
def set_license(article, article_id):
LOGGER.info("in set_license")
# if no article return False
if not article:
return False
license_id = data.get_license(article_id)
license_object = ea.License(license_id)
data_values = utils.license_data(license_id)
# if no data to populate the license return False
if not data_values:
return False
# set the object attributes from the data if present
for name in [
"license_id",
"license_type",
"copyright",
"href",
"name",
"paragraph1",
"paragraph2",
]:
eautils.set_attr_if_value(license_object, name, data_values.get(name))
article.license = license_object
return True
def add_date_to_article(article, date_type, date_string):
"add a date to the article object"
if not article:
return False
date_struct = None
date_parts = []
if date_string:
date_parts = date_string.split()
if date_parts:
try:
date_struct = time.strptime(date_parts[0], "%Y-%m-%d")
except ValueError:
LOGGER.info(
"unable to convert date %s given %s for article %s",
date_type,
date_parts,
article.doi,
)
else:
return False
if date_string and date_struct:
article_date = ea.ArticleDate(date_type, date_struct)
article.add_date(article_date)
LOGGER.info(
"set date_type %s from %s as %s", date_type, date_string, article_date
)
return True
return False
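# A minimal sketch (not part of the original module) isolating the date handling that
# add_date_to_article relies on: only the first whitespace-separated token is parsed,
# using the "%Y-%m-%d" format. The sample string and helper name are hypothetical.
def _demo_parse_date_string(date_string="2017-05-29 12:14:00"):
    date_parts = date_string.split()
    try:
        return time.strptime(date_parts[0], "%Y-%m-%d")
    except (IndexError, ValueError):
        return None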
def set_dates(article, article_id):
LOGGER.info("in set_dates")
if not article:
return False
accepted_date = data.get_accepted_date(article_id)
date_status = add_date_to_article(article, "accepted", accepted_date)
if date_status is not True:
return False
received_date = data.get_received_date(article_id)
if received_date.strip() == "":
# Use the alternate date column receipt_date if received_date is blank
received_date = data.get_receipt_date(article_id)
date_status = add_date_to_article(article, "received", received_date)
if date_status is not True:
return False
# set the license date to be the same as the accepted date
if article.get_date("accepted"):
date_license = ea.ArticleDate("license", article.get_date("accepted").date)
article.add_date(date_license)
return True
def set_ethics(article, article_id):
LOGGER.info("in set_ethics")
ethics = None
parse_status = None
ethic = data.get_ethics(article_id)
LOGGER.info(ethic)
if ethic:
parse_status, ethics = parse_ethics(ethic)
if ethic and parse_status is not True:
LOGGER.error("could not set ethics due to parsing error")
return False
if ethics:
for ethics_value in ethics:
article.add_ethic(ethics_value)
return True
def set_datasets(article, article_id):
LOGGER.info("in set_datasets")
datasets = data.get_datasets(article_id)
dataset_objects = None
data_availability = None
parse_status = None
LOGGER.info(datasets)
if datasets:
parse_status, dataset_objects, data_availability = parse_datasets(datasets)
if datasets and parse_status is not True:
LOGGER.error("could not set datasets due to parsing error")
return False
if dataset_objects:
for dataset in dataset_objects:
article.add_dataset(dataset)
if data_availability:
article.data_availability = utils.convert_to_xml_string(data_availability)
return True
def set_categories(article, article_id):
LOGGER.info("in set_categories")
categories = data.get_subjects(article_id)
if categories:
for category in categories:
article.add_article_category(category)
return True
def set_organsims(article, article_id):
LOGGER.info("in set_organsims")
research_organisms = data.get_organisms(article_id)
if research_organisms:
for research_organism in research_organisms:
if research_organism.strip() != "":
article.add_research_organism(
utils.convert_to_xml_string(research_organism)
)
return True
def set_keywords(article, article_id):
LOGGER.info("in set_keywords")
keywords = data.get_keywords(article_id)
if keywords:
for keyword in keywords:
article.add_author_keyword(keyword)
return True
def build_author(article_id, author_id, author_type):
"build an author object with the basic name data"
first_name = utils.decode_cp1252(data.get_author_first_name(article_id, author_id))
last_name = utils.decode_cp1252(data.get_author_last_name(article_id, author_id))
middle_name = utils.decode_cp1252(
data.get_author_middle_name(article_id, author_id)
)
suffix = utils.decode_cp1252(data.get_author_suffix(article_id, author_id))
# initials = middle_name_initials(middle_name)
if middle_name.strip() != "":
# Add the middle name to the first name / given name
first_name += " " + middle_name
author = ea.Contributor(author_type, last_name, first_name)
if suffix and suffix.strip() != "":
author.suffix = suffix
return author
def author_affiliation(article_id, author_id):
"create and set author affiliation details"
affiliation = ea.Affiliation()
department = utils.decode_cp1252(data.get_author_department(article_id, author_id))
if department.strip() != "":
affiliation.department = department
affiliation.institution = utils.decode_cp1252(
data.get_author_institution(article_id, author_id)
)
city = utils.decode_cp1252(data.get_author_city(article_id, author_id))
if city.strip() != "":
affiliation.city = city
affiliation.country = data.get_author_country(article_id, author_id)
contrib_type = data.get_author_contrib_type(article_id, author_id)
dual_corresponding = data.get_author_dual_corresponding(article_id, author_id)
if contrib_type == "Corresponding Author" or (
dual_corresponding.strip() != "" and int(dual_corresponding.strip()) == 1
):
affiliation.email = data.get_author_email(article_id, author_id)
return affiliation
def set_author_info(article, article_id):
"""
author information
Save the contributor and their position in the list in a dict,
for both authors and group authors,
Then add the contributors to the article object in order of their position
"""
LOGGER.info("in set_author_info")
authors_dict = {}
# check there are any authors before continuing
author_ids = data.get_author_ids(article_id)
if not author_ids and not data.get_group_authors(article_id):
LOGGER.error("could not find any author data")
return False
if author_ids:
for author_id in author_ids:
author_type = "author"
author = build_author(article_id, author_id, author_type)
affiliation = author_affiliation(article_id, author_id)
# set corresponding if the affiliation has an email
if affiliation.email:
author.corresp = True
conflict = data.get_author_conflict(article_id, author_id)
if conflict.strip() != "":
author.set_conflict(utils.convert_to_xml_string(conflict))
orcid = data.get_author_orcid(article_id, author_id)
if orcid.strip() != "":
author.orcid = orcid
author.auth_id = author_id
author.set_affiliation(affiliation)
author_position = data.get_author_position(article_id, author_id)
# Add the author to the dictionary recording their position in the list
authors_dict[int(author_position)] = author
# Add group author collab contributors, if present
group_authors = data.get_group_authors(article_id)
if group_authors:
# Parse the group authors string
group_author_dict = parse_group_authors(group_authors)
if group_author_dict:
for author_position in sorted(group_author_dict.keys()):
collab = group_author_dict.get(author_position)
author = ea.Contributor("author", None, None, collab)
# Add the author to the dictionary recording their position in the list
authors_dict[int(author_position)] = author
# Finally add authors to the article sorted by their position
for author_position in sorted(authors_dict.keys()):
# print article_id, author_position, author
article.add_contributor(authors_dict.get(author_position))
return True
def set_editor_info(article, article_id):
LOGGER.info("in set_editor_info")
author_type = "editor"
first_name = utils.decode_cp1252(data.get_me_first_nm(article_id))
last_name = utils.decode_cp1252(data.get_me_last_nm(article_id))
middle_name = utils.decode_cp1252(data.get_me_middle_nm(article_id))
suffix = utils.decode_cp1252(data.get_me_suffix(article_id))
# no first and last name then return False
if not (first_name and last_name):
LOGGER.error("could not set editor")
return False
# initials = middle_name_initials(middle_name)
if middle_name.strip() != "":
# Add the middle name to the first name / given name
first_name += " " + middle_name
# create an instance of the POSContributor class
editor = ea.Contributor(author_type, last_name, first_name)
if suffix and suffix.strip() != "":
editor.suffix = suffix
LOGGER.info("editor is: %s", str(editor))
LOGGER.info("getting ed id for article %s", article_id)
LOGGER.info("editor id is %s", data.get_me_id(article_id))
LOGGER.info(str(type(data.get_me_id(article_id))))
editor.auth_id = data.get_me_id(article_id)
affiliation = ea.Affiliation()
department = data.get_me_department(article_id)
if department.strip() != "":
affiliation.department = department
affiliation.institution = data.get_me_institution(article_id)
affiliation.country = data.get_me_country(article_id)
# editor.auth_id = `int(author_id)`: we have a me_id, but we need to determine
# whether that id is the same as the relevant author id
editor.set_affiliation(affiliation)
article.add_contributor(editor)
return True
def set_funding(article, article_id):
"""
Instantiate one eLifeFundingAward for each funding award
Add principal award recipients in the order of author position for the article
Finally add the funding objects to the article in the order of funding position
"""
LOGGER.info("in set_funding")
if not article:
return False
# Set the funding note from the manuscript level
article.funding_note = data.get_funding_note(article_id)
# Query for all funding award data keys
funder_ids = data.get_funding_ids(article_id)
# Keep track of funding awards by position in a dict
funding_awards = OrderedDict()
# First pass, build the funding awards
if funder_ids:
for (funder_article_id, author_id, funder_position) in funder_ids:
# print (article_id, author_id, funder_position)
funder_identifier = data.get_funder_identifier(
funder_article_id, author_id, funder_position
)
funder = utils.decode_cp1252(
utils.clean_funder(
data.get_funder(funder_article_id, author_id, funder_position)
)
)
award_id = data.get_award_id(funder_article_id, author_id, funder_position)
if funder_position not in funding_awards.keys():
# Initialise the object values
funding_awards[funder_position] = ea.FundingAward()
if funder:
funding_awards[funder_position].institution_name = funder
if funder_identifier and funder_identifier.strip() != "":
funding_awards[funder_position].institution_id = funder_identifier
if award_id and award_id.strip() != "":
funding_awards[funder_position].add_award_id(award_id)
# Second pass, add the primary award recipients in article author order
for position in sorted(funding_awards.keys()):
for contrib in article.contributors:
for (funder_article_id, author_id, funder_position) in funder_ids:
if position == funder_position and contrib.auth_id == author_id:
funding_awards[position].add_principal_award_recipient(contrib)
# Add funding awards to the article object, sorted by position
for position in sorted(funding_awards.keys()):
article.add_funding_award(funding_awards.get(position))
return True
def parse_ethics(ethic):
"""
    Given an angle-bracket-escaped XML string, parse the
    animal and human ethics comments, and return
    a list of strings if an involved_comments tag
    is found. A boilerplate prefix is added too.
"""
ethics = []
reparsed = None
parse_status = None
# Decode escaped angle brackets
LOGGER.info("ethic is %s", ethic)
ethic_xml = utils.unserialise_angle_brackets(ethic)
ethic_xml = etoolsutils.escape_ampersand(ethic_xml)
LOGGER.info("ethic is %s", ethic_xml)
# Parse XML
try:
reparsed = minidom.parseString(ethic_xml)
parse_status = True
except ExpatError:
parse_status = False
LOGGER.info("ethic reparsed is %s", reparsed)
# Extract comments
if reparsed:
for ethic_type in "animal_subjects", "human_subjects":
ethic_node = reparsed.getElementsByTagName(ethic_type)[0]
for node in ethic_node.childNodes:
if node.nodeName == "involved_comments":
text_node = node.childNodes[0]
ethic_text = text_node.nodeValue
# Add boilerplate
if ethic_type == "animal_subjects":
ethic_text = "Animal experimentation: " + ethic_text.strip()
elif ethic_type == "human_subjects":
ethic_text = "Human subjects: " + ethic_text.strip()
# Decode unicode characters
ethics.append(utils.entity_to_unicode(ethic_text))
return parse_status, ethics
def parse_dataset_node(dataset_node, dataset_type):
"extract attributes from a minidom node and populate a Dataset object"
dataset = ea.Dataset()
dataset.dataset_type = dataset_type
for node in dataset_node.childNodes:
if node.nodeName == "authors_text_list" and node.childNodes:
text_node = node.childNodes[0]
for author_name in text_node.nodeValue.split(","):
if author_name.strip() != "":
dataset.add_author(author_name.lstrip())
if node.nodeName == "title":
text_node = node.childNodes[0]
dataset.title = utils.entity_to_unicode(text_node.nodeValue)
if node.nodeName == "id":
text_node = node.childNodes[0]
dataset.source_id = utils.entity_to_unicode(text_node.nodeValue)
if node.nodeName == "license_info":
text_node = node.childNodes[0]
dataset.license_info = utils.entity_to_unicode(text_node.nodeValue)
if node.nodeName == "year" and node.childNodes:
text_node = node.childNodes[0]
dataset.year = utils.entity_to_unicode(text_node.nodeValue)
return dataset
def parse_datasets(datasets_content):
"""
Datasets content is XML with escaped angle brackets
"""
datasets = []
data_availability = None
reparsed = None
parse_status = None
# Decode escaped angle brackets
LOGGER.info("datasets is %s", datasets_content)
datasets_xml = utils.escape_angle_brackets(datasets_content)
datasets_xml = utils.unserialise_angle_brackets(datasets_xml)
datasets_xml = etoolsutils.escape_ampersand(datasets_xml)
LOGGER.info("datasets is %s", datasets_xml)
# Parse XML
try:
reparsed = minidom.parseString(datasets_xml)
parse_status = True
except ExpatError:
LOGGER.info("datasets reparsed is %s", reparsed)
parse_status = False
# Extract comments
if reparsed:
for dataset_type in "datasets", "prev_published_datasets":
datasets_nodes = reparsed.getElementsByTagName(dataset_type)[0]
for dataset_node in datasets_nodes.getElementsByTagName("dataset"):
datasets.append(parse_dataset_node(dataset_node, dataset_type))
# Parse the data availability statement
if reparsed.getElementsByTagName("data_availability_textbox"):
data_availability_node = reparsed.getElementsByTagName(
"data_availability_textbox"
)
if data_availability_node[0].childNodes:
data_availability = utils.entity_to_unicode(
data_availability_node[0].childNodes[0].nodeValue
)
return parse_status, datasets, data_availability
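# Illustrative sketch of the (unescaped) XML shape parse_datasets() expects, inferred
# purely from the tag names handled above -- the root element name is an assumption:
#
#   <datasets_info>
#     <datasets>
#       <dataset>
#         <authors_text_list>Smith J, Jones A</authors_text_list>
#         <title>Example dataset title</title>
#         <id>Example accession or DOI</id>
#         <license_info>Example licence statement</license_info>
#         <year>2017</year>
#       </dataset>
#     </datasets>
#     <prev_published_datasets></prev_published_datasets>
#     <data_availability_textbox>All data are included in the manuscript.</data_availability_textbox>
#   </datasets_info>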
def parse_group_authors(group_authors):
"""
Given a raw group author value from the data files,
check for empty, whitespace, zero
If not empty, remove extra numbers from the end of the string
Return a dictionary of dict[author_position] = collab_name
"""
group_author_dict = OrderedDict()
if not group_authors:
group_author_dict = None
elif group_authors.strip() == "" or group_authors.strip() == "0":
group_author_dict = None
else:
        # Parse out the elements into a list, clean them and
        # add them to the dictionary in a few steps
# Split the string on the first delimiter
group_author_list = group_authors.split("order_start")
for group_author_string in group_author_list:
if group_author_string == "":
continue
# Now split on the second delimiter
position_and_name = group_author_string.split("order_end")
author_position = position_and_name[0]
# Strip numbers at the end
if len(position_and_name) > 1:
group_author = position_and_name[1].rstrip("1234567890")
# Finally, add to the dict noting the authors position
group_author_dict[author_position] = group_author
return group_author_dict
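# Illustrative example of the delimiter format parse_group_authors() handles, based on
# the splitting logic above (the sample string below is an assumption):
#   parse_group_authors("order_start11order_endABC Consortium1order_start12order_endXYZ Group2")
#   returns OrderedDict([('11', 'ABC Consortium'), ('12', 'XYZ Group')])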
def build_article(article_id):
"""
Given an article_id, instantiate and populate the article objects
"""
error_count = 0
error_messages = []
# Only happy with string article_id - cast it now to be safe!
article_id = str(article_id)
article = instantiate_article(article_id)
# Run each of the below functions to build the article object components
article_set_functions = [
set_title,
set_abstract,
set_article_type,
set_license,
set_dates,
set_ethics,
set_datasets,
set_categories,
set_organsims,
set_author_info,
set_editor_info,
set_keywords,
set_funding,
]
for set_function in article_set_functions:
if not set_function(article, article_id):
error_count = error_count + 1
error_messages.append(
"article_id " + str(article_id) + " error in " + set_function.__name__
)
# Building from CSV data it must be a POA type, set it
if article:
article.is_poa = True
print(error_count)
# default conflict text
if article:
article.conflict_default = (
"The authors declare that no competing interests exist."
)
if error_count == 0:
return article, error_count, error_messages
return None, error_count, error_messages
| 35.113744
| 87
| 0.673415
|
4438dca6a03a2a40ba30b58ce6284487f64a8c92
| 1,350
|
py
|
Python
|
source/ch03/generate_test_names2.py
|
AngelLiang/programming-in-python3-2nd-edition
|
8f9a6ab6768a10e94daef641009288de6845245f
|
[
"MIT"
] | null | null | null |
source/ch03/generate_test_names2.py
|
AngelLiang/programming-in-python3-2nd-edition
|
8f9a6ab6768a10e94daef641009288de6845245f
|
[
"MIT"
] | 4
|
2020-03-24T18:25:26.000Z
|
2021-08-23T20:44:07.000Z
|
source/ch03/generate_test_names2.py
|
AngelLiang/programming-in-python3-2nd-edition
|
8f9a6ab6768a10e94daef641009288de6845245f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2008-11 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import random
def get_forenames_and_surnames():
forenames = []
surnames = []
for names, filename in ((forenames, "data/forenames.txt"),
(surnames, "data/surnames.txt")):
for name in open(filename, encoding="utf8"):
names.append(name.rstrip())
return forenames, surnames
forenames, surnames = get_forenames_and_surnames()
fh = open("test-names2.txt", "w", encoding="utf8")
limit = 100
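# the year range is repeated so that random.sample() below can draw `limit` (100)
# positions from it, since there are only 43 distinct years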
years = list(range(1970, 2013)) * 3
for year, forename, surname in zip(
random.sample(years, limit),
random.sample(forenames, limit),
random.sample(surnames, limit)):
name = "{0} {1}".format(forename, surname)
fh.write("{0:.<25}.{1}\n".format(name, year))
| 38.571429
| 74
| 0.691852
|
bc80ed94d329db65b45dd18bdd386966c99ed272
| 2,977
|
py
|
Python
|
contrib/devtools/test-security-check.py
|
bitcoinrtx/bitcoinrtx
|
00bcd7e3674174460bb0704e2f61aaad813db6fb
|
[
"MIT"
] | null | null | null |
contrib/devtools/test-security-check.py
|
bitcoinrtx/bitcoinrtx
|
00bcd7e3674174460bb0704e2f61aaad813db6fb
|
[
"MIT"
] | null | null | null |
contrib/devtools/test-security-check.py
|
bitcoinrtx/bitcoinrtx
|
00bcd7e3674174460bb0704e2f61aaad813db6fb
|
[
"MIT"
] | 1
|
2021-03-06T12:28:17.000Z
|
2021-03-06T12:28:17.000Z
|
#!/usr/bin/env python3
#
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Test script for security-check.py
'''
import subprocess
import unittest
def write_testcode(filename):
with open(filename, 'w', encoding="utf8") as f:
f.write('''
#include <stdio.h>
int main()
{
printf("the quick brown fox jumps over the lazy god\\n");
return 0;
}
''')
def call_security_check(cc, source, executable, options):
subprocess.check_call([cc,source,'-o',executable] + options)
p = subprocess.Popen(['./security-check.py',executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
return (p.returncode, stdout.rstrip())
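# The tuple returned above is what the assertions below compare against, e.g.
#   (1, 'test1: failed PIE NX RELRO Canary')  when hardening flags are missing, or
#   (0, '')                                   when the binary passes every check.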
class TestSecurityChecks(unittest.TestCase):
def test_ELF(self):
source = 'test1.c'
executable = 'test1'
cc = 'gcc'
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']),
(1, executable+': failed PIE NX RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']),
(1, executable+': failed PIE RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE']),
(1, executable+': failed PIE RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE']),
(1, executable+': failed RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE']),
(0, ''))
def test_64bit_PE(self):
source = 'test1.c'
executable = 'test1.exe'
cc = 'x86_64-w64-mingw32-gcc'
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--no-nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va']), (1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA NX'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va']), (1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--no-high-entropy-va']), (1, executable+': failed HIGH_ENTROPY_VA'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--high-entropy-va']), (0, ''))
if __name__ == '__main__':
unittest.main()
| 49.616667
| 202
| 0.649647
|
fe682e5b9e49890bae70547c925521abfa8725f0
| 437
|
py
|
Python
|
data/scripts/templates/object/tangible/camp/shared_camp_light_s1.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/camp/shared_camp_light_s1.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/camp/shared_camp_light_s1.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/camp/shared_camp_light_s1.iff"
result.attribute_template_id = -1
result.stfName("frn_n","frn_light")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 25.705882
| 66
| 0.720824
|
f0029a5ece2b73055ed62a56538fb6ecebea486a
| 4,860
|
py
|
Python
|
test_plugins/SimpleHBond.py
|
nezix/nanome-lib
|
e602cc9e63d1e2499c0f865da7ddbfbda44f0509
|
[
"MIT"
] | 3
|
2020-07-02T13:08:27.000Z
|
2021-11-24T14:32:53.000Z
|
test_plugins/SimpleHBond.py
|
nezix/nanome-lib
|
e602cc9e63d1e2499c0f865da7ddbfbda44f0509
|
[
"MIT"
] | 11
|
2020-09-14T17:01:47.000Z
|
2022-02-18T04:00:52.000Z
|
test_plugins/SimpleHBond.py
|
nezix/nanome-lib
|
e602cc9e63d1e2499c0f865da7ddbfbda44f0509
|
[
"MIT"
] | 5
|
2020-08-12T16:30:03.000Z
|
2021-12-06T18:04:23.000Z
|
import sys
import time
import nanome
minimum_bond_distance = 3
maximum_angstrom_distance = 3
# Config
NAME = "Test HBonds"
DESCRIPTION = "A simple plugin demonstrating how plugin system can be used to extend Nanome capabilities"
CATEGORY = "Simple Actions"
HAS_ADVANCED_OPTIONS = False
# Plugin
class SimpleHBond(nanome.PluginInstance):
def start(self):
print("Start Simple HBond Plugin")
@staticmethod
def _is_good_element(atom, current_element=None):
if atom.symbol != 'H' and atom.symbol != 'O':
return False
if current_element == atom.symbol:
return False
return True
@staticmethod
def _check_atom(atom, original_atom, original_residue, depth, visited_atoms):
visited_atoms.append(atom)
# If we traveled at least minimum_bond_distance and distance is under maximum_angstrom_distance, we might have a HBond here
if depth >= minimum_bond_distance \
and nanome.util.Vector3.distance(atom.position, original_atom.position) <= maximum_angstrom_distance \
and SimpleHBond._is_good_element(atom, original_atom.symbol):
new_bond = nanome.structure.Bond()
new_bond.kind = nanome.structure.Bond.Kind.Hydrogen
new_bond.atom1 = original_atom
new_bond.atom2 = atom
original_residue.bonds.append(new_bond)
# Check all bonds related to current atom
for bond in original_residue.bonds:
if bond.kind == nanome.structure.Bond.Kind.Hydrogen:
continue
if bond.atom1.index == atom.index:
other_atom = bond.atom2
elif bond.atom2.index == atom.index:
other_atom = bond.atom1
else:
continue
found = [x for x in visited_atoms if x.index == other_atom.index]
if len(found) == 0:
SimpleHBond._check_atom(other_atom, original_atom, original_residue, depth + 1, visited_atoms)
@staticmethod
def _check_atom_without_bonds(atom, workspace, original_residue, visited_atoms):
for complex in workspace.complexes:
for molecule in complex.molecules:
for chain in molecule.chains:
for residue in chain.residues:
for current_atom in residue.atoms:
if SimpleHBond._is_good_element(current_atom, atom.symbol) == False:
continue
found = [x for x in visited_atoms if x.index == current_atom.index]
if len(found) == 0:
if nanome.util.Vector3.distance(atom.position, current_atom.position) <= maximum_angstrom_distance:
new_bond = nanome.structure.Bond()
new_bond.kind = nanome.structure.Bond.Kind.Hydrogen
new_bond.atom1 = atom
new_bond.atom2 = current_atom
original_residue.bonds.append(new_bond)
@staticmethod
def _remove_hbonds(workspace):
removed_hbonds = False
for complex in workspace.complexes:
for molecule in complex.molecules:
for chain in molecule.chains:
for residue in chain.residues:
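                        # iterate in reverse so that deleting a bond does not shift the
                        # indices of the bonds that have not been checked yet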
for i, b in reversed(list(enumerate(residue.bonds))):
if b.kind == nanome.structure.Bond.Kind.Hydrogen:
del residue.bonds[i]
removed_hbonds = True
return removed_hbonds
def on_workspace_received(self, workspace):
if SimpleHBond._remove_hbonds(workspace):
print("HBonds removed")
else:
for complex in workspace.complexes:
for molecule in complex.molecules:
for chain in molecule.chains:
for residue in chain.residues:
for atom in residue.atoms:
if atom.selected == False:
continue
if SimpleHBond._is_good_element(atom):
visited_atoms = []
SimpleHBond._check_atom(atom, atom, residue, 0, visited_atoms)
SimpleHBond._check_atom_without_bonds(atom, workspace, residue, visited_atoms)
nanome.util.Logs.debug("HBonds added")
self.update_workspace(workspace)
def on_run(self):
self.request_workspace(self.on_workspace_received)
def __init__(self):
pass
nanome.Plugin.setup(NAME, DESCRIPTION, CATEGORY, HAS_ADVANCED_OPTIONS, SimpleHBond)
| 43.00885
| 131
| 0.580658
|
b3f6b229ee7868173dff553698b692a8304b91df
| 354
|
py
|
Python
|
apps/poker/views/__init__.py
|
deniskrumko/izyan-poker
|
ce70c9c8f761409adad289809e5220237b312407
|
[
"MIT"
] | 6
|
2019-08-05T07:37:52.000Z
|
2021-12-30T20:07:01.000Z
|
apps/poker/views/__init__.py
|
deniskrumko/izyan-poker
|
ce70c9c8f761409adad289809e5220237b312407
|
[
"MIT"
] | 8
|
2019-10-25T11:07:03.000Z
|
2021-06-10T18:43:42.000Z
|
apps/poker/views/__init__.py
|
deniskrumko/izyan-poker
|
ce70c9c8f761409adad289809e5220237b312407
|
[
"MIT"
] | 1
|
2019-10-07T15:44:26.000Z
|
2019-10-07T15:44:26.000Z
|
from .create import CreateRoomView
from .index import IndexView
from .member import MemberView
from .room import RoomHistoryView, RoomView
from .settings import SettingsView
from .status import StatusView
__all__ = (
'CreateRoomView',
'IndexView',
'MemberView',
'RoomHistoryView',
'RoomView',
'SettingsView',
'StatusView',
)
| 20.823529
| 43
| 0.728814
|
7781f58ba0f68cb680eb11e135a8e6db617499ae
| 6,764
|
py
|
Python
|
examples/scripts/example21.py
|
Alehud/QuSpin
|
c72d5fb2b2e9cd9a37d6917bba0337faf3b6c201
|
[
"BSD-3-Clause"
] | 195
|
2016-10-24T18:05:31.000Z
|
2022-03-29T10:11:56.000Z
|
examples/scripts/example21.py
|
markusschmitt/QuSpin
|
c239d01e6ce76253b03440cda3c8819a9f63e288
|
[
"BSD-3-Clause"
] | 303
|
2016-10-25T20:08:11.000Z
|
2022-03-31T16:52:09.000Z
|
examples/scripts/example21.py
|
markusschmitt/QuSpin
|
c239d01e6ce76253b03440cda3c8819a9f63e288
|
[
"BSD-3-Clause"
] | 54
|
2017-01-03T18:47:52.000Z
|
2022-03-16T06:54:33.000Z
|
from __future__ import print_function, division
#
import sys,os
os.environ['KMP_DUPLICATE_LIB_OK']='True' # uncomment this line if omp error occurs on OSX for python 3
os.environ['OMP_NUM_THREADS']='1' # set number of OpenMP threads to run in parallel
os.environ['MKL_NUM_THREADS']='1' # set number of MKL threads to run in parallel
#
quspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,quspin_path)
#######################################################################
# example 21 #
# This example shows how to use the `Lanczos` submodule of the #
# `tools` module to compute finite temperature expectation values #
# using `FTLM_statc_iteration` and `LTLM_statiic_iteration`. #
#######################################################################
from quspin.basis import spin_basis_1d
from quspin.operators import hamiltonian,quantum_operator
from quspin.tools.lanczos import lanczos_full,lanczos_iter,FTLM_static_iteration,LTLM_static_iteration
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
#
np.random.seed(1203901) # fix seed
#
#
def bootstrap_mean(O_r,Id_r,n_bootstrap=100):
"""
    Uses bootstrapping to estimate the error due to sampling.
O_r: numerator
Id_r: denominator
n_bootstrap: bootstrap sample size
"""
O_r = np.asarray(O_r)
Id_r = np.asarray(Id_r)
#
avg = np.nanmean(O_r,axis=0)/np.nanmean(Id_r,axis=0)
n_Id = Id_r.shape[0]
#n_N = O_r.shape[0]
#
i_iter = (np.random.randint(n_Id,size=n_Id) for i in range(n_bootstrap))
#
bootstrap_iter = (np.nanmean(O_r[i,...],axis=0)/np.nanmean(Id_r[i,...],axis=0) for i in i_iter)
diff_iter = ((bootstrap-avg)**2 for bootstrap in bootstrap_iter)
err = np.sqrt(sum(diff_iter)/n_bootstrap)
#
return avg,err
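# Minimal, self-contained sketch (not called by this example) of how bootstrap_mean()
# combines per-sample numerators and denominators; the synthetic arrays are assumptions.
def _bootstrap_mean_demo():
    O_r = np.random.normal(1.0, 0.1, size=(20, 5))   # per-sample numerators
    Id_r = np.random.normal(2.0, 0.1, size=(20, 5))  # matching denominators
    avg, err = bootstrap_mean(O_r, Id_r, n_bootstrap=50)
    return avg, err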
#
def get_operators(L):
"""
    Generates the Hamiltonian for the TFIM; see the QuSpin tutorial papers to learn more about this
"""
# create basis
basis = spin_basis_1d(L,pauli=True)
# site-coupling lists
J_list = [[-1.0,i,(i+1)%L] for i in range(L)]
h_list = [[-1.0,i] for i in range(L)]
M_list = [[1.0/L,i] for i in range(L)]
# create magnetization-squared operator
M = hamiltonian([["z",M_list]],[],basis=basis,dtype=np.float64)
M2 = M**2
    # create parameter-dependent Hamiltonian using quantum_operator
ops_dict = dict(J=[["zz",J_list]],h=[["x",h_list]])
H = quantum_operator(ops_dict,basis=basis,dtype=np.float64)
#
return M2,H
#
class lanczos_wrapper(object):
"""
    Class that contains the minimum requirements to use Lanczos.
    Using it is required, since the dot and dtype methods of quantum_operator objects take more parameters
"""
#
def __init__(self,A,**kwargs):
"""
        A: array-like object to wrap; its dot method will be called with the fixed parameters below
kwargs: any optional arguments used when overwriting the methods
"""
self._A = A
self._kwargs = kwargs
#
def dot(self,v,out=None):
"""
Calls the `dot` method of quantum_operator with the parameters fixed to a given value.
"""
return self._A.dot(v,out=out,pars=self._kwargs)
#
@property
def dtype(self):
"""
The dtype attribute is required to figure out result types in lanczos calculations.
"""
return self._A.dtype
#
#
##### define system parameters #####
#
L = 10 # system size
m = 50 # dimension of Krylov space
s = 0.5 # transverse-field Ising model parameter: H = sZZ + (1-s)X
#
N_samples = 100 # of samples to approximate thermal expectation value with
#
T = np.logspace(-3,3,51,base=10) # temperature vector
beta = 1.0/(T+1e-15) # inverse temperature vector
#
##### get operators #####
#
M2,H = get_operators(L)
# create wrapper for quantum_operator
H_wrapped = lanczos_wrapper(H,J=s,h=(1-s))
# calculate ground state energy to use as shift that will prevent overflows (i.e. numerical instabilities)
[E0] = H.eigsh(k=1,which="SA",pars=dict(J=s,h=1-s),return_eigenvectors=False)
#
##### finite temperature methods #####
#
# preallocate lists to store results from iterations
M2_FT_list = []
M2_LT_list = []
Z_FT_list = []
Z_LT_list = []
#
# allocate memory for lanczos vectors
out = np.zeros((m,H.Ns),dtype=np.float64)
#
# calculate iterations
for i in range(N_samples):
# generate normalized random vector
r = np.random.normal(0,1,size=H.Ns)
r /= np.linalg.norm(r)
# get lanczos basis
E,V,lv = lanczos_full(H_wrapped,r,m,eps=1e-8,full_ortho=True)
# E,V,lv = lanczos_full(H_wrapped,r,m,eps=1e-8,full_ortho=False)
# E,V,lv = lanczos_iter(H_wrapped,r,m,eps=1e-8)
# shift energy to avoid overflows
E -= E0
# calculate iteration
results_FT,Id_FT = FTLM_static_iteration({"M2":M2},E,V,lv,beta=beta)
results_LT,Id_LT = LTLM_static_iteration({"M2":M2},E,V,lv,beta=beta)
# save results to a list
M2_FT_list.append(results_FT["M2"])
Z_FT_list.append(Id_FT)
M2_LT_list.append(results_LT["M2"])
Z_LT_list.append(Id_LT)
#
# calculating error bars on the expectation values
m2_FT,dm2_FT = bootstrap_mean(M2_FT_list,Z_FT_list)
m2_LT,dm2_LT = bootstrap_mean(M2_LT_list,Z_LT_list)
#
##### calculating exact results from full diagonalization #####
#
dim_cutoff=2000 # Hilbert space dimension cutoff
if H.Ns < dim_cutoff: # Hilbert space is not too big to diagonalize on a laptop
#
# adding more points for smooth line
T_new = np.logspace(np.log10(T.min()),np.log10(T.max()),10*len(T))
beta_new = 1.0/(T_new+1e-15)
#
# full diagonaization of H
E,V = H.eigh(pars=dict(J=s,h=1-s))
# shift energy to avoid overflows
E -= E[0]
# get boltzmann weights for each temperature
W = np.exp(-np.outer(E,beta_new))
# get diagonal matrix elements for trace
O = M2.matrix_ele(V,V,diagonal=True)
# calculate trace
O = np.einsum("j...,j->...",W,O)/np.einsum("j...->...",W)
#
#
##### plot results #####
#
# setting up plot and inset
h=4.2 # figure aspect ratio parameter
f,ax = plt.subplots(figsize=(1.5*h,h))
axinset = inset_axes(ax, width="45%", height="65%", loc="upper right")
axs = [ax,axinset]
#
# plot results for FTLM and LTLM.
for a in axs:
a.errorbar(T,m2_LT,dm2_LT,marker=".",label="LTLM",zorder=-1)
a.errorbar(T,m2_FT,dm2_FT,marker=".",label="FTLM",zorder=-2)
#
if H.Ns < dim_cutoff: # hilbert space is not too big to diagonalize on a laptop
a.plot(T_new,O,label="exact",zorder=0)
#
a.set_xscale("log")
#
# adding space for inset by expanding x limits.
xmin,xmax = ax.get_xlim()
ax.set_xlim((xmin,10*xmax))
ax.legend(loc="lower left")
#
# inset adjustment to zoom in low-temp limit.
xmin,xmax = axinset.get_xlim()
#
a = -0.6
m = np.logical_and(T>=xmin,T<=10**(a))
axinset.set_xlim((xmin,10**(a+0.1)))
ymin = min(m2_LT[m].min(),m2_FT[m].min())
ymax = max(m2_LT[m].max(),m2_FT[m].max())
ywin = ymax-ymin
boundy = 0.1*ywin
axinset.set_ylim((ymin-boundy,ymax+boundy))
#
# display plot
f.tight_layout()
#plt.show()
#
| 31.314815
| 106
| 0.688794
|
c067e83647c0a99b0d2aaaf132f8bb0d89cecd9f
| 10,458
|
py
|
Python
|
show_changes.py
|
Linguistics575/POCRE
|
36c5597921f8b3aa971b92702cdb6e063701cd7c
|
[
"MIT"
] | 3
|
2019-09-08T20:21:15.000Z
|
2020-08-10T03:43:45.000Z
|
show_changes.py
|
Linguistics575/POCRE
|
36c5597921f8b3aa971b92702cdb6e063701cd7c
|
[
"MIT"
] | 9
|
2018-06-08T00:49:20.000Z
|
2020-08-06T16:17:03.000Z
|
show_changes.py
|
Linguistics575/POCRE
|
36c5597921f8b3aa971b92702cdb6e063701cd7c
|
[
"MIT"
] | 1
|
2020-08-10T03:43:52.000Z
|
2020-08-10T03:43:52.000Z
|
#!/usr/bin/env python3
'''
Compares an edited text to the original line by line and prints the edited text with changes in bold, red font.
Deletions are enclosed in square brackets.
--show_original option prints the edited and original texts side by side
--numbered prints line numbers on the left margin
@author genevp, adapted from Jimmy Bruno's wer.py
'''
import argparse
from collections import OrderedDict
from itertools import chain
from os import path
import re
import sys
# used in StatsTuple:
from builtins import property as _property, tuple as _tuple
from operator import itemgetter as _itemgetter
def get_distance_matrix(orig, edited):
'''
return an edit distance matrix
Parameters:
-----------
orig : iterable
the "original" iterable, e.g. elements present in orig but absent
in edited will be deletions.
edited : iterable
the "edited iterable", e.g. elements present in edited but
absent in orig will be insertions
Returns:
--------
distance_matrix : 2d list of lists
'''
# initialize the matrix
orig_len = len(orig) + 1
edit_len = len(edited) + 1
distance_matrix = [[0] * edit_len for _ in range(orig_len)]
for i in range(orig_len):
distance_matrix[i][0] = i
for j in range(edit_len):
distance_matrix[0][j] = j
# calculate the edit distances
for i in range(1, orig_len):
for j in range(1, edit_len):
deletion = distance_matrix[i-1][j] + 1
insertion = distance_matrix[i][j-1] + 1
substitution = distance_matrix[i-1][j-1]
if orig[i-1] != edited[j-1]:
substitution += 1
distance_matrix[i][j] = min(insertion, deletion, substitution)
return distance_matrix
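# Tiny illustrative check (not called anywhere in this script): for the made-up word
# lists below the bottom-right cell of the matrix is the edit distance, here 2
# (one substitution plus one insertion).
def _distance_matrix_demo():
    orig = ["the", "quick", "fox"]
    edited = ["the", "quik", "red", "fox"]
    matrix = get_distance_matrix(orig, edited)
    return matrix[len(orig)][len(edited)]  # -> 2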
class Compare:
'''
Parameters:
-----------
orig : iterable
the "original" iterable, e.g. elements present in origerence
but absent in edited will be deletions.
edited : iterable
the "edited" iterable, e.g. elements present in edited
but absent in original will be insertions
'''
def __init__(self, original, edited):
self.original = original
self.edited = edited
self.distance_matrix = get_distance_matrix(original, edited)
i = len(self.distance_matrix) - 1
j = len(self.distance_matrix[i]) - 1
self.edit_distance = self.distance_matrix[i][j]
self.num_orig_elements = i
def __repr__(self):
edited_str = str(self.edited)
original_str = str(self.original)
if len(edited_str) > 10:
edited_str = edited_str[10:] + " ..."
if len(original_str) > 10:
original_str = original_str[10:] + " ..."
return "Compare({}, {})".format(edited_str, original_str)
def set_alignment_strings(self):
'''
get aligned [corresponding] elements of original, edited, and labels and set Compare object attributes
'''
original = self.original
edited = self.edited
num_orig_elements = self.num_orig_elements
i = num_orig_elements
j = len(self.edited)
# edit_distance = self.edit_distance
distance_matrix = self.distance_matrix
num_deletions = 0
num_insertions = 0
num_substitutions = 0
align_orig_elements = []
align_edited_elements = []
align_label_str = []
# start at the cell containing the edit distance and analyze the
# matrix to figure out what is a deletion, insertion, or
# substitution.
while i or j:
# if deletion
if distance_matrix[i][j] == distance_matrix[i-1][j] + 1:
num_deletions += 1
align_orig_elements.append(original[i-1])
align_edited_elements.append(" ")
align_label_str.append('D')
i -= 1
# if insertion
elif distance_matrix[i][j] == distance_matrix[i][j-1] + 1:
num_insertions += 1
align_orig_elements.append(" ")
align_edited_elements.append(edited[j-1])
align_label_str.append('I')
j -= 1
# if match or substitution
else:
orig_element = original[i-1]
edited_element = edited[j-1]
if orig_element != edited_element:
num_substitutions += 1
label = 'S'
else:
label = ' '
align_orig_elements.append(orig_element)
align_edited_elements.append(edited_element)
align_label_str.append(label)
i -= 1
j -= 1
align_orig_elements.reverse()
align_edited_elements.reverse()
align_label_str.reverse()
self.align_orig_elements = align_orig_elements
self.align_edited_elements = align_edited_elements
self.align_label_str = align_label_str
def show_changes(self):
'''
Returns the edited text only, with differences between it and the original surrounded by formatting markup
'orig' is correction system INPUT (top line), 'edit' is OUTPUT (bottom line)
'''
if not hasattr(self, 'align_orig_elements'):
self.set_alignment_strings()
assert (len(self.align_orig_elements) ==
len(self.align_edited_elements) ==
len(self.align_label_str))
assert len(self.align_label_str) == len(self.align_edited_elements) == len(self.align_orig_elements), "different number of elements"
# for each word in line, determine whether there's a change and append with the according format
print_string = ''
for index in range(len(self.align_label_str)):
if self.align_label_str[index] == ' ':
print_string += self.align_edited_elements[index] + ' '
elif self.align_label_str[index] == 'S' or self.align_label_str[index] == 'I':
element = self.align_edited_elements[index]
print_string += changed(element) + ' '
else: # a deletion - need to print what was in the original that got deleted
element = self.align_orig_elements[index]
print_string += changed('[' + element + ']')
return print_string
def changed(plain_text):
'''
returns plain_text surrounded by markup tags for bold, red font formatting (indicating a change from the original)
:param plain_text: text to change
:return: text with markup tags
'''
return r"\b \cf3 " + plain_text + r" \b0 \cf2"
def process_single_pair(args):
'''
process a single pair of files. Called by main when running in single pair
mode, or by process_batch when running in batch mode.
Do alignment on files line by line, so alignment is only at the line level and printing
(Compare.show_changes) also happens at each line
'''
# get non-empty lines from original and edited files
with open(args.original_file) as f:
original_lines = f.readlines()
original_lines = [line for line in original_lines if line != '\n']
# READ EDITED TEXT FROM STDIN
edited_lines = [line for line in sys.stdin.readlines() if line != '\n']
assert len(original_lines) == len(edited_lines), "Texts contain different numbers of lines"
open(args.output_file, 'w')
# print header for rich text format; need to do this outside of the foreach loop for lines in the files
header = r"{\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf470{\fonttbl\f0\fnil\fcharset0 Menlo-Regular;}" \
r"{\colortbl;\red255\green255\blue255;\red0\green0\blue0;\red255\green0\blue0;}" \
r"\margl1440\margr1440\vieww11940\viewh7800\viewkind0\pard\tx720\tx1440\tx2160\tx2880\tx3600" \
r"\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0\f0\fs24 \cf2"
outf = open(args.output_file, 'a')
outf.write(header + '\n')
for i in range(len(original_lines)):
original_words = original_lines[i].split()
edited_words = edited_lines[i].split()
comparison = Compare(original_words, edited_words)
comparison.set_alignment_strings()
formatted_line = comparison.show_changes()
# add line numbers if --numbered flag is present
if args.numbered:
formatted_line = str(i + 1) + ' ' + formatted_line
# also print original line to the right if show_original flag is present
# (should probably make this into a separate method at some point)
if args.show_original:
original_line = original_lines[i]
# find number of characters contained in the formatting tags so we can adjust the padding
tags_length = 0
for token in formatted_line.split():
if token.startswith('\\'):
tags_length += len(token) + 1 # plus 1 for following space character
padding = 100 + tags_length
formatted_line = "{:{padding}}{}".format(formatted_line, original_line, padding=padding)
outf.write(formatted_line + '\n')
outf.write("\\\n")
outf.write('}')
def main():
# set up the main parser
parser = argparse.ArgumentParser(
description="Compare an original text and an edited text "
"and output the edited text with the differences in bolded red font.")
# main function for this parser:
parser.set_defaults(main_func=process_single_pair)
# arguments
parser.add_argument("original_file",
help='File to use as original')
parser.add_argument("output_file",
help='Destination file for system output')
parser.add_argument('--show_original',
help='display the original and edited texts side by side for easier comparison',
action='store_true',
default=False)
parser.add_argument('--numbered',
help='display line numbers',
action='store_true',
default=False)
args = parser.parse_args()
args.main_func(args)
if __name__ == '__main__':
main()
| 34.976589
| 140
| 0.616275
|
f46c4566d3141637fadb0ab1a6f061b5fa193a4c
| 572
|
py
|
Python
|
discordlogin/migrations/0004_auto_20210218_1718.py
|
fourmattunderscore/discord-django-login-api
|
6e25301662c6c041cca50e2bc537b65e7fc82bb6
|
[
"MIT"
] | null | null | null |
discordlogin/migrations/0004_auto_20210218_1718.py
|
fourmattunderscore/discord-django-login-api
|
6e25301662c6c041cca50e2bc537b65e7fc82bb6
|
[
"MIT"
] | null | null | null |
discordlogin/migrations/0004_auto_20210218_1718.py
|
fourmattunderscore/discord-django-login-api
|
6e25301662c6c041cca50e2bc537b65e7fc82bb6
|
[
"MIT"
] | 1
|
2021-10-03T09:55:07.000Z
|
2021-10-03T09:55:07.000Z
|
# Generated by Django 3.1.6 on 2021-02-18 07:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('discordlogin', '0003_auto_20210217_1817'),
]
operations = [
migrations.AddField(
model_name='discorduser',
name='is_active',
field=models.BooleanField(null=True),
),
migrations.AlterField(
model_name='discorduser',
name='last_login',
field=models.DateTimeField(),
),
]
| 23.833333
| 53
| 0.559441
|
3749d2ec41908dc8064fdb27a16341c4480eb873
| 2,949
|
py
|
Python
|
django_q/tests/settings.py
|
bigsfoot/django-q
|
85615ebde02183f93ea19dbdce383085cf53e295
|
[
"MIT"
] | 1,570
|
2015-06-17T22:13:29.000Z
|
2022-03-31T18:38:47.000Z
|
django_q/tests/settings.py
|
bigsfoot/django-q
|
85615ebde02183f93ea19dbdce383085cf53e295
|
[
"MIT"
] | 560
|
2015-07-09T00:32:22.000Z
|
2022-03-30T09:18:10.000Z
|
django_q/tests/settings.py
|
bigsfoot/django-q
|
85615ebde02183f93ea19dbdce383085cf53e295
|
[
"MIT"
] | 270
|
2015-07-01T10:56:30.000Z
|
2022-03-10T15:59:49.000Z
|
import os
import django
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ")cqmpi+p@n&!u&fu@!m@9h&1bz9mwmstsahe)nf!ms+c$uc=x7"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django_q",
"django_redis",
)
MIDDLEWARE_CLASSES = (
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
)
MIDDLEWARE = MIDDLEWARE_CLASSES
ROOT_URLCONF = "tests.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"console": {
"class": "logging.StreamHandler",
},
},
"loggers": {
"django_q": {
"handlers": ["console"],
"level": "INFO",
},
},
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
# Django Redis
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"PARSER_CLASS": "redis.connection.HiredisParser",
},
}
}
# Django Q specific
Q_CLUSTER = {
"name": "django_q_test",
"cpu_affinity": 1,
"testing": True,
"log_level": "DEBUG",
"django_redis": "default",
}
| 22.860465
| 71
| 0.636487
|
c1da9ce9846ab5c0b2e164683c23bd0b4c443ae0
| 1,159
|
py
|
Python
|
setup.py
|
jmeppley/du_histogram
|
9205973b43d14bca63343159b604c60718d82173
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
jmeppley/du_histogram
|
9205973b43d14bca63343159b604c60718d82173
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
jmeppley/du_histogram
|
9205973b43d14bca63343159b604c60718d82173
|
[
"Apache-2.0"
] | null | null | null |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
DESCRIPTION = "Simply ASCII histogram of du output"
LONG_DESCRIPTION = open('README.md').read()
NAME = "du_histogram"
AUTHOR = "John Eppley"
AUTHOR_EMAIL = "jmeppley@gmail.com"
MAINTAINER = "John Eppley"
MAINTAINER_EMAIL = "jmeppley@gmail.com"
URL = 'http://github.com/jmeppley/du_histogram'
DOWNLOAD_URL = 'http://github.com/jmeppley/du_histogram'
LICENSE = 'Apache'
VERSION = '0.9.3'
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
scripts=['duhist.py',],
      install_requires=['docopt'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GPL License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7'],
)
| 29.717949
| 56
| 0.665229
|
6132ad4aa8e5edc16ec2048a7eb07c3025cac7c8
| 347
|
py
|
Python
|
docker/test/integration/minifi/core/RemoteProcessGroup.py
|
kevdoran/nifi-minifi-cpp
|
4bb1fd97f5df103d5bee9a884839b24e2f77a2c5
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
docker/test/integration/minifi/core/RemoteProcessGroup.py
|
kevdoran/nifi-minifi-cpp
|
4bb1fd97f5df103d5bee9a884839b24e2f77a2c5
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
docker/test/integration/minifi/core/RemoteProcessGroup.py
|
kevdoran/nifi-minifi-cpp
|
4bb1fd97f5df103d5bee9a884839b24e2f77a2c5
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
import uuid
class RemoteProcessGroup(object):
def __init__(self, url, name=None):
self.uuid = uuid.uuid4()
if name is None:
self.name = str(self.uuid)
else:
self.name = name
self.url = url
def get_name(self):
return self.name
def get_uuid(self):
return self.uuid
| 17.35
| 39
| 0.567723
|
c7c7aab005a101e253319844d32e2cbf4b148d0f
| 16,510
|
py
|
Python
|
tortoise/fields/data.py
|
DDevine/tortoise-orm
|
414737a78e98ffd247174590720f5c90aeac4dde
|
[
"Apache-2.0"
] | 1
|
2020-05-15T19:50:12.000Z
|
2020-05-15T19:50:12.000Z
|
tortoise/fields/data.py
|
Tomes111/tortoise-orm
|
8b55499a228e44f33fec9099f4d559c77c73beb7
|
[
"Apache-2.0"
] | null | null | null |
tortoise/fields/data.py
|
Tomes111/tortoise-orm
|
8b55499a228e44f33fec9099f4d559c77c73beb7
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import functools
import json
import warnings
from decimal import Decimal
from enum import Enum, IntEnum
from typing import TYPE_CHECKING, Any, Callable, Optional, Type, TypeVar, Union
from uuid import UUID, uuid4
from pypika import functions
from pypika.enums import SqlTypes
from pypika.terms import Term
from tortoise.exceptions import ConfigurationError
from tortoise.fields.base import Field
try:
from ciso8601 import parse_datetime
except ImportError: # pragma: nocoverage
from iso8601 import parse_date
parse_datetime = functools.partial(parse_date, default_timezone=None)
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.models import Model
__all__ = (
"BigIntField",
"BinaryField",
"BooleanField",
"CharEnumField",
"CharField",
"DateField",
"DatetimeField",
"DecimalField",
"FloatField",
"IntEnumField",
"IntField",
"JSONField",
"SmallIntField",
"TextField",
"TimeDeltaField",
"UUIDField",
)
# Doing this we can replace json dumps/loads with different implementations
JsonDumpsFunc = Callable[[Any], str]
JsonLoadsFunc = Callable[[str], Any]
JSON_DUMPS: JsonDumpsFunc = functools.partial(json.dumps, separators=(",", ":"))
JSON_LOADS: JsonLoadsFunc = json.loads
try:
# Use python-rapidjson as an optional accelerator
import rapidjson
JSON_DUMPS = rapidjson.dumps
JSON_LOADS = rapidjson.loads
except ImportError: # pragma: nocoverage
pass
class IntField(Field, int):
"""
Integer field. (32-bit signed)
``pk`` (bool):
True if field is Primary Key.
"""
SQL_TYPE = "INT"
allows_generated = True
def __init__(self, pk: bool = False, **kwargs: Any) -> None:
if pk:
kwargs["generated"] = bool(kwargs.get("generated", True))
super().__init__(pk=pk, **kwargs)
@property
def constraints(self) -> dict:
return {
"ge": 1 if self.generated or self.reference else -2147483648,
"le": 2147483647,
}
class _db_postgres:
GENERATED_SQL = "SERIAL NOT NULL PRIMARY KEY"
class _db_sqlite:
GENERATED_SQL = "INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL"
class _db_mysql:
GENERATED_SQL = "INT NOT NULL PRIMARY KEY AUTO_INCREMENT"
class BigIntField(Field, int):
"""
Big integer field. (64-bit signed)
``pk`` (bool):
True if field is Primary Key.
"""
SQL_TYPE = "BIGINT"
allows_generated = True
def __init__(self, pk: bool = False, **kwargs: Any) -> None:
if pk:
kwargs["generated"] = bool(kwargs.get("generated", True))
super().__init__(pk=pk, **kwargs)
@property
def constraints(self) -> dict:
return {
"ge": 1 if self.generated or self.reference else -9223372036854775808,
"le": 9223372036854775807,
}
class _db_postgres:
GENERATED_SQL = "BIGSERIAL NOT NULL PRIMARY KEY"
class _db_sqlite:
GENERATED_SQL = "INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL"
class _db_mysql:
GENERATED_SQL = "BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT"
class SmallIntField(Field, int):
"""
Small integer field. (16-bit signed)
``pk`` (bool):
True if field is Primary Key.
"""
SQL_TYPE = "SMALLINT"
allows_generated = True
def __init__(self, pk: bool = False, **kwargs: Any) -> None:
if pk:
kwargs["generated"] = bool(kwargs.get("generated", True))
super().__init__(pk=pk, **kwargs)
@property
def constraints(self) -> dict:
return {
"ge": 1 if self.generated or self.reference else -32768,
"le": 32767,
}
class _db_postgres:
GENERATED_SQL = "SMALLSERIAL NOT NULL PRIMARY KEY"
class _db_sqlite:
GENERATED_SQL = "INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL"
class _db_mysql:
GENERATED_SQL = "SMALLINT NOT NULL PRIMARY KEY AUTO_INCREMENT"
class CharField(Field, str): # type: ignore
"""
Character field.
You must provide the following:
``max_length`` (int):
Maximum length of the field in characters.
"""
def __init__(self, max_length: int, **kwargs: Any) -> None:
if int(max_length) < 1:
raise ConfigurationError("'max_length' must be >= 1")
self.max_length = int(max_length)
super().__init__(**kwargs)
@property
def constraints(self) -> dict:
return {
"max_length": self.max_length,
}
@property
def SQL_TYPE(self) -> str: # type: ignore
return f"VARCHAR({self.max_length})"
class TextField(Field, str): # type: ignore
"""
Large Text field.
"""
indexable = False
SQL_TYPE = "TEXT"
def __init__(
self, pk: bool = False, unique: bool = False, index: bool = False, **kwargs: Any
) -> None:
if pk:
warnings.warn(
"TextField as a PrimaryKey is Deprecated, use CharField instead",
DeprecationWarning,
stacklevel=2,
)
if unique:
raise ConfigurationError(
f"TextField doesn't support unique indexes, consider CharField or another strategy"
)
if index:
raise ConfigurationError(f"TextField can't be indexed, consider CharField")
super().__init__(pk=pk, **kwargs)
class _db_mysql:
SQL_TYPE = "LONGTEXT"
class BooleanField(Field):
"""
Boolean field.
"""
# Bool is not subclassable, so we specify type here
field_type = bool
SQL_TYPE = "BOOL"
class _db_sqlite:
SQL_TYPE = "INT"
class DecimalField(Field, Decimal):
"""
Accurate decimal field.
You must provide the following:
``max_digits`` (int):
Max digits of significance of the decimal field.
``decimal_places`` (int):
        How many of those significant digits are after the decimal point.
"""
skip_to_python_if_native = True
def __init__(self, max_digits: int, decimal_places: int, **kwargs: Any) -> None:
if int(max_digits) < 1:
raise ConfigurationError("'max_digits' must be >= 1")
if int(decimal_places) < 0:
raise ConfigurationError("'decimal_places' must be >= 0")
super().__init__(**kwargs)
self.max_digits = max_digits
self.decimal_places = decimal_places
self.quant = Decimal("1" if decimal_places == 0 else f"1.{('0' * decimal_places)}")
def to_python_value(self, value: Any) -> Optional[Decimal]:
if value is None:
return None
return Decimal(value).quantize(self.quant).normalize()
@property
def SQL_TYPE(self) -> str: # type: ignore
return f"DECIMAL({self.max_digits},{self.decimal_places})"
class _db_sqlite:
SQL_TYPE = "VARCHAR(40)"
def function_cast(self, term: Term) -> Term:
return functions.Cast(term, SqlTypes.NUMERIC)
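# Illustrative model usage (the Product model and field name are assumptions):
#     class Product(Model):
#         price = DecimalField(max_digits=10, decimal_places=2)  # stores e.g. Decimal("19.99")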
class DatetimeField(Field, datetime.datetime):
"""
Datetime field.
    ``auto_now`` and ``auto_now_add`` are mutually exclusive.
    You can opt to set neither or only ONE of them.
``auto_now`` (bool):
Always set to ``datetime.utcnow()`` on save.
``auto_now_add`` (bool):
Set to ``datetime.utcnow()`` on first save only.
"""
skip_to_python_if_native = True
SQL_TYPE = "TIMESTAMP"
class _db_mysql:
SQL_TYPE = "DATETIME(6)"
def __init__(self, auto_now: bool = False, auto_now_add: bool = False, **kwargs: Any) -> None:
if auto_now_add and auto_now:
raise ConfigurationError("You can choose only 'auto_now' or 'auto_now_add'")
super().__init__(**kwargs)
self.auto_now = auto_now
self.auto_now_add = auto_now | auto_now_add
def to_python_value(self, value: Any) -> Optional[datetime.datetime]:
if value is None or isinstance(value, datetime.datetime):
return value
return parse_datetime(value)
def to_db_value(
self, value: Optional[datetime.datetime], instance: "Union[Type[Model], Model]"
) -> Optional[datetime.datetime]:
# Only do this if it is a Model instance, not class. Test for guaranteed instance var
if hasattr(instance, "_saved_in_db") and (
self.auto_now
or (self.auto_now_add and getattr(instance, self.model_field_name) is None)
):
value = datetime.datetime.utcnow()
setattr(instance, self.model_field_name, value)
return value
return value
@property
def constraints(self) -> dict:
data = {}
if self.auto_now_add:
data["readOnly"] = True
return data
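# Illustrative model usage (field names are assumptions for the example):
#     created_at = DatetimeField(auto_now_add=True)   # set once, on first save
#     modified_at = DatetimeField(auto_now=True)      # refreshed on every save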
class DateField(Field, datetime.date):
"""
Date field.
"""
skip_to_python_if_native = True
SQL_TYPE = "DATE"
def to_python_value(self, value: Any) -> Optional[datetime.date]:
if value is None or isinstance(value, datetime.date):
return value
return parse_datetime(value).date()
class TimeDeltaField(Field, datetime.timedelta):
"""
A field for storing time differences.
"""
SQL_TYPE = "BIGINT"
def to_python_value(self, value: Any) -> Optional[datetime.timedelta]:
if value is None or isinstance(value, datetime.timedelta):
return value
return datetime.timedelta(microseconds=value)
def to_db_value(
self, value: Optional[datetime.timedelta], instance: "Union[Type[Model], Model]"
) -> Optional[int]:
if value is None:
return None
return (value.days * 86400000000) + (value.seconds * 1000000) + value.microseconds
class FloatField(Field, float):
"""
Float (double) field.
"""
SQL_TYPE = "DOUBLE PRECISION"
class _db_sqlite:
SQL_TYPE = "REAL"
class _db_mysql:
SQL_TYPE = "DOUBLE"
class JSONField(Field, dict, list): # type: ignore
"""
JSON field.
This field can store dictionaries or lists of any JSON-compliant structure.
    You can specify your own custom JSON encoder/decoder; leaving the defaults in place should work well.
If you have ``python-rapidjson`` installed, we default to using that,
else the default ``json`` module will be used.
``encoder``:
The custom JSON encoder.
``decoder``:
The custom JSON decoder.
"""
SQL_TYPE = "TEXT"
indexable = False
class _db_postgres:
SQL_TYPE = "JSONB"
def __init__(
self,
encoder: JsonDumpsFunc = JSON_DUMPS,
decoder: JsonLoadsFunc = JSON_LOADS,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
def to_db_value(
self, value: Optional[Union[dict, list]], instance: "Union[Type[Model], Model]"
) -> Optional[str]:
return None if value is None else self.encoder(value)
def to_python_value(
self, value: Optional[Union[str, dict, list]]
) -> Optional[Union[dict, list]]:
return self.decoder(value) if isinstance(value, str) else value
class UUIDField(Field, UUID):
"""
UUID Field
    This field can store a UUID value.
If used as a primary key, it will auto-generate a UUID4 by default.
"""
SQL_TYPE = "CHAR(36)"
class _db_postgres:
SQL_TYPE = "UUID"
def __init__(self, **kwargs: Any) -> None:
if kwargs.get("pk", False) and "default" not in kwargs:
kwargs["default"] = uuid4
super().__init__(**kwargs)
def to_db_value(self, value: Any, instance: "Union[Type[Model], Model]") -> Optional[str]:
return value and str(value)
def to_python_value(self, value: Any) -> Optional[UUID]:
if value is None or isinstance(value, UUID):
return value
return UUID(value)
class BinaryField(Field, bytes): # type: ignore
"""
Binary field.
This is for storing ``bytes`` objects.
Note that filter or queryset-update operations are not supported.
"""
indexable = False
SQL_TYPE = "BLOB"
class _db_postgres:
SQL_TYPE = "BYTEA"
class _db_mysql:
SQL_TYPE = "LONGBLOB"
class IntEnumFieldInstance(SmallIntField):
def __init__(
self, enum_type: Type[IntEnum], description: Optional[str] = None, **kwargs: Any
) -> None:
# Validate values
for item in enum_type:
try:
value = int(item.value)
except ValueError:
raise ConfigurationError("IntEnumField only supports integer enums!")
if not 0 <= value < 32768:
raise ConfigurationError("The valid range of IntEnumField's values is 0..32767!")
# Automatic description for the field if not specified by the user
if description is None:
description = "\n".join([f"{e.name}: {int(e.value)}" for e in enum_type])[:2048]
super().__init__(description=description, **kwargs)
self.enum_type = enum_type
def to_python_value(self, value: Union[int, None]) -> Union[IntEnum, None]:
return self.enum_type(value) if value is not None else None
def to_db_value(
self, value: Union[IntEnum, None, int], instance: "Union[Type[Model], Model]"
) -> Union[int, None]:
if isinstance(value, IntEnum):
return int(value.value)
if isinstance(value, int):
return int(self.enum_type(value))
return value
IntEnumType = TypeVar("IntEnumType", bound=IntEnum)
def IntEnumField(
enum_type: Type[IntEnumType], description: Optional[str] = None, **kwargs: Any,
) -> IntEnumType:
"""
Enum Field
A field representing an integer enumeration.
The description of the field is set automatically if not specified to a multiline list of
"name: value" pairs.
    **Note**: A plain ``int`` that is a valid value of ``enum_type`` is also accepted.
``enum_type``:
The enum class
``description``:
The description of the field. It is set automatically if not specified to a multiline list
of "name: value" pairs.
"""
return IntEnumFieldInstance(enum_type, description, **kwargs) # type: ignore
class CharEnumFieldInstance(CharField):
def __init__(
self,
enum_type: Type[Enum],
description: Optional[str] = None,
max_length: int = 0,
**kwargs: Any,
) -> None:
# Automatic description for the field if not specified by the user
if description is None:
description = "\n".join([f"{e.name}: {str(e.value)}" for e in enum_type])[:2048]
# Automatic CharField max_length
if max_length == 0:
for item in enum_type:
item_len = len(str(item.value))
if item_len > max_length:
max_length = item_len
super().__init__(description=description, max_length=max_length, **kwargs)
self.enum_type = enum_type
def to_python_value(self, value: Union[str, None]) -> Union[Enum, None]:
return self.enum_type(value) if value is not None else None
def to_db_value(
self, value: Union[Enum, None, str], instance: "Union[Type[Model], Model]"
) -> Union[str, None]:
if isinstance(value, Enum):
return str(value.value)
if isinstance(value, str):
return str(self.enum_type(value).value)
return value
CharEnumType = TypeVar("CharEnumType", bound=Enum)
def CharEnumField(
enum_type: Type[CharEnumType],
description: Optional[str] = None,
max_length: int = 0,
**kwargs: Any,
) -> CharEnumType:
"""
Char Enum Field
A field representing a character enumeration.
    **Warning**: If ``max_length`` is not specified or equals zero, the size of the represented
    char field is detected automatically. So if you later update the enum, you need to update your
    table schema as well.
    **Note**: A plain ``str`` that is a valid value of ``enum_type`` is also accepted.
``enum_type``:
The enum class
``description``:
The description of the field. It is set automatically if not specified to a multiline list
of "name: value" pairs.
``max_length``:
The length of the created CharField. If it is zero it is automatically detected from
enum_type.
"""
return CharEnumFieldInstance(enum_type, description, max_length, **kwargs) # type: ignore
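# Illustrative usage sketch (the Currency enum and field name are assumptions):
#     class Currency(str, Enum):
#         EUR = "EUR"
#         USD = "USD"
#     currency = CharEnumField(Currency, default=Currency.USD, max_length=3)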
| 28.126065
| 99
| 0.626893
|
65ddd11a99bd8fb737ae8d34a4928ab5b6961496
| 127
|
py
|
Python
|
__init__.py
|
YihuiLu/json2markdown
|
76f288bdbb96b5a569ec7e0f37689dcce995de06
|
[
"MIT"
] | 2
|
2019-08-16T02:16:22.000Z
|
2019-08-16T02:18:45.000Z
|
__init__.py
|
yihuiLu/json2markdown
|
76f288bdbb96b5a569ec7e0f37689dcce995de06
|
[
"MIT"
] | null | null | null |
__init__.py
|
yihuiLu/json2markdown
|
76f288bdbb96b5a569ec7e0f37689dcce995de06
|
[
"MIT"
] | null | null | null |
# @Time : 2019-09-17 15:03
# @Author : Teacher Lu
# @Site : www.yifeilu.cn
# @File : __init__.py
# @Software: PyCharm
| 25.4
| 29
| 0.598425
|
e091cf37496bdac038e2930e066d75cb9b8cd139
| 2,169
|
py
|
Python
|
python/UVA/10044_erdos_number.py
|
gineer01/programming-challenges
|
9f0bbaab5b85423b5671ee3cfc2d0fd62cea4cc7
|
[
"MIT"
] | null | null | null |
python/UVA/10044_erdos_number.py
|
gineer01/programming-challenges
|
9f0bbaab5b85423b5671ee3cfc2d0fd62cea4cc7
|
[
"MIT"
] | null | null | null |
python/UVA/10044_erdos_number.py
|
gineer01/programming-challenges
|
9f0bbaab5b85423b5671ee3cfc2d0fd62cea4cc7
|
[
"MIT"
] | null | null | null |
import fileinput
import itertools
class Graph:
def __init__(self):
self.edges = {}
self.vertices = set()
def add_edge(self, u, v, data):
self.vertices.add(u)
self.vertices.add(v)
self.edges.setdefault(u, {})[v] = data
def remove_edge(self, u, v):
return self.edges[u].pop(v, None)
def bfs(self, start_node, node_func):
"""
Breadth-first search from start_node, calling node_func for each visited node.
:param start_node: the node to start the traversal from
:param node_func: a function taking (parent node, child node, edge data); parent and
edge data are None for the start node
:return: None
"""
import collections
visited = set()
q = collections.deque()
q.append(start_node)
node_func(None, start_node, None)
visited.add(start_node)
while len(q) > 0:
node = q.popleft()
for c in self.edges[node]:
if c in visited:
continue
else:
node_func(node, c, self.edges[node][c])
visited.add(c)
q.append(c)
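# Editor's hedged sketch (not part of the original solution): a minimal node_func that
# records BFS depth, mirroring how cal_erdos is wired up further below.
#
#     g = Graph()
#     g.add_edge('a', 'b', True)
#     g.add_edge('b', 'a', True)
#     depth = {}
#     def record(parent, child, edge):
#         depth[child] = 0 if parent is None else depth[parent] + 1
#     g.bfs('a', record)          # depth == {'a': 0, 'b': 1}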
f = fileinput.input()
t = int(next(f))
ERDOS = 'Erdos, P.'
def solve(papers, names):
adjacency = Graph()
for p in papers:
authors = get_authors(p)
# print(authors)
for a in authors:
for co in authors:
# adjacency.setdefault(a, set()).update(authors)
adjacency.add_edge(a, co, True)
erdos = {}
def cal_erdos(parent, child, edge):
if parent:
erdos[child] = erdos[parent] + 1
else:
erdos[child] = 0
adjacency.bfs(ERDOS, cal_erdos)
for n in names:
print("{} {}".format(n, erdos.get(n, 'infinity')))
def get_authors(p):
import re
authors = p.split(':')[0]
return re.split(r'(?<=\.)\s*, ', authors)
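# Editor's note (illustrative): the lookbehind split above breaks the author list on
# ", " only when it follows the "." that ends a set of initials, e.g.
#     get_authors("Smith, M.N., Martin, G., Erdos, P.: Newtonian forms ...")
#     -> ['Smith, M.N.', 'Martin, G.', 'Erdos, P.']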
def solve_case(f):
l = next(f).strip()
if not l:
l = next(f).strip()
p, n = map(int, l.split())
papers = list(itertools.islice(f, p))
names = [l.strip() for l in itertools.islice(f, n)]
solve(papers, names)
for i in range(t):
print("Scenario {}".format(i + 1))
solve_case(f)
| 22.831579
| 86
| 0.528354
|
5b1a0f37e6a0f6629452c19808ca2b9cff1422a0
| 3,186
|
py
|
Python
|
lite/tests/unittest_py/global_var_model.py
|
devchai123/Paddle-Lite
|
442d6996a59c3498eae27610d49a0d5b2c320f24
|
[
"Apache-2.0"
] | null | null | null |
lite/tests/unittest_py/global_var_model.py
|
devchai123/Paddle-Lite
|
442d6996a59c3498eae27610d49a0d5b2c320f24
|
[
"Apache-2.0"
] | null | null | null |
lite/tests/unittest_py/global_var_model.py
|
devchai123/Paddle-Lite
|
442d6996a59c3498eae27610d49a0d5b2c320f24
|
[
"Apache-2.0"
] | 1
|
2021-12-03T10:07:54.000Z
|
2021-12-03T10:07:54.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from pathlib import Path
import os
statics_data = {
"targets": set(),
"all_test_ops": {
"Host": set(),
"X86": set(),
"ARM": set(),
"OpenCL": set(),
"Metal": set()
},
"success_ops": {
"Host": set(),
"X86": set(),
"ARM": set(),
"OpenCL": set(),
"Metal": set()
},
"out_diff_ops": {
"Host": set(),
"X86": set(),
"ARM": set(),
"OpenCL": set(),
"Metal": set()
},
"not_supported_ops": {
"Host": set(),
"X86": set(),
"ARM": set(),
"OpenCL": set(),
"Metal": set()
},
}
static_file = Path("./statics_data")
static_file_path_str = "./statics_data"
# coding=utf-8
def set_value(kind, target, op):
if not static_file.exists():
global statics_data
else:
with open(static_file_path_str, "rb") as f:
statics_data = pickle.load(f)
statics_data["targets"].add(target)
statics_data[kind][target].add(op)
with open(static_file_path_str, "wb") as f:
pickle.dump(statics_data, f)
def set_all_test_ops(target, op):
set_value("all_test_ops", target, op)
def set_success_ops(target, op):
set_value("success_ops", target, op)
def set_out_diff_ops(target, op):
set_value("out_diff_ops", target, op)
def set_not_supported_ops(target, op):
set_value("not_supported_ops", target, op)
def display():
print("----------------------Unit Test Summary---------------------")
with open("./statics_data", "rb") as f:
statics_data = pickle.load(f)
targets = statics_data["targets"]
for target in targets:
all_test_ops = statics_data["all_test_ops"][target]
not_supported_ops = statics_data["not_supported_ops"][target]
out_diff_ops = statics_data["out_diff_ops"][target]
success_ops = statics_data["success_ops"][
target] - not_supported_ops - out_diff_ops
print("Target =", target)
print("Number of test ops =", len(all_test_ops))
print("Number of success ops =", len(success_ops))
print("Number of not supported ops =", len(not_supported_ops))
print("Number of output diff ops =", len(out_diff_ops))
print("\nDetails:")
print("Success ops:")
print(list(success_ops))
print("\nNot supported ops:")
print(list(not_supported_ops))
print("\nOutput diff ops:")
print(list(out_diff_ops))
print("\n")
if __name__ == "__main__":
display()
| 27.704348
| 74
| 0.606403
|
d23808cfbdaf225d0c1384425024e424ad4b1a45
| 7,202
|
py
|
Python
|
moto/__init__.py
|
jstangroome/moto
|
eb6515cf50878b18081ecf0afe944a36c1bdda5f
|
[
"Apache-2.0"
] | null | null | null |
moto/__init__.py
|
jstangroome/moto
|
eb6515cf50878b18081ecf0afe944a36c1bdda5f
|
[
"Apache-2.0"
] | null | null | null |
moto/__init__.py
|
jstangroome/moto
|
eb6515cf50878b18081ecf0afe944a36c1bdda5f
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import importlib
def lazy_load(module_name, element):
def f(*args, **kwargs):
module = importlib.import_module(module_name, "moto")
return getattr(module, element)(*args, **kwargs)
return f
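# Editor's note (added for clarity, not in the upstream file): every mock below is a
# thin wrapper; the submodule is imported only on first call, so e.g. invoking
# mock_s3() triggers importlib.import_module(".s3", "moto") before delegating to the
# real decorator/context manager of the same name.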
mock_acm = lazy_load(".acm", "mock_acm")
mock_apigateway = lazy_load(".apigateway", "mock_apigateway")
mock_apigateway_deprecated = lazy_load(".apigateway", "mock_apigateway_deprecated")
mock_athena = lazy_load(".athena", "mock_athena")
mock_applicationautoscaling = lazy_load(
".applicationautoscaling", "mock_applicationautoscaling"
)
mock_autoscaling = lazy_load(".autoscaling", "mock_autoscaling")
mock_autoscaling_deprecated = lazy_load(".autoscaling", "mock_autoscaling_deprecated")
mock_lambda = lazy_load(".awslambda", "mock_lambda")
mock_lambda_deprecated = lazy_load(".awslambda", "mock_lambda_deprecated")
mock_batch = lazy_load(".batch", "mock_batch")
mock_cloudformation = lazy_load(".cloudformation", "mock_cloudformation")
mock_cloudformation_deprecated = lazy_load(
".cloudformation", "mock_cloudformation_deprecated"
)
mock_cloudwatch = lazy_load(".cloudwatch", "mock_cloudwatch")
mock_cloudwatch_deprecated = lazy_load(".cloudwatch", "mock_cloudwatch_deprecated")
mock_codecommit = lazy_load(".codecommit", "mock_codecommit")
mock_codepipeline = lazy_load(".codepipeline", "mock_codepipeline")
mock_cognitoidentity = lazy_load(".cognitoidentity", "mock_cognitoidentity")
mock_cognitoidentity_deprecated = lazy_load(
".cognitoidentity", "mock_cognitoidentity_deprecated"
)
mock_cognitoidp = lazy_load(".cognitoidp", "mock_cognitoidp")
mock_cognitoidp_deprecated = lazy_load(".cognitoidp", "mock_cognitoidp_deprecated")
mock_config = lazy_load(".config", "mock_config")
mock_datapipeline = lazy_load(".datapipeline", "mock_datapipeline")
mock_datapipeline_deprecated = lazy_load(
".datapipeline", "mock_datapipeline_deprecated"
)
mock_datasync = lazy_load(".datasync", "mock_datasync")
mock_dms = lazy_load(".dms", "mock_dms")
mock_dynamodb = lazy_load(".dynamodb", "mock_dynamodb")
mock_dynamodb_deprecated = lazy_load(".dynamodb", "mock_dynamodb_deprecated")
mock_dynamodb2 = lazy_load(".dynamodb2", "mock_dynamodb2")
mock_dynamodb2_deprecated = lazy_load(".dynamodb2", "mock_dynamodb2_deprecated")
mock_dynamodbstreams = lazy_load(".dynamodbstreams", "mock_dynamodbstreams")
mock_elasticbeanstalk = lazy_load(".elasticbeanstalk", "mock_elasticbeanstalk")
mock_ec2 = lazy_load(".ec2", "mock_ec2")
mock_ec2_deprecated = lazy_load(".ec2", "mock_ec2_deprecated")
mock_ec2instanceconnect = lazy_load(".ec2instanceconnect", "mock_ec2instanceconnect")
mock_ecr = lazy_load(".ecr", "mock_ecr")
mock_ecr_deprecated = lazy_load(".ecr", "mock_ecr_deprecated")
mock_ecs = lazy_load(".ecs", "mock_ecs")
mock_ecs_deprecated = lazy_load(".ecs", "mock_ecs_deprecated")
mock_elb = lazy_load(".elb", "mock_elb")
mock_elb_deprecated = lazy_load(".elb", "mock_elb_deprecated")
mock_elbv2 = lazy_load(".elbv2", "mock_elbv2")
mock_emr = lazy_load(".emr", "mock_emr")
mock_emr_deprecated = lazy_load(".emr", "mock_emr_deprecated")
mock_events = lazy_load(".events", "mock_events")
mock_forecast = lazy_load(".forecast", "mock_forecast")
mock_glacier = lazy_load(".glacier", "mock_glacier")
mock_glacier_deprecated = lazy_load(".glacier", "mock_glacier_deprecated")
mock_glue = lazy_load(".glue", "mock_glue")
mock_iam = lazy_load(".iam", "mock_iam")
mock_iam_deprecated = lazy_load(".iam", "mock_iam_deprecated")
mock_iot = lazy_load(".iot", "mock_iot")
mock_iotdata = lazy_load(".iotdata", "mock_iotdata")
mock_kinesis = lazy_load(".kinesis", "mock_kinesis")
mock_kinesis_deprecated = lazy_load(".kinesis", "mock_kinesis_deprecated")
mock_kms = lazy_load(".kms", "mock_kms")
mock_kms_deprecated = lazy_load(".kms", "mock_kms_deprecated")
mock_logs = lazy_load(".logs", "mock_logs")
mock_logs_deprecated = lazy_load(".logs", "mock_logs_deprecated")
mock_managedblockchain = lazy_load(".managedblockchain", "mock_managedblockchain")
mock_opsworks = lazy_load(".opsworks", "mock_opsworks")
mock_opsworks_deprecated = lazy_load(".opsworks", "mock_opsworks_deprecated")
mock_organizations = lazy_load(".organizations", "mock_organizations")
mock_polly = lazy_load(".polly", "mock_polly")
mock_ram = lazy_load(".ram", "mock_ram")
mock_rds = lazy_load(".rds", "mock_rds")
mock_rds_deprecated = lazy_load(".rds", "mock_rds_deprecated")
mock_rds2 = lazy_load(".rds2", "mock_rds2")
mock_rds2_deprecated = lazy_load(".rds2", "mock_rds2_deprecated")
mock_redshift = lazy_load(".redshift", "mock_redshift")
mock_redshift_deprecated = lazy_load(".redshift", "mock_redshift_deprecated")
mock_resourcegroups = lazy_load(".resourcegroups", "mock_resourcegroups")
mock_resourcegroupstaggingapi = lazy_load(
".resourcegroupstaggingapi", "mock_resourcegroupstaggingapi"
)
mock_route53 = lazy_load(".route53", "mock_route53")
mock_route53_deprecated = lazy_load(".route53", "mock_route53_deprecated")
mock_s3 = lazy_load(".s3", "mock_s3")
mock_s3_deprecated = lazy_load(".s3", "mock_s3_deprecated")
mock_sagemaker = lazy_load(".sagemaker", "mock_sagemaker")
mock_secretsmanager = lazy_load(".secretsmanager", "mock_secretsmanager")
mock_ses = lazy_load(".ses", "mock_ses")
mock_ses_deprecated = lazy_load(".ses", "mock_ses_deprecated")
mock_sns = lazy_load(".sns", "mock_sns")
mock_sns_deprecated = lazy_load(".sns", "mock_sns_deprecated")
mock_sqs = lazy_load(".sqs", "mock_sqs")
mock_sqs_deprecated = lazy_load(".sqs", "mock_sqs_deprecated")
mock_ssm = lazy_load(".ssm", "mock_ssm")
mock_stepfunctions = lazy_load(".stepfunctions", "mock_stepfunctions")
mock_sts = lazy_load(".sts", "mock_sts")
mock_sts_deprecated = lazy_load(".sts", "mock_sts_deprecated")
mock_swf = lazy_load(".swf", "mock_swf")
mock_swf_deprecated = lazy_load(".swf", "mock_swf_deprecated")
mock_transcribe = lazy_load(".transcribe", "mock_transcribe")
XRaySegment = lazy_load(".xray", "XRaySegment")
mock_xray = lazy_load(".xray", "mock_xray")
mock_xray_client = lazy_load(".xray", "mock_xray_client")
mock_kinesisvideo = lazy_load(".kinesisvideo", "mock_kinesisvideo")
mock_kinesisvideoarchivedmedia = lazy_load(
".kinesisvideoarchivedmedia", "mock_kinesisvideoarchivedmedia"
)
mock_medialive = lazy_load(".medialive", "mock_medialive")
mock_support = lazy_load(".support", "mock_support")
mock_mediaconnect = lazy_load(".mediaconnect", "mock_mediaconnect")
mock_mediapackage = lazy_load(".mediapackage", "mock_mediapackage")
mock_mediastore = lazy_load(".mediastore", "mock_mediastore")
mock_eks = lazy_load(".eks", "mock_eks")
mock_mediastoredata = lazy_load(".mediastoredata", "mock_mediastoredata")
# import logging
# logging.getLogger('boto').setLevel(logging.CRITICAL)
__title__ = "moto"
__version__ = "2.1.0.dev"
try:
# Need to monkey-patch botocore requests back to underlying urllib3 classes
from botocore.awsrequest import (
HTTPSConnectionPool,
HTTPConnectionPool,
HTTPConnection,
VerifiedHTTPSConnection,
)
except ImportError:
pass
else:
HTTPSConnectionPool.ConnectionCls = VerifiedHTTPSConnection
HTTPConnectionPool.ConnectionCls = HTTPConnection
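# Editor's hedged note: restoring urllib3's connection classes here appears intended to
# let HTTP-level interception see botocore traffic; if botocore is absent, the patch is
# simply skipped via the ImportError branch above.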
| 47.695364
| 86
| 0.777145
|