| hexsha (string, 40-40) | size (int64, 1-1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3-239) | max_stars_repo_name (string, 5-130) | max_stars_repo_head_hexsha (string, 40-78) | max_stars_repo_licenses (list, 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24-24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24-24, nullable) | max_issues_repo_path (string, 3-239) | max_issues_repo_name (string, 5-130) | max_issues_repo_head_hexsha (string, 40-78) | max_issues_repo_licenses (list, 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24-24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24-24, nullable) | max_forks_repo_path (string, 3-239) | max_forks_repo_name (string, 5-130) | max_forks_repo_head_hexsha (string, 40-78) | max_forks_repo_licenses (list, 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24-24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24-24, nullable) | content (string, 1-1.03M) | avg_line_length (float64, 1-958k) | max_line_length (int64, 1-1.03M) | alphanum_fraction (float64, 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 7951ef9700a3118310b51c3ba35ef4d26320a2a7 | 3,372 | py | Python | Codeforces/648-2/A.py | dipta007/Competitive-Programming | 998d47f08984703c5b415b98365ddbc84ad289c4 | ["MIT"] | 6 | 2018-10-15T18:45:05.000Z | 2022-03-29T04:30:10.000Z | Codeforces/648-2/A.py | dipta007/Competitive-Programming | 998d47f08984703c5b415b98365ddbc84ad289c4 | ["MIT"] | null | null | null | Codeforces/648-2/A.py | dipta007/Competitive-Programming | 998d47f08984703c5b415b98365ddbc84ad289c4 | ["MIT"] | 4 | 2018-01-07T06:20:07.000Z | 2019-08-21T15:45:59.000Z |
""" Python 3 compatibility tools. """
from __future__ import division, print_function
import itertools
import sys
import os
from io import BytesIO, IOBase
if sys.version_info[0] < 3:
input = raw_input
range = xrange
filter = itertools.ifilter
map = itertools.imap
zip = itertools.izip
def is_it_local():
script_dir = str(os.getcwd()).split('/')
username = "dipta007"
return username in script_dir
def READ(fileName):
if is_it_local():
sys.stdin = open(f'./{fileName}', 'r')
# region fastio
BUFSIZE = 8192
class FastIO(IOBase):
newlines = 0
def __init__(self, file):
self._fd = file.fileno()
self.buffer = BytesIO()
self.writable = "x" in file.mode or "r" not in file.mode
self.write = self.buffer.write if self.writable else None
def read(self):
while True:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
if not b:
break
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines = 0
return self.buffer.read()
def readline(self):
while self.newlines == 0:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
self.newlines = b.count(b"\n") + (not b)
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines -= 1
return self.buffer.readline()
def flush(self):
if self.writable:
os.write(self._fd, self.buffer.getvalue())
self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
def __init__(self, file):
self.buffer = FastIO(file)
self.flush = self.buffer.flush
self.writable = self.buffer.writable
self.write = lambda s: self.buffer.write(s.encode("ascii"))
self.read = lambda: self.buffer.read().decode("ascii")
self.readline = lambda: self.buffer.readline().decode("ascii")
if not is_it_local():
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
input = lambda: sys.stdin.readline().rstrip("\r\n")
# endregion
def input1(type=int):
return type(input())
def input2(type=int):
[a, b] = list(map(type, input().split()))
return a, b
def input3(type=int):
[a, b, c] = list(map(type, input().split()))
return a, b, c
def input_array(type=int):
return list(map(type, input().split()))
def input_string():
s = input()
return list(s)
##############################################################
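# A cell stays claimable (0) only if its row and column contain no 1. After the
# marking pass below, min(max zeros left in any row, max zeros left in any column)
# equals min(#all-zero rows, #all-zero columns), i.e. the number of moves available;
# the winner is decided by the parity of that count.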
def main():
t = input1()
for ci in range(t):
n, m = input2()
arr = [[0 for _ in range(m)] for __ in range(n)]
for i in range(n):
arr[i] = input_array()
mn = 0
for i in range(n):
for j in range(m):
if arr[i][j] == 1:
for k in range(n):
if arr[k][j] == 0:
arr[k][j] = 2
for k in range(m):
if arr[i][k] == 0:
arr[i][k] = 2
rr = 0
for i in range(n):
cnt = 0
for j in range(m):
if arr[i][j] == 0:
cnt += 1
rr = max(rr, cnt)
cc = 0
for j in range(m):
cnt = 0
for i in range(n):
if arr[i][j] == 0:
cnt += 1
cc = max(cc, cnt)
mn = min(rr, cc)
# print(mn)
if mn%2 == 0:
print('Vivek')
else:
print('Ashish')
pass
if __name__ == '__main__':
READ('in.txt')
main()
| 22.184211 | 73 | 0.575326 |
| 7951f019b6f150d910ee32876a498d97c4787e87 | 1,235 | py | Python | pymeasure/instruments/keithley/__init__.py | ronan-sensome/pymeasure | a88f73ee25e98ef976f3741c08a49da6c4d55f0d | ["MIT"] | 10 | 2019-01-23T05:52:59.000Z | 2021-11-19T02:56:20.000Z | pymeasure/instruments/keithley/__init__.py | ronan-sensome/pymeasure | a88f73ee25e98ef976f3741c08a49da6c4d55f0d | ["MIT"] | null | null | null | pymeasure/instruments/keithley/__init__.py | ronan-sensome/pymeasure | a88f73ee25e98ef976f3741c08a49da6c4d55f0d | ["MIT"] | 2 | 2020-01-27T04:40:48.000Z | 2020-07-17T03:41:16.000Z |
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2017 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from .keithley2000 import Keithley2000
from .keithley2400 import Keithley2400
| 45.740741 | 79 | 0.782186 |
| 7951f02f76b5e340b253b4bd106b53162328a893 | 479 | py | Python | 05_list_operation/numbers.py | Story5/python-crash-course | 9e4e663db83ea9821c1dac83f02106eef7a9a14d | ["Apache-2.0"] | null | null | null | 05_list_operation/numbers.py | Story5/python-crash-course | 9e4e663db83ea9821c1dac83f02106eef7a9a14d | ["Apache-2.0"] | null | null | null | 05_list_operation/numbers.py | Story5/python-crash-course | 9e4e663db83ea9821c1dac83f02106eef7a9a14d | ["Apache-2.0"] | null | null | null |
for value in range(1,5):
print(value)
numbers = list(range(1,5))
print(numbers)
even_numbers = list(range(2,11,2))
print(even_numbers)
squares = []
for value in range(1,11):
square = value**2
squares.append(square)
print(squares)
squares2 = [value**2 for value in range(1,11)]
print(squares2)
players = ["charles", "martina", "michael", "florence", "eli"]
print(players[0:3])
#['charles', 'martina', 'michael']
print(players[-3:])
#['michael', 'florence', 'eli']
| 20.826087 | 62 | 0.661795 |
| 7951f035cc0fa28f563ed8de28ad2e714acf11dd | 1,261 | py | Python | Mini5App/models.py | cs-fullstack-2019-spring/django-mini-project5-Joshtg1104 | 8584602b039a27a90dda122a70d5bc825be2562c | ["Apache-2.0"] | null | null | null | Mini5App/models.py | cs-fullstack-2019-spring/django-mini-project5-Joshtg1104 | 8584602b039a27a90dda122a70d5bc825be2562c | ["Apache-2.0"] | null | null | null | Mini5App/models.py | cs-fullstack-2019-spring/django-mini-project5-Joshtg1104 | 8584602b039a27a90dda122a70d5bc825be2562c | ["Apache-2.0"] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class LoginModel(models.Model):
name = models.CharField(max_length=70, default="")
username = models.CharField(max_length=70, default="")
password1 = models.CharField(max_length=200, default="")
password2 = models.CharField(max_length=200, default="")
email_address = models.EmailField(default="")
profile_picture = models.CharField(max_length=800, default="")
userForeignKey = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
def __str__(self):
return self.name
class RecipeModel(models.Model):
recipe_name = models.CharField(max_length=200, default="")
description = models.CharField(max_length=1000, default="")
recipe_ingredients = models.CharField(max_length=3000, default="")
recipe_directions = models.CharField(max_length=3000, default="")
date_created = models.DateField()
recipe_creator = models.CharField(max_length=60, default="")
recipe_image = models.CharField(max_length=800, default="")
recipeForeignKey = models.ForeignKey(LoginModel, on_delete=models.SET_NULL, null=True, blank=True)
def __str__(self):
return self.recipe_name
| 39.40625 | 102 | 0.736717 |
| 7951f18da3e226d30be5ede1f6e6c15a97bb94d6 | 1,659 | py | Python | two_d_nav/utils.py | ZikangXiong/two-d-nav-gym | 1ab3d5323e87d672f04a9c796f7ccabb752a6e67 | ["MIT"] | null | null | null | two_d_nav/utils.py | ZikangXiong/two-d-nav-gym | 1ab3d5323e87d672f04a9c796f7ccabb752a6e67 | ["MIT"] | 1 | 2021-09-06T21:34:21.000Z | 2021-09-07T04:26:57.000Z | two_d_nav/utils.py | ZikangXiong/two-d-nav-gym | 1ab3d5323e87d672f04a9c796f7ccabb752a6e67 | ["MIT"] | 1 | 2021-09-07T22:52:13.000Z | 2021-09-07T22:52:13.000Z |
import math
import numpy as np
import pygame
from two_d_nav import config
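# Convert between absolute map coordinates and the normalized [-1, 1] range, using
# the map centre as the origin and half the map size as the radius.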
def normalize_pos(pos: np.ndarray) -> np.ndarray:
map_size = np.array(config.map_size)
center = map_size / 2
radius = map_size - center
return (pos - center) / radius
def denormalize_pos(pos: np.ndarray) -> np.ndarray:
map_size = np.array(config.map_size)
center = map_size / 2
radius = map_size - center
return center + pos * radius
class Point:
    # constructed from a plain tuple
def __init__(self, point_t=(0, 0)):
self.x = float(point_t[0])
self.y = float(point_t[1])
# define all useful operators
def __add__(self, other):
return Point((self.x + other.x, self.y + other.y))
def __sub__(self, other):
return Point((self.x - other.x, self.y - other.y))
def __mul__(self, scalar):
return Point((self.x * scalar, self.y * scalar))
    def __truediv__(self, scalar):  # Python 3 operator name (the original __div__ only applies to Python 2)
        return Point((self.x / scalar, self.y / scalar))
def __len__(self):
return int(math.sqrt(self.x ** 2 + self.y ** 2))
# get back values in original tuple format
def get(self):
return self.x, self.y
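# Step along the segment in dash_length increments, drawing only the even-indexed
# pieces to produce the dashed effect.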
def draw_dashed_line(surf, color, start_pos, end_pos, width=1, dash_length=10):
origin = Point(start_pos)
target = Point(end_pos)
displacement = target - origin
length = len(displacement)
slope = displacement / length
    for index in range(0, length // dash_length, 2):  # integer division so range() receives an int
start = origin + (slope * index * dash_length)
end = origin + (slope * (index + 1) * dash_length)
pygame.draw.line(surf, color, start.get(), end.get(), width)
| 26.333333 | 79 | 0.634117 |
| 7951f248e13460e8800bcb84f82eec6fff8f9743 | 657 | py | Python | vuln_manager/core/templatetags/vuln_extras.py | jobscry/vuln_manager | ac68308d923b0719b960e45c1b9ff37aab991c89 | ["MIT"] | null | null | null | vuln_manager/core/templatetags/vuln_extras.py | jobscry/vuln_manager | ac68308d923b0719b960e45c1b9ff37aab991c89 | ["MIT"] | null | null | null | vuln_manager/core/templatetags/vuln_extras.py | jobscry/vuln_manager | ac68308d923b0719b960e45c1b9ff37aab991c89 | ["MIT"] | null | null | null |
from django import template
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from cpes.models import Item
register = template.Library()
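# qstring builds a GET query string ("?key=value&...") from a dict, skipping None
# values; with end_comma=True a trailing '&' is appended so further parameters can
# be concatenated in the template. part_display maps a CPE part code to its label.
# Typical template usage (assumed): {{ filters|qstring }} where `filters` is a dict
# of GET parameters.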
@register.filter(needs_autoescape=False)
def qstring(val_dict, end_comma=True):
q_strings = []
for key, val in val_dict.items():
if val is not None:
q_strings.append(urlquote(key) + '=' + urlquote(val))
ret_string = '?' + '&'.join(q_strings)
if end_comma:
if len(q_strings) > 0:
return ret_string + '&'
return mark_safe(ret_string)
@register.filter
def part_display(part):
return Item.PART_CHOICES._display_map[part]
| 27.375 | 65 | 0.692542 |
| 7951f463e59915f795016c4c60de636d4333f7dc | 11,920 | py | Python | q2_demux/plugin_setup.py | EmFord/q2-demux | d5dbcb3591d92fb6beff2d2ee7ba77c081907b71 | ["BSD-3-Clause"] | null | null | null | q2_demux/plugin_setup.py | EmFord/q2-demux | d5dbcb3591d92fb6beff2d2ee7ba77c081907b71 | ["BSD-3-Clause"] | null | null | null | q2_demux/plugin_setup.py | EmFord/q2-demux | d5dbcb3591d92fb6beff2d2ee7ba77c081907b71 | ["BSD-3-Clause"] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import importlib
from qiime2.plugin import (
Plugin, Metadata, MetadataColumn, Categorical, Bool, Str, Int, Float,
Range, Citations, TypeMatch
)
from q2_types.sample_data import SampleData
from q2_types.per_sample_sequences import (
SequencesWithQuality, PairedEndSequencesWithQuality,
JoinedSequencesWithQuality)
import q2_demux
from ._type import (RawSequences, EMPSingleEndSequences, EMPPairedEndSequences,
ErrorCorrectionDetails)
from ._format import (EMPMultiplexedDirFmt, ErrorCorrectionDetailsDirFmt,
EMPSingleEndDirFmt, EMPSingleEndCasavaDirFmt,
EMPPairedEndDirFmt, EMPPairedEndCasavaDirFmt)
citations = Citations.load('citations.bib', package='q2_demux')
plugin = Plugin(
name='demux',
version=q2_demux.__version__,
website='https://github.com/qiime2/q2-demux',
package='q2_demux',
description=('This QIIME 2 plugin supports demultiplexing of '
'single-end and paired-end sequence reads and '
'visualization of sequence quality information.'),
short_description='Plugin for demultiplexing & viewing sequence quality.'
)
plugin.register_semantic_types(
RawSequences, EMPSingleEndSequences, EMPPairedEndSequences,
ErrorCorrectionDetails)
plugin.register_formats(EMPMultiplexedDirFmt, ErrorCorrectionDetailsDirFmt,
EMPSingleEndDirFmt, EMPSingleEndCasavaDirFmt,
EMPPairedEndDirFmt, EMPPairedEndCasavaDirFmt)
# TODO: remove when aliasing exists
plugin.register_semantic_type_to_format(
RawSequences,
artifact_format=EMPSingleEndDirFmt
)
plugin.register_semantic_type_to_format(
EMPSingleEndSequences,
artifact_format=EMPSingleEndDirFmt
)
plugin.register_semantic_type_to_format(
EMPPairedEndSequences,
artifact_format=EMPPairedEndDirFmt
)
plugin.register_semantic_type_to_format(
ErrorCorrectionDetails,
artifact_format=ErrorCorrectionDetailsDirFmt
)
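# Each registration below declares an action's inputs, parameters and outputs plus
# the human-readable descriptions QIIME 2 uses when generating its command-line and
# Artifact API interfaces.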
plugin.methods.register_function(
function=q2_demux.emp_single,
# TODO: remove RawSequences by creating an alias to EMPSequences
inputs={'seqs': (RawSequences |
EMPSingleEndSequences |
EMPPairedEndSequences)},
parameters={'barcodes': MetadataColumn[Categorical],
'golay_error_correction': Bool,
'rev_comp_barcodes': Bool,
'rev_comp_mapping_barcodes': Bool},
outputs=[('per_sample_sequences', SampleData[SequencesWithQuality]),
('error_correction_details', ErrorCorrectionDetails)],
input_descriptions={
'seqs': 'The single-end sequences to be demultiplexed.'
},
parameter_descriptions={
'barcodes': 'The sample metadata column containing the per-sample '
'barcodes.',
'golay_error_correction': 'Perform 12nt Golay error correction on the '
'barcode reads.',
'rev_comp_barcodes': 'If provided, the barcode sequence reads will be '
'reverse complemented prior to demultiplexing.',
'rev_comp_mapping_barcodes': 'If provided, the barcode sequences in '
'the sample metadata will be reverse '
'complemented prior to demultiplexing.'
},
output_descriptions={
'per_sample_sequences': 'The resulting demultiplexed sequences.',
'error_correction_details': 'Detail about the barcode error '
'corrections.'
},
name='Demultiplex sequence data generated with the EMP protocol.',
description=('Demultiplex sequence data (i.e., map barcode reads to '
'sample ids) for data generated with the Earth Microbiome '
'Project (EMP) amplicon sequencing protocol. Details about '
'this protocol can be found at '
'http://www.earthmicrobiome.org/protocols-and-standards/'),
citations=[
citations['hamady2008'],
citations['hamady2009']]
)
plugin.methods.register_function(
function=q2_demux.emp_paired,
inputs={'seqs': EMPPairedEndSequences},
parameters={'barcodes': MetadataColumn[Categorical],
'golay_error_correction': Bool,
'rev_comp_barcodes': Bool,
'rev_comp_mapping_barcodes': Bool},
outputs=[
('per_sample_sequences', SampleData[PairedEndSequencesWithQuality]),
('error_correction_details', ErrorCorrectionDetails),
],
input_descriptions={
'seqs': 'The paired-end sequences to be demultiplexed.'
},
parameter_descriptions={
'barcodes': 'The sample metadata column containing the per-sample '
'barcodes.',
'golay_error_correction': 'Perform 12nt Golay error correction on the '
'barcode reads.',
'rev_comp_barcodes': 'If provided, the barcode sequence reads will be '
'reverse complemented prior to demultiplexing.',
'rev_comp_mapping_barcodes': 'If provided, the barcode sequences in '
'the sample metadata will be reverse '
'complemented prior to demultiplexing.'
},
output_descriptions={
'per_sample_sequences': 'The resulting demultiplexed sequences.',
'error_correction_details': 'Detail about the barcode error '
'corrections.'
},
name=('Demultiplex paired-end sequence data generated with the EMP '
'protocol.'),
description=('Demultiplex paired-end sequence data (i.e., map barcode '
'reads to sample ids) for data generated with the Earth '
'Microbiome Project (EMP) amplicon sequencing protocol. '
'Details about this protocol can be found at '
'http://www.earthmicrobiome.org/protocols-and-standards/'),
citations=[
citations['hamady2008'],
citations['hamady2009']]
)
plugin.visualizers.register_function(
function=q2_demux.summarize,
inputs={'data':
SampleData[SequencesWithQuality |
PairedEndSequencesWithQuality |
JoinedSequencesWithQuality]},
parameters={'n': Int},
input_descriptions={
'data': 'The demultiplexed sequences to be summarized.'
},
parameter_descriptions={
'n': ('The number of sequences that should be selected at random for '
'quality score plots. The quality plots will present the '
'average positional qualities across all of the sequences '
'selected. If input sequences are paired end, plots will be '
'generated for both forward and reverse reads for the same `n` '
'sequences.')
},
name='Summarize counts per sample.',
description=('Summarize counts per sample for all samples, and generate '
'interactive positional quality plots based on `n` randomly '
'selected sequences.')
)
plugin.methods.register_function(
function=q2_demux.subsample_single,
inputs={'sequences': SampleData[SequencesWithQuality |
PairedEndSequencesWithQuality]},
parameters={'fraction': Float % Range(0, 1,
inclusive_start=False,
inclusive_end=False)},
outputs=[
('subsampled_sequences', SampleData[SequencesWithQuality])
],
input_descriptions={
'sequences': 'The demultiplexed sequences to be subsampled.'
},
parameter_descriptions={
'fraction': ('The fraction of sequences to retain in subsample.')
},
output_descriptions={
'subsampled_sequences': 'The subsampled sequences.'
},
name='Subsample single-end sequences without replacement.',
description=('Generate a random subsample of single-end sequences '
'containing approximately the fraction of input sequences '
'specified by the fraction parameter. The number of output '
'samples will always be equal to the number of input '
'samples, even if some of those samples contain no '
'sequences after subsampling.')
)
plugin.methods.register_function(
function=q2_demux.subsample_paired,
inputs={'sequences': SampleData[PairedEndSequencesWithQuality]},
parameters={'fraction': Float % Range(0, 1,
inclusive_start=False,
inclusive_end=False)},
outputs=[
('subsampled_sequences', SampleData[PairedEndSequencesWithQuality])
],
input_descriptions={
'sequences': 'The demultiplexed sequences to be subsampled.'
},
parameter_descriptions={
'fraction': ('The fraction of sequences to retain in subsample.')
},
output_descriptions={
'subsampled_sequences': 'The subsampled sequences.'
},
name='Subsample paired-end sequences without replacement.',
description=('Generate a random subsample of paired-end sequences '
'containing approximately the fraction of input sequences '
'specified by the fraction parameter. The number of output '
'samples will always be equal to the number of input '
'samples, even if some of those samples contain no '
'sequences after subsampling.')
)
T = TypeMatch([SequencesWithQuality, PairedEndSequencesWithQuality,
JoinedSequencesWithQuality])
plugin.methods.register_function(
function=q2_demux.filter_samples,
inputs={'demux': SampleData[T]},
parameters={'metadata': Metadata,
'where': Str,
'exclude_ids': Bool},
outputs=[
('filtered_demux', SampleData[T])
],
input_descriptions={
'demux': 'The demultiplexed data from which samples should be '
'filtered.'
},
parameter_descriptions={
'metadata': 'Sample metadata indicating which sample ids to filter. '
'The optional `where` parameter may be used to filter ids '
'based on specified conditions in the metadata. The '
'optional `exclude_ids` parameter may be used to exclude '
'the ids specified in the metadata from the filter.',
'where': 'Optional SQLite WHERE clause specifying sample metadata '
'criteria that must be met to be included in the filtered '
'data. If not provided, all samples in `metadata` that are '
'also in the demultiplexed data will be retained.',
'exclude_ids': 'Defaults to False. If True, the samples selected by '
'the `metadata` and optional `where` parameter will be '
'excluded from the filtered data.',
},
output_descriptions={
'filtered_demux': 'Filtered demultiplexed data.'
},
name='Filter samples out of demultiplexed data.',
description='Filter samples indicated in given metadata out of '
'demultiplexed data. Specific samples can be further selected '
'with the WHERE clause, and the `exclude_ids` parameter '
'allows for filtering of all samples not specified.',
)
importlib.import_module('q2_demux._transformer')
| 43.663004 | 79 | 0.63255 |
| 7951f68ba610393d49116c00bab904fbcc380e11 | 1,612 | py | Python | app/__init__.py | adi6ntro/microblog | 85c1aad7f4081ebcf6b9038352c4bd1e726e8b13 | ["MIT"] | null | null | null | app/__init__.py | adi6ntro/microblog | 85c1aad7f4081ebcf6b9038352c4bd1e726e8b13 | ["MIT"] | null | null | null | app/__init__.py | adi6ntro/microblog | 85c1aad7f4081ebcf6b9038352c4bd1e726e8b13 | ["MIT"] | null | null | null |
from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login = LoginManager(app)
login.login_view = 'login'
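# Imported at the bottom to avoid a circular import: routes and models themselves
# import the app and db objects created above.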
from app import routes, models, errors
if not app.debug:
if app.config['MAIL_SERVER']:
auth = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr='no-reply@' + app.config['MAIL_SERVER'],
toaddrs=app.config['ADMINS'], subject='Microblog Failure',
credentials=auth, secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/microblog.log', maxBytes=10240,
backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Microblog startup')
| 36.636364 | 79 | 0.675558 |
| 7951f763b65e285eb09ef170e1b0163141aa9eda | 19,690 | py | Python | pmaf/sequence/_multiple/_stream.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | ["BSD-3-Clause"] | 1 | 2021-07-02T06:24:17.000Z | 2021-07-02T06:24:17.000Z | pmaf/sequence/_multiple/_stream.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | ["BSD-3-Clause"] | 1 | 2021-06-28T12:02:46.000Z | 2021-06-28T12:02:46.000Z | pmaf/sequence/_multiple/_stream.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | ["BSD-3-Clause"] | null | null | null |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from shutil import copyfile
from pmaf.sequence._metakit import (
MultiSequenceMetabase,
MultiSequenceStreamBackboneMetabase,
)
from pmaf.sequence._multiple._multiple import MultiSequence
from pmaf.sequence import _shared as seq_shared
from random import random
from pmaf.sequence._sequence._nucleotide import Nucleotide
from pmaf.sequence._metakit import NucleotideMetabase
from Bio import SeqIO
import tempfile
import pandas as pd
import tables
import pickle
import os
class MultiSequenceStream(MultiSequenceStreamBackboneMetabase):
""":meta private:"""
_temp_filename_suffix_maker = lambda self, path_with_preffix: "{}_pmaf_{}".format(
path_with_preffix, str(round(100000000 * random()))
)
_supported_compression_libraries = ["zlib", "lzo", "bzip2", "blosc"]
_default_seq_encoding = "ascii"
_default_complevel = 5
_default_path_list = ["/seq", "/meta", "/info"]
_default_info_node_path = "/info/dump"
_default_seqs_node_path = "/seq/seqs"
_default_meta_node_path = "/meta/metas"
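    # HDF5 layout (via PyTables): "/seq/seqs" stores ASCII-encoded sequence strings,
    # "/meta/metas" stores pickled per-sequence metadata, and "/info/dump" stores the
    # pickled instance attributes used to restore a stream on reopen.  _stream_map
    # maps accession number -> row index in those VLArrays.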
def __init__(
self,
filepath=None,
expected_rows=1000,
mode="DNA",
aligned=False,
name=None,
compressor=False,
):
self._name = ""
self._mode = None
self._stream_filepath = None
self._stream_storer = None
self._temp_state = True if filepath is None else False
self._stream_map = pd.Series(dtype=str)
self._last_seq_length = None
self._aligned = False
self._compressor = None
self._expected_rows = None
restore_state = False
if isinstance(aligned, bool):
self._aligned = aligned
else:
raise TypeError("`aligned` must have bool type")
if isinstance(expected_rows, int):
if expected_rows > 0:
self._expected_rows = expected_rows
else:
raise ValueError("`expected_rows` must be positive number")
else:
raise TypeError("`expected_rows` must have int type")
if isinstance(compressor, str):
if compressor in self._supported_compression_libraries:
self._compressor = compressor
else:
raise ValueError(
"Compressor is not supported. Please use one of {}".format(
",".join(self._supported_compression_libraries)
)
)
elif compressor == False:
self._compressor = False
else:
raise TypeError("`compressor` must have string type. ")
if isinstance(name, str):
self._name = name
elif name is None:
pass
else:
raise TypeError("Name can be only string or None.")
if isinstance(mode, str):
if seq_shared.validate_seq_mode(mode):
self._mode = mode.lower()
else:
raise ValueError("Sequence mode can only be 'DNA', 'RNA' or 'Protein'")
if isinstance(filepath, str):
self._stream_filepath = os.path.abspath(filepath)
if os.path.exists(self._stream_filepath):
restore_state = True
elif filepath is None:
tmp_temp_filepath = self._temp_filename_suffix_maker(
os.path.join(tempfile.gettempdir(), tempfile.gettempprefix())
)
while os.path.exists(tmp_temp_filepath):
tmp_temp_filepath = self._temp_filename_suffix_maker(
os.path.join(tempfile.gettempdir(), tempfile.gettempprefix())
)
self._stream_filepath = tmp_temp_filepath
else:
raise ValueError("`filepath` is invalid.")
if restore_state:
if not self._restore_init():
raise RuntimeError("Cannot load file.")
else:
if not self._init_seq_stream_storer():
raise RuntimeError("Cannot be initiate file.")
def __repr__(self):
class_name = self.__class__.__name__
name = self._name if self._name is not None else "N/A"
count = len(self._stream_map)
stream_filepath = self._stream_filepath
aligned = "Yes" if self._aligned else "No"
repr_str = "<{}: {}, Total Sequences: {}, Filepath: {}, Aligned: {}>".format(
class_name, name, count, stream_filepath, aligned
)
return repr_str
def __exit__(self, exc_type, exc_value, traceback):
if self._temp_state:
os.unlink(self._stream_filepath)
return
def _init_seq_stream_storer(self):
""""""
ret = False
try:
tmp_filters = (
tables.Filters(
complib=self._compressor, complevel=self._default_complevel
)
if self._compressor
else None
)
tmp_stream_store = tables.open_file(
self._stream_filepath, mode="a", title=self._name, filters=tmp_filters
)
tmp_stream_store.create_group("/", "seq", "Raw sequences")
tmp_stream_store.create_vlarray(
"/seq",
"seqs",
atom=tables.VLStringAtom(),
expectedrows=self._expected_rows,
)
tmp_stream_store.create_group("/", "meta", "Sequence metadata")
tmp_stream_store.create_vlarray(
"/meta",
"metas",
atom=tables.ObjectAtom(),
expectedrows=self._expected_rows,
)
tmp_stream_store.create_group("/", "info", "Instance attributes")
tmp_stream_store.create_vlarray(
"/info", "dump", atom=tables.ObjectAtom(), expectedrows=1
)
self._stream_storer = tmp_stream_store
ret = True
except:
pass
return ret
def _restore_init(self):
""""""
ret = False
try:
tmp_stream_store_read = tables.open_file(self._stream_filepath, mode="r")
group_list = []
for group in tmp_stream_store_read.walk_groups():
group_list.append(group._v_pathname)
if all([group in group_list for group in self._default_path_list]):
tmp_instance_dict_bytes = tmp_stream_store_read.get_node(
self._default_info_node_path
).read()[0]
tmp_instance_dict = pickle.loads(tmp_instance_dict_bytes)
self.__dict__.update(tmp_instance_dict)
tmp_stream_store_read.close()
tmp_filters = (
tables.Filters(
complib=self._compressor, complevel=self._default_complevel
)
if self._compressor
else None
)
tmp_stream_store = tables.open_file(
self._stream_filepath, mode="a", filters=tmp_filters
)
self._stream_storer = tmp_stream_store
ret = True
except:
pass
return ret
def close(self, copy_filepath=None):
"""
Parameters
----------
copy_filepath :
(Default value = None)
Returns
-------
"""
tmp_instance_dict = {
k: v
for k, v in self.__dict__.items()
if k not in ["_stream_filepath", "_stream_storer"]
}
tmp_instance_dict_bytes = pickle.dumps(tmp_instance_dict)
self._stream_storer.get_node(self._default_info_node_path).remove()
self._stream_storer.create_vlarray(
"/info", "dump", atom=tables.ObjectAtom(), expectedrows=1
)
self._stream_storer.get_node(self._default_info_node_path).append(
tmp_instance_dict_bytes
)
self._stream_storer.close()
if copy_filepath is not None and isinstance(copy_filepath, str):
if not os.path.exists(copy_filepath):
copyfile(self._stream_filepath, copy_filepath)
else:
raise FileExistsError()
if self._temp_state:
os.unlink(self._stream_filepath)
return
def get_sequence_by_acc(self, acc_number):
"""
Parameters
----------
acc_number :
Returns
-------
"""
ret = None
if acc_number in self._stream_map.index:
ret = self._get_sequence_by_acc_id(acc_number)
return ret
def get_multiseq_by_accs(self, acc_numbers):
"""
Parameters
----------
acc_numbers :
Returns
-------
"""
ret = None
if isinstance(acc_numbers, list):
if len(acc_numbers) > 0:
if self._stream_map.index.isin(acc_numbers).sum() == len(acc_numbers):
seq_list = []
for name in acc_numbers:
seq_list.append(self._get_sequence_by_acc_id(name))
ret = MultiSequence(
seq_list,
name=self._name,
aligned=self._aligned,
metadata={"accession-numbers": "; ".join(acc_numbers)},
)
return ret
def iter_sequences(self):
""""""
for acc_num in self._stream_map.index.values.tolist():
yield self._get_sequence_by_acc_id(acc_num)
def write_all_to_fasta(self, fasta_fp, write_in_chunks=100):
"""
Parameters
----------
fasta_fp :
write_in_chunks :
(Default value = 100)
Returns
-------
"""
if not os.path.exists(fasta_fp):
if isinstance(write_in_chunks, int):
if write_in_chunks >= 0:
chunks = (
len(self._stream_map)
if write_in_chunks == 0
else write_in_chunks
)
from Bio.Seq import Seq
with open(fasta_fp, "a") as fasta_handle:
chunk_counter = chunks
records_chunk = []
for sequence in self.iter_sequences():
tmp_record_metadata = (
sequence.metadata["description"]
if "description" in sequence.metadata.keys()
else self._name
)
next_record = SeqIO.SeqRecord(
Seq(sequence.text),
sequence.name,
description=tmp_record_metadata,
)
if chunk_counter > 1:
records_chunk.append(next_record)
chunk_counter = chunk_counter - 1
else:
records_chunk.append(next_record)
chunk_counter = chunks
SeqIO.write(records_chunk, fasta_handle, "fasta")
records_chunk = []
if chunk_counter > 0:
SeqIO.write(records_chunk, fasta_handle, "fasta")
else:
raise TypeError("`write_in_chunks` must be integer.")
else:
raise FileExistsError("Target file must not exists.")
return
def _get_sequence_by_acc_id(self, accid):
"""
Parameters
----------
accid :
Returns
-------
"""
seqid = self._accid_to_seqid(accid)
seq_str = self._retrieve_seq_by_seqid(seqid)
seq_meta_pack = self._retrieve_meta_by_seqid(seqid)
tmp_seq = Nucleotide(seq_str, accid, mode=self._mode)
tmp_seq.restore_buckle(seq_meta_pack)
return tmp_seq
def _accid_to_seqid(self, accid):
"""
Parameters
----------
accid :
Returns
-------
"""
return self._stream_map[accid]
def _retrieve_seq_by_seqid(self, seqid):
"""
Parameters
----------
seqid :
Returns
-------
"""
tmp_seq_bytes = self._stream_storer.get_node(self._default_seqs_node_path)[
seqid
]
return tmp_seq_bytes.decode(self._default_seq_encoding)
def _retrieve_meta_by_seqid(self, seqid):
"""
Parameters
----------
seqid :
Returns
-------
"""
tmp_meta_bytes = self._stream_storer.get_node(self._default_meta_node_path)[
seqid
]
return pickle.loads(tmp_meta_bytes)
def append_sequence(self, sequence):
"""
Parameters
----------
sequence :
Returns
-------
"""
if isinstance(sequence, NucleotideMetabase):
if isinstance(sequence, Nucleotide):
if sequence.mode == self._mode:
if (sequence.name is not None) and (
sequence.name not in self._stream_map.index
):
if self._verify_sequence(sequence.text):
self._append_sequence(sequence)
self._stream_storer.flush()
else:
raise ValueError("Sequences do not have same length.")
else:
raise ValueError(
"Sequence name must be unique and have legnth > 0."
)
else:
raise ValueError("All sequences must have same mode.")
else:
raise TypeError("`sequence` have invalid type.")
def _append_sequence(self, sequence_instance):
"""
Parameters
----------
sequence_instance :
Returns
-------
"""
tmp_metadata = sequence_instance.buckle_for_uid(self._name)
tmp_seq_str = sequence_instance.text
seqid = self._insert_seq_vlarray(tmp_seq_str)
metaid = self._insert_meta_vlarray(tmp_metadata)
if seqid == metaid:
self._stream_map[str(sequence_instance.name)] = seqid
else:
raise RuntimeError(
"Impossible condition. Stream file might have been externally modified!"
)
return
def extend_multiseq(self, multiseq):
"""
Parameters
----------
multiseq :
Returns
-------
"""
if isinstance(multiseq, MultiSequenceMetabase):
if multiseq.count > 0:
if multiseq.mode == self._mode:
for sequence in multiseq.sequences:
if (sequence.name is None) or (
sequence.name in self._stream_map.index
):
raise ValueError(
"Sequence name must be unique and have legnth > 0."
)
if not self._verify_sequence(sequence.text):
raise ValueError("Sequences do not have same length.")
self._append_multiseq(multiseq)
self._stream_storer.flush()
else:
raise ValueError("All sequences must have same mode.")
else:
raise TypeError("`multiseq` have invalid type.")
def _append_multiseq(self, multiseq):
"""
Parameters
----------
multiseq :
Returns
-------
"""
for sequence in multiseq.sequences:
self._append_sequence(sequence)
return
def append_string(self, name, mode, sequence_str, metadata_dict={}):
"""
Parameters
----------
name :
mode :
sequence_str :
metadata_dict :
(Default value = {})
Returns
-------
"""
if (
isinstance(name, str)
and isinstance(sequence_str, str)
and isinstance(metadata_dict, dict)
and isinstance(mode, str)
):
if mode == self._mode:
if len(name) > 0 and (name not in self._stream_map.index):
if self._verify_sequence(sequence_str):
self._append_sequence_str(name, sequence_str, metadata_dict)
self._stream_storer.flush()
else:
raise ValueError("Sequences do not have same length.")
else:
raise ValueError(
"Sequence name must be unique and have legnth > 0."
)
else:
raise ValueError("All sequences must have same mode.")
else:
raise TypeError("Invalid parameter types.")
return
def _append_sequence_str(self, seq_name, sequence_str, metadata_dict):
"""
Parameters
----------
seq_name :
sequence_str :
metadata_dict :
Returns
-------
"""
seqid = self._insert_seq_vlarray(sequence_str)
metaid = self._insert_meta_vlarray(metadata_dict)
if seqid == metaid:
self._stream_map[seq_name] = seqid
else:
raise RuntimeError(
"Impossible condition. Stream file might have been externally modified!"
)
return
def _insert_seq_vlarray(self, seq_data):
"""
Parameters
----------
seq_data :
Returns
-------
"""
self._last_seq_length = len(seq_data)
seq_data_bytes = seq_data.encode(self._default_seq_encoding)
self._stream_storer.get_node(self._default_seqs_node_path).append(
seq_data_bytes
)
return self._stream_storer.get_node(self._default_seqs_node_path).nrows - 1
def _insert_meta_vlarray(self, metadata):
"""
Parameters
----------
metadata :
Returns
-------
"""
metadata_bytes = pickle.dumps(metadata)
self._stream_storer.get_node(self._default_meta_node_path).append(
metadata_bytes
)
return self._stream_storer.get_node(self._default_meta_node_path).nrows - 1
def _verify_sequence(self, seq_str):
"""
Parameters
----------
seq_str :
Returns
-------
"""
ret = True
if self._aligned:
if self._last_seq_length is not None:
                if self._last_seq_length != len(seq_str):  # a length mismatch fails the aligned check
                    ret = False
return ret
@property
def name(self):
""""""
return self._name
@property
def mode(self):
""""""
return self._mode
@property
def count(self):
""""""
return len(self._stream_map)
@property
def summarize(self):
""""""
return
@property
def accession_numbers(self):
""""""
return self._stream_map.index.tolist()
| 30.153139 | 88 | 0.521229 |
| 7951f76c643f4ee7ccc9214282c505e92cbf5651 | 7,767 | py | Python | django_baker/bakery.py | RobKuipers/django-baker | 8f523f67bc94f4da5cc74754db47e34c19d47627 | ["BSD-3-Clause"] | 1 | 2021-01-05T21:24:25.000Z | 2021-01-05T21:24:25.000Z | django_baker/bakery.py | RobKuipers/django-baker | 8f523f67bc94f4da5cc74754db47e34c19d47627 | ["BSD-3-Clause"] | 2 | 2021-09-28T15:50:00.000Z | 2021-09-28T16:05:20.000Z | django_baker/bakery.py | RobKuipers/django-baker | 8f523f67bc94f4da5cc74754db47e34c19d47627 | ["BSD-3-Clause"] | 1 | 2022-02-05T17:31:29.000Z | 2022-02-05T17:31:29.000Z |
from __future__ import print_function
import os
import re
import itertools
from django.db.models.fields import SlugField
from django.template.loader import get_template
from django.template import Context
from six import iteritems
class Baker(object):
"""
Given a dictionary of apps and models, Baker will bake up a bunch of files that will help get your new app up
and running quickly.
"""
def bake(self, apps_and_models):
"""
Iterates a dictionary of apps and models and creates all the necessary files to get up and running quickly.
"""
for app_label, models_app in iteritems(apps_and_models):
models, app = models_app
models = list(models)
model_names = {model.__name__: self.get_field_names_for_model(model) for model in models}
self.create_directories(app)
self.create_init_files(app, model_names.keys(), models)
self.remove_empty_startapp_files(app)
for file_name in ["forms", "admin"]:
file_path = "%s/%s.py" % (app.path, file_name)
template_path = "django_baker/%s" % (file_name)
self.create_file_from_template(file_path, template_path, {"model_names": model_names})
for model in models:
model_attributes = self.model_attributes(app, model)
self.create_files_from_templates(model_attributes)
def get_field_names_for_model(self, model):
"""
Returns fields other than id and uneditable fields (DateTimeField where auto_now or auto_now_add is True)
"""
return [field.name for field in model._meta.get_fields() if field.name != "id" and not
(field.get_internal_type() == "DateTimeField" and
(field.auto_now is True or field.auto_now_add is True)) and
field.concrete and (not field.is_relation or field.one_to_one or
(field.many_to_one and field.related_model))]
def create_directories(self, app):
"""
If not already there, adds a directory for views, urls and templates.
"""
for folder_name in ["views", "urls", "templates/%s" % app.label]:
directory_path = "%s/%s" % (app.path, folder_name)
if not os.path.exists(directory_path):
os.makedirs(directory_path)
def create_init_files(self, app, model_names, models):
"""
If not already there, creates a new init file in views and urls directory. Init file imports from all
of the files within the directory.
"""
model_name_slugs = ["%s_views" % (self.camel_to_slug(model_name)) for model_name in model_names]
model_names_dict = {self.camel_to_slug(model.__name__): self.camel_to_slug(self.model_name_plural(model)) for
model in models}
for folder_name in ["views", "urls"]:
file_path = "%s/%s/__init__.py" % (app.path, folder_name)
template_path = "django_baker/__init__%s" % folder_name
self.create_file_from_template(file_path, template_path, {"app_label": app.label,
"model_name_slugs": model_name_slugs,
"model_names_dict": model_names_dict
})
def model_attributes(self, app, model):
"""
Creates a dictionary of model attributes that will be used in the templates.
"""
model_name = model.__name__
model_name_plural = self.model_name_plural(model)
slug_field = self.get_unique_slug_field_name(model)
slug_field_name = slug_field.name if slug_field else "slug"
lookup_field = slug_field_name if slug_field else "pk"
return {
'app_label': app.label,
'app_path': app.path,
'model': model,
'model_name': model_name,
'model_name_slug': self.camel_to_slug(model_name),
'model_name_plural': model_name_plural,
'model_name_plural_slug': self.camel_to_slug(model_name_plural),
'model_fields': self.get_field_names_for_model(model),
'slug_field': slug_field,
'slug_field_name': slug_field_name,
'lookup_field': lookup_field
}
def create_files_from_templates(self, model_attributes):
"""
Determines the correct path to put each file and then calls create file method.
"""
for folder_name in ["views", "urls"]:
file_path = "%s/%s/%s_%s.py" % (model_attributes['app_path'], folder_name,
model_attributes['model_name_slug'], folder_name)
template_path = "django_baker/%s" % (folder_name)
self.create_file_from_template(file_path, template_path, model_attributes)
for file_name in ["base", "list", "detail", "create", "update", "delete"]:
file_path = "%s/templates/%s/%s_%s.html" % (model_attributes['app_path'], model_attributes['app_label'],
model_attributes['model_name_slug'], file_name)
template_path = "django_baker/%s.html" % (file_name)
self.create_file_from_template(file_path, template_path, model_attributes)
def create_file_from_template(self, file_path, template_path, context_variables):
"""
Takes template file and context variables and uses django's render method to create new file.
"""
if os.path.exists(file_path):
print("\033[91m" + file_path + " already exists. Skipping." + "\033[0m")
return
with open(file_path, 'w') as new_file:
new_file.write(get_template(template_path).render(context_variables))
print("\033[92m" + "successfully baked " + file_path + "\033[0m")
def remove_empty_startapp_files(self, app):
"""
Removes 'empty' (less than or equal to 4 lines, as that is what they begin with) views, admin, and tests
files.
"""
for file_name in ["views", "admin", "tests"]:
file_path = "%s/%s.py" % (app.path, file_name)
if os.path.exists(file_path):
num_lines = sum(1 for _ in open(file_path))
if num_lines <= 4:
os.remove(file_path)
def camel_to_slug(self, name):
"""
Helper method to convert camel case string (PumpernickelBread) to slug string (pumpernickel_bread)
"""
name = re.sub(r'([a-z])([A-Z])', r'\1 \2', name).title().replace(" ", "").replace("_", "")
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
slug = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
return slug
def model_name_plural(self, model):
"""
Gets the pluralized version of a model. Simply adds an 's' to model name if verbose_name_plural isn't set.
"""
if isinstance(model._meta.verbose_name_plural, str):
return model._meta.verbose_name_plural
return "%ss" % model.__name__
def get_unique_slug_field_name(self, model):
"""
Determines if model has exactly 1 SlugField that is unique. If so, returns it. Otherwise returns None.
"""
slug_fields = []
for field in model._meta.get_fields():
if isinstance(field, SlugField) and field.unique:
slug_fields.append(field)
if len(slug_fields) == 1:
return slug_fields[0]
return None
| 47.650307 | 119 | 0.597528 |
| 7951f837dd07fabae720689af49805b073c46ae9 | 2,243 | py | Python | mcycle/constants.py | momargoh/MCycle | 4eed4b80fe041527f9765f9b6ca135fce864ed0f | ["Apache-2.0"] | 7 | 2018-05-08T08:42:07.000Z | 2022-01-27T14:40:45.000Z | mcycle/constants.py | momargoh/MCycle | 4eed4b80fe041527f9765f9b6ca135fce864ed0f | ["Apache-2.0"] | 6 | 2018-05-20T05:52:48.000Z | 2019-09-05T19:56:29.000Z | mcycle/constants.py | momargoh/MCycle | 4eed4b80fe041527f9765f9b6ca135fce864ed0f | ["Apache-2.0"] | 4 | 2018-05-06T20:57:34.000Z | 2021-07-03T04:32:15.000Z |
# THIS FILE IS AUTOMATICALLY GENERATED: DO NOT EDIT!
# Tolerances
TOLABS_X = 1e-10
# CoolProp input_pairs
INPUT_PAIR_INVALID = 0
QT_INPUTS = 1
PQ_INPUTS = 2
QSmolar_INPUTS = 3
QSmass_INPUTS = 4
HmolarQ_INPUTS = 5
HmassQ_INPUTS = 6
DmolarQ_INPUTS = 7
DmassQ_INPUTS = 8
PT_INPUTS = 9
DmassT_INPUTS = 10
DmolarT_INPUTS = 11
HmolarT_INPUTS = 12
HmassT_INPUTS = 13
SmolarT_INPUTS = 14
SmassT_INPUTS = 15
TUmolar_INPUTS = 16
TUmass_INPUTS = 17
DmassP_INPUTS = 18
DmolarP_INPUTS = 19
HmassP_INPUTS = 20
HmolarP_INPUTS = 21
PSmass_INPUTS = 22
PSmolar_INPUTS = 23
PUmass_INPUTS = 24
PUmolar_INPUTS = 25
HmassSmass_INPUTS = 26
HmolarSmolar_INPUTS = 27
SmassUmass_INPUTS = 28
SmolarUmolar_INPUTS = 29
DmassHmass_INPUTS = 30
DmolarHmolar_INPUTS = 31
DmassSmass_INPUTS = 32
DmolarSmolar_INPUTS = 33
DmassUmass_INPUTS = 34
DmolarUmolar_INPUTS = 35
# CoolProp imposed phases
iphase_liquid = 0
iphase_supercritical = 1
iphase_supercritical_gas = 2
iphase_supercritical_liquid = 3
iphase_critical_point = 4
iphase_gas = 5
iphase_twophase = 6
iphase_unknown = 7
iphase_not_imposed = 8
# MCycle phases
PHASE_LIQUID = 0
PHASE_SUPERCRITICAL = 1
PHASE_SUPERCRITICAL_GAS = 2
PHASE_SUPERCRITICAL_LIQUID = 3
PHASE_CRITICAL_POINT = 4
PHASE_VAPOUR = 5
PHASE_VAPOR = 5
PHASE_GAS = 5
PHASE_TWOPHASE = 6
PHASE_UNKNOWN = 7
PHASE_NOT_IMPOSED = 8
PHASE_SATURATED_LIQUID = 9
PHASE_SATURATED_VAPOUR = 10
PHASE_SATURATED_VAPOR = 10
# Unit Phases
UNITPHASE_NONE = 0
UNITPHASE_ALL = 1
UNITPHASE_LIQUID = 2
UNITPHASE_VAPOUR = 3
UNITPHASE_VAPOR = 3
UNITPHASE_GAS = 3
UNITPHASE_TWOPHASE_EVAPORATING = 4
UNITPHASE_TP_EVAP = 4
UNITPHASE_TWOPHASE_CONDENSING = 5
UNITPHASE_TP_COND = 5
UNITPHASE_SUPERCRITICAL = 6
UNITPHASE_ALL_SINGLEPHASE = 7
UNITPHASE_ALL_SP = 7
UNITPHASE_ALL_TWOPHASE = 8
UNITPHASE_ALL_TP = 8
# Transfer mechanisms
TRANSFER_NONE = 0
TRANSFER_ALL = 1
TRANSFER_HEAT = 2
TRANSFER_FRICTION = 3
# Flows
FLOW_NONE = 0
FLOW_ALL = 1
WORKING_FLUID = 2
FLOW_PRIMARY = 2
FLOW1 = 2
SECONDARY_FLUID = 3
FLOW_SECONDARY = 3
FLOW2 = 3
# HxFlowConfig
FLOWSENSE_UNDEFINED = 0
COUNTERFLOW = 1
PARALLELFLOW = 2
CROSSFLOW = 3
# Constraints
NO_CONSTRAINT = 0
CONSTANT_P = 1
CONSTANT_V = 2
# MCycle
SOURCE_URL = 'https://github.com/momargoh/MCycle'
DOCS_URL = 'https://mcycle.readthedocs.io'
| 20.768519 | 52 | 0.800267 |
| 7951f8f7c8305aaf71be43c3b809dcf7e5bfd182 | 8,886 | py | Python | broadlink/__init__.py | thewh1teagle/python-broadlink | 6ab23e92614ea89df1fe168a00bcf74d91356ed6 | ["MIT"] | 1 | 2020-11-06T01:12:43.000Z | 2020-11-06T01:12:43.000Z | broadlink/__init__.py | thewh1teagle/python-broadlink | 6ab23e92614ea89df1fe168a00bcf74d91356ed6 | ["MIT"] | null | null | null | broadlink/__init__.py | thewh1teagle/python-broadlink | 6ab23e92614ea89df1fe168a00bcf74d91356ed6 | ["MIT"] | 1 | 2020-11-06T14:45:17.000Z | 2020-11-06T14:45:17.000Z |
#!/usr/bin/python3
"""The python-broadlink library."""
import socket
import time
from datetime import datetime
from typing import Dict, List, Union, Tuple, Type
from .alarm import S1C
from .climate import hysen
from .cover import dooya
from .device import device
from .helpers import get_local_ip
from .light import lb1
from .remote import rm, rm2, rm4
from .sensor import a1
from .switch import bg1, mp1, sp1, sp2
def get_devices() -> Dict[int, Tuple[Type[device], str, str]]:
"""Return all supported devices."""
return {
0x0000: (sp1, "SP1", "Broadlink"),
0x2711: (sp2, "SP2", "Broadlink"),
0x2716: (sp2, "NEO PRO", "Ankuoo"),
0x2717: (sp2, "NEO", "Ankuoo"),
0x2719: (sp2, "SP2-compatible", "Honeywell"),
0x271a: (sp2, "SP2-compatible", "Honeywell"),
0x2720: (sp2, "SP mini", "Broadlink"),
0x2728: (sp2, "SP2-compatible", "URANT"),
0x2733: (sp2, "SP3", "Broadlink"),
0x2736: (sp2, "SP mini+", "Broadlink"),
0x273e: (sp2, "SP mini", "Broadlink"),
0x7530: (sp2, "SP2", "Broadlink (OEM)"),
0x7539: (sp2, "SP2-IL", "Broadlink (OEM)"),
0x753e: (sp2, "SP mini 3", "Broadlink"),
0x7540: (sp2, "MP2", "Broadlink"),
0X7544: (sp2, "SP2-CL", "Broadlink"),
0x7546: (sp2, "SP2-UK/BR/IN", "Broadlink (OEM)"),
0x7547: (sp2, "SC1", "Broadlink"),
0x7918: (sp2, "SP2", "Broadlink (OEM)"),
0x7919: (sp2, "SP2-compatible", "Honeywell"),
0x791a: (sp2, "SP2-compatible", "Honeywell"),
0x7d00: (sp2, "SP3-EU", "Broadlink (OEM)"),
0x7d0d: (sp2, "SP mini 3", "Broadlink (OEM)"),
0x9479: (sp2, "SP3S-US", "Broadlink"),
0x947a: (sp2, "SP3S-EU", "Broadlink"),
0x2712: (rm, "RM pro/pro+", "Broadlink"),
0x272a: (rm, "RM pro", "Broadlink"),
0x2737: (rm, "RM mini 3", "Broadlink"),
0x273d: (rm, "RM pro", "Broadlink"),
0x277c: (rm, "RM home", "Broadlink"),
0x2783: (rm, "RM home", "Broadlink"),
0x2787: (rm, "RM pro", "Broadlink"),
0x278b: (rm, "RM plus", "Broadlink"),
0x278f: (rm, "RM mini", "Broadlink"),
0x2797: (rm, "RM pro+", "Broadlink"),
0x279d: (rm, "RM pro+", "Broadlink"),
0x27a1: (rm, "RM plus", "Broadlink"),
0x27a6: (rm, "RM plus", "Broadlink"),
0x27a9: (rm, "RM pro+", "Broadlink"),
0x27c2: (rm, "RM mini 3", "Broadlink"),
0x27c3: (rm, "RM pro+", "Broadlink"),
0x27cc: (rm, "RM mini 3", "Broadlink"),
0x27cd: (rm, "RM mini 3", "Broadlink"),
0x27d0: (rm, "RM mini 3", "Broadlink"),
0x27d1: (rm, "RM mini 3", "Broadlink"),
0x27de: (rm, "RM mini 3", "Broadlink"),
0x51da: (rm4, "RM4 mini", "Broadlink"),
0x5f36: (rm4, "RM mini 3", "Broadlink"),
0x6026: (rm4, "RM4 pro", "Broadlink"),
0x6070: (rm4, "RM4C mini", "Broadlink"),
0x610e: (rm4, "RM4 mini", "Broadlink"),
0x610f: (rm4, "RM4C mini", "Broadlink"),
0x61a2: (rm4, "RM4 pro", "Broadlink"),
0x62bc: (rm4, "RM4 mini", "Broadlink"),
0x62be: (rm4, "RM4C mini", "Broadlink"),
0x648d: (rm4, "RM4 mini", "Broadlink"),
0x649b: (rm4, "RM4 pro", "Broadlink"),
0x2714: (a1, "e-Sensor", "Broadlink"),
0x4eb5: (mp1, "MP1-1K4S", "Broadlink"),
0x4ef7: (mp1, "MP1-1K4S", "Broadlink (OEM)"),
0x4f1b: (mp1, "MP1-1K3S2U", "Broadlink (OEM)"),
0x4f65: (mp1, "MP1-1K3S2U", "Broadlink"),
0x5043: (lb1, "SB800TD", "Broadlink (OEM)"),
0x504e: (lb1, "LB1", "Broadlink"),
0x60c7: (lb1, "LB1", "Broadlink"),
0x60c8: (lb1, "LB1", "Broadlink"),
0x6112: (lb1, "LB1", "Broadlink"),
0x2722: (S1C, "S2KIT", "Broadlink"),
0x4ead: (hysen, "HY02B05H", "Hysen"),
0x4e4d: (dooya, "DT360E-45/20", "Dooya"),
0x51e3: (bg1, "BG800/BG900", "BG Electrical"),
}
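# Look the device-type code up in get_devices(); unknown codes fall back to the
# generic `device` class.  Example with made-up host/MAC values:
#   dev = gendevice(0x2737, ("192.168.0.10", 80), bytes.fromhex("34ea34b43b5a"))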
def gendevice(
dev_type: int,
host: Tuple[str, int],
mac: Union[bytes, str],
name: str = None,
is_locked: bool = None,
) -> device:
"""Generate a device."""
try:
dev_class, model, manufacturer = get_devices()[dev_type]
except KeyError:
return device(host, mac, dev_type, name=name, is_locked=is_locked)
return dev_class(
host,
mac,
dev_type,
name=name,
model=model,
manufacturer=manufacturer,
is_locked=is_locked,
)
def discover(
timeout: int = None,
local_ip_address: str = None,
discover_ip_address: str = '255.255.255.255',
discover_ip_port: int = 80,
) -> List[device]:
"""Discover devices connected to the local network."""
local_ip_address = local_ip_address or get_local_ip()
address = local_ip_address.split('.')
cs = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cs.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
cs.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
cs.bind((local_ip_address, 0))
port = cs.getsockname()[1]
starttime = time.time()
devices = []
timezone = int(time.timezone / -3600)
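    # Build the 0x30-byte discovery ("hello") broadcast: timezone, current date and
    # time, local IP address/port, and a 0xbeaf-seeded checksum at fixed offsets.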
packet = bytearray(0x30)
year = datetime.now().year
if timezone < 0:
packet[0x08] = 0xff + timezone - 1
packet[0x09] = 0xff
packet[0x0a] = 0xff
packet[0x0b] = 0xff
else:
packet[0x08] = timezone
packet[0x09] = 0
packet[0x0a] = 0
packet[0x0b] = 0
packet[0x0c] = year & 0xff
packet[0x0d] = year >> 8
packet[0x0e] = datetime.now().minute
packet[0x0f] = datetime.now().hour
subyear = str(year)[2:]
packet[0x10] = int(subyear)
packet[0x11] = datetime.now().isoweekday()
packet[0x12] = datetime.now().day
packet[0x13] = datetime.now().month
packet[0x18] = int(address[0])
packet[0x19] = int(address[1])
packet[0x1a] = int(address[2])
packet[0x1b] = int(address[3])
packet[0x1c] = port & 0xff
packet[0x1d] = port >> 8
packet[0x26] = 6
checksum = sum(packet, 0xbeaf) & 0xffff
packet[0x20] = checksum & 0xff
packet[0x21] = checksum >> 8
cs.sendto(packet, (discover_ip_address, discover_ip_port))
if timeout is None:
response = cs.recvfrom(1024)
responsepacket = bytearray(response[0])
host = response[1]
devtype = responsepacket[0x34] | responsepacket[0x35] << 8
mac = responsepacket[0x3f:0x39:-1]
name = responsepacket[0x40:].split(b'\x00')[0].decode('utf-8')
is_locked = bool(responsepacket[-1])
device = gendevice(devtype, host, mac, name=name, is_locked=is_locked)
cs.close()
return device
while (time.time() - starttime) < timeout:
cs.settimeout(timeout - (time.time() - starttime))
try:
response = cs.recvfrom(1024)
except socket.timeout:
cs.close()
return devices
responsepacket = bytearray(response[0])
host = response[1]
devtype = responsepacket[0x34] | responsepacket[0x35] << 8
mac = responsepacket[0x3f:0x39:-1]
name = responsepacket[0x40:].split(b'\x00')[0].decode('utf-8')
is_locked = bool(responsepacket[-1])
device = gendevice(devtype, host, mac, name=name, is_locked=is_locked)
devices.append(device)
cs.close()
return devices
# Setup a new Broadlink device via AP Mode. Review the README to see how to enter AP Mode.
# Only tested with Broadlink RM3 Mini (Blackbean)
def setup(ssid: str, password: str, security_mode: int) -> None:
"""Set up a new Broadlink device via AP mode."""
# Security mode options are (0 - none, 1 = WEP, 2 = WPA1, 3 = WPA2, 4 = WPA1/2)
payload = bytearray(0x88)
payload[0x26] = 0x14 # This seems to always be set to 14
# Add the SSID to the payload
ssid_start = 68
ssid_length = 0
for letter in ssid:
payload[(ssid_start + ssid_length)] = ord(letter)
ssid_length += 1
# Add the WiFi password to the payload
pass_start = 100
pass_length = 0
for letter in password:
payload[(pass_start + pass_length)] = ord(letter)
pass_length += 1
payload[0x84] = ssid_length # Character length of SSID
payload[0x85] = pass_length # Character length of password
payload[0x86] = security_mode # Type of encryption (00 - none, 01 = WEP, 02 = WPA1, 03 = WPA2, 04 = WPA1/2)
checksum = sum(payload, 0xbeaf) & 0xffff
payload[0x20] = checksum & 0xff # Checksum 1 position
payload[0x21] = checksum >> 8 # Checksum 2 position
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.sendto(payload, ('255.255.255.255', 80))
sock.close()
| 36.719008 | 112 | 0.585415 |
| 7951fa6331f6d6df56acdbd36acbd540b6636374 | 4,945 | py | Python | Blockchain.py | Sukarnascience/Blockchain_Using_Py | b88dfc316c054ee8407ba2a1e1e3ac68a692ba80 | ["MIT"] | 1 | 2021-07-09T02:54:24.000Z | 2021-07-09T02:54:24.000Z | Blockchain.py | Sukarnascience/Blockchain_Using_Py | b88dfc316c054ee8407ba2a1e1e3ac68a692ba80 | ["MIT"] | null | null | null | Blockchain.py | Sukarnascience/Blockchain_Using_Py | b88dfc316c054ee8407ba2a1e1e3ac68a692ba80 | ["MIT"] | null | null | null |
import mysql.connector as sql
from tkinter import *
from tkinter import messagebox
import hashlib
import csv
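# block() links a new transaction to the previous one: it hashes
# "sender,recever,ammount,previous_hash" with SHA-256 and the digest becomes the
# new block's hash in the ledger table,
# e.g. block(["alice", "bob", 10, prev_hash]) with illustrative values.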
def block(transaction):
sender = transaction[0]
recever = transaction[1]
ammount = transaction[2]
previous_hash = transaction[3]
new_hash_data = "{},{},{},{}".format(sender,recever,ammount,previous_hash)
assign_new_hash = hashlib.sha256(new_hash_data.encode()).hexdigest()
return assign_new_hash
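# Illustrative sketch (hypothetical values): chaining two transactions by feeding the
# previous block's hash into the next one, mirroring what add_block() does below:
#     h1 = block(["alice", "bob", 10, "GENESIS"])
#     h2 = block(["bob", "carol", 5, h1])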
def login():
username = UN.get()
password = PW.get()
database=sql.connect(host="localhost",passwd="1234",user="root",database="school")
cc=database.cursor()
cc.execute("select * from account where username='{}';".format(username))
data = cc.fetchall()
    if data and data[0][1] == password:
screen.destroy()
def ledger():
database=sql.connect(host="localhost",passwd="1234",user="root",database="school")
cc=database.cursor()
cc.execute("select * from ledger;")
data = cc.fetchall()
            printoutintxt = open("Myledger.csv", 'a', newline='')
mytyper = csv.writer(printoutintxt)
for i in data:
mytyper.writerow(list(i))
printoutintxt.close()
messagebox.showinfo("Thank You","Print Out has beed dun successfully \nnamed \"Myledger.csv\" in home Page ")
def add_block():
global newhashdata
database=sql.connect(host="localhost",passwd="1234",user="root",database="school")
cc=database.cursor()
cc.execute("select * from ledger;")
data = cc.fetchall()
lastData = data[-1]
PH = lastData[-1]
newhashdata = block([SN.get(),RN.get(),AM.get(),PH])
cc.execute("INSERT INTO ledger values('{}','{}',{},'{}');".format(SN.get(),RN.get(),AM.get(),newhashdata))
database.commit()
database.close()
messagebox.showinfo("Great","Your block has added successfully")
mainscreen = Tk()
mainscreen.title("Block Chain - S.jana")
mainscreen.geometry("480x250")
l1=Label(mainscreen,text="Add your Block in Block Chain",font=("Courier",20))
l1.pack()
l2=Label(mainscreen,text="-----------------------------",font=("Courier",20))
l2.pack()
l3=Label(mainscreen,text="Sender :",font=("Courier",15))
l3.place(x=20,y=80)
l4=Label(mainscreen,text="Recever :",font=("Courier",15))
l4.place(x=20,y=120)
l6=Label(mainscreen,text="Ammount :",font=("Courier",15))
l6.place(x=20,y=160)
b2=Button(mainscreen,text="See Ledger",command=ledger)
b2.place(x=240+30,y=200)
b3=Button(mainscreen,text="Add the Block in chain",command=add_block)
b3.place(x=100+30,y=200)
SN=StringVar()
RN=StringVar()
AM=IntVar()
u1=Entry(mainscreen,textvariable=SN)
u1.place(x=220,y=80)
u2=Entry(mainscreen,textvariable=RN)
u2.place(x=220,y=120)
u4=Entry(mainscreen,textvariable=AM)
u4.place(x=220,y=160)
mainscreen.mainloop()
else:
messagebox.showwarning("Not Alloud","Please check your Details\nBecause the data is not matching with out database")
def signup():
username = NUN.get()
password = NPW.get()
passwordV = VPW.get()
if(password==passwordV):
database=sql.connect(host="localhost",passwd="1234",user="root",database="school")
cc=database.cursor()
cc.execute("INSERT INTO account values ('{}','{}');".format(username,password))
database.commit()
database.close()
messagebox.showinfo("Alloud","Your Account has created successfully :)")
else:
messagebox.showerror("Not Alloud","Please check your password and re-password\nthey both are not same")
screen = Tk()
screen.title("Entry Gate")
screen.geometry("600x240")
l1=Label(screen,text="Login Signup", font=("Courier",20))
l1.pack()
l2=Label(screen,text="|\n|\n|\n|\n|\n|", font=("Courier",20))
l2.pack()
l3=Label(screen,text="UserName :",font=("Courier",15))
l3.place(x=20,y=60)
l4=Label(screen,text="Password :",font=("Courier",15))
l4.place(x=20,y=100)
l6=Label(screen,text="UserName :",font=("Courier",15))
l6.place(x=320,y=60)
l7=Label(screen,text="Password :",font=("Courier",15))
l7.place(x=320,y=100)
l8=Label(screen,text="Re-Password:",font=("Courier",15))
l8.place(x=320,y=140)
b1=Button(screen,text="Login",command=login)
b1.place(x=130,y=200)
b3=Button(screen,text="Signup",command=signup)
b3.place(x=415,y=200)
UN=StringVar()
PW=StringVar()
NUN=StringVar()
NPW=StringVar()
VPW=StringVar()
u1=Entry(screen,textvariable=UN)
u1.place(x=150,y=65)
u2=Entry(screen,textvariable=PW)
u2.place(x=150,y=105)
u4=Entry(screen,textvariable=NUN)
u4.place(x=465,y=65)
u5=Entry(screen,textvariable=NPW)
u5.place(x=465,y=105)
u6=Entry(screen,textvariable=VPW)
u6.place(x=465,y=145)
screen.mainloop()
| 34.103448
| 124
| 0.627503
|
7951fa90900749187c175648eca7f72de772b342
| 48
|
py
|
Python
|
libhoney/version.py
|
DavidCain/libhoney-py
|
3ae150edad4503d6acc4f1c245a02e6761b11ae5
|
[
"Apache-2.0"
] | null | null | null |
libhoney/version.py
|
DavidCain/libhoney-py
|
3ae150edad4503d6acc4f1c245a02e6761b11ae5
|
[
"Apache-2.0"
] | null | null | null |
libhoney/version.py
|
DavidCain/libhoney-py
|
3ae150edad4503d6acc4f1c245a02e6761b11ae5
|
[
"Apache-2.0"
] | null | null | null |
VERSION = "1.11.0" # Update using bump2version
| 24
| 47
| 0.708333
|
7951fb7ddda0d62547c66de1a2a78b7aaddd0965
| 7,179
|
py
|
Python
|
pandas/tests/scalar/interval/test_interval.py
|
zhezherun/pandas
|
36c1104b7ad9761e020f7e8198eb60da4045d169
|
[
"BSD-3-Clause"
] | 1
|
2018-11-11T22:18:13.000Z
|
2018-11-11T22:18:13.000Z
|
pandas/tests/scalar/interval/test_interval.py
|
zhezherun/pandas
|
36c1104b7ad9761e020f7e8198eb60da4045d169
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/scalar/interval/test_interval.py
|
zhezherun/pandas
|
36c1104b7ad9761e020f7e8198eb60da4045d169
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division
import numpy as np
from pandas import Interval, Timestamp, Timedelta
import pandas.core.common as com
import pytest
@pytest.fixture
def interval():
return Interval(0, 1)
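# NOTE: the `closed` fixture used by the math tests below (values such as 'left',
# 'right', 'both', 'neither') is assumed to be provided by pandas' shared conftest.py;
# it is not defined in this file.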
class TestInterval(object):
def test_properties(self, interval):
assert interval.closed == 'right'
assert interval.left == 0
assert interval.right == 1
assert interval.mid == 0.5
def test_repr(self, interval):
assert repr(interval) == "Interval(0, 1, closed='right')"
assert str(interval) == "(0, 1]"
interval_left = Interval(0, 1, closed='left')
assert repr(interval_left) == "Interval(0, 1, closed='left')"
assert str(interval_left) == "[0, 1)"
def test_contains(self, interval):
assert 0.5 in interval
assert 1 in interval
assert 0 not in interval
msg = "__contains__ not defined for two intervals"
with pytest.raises(TypeError, match=msg):
interval in interval
interval_both = Interval(0, 1, closed='both')
assert 0 in interval_both
assert 1 in interval_both
interval_neither = Interval(0, 1, closed='neither')
assert 0 not in interval_neither
assert 0.5 in interval_neither
assert 1 not in interval_neither
def test_equal(self):
assert Interval(0, 1) == Interval(0, 1, closed='right')
assert Interval(0, 1) != Interval(0, 1, closed='left')
assert Interval(0, 1) != 0
def test_comparison(self):
with pytest.raises(TypeError, match='unorderable types'):
Interval(0, 1) < 2
assert Interval(0, 1) < Interval(1, 2)
assert Interval(0, 1) < Interval(0, 2)
assert Interval(0, 1) < Interval(0.5, 1.5)
assert Interval(0, 1) <= Interval(0, 1)
assert Interval(0, 1) > Interval(-1, 2)
assert Interval(0, 1) >= Interval(0, 1)
def test_hash(self, interval):
# should not raise
hash(interval)
@pytest.mark.parametrize('left, right, expected', [
(0, 5, 5),
(-2, 5.5, 7.5),
(10, 10, 0),
(10, np.inf, np.inf),
(-np.inf, -5, np.inf),
(-np.inf, np.inf, np.inf),
(Timedelta('0 days'), Timedelta('5 days'), Timedelta('5 days')),
(Timedelta('10 days'), Timedelta('10 days'), Timedelta('0 days')),
(Timedelta('1H10M'), Timedelta('5H5M'), Timedelta('3H55M')),
(Timedelta('5S'), Timedelta('1H'), Timedelta('59M55S'))])
def test_length(self, left, right, expected):
# GH 18789
iv = Interval(left, right)
result = iv.length
assert result == expected
@pytest.mark.parametrize('left, right, expected', [
('2017-01-01', '2017-01-06', '5 days'),
('2017-01-01', '2017-01-01 12:00:00', '12 hours'),
('2017-01-01 12:00', '2017-01-01 12:00:00', '0 days'),
('2017-01-01 12:01', '2017-01-05 17:31:00', '4 days 5 hours 30 min')])
@pytest.mark.parametrize('tz', (None, 'UTC', 'CET', 'US/Eastern'))
def test_length_timestamp(self, tz, left, right, expected):
# GH 18789
iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz))
result = iv.length
expected = Timedelta(expected)
assert result == expected
@pytest.mark.parametrize('left, right', [
('a', 'z'),
(('a', 'b'), ('c', 'd')),
(list('AB'), list('ab')),
(Interval(0, 1), Interval(1, 2))])
def test_length_errors(self, left, right):
# GH 18789
iv = Interval(left, right)
msg = 'cannot compute length between .* and .*'
with pytest.raises(TypeError, match=msg):
iv.length
def test_math_add(self, closed):
interval = Interval(0, 1, closed=closed)
expected = Interval(1, 2, closed=closed)
result = interval + 1
assert result == expected
result = 1 + interval
assert result == expected
result = interval
result += 1
assert result == expected
msg = r"unsupported operand type\(s\) for \+"
with pytest.raises(TypeError, match=msg):
interval + interval
with pytest.raises(TypeError, match=msg):
interval + 'foo'
def test_math_sub(self, closed):
interval = Interval(0, 1, closed=closed)
expected = Interval(-1, 0, closed=closed)
result = interval - 1
assert result == expected
result = interval
result -= 1
assert result == expected
msg = r"unsupported operand type\(s\) for -"
with pytest.raises(TypeError, match=msg):
interval - interval
with pytest.raises(TypeError, match=msg):
interval - 'foo'
def test_math_mult(self, closed):
interval = Interval(0, 1, closed=closed)
expected = Interval(0, 2, closed=closed)
result = interval * 2
assert result == expected
result = 2 * interval
assert result == expected
result = interval
result *= 2
assert result == expected
msg = r"unsupported operand type\(s\) for \*"
with pytest.raises(TypeError, match=msg):
interval * interval
msg = r"can\'t multiply sequence by non-int"
with pytest.raises(TypeError, match=msg):
interval * 'foo'
def test_math_div(self, closed):
interval = Interval(0, 1, closed=closed)
expected = Interval(0, 0.5, closed=closed)
result = interval / 2.0
assert result == expected
result = interval
result /= 2.0
assert result == expected
msg = r"unsupported operand type\(s\) for /"
with pytest.raises(TypeError, match=msg):
interval / interval
with pytest.raises(TypeError, match=msg):
interval / 'foo'
def test_math_floordiv(self, closed):
interval = Interval(1, 2, closed=closed)
expected = Interval(0, 1, closed=closed)
result = interval // 2
assert result == expected
result = interval
result //= 2
assert result == expected
msg = r"unsupported operand type\(s\) for //"
with pytest.raises(TypeError, match=msg):
interval // interval
with pytest.raises(TypeError, match=msg):
interval // 'foo'
def test_constructor_errors(self):
msg = "invalid option for 'closed': foo"
with pytest.raises(ValueError, match=msg):
Interval(0, 1, closed='foo')
msg = 'left side of interval must be <= right side'
with pytest.raises(ValueError, match=msg):
Interval(1, 0)
@pytest.mark.parametrize('tz_left, tz_right', [
(None, 'UTC'), ('UTC', None), ('UTC', 'US/Eastern')])
def test_constructor_errors_tz(self, tz_left, tz_right):
# GH 18538
left = Timestamp('2017-01-01', tz=tz_left)
right = Timestamp('2017-01-02', tz=tz_right)
error = TypeError if com._any_none(tz_left, tz_right) else ValueError
with pytest.raises(error):
Interval(left, right)
| 31.765487
| 78
| 0.579329
|
7951fd3bdeab71dc53a8f29e08bf0f4419ded088
| 21,742
|
py
|
Python
|
pytorch/libs/nnet/pooling.py
|
Angelnatao/asv-subtools
|
77d4d940d235b16e9e36a6ee07ce283fa36f4172
|
[
"Apache-2.0"
] | 1
|
2020-11-21T08:03:59.000Z
|
2020-11-21T08:03:59.000Z
|
pytorch/libs/nnet/pooling.py
|
Angelnatao/asv-subtools
|
77d4d940d235b16e9e36a6ee07ce283fa36f4172
|
[
"Apache-2.0"
] | null | null | null |
pytorch/libs/nnet/pooling.py
|
Angelnatao/asv-subtools
|
77d4d940d235b16e9e36a6ee07ce283fa36f4172
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright xmuspeech (Author: Snowdar 2019-05-29 2020-06-10)
import numpy as np
import torch
import torch.nn.functional as F
from libs.support.utils import to_device
from .components import *
## Pooling ✿
class StatisticsPooling(torch.nn.Module):
""" An usual mean [+ stddev] poolling layer"""
def __init__(self, input_dim, stddev=True, unbiased=False, eps=1.0e-10):
super(StatisticsPooling, self).__init__()
self.stddev = stddev
self.input_dim = input_dim
if self.stddev :
self.output_dim = 2 * input_dim
else :
self.output_dim = input_dim
self.eps = eps
# Used for unbiased estimate of stddev
self.unbiased = unbiased
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
# Get the num of frames
counts = inputs.shape[2]
mean = inputs.sum(dim=2, keepdim=True) / counts
if self.stddev :
if self.unbiased and counts > 1:
counts = counts - 1
            # The sqrt (as follows) is deprecated because it can produce NaN values.
            # std = torch.unsqueeze(torch.sqrt(torch.sum((inputs - mean)**2, dim=2) / counts), dim=2)
            # An eps is used instead to avoid that problem.
            # Alternatively, var works just as well as std for the concatenation below, so var could be used directly.
var = torch.sum((inputs - mean)**2, dim=2, keepdim=True) / counts
std = torch.sqrt(var.clamp(min=self.eps))
return torch.cat((mean, std), dim=1)
else:
return mean
def get_output_dim(self):
return self.output_dim
def extra_repr(self):
return '{input_dim}, {output_dim}, stddev={stddev}, unbiased={unbiased}, eps={eps}'.format(**self.__dict__)
@classmethod
def thop_count(self, m, x, y):
pass
# To do
# x = x[0]
# kernel_ops = torch.zeros(m.weight.size()[2:]).numel() # Kw x Kh
# bias_ops = 1 if m.bias is not None else 0
# # N x Cout x H x W x (Cin x Kw x Kh + bias)
# total_ops = y.nelement() * (m.input_dim * kernel_ops + bias_ops)
# m.total_ops += torch.DoubleTensor([int(total_ops)])
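# Shape sketch for StatisticsPooling (illustrative numbers, not from the original file):
# an input of shape [batch, input_dim, frames], e.g. [64, 1536, 200], is pooled to
# [64, 3072, 1] when stddev=True (mean and std concatenated) and to [64, 1536, 1] otherwise.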
class FreeStatisticsPooling(torch.nn.Module):
""" An usual mean [+ stddev] poolling layer"""
def __init__(self, stddev=True, unbiased=False, eps=1.0e-10):
super(FreeStatisticsPooling, self).__init__()
self.stddev = stddev
self.eps = eps
# Used for unbiased estimate of stddev
self.unbiased = unbiased
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
inputs = inputs.reshape(inputs.shape[0], -1, inputs.shape[len(inputs.shape)-1])
# Get the num of frames
counts = inputs.shape[2]
mean = inputs.sum(dim=2, keepdim=True) / counts
if self.stddev :
if self.unbiased and counts > 1:
counts = counts - 1
            # The sqrt (as follows) is deprecated because it can produce NaN values.
            # std = torch.unsqueeze(torch.sqrt(torch.sum((inputs - mean)**2, dim=2) / counts), dim=2)
            # An eps is used instead to avoid that problem.
            # Alternatively, var works just as well as std for the concatenation below, so var could be used directly.
var = torch.sum((inputs - mean)**2, dim=2, keepdim=True) / counts
std = torch.sqrt(var.clamp(min=self.eps))
return torch.cat((mean, std), dim=1)
else:
return mean
class LDEPooling(torch.nn.Module):
"""A novel learnable dictionary encoding layer.
Reference: Weicheng Cai, etc., "A NOVEL LEARNABLE DICTIONARY ENCODING LAYER FOR END-TO-END
LANGUAGE IDENTIFICATION", icassp, 2018
"""
def __init__(self, input_dim, c_num=64, eps=1.0e-10):
super(LDEPooling, self).__init__()
self.input_dim = input_dim
self.output_dim = input_dim * c_num
self.eps = eps
self.mu = torch.nn.Parameter(torch.randn(input_dim, c_num))
self.s = torch.nn.Parameter(torch.ones(c_num))
self.softmax_for_w = torch.nn.Softmax(dim=3)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
r = inputs.transpose(1,2).unsqueeze(3) - self.mu
# Make sure beta=self.s**2+self.eps > 0
w = self.softmax_for_w(- (self.s**2 + self.eps) * torch.sum(r**2, dim=2, keepdim=True))
e = torch.mean(w * r, dim=1)
return e.reshape(-1, self.output_dim, 1)
def get_output_dim(self):
return self.output_dim
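# Shape sketch for LDEPooling (illustrative): with input_dim=1536 and c_num=64, an input
# of shape [batch, 1536, frames] is encoded to a [batch, 1536 * 64, 1] dictionary residual.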
# Attention-based
class AttentionAlphaComponent(torch.nn.Module):
"""Compute the alpha with attention module.
alpha = softmax(v'·f(w·x + b) + k) or softmax(v'·x + k)
where f is relu here and bias could be lost.
Support:
1. Single or Multi-head attention
2. One affine or two affine
3. Share weight (last affine = vector) or un-shared weight (last affine = matrix)
4. Self-attention or time context attention (supported by context parameter of TdnnAffine)
5. Different temperatures for different heads.
"""
def __init__(self, input_dim, num_head=1, split_input=True, share=True, affine_layers=2,
hidden_size=64, context=[0], bias=True, temperature=False, fixed=True):
super(AttentionAlphaComponent, self).__init__()
assert num_head >= 1
# Multi-head case.
if num_head > 1:
if split_input:
                # Make sure features/planes with input_dim dims can be split into num_head parts.
assert input_dim % num_head == 0
if temperature:
if fixed:
t_list = []
for i in range(num_head):
t_list.append([[max(1, (i // 2) * 5)]])
# shape [1, num_head, 1, 1]
self.register_buffer('t', torch.tensor([t_list]))
else:
# Different heads have different temperature.
# Use 1 + self.t**2 in forward to make sure temperature >= 1.
self.t = torch.nn.Parameter(torch.zeros(1, num_head, 1, 1))
self.input_dim = input_dim
self.num_head = num_head
self.split_input = split_input
self.share = share
self.temperature = temperature
self.fixed = fixed
if share:
# weight: [input_dim, 1] or [input_dim, hidden_size] -> [hidden_size, 1]
final_dim = 1
else:
# weight: [input_dim, input_dim] or [input_dim, hidden_size] -> [hidden_size, input_dim]
final_dim = input_dim
first_groups = 1
last_groups = 1
if affine_layers == 1:
last_affine_input_dim = input_dim
# (x, 1) for global case and (x, h) for split case.
if num_head > 1 and split_input:
last_groups = num_head
self.relu_affine = False
elif affine_layers == 2:
last_affine_input_dim = hidden_size * num_head
if num_head > 1:
# (1, h) for global case and (h, h) for split case.
last_groups = num_head
if split_input:
first_groups = num_head
# Add a relu-affine with affine_layers=2.
self.relu_affine = True
self.first_affine = TdnnAffine(input_dim, last_affine_input_dim, context=context, bias=bias, groups=first_groups)
self.relu = torch.nn.ReLU(inplace=True)
else:
raise ValueError("Expected 1 or 2 affine layers, but got {}.",format(affine_layers))
self.last_affine = TdnnAffine(last_affine_input_dim, final_dim * num_head, context=context, bias=bias, groups=last_groups)
        # dim=2 applies the softmax over the frames axis (the batch is a 3-dim tensor in this case).
self.softmax = torch.nn.Softmax(dim=2)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
if self.temperature:
batch_size = inputs.shape[0]
chunk_size = inputs.shape[2]
x = inputs
if self.relu_affine:
x = self.relu(self.first_affine(x))
if self.num_head > 1 and self.temperature:
if self.fixed:
t = self.t
else:
t = 1 + self.t**2
x = self.last_affine(x).reshape(batch_size, self.num_head, -1, chunk_size) / t
return self.softmax(x.reshape(batch_size, -1, chunk_size))
else:
return self.softmax(self.last_affine(x))
class AttentiveStatisticsPooling(torch.nn.Module):
""" An attentive statistics pooling.
Reference: Okabe, Koji, Takafumi Koshinaka, and Koichi Shinoda. 2018. "Attentive Statistics Pooling
for Deep Speaker Embedding." ArXiv Preprint ArXiv:1803.10963.
"""
def __init__(self, input_dim, affine_layers=2, hidden_size=64, context=[0], stddev=True, stddev_attention=True, eps=1.0e-10):
super(AttentiveStatisticsPooling, self).__init__()
self.stddev = stddev
self.input_dim = input_dim
if self.stddev :
self.output_dim = 2 * input_dim
else :
self.output_dim = input_dim
self.eps = eps
self.stddev_attention = stddev_attention
self.attention = AttentionAlphaComponent(input_dim, num_head=1, share=True, affine_layers=affine_layers,
hidden_size=hidden_size, context=context)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
alpha = self.attention(inputs)
        # Weighted average
mean = torch.sum(alpha * inputs, dim=2, keepdim=True)
if self.stddev :
if self.stddev_attention:
var = torch.sum(alpha * inputs**2, dim=2, keepdim=True) - mean**2
std = torch.sqrt(var.clamp(min=self.eps))
else:
var = torch.mean((inputs - mean)**2, dim=2, keepdim=True)
std = torch.sqrt(var.clamp(min=self.eps))
return torch.cat((mean, std), dim=1)
else :
return mean
def get_output_dim(self):
return self.output_dim
class MultiHeadAttentionPooling(torch.nn.Module):
"""Implement multi-head attention pooling based on AttentionAlphaComponent.
Reference: Safari, Pooyan, and Javier Hernando. 2019. “Self Multi-Head Attention for Speaker
Recognition.” ArXiv Preprint ArXiv:1906.09890.
    Note: in this paper, affine_layers defaults to 1, and final_dim is 1, which means the weights are shared.
"""
def __init__(self, input_dim, stddev=True, stddev_attention=True, num_head=4, share=True, affine_layers=1, **options):
super(MultiHeadAttentionPooling, self).__init__()
self.input_dim = input_dim
self.stddev = stddev
self.stddev_attention = stddev_attention
self.num_head = num_head
if self.stddev :
self.output_dim = 2 * input_dim
else :
self.output_dim = input_dim
if "split_input" in options.keys():
if not options["split_input"]:
raise ValueError("split_input==False is not valid for this MultiHeadAttentionPooling.")
options.pop("split_input")
        # The special point of this pooling is that the inputs will be split across heads.
self.attention = AttentionAlphaComponent(input_dim, num_head=num_head, split_input=True, share=share,
affine_layers=affine_layers, bias=False, **options)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
batch_size = inputs.shape[0]
chunk_size = inputs.shape[2] # a.k.a total frames
        # alpha: [batch, weight, frames]
        # When conv1d is used to implement the multi-head multiplication, the multi-head weight
        # distribution comes out interleaved as [h11, h12, h13, h21, h22, h23, ..., hn1, hn2, ...],
        # so it is simply reshaped to separate the different heads.
alpha = self.attention(inputs)
        # In the shared-weight case the shape of alpha is [batch, head, 1, frames];
        # otherwise it is [batch, head, split-features, frames].
        # inputs: [batch, head, split-features, frames]
after_mul = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
inputs.reshape(batch_size, self.num_head, -1, chunk_size)
        # After multiplying alpha and inputs in the multi-head case, the mean is recovered by reshaping back.
mean = torch.sum(after_mul.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True)
if self.stddev :
if self.stddev_attention:
after_mul_2 = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
inputs.reshape(batch_size, self.num_head, -1, chunk_size)**2
var = torch.sum(after_mul_2.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True) - mean**2
std = torch.sqrt(var.clamp(min=1.0e-10))
else:
var = torch.mean((inputs - mean)**2, dim=2, keepdim=True)
std = torch.sqrt(var.clamp(min=1.0e-10))
return torch.cat((mean, std), dim=1)
else :
return mean
def get_output_dim(self):
return self.output_dim
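# Shape sketch for MultiHeadAttentionPooling (illustrative): with input_dim=1536 and
# num_head=4, each head attends over a 1536/4 = 384-dim slice of the features; the pooled
# output is [batch, 2 * 1536, 1] when stddev=True, as with the other pooling layers.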
class GlobalMultiHeadAttentionPooling(torch.nn.Module):
"""Implement global multi-head attention pooling based on AttentionAlphaComponent.
Reference: Zhiming Wang, Kaisheng Yao, Xiaolong Li, Shuo Fang. "MULTI-RESOLUTION MULTI-HEAD
ATTENTION IN DEEP SPEAKER EMBEDDING." ICASSP, 2020.
It is not equivalent to multi-head attention pooling even when
input_dim of global multi-head = 1/num_head * input_dim of multi-head.
"""
def __init__(self, input_dim, stddev=True, stddev_attention=True, num_head=4, share=True, affine_layers=2, **options):
super(GlobalMultiHeadAttentionPooling, self).__init__()
self.input_dim = input_dim
self.num_head = num_head
self.stddev = stddev
self.stddev_attention = stddev_attention
if self.stddev :
self.output_dim = 2 * input_dim
else :
self.output_dim = input_dim
if "split_input" in options.keys():
if options["split_input"]:
raise ValueError("split_input==True is not valid for GlobalMultiHeadAttentionPooling.")
options.pop("split_input")
if "temperature" in options.keys():
if options["temperature"]:
raise ValueError("temperature==True is not valid for GlobalMultiHeadAttentionPooling.")
options.pop("temperature")
# In this pooling, the special point is that all (global) features of inputs will be used.
self.attention = AttentionAlphaComponent(input_dim, num_head=num_head, split_input=False, share=share,
temperature=False, affine_layers=affine_layers, bias=True, **options)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
batch_size = inputs.shape[0]
chunk_size = inputs.shape[2] # a.k.a total frames
        # alpha: [batch, weight, frames]
        # When conv1d is used to implement the multi-head multiplication, the multi-head weight
        # distribution comes out interleaved as [h11, h12, h13, h21, h22, h23, ..., hn1, hn2, ...],
        # so it is simply reshaped to separate the different heads.
alpha = self.attention(inputs)
        # In the shared-weight case the shape of alpha is [batch, head, 1, frames];
        # otherwise it is [batch, head, all-features, frames].
# inputs: [batch, 1, all-features, frames]
after_mul = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
inputs.reshape(batch_size, 1, -1, chunk_size)
        # After multiplying alpha and inputs in the multi-head case, the mean is recovered by reshaping back.
mean = torch.sum(after_mul.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True)
if self.stddev :
if self.stddev_attention:
after_mul_2 = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
inputs.reshape(batch_size, 1, -1, chunk_size)**2
var = torch.sum(after_mul_2.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True) - mean**2
std = torch.sqrt(var.clamp(min=1.0e-10))
else:
var = torch.mean((inputs - mean)**2, dim=2, keepdim=True)
std = torch.sqrt(var.clamp(min=1.0e-10))
return torch.cat((mean, std), dim=1)
else :
return mean
def get_output_dim(self):
return self.output_dim * self.num_head
class MultiResolutionMultiHeadAttentionPooling(torch.nn.Module):
"""Implement multi-resolution global multi-head attention pooling based on AttentionAlphaComponent.
Reference: Zhiming Wang, Kaisheng Yao, Xiaolong Li, Shuo Fang. "MULTI-RESOLUTION MULTI-HEAD
ATTENTION IN DEEP SPEAKER EMBEDDING." ICASSP, 2020.
"""
def __init__(self, input_dim, stddev=True, stddev_attention=True, num_head=4, share=True, affine_layers=2, **options):
super(MultiResolutionMultiHeadAttentionPooling, self).__init__()
self.input_dim = input_dim
self.num_head = num_head
self.stddev = stddev
self.stddev_attention = stddev_attention
if self.stddev :
self.output_dim = 2 * input_dim
else :
self.output_dim = input_dim
if "split_input" in options.keys():
if options["split_input"]:
raise ValueError("split_input==True is not valid for MultiResolutionMultiHeadAttentionPooling.")
options.pop("split_input")
if "temperature" in options.keys():
if not options["temperature"]:
raise ValueError("temperature==False is not valid for MultiResolutionMultiHeadAttentionPooling.")
options.pop("temperature")
# In this pooling, the special point is that all (global) features of inputs will be used and
# the temperature will be added.
self.attention = AttentionAlphaComponent(input_dim, num_head=num_head, split_input=False, temperature=True,
share=share, affine_layers=affine_layers, bias=True, **options)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
batch_size = inputs.shape[0]
chunk_size = inputs.shape[2] # a.k.a total frames
        # alpha: [batch, weight, frames]
        # When conv1d is used to implement the multi-head multiplication, the multi-head weight
        # distribution comes out interleaved as [h11, h12, h13, h21, h22, h23, ..., hn1, hn2, ...],
        # so it is simply reshaped to separate the different heads.
alpha = self.attention(inputs)
        # In the shared-weight case the shape of alpha is [batch, head, 1, frames];
        # otherwise it is [batch, head, all-features, frames].
# inputs: [batch, 1, all-features, frames]
after_mul = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
inputs.reshape(batch_size, 1, -1, chunk_size)
        # After multiplying alpha and inputs in the multi-head case, the mean is recovered by reshaping back.
mean = torch.sum(after_mul.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True)
if self.stddev :
if self.stddev_attention:
after_mul_2 = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
inputs.reshape(batch_size, 1, -1, chunk_size)**2
var = torch.sum(after_mul_2.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True) - mean**2
std = torch.sqrt(var.clamp(min=1.0e-10))
else:
var = torch.mean((inputs - mean)**2, dim=2, keepdim=True)
std = torch.sqrt(var.clamp(min=1.0e-10))
return torch.cat((mean, std), dim=1)
else :
return mean
def get_output_dim(self):
return self.output_dim * self.num_head
| 42.054159
| 130
| 0.607166
|
7951fe6aa27fc2693f12d9fa775739e5a722e6ce
| 1,676
|
py
|
Python
|
lib/googlecloudsdk/compute/subcommands/instances/get_serial_port_output.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/compute/subcommands/instances/get_serial_port_output.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/compute/subcommands/instances/get_serial_port_output.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T20:04:47.000Z
|
2020-07-24T20:04:47.000Z
|
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for reading the serial port output of an instance."""
from googlecloudapis.compute.v1 import compute_v1_messages as messages
from googlecloudsdk.calliope import base
from googlecloudsdk.compute.lib import request_helper
from googlecloudsdk.core import log
class GetSerialPortOutput(base.Command):
"""Read output from a virtual machine instance's serial port."""
@staticmethod
def Args(parser):
parser.add_argument(
'--zone',
help='Specifies the zone of the instance.',
required=True)
parser.add_argument(
'name',
help='The name of the instance.')
def Run(self, args):
request = (self.context['compute'].instances,
'GetSerialPortOutput',
messages.ComputeInstancesGetSerialPortOutputRequest(
instance=args.name,
project=self.context['project'],
zone=args.zone))
objects = list(request_helper.MakeRequests(
requests=[request],
http=self.context['http'],
batch_url=self.context['batch-url'],
custom_get_requests=None))
return objects[0].contents
def Display(self, _, response):
log.out.write(response)
GetSerialPortOutput.detailed_help = {
'brief': "Read output from a virtual machine instance's serial port",
'DESCRIPTION': """\
{command} is used to get the output from a Google Compute
Engine virtual machine's serial port. The serial port output
from the virtual machine will be printed to standard out. This
information can be useful for diagnostic purposes.
""",
}
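# Example invocation (instance name and zone are hypothetical):
#   gcloud compute instances get-serial-port-output example-instance --zone us-central1-a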
| 33.52
| 73
| 0.672434
|
79520327b096fad38e3a574491bb7615fba5b854
| 2,319
|
py
|
Python
|
databricks/koalas/tests/test_series_conversion.py
|
harupy/koalas
|
db2b00a0d57a16d7ccac101607195d2face73572
|
[
"Apache-2.0"
] | 2
|
2020-06-24T03:19:59.000Z
|
2020-06-24T03:20:02.000Z
|
databricks/koalas/tests/test_series_conversion.py
|
harupy/koalas
|
db2b00a0d57a16d7ccac101607195d2face73572
|
[
"Apache-2.0"
] | null | null | null |
databricks/koalas/tests/test_series_conversion.py
|
harupy/koalas
|
db2b00a0d57a16d7ccac101607195d2face73572
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from databricks import koalas as ks
from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils
class SeriesConversionTest(ReusedSQLTestCase, SQLTestUtils):
@property
def pser(self):
return pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
@property
def kser(self):
return ks.from_pandas(self.pser)
def test_to_clipboard(self):
pser = self.pser
kser = self.kser
self.assert_eq(kser.to_clipboard(), pser.to_clipboard())
self.assert_eq(kser.to_clipboard(excel=False),
pser.to_clipboard(excel=False))
self.assert_eq(kser.to_clipboard(sep=',', index=False),
pser.to_clipboard(sep=',', index=False))
def test_to_latex(self):
pser = self.pser
kser = self.kser
self.assert_eq(kser.to_latex(), pser.to_latex())
self.assert_eq(kser.to_latex(col_space=2), pser.to_latex(col_space=2))
self.assert_eq(kser.to_latex(header=True), pser.to_latex(header=True))
self.assert_eq(kser.to_latex(index=False), pser.to_latex(index=False))
self.assert_eq(kser.to_latex(na_rep='-'), pser.to_latex(na_rep='-'))
self.assert_eq(kser.to_latex(float_format='%.1f'), pser.to_latex(float_format='%.1f'))
self.assert_eq(kser.to_latex(sparsify=False), pser.to_latex(sparsify=False))
self.assert_eq(kser.to_latex(index_names=False), pser.to_latex(index_names=False))
self.assert_eq(kser.to_latex(bold_rows=True), pser.to_latex(bold_rows=True))
self.assert_eq(kser.to_latex(encoding='ascii'), pser.to_latex(encoding='ascii'))
self.assert_eq(kser.to_latex(decimal=','), pser.to_latex(decimal=','))
| 39.305085
| 94
| 0.692109
|
795203c86e5cfffbccbb41bacddb3be505884db0
| 104,331
|
py
|
Python
|
azure/cosmos/cosmos_client.py
|
nuno-andre/azure-cosmos-python
|
ea41365b72282823260242ef5dcb3651c4c72ce3
|
[
"MIT"
] | null | null | null |
azure/cosmos/cosmos_client.py
|
nuno-andre/azure-cosmos-python
|
ea41365b72282823260242ef5dcb3651c4c72ce3
|
[
"MIT"
] | null | null | null |
azure/cosmos/cosmos_client.py
|
nuno-andre/azure-cosmos-python
|
ea41365b72282823260242ef5dcb3651c4c72ce3
|
[
"MIT"
] | null | null | null |
#The MIT License (MIT)
#Copyright (c) 2014 Microsoft Corporation
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""Document client class for the Azure Cosmos database service.
"""
import requests
import six
import azure.cosmos.base as base
import azure.cosmos.documents as documents
import azure.cosmos.constants as constants
import azure.cosmos.http_constants as http_constants
import azure.cosmos.query_iterable as query_iterable
import azure.cosmos.runtime_constants as runtime_constants
import azure.cosmos.request_object as request_object
import azure.cosmos.synchronized_request as synchronized_request
import azure.cosmos.global_endpoint_manager as global_endpoint_manager
import azure.cosmos.routing.routing_map_provider as routing_map_provider
import azure.cosmos.session as session
import azure.cosmos.utils as utils
class CosmosClient(object):
"""Represents a document client.
Provides a client-side logical representation of the Azure Cosmos
service. This client is used to configure and execute requests against the
service.
The service client encapsulates the endpoint and credentials used to access
the Azure Cosmos service.
"""
class _QueryCompatibilityMode:
Default = 0
Query = 1
SqlQuery = 2
# default number precisions
_DefaultNumberHashPrecision = 3
_DefaultNumberRangePrecision = -1
# default string precision
_DefaultStringHashPrecision = 3
_DefaultStringRangePrecision = -1
def __init__(self,
url_connection,
auth,
connection_policy=None,
consistency_level=documents.ConsistencyLevel.Session):
"""
:param str url_connection:
The URL for connecting to the DB server.
:param dict auth:
Contains 'masterKey' or 'resourceTokens', where
auth['masterKey'] is the default authorization key to use to
create the client, and auth['resourceTokens'] is the alternative
authorization key.
:param documents.ConnectionPolicy connection_policy:
The connection policy for the client.
:param documents.ConsistencyLevel consistency_level:
The default consistency policy for client operations.
"""
self.url_connection = url_connection
self.master_key = None
self.resource_tokens = None
        if auth is not None:
self.master_key = auth.get('masterKey')
self.resource_tokens = auth.get('resourceTokens')
if auth.get('permissionFeed'):
self.resource_tokens = {}
for permission_feed in auth['permissionFeed']:
resource_parts = permission_feed['resource'].split('/')
id = resource_parts[-1]
self.resource_tokens[id] = permission_feed['_token']
self.connection_policy = (connection_policy or
documents.ConnectionPolicy())
self.partition_resolvers = {}
self.partition_key_definition_cache = {}
self.default_headers = {
http_constants.HttpHeaders.CacheControl: 'no-cache',
http_constants.HttpHeaders.Version:
http_constants.Versions.CurrentVersion,
http_constants.HttpHeaders.UserAgent:
utils._get_user_agent(),
# For single partition query with aggregate functions we would try to accumulate the results on the SDK.
# We need to set continuation as not expected.
http_constants.HttpHeaders.IsContinuationExpected: False
}
        if consistency_level is not None:
self.default_headers[
http_constants.HttpHeaders.ConsistencyLevel] = consistency_level
# Keeps the latest response headers from server.
self.last_response_headers = None
if consistency_level == documents.ConsistencyLevel.Session:
'''create a session - this is maintained only if the default consistency level
on the client is set to session, or if the user explicitly sets it as a property
via setter'''
self.session = session.Session(self.url_connection)
else:
self.session = None
self._useMultipleWriteLocations = False
self._global_endpoint_manager = global_endpoint_manager._GlobalEndpointManager(self)
# creating a requests session used for connection pooling and re-used by all requests
self._requests_session = requests.Session()
if self.connection_policy.ProxyConfiguration and self.connection_policy.ProxyConfiguration.Host:
host = connection_policy.ProxyConfiguration.Host
url = six.moves.urllib.parse.urlparse(host)
proxy = host if url.port else host + ":" + str(connection_policy.ProxyConfiguration.Port)
proxyDict = {url.scheme : proxy}
self._requests_session.proxies.update(proxyDict)
# Query compatibility mode.
# Allows to specify compatibility mode used by client when making query requests. Should be removed when
# application/sql is no longer supported.
self._query_compatibility_mode = CosmosClient._QueryCompatibilityMode.Default
# Routing map provider
self._routing_map_provider = routing_map_provider._SmartRoutingMapProvider(self)
database_account = self._global_endpoint_manager._GetDatabaseAccount()
self._global_endpoint_manager.force_refresh(database_account)
@property
def Session(self):
""" Gets the session object from the client """
return self.session
@Session.setter
def Session(self, session):
""" Sets a session object on the document client
This will override the existing session
"""
self.session = session
@property
def WriteEndpoint(self):
"""Gets the curent write endpoint for a geo-replicated database account.
"""
return self._global_endpoint_manager.get_write_endpoint()
@property
def ReadEndpoint(self):
"""Gets the curent read endpoint for a geo-replicated database account.
"""
return self._global_endpoint_manager.get_read_endpoint()
def RegisterPartitionResolver(self, database_link, partition_resolver):
"""Registers the partition resolver associated with the database link
:param str database_link:
Database Self Link or ID based link.
:param object partition_resolver:
An instance of PartitionResolver.
"""
if not database_link:
raise ValueError("database_link is None or empty.")
if partition_resolver is None:
raise ValueError("partition_resolver is None.")
self.partition_resolvers = {database_link.strip('/'): partition_resolver}
def GetPartitionResolver(self, database_link):
"""Gets the partition resolver associated with the database link
:param str database_link:
Database self link or ID based link.
:return:
An instance of PartitionResolver.
:rtype: object
"""
if not database_link:
raise ValueError("database_link is None or empty.")
return self.partition_resolvers.get(database_link.strip('/'))
def CreateDatabase(self, database, options=None):
"""Creates a database.
:param dict database:
The Azure Cosmos database to create.
:param dict options:
The request options for the request.
:return:
The Database that was created.
:rtype: dict
"""
if options is None:
options = {}
CosmosClient.__ValidateResource(database)
path = '/dbs'
return self.Create(database, path, 'dbs', None, None, options)
def ReadDatabase(self, database_link, options=None):
"""Reads a database.
:param str database_link:
The link to the database.
:param dict options:
The request options for the request.
:return:
The Database that was read.
:rtype: dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(database_link)
database_id = base.GetResourceIdOrFullNameFromLink(database_link)
return self.Read(path, 'dbs', database_id, None, options)
def ReadDatabases(self, options=None):
"""Reads all databases.
:param dict options:
The request options for the request.
:return:
Query Iterable of Databases.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryDatabases(None, options)
def QueryDatabases(self, query, options=None):
"""Queries databases.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return: Query Iterable of Databases.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
def fetch_fn(options):
return self.__QueryFeed('/dbs',
'dbs',
'',
lambda r: r['Databases'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn)
def ReadContainers(self, database_link, options=None):
"""Reads all collections in a database.
:param str database_link:
The link to the database.
:param dict options:
The request options for the request.
:return: Query Iterable of Collections.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryContainers(database_link, None, options)
def QueryContainers(self, database_link, query, options=None):
"""Queries collections in a database.
:param str database_link:
The link to the database.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return: Query Iterable of Collections.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
path = base.GetPathFromLink(database_link, 'colls')
database_id = base.GetResourceIdOrFullNameFromLink(database_link)
def fetch_fn(options):
return self.__QueryFeed(path,
'colls',
database_id,
lambda r: r['DocumentCollections'],
lambda _, body: body,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn)
def CreateContainer(self, database_link, collection, options=None):
"""Creates a collection in a database.
:param str database_link:
The link to the database.
:param dict collection:
The Azure Cosmos collection to create.
:param dict options:
The request options for the request.
:return: The Collection that was created.
:rtype: dict
"""
if options is None:
options = {}
CosmosClient.__ValidateResource(collection)
path = base.GetPathFromLink(database_link, 'colls')
database_id = base.GetResourceIdOrFullNameFromLink(database_link)
return self.Create(collection,
path,
'colls',
database_id,
None,
options)
def ReplaceContainer(self, collection_link, collection, options=None):
"""Replaces a collection and return it.
:param str collection_link:
The link to the collection entity.
:param dict collection:
The collection to be used.
:param dict options:
The request options for the request.
:return:
The new Collection.
:rtype:
dict
"""
if options is None:
options = {}
CosmosClient.__ValidateResource(collection)
path = base.GetPathFromLink(collection_link)
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
return self.Replace(collection,
path,
'colls',
collection_id,
None,
options)
def ReadContainer(self, collection_link, options=None):
"""Reads a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
The read Collection.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(collection_link)
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
return self.Read(path,
'colls',
collection_id,
None,
options)
def CreateUser(self, database_link, user, options=None):
"""Creates a user.
:param str database_link:
The link to the database.
:param dict user:
The Azure Cosmos user to create.
:param dict options:
The request options for the request.
:return:
The created User.
:rtype:
dict
"""
if options is None:
options = {}
database_id, path = self._GetDatabaseIdWithPathForUser(database_link, user)
return self.Create(user,
path,
'users',
database_id,
None,
options)
def UpsertUser(self, database_link, user, options=None):
"""Upserts a user.
:param str database_link:
The link to the database.
:param dict user:
The Azure Cosmos user to upsert.
:param dict options:
The request options for the request.
:return:
The upserted User.
:rtype: dict
"""
if options is None:
options = {}
database_id, path = self._GetDatabaseIdWithPathForUser(database_link, user)
return self.Upsert(user,
path,
'users',
database_id,
None,
options)
def _GetDatabaseIdWithPathForUser(self, database_link, user):
CosmosClient.__ValidateResource(user)
path = base.GetPathFromLink(database_link, 'users')
database_id = base.GetResourceIdOrFullNameFromLink(database_link)
return database_id, path
def ReadUser(self, user_link, options=None):
"""Reads a user.
:param str user_link:
The link to the user entity.
:param dict options:
The request options for the request.
:return:
The read User.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(user_link)
user_id = base.GetResourceIdOrFullNameFromLink(user_link)
return self.Read(path, 'users', user_id, None, options)
def ReadUsers(self, database_link, options=None):
"""Reads all users in a database.
        :param str database_link:
            The link to the database.
        :param dict options:
            The request options for the request.
:return:
Query iterable of Users.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryUsers(database_link, None, options)
def QueryUsers(self, database_link, query, options=None):
"""Queries users in a database.
:param str database_link:
The link to the database.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return:
Query Iterable of Users.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
path = base.GetPathFromLink(database_link, 'users')
database_id = base.GetResourceIdOrFullNameFromLink(database_link)
def fetch_fn(options):
return self.__QueryFeed(path,
'users',
database_id,
lambda r: r['Users'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn)
def DeleteDatabase(self, database_link, options=None):
"""Deletes a database.
:param str database_link:
The link to the database.
:param dict options:
The request options for the request.
:return:
The deleted Database.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(database_link)
database_id = base.GetResourceIdOrFullNameFromLink(database_link)
return self.DeleteResource(path,
'dbs',
database_id,
None,
options)
def CreatePermission(self, user_link, permission, options=None):
"""Creates a permission for a user.
:param str user_link:
The link to the user entity.
:param dict permission:
The Azure Cosmos user permission to create.
:param dict options:
The request options for the request.
:return:
The created Permission.
:rtype:
dict
"""
if options is None:
options = {}
path, user_id = self._GetUserIdWithPathForPermission(permission, user_link)
return self.Create(permission,
path,
'permissions',
user_id,
None,
options)
def UpsertPermission(self, user_link, permission, options=None):
"""Upserts a permission for a user.
:param str user_link:
The link to the user entity.
:param dict permission:
The Azure Cosmos user permission to upsert.
:param dict options:
The request options for the request.
:return:
The upserted permission.
:rtype:
dict
"""
if options is None:
options = {}
path, user_id = self._GetUserIdWithPathForPermission(permission, user_link)
return self.Upsert(permission,
path,
'permissions',
user_id,
None,
options)
def _GetUserIdWithPathForPermission(self, permission, user_link):
CosmosClient.__ValidateResource(permission)
path = base.GetPathFromLink(user_link, 'permissions')
user_id = base.GetResourceIdOrFullNameFromLink(user_link)
return path, user_id
def ReadPermission(self, permission_link, options=None):
"""Reads a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The read permission.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(permission_link)
permission_id = base.GetResourceIdOrFullNameFromLink(permission_link)
return self.Read(path,
'permissions',
permission_id,
None,
options)
def ReadPermissions(self, user_link, options=None):
"""Reads all permissions for a user.
:param str user_link:
The link to the user entity.
:param dict options:
The request options for the request.
:return:
Query Iterable of Permissions.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryPermissions(user_link, None, options)
def QueryPermissions(self, user_link, query, options=None):
"""Queries permissions for a user.
:param str user_link:
The link to the user entity.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return:
Query Iterable of Permissions.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
path = base.GetPathFromLink(user_link, 'permissions')
user_id = base.GetResourceIdOrFullNameFromLink(user_link)
def fetch_fn(options):
return self.__QueryFeed(path,
'permissions',
user_id,
lambda r: r['Permissions'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn)
def ReplaceUser(self, user_link, user, options=None):
"""Replaces a user and return it.
:param str user_link:
The link to the user entity.
:param dict user:
:param dict options:
The request options for the request.
:return:
The new User.
:rtype:
dict
"""
if options is None:
options = {}
CosmosClient.__ValidateResource(user)
path = base.GetPathFromLink(user_link)
user_id = base.GetResourceIdOrFullNameFromLink(user_link)
return self.Replace(user,
path,
'users',
user_id,
None,
options)
def DeleteUser(self, user_link, options=None):
"""Deletes a user.
:param str user_link:
The link to the user entity.
:param dict options:
The request options for the request.
:return:
The deleted user.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(user_link)
user_id = base.GetResourceIdOrFullNameFromLink(user_link)
return self.DeleteResource(path,
'users',
user_id,
None,
options)
def ReplacePermission(self, permission_link, permission, options=None):
"""Replaces a permission and return it.
:param str permission_link:
The link to the permission.
:param dict permission:
:param dict options:
The request options for the request.
:return:
The new Permission.
:rtype:
dict
"""
if options is None:
options = {}
CosmosClient.__ValidateResource(permission)
path = base.GetPathFromLink(permission_link)
permission_id = base.GetResourceIdOrFullNameFromLink(permission_link)
return self.Replace(permission,
path,
'permissions',
permission_id,
None,
options)
def DeletePermission(self, permission_link, options=None):
"""Deletes a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The deleted Permission.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(permission_link)
permission_id = base.GetResourceIdOrFullNameFromLink(permission_link)
return self.DeleteResource(path,
'permissions',
permission_id,
None,
options)
def ReadItems(self, collection_link, feed_options=None):
"""Reads all documents in a collection.
:param str collection_link:
The link to the document collection.
:param dict feed_options:
:return:
Query Iterable of Documents.
:rtype:
query_iterable.QueryIterable
"""
if feed_options is None:
feed_options = {}
return self.QueryItems(collection_link, None, feed_options)
def QueryItems(self, database_or_Container_link, query, options=None, partition_key=None):
"""Queries documents in a collection.
:param str database_or_Container_link:
The link to the database when using partitioning, otherwise link to the document collection.
:param (str or dict) query:
:param dict options:
The request options for the request.
:param str partition_key:
Partition key for the query(default value None)
:return:
Query Iterable of Documents.
:rtype:
query_iterable.QueryIterable
"""
database_or_Container_link = database_or_Container_link.strip('/')
if options is None:
options = {}
if(base.IsDatabaseLink(database_or_Container_link)):
            # Python doesn't have a good way of specifying an overloaded constructor; the usual pattern is a @classmethod that builds and returns the instance, which is what PartitioningQueryIterable does here.
return query_iterable.QueryIterable.PartitioningQueryIterable(self, query, options, database_or_Container_link, partition_key)
else:
path = base.GetPathFromLink(database_or_Container_link, 'docs')
collection_id = base.GetResourceIdOrFullNameFromLink(database_or_Container_link)
def fetch_fn(options):
return self.__QueryFeed(path,
'docs',
collection_id,
lambda r: r['Documents'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn, database_or_Container_link)
def QueryItemsChangeFeed(self, collection_link, options=None):
"""Queries documents change feed in a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
options may also specify partition key range id.
:return:
Query Iterable of Documents.
:rtype:
query_iterable.QueryIterable
"""
partition_key_range_id = None
if options is not None and 'partitionKeyRangeId' in options:
partition_key_range_id = options['partitionKeyRangeId']
return self._QueryChangeFeed(collection_link, "Documents" , options, partition_key_range_id)
def _QueryChangeFeed(self, collection_link, resource_type, options=None, partition_key_range_id=None):
"""Queries change feed of a resource in a collection.
:param str collection_link:
The link to the document collection.
:param str resource_type:
The type of the resource.
:param dict options:
The request options for the request.
:param str partition_key_range_id:
Specifies partition key range id.
:return:
Query Iterable of Documents.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
options['changeFeed'] = True
resource_key_map = {'Documents' : 'docs'}
        # For now, the change feed only supports the Documents and Partition Key Range resource types
if resource_type not in resource_key_map:
raise NotImplementedError(resource_type + " change feed query is not supported.")
resource_key = resource_key_map[resource_type]
path = base.GetPathFromLink(collection_link, resource_key)
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
def fetch_fn(options):
return self.__QueryFeed(path,
resource_key,
collection_id,
lambda r: r[resource_type],
lambda _, b: b,
None,
options,
partition_key_range_id), self.last_response_headers
return query_iterable.QueryIterable(self, None, options, fetch_fn, collection_link)
def _ReadPartitionKeyRanges(self, collection_link, feed_options=None):
"""Reads Partition Key Ranges.
:param str collection_link:
The link to the document collection.
:param dict feed_options:
:return:
Query Iterable of PartitionKeyRanges.
:rtype:
query_iterable.QueryIterable
"""
if feed_options is None:
feed_options = {}
return self._QueryPartitionKeyRanges(collection_link, None, feed_options)
def _QueryPartitionKeyRanges(self, collection_link, query, options=None):
"""Queries Partition Key Ranges in a collection.
:param str collection_link:
The link to the document collection.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return:
Query Iterable of PartitionKeyRanges.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
path = base.GetPathFromLink(collection_link, 'pkranges')
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
def fetch_fn(options):
return self.__QueryFeed(path,
'pkranges',
collection_id,
lambda r: r['PartitionKeyRanges'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn)
def CreateItem(self, database_or_Container_link, document, options=None):
"""Creates a document in a collection.
:param str database_or_Container_link:
The link to the database when using partitioning, otherwise link to the document collection.
:param dict document:
The Azure Cosmos document to create.
:param dict options:
The request options for the request.
:param bool options['disableAutomaticIdGeneration']:
Disables the automatic id generation. If id is missing in the body and this
option is true, an error will be returned.
:return:
The created Document.
:rtype:
dict
"""
# Python's default arguments are evaluated once when the function is defined, not each time the function is called (as they are in, say, Ruby).
# This means that if you use a mutable default argument and mutate it, you will have mutated that object for all future calls to the function as well.
# So we use an immutable default (None) here and assign an empty dict (mutable) inside the method.
# For more details on this gotcha, please refer to http://docs.python-guide.org/en/latest/writing/gotchas/
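# Illustrative sketch of the gotcha described above (not part of the client logic; names are hypothetical):
#
#     def add_item(item, bucket=[]):   # the same list object is reused on every call
#         bucket.append(item)
#         return bucket
#
#     add_item('a')   # ['a']
#     add_item('b')   # ['a', 'b']  <- state leaks across calls
#
# Using bucket=None and creating the list inside the function avoids this.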
if options is None:
options = {}
# We check the link to be document collection link since it can be database link in case of client side partitioning
if(base.IsItemContainerLink(database_or_Container_link)):
options = self._AddPartitionKey(database_or_Container_link, document, options)
collection_id, document, path = self._GetContainerIdWithPathForItem(database_or_Container_link, document, options)
return self.Create(document,
path,
'docs',
collection_id,
None,
options)
def UpsertItem(self, database_or_Container_link, document, options=None):
"""Upserts a document in a collection.
:param str database_or_Container_link:
The link to the database when using partitioning, otherwise link to the document collection.
:param dict document:
The Azure Cosmos document to upsert.
:param dict options:
The request options for the request.
:param bool options['disableAutomaticIdGeneration']:
Disables the automatic id generation. If id is missing in the body and this
option is true, an error will be returned.
:return:
The upserted Document.
:rtype:
dict
"""
# Python's default arguments are evaluated once when the function is defined, not each time the function is called (as they are in, say, Ruby).
# This means that if you use a mutable default argument and mutate it, you will have mutated that object for all future calls to the function as well.
# So we use an immutable default (None) here and assign an empty dict (mutable) inside the method.
# For more details on this gotcha, please refer to http://docs.python-guide.org/en/latest/writing/gotchas/
if options is None:
options = {}
# We check the link to be document collection link since it can be database link in case of client side partitioning
if(base.IsItemContainerLink(database_or_Container_link)):
options = self._AddPartitionKey(database_or_Container_link, document, options)
collection_id, document, path = self._GetContainerIdWithPathForItem(database_or_Container_link, document, options)
return self.Upsert(document,
path,
'docs',
collection_id,
None,
options)
PartitionResolverErrorMessage = "Couldn't find any partition resolvers for the database link provided. Ensure that the link you used when registering the partition resolvers matches the link provided, or register both types of database link (self link as well as ID-based link)."
# Gets the collection id and path for the document
def _GetContainerIdWithPathForItem(self, database_or_Container_link, document, options):
if not database_or_Container_link:
raise ValueError("database_or_Container_link is None or empty.")
if document is None:
raise ValueError("document is None.")
CosmosClient.__ValidateResource(document)
document = document.copy()
if (not document.get('id') and
not options.get('disableAutomaticIdGeneration')):
document['id'] = base.GenerateGuidId()
collection_link = database_or_Container_link
if(base.IsDatabaseLink(database_or_Container_link)):
partition_resolver = self.GetPartitionResolver(database_or_Container_link)
if(partition_resolver != None):
collection_link = partition_resolver.ResolveForCreate(document)
else:
raise ValueError(CosmosClient.PartitionResolverErrorMessage)
path = base.GetPathFromLink(collection_link, 'docs')
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
return collection_id, document, path
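# Illustrative sketch of what _GetContainerIdWithPathForItem above produces (link and
# values are hypothetical): for a collection link such as 'dbs/mydb/colls/mycoll' and a
# document without an 'id' (and automatic id generation not disabled), the copied
# document gets a generated GUID id, 'path' is the 'docs' feed path under the collection
# link, and 'collection_id' is the resource id or full name extracted from that link.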
def ReadItem(self, document_link, options=None):
"""Reads a document.
:param str document_link:
The link to the document.
:param dict options:
The request options for the request.
:return:
The read Document.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(document_link)
document_id = base.GetResourceIdOrFullNameFromLink(document_link)
return self.Read(path,
'docs',
document_id,
None,
options)
def ReadTriggers(self, collection_link, options=None):
"""Reads all triggers in a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
Query Iterable of Triggers.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryTriggers(collection_link, None, options)
def QueryTriggers(self, collection_link, query, options=None):
"""Queries triggers in a collection.
:param str collection_link:
The link to the document collection.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return:
Query Iterable of Triggers.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
path = base.GetPathFromLink(collection_link, 'triggers')
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
def fetch_fn(options):
return self.__QueryFeed(path,
'triggers',
collection_id,
lambda r: r['Triggers'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn)
def CreateTrigger(self, collection_link, trigger, options=None):
"""Creates a trigger in a collection.
:param str collection_link:
The link to the document collection.
:param dict trigger:
:param dict options:
The request options for the request.
:return:
The created Trigger.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, trigger = self._GetContainerIdWithPathForTrigger(collection_link, trigger)
return self.Create(trigger,
path,
'triggers',
collection_id,
None,
options)
def UpsertTrigger(self, collection_link, trigger, options=None):
"""Upserts a trigger in a collection.
:param str collection_link:
The link to the document collection.
:param dict trigger:
:param dict options:
The request options for the request.
:return:
The upserted Trigger.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, trigger = self._GetContainerIdWithPathForTrigger(collection_link, trigger)
return self.Upsert(trigger,
path,
'triggers',
collection_id,
None,
options)
def _GetContainerIdWithPathForTrigger(self, collection_link, trigger):
CosmosClient.__ValidateResource(trigger)
trigger = trigger.copy()
if trigger.get('serverScript'):
trigger['body'] = str(trigger.pop('serverScript', ''))
elif trigger.get('body'):
trigger['body'] = str(trigger['body'])
path = base.GetPathFromLink(collection_link, 'triggers')
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
return collection_id, path, trigger
def ReadTrigger(self, trigger_link, options=None):
"""Reads a trigger.
:param str trigger_link:
The link to the trigger.
:param dict options:
The request options for the request.
:return:
The read Trigger.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(trigger_link)
trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)
return self.Read(path, 'triggers', trigger_id, None, options)
def ReadUserDefinedFunctions(self, collection_link, options=None):
"""Reads all user defined functions in a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
Query Iterable of UDFs.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryUserDefinedFunctions(collection_link, None, options)
def QueryUserDefinedFunctions(self, collection_link, query, options=None):
"""Queries user defined functions in a collection.
:param str collection_link:
The link to the collection.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return:
Query Iterable of UDFs.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
path = base.GetPathFromLink(collection_link, 'udfs')
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
def fetch_fn(options):
return self.__QueryFeed(path,
'udfs',
collection_id,
lambda r: r['UserDefinedFunctions'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn)
def CreateUserDefinedFunction(self, collection_link, udf, options=None):
"""Creates a user defined function in a collection.
:param str collection_link:
The link to the collection.
:param str udf:
:param dict options:
The request options for the request.
:return:
The created UDF.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, udf = self._GetContainerIdWithPathForUDF(collection_link, udf)
return self.Create(udf,
path,
'udfs',
collection_id,
None,
options)
def UpsertUserDefinedFunction(self, collection_link, udf, options=None):
"""Upserts a user defined function in a collection.
:param str collection_link:
The link to the collection.
:param str udf:
:param dict options:
The request options for the request.
:return:
The upserted UDF.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, udf = self._GetContainerIdWithPathForUDF(collection_link, udf)
return self.Upsert(udf,
path,
'udfs',
collection_id,
None,
options)
def _GetContainerIdWithPathForUDF(self, collection_link, udf):
CosmosClient.__ValidateResource(udf)
udf = udf.copy()
if udf.get('serverScript'):
udf['body'] = str(udf.pop('serverScript', ''))
elif udf.get('body'):
udf['body'] = str(udf['body'])
path = base.GetPathFromLink(collection_link, 'udfs')
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
return collection_id, path, udf
def ReadUserDefinedFunction(self, udf_link, options=None):
"""Reads a user defined function.
:param str udf_link:
The link to the user defined function.
:param dict options:
The request options for the request.
:return:
The read UDF.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(udf_link)
udf_id = base.GetResourceIdOrFullNameFromLink(udf_link)
return self.Read(path, 'udfs', udf_id, None, options)
def ReadStoredProcedures(self, collection_link, options=None):
"""Reads all store procedures in a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
Query Iterable of Stored Procedures.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryStoredProcedures(collection_link, None, options)
def QueryStoredProcedures(self, collection_link, query, options=None):
"""Queries stored procedures in a collection.
:param str collection_link:
The link to the document collection.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return:
Query Iterable of Stored Procedures.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
path = base.GetPathFromLink(collection_link, 'sprocs')
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
def fetch_fn(options):
return self.__QueryFeed(path,
'sprocs',
collection_id,
lambda r: r['StoredProcedures'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn)
def CreateStoredProcedure(self, collection_link, sproc, options=None):
"""Creates a stored procedure in a collection.
:param str collection_link:
The link to the document collection.
:param str sproc:
:param dict options:
The request options for the request.
:return:
The created Stored Procedure.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, sproc = self._GetContainerIdWithPathForSproc(collection_link, sproc)
return self.Create(sproc,
path,
'sprocs',
collection_id,
None,
options)
def UpsertStoredProcedure(self, collection_link, sproc, options=None):
"""Upserts a stored procedure in a collection.
:param str collection_link:
The link to the document collection.
:param str sproc:
:param dict options:
The request options for the request.
:return:
The upserted Stored Procedure.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, sproc = self._GetContainerIdWithPathForSproc(collection_link, sproc)
return self.Upsert(sproc,
path,
'sprocs',
collection_id,
None,
options)
def _GetContainerIdWithPathForSproc(self, collection_link, sproc):
CosmosClient.__ValidateResource(sproc)
sproc = sproc.copy()
if sproc.get('serverScript'):
sproc['body'] = str(sproc.pop('serverScript', ''))
elif sproc.get('body'):
sproc['body'] = str(sproc['body'])
path = base.GetPathFromLink(collection_link, 'sprocs')
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
return collection_id, path, sproc
def ReadStoredProcedure(self, sproc_link, options=None):
"""Reads a stored procedure.
:param str sproc_link:
The link to the stored procedure.
:param dict options:
The request options for the request.
:return:
The read Stored Procedure.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(sproc_link)
sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
return self.Read(path, 'sprocs', sproc_id, None, options)
def ReadConflicts(self, collection_link, feed_options=None):
"""Reads conflicts.
:param str collection_link:
The link to the document collection.
:param dict feed_options:
:return:
Query Iterable of Conflicts.
:rtype:
query_iterable.QueryIterable
"""
if feed_options is None:
feed_options = {}
return self.QueryConflicts(collection_link, None, feed_options)
def QueryConflicts(self, collection_link, query, options=None):
"""Queries conflicts in a collection.
:param str collection_link:
The link to the document collection.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return:
Query Iterable of Conflicts.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
path = base.GetPathFromLink(collection_link, 'conflicts')
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
def fetch_fn(options):
return self.__QueryFeed(path,
'conflicts',
collection_id,
lambda r: r['Conflicts'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn)
def ReadConflict(self, conflict_link, options=None):
"""Reads a conflict.
:param str conflict_link:
The link to the conflict.
:param dict options:
:return:
The read Conflict.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(conflict_link)
conflict_id = base.GetResourceIdOrFullNameFromLink(conflict_link)
return self.Read(path,
'conflicts',
conflict_id,
None,
options)
def DeleteContainer(self, collection_link, options=None):
"""Deletes a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
The deleted Collection.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(collection_link)
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
return self.DeleteResource(path,
'colls',
collection_id,
None,
options)
def ReplaceItem(self, document_link, new_document, options=None):
"""Replaces a document and returns it.
:param str document_link:
The link to the document.
:param dict new_document:
:param dict options:
The request options for the request.
:return:
The new Document.
:rtype:
dict
"""
CosmosClient.__ValidateResource(new_document)
path = base.GetPathFromLink(document_link)
document_id = base.GetResourceIdOrFullNameFromLink(document_link)
# Python's default arguments are evaluated once when the function is defined, not each time the function is called (as they are in, say, Ruby).
# This means that if you use a mutable default argument and mutate it, you will have mutated that object for all future calls to the function as well.
# So we use an immutable default (None) here and assign an empty dict (mutable) inside the function so that it remains local.
# For more details on this gotcha, please refer to http://docs.python-guide.org/en/latest/writing/gotchas/
if options is None:
options = {}
# Extract the document collection link and add the partition key to options
collection_link = base.GetItemContainerLink(document_link)
options = self._AddPartitionKey(collection_link, new_document, options)
return self.Replace(new_document,
path,
'docs',
document_id,
None,
options)
def DeleteItem(self, document_link, options=None):
"""Deletes a document.
:param str document_link:
The link to the document.
:param dict options:
The request options for the request.
:return:
The deleted Document.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(document_link)
document_id = base.GetResourceIdOrFullNameFromLink(document_link)
return self.DeleteResource(path,
'docs',
document_id,
None,
options)
def CreateAttachment(self, document_link, attachment, options=None):
"""Creates an attachment in a document.
:param str document_link:
The link to the document.
:param dict attachment:
The Azure Cosmos attachment to create.
:param dict options:
The request options for the request.
:return:
The created Attachment.
:rtype:
dict
"""
if options is None:
options = {}
document_id, path = self._GetItemIdWithPathForAttachment(attachment, document_link)
return self.Create(attachment,
path,
'attachments',
document_id,
None,
options)
def UpsertAttachment(self, document_link, attachment, options=None):
"""Upserts an attachment in a document.
:param str document_link:
The link to the document.
:param dict attachment:
The Azure Cosmos attachment to upsert.
:param dict options:
The request options for the request.
:return:
The upserted Attachment.
:rtype:
dict
"""
if options is None:
options = {}
document_id, path = self._GetItemIdWithPathForAttachment(attachment, document_link)
return self.Upsert(attachment,
path,
'attachments',
document_id,
None,
options)
def _GetItemIdWithPathForAttachment(self, attachment, document_link):
CosmosClient.__ValidateResource(attachment)
path = base.GetPathFromLink(document_link, 'attachments')
document_id = base.GetResourceIdOrFullNameFromLink(document_link)
return document_id, path
def CreateAttachmentAndUploadMedia(self,
document_link,
readable_stream,
options=None):
"""Creates an attachment and upload media.
:param str document_link:
The link to the document.
:param (file-like stream object) readable_stream:
:param dict options:
The request options for the request.
:return:
The created Attachment.
:rtype:
dict
"""
if options is None:
options = {}
document_id, initial_headers, path = self._GetItemIdWithPathForAttachmentMedia(document_link, options)
return self.Create(readable_stream,
path,
'attachments',
document_id,
initial_headers,
options)
def UpsertAttachmentAndUploadMedia(self,
document_link,
readable_stream,
options=None):
"""Upserts an attachment and upload media.
:param str document_link:
The link to the document.
:param (file-like stream object) readable_stream:
:param dict options:
The request options for the request.
:return:
The upserted Attachment.
:rtype:
dict
"""
if options is None:
options = {}
document_id, initial_headers, path = self._GetItemIdWithPathForAttachmentMedia(document_link, options)
return self.Upsert(readable_stream,
path,
'attachments',
document_id,
initial_headers,
options)
def _GetItemIdWithPathForAttachmentMedia(self, document_link, options):
initial_headers = dict(self.default_headers)
# Add required headers slug and content-type.
if options.get('slug'):
initial_headers[http_constants.HttpHeaders.Slug] = options['slug']
if options.get('contentType'):
initial_headers[http_constants.HttpHeaders.ContentType] = (
options['contentType'])
else:
initial_headers[http_constants.HttpHeaders.ContentType] = (
runtime_constants.MediaTypes.OctetStream)
path = base.GetPathFromLink(document_link, 'attachments')
document_id = base.GetResourceIdOrFullNameFromLink(document_link)
return document_id, initial_headers, path
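# Illustrative sketch for _GetItemIdWithPathForAttachmentMedia above (option values are
# hypothetical): options = {'slug': 'logo.png', 'contentType': 'image/png'} adds the
# Slug and Content-Type headers to the initial headers; when 'contentType' is omitted,
# the content type falls back to the octet-stream media type.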
def ReadAttachment(self, attachment_link, options=None):
"""Reads an attachment.
:param str attachment_link:
The link to the attachment.
:param dict options:
The request options for the request.
:return:
The read Attachment.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(attachment_link)
attachment_id = base.GetResourceIdOrFullNameFromLink(attachment_link)
return self.Read(path,
'attachments',
attachment_id,
None,
options)
def ReadAttachments(self, document_link, options=None):
"""Reads all attachments in a document.
:param str document_link:
The link to the document.
:param dict options:
The request options for the request.
:return:
Query Iterable of Attachments.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryAttachments(document_link, None, options)
def QueryAttachments(self, document_link, query, options=None):
"""Queries attachments in a document.
:param str document_link:
The link to the document.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return:
Query Iterable of Attachments.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
path = base.GetPathFromLink(document_link, 'attachments')
document_id = base.GetResourceIdOrFullNameFromLink(document_link)
def fetch_fn(options):
return self.__QueryFeed(path,
'attachments',
document_id,
lambda r: r['Attachments'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn)
def ReadMedia(self, media_link):
"""Reads a media.
When self.connection_policy.MediaReadMode ==
documents.MediaReadMode.Streamed, returns a file-like stream object;
otherwise, returns a str.
:param str media_link:
The link to the media.
:return:
The read Media.
:rtype:
str or file-like stream object
"""
default_headers = self.default_headers
path = base.GetPathFromLink(media_link)
media_id = base.GetResourceIdOrFullNameFromLink(media_link)
attachment_id = base.GetAttachmentIdFromMediaId(media_id)
headers = base.GetHeaders(self,
default_headers,
'get',
path,
attachment_id,
'media',
{})
# ReadMedia will always use WriteEndpoint since it's not replicated in readable Geo regions
request = request_object._RequestObject('media', documents._OperationType.Read)
result, self.last_response_headers = self.__Get(path,
request,
headers)
return result
def UpdateMedia(self, media_link, readable_stream, options=None):
"""Updates a media and returns it.
:param str media_link:
The link to the media.
:param (file-like stream object) readable_stream:
:param dict options:
The request options for the request.
:return:
The updated Media.
:rtype:
str or file-like stream object
"""
if options is None:
options = {}
initial_headers = dict(self.default_headers)
# Add required headers slug and content-type in case the body is a stream
if options.get('slug'):
initial_headers[http_constants.HttpHeaders.Slug] = options['slug']
if options.get('contentType'):
initial_headers[http_constants.HttpHeaders.ContentType] = (
options['contentType'])
else:
initial_headers[http_constants.HttpHeaders.ContentType] = (
runtime_constants.MediaTypes.OctetStream)
path = base.GetPathFromLink(media_link)
media_id = base.GetResourceIdOrFullNameFromLink(media_link)
attachment_id = base.GetAttachmentIdFromMediaId(media_id)
headers = base.GetHeaders(self,
initial_headers,
'put',
path,
attachment_id,
'media',
options)
# UpdateMedia will use WriteEndpoint since it uses PUT operation
request = request_object._RequestObject('media', documents._OperationType.Update)
result, self.last_response_headers = self.__Put(path,
request,
readable_stream,
headers)
self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
return result
def ReplaceAttachment(self, attachment_link, attachment, options=None):
"""Replaces an attachment and returns it.
:param str attachment_link:
The link to the attachment.
:param dict attachment:
:param dict options:
The request options for the request.
:return:
The replaced Attachment
:rtype:
dict
"""
if options is None:
options = {}
CosmosClient.__ValidateResource(attachment)
path = base.GetPathFromLink(attachment_link)
attachment_id = base.GetResourceIdOrFullNameFromLink(attachment_link)
return self.Replace(attachment,
path,
'attachments',
attachment_id,
None,
options)
def DeleteAttachment(self, attachment_link, options=None):
"""Deletes an attachment.
:param str attachment_link:
The link to the attachment.
:param dict options:
The request options for the request.
:return:
The deleted Attachment.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(attachment_link)
attachment_id = base.GetResourceIdOrFullNameFromLink(attachment_link)
return self.DeleteResource(path,
'attachments',
attachment_id,
None,
options)
def ReplaceTrigger(self, trigger_link, trigger, options=None):
"""Replaces a trigger and returns it.
:param str trigger_link:
The link to the trigger.
:param dict trigger:
:param dict options:
The request options for the request.
:return:
The replaced Trigger.
:rtype:
dict
"""
if options is None:
options = {}
CosmosClient.__ValidateResource(trigger)
trigger = trigger.copy()
if trigger.get('serverScript'):
trigger['body'] = str(trigger['serverScript'])
elif trigger.get('body'):
trigger['body'] = str(trigger['body'])
path = base.GetPathFromLink(trigger_link)
trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)
return self.Replace(trigger,
path,
'triggers',
trigger_id,
None,
options)
def DeleteTrigger(self, trigger_link, options=None):
"""Deletes a trigger.
:param str trigger_link:
The link to the trigger.
:param dict options:
The request options for the request.
:return:
The deleted Trigger.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(trigger_link)
trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)
return self.DeleteResource(path,
'triggers',
trigger_id,
None,
options)
def ReplaceUserDefinedFunction(self, udf_link, udf, options=None):
"""Replaces a user defined function and returns it.
:param str udf_link:
The link to the user defined function.
:param dict udf:
:param dict options:
The request options for the request.
:return:
The new UDF.
:rtype:
dict
"""
if options is None:
options = {}
CosmosClient.__ValidateResource(udf)
udf = udf.copy()
if udf.get('serverScript'):
udf['body'] = str(udf['serverScript'])
elif udf.get('body'):
udf['body'] = str(udf['body'])
path = base.GetPathFromLink(udf_link)
udf_id = base.GetResourceIdOrFullNameFromLink(udf_link)
return self.Replace(udf,
path,
'udfs',
udf_id,
None,
options)
def DeleteUserDefinedFunction(self, udf_link, options=None):
"""Deletes a user defined function.
:param str udf_link:
The link to the user defined function.
:param dict options:
The request options for the request.
:return:
The deleted UDF.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(udf_link)
udf_id = base.GetResourceIdOrFullNameFromLink(udf_link)
return self.DeleteResource(path,
'udfs',
udf_id,
None,
options)
def ExecuteStoredProcedure(self, sproc_link, params, options=None):
"""Executes a store procedure.
:param str sproc_link:
The link to the stored procedure.
:param (list or None) params:
    Parameters for the stored procedure; a single non-list value is wrapped in a list.
:param dict options:
The request options for the request.
:return:
The Stored Procedure response.
:rtype:
dict
"""
if options is None:
options = {}
initial_headers = dict(self.default_headers)
initial_headers.update({
http_constants.HttpHeaders.Accept: (
runtime_constants.MediaTypes.Json)
})
if params and not type(params) is list:
params = [params]
path = base.GetPathFromLink(sproc_link)
sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
headers = base.GetHeaders(self,
initial_headers,
'post',
path,
sproc_id,
'sprocs',
options)
# ExecuteStoredProcedure will use WriteEndpoint since it uses POST operation
request = request_object._RequestObject('sprocs', documents._OperationType.ExecuteJavaScript)
result, self.last_response_headers = self.__Post(path,
request,
params,
headers)
return result
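# Illustrative sketch for ExecuteStoredProcedure above (link and value are hypothetical):
# a single non-list params value is wrapped in a list before the POST, so these two
# calls send the same body:
#     client.ExecuteStoredProcedure(sproc_link, {'id': '1'})
#     client.ExecuteStoredProcedure(sproc_link, [{'id': '1'}])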
def ReplaceStoredProcedure(self, sproc_link, sproc, options=None):
"""Replaces a stored procedure and returns it.
:param str sproc_link:
The link to the stored procedure.
:param dict sproc:
:param dict options:
The request options for the request.
:return:
The replaced Stored Procedure.
:rtype:
dict
"""
if options is None:
options = {}
CosmosClient.__ValidateResource(sproc)
sproc = sproc.copy()
if sproc.get('serverScript'):
sproc['body'] = str(sproc['serverScript'])
elif sproc.get('body'):
sproc['body'] = str(sproc['body'])
path = base.GetPathFromLink(sproc_link)
sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
return self.Replace(sproc,
path,
'sprocs',
sproc_id,
None,
options)
def DeleteStoredProcedure(self, sproc_link, options=None):
"""Deletes a stored procedure.
:param str sproc_link:
The link to the stored procedure.
:param dict options:
The request options for the request.
:return:
The deleted Stored Procedure.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(sproc_link)
sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
return self.DeleteResource(path,
'sprocs',
sproc_id,
None,
options)
def DeleteConflict(self, conflict_link, options=None):
"""Deletes a conflict.
:param str conflict_link:
The link to the conflict.
:param dict options:
The request options for the request.
:return:
The deleted Conflict.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(conflict_link)
conflict_id = base.GetResourceIdOrFullNameFromLink(conflict_link)
return self.DeleteResource(path,
'conflicts',
conflict_id,
None,
options)
def ReplaceOffer(self, offer_link, offer):
"""Replaces an offer and returns it.
:param str offer_link:
The link to the offer.
:param dict offer:
:return:
The replaced Offer.
:rtype:
dict
"""
CosmosClient.__ValidateResource(offer)
path = base.GetPathFromLink(offer_link)
offer_id = base.GetResourceIdOrFullNameFromLink(offer_link)
return self.Replace(offer, path, 'offers', offer_id, None, None)
def ReadOffer(self, offer_link):
"""Reads an offer.
:param str offer_link:
The link to the offer.
:return:
The read Offer.
:rtype:
dict
"""
path = base.GetPathFromLink(offer_link)
offer_id = base.GetResourceIdOrFullNameFromLink(offer_link)
return self.Read(path, 'offers', offer_id, None, {})
def ReadOffers(self, options=None):
"""Reads all offers.
:param dict options:
The request options for the request
:return:
Query Iterable of Offers.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryOffers(None, options)
def QueryOffers(self, query, options=None):
"""Query for all offers.
:param (str or dict) query:
:param dict options:
The request options for the request
:return:
Query Iterable of Offers.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
def fetch_fn(options):
return self.__QueryFeed('/offers',
'offers',
'',
lambda r: r['Offers'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn)
def GetDatabaseAccount(self, url_connection=None):
"""Gets database account info.
:return:
The Database Account.
:rtype:
documents.DatabaseAccount
"""
if url_connection is None:
url_connection = self.url_connection
initial_headers = dict(self.default_headers)
headers = base.GetHeaders(self,
initial_headers,
'get',
'', # path
'', # id
'', # type
{})
request = request_object._RequestObject('databaseaccount', documents._OperationType.Read, url_connection)
result, self.last_response_headers = self.__Get('',
request,
headers)
database_account = documents.DatabaseAccount()
database_account.DatabasesLink = '/dbs/'
database_account.MediaLink = '/media/'
if (http_constants.HttpHeaders.MaxMediaStorageUsageInMB in
self.last_response_headers):
database_account.MaxMediaStorageUsageInMB = (
self.last_response_headers[
http_constants.HttpHeaders.MaxMediaStorageUsageInMB])
if (http_constants.HttpHeaders.CurrentMediaStorageUsageInMB in
self.last_response_headers):
database_account.CurrentMediaStorageUsageInMB = (
self.last_response_headers[
http_constants.HttpHeaders.CurrentMediaStorageUsageInMB])
database_account.ConsistencyPolicy = result.get(constants._Constants.UserConsistencyPolicy)
# WritableLocations and ReadableLocations fields will be available only for geo-replicated database accounts
if constants._Constants.WritableLocations in result:
database_account._WritableLocations = result[constants._Constants.WritableLocations]
if constants._Constants.ReadableLocations in result:
database_account._ReadableLocations = result[constants._Constants.ReadableLocations]
if constants._Constants.EnableMultipleWritableLocations in result:
database_account._EnableMultipleWritableLocations = result[constants._Constants.EnableMultipleWritableLocations]
self._useMultipleWriteLocations = self.connection_policy.UseMultipleWriteLocations and database_account._EnableMultipleWritableLocations
return database_account
def Create(self, body, path, type, id, initial_headers, options=None):
"""Creates a Azure Cosmos resource and returns it.
:param dict body:
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The created Azure Cosmos resource.
:rtype:
dict
"""
if options is None:
options = {}
initial_headers = initial_headers or self.default_headers
headers = base.GetHeaders(self,
initial_headers,
'post',
path,
id,
type,
options)
# Create will use WriteEndpoint since it uses POST operation
request = request_object._RequestObject(type, documents._OperationType.Create)
result, self.last_response_headers = self.__Post(path,
request,
body,
headers)
# update session for write request
self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
return result
def Upsert(self, body, path, type, id, initial_headers, options=None):
"""Upserts a Azure Cosmos resource and returns it.
:param dict body:
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The upserted Azure Cosmos resource.
:rtype:
dict
"""
if options is None:
options = {}
initial_headers = initial_headers or self.default_headers
headers = base.GetHeaders(self,
initial_headers,
'post',
path,
id,
type,
options)
headers[http_constants.HttpHeaders.IsUpsert] = True
# Upsert will use WriteEndpoint since it uses POST operation
request = request_object._RequestObject(type, documents._OperationType.Upsert)
result, self.last_response_headers = self.__Post(path,
request,
body,
headers)
# update session for write request
self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
return result
def Replace(self, resource, path, type, id, initial_headers, options=None):
"""Replaces a Azure Cosmos resource and returns it.
:param dict resource:
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The new Azure Cosmos resource.
:rtype:
dict
"""
if options is None:
options = {}
initial_headers = initial_headers or self.default_headers
headers = base.GetHeaders(self,
initial_headers,
'put',
path,
id,
type,
options)
# Replace will use WriteEndpoint since it uses PUT operation
request = request_object._RequestObject(type, documents._OperationType.Replace)
result, self.last_response_headers = self.__Put(path,
request,
resource,
headers)
# update session for requests that mutate data on the server side
self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
return result
def Read(self, path, type, id, initial_headers, options=None):
"""Reads a Azure Cosmos resource and returns it.
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The read Azure Cosmos resource.
:rtype:
dict
"""
if options is None:
options = {}
initial_headers = initial_headers or self.default_headers
headers = base.GetHeaders(self,
initial_headers,
'get',
path,
id,
type,
options)
# Read will use ReadEndpoint since it uses GET operation
request = request_object._RequestObject(type, documents._OperationType.Read)
result, self.last_response_headers = self.__Get(path,
request,
headers)
return result
def DeleteResource(self, path, type, id, initial_headers, options=None):
"""Deletes a Azure Cosmos resource and returns it.
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The deleted Azure Cosmos resource.
:rtype:
dict
"""
if options is None:
options = {}
initial_headers = initial_headers or self.default_headers
headers = base.GetHeaders(self,
initial_headers,
'delete',
path,
id,
type,
options)
# Delete will use WriteEndpoint since it uses DELETE operation
request = request_object._RequestObject(type, documents._OperationType.Delete)
result, self.last_response_headers = self.__Delete(path,
request,
headers)
# update session for requests that mutate data on the server side
self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
return result
def __Get(self, path, request, headers):
"""Azure Cosmos 'GET' http request.
:param str path:
:param request:
:param dict headers:
:return:
Tuple of (result, headers).
:rtype:
tuple of (dict, dict)
"""
return synchronized_request.SynchronizedRequest(self,
request,
self._global_endpoint_manager,
self.connection_policy,
self._requests_session,
'GET',
path,
None,
None,
headers)
def __Post(self, path, request, body, headers):
"""Azure Cosmos 'POST' http request.
:param str path:
:param request:
:param (str, unicode, dict) body:
:param dict headers:
:return:
Tuple of (result, headers).
:rtype:
tuple of (dict, dict)
"""
return synchronized_request.SynchronizedRequest(self,
request,
self._global_endpoint_manager,
self.connection_policy,
self._requests_session,
'POST',
path,
body,
query_params=None,
headers=headers)
def __Put(self, path, request, body, headers):
"""Azure Cosmos 'PUT' http request.
:param str path:
:param request:
:param (str, unicode, dict) body:
:param dict headers:
:return:
Tuple of (result, headers).
:rtype:
tuple of (dict, dict)
"""
return synchronized_request.SynchronizedRequest(self,
request,
self._global_endpoint_manager,
self.connection_policy,
self._requests_session,
'PUT',
path,
body,
query_params=None,
headers=headers)
def __Delete(self, path, request, headers):
"""Azure Cosmos 'DELETE' http request.
:param str path:
:param request:
:param dict headers:
:return:
Tuple of (result, headers).
:rtype:
tuple of (dict, dict)
"""
return synchronized_request.SynchronizedRequest(self,
request,
self._global_endpoint_manager,
self.connection_policy,
self._requests_session,
'DELETE',
path,
request_data=None,
query_params=None,
headers=headers)
def QueryFeed(self, path, collection_id, query, options, partition_key_range_id = None):
"""Query Feed for Document Collection resource.
:param str path:
Path to the document collection.
:param str collection_id:
Id of the document collection.
:param (str or dict) query:
:param dict options:
The request options for the request.
:param str partition_key_range_id:
Partition key range id.
:rtype:
tuple
"""
return self.__QueryFeed(path,
'docs',
collection_id,
lambda r: r['Documents'],
lambda _, b: b,
query,
options,
partition_key_range_id), self.last_response_headers
def __QueryFeed(self,
path,
type,
id,
result_fn,
create_fn,
query,
options=None,
partition_key_range_id=None):
"""Query for more than one Azure Cosmos resources.
:param str path:
:param str type:
:param str id:
:param function result_fn:
:param function create_fn:
:param (str or dict) query:
:param dict options:
The request options for the request.
:param str partition_key_range_id:
Specifies partition key range id.
:rtype:
list
:raises SystemError: If the query compatibility mode is undefined.
"""
if options is None:
options = {}
if query:
__GetBodiesFromQueryResult = result_fn
else:
def __GetBodiesFromQueryResult(result):
if result is not None:
return [create_fn(self, body) for body in result_fn(result)]
else:
# If there is no change feed, the result data is empty and result is None.
# This case should be interpreted as an empty array.
return []
initial_headers = self.default_headers.copy()
# Copy to make sure that default_headers won't be changed.
if query is None:
# Query operations will use ReadEndpoint even though it uses GET (for feed requests)
request = request_object._RequestObject(type, documents._OperationType.ReadFeed)
headers = base.GetHeaders(self,
initial_headers,
'get',
path,
id,
type,
options,
partition_key_range_id)
result, self.last_response_headers = self.__Get(path,
request,
headers)
return __GetBodiesFromQueryResult(result)
else:
query = self.__CheckAndUnifyQueryFormat(query)
initial_headers[http_constants.HttpHeaders.IsQuery] = 'true'
if (self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Default or
self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Query):
initial_headers[http_constants.HttpHeaders.ContentType] = runtime_constants.MediaTypes.QueryJson
elif self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.SqlQuery:
initial_headers[http_constants.HttpHeaders.ContentType] = runtime_constants.MediaTypes.SQL
else:
raise SystemError('Unexpected query compatibility mode.')
# Query operations will use ReadEndpoint even though it uses POST (for regular query operations)
request = request_object._RequestObject(type, documents._OperationType.SqlQuery)
headers = base.GetHeaders(self,
initial_headers,
'post',
path,
id,
type,
options,
partition_key_range_id)
result, self.last_response_headers = self.__Post(path,
request,
query,
headers)
return __GetBodiesFromQueryResult(result)
def __CheckAndUnifyQueryFormat(self, query_body):
"""Checks and unifies the format of the query body.
:raises TypeError: If query_body is not of expected type (depending on the query compatibility mode).
:raises ValueError: If query_body is a dict but doesn\'t have valid query text.
:raises SystemError: If the query compatibility mode is undefined.
:param (str or dict) query_body:
:return:
The formatted query body.
:rtype:
dict or string
"""
if (self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Default or
self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Query):
if not isinstance(query_body, dict) and not isinstance(query_body, six.string_types):
raise TypeError('query body must be a dict or string.')
if isinstance(query_body, dict) and not query_body.get('query'):
raise ValueError('query body must have valid query text with key "query".')
if isinstance(query_body, six.string_types):
return {'query': query_body}
elif (self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.SqlQuery and
not isinstance(query_body, six.string_types)):
raise TypeError('query body must be a string.')
else:
raise SystemError('Unexpected query compatibility mode.')
return query_body
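# Illustrative sketch for __CheckAndUnifyQueryFormat above: in the Default and Query
# compatibility modes a plain string such as 'SELECT * FROM root r' is wrapped as
# {'query': 'SELECT * FROM root r'}, while a dict must already carry a non-empty
# 'query' key; in SqlQuery mode a string query is passed through unchanged.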
@staticmethod
def __ValidateResource(resource):
id = resource.get('id')
if id:
if id.find('/') != -1 or id.find('\\') != -1 or id.find('?') != -1 or id.find('#') != -1:
raise ValueError('Id contains illegal chars.')
if id[-1] == ' ':
raise ValueError('Id ends with a space.')
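# Illustrative sketch for __ValidateResource above (ids are hypothetical):
# {'id': 'orders/2021'} raises ValueError (illegal character), {'id': 'order1 '}
# raises ValueError (trailing space), while {'id': 'order1'} or a resource with no
# 'id' at all passes validation.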
# Adds the partition key to options
def _AddPartitionKey(self, collection_link, document, options):
collection_link = collection_link.strip('/')
#TODO: Refresh the cache if partition is extracted automatically and we get a 400.1001
# If the document collection link is present in the cache, then use the cached partitionkey definition
if collection_link in self.partition_key_definition_cache:
partitionKeyDefinition = self.partition_key_definition_cache.get(collection_link)
# Else read the collection from backend and add it to the cache
else:
collection = self.ReadContainer(collection_link)
partitionKeyDefinition = collection.get('partitionKey')
self.partition_key_definition_cache[collection_link] = partitionKeyDefinition
# If the collection doesn't have a partition key definition, skip it as it's a legacy collection
if partitionKeyDefinition:
# If the user has passed in the partitionKey in options, use that; else extract it from the document
if('partitionKey' not in options):
partitionKeyValue = self._ExtractPartitionKey(partitionKeyDefinition, document)
options['partitionKey'] = partitionKeyValue
return options
# Extracts the partition key from the document using the partitionKey definition
def _ExtractPartitionKey(self, partitionKeyDefinition, document):
# Parses the paths into a list of token each representing a property
partition_key_parts = base.ParsePaths(partitionKeyDefinition.get('paths'))
# Navigates the document to retrieve the partitionKey specified in the paths
return self._RetrievePartitionKey(partition_key_parts, document)
# Navigates the document to retrieve the partitionKey specified in the partition key parts
def _RetrievePartitionKey(self, partition_key_parts, document):
expected_matchCount = len(partition_key_parts)
matchCount = 0
partitionKey = document
for part in partition_key_parts:
# At any point if we don't find the value of a sub-property in the document, we return as Undefined
if part not in partitionKey:
return documents.Undefined
else:
partitionKey = partitionKey.get(part)
matchCount += 1
# Once we reach the "leaf" value(not a dict), we break from loop
if not isinstance(partitionKey, dict):
break
# Match the count of hops we did to get the partitionKey with the length of partition key parts and validate that it's not a dict at that level
if ((matchCount != expected_matchCount) or isinstance(partitionKey, dict)):
return documents.Undefined
return partitionKey
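# Illustrative sketch for _ExtractPartitionKey/_RetrievePartitionKey above (paths and
# document are hypothetical): for a partition key definition with paths ['/address/zip'],
# the parsed parts are roughly ['address', 'zip']; a document like
#     {'address': {'zip': '98052'}}
# resolves to '98052', while a document missing 'address' or 'zip' (or one where the
# resolved value is still a dict) yields documents.Undefined.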
def _UpdateSessionIfRequired(self, request_headers, response_result, response_headers):
"""
Updates session if necessary.
:param dict request_headers:
:param dict response_result:
:param dict response_headers:
:return:
    None, but updates the client session if necessary.
"""
# If this request was made with session consistency, then update the session.
if response_result is None or response_headers is None:
return
is_session_consistency = False
if http_constants.HttpHeaders.ConsistencyLevel in request_headers:
if documents.ConsistencyLevel.Session == request_headers[http_constants.HttpHeaders.ConsistencyLevel]:
is_session_consistency = True
if is_session_consistency:
# update session
self.session.update_session(response_result, response_headers)
| 35.498809 | 293 | 0.545044 |
79520480d88a05fb38d18b7816a139e84c110349 | 82,261 | py | Python | tools/wraptypes/yacc.py | AnantTiwari-Naman/pyglet | 4774f2889057da95a78785a69372112931e6a620 | ["BSD-3-Clause"] | 1,160 | 2019-06-13T11:51:40.000Z | 2022-03-31T01:55:32.000Z | tools/wraptypes/yacc.py | AnantTiwari-Naman/pyglet | 4774f2889057da95a78785a69372112931e6a620 | ["BSD-3-Clause"] | 491 | 2019-07-14T16:13:11.000Z | 2022-03-31T08:04:32.000Z | tools/wraptypes/yacc.py | AnantTiwari-Naman/pyglet | 4774f2889057da95a78785a69372112931e6a620 | ["BSD-3-Clause"] | 316 | 2019-06-14T13:56:48.000Z | 2022-03-30T19:26:58.000Z |
from __future__ import print_function
#-----------------------------------------------------------------------------
# ply: yacc.py
#
# Author(s): David M. Beazley (dave@dabeaz.com)
# Modifications for pyglet by Alex Holkner (alex.holkner@gmail.com) (<ah>)
#
# Copyright (C) 2001-2006, David M. Beazley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See the file COPYING for a complete copy of the LGPL.
#
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what might
# be considered good Python "coding style."  Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__ = "2.2"
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = 1 # Debugging mode. If set, yacc generates a
# 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
import re, types, sys, cStringIO, md5, os.path
# Exception raised for yacc-related errors
class YaccError(Exception): pass
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
filename = '' # <ah>
def __str__(self): return self.type
def __repr__(self): return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
def __init__(self,s,stack=None):
self.slice = s
self.pbstack = []
self.stack = stack
def __getitem__(self,n):
if type(n) == types.IntType:
if n >= 0: return self.slice[n].value
else: return self.stack[n].value
else:
return [s.value for s in self.slice[n.start:n.stop:n.step]]
def __setitem__(self,n,v):
self.slice[n].value = v
def __len__(self):
return len(self.slice)
def lineno(self,n):
return getattr(self.slice[n],"lineno",0)
def linespan(self,n):
startline = getattr(self.slice[n],"lineno",0)
endline = getattr(self.slice[n],"endlineno",startline)
return startline,endline
def lexpos(self,n):
return getattr(self.slice[n],"lexpos",0)
def lexspan(self,n):
startpos = getattr(self.slice[n],"lexpos",0)
endpos = getattr(self.slice[n],"endlexpos",startpos)
return startpos,endpos
def pushback(self,n):
if n <= 0:
raise ValueError("Expected a positive value")
if n > (len(self.slice)-1):
raise ValueError("Can't push %d tokens. Only %d are available." % (n,len(self.slice)-1))
for i in range(0,n):
self.pbstack.append(self.slice[-i-1])
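# Illustrative sketch of how a grammar rule sees a YaccProduction (rule and token names
# are hypothetical):
#
#     def p_expression_plus(p):
#         'expression : expression PLUS term'
#         p[0] = p[1] + p[3]        # indexing reads/writes the underlying symbols' .value
#         line = p.lineno(2)        # starting line number of the PLUS token
#         span = p.lexspan(1)       # (lexpos, endlexpos) of the first 'expression'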
# The LR Parsing engine. This is defined as a class so that multiple parsers
# can exist in the same process. A user never instantiates this directly.
# Instead, the global yacc() function should be used to create a suitable Parser
# object.
class Parser:
# <ah> Remove magic (use ParserPrototype)
def __init__(self):
# Reset internal state
self.productions = None # List of productions
self.errorfunc = None # Error handling function
self.action = { } # LR Action table
self.goto = { } # LR goto table
self.require = { } # Attribute require table
self.method = "Unknown LR" # Table construction method used
# <ah> 25 Jan 2007
self.statestackstack = []
self.symstackstack = []
def errok(self):
self.errorcount = 0
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
def push_state(self):
'''Save parser state and restart it.'''
# <ah> 25 Jan 2007
self.statestackstack.append(self.statestack[:])
self.symstackstack.append(self.symstack[:])
self.restart()
def pop_state(self):
'''Restore saved parser state.'''
# <ah> 25 Jan 2007
self.statestack[:] = self.statestackstack.pop()
self.symstack[:] = self.symstackstack.pop()
def parse(self,input=None,lexer=None,debug=0):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table
goto = self.goto # Local reference to goto table
prod = self.productions # Local reference to production list
pslice = YaccProduction(None) # Production object passed to grammar rules
pslice.parser = self # Parser object
self.errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
import lex
lexer = lex.lexer
pslice.lexer = lexer
# If input was supplied, pass to lexer
if input:
lexer.input(input)
# Tokenize function
get_token = lexer.token
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if debug > 1:
print('state', statestack[-1])
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
if debug:
errorlead = ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()
# Check the action table
s = statestack[-1]
ltype = lookahead.type
t = actions.get((s,ltype),None)
if debug > 1:
print('action', t)
if t is not None:
if t > 0:
# shift a symbol on the stack
if ltype == '$end':
# Error, end of input
sys.stderr.write("yacc: Parse error. EOF\n")
return
statestack.append(t)
if debug > 1:
sys.stderr.write("%-60s shift state %s\n" % (errorlead, t))
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if self.errorcount > 0:
self.errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if debug > 1:
sys.stderr.write("%-60s reduce %d\n" % (errorlead, -t))
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
try:
sym.lineno = targ[1].lineno
sym.filename = targ[1].filename
sym.endlineno = getattr(targ[-1],"endlineno",targ[-1].lineno)
sym.lexpos = targ[1].lexpos
sym.endlexpos = getattr(targ[-1],"endlexpos",targ[-1].lexpos)
except AttributeError:
sym.lineno = 0
del symstack[-plen:]
del statestack[-plen:]
else:
sym.lineno = 0
targ = [ sym ]
pslice.slice = targ
pslice.pbstack = []
# Call the grammar rule with our special slice object
p.func(pslice)
# If there was a pushback, put that on the stack
if pslice.pbstack:
lookaheadstack.append(lookahead)
for _t in pslice.pbstack:
lookaheadstack.append(_t)
lookahead = None
symstack.append(sym)
statestack.append(goto[statestack[-1],pname])
continue
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t is None:
if debug:
sys.stderr.write(errorlead + "\n")
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if not self.errorcount:
self.errorcount = error_count
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if not self.errorcount:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = errtoken.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
self.errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# -----------------------------------------------------------------------------
# === Parser Construction ===
#
# The following functions and variables are used to implement the yacc() function
# itself. This is pretty hairy stuff involving lots of error checking,
# construction of LR items, kernels, and so forth. Although a lot of
# this work is done using global variables, the resulting Parser object
# is completely self contained--meaning that it is safe to repeatedly
# call yacc() with different grammars in the same application.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# validate_file()
#
# This function checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
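# The kind of cut-and-paste mistake this catches looks like the following
# (names are illustrative only); the second definition silently replaces
# the first one at import time:
#
#     def p_statement_expr(p): ...
#     ...
#     def p_statement_expr(p): ...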
def validate_file(filename):
base,ext = os.path.splitext(filename)
if ext != '.py': return 1 # No idea. Assume it's okay.
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return 1 # Oh well
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
counthash = { }
linen = 1
noerror = 1
for l in lines:
m = fre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
sys.stderr.write("%s:%d: Function %s redefined. Previously defined on line %d\n" % (filename,linen,name,prev))
noerror = 0
linen += 1
return noerror
# This function looks for functions that might be grammar rules, but which don't have the proper p_ prefix.
def validate_dict(d):
for n,v in d.items():
if n[0:2] == 'p_' and type(v) in (types.FunctionType, types.MethodType): continue
if n[0:2] == 't_': continue
if n[0:2] == 'p_':
sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % n)
if 1 and isinstance(v,types.FunctionType) and v.func_code.co_argcount == 1:
try:
doc = v.__doc__.split(" ")
if doc[1] == ':':
sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.func_code.co_filename, v.func_code.co_firstlineno,n))
except Exception:
pass
# -----------------------------------------------------------------------------
# === GRAMMAR FUNCTIONS ===
#
# The following global variables and functions are used to store, manipulate,
# and verify the grammar rules specified by the user.
# -----------------------------------------------------------------------------
# Initialize all of the global variables used during grammar construction
def initialize_vars():
global Productions, Prodnames, Prodmap, Terminals
global Nonterminals, First, Follow, Precedence, LRitems
global Errorfunc, Signature, Requires
Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
Prodmap = { } # A dictionary that is only used to detect duplicate
# productions.
Terminals = { } # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
Nonterminals = { } # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
First = { } # A dictionary of precomputed FIRST(x) symbols
Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
Precedence = { } # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
LRitems = [ ] # A list of all LR items for the grammar. These are the
# productions with the "dot" like E -> E . PLUS E
Errorfunc = None # User defined error handler
Signature = md5.new() # Digital signature of the grammar rules, precedence
# and other information. Used to determine when a
# parsing table needs to be regenerated.
Requires = { } # Requires list
# File objects used when creating the parser.out debugging file
global _vf, _vfc
_vf = cStringIO.StringIO()
_vfc = cStringIO.StringIO()
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# It has a few required attributes:
#
# name - Name of the production (nonterminal)
# prod - A list of symbols making up its production
# number - Production number.
#
# In addition, a few additional attributes are used to help with debugging or
# optimization of table generation.
#
# file - File where production action is defined.
# lineno - Line number where action is defined
# func - Action function
# prec - Precedence level
# lr_next - Next LR item. Example, if we are ' E -> E . PLUS E'
# then lr_next refers to 'E -> E PLUS . E'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# -----------------------------------------------------------------------------
class Production:
def __init__(self,**kw):
for k,v in kw.items():
setattr(self,k,v)
self.lr_index = -1
self.lr0_added = 0 # Flag indicating whether or not added to LR0 closure
self.lr1_added = 0 # Flag indicating whether or not added to LR1
self.usyms = [ ]
self.lookaheads = { }
self.lk_added = { }
self.setnumbers = [ ]
def __str__(self):
if self.prod:
s = "%s -> %s" % (self.name," ".join(self.prod))
else:
s = "%s -> <empty>" % self.name
return s
def __repr__(self):
return str(self)
# Compute lr_items from the production
def lr_item(self,n):
if n > len(self.prod): return None
p = Production()
p.name = self.name
p.prod = list(self.prod)
p.number = self.number
p.lr_index = n
p.lookaheads = { }
p.setnumbers = self.setnumbers
p.prod.insert(n,".")
p.prod = tuple(p.prod)
p.len = len(p.prod)
p.usyms = self.usyms
# Precompute list of productions immediately following
try:
p.lrafter = Prodnames[p.prod[n+1]]
except (IndexError,KeyError) as e:
p.lrafter = []
try:
p.lrbefore = p.prod[n-1]
except IndexError:
p.lrbefore = None
return p
class MiniProduction:
pass
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule.
# The production rule is assumed to be found in the function's docstring.
# This rule has the general syntax:
#
# name1 ::= production1
# | production2
# | production3
# ...
# | productionn
# name2 ::= production1
# | production2
# ...
# -----------------------------------------------------------------------------
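# For example (rule and token names are illustrative only), a docstring such
# as the one below defines two productions for the nonterminal 'statement':
#
#     def p_statement(p):
#         '''statement : NAME EQUALS expression
#                      | PRINT expression'''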
def add_production(f,file,line,prodname,syms):
if Terminals.has_key(prodname):
sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname))
return -1
if prodname == 'error':
sys.stderr.write("%s:%d: Illegal rule name '%s'. error is a reserved word.\n" % (file,line,prodname))
return -1
if not _is_identifier.match(prodname):
sys.stderr.write("%s:%d: Illegal rule name '%s'\n" % (file,line,prodname))
return -1
for x in range(len(syms)):
s = syms[x]
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
sys.stderr.write("%s:%d: Literal token %s in rule '%s' may only be a single character\n" % (file,line,s, prodname))
return -1
if not Terminals.has_key(c):
Terminals[c] = []
syms[x] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
sys.stderr.write("%s:%d: Illegal name '%s' in rule '%s'\n" % (file,line,s, prodname))
return -1
# See if the rule is already in the rulemap
map = "%s -> %s" % (prodname,syms)
if Prodmap.has_key(map):
m = Prodmap[map]
sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m))
sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line))
return -1
p = Production()
p.name = prodname
p.prod = syms
p.file = file
p.line = line
p.func = f
p.number = len(Productions)
Productions.append(p)
Prodmap[map] = p
if not Nonterminals.has_key(prodname):
Nonterminals[prodname] = [ ]
# Add all terminals to Terminals
i = 0
while i < len(p.prod):
t = p.prod[i]
if t == '%prec':
try:
precname = p.prod[i+1]
except IndexError:
sys.stderr.write("%s:%d: Syntax error. Nothing follows %%prec.\n" % (p.file,p.line))
return -1
prec = Precedence.get(precname,None)
if not prec:
sys.stderr.write("%s:%d: Nothing known about the precedence of '%s'\n" % (p.file,p.line,precname))
return -1
else:
p.prec = prec
del p.prod[i]
del p.prod[i]
continue
if Terminals.has_key(t):
Terminals[t].append(p.number)
# Is a terminal. We'll assign a precedence to p based on this
if not hasattr(p,"prec"):
p.prec = Precedence.get(t,('right',0))
else:
if not Nonterminals.has_key(t):
Nonterminals[t] = [ ]
Nonterminals[t].append(p.number)
i += 1
if not hasattr(p,"prec"):
p.prec = ('right',0)
# Set final length of productions
p.len = len(p.prod)
p.prod = tuple(p.prod)
# Calculate unique syms in the production
p.usyms = [ ]
for s in p.prod:
if s not in p.usyms:
p.usyms.append(s)
# Add to the global productions list
try:
Prodnames[p.name].append(p)
except KeyError:
Prodnames[p.name] = [ p ]
return 0
# Given a raw rule function, this function rips out its doc string
# and adds rules to the grammar
def add_function(f):
line = f.func_code.co_firstlineno
file = f.func_code.co_filename
error = 0
if isinstance(f,types.MethodType):
reqdargs = 2
else:
reqdargs = 1
if f.func_code.co_argcount > reqdargs:
sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__))
return -1
if f.func_code.co_argcount < reqdargs:
sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__))
return -1
if f.__doc__:
# Split the doc string into lines
pstrings = f.__doc__.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p: continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
sys.stderr.write("%s:%d: Misplaced '|'.\n" % (file,dline))
return -1
prodname = lastp
if len(p) > 1:
syms = p[1:]
else:
syms = [ ]
else:
prodname = p[0]
lastp = prodname
assign = p[1]
if len(p) > 2:
syms = p[2:]
else:
syms = [ ]
if assign != ':' and assign != '::=':
sys.stderr.write("%s:%d: Syntax error. Expected ':'\n" % (file,dline))
return -1
e = add_production(f,file,dline,prodname,syms)
error += e
except Exception:
sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps))
error -= 1
else:
sys.stderr.write("%s:%d: No documentation string specified in function '%s'\n" % (file,line,f.__name__))
return error
# Cycle checking code (Michael Dyck)
def compute_reachable():
'''
Find each symbol that can be reached from the start symbol.
Print a warning for any nonterminals that can't be reached.
(Unused terminals have already had their warning.)
'''
Reachable = { }
for s in Terminals.keys() + Nonterminals.keys():
Reachable[s] = 0
mark_reachable_from( Productions[0].prod[0], Reachable )
for s in Nonterminals.keys():
if not Reachable[s]:
sys.stderr.write("yacc: Symbol '%s' is unreachable.\n" % s)
def mark_reachable_from(s, Reachable):
'''
Mark all symbols that are reachable from symbol s.
'''
if Reachable[s]:
# We've already reached symbol s.
return
Reachable[s] = 1
for p in Prodnames.get(s,[]):
for r in p.prod:
mark_reachable_from(r, Reachable)
# -----------------------------------------------------------------------------
# compute_terminates()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
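# For example (illustrative), a nonterminal defined only as
#
#     args : args COMMA args
#
# can never derive a string consisting solely of terminals, so it is reported
# below as infinitely recursive.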
def compute_terminates():
'''
Raise an error for any symbols that don't terminate.
'''
Terminates = {}
# Terminals:
for t in Terminals.keys():
Terminates[t] = 1
Terminates['$end'] = 1
# Nonterminals:
# Initialize to false:
for n in Nonterminals.keys():
Terminates[n] = 0
# Then propagate termination until no change:
while 1:
some_change = 0
for (n,pl) in Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not Terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = 0
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = 1
if p_terminates:
# symbol n terminates!
if not Terminates[n]:
Terminates[n] = 1
some_change = 1
# Don't need to consider any more productions for this n.
break
if not some_change:
break
some_error = 0
for (s,terminates) in Terminates.items():
if not terminates:
if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
sys.stderr.write("yacc: Infinite recursion detected for symbol '%s'.\n" % s)
some_error = 1
return some_error
# -----------------------------------------------------------------------------
# verify_productions()
#
# This function examines all of the supplied rules to see if they seem valid.
# -----------------------------------------------------------------------------
def verify_productions(cycle_check=1):
error = 0
for p in Productions:
if not p: continue
for s in p.prod:
if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s))
error = 1
continue
unused_tok = 0
# Now verify all of the tokens
if yaccdebug:
_vf.write("Unused terminals:\n\n")
for s,v in Terminals.items():
if s != 'error' and not v:
sys.stderr.write("yacc: Warning. Token '%s' defined, but not used.\n" % s)
if yaccdebug: _vf.write(" %s\n"% s)
unused_tok += 1
# Print out all of the productions
if yaccdebug:
_vf.write("\nGrammar\n\n")
for i in range(1,len(Productions)):
_vf.write("Rule %-5d %s\n" % (i, Productions[i]))
unused_prod = 0
# Verify the use of all productions
for s,v in Nonterminals.items():
if not v:
p = Prodnames[s][0]
sys.stderr.write("%s:%d: Warning. Rule '%s' defined, but not used.\n" % (p.file,p.line, s))
unused_prod += 1
if unused_tok == 1:
sys.stderr.write("yacc: Warning. There is 1 unused token.\n")
if unused_tok > 1:
sys.stderr.write("yacc: Warning. There are %d unused tokens.\n" % unused_tok)
if unused_prod == 1:
sys.stderr.write("yacc: Warning. There is 1 unused rule.\n")
if unused_prod > 1:
sys.stderr.write("yacc: Warning. There are %d unused rules.\n" % unused_prod)
if yaccdebug:
_vf.write("\nTerminals, with rules where they appear\n\n")
ks = Terminals.keys()
ks.sort()
for k in ks:
_vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]])))
_vf.write("\nNonterminals, with rules where they appear\n\n")
ks = Nonterminals.keys()
ks.sort()
for k in ks:
_vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]])))
if (cycle_check):
compute_reachable()
error += compute_terminates()
# error += check_cycles()
return error
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems():
for p in Productions:
lastlri = p
lri = p.lr_item(0)
i = 0
while 1:
lri = p.lr_item(i)
lastlri.lr_next = lri
if not lri: break
lri.lr_num = len(LRitems)
LRitems.append(lri)
lastlri = lri
i += 1
# In order for the rest of the parser generator to work, we need to
# guarantee that no more lritems are generated. Therefore, we nuke
# the p.lr_item method. (Only used in debugging)
# Production.lr_item = None
# -----------------------------------------------------------------------------
# add_precedence()
#
# Given a list of precedence rules, add to the precedence table.
# -----------------------------------------------------------------------------
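# The plist argument mirrors the 'precedence' table that a grammar module
# would declare (token names illustrative); the level number grows with the
# position in the table, so later entries bind more tightly:
#
#     precedence = (
#         ('left',  'PLUS', 'MINUS'),
#         ('left',  'TIMES', 'DIVIDE'),
#         ('right', 'UMINUS'),
#     )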
def add_precedence(plist):
plevel = 0
error = 0
for p in plist:
plevel += 1
try:
prec = p[0]
terms = p[1:]
if prec != 'left' and prec != 'right' and prec != 'nonassoc':
sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
return -1
for t in terms:
if Precedence.has_key(t):
sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
error += 1
continue
Precedence[t] = (prec,plevel)
except:
sys.stderr.write("yacc: Invalid precedence table.\n")
error += 1
return error
# -----------------------------------------------------------------------------
# augment_grammar()
#
# Compute the augmented grammar. This is just a rule S' -> start where start
# is the starting symbol.
# -----------------------------------------------------------------------------
def augment_grammar(start=None):
if not start:
start = Productions[1].name
Productions[0] = Production(name="S'",prod=[start],number=0,len=1,prec=('right',0),func=None)
Productions[0].usyms = [ start ]
Nonterminals[start].append(0)
# -------------------------------------------------------------------------
# first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first1, the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
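# For example (symbols illustrative): if 'opt_sign' is nullable and 'LPAREN'
# is a terminal, first(('opt_sign', 'LPAREN')) collects the non-<empty>
# members of First['opt_sign'] and then adds 'LPAREN', but not '<empty>'.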
def first(beta):
# We are computing First(x1,x2,x3,...,xn)
result = [ ]
for x in beta:
x_produces_empty = 0
# Add all the non-<empty> symbols of First[x] to the result.
for f in First[x]:
if f == '<empty>':
x_produces_empty = 1
else:
if f not in result: result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# FOLLOW(x)
# Given a non-terminal, this function computes the set of all symbols
# that might follow it. Dragon book, p. 189.
def compute_follow(start=None):
# Add '$end' to the follow list of the start symbol
for k in Nonterminals.keys():
Follow[k] = [ ]
if not start:
start = Productions[1].name
Follow[start] = [ '$end' ]
while 1:
didadd = 0
for p in Productions[1:]:
# Here is the production set
for i in range(len(p.prod)):
B = p.prod[i]
if Nonterminals.has_key(B):
# Okay. We got a non-terminal in a production
fst = first(p.prod[i+1:])
hasempty = 0
for f in fst:
if f != '<empty>' and f not in Follow[B]:
Follow[B].append(f)
didadd = 1
if f == '<empty>':
hasempty = 1
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in Follow[p.name]:
if f not in Follow[B]:
Follow[B].append(f)
didadd = 1
if not didadd: break
if 0 and yaccdebug:
_vf.write('\nFollow:\n')
for k in Nonterminals.keys():
_vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Follow[k]])))
# -------------------------------------------------------------------------
# compute_first1()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first1():
# Terminals:
for t in Terminals.keys():
First[t] = [t]
First['$end'] = ['$end']
First['#'] = ['#'] # what's this for?
# Nonterminals:
# Initialize to the empty set:
for n in Nonterminals.keys():
First[n] = []
# Then propagate symbols until no change:
while 1:
some_change = 0
for n in Nonterminals.keys():
for p in Prodnames[n]:
for f in first(p.prod):
if f not in First[n]:
First[n].append( f )
some_change = 1
if not some_change:
break
if 0 and yaccdebug:
_vf.write('\nFirst:\n')
for k in Nonterminals.keys():
_vf.write("%-20s : %s\n" %
(k, " ".join([str(s) for s in First[k]])))
# -----------------------------------------------------------------------------
# === SLR Generation ===
#
# The following functions are used to construct SLR (Simple LR) parsing tables
# as described on p.221-229 of the dragon book.
# -----------------------------------------------------------------------------
# Global variables for the LR parsing engine
def lr_init_vars():
global _lr_action, _lr_goto, _lr_method
global _lr_goto_cache, _lr0_cidhash
_lr_action = { } # Action table
_lr_goto = { } # Goto table
_lr_method = "Unknown" # LR method used
_lr_goto_cache = { }
_lr0_cidhash = { }
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
# prodlist is a list of productions.
_add_count = 0 # Counter used to detect cycles
def lr0_closure(I):
global _add_count
_add_count += 1
prodlist = Productions
# Add everything in I to J
J = I[:]
didadd = 1
while didadd:
didadd = 0
for j in J:
for x in j.lrafter:
if x.lr0_added == _add_count: continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = _add_count
didadd = 1
return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(I,x):
# First we look for a previously cached entry
g = _lr_goto_cache.get((id(I),x),None)
if g: return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = _lr_goto_cache.get(x,None)
if not s:
s = { }
_lr_goto_cache[x] = s
gs = [ ]
for p in I:
n = p.lr_next
if n and n.lrbefore == x:
s1 = s.get(id(n),None)
if not s1:
s1 = { }
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end',None)
if not g:
if gs:
g = lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
_lr_goto_cache[(id(I),x)] = g
return g
_lr0_cidhash = { }
# Compute the LR(0) sets of item function
def lr0_items():
C = [ lr0_closure([Productions[0].lr_next]) ]
i = 0
for I in C:
_lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbols
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = { }
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms.keys():
g = lr0_goto(I,x)
if not g: continue
if _lr0_cidhash.has_key(id(g)): continue
_lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennello (1982).
#
# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# Note: This implementation is a complete replacement of the LALR(1)
# implementation in PLY-1.x releases. That version was based on
# a less efficient algorithm and it had bugs in its implementation.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
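# For example (illustrative), an empty alternative such as
#
#     def p_opt_semi(p):
#         '''opt_semi : SEMI
#                     | '''
#
# makes 'opt_semi' nullable, and any nonterminal whose entire right-hand side
# is made of nullable symbols becomes nullable on a later pass of the loop.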
def compute_nullable_nonterminals():
nullable = {}
num_nullable = 0
while 1:
for p in Productions[1:]:
if p.len == 0:
nullable[p.name] = 1
continue
for t in p.prod:
if not nullable.has_key(t): break
else:
nullable[p.name] = 1
if len(nullable) == num_nullable: break
num_nullable = len(nullable)
return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(C):
trans = []
for state in range(len(C)):
for p in C[state]:
if p.lr_index < p.len - 1:
t = (state,p.prod[p.lr_index+1])
if Nonterminals.has_key(t[1]):
if t not in trans: trans.append(t)
state = state + 1
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(C,trans,nullable):
dr_set = { }
state,N = trans
terms = []
g = lr0_goto(C[state],N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if Terminals.has_key(a):
if a not in terms: terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = lr0_goto(C[state],N)
j = _lr0_cidhash.get(id(g),-1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if empty.has_key(a):
rel.append((j,a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(C,trans,nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state,N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N: continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if dtrans.has_key((j,t)):
# Yes. Okay, there is some chance that this is an includes relation
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if Terminals.has_key(p.prod[li]): break # No forget it
if not nullable.has_key(p.prod[li]): break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j,t))
g = lr0_goto(C[j],t) # Go to next set
j = _lr0_cidhash.get(id(g),-1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name: continue
if r.len != p.len: continue
i = 0
# This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]: break
i = i + 1
else:
lookb.append((j,r))
for i in includes:
if not includedict.has_key(i): includedict[i] = []
includedict[i].append((state,N))
lookdict[(state,N)] = lookb
return lookdict,includedict
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
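# A tiny made-up illustration: with X = (1, 2), R(1) = [2], R(2) = [],
# FP(1) = ['a'] and FP(2) = ['b'], digraph(X, R, FP) returns
# F = {1: ['a', 'b'], 2: ['b']}, i.e. F(1) = FP(1) U F(2).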
def digraph(X,R,FP):
N = { }
for x in X:
N[x] = 0
stack = []
F = { }
for x in X:
if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
return F
def traverse(x,N,stack,F,X,R,FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y,N,stack,F,X,R,FP)
N[x] = min(N[x],N[y])
for a in F.get(y,[]):
if a not in F[x]: F[x].append(a)
if N[x] == d:
N[stack[-1]] = sys.maxint
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = sys.maxint
F[stack[-1]] = F[x]
element = stack.pop()
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(C, ntrans, nullable):
FP = lambda x: dr_relation(C,x,nullable)
R = lambda x: reads_relation(C,x,nullable)
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(ntrans,readsets,inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x,[])
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(lookbacks,followset):
for trans,lb in lookbacks.items():
# Loop over productions in lookback
for state,p in lb:
if not p.lookaheads.has_key(state):
p.lookaheads[state] = []
f = followset.get(trans,[])
for a in f:
if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(C):
# Determine all of the nullable nonterminals
nullable = compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = find_nonterminal_transitions(C)
# Compute read sets
readsets = compute_read_sets(C,trans,nullable)
# Compute lookback/includes relations
lookd, included = compute_lookback_includes(C,trans,nullable)
# Compute LALR FOLLOW sets
followsets = compute_follow_sets(trans,readsets,included)
# Add all of the lookaheads
add_lookaheads(lookd,followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(method):
global _lr_method
goto = _lr_goto # Goto array
action = _lr_action # Action array
actionp = { } # Action production array (temporary)
_lr_method = method
n_srconflict = 0
n_rrconflict = 0
if yaccdebug:
sys.stderr.write("yacc: Generating %s parsing table...\n" % method)
_vf.write("\n\nParsing method: %s\n\n" % method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = lr0_items()
if method == 'LALR':
add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [ ] # List of actions
if yaccdebug:
_vf.write("\nstate %d\n\n" % st)
for p in I:
_vf.write(" (%d) %s\n" % (p.number, str(p)))
_vf.write("\n")
for p in I:
try:
if p.prod[-1] == ".":
if p.name == "S'":
# Start symbol. Accept!
action[st,"$end"] = 0
actionp[st,"$end"] = p
else:
# We are at the end of a production. Reduce!
if method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = Follow[p.name]
for a in laheads:
actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
r = action.get((st,a),None)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
sprec,slevel = Productions[actionp[st,a].number].prec
rprec,rlevel = Precedence.get(a,('right',0))
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
action[st,a] = -p.number
actionp[st,a] = p
if not slevel and not rlevel:
_vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
_vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
n_srconflict += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
action[st,a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
_vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
_vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
n_srconflict +=1
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
action[st,a] = -p.number
actionp[st,a] = p
# sys.stderr.write("Reduce/reduce conflict in state %d\n" % st)
n_rrconflict += 1
_vfc.write("reduce/reduce conflict in state %d resolved using rule %d (%s).\n" % (st, actionp[st,a].number, actionp[st,a]))
_vf.write(" ! reduce/reduce conflict for %s resolved using rule %d (%s).\n" % (a,actionp[st,a].number, actionp[st,a]))
else:
sys.stderr.write("Unknown conflict in state %d\n" % st)
else:
action[st,a] = -p.number
actionp[st,a] = p
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if Terminals.has_key(a):
g = lr0_goto(I,a)
j = _lr0_cidhash.get(id(g),-1)
if j >= 0:
# We are in a shift state
actlist.append((a,p,"shift and go to state %d" % j))
r = action.get((st,a),None)
if r is not None:
# Whoa have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
sys.stderr.write("Shift/shift conflict in state %d\n" % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
rprec,rlevel = Productions[actionp[st,a].number].prec
sprec,slevel = Precedence.get(a,('right',0))
if (slevel > rlevel) or ((slevel == rlevel) and (rprec != 'left')):
# We decide to shift here... highest precedence to shift
action[st,a] = j
actionp[st,a] = p
if not rlevel:
n_srconflict += 1
_vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
_vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
elif (slevel == rlevel) and (rprec == 'nonassoc'):
action[st,a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
n_srconflict +=1
_vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
_vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
else:
sys.stderr.write("Unknown conflict in state %d\n" % st)
else:
action[st,a] = j
actionp[st,a] = p
except Exception as e:
raise YaccError("Hosed in lr_parse_table", e)
# Print the actions associated with each terminal
if yaccdebug:
_actprint = { }
for a,p,m in actlist:
if action.has_key((st,a)):
if p is actionp[st,a]:
_vf.write(" %-15s %s\n" % (a,m))
_actprint[(a,m)] = 1
_vf.write("\n")
for a,p,m in actlist:
if action.has_key((st,a)):
if p is not actionp[st,a]:
if not _actprint.has_key((a,m)):
_vf.write(" ! %-15s [ %s ]\n" % (a,m))
_actprint[(a,m)] = 1
# Construct the goto table for this state
if yaccdebug:
_vf.write("\n")
nkeys = { }
for ii in I:
for s in ii.usyms:
if Nonterminals.has_key(s):
nkeys[s] = None
for n in nkeys.keys():
g = lr0_goto(I,n)
j = _lr0_cidhash.get(id(g),-1)
if j >= 0:
goto[st,n] = j
if yaccdebug:
_vf.write(" %-30s shift and go to state %d\n" % (n,j))
st += 1
if yaccdebug:
if n_srconflict == 1:
sys.stderr.write("yacc: %d shift/reduce conflict\n" % n_srconflict)
if n_srconflict > 1:
sys.stderr.write("yacc: %d shift/reduce conflicts\n" % n_srconflict)
if n_rrconflict == 1:
sys.stderr.write("yacc: %d reduce/reduce conflict\n" % n_rrconflict)
if n_rrconflict > 1:
sys.stderr.write("yacc: %d reduce/reduce conflicts\n" % n_rrconflict)
# -----------------------------------------------------------------------------
# ==== LR Utility functions ====
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _lr_write_tables()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def lr_write_tables(modulename=tab_module,outputdir=''):
filename = os.path.join(outputdir,modulename) + ".py"
try:
f = open(filename,"w")
f.write("""
# %s
# This file is automatically generated. Do not edit.
_lr_method = %s
_lr_signature = %s
""" % (filename, repr(_lr_method), repr(Signature.digest())))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = { }
for k,v in _lr_action.items():
i = items.get(k[1])
if not i:
i = ([],[])
items[k[1]] = i
i[0].append(k[0])
i[1].append(v)
f.write("\n_lr_action_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
_lr_action[(_x,_k)] = _y
del _lr_action_items
""")
else:
f.write("\n_lr_action = { ");
for k,v in _lr_action.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
if smaller:
# Factor out names to try and make smaller
items = { }
for k,v in _lr_goto.items():
i = items.get(k[1])
if not i:
i = ([],[])
items[k[1]] = i
i[0].append(k[0])
i[1].append(v)
f.write("\n_lr_goto_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
_lr_goto[(_x,_k)] = _y
del _lr_goto_items
""")
else:
f.write("\n_lr_goto = { ");
for k,v in _lr_goto.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
# Write production table
f.write("_lr_productions = [\n")
for p in Productions:
if p:
if (p.func):
f.write(" (%r,%d,%r,%r,%d),\n" % (p.name, p.len, p.func.__name__,p.file,p.line))
else:
f.write(" (%r,%d,None,None,None),\n" % (p.name, p.len))
else:
f.write(" None,\n")
f.write("]\n")
f.close()
except IOError as e:
print("Unable to create '%s'" % filename)
print(e)
return
def lr_read_tables(module=tab_module,optimize=0):
global _lr_action, _lr_goto, _lr_productions, _lr_method
try:
exec("import %s as parsetab" % module)
global parsetab # declare the name of the imported module
if (optimize) or (Signature.digest() == parsetab._lr_signature):
_lr_action = parsetab._lr_action
_lr_goto = parsetab._lr_goto
_lr_productions = parsetab._lr_productions
_lr_method = parsetab._lr_method
return 1
else:
return 0
except (ImportError,AttributeError):
return 0
# Available instance types. This is used when parsers are defined by a class.
# it's a little funky because I want to preserve backwards compatibility
# with Python 2.0 where types.ObjectType is undefined.
try:
_INSTANCETYPE = (types.InstanceType, types.ObjectType)
except AttributeError:
_INSTANCETYPE = types.InstanceType
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build the parser module
# -----------------------------------------------------------------------------
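# A typical call from a grammar module looks roughly like this (module name,
# method and input are illustrative; init_parser() comes from the
# ParserPrototype object that yacc() returns in this version):
#
#     import yacc
#     proto = yacc.yacc(method='LALR', debug=0)
#     parser = proto.init_parser()
#     result = parser.parse(some_input, lexer=my_lexer)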
# <ah> Add parserclass parameter.
def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0,write_tables=1,debugfile=debug_file,outputdir='', parserclass=Parser):
global yaccdebug
yaccdebug = debug
initialize_vars()
files = { }
error = 0
# Add parsing method to signature
Signature.update(method)
# If a "module" parameter was supplied, extract its dictionary.
# Note: a module may in fact be an instance as well.
if module:
# User supplied a module object.
if isinstance(module, types.ModuleType):
ldict = module.__dict__
elif isinstance(module, _INSTANCETYPE):
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = { }
for i in _items:
ldict[i[0]] = i[1]
else:
raise ValueError("Expected a module")
else:
# No module given. We might be able to get information from the caller.
# Throw an exception and unwind the traceback to get the globals
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
f = f.f_back # Walk out to our calling function
ldict = f.f_globals # Grab its globals dictionary
# Add starting symbol to signature
if not start:
start = ldict.get("start",None)
if start:
Signature.update(start)
# If running in optimized mode, read the cached parse tables instead of regenerating them
if (optimize and lr_read_tables(tabmodule,1)):
# Read parse table
del Productions[:]
for p in _lr_productions:
if not p:
Productions.append(None)
else:
m = MiniProduction()
m.name = p[0]
m.len = p[1]
m.file = p[3]
m.line = p[4]
if p[2]:
m.func = ldict[p[2]]
Productions.append(m)
else:
# Get the tokens map
if (module and isinstance(module,_INSTANCETYPE)):
tokens = getattr(module,"tokens",None)
else:
tokens = ldict.get("tokens",None)
if not tokens:
raise YaccError("module does not define a list 'tokens'")
if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
raise YaccError("tokens must be a list or tuple.")
# Check to see if a requires dictionary is defined.
requires = ldict.get("require",None)
if requires:
if not (isinstance(requires,types.DictType)):
raise YaccError("require must be a dictionary.")
for r,v in requires.items():
try:
if not (isinstance(v,types.ListType)):
raise TypeError
v1 = [x.split(".") for x in v]
Requires[r] = v1
except Exception:
print("Invalid specification for rule '%s' in require. Expected a list of strings" % r)
# Build the dictionary of terminals. Each terminal starts out mapped to an
# empty list of rule numbers, which is used to track whether or not the
# terminal is actually used in the grammar
if 'error' in tokens:
print("yacc: Illegal token 'error'. Is a reserved word.")
raise YaccError("Illegal token name")
for n in tokens:
if Terminals.has_key(n):
print("yacc: Warning. Token '%s' multiply defined." % n)
Terminals[n] = [ ]
Terminals['error'] = [ ]
# Get the precedence map (if any)
prec = ldict.get("precedence",None)
if prec:
if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)):
raise YaccError("precedence must be a list or tuple.")
add_precedence(prec)
Signature.update(repr(prec))
for n in tokens:
if not Precedence.has_key(n):
Precedence[n] = ('right',0) # Default, right associative, 0 precedence
# Look for error handler
ef = ldict.get('p_error',None)
if ef:
if isinstance(ef,types.FunctionType):
ismethod = 0
elif isinstance(ef, types.MethodType):
ismethod = 1
else:
raise YaccError("'p_error' defined, but is not a function or method.")
eline = ef.func_code.co_firstlineno
efile = ef.func_code.co_filename
files[efile] = None
if (ef.func_code.co_argcount != 1+ismethod):
raise YaccError("%s:%d: p_error() requires 1 argument." % (efile,eline))
global Errorfunc
Errorfunc = ef
else:
print("yacc: Warning. no p_error() function is defined.")
# Get the list of built-in functions with p_ prefix
symbols = [ldict[f] for f in ldict.keys()
if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] == 'p_'
and ldict[f].__name__ != 'p_error')]
# Check for non-empty symbols
if len(symbols) == 0:
raise YaccError("no rules of the form p_rulename are defined.")
# Sort the symbols by line number
symbols.sort(key=lambda func: func.__code__.co_firstlineno)
# Add all of the symbols to the grammar
for f in symbols:
if (add_function(f)) < 0:
error += 1
else:
files[f.func_code.co_filename] = None
# Make a signature of the docstrings
for f in symbols:
if f.__doc__:
Signature.update(f.__doc__)
lr_init_vars()
if error:
raise YaccError("Unable to construct parser.")
if not lr_read_tables(tabmodule):
# Validate files
for filename in files.keys():
if not validate_file(filename):
error = 1
# Validate dictionary
validate_dict(ldict)
if start and not Prodnames.has_key(start):
raise YaccError("Bad starting symbol '%s'" % start)
augment_grammar(start)
error = verify_productions(cycle_check=check_recursion)
otherfunc = [ldict[f] for f in ldict.keys()
if (type(ldict[f]) in (types.FunctionType,types.MethodType) and ldict[f].__name__[:2] != 'p_')]
if error:
raise YaccError("Unable to construct parser.")
build_lritems()
compute_first1()
compute_follow(start)
if method in ['SLR','LALR']:
lr_parse_table(method)
else:
raise YaccError("Unknown parsing method '%s'" % method)
if write_tables:
lr_write_tables(tabmodule,outputdir)
if yaccdebug:
try:
f = open(os.path.join(outputdir,debugfile),"w")
f.write(_vfc.getvalue())
f.write("\n\n")
f.write(_vf.getvalue())
f.close()
except IOError as e:
print("yacc: can't create '%s'" % debugfile,e)
# Made it here. Create a parser object and set up its internal state.
# Set global parse() method to bound method of parser object.
g = ParserPrototype("xyzzy")
g.productions = Productions
g.errorfunc = Errorfunc
g.action = _lr_action
g.goto = _lr_goto
g.method = _lr_method
g.require = Requires
global parser
parser = g.init_parser()
global parse
parse = parser.parse
# Clean up all of the globals we created
if (not optimize):
yacc_cleanup()
return g
# <ah> Allow multiple instances of parser
class ParserPrototype(object):
def __init__(self, magic=None):
if magic != "xyzzy":
raise YaccError('Use yacc()')
def init_parser(self, parser=None):
if not parser:
parser = Parser()
parser.productions = self.productions
parser.errorfunc = self.errorfunc
parser.action = self.action
parser.goto = self.goto
parser.method = self.method
parser.require = self.require
return parser
# yacc_cleanup function. Delete all of the global variables
# used during table construction
def yacc_cleanup():
global _lr_action, _lr_goto, _lr_method, _lr_goto_cache
del _lr_action, _lr_goto, _lr_method, _lr_goto_cache
global Productions, Prodnames, Prodmap, Terminals
global Nonterminals, First, Follow, Precedence, LRitems
global Errorfunc, Signature, Requires
del Productions, Prodnames, Prodmap, Terminals
del Nonterminals, First, Follow, Precedence, LRitems
del Errorfunc, Signature, Requires
global _vf, _vfc
del _vf, _vfc
# Stub that raises an error if parsing is attempted without first calling yacc()
def parse(*args,**kwargs):
raise YaccError("yacc: No parser built with yacc()")
| 36.6582
| 192
| 0.497465
|
79520499e4c37d51fe5dbc76c69f3a8814269076
| 8,168
|
py
|
Python
|
helpers/swap_schedule.py
|
kevindkeogh/qlib.py
|
a21b08e4a2147025fbd6d64da689c00b4a073196
|
[
"MIT"
] | 1
|
2020-08-17T05:28:06.000Z
|
2020-08-17T05:28:06.000Z
|
helpers/swap_schedule.py
|
kevindkeogh/qlpy
|
a21b08e4a2147025fbd6d64da689c00b4a073196
|
[
"MIT"
] | null | null | null |
helpers/swap_schedule.py
|
kevindkeogh/qlpy
|
a21b08e4a2147025fbd6d64da689c00b4a073196
|
[
"MIT"
] | null | null | null |
'''
Helpers for generating swap fixing, accrual, and payment schedule dates.
'''
import dateutil.relativedelta
import numpy as np
class Schedule:
'''Swap fixing, accrual, and payment dates
The Schedule class can be used to generate the details for periods
for swaps.
Arguments:
effective (datetime): effective date of the swap
maturity (datetime): maturity date of the swap
length (int): length of the period that the accrual lasts
kwargs
------
second (datetime, optional): second accrual date of the swap
penultimate (datetime, optional): penultimate accrual date of the swap
period_adjustment (str, optional): date adjustment type for the accrual
dates
default: unadjusted
available: following,
modified following,
preceding
payment_adjustment (str, optional): date adjustment type for the payment
dates
default: unadjusted
available: following,
modified following,
preceding
fixing_lag (int, optional): fixing lag for fixing dates
default: 2
period_length (str, optional): period type for the length
default: months
available: weeks, days
Attributes:
periods (np.recarray): numpy record array of period data
takes the form [fixing_date, accrual_start,
accrual_end, payment_date]
'''
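# A minimal usage sketch (dates and period length are illustrative only):
#
#     import datetime
#     sched = Schedule(datetime.datetime(2019, 1, 15),
#                      datetime.datetime(2022, 1, 15),
#                      3)                        # quarterly accrual periods
#     sched.periods['payment_date']              # numpy datetime64 array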
def __init__(self, effective, maturity, length,
second=False, penultimate=False,
period_adjustment='unadjusted',
payment_adjustment='unadjusted',
fixing_lag=2, period_length='months'):
# variable assignment
self.effective = effective
self.maturity = maturity
self.length = length
self.period_delta = self._timedelta(length, period_length)
self.period_adjustment = period_adjustment
self.payment_adjustment = payment_adjustment
self.second = second
self.penultimate = penultimate
self.fixing_lag = fixing_lag
self.period_length = period_length
# date generation routine
self._gen_periods()
self._create_schedule()
def _gen_periods(self):
'''Private method to generate the date series
'''
if bool(self.second) ^ bool(self.penultimate):
raise Exception('If specifying second or penultimate dates, '
'must select both')
if self.second:
self._period_ends = self._gen_dates(self.second,
self.penultimate,
self.period_delta,
'unadjusted')
self._period_ends = [self.second] + self._period_ends + [self.maturity]
self._adjusted_period_ends = self._gen_dates(self.second,
self.penultimate,
self.period_delta,
self.period_adjustment)
self._adjusted_period_ends = [self.second] + \
self._adjusted_period_ends + \
[self.maturity]
else:
self._period_ends = self._gen_dates(self.effective,
self.maturity,
self.period_delta,
'unadjusted')
self._adjusted_period_ends = self._gen_dates(self.effective,
self.maturity,
self.period_delta,
self.period_adjustment)
self._period_starts = [self.effective] + self._adjusted_period_ends[:-1]
self._fixing_dates = self._gen_date_adjustments(self._period_starts,
-self.fixing_lag,
adjustment='preceding')
self._payment_dates = self._gen_date_adjustments(self._period_ends,
0,
adjustment=self.payment_adjustment)
def _create_schedule(self):
        '''Build the numpy record array of periods from the generated dates.
        '''
self.periods = np.rec.fromarrays(self._np_dtarrays(self._fixing_dates,
self._period_starts,
self._adjusted_period_ends,
self._payment_dates),
dtype=[('fixing_date', 'datetime64[D]'),
('accrual_start', 'datetime64[D]'),
('accrual_end', 'datetime64[D]'),
('payment_date', 'datetime64[D]')])
        if bool(self.second) ^ bool(self.penultimate):
            raise Exception('If specifying second or penultimate dates, '
                            'must select both')
def _timedelta(self, delta, period_length):
if period_length == 'months':
return dateutil.relativedelta.relativedelta(months=delta)
elif period_length == 'weeks':
return dateutil.relativedelta.relativedelta(weeks=delta)
elif period_length == 'days':
return dateutil.relativedelta.relativedelta(days=delta)
else:
raise Exception('Period length {period_length} not '
'recognized'.format(**locals()))
    def _gen_dates(self, effective, maturity, delta, adjustment):
        dates = []
        current = maturity
        # walk backwards from maturity one period at a time; counter starts
        # at 0 so the first step back lands on maturity - delta
        counter = 0
        while current > effective:
            dates.append(self._date_adjust(current, adjustment))
            counter += 1
            current = maturity - (delta * counter)
        return dates[::-1]
    def _date_adjust(self, date, adjustment):
        if adjustment == 'unadjusted':
            return date
        elif adjustment == 'following':
            if date.weekday() < 5:
                return date
            else:
                return date + self._timedelta(7 - date.weekday(), 'days')
        elif adjustment == 'preceding':
            if date.weekday() < 5:
                return date
            else:
                # roll Saturday/Sunday back to the preceding Friday
                return date - self._timedelta(date.weekday() - 4, 'days')
        elif adjustment == 'modified following':
            if date.month == self._date_adjust(date, 'following').month:
                return self._date_adjust(date, 'following')
            else:
                # following would cross into the next month, so roll back to
                # the preceding business day instead
                return self._date_adjust(date, 'preceding')
        else:
            raise Exception('Adjustment period not recognized')
def _gen_date_adjustments(self, dates, delta, adjustment='unadjusted'):
adjusted_dates = []
for date in dates:
adjusted_date = date + self._timedelta(delta, 'days')
adjusted_date = self._date_adjust(adjusted_date, adjustment)
adjusted_dates.append(adjusted_date)
return adjusted_dates
def _np_dtarrays(self, *args):
fmt = '%Y-%m-%d'
arrays = []
for arg in args:
arrays.append(np.asarray([np.datetime64(date.strftime(fmt)) for date in arg]))
return tuple(arrays)
| 44.63388
| 92
| 0.483105
|
795205f7d6ffb935a725da9d72e08c20092e5543
| 321
|
py
|
Python
|
markdown_external_link_finder/extract.py
|
MatMoore/markdown-external-link-finder
|
bb04d2573e150d8efe61063deafa3119c5f2ef3f
|
[
"MIT"
] | null | null | null |
markdown_external_link_finder/extract.py
|
MatMoore/markdown-external-link-finder
|
bb04d2573e150d8efe61063deafa3119c5f2ef3f
|
[
"MIT"
] | 4
|
2019-06-04T22:36:17.000Z
|
2021-06-25T15:34:31.000Z
|
markdown_external_link_finder/extract.py
|
MatMoore/markdown-external-link-finder
|
bb04d2573e150d8efe61063deafa3119c5f2ef3f
|
[
"MIT"
] | null | null | null |
import mistune
from .util import LinkRenderer
def extract_markdown_links(markdown_files):
impl = LinkRenderer()
renderer = mistune.Markdown(renderer=impl)
for markdown_file in markdown_files:
with open(markdown_file) as f:
text = f.read()
renderer(text)
yield from impl.urls
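# Usage sketch (the file names below are hypothetical, not part of this module):
#
#   for url in extract_markdown_links(['README.md', 'docs/intro.md']):
#       print(url)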
| 24.692308
| 46
| 0.694704
|
795206831faef22fcc39baa50c1cf26f43878106
| 4,641
|
py
|
Python
|
samples/oauth/oauth_on_appengine/appengine_utilities/flash.py
|
dorosh/gdata-python-client
|
eb6826ba1e6046a202158d4fefa672150efa0ca5
|
[
"Apache-2.0"
] | 2,293
|
2015-01-02T12:46:10.000Z
|
2022-03-29T09:45:43.000Z
|
samples/oauth/oauth_on_appengine/appengine_utilities/flash.py
|
dorosh/gdata-python-client
|
eb6826ba1e6046a202158d4fefa672150efa0ca5
|
[
"Apache-2.0"
] | 315
|
2015-05-31T11:55:46.000Z
|
2022-01-12T08:36:37.000Z
|
samples/oauth/oauth_on_appengine/appengine_utilities/flash.py
|
dorosh/gdata-python-client
|
eb6826ba1e6046a202158d4fefa672150efa0ca5
|
[
"Apache-2.0"
] | 1,033
|
2015-01-04T07:48:40.000Z
|
2022-03-24T09:34:37.000Z
|
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import sys
import Cookie
import pickle
from time import strftime
from django.utils import simplejson
COOKIE_NAME = 'appengine-utilities-flash'
class Flash(object):
"""
Send messages to the user between pages.
When you instantiate the class, the attribute 'msg' will be set from the
cookie, and the cookie will be deleted. If there is no flash cookie, 'msg'
will default to None.
To set a flash message for the next page, simply set the 'msg' attribute.
    Example pseudocode:
if new_entity.put():
flash = Flash()
flash.msg = 'Your new entity has been created!'
return redirect_to_entity_list()
Then in the template on the next page:
{% if flash.msg %}
<div class="flash-msg">{{ flash.msg }}</div>
{% endif %}
"""
def __init__(self, cookie=None):
"""
Load the flash message and clear the cookie.
"""
self.no_cache_headers()
# load cookie
if cookie is None:
browser_cookie = os.environ.get('HTTP_COOKIE', '')
self.cookie = Cookie.SimpleCookie()
self.cookie.load(browser_cookie)
else:
self.cookie = cookie
# check for flash data
if self.cookie.get(COOKIE_NAME):
# set 'msg' attribute
cookie_val = self.cookie[COOKIE_NAME].value
# we don't want to trigger __setattr__(), which creates a cookie
try:
self.__dict__['msg'] = simplejson.loads(cookie_val)
except:
                # not able to load the json, so do not set the message. This
                # should catch the case where the browser doesn't delete the
                # cookie in time for the next request and only blanks out the
                # content.
pass
# clear the cookie
self.cookie[COOKIE_NAME] = ''
self.cookie[COOKIE_NAME]['path'] = '/'
self.cookie[COOKIE_NAME]['expires'] = 0
print self.cookie[COOKIE_NAME]
else:
# default 'msg' attribute to None
self.__dict__['msg'] = None
def __setattr__(self, name, value):
"""
Create a cookie when setting the 'msg' attribute.
"""
if name == 'cookie':
self.__dict__['cookie'] = value
elif name == 'msg':
self.__dict__['msg'] = value
self.__dict__['cookie'][COOKIE_NAME] = simplejson.dumps(value)
self.__dict__['cookie'][COOKIE_NAME]['path'] = '/'
print self.cookie
else:
raise ValueError('You can only set the "msg" attribute.')
def no_cache_headers(self):
"""
Adds headers, avoiding any page caching in the browser. Useful for highly
dynamic sites.
"""
print "Expires: Tue, 03 Jul 2001 06:00:00 GMT"
print strftime("Last-Modified: %a, %d %b %y %H:%M:%S %Z")
print "Cache-Control: no-store, no-cache, must-revalidate, max-age=0"
print "Cache-Control: post-check=0, pre-check=0"
print "Pragma: no-cache"
| 38.675
| 82
| 0.653307
|
795206a94949cc474a0065dbbb27278c303d0864
| 6,820
|
py
|
Python
|
cli/browser.py
|
windperson/private-docker-registry
|
23f2c0d7ecd507486c9979d0e2e9b69a1780839f
|
[
"Apache-2.0"
] | null | null | null |
cli/browser.py
|
windperson/private-docker-registry
|
23f2c0d7ecd507486c9979d0e2e9b69a1780839f
|
[
"Apache-2.0"
] | null | null | null |
cli/browser.py
|
windperson/private-docker-registry
|
23f2c0d7ecd507486c9979d0e2e9b69a1780839f
|
[
"Apache-2.0"
] | null | null | null |
import json
import requests
import sys
''' Disable Warnings when using verify=False'''
'''requests.packages.urllib3.disable_warnings()'''
def get_reqistry_request(url, username=None, password=None, ssl=False):
req = None
if ssl==True:
proto="https://"
else:
proto="http://"
url_endpoint = proto + url
s = requests.Session()
if(username!=None):
s.auth = (username, password)
try:
req = s.get(url_endpoint, verify=False)
except requests.ConnectionError:
print 'Cannot connect to Registry'
return req
def get_registry_catalog_request(url, username=None, password=None, ssl=False):
requrl = url+"/v2/_catalog"
req = get_reqistry_request(requrl, username, password, ssl)
return req
def get_registry_tag_request(url, repo, username=None, password=None, ssl=False):
requrl = url + "/v2/" + repo + "/tags/list"
req = get_reqistry_request(requrl, username, password, ssl)
return req
'''
Extracts the username and password from the url specified in case of Basic Authentication
enabled for a Docker registry
Example:-
If the url specified is like exampleuser:exampleuser@docker_registry_host:port
then, the username is exampleuser, password is exampleuser and url is docker_registry_host:port
'''
def extract_url(url):
uname_pwd_delimeter=":"
auth_ip_delimeter="@"
position_ip_delimeter=url.find(auth_ip_delimeter)
if position_ip_delimeter==-1:
return None, None, url
else:
delimiter_uname_pwd_pos = url.find(uname_pwd_delimeter)
delimeter_auth_ip_pos = position_ip_delimeter
username = url[:delimiter_uname_pwd_pos]
password = url[delimiter_uname_pwd_pos+1:delimeter_auth_ip_pos]
url_endpoint = url[delimeter_auth_ip_pos+1:]
return username, password, url_endpoint
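'''
Illustrative behaviour of extract_url, following the description above
(the values are made up):
    extract_url('exampleuser:examplepass@registry-host:5000')
        returns ('exampleuser', 'examplepass', 'registry-host:5000')
    extract_url('registry-host:5000')
        returns (None, None, 'registry-host:5000')
'''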
def get_all_repos(url, ssl=False):
username, password, url_endpoint = extract_url(url)
req = get_registry_catalog_request(url_endpoint, username, password, ssl)
if(req!=None):
parsed_json = json.loads(req.text)
repo_array = parsed_json['repositories']
else:
return None
return repo_array
def search_for_repo(url, repo_search_name, ssl=False) :
repo_array = get_all_repos(url, ssl);
repo_dict_search = {}
if repo_search_name in repo_array:
parsed_repo_tag_req_resp = get_tags_for_repo(url, repo_search_name, ssl)
repo_dict_search[repo_search_name] = parsed_repo_tag_req_resp
else:
''' Get all the repos '''
repo_dict = get_all_repo_dict(url, repo_array, ssl)
        if any(key.find(repo_search_name) != -1 for key in repo_dict):
print "available options:- "
for key in repo_dict:
if(key.find(repo_search_name)!=-1):
repo_dict_search[key] = get_tags_for_repo(url, key, ssl)
return repo_dict_search
def get_tags_for_repo(url, repo, ssl=False):
username, password, url_endpoint = extract_url(url)
repo_tag_url_req = get_registry_tag_request(url_endpoint, repo, username, password, ssl)
parsed_repo_tag_req_resp = json.loads(repo_tag_url_req.text)
return parsed_repo_tag_req_resp["tags"]
'''
Gets the entire repository dictionary
'''
def get_all_repo_dict(url, repo_array,ssl=False):
repo_dict = {}
if (repo_array!=None):
for repo in repo_array:
parsed_repo_tag_req_resp = get_tags_for_repo(url, repo, ssl)
repo_dict[repo] = parsed_repo_tag_req_resp
return repo_dict
'''
Decorates the search results to be printed on the screen
'''
def decorate_list(repo_dict):
decorated_list_values = ""
if(len(repo_dict)==0):
return "No results!"
counter = 1;
for repo_key in repo_dict:
decorated_list_values += "\n-----------" + "\n" + str(counter) + ") Name: " + repo_key
decorated_list_values += "\nTags: "
counter+=1;
for tag in repo_dict[repo_key]:
decorated_list_values += tag + '\t'
decorated_list_values += "\n\n" + str(counter-1) + " images found !"
return decorated_list_values
'''
Decorates the search results as an HTML listing page
'''
def decorate_html(repo_dict, regurl):
decorated_list_values = "<html><head><title>Docker Registry Listing</title>\
<script src='http://cdnjs.cloudflare.com/ajax/libs/list.js/1.1.1/list.min.js'></script> \
<link rel='stylesheet' type='text/css' href='/css/browser_web.css'></head> \
<body><h1>Docker Registry Listing</h1> \
<div id='users'>\
<input class='search' placeholder='Search' />\
<button class='sort' data-sort='name'>\
Sort by name </button>"
if(len(repo_dict)==0):
decorated_list_values += "<p><h2>No results!</h2></p></body></html>"
return decorated_list_values
counter = 1;
decorated_list_values += "<p><ul class='list'>"
for repo_key in repo_dict:
decorated_list_values += "<li><h2 class='name'>" + str(counter) + ". " + repo_key +"</h2>"
counter+=1;
for tag in repo_dict[repo_key]:
decorated_list_values += "<p class='born'><b>[" + tag + "]</b>: docker pull " + regurl + "/" + repo_key + ":" + tag + "</p><br />"
decorated_list_values += "</li>"
decorated_list_values += "</ul>";
'''decorated_list_values += "<p><h2>" + + " images found !" + "</h2></p>"'''
decorated_list_values += "<script>var options = { valueNames: [ 'name', 'born' ]}; var userList = new List('users', options);</script></body></html>"
return decorated_list_values
def usage():
return "Usage: browser.py <registry_endpoint> <keyword> <value> <ssl>\
\nValid keywords : search, list \
\nValid values:- \
\nFor keyword search, use the value as the image name. For eg:- search redis\
\nFor keyword list, use the value 'all' without quotes to get a list of all the docker image repos. For eg:- list all\
\nFor eg:- python browser.py uname:pwd@registry_endpoint:port search busybox\
\nIf you use SSL, then specify 'ssl'\
\nFor eg:- python browser.py uname:pwd@registry_endpoint:port search busybox ssl\
\nFor more information, visit:- https://github.com/vivekjuneja/docker_registry_cli/"
if __name__ == "__main__":
len_sys_argv = len(sys.argv[1:])
if len_sys_argv < 3:
print usage()
elif len_sys_argv >= 3:
commandlineargs = sys.argv[1:]
regurl = commandlineargs[0]
keyword = commandlineargs[1]
repo_to_search = commandlineargs[2]
ssl_flag = False
if len_sys_argv == 4:
ssl = commandlineargs[3]
if ssl == "ssl":
ssl_flag = True
search_results = None
if keyword=="search":
search_results = search_for_repo(regurl, repo_to_search, ssl_flag)
print decorate_list(search_results)
elif keyword=="list":
all_repos = get_all_repos(regurl, ssl_flag)
search_results = get_all_repo_dict(regurl, all_repos, ssl_flag)
print decorate_list(search_results)
elif keyword=="html":
all_repos = get_all_repos(regurl, ssl_flag)
search_results = get_all_repo_dict(regurl, all_repos, ssl_flag)
print decorate_html(search_results, regurl)
else:
print usage()
sys.exit(1)
| 27.063492
| 151
| 0.71437
|
795206db4d172f7270f394e8951e6628a0493cf1
| 9,871
|
py
|
Python
|
hostscripts/rpm-packaging/createproject.py
|
dirkmueller/automation
|
a2c3f6ce5c1642d70e46812929f7b18c42d785d7
|
[
"Apache-2.0"
] | 61
|
2015-02-01T04:43:09.000Z
|
2021-12-10T08:34:02.000Z
|
hostscripts/rpm-packaging/createproject.py
|
dirkmueller/automation
|
a2c3f6ce5c1642d70e46812929f7b18c42d785d7
|
[
"Apache-2.0"
] | 2,297
|
2015-01-05T12:23:32.000Z
|
2022-03-21T09:23:26.000Z
|
hostscripts/rpm-packaging/createproject.py
|
dirkmueller/automation
|
a2c3f6ce5c1642d70e46812929f7b18c42d785d7
|
[
"Apache-2.0"
] | 162
|
2015-01-13T12:30:44.000Z
|
2022-02-21T12:12:41.000Z
|
#!/usr/bin/python3
# vim: sw=4 et
# Copyright (c) 2016 SUSE LINUX GmbH, Nuernberg, Germany.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function
import argparse
import glob
import os
import platform
import shutil
import sys
import tempfile
import time
import pymod2pkg
import sh
from sh import Command
def pymodule2pkg(spectemplate):
specname = os.path.splitext(spectemplate)[0]
modulename = os.path.splitext(os.path.basename(specname))[0]
pkgname = pymod2pkg.module2package(
modulename, platform.linux_distribution()[0] or 'suse')
if modulename == 'openstack-macros':
pkgname = modulename
return pkgname
def get_osc_user():
import osc.conf
osc.conf.get_config()
return osc.conf.get_apiurl_usr(osc.conf.config['apiurl'])
def upload_meta(project, build_repository, linkproject):
projectlink = ''
if linkproject:
projectlink = '<link project="%s"/>\n' % linkproject
description = ''
if linkproject:
if 'ZUUL_UUID' in os.environ:
description = """
This project tests the following Zuul Change IDs: %(ZUUL_CHANGE_IDS)s\n
Branch used: %(ZUUL_BRANCH)s\n
Project used: %(ZUUL_PROJECT)s
""" % (os.environ)
templ = """
<project name="%(project)s">
<title>Autogenerated CI project</title>
<description>
%(description)s
</description>
<url>
%(url)s
</url>
%(projectlink)s
<person userid="%(user)s" role="maintainer"/>
<publish>
<disable/>
</publish>
%(build_repository)s
</project>""" % ({'project': project,
'user': get_osc_user(),
'description': description,
'url': os.environ.get('BUILD_URL'),
'projectlink': projectlink,
'build_repository': build_repository})
with tempfile.NamedTemporaryFile() as meta:
meta.write(templ.encode('UTF-8'))
meta.flush()
print('Updating meta for ', project)
# work around build service bug that forgets the publish flag
# https://github.com/openSUSE/open-build-service/issues/7126
for success_counter in range(2):
# work around build service bug that triggers a database deadlock
for fail_counter in range(1, 5):
try:
sh.osc('api', '-T', meta.name,
'/source/%s/_meta' % project)
break
except sh.ErrorReturnCode_1:
# Sleep a bit and try again. This has not been
# scientifically proven to be the correct sleep factor,
# but it seems to work
time.sleep(2)
continue
# wait for the source service to catch up with creation
if success_counter == 0:
# Sleep a bit and try again. This has not been scientifically
# proven to be the correct sleep factor, but it seems to work
time.sleep(3)
def upload_meta_enable_repository(project, linkproject):
repository = """
<repository name="standard" %(repoflags)s>
<path project="%(linkproject)s" repository="standard"/>
<arch>x86_64</arch>
</repository>
""" % ({'linkproject': linkproject,
'repoflags': 'rebuild="direct" block="local" linkedbuild="localdep"'})
upload_meta(project, repository, linkproject)
def freeze_project(project):
"""Generate a _frozenlink file for the project"""
result = sh.osc('api', '-X', 'POST', '/source/%s?cmd=freezelink' % project)
if '<status code="ok" />' not in result:
print('WARNING: freeze the project fails: %s' % result)
def create_new_build_project(workdir, project, linkproject):
sh.mkdir('-p', workdir)
olddir = os.getcwd()
try:
os.chdir(workdir)
if linkproject:
upload_meta_enable_repository(project, linkproject)
freeze_project(project)
sh.osc('init', project)
finally:
os.chdir(olddir)
def generate_pkgspec(pkgoutdir, spectemplate, pkgname):
obsservicedir = '/usr/lib/obs/service/'
outdir = ('--outdir', pkgoutdir)
olddir = os.getcwd()
try:
os.chdir(pkgoutdir)
renderspec = Command(os.path.join(obsservicedir, 'renderspec'))
renderspec(
'--input-template', os.path.join(olddir, spectemplate),
'--output-name', pkgname + '.spec', *outdir)
format_spec_file = Command(
os.path.join(obsservicedir, 'format_spec_file'))
format_spec_file(*outdir)
# configure a download cache to avoid downloading the same files
download_env = os.environ.copy()
download_env["CACHEDIRECTORY"] = os.path.join(
os.path.expanduser("~"), ".cache", "download_files")
download_files = Command(os.path.join(obsservicedir, 'download_files'))
download_files(_env=download_env, *outdir)
finally:
os.chdir(olddir)
def osc_mkpac(workdir, packagename):
olddir = os.getcwd()
try:
os.chdir(workdir)
sh.osc('mkpac', packagename)
finally:
os.chdir(olddir)
def spec_is_modified(pkgoutdir, project, pkgname):
specname = pkgname + ".spec"
cached_spec = os.path.join(pkgoutdir, '.osc', specname)
cleanup = False
if not os.path.exists(cached_spec):
cleanup = True
sh.osc('api', '/source/%s/%s/%s.spec' % (
project, pkgname, pkgname), _out=cached_spec)
r = sh.cmp(
'-s', os.path.join(pkgoutdir, specname), cached_spec, _ok_code=[0, 1])
if cleanup:
os.remove(cached_spec)
return r.exit_code == 1
def osc_detachbranch(workdir, project, pkgname):
olddir = os.getcwd()
try:
os.chdir(os.path.join(workdir))
sh.osc('detachbranch', project, pkgname)
os.mkdir(pkgname + '.b')
for f in glob.glob(os.path.join(pkgname, '*')):
os.rename(f, os.path.join(pkgname + '.b', os.path.basename(f)))
sh.rm('-rf', pkgname)
sh.osc('co', pkgname)
for f in glob.glob(os.path.join(pkgname + '.b', '*')):
dst = os.path.basename(f)
try:
os.unlink(os.path.join(pkgname, dst))
except OSError:
pass
os.rename(f, os.path.join(pkgname, dst))
os.rmdir(pkgname + '.b')
finally:
os.chdir(olddir)
def osc_commit_all(workdir, packagename):
olddir = os.getcwd()
try:
os.chdir(os.path.join(workdir, packagename))
sh.osc('addremove')
for o in sh.osc('service', 'localrun', 'source_validator'):
if o.startswith('###ASK'):
sh.osc('rm', '--force', o.strip().split()[1])
sh.osc('commit', '--noservice', '-n')
finally:
os.chdir(olddir)
def copy_extra_sources(specdir, pkgoutdir):
for f in glob.glob(os.path.join(specdir, '*')):
if f.endswith(".j2"):
continue
shutil.copy2(f, pkgoutdir)
def create_project(worktree, project, linkproject):
workdir = os.path.join(os.getcwd(), 'out')
sh.rm('-rf', workdir)
create_new_build_project(workdir, project, linkproject)
try:
existing_pkgs = [x.strip() for x in
sh.osc('ls', '-e', project, _iter=True)]
except Exception:
existing_pkgs = []
alive_pkgs = set()
worktree_pattern = os.path.join(worktree, 'openstack', '*', '*.spec.j2')
for spectemplate in sorted(glob.glob(worktree_pattern)):
pkgname = pymodule2pkg(spectemplate)
alive_pkgs.add(pkgname)
print(pkgname)
sys.stdout.flush()
pkgoutdir = os.path.join(workdir, pkgname)
osc_mkpac(workdir, pkgname)
copy_extra_sources(os.path.dirname(spectemplate), pkgoutdir)
generate_pkgspec(
pkgoutdir,
spectemplate, pkgname)
if pkgname in existing_pkgs:
if spec_is_modified(pkgoutdir, project, pkgname):
osc_detachbranch(workdir, project, pkgname)
print("Committing update to %s" % pkgname)
osc_commit_all(workdir, pkgname)
else:
print("Adding new pkg %s" % pkgname)
osc_commit_all(workdir, pkgname)
if not alive_pkgs:
print("Worktree does not contain any packages?")
sys.exit(1)
# remove no longer alive pkgs
for i in existing_pkgs:
if not linkproject and i not in alive_pkgs:
print("Removing outdated ", i)
sh.osc('rdelete', '-m', 'x', project, i)
def main():
parser = argparse.ArgumentParser(
description='Build a testproject for a given rpm-packaging checkout')
parser.add_argument('worktree',
help='directory with a rpm-packaging checkout')
parser.add_argument('project',
help='name of the destination buildservice project')
parser.add_argument('--linkproject',
help='create project link to given project')
args = parser.parse_args()
sh.ErrorReturnCode.truncate_cap = 9000
create_project(args.worktree, args.project, args.linkproject)
if __name__ == '__main__':
main()
| 31.944984
| 79
| 0.618884
|
7952079764248876b692a1d715776a333831446d
| 24,134
|
py
|
Python
|
Quad_Code_share/Main_Code_Automous.py
|
EvanHope/SeniorProject
|
0980f7187d49988d2d81e34506fa97e7c8cca3a4
|
[
"MIT"
] | null | null | null |
Quad_Code_share/Main_Code_Automous.py
|
EvanHope/SeniorProject
|
0980f7187d49988d2d81e34506fa97e7c8cca3a4
|
[
"MIT"
] | null | null | null |
Quad_Code_share/Main_Code_Automous.py
|
EvanHope/SeniorProject
|
0980f7187d49988d2d81e34506fa97e7c8cca3a4
|
[
"MIT"
] | 1
|
2021-12-02T21:39:18.000Z
|
2021-12-02T21:39:18.000Z
|
import time
import spidev
import math
import argparse
import sys
import navio.util
#import AccelGyroMag
import navio.mpu9250 # For magnetometer calibration only
import madgwickahrs.madgwickahrs as attitude
import navio.pwm
import navio.adc
import navio.leds
#import GPS
import numpy as np
import navio.ms5611
import os
import navio.rcinput
from transforms3d.euler import quat2euler
import filterShawn
import bufferShawn
import numIntegration
import kalmanFilterPython
# ----------------------------------------------------------------------
# Nic and Shawn's variables -------------------------------START--------
# ----------------------------------------------------------------------
convFact = 180/math.pi
def rad2Deg(rad):
return rad*convFact
def deg2Rad(deg):
return deg/convFact
def rangeCoeff(xLims, yLims):
coeffRange = [0,0]
# calc slope and y intercept
coeffRange[0] = float(yLims[1]-yLims[0])/float(xLims[1]-xLims[0])
coeffRange[1] = float(yLims[1]-xLims[1]*coeffRange[0])
#print(coeffRange)
return coeffRange
def rangeD(inputD,rangeCoeff):
return inputD*rangeCoeff[0] + rangeCoeff[1]
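# Worked example (hypothetical numbers) of the linear mapping implemented by
# rangeCoeff/rangeD: mapping an RC input range of [1000, 2000] onto an output
# range of [-30, 30] degrees gives slope 0.06 and intercept -90.0, so
#   c = rangeCoeff([1000, 2000], [-30, 30])
#   rangeD(1000, c)  # -> -30.0 (stick at minimum)
#   rangeD(1500, c)  # ->   0.0 (stick centered)
#   rangeD(2000, c)  # ->  30.0 (stick at maximum)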
rc0in = [1065,1962]
rc1in = [1065,1960]
rc2in = [1065,1970]
#rc0in = [1068, 1930] # roll, ailerons
#rc1in = [1067, 1930] # pitch, elevators
#rc2in = [1073, 1925] # throttle
#rc3in = [1072, 1922] # yaw, rudder
#rc4in = [1065, 1933] # 3 pos flight mode switch, C
#rc5in = [1065, 1933] # extra sliders, can be used for tuning
#rc6in = [1065, 1933] # extra sliders, can be used for tuning
#rc7in = [1065, 1933] # 3 position switch, E
#rc8in = [1065, 1933] # 2 position switch, A
#prollIn = [-1000,1000]
rc0out = [-30,30]
rc1out = [.250,.500]
#rc0outI = [.16,1.6]
rc2out = [.99,1.75]
#rc5out = [.001,.01]
#rc6out = [.01,1]
throttle = 1.1
prollOut = [-.15,.15]
rc0c = rangeCoeff(rc0in,rc0out)
rc1c = rangeCoeff(rc1in,rc1out)
#rc0cI = rangeCoeff(rc0in,rc0outI)
rc2c = rangeCoeff(rc2in,rc2out)
#rc5c = rangeCoeff(rc5in,rc5out)
#rc6c = rangeCoeff(rc6in,rc6out)
# set this flag to true to run the freq sweep, otherwise disable
excitation = False
zeroed = False
# set this flag to run the rc calibration (output all values to record max/min)
# rcCal = false
# yaw gains
kpy = 19.39823
kiy = .387965
kdy = 7.525825
# ZN tuning
kp = .12057
ki = .321092
kd = .011319
# Disnmore Herrington Tuning
#kp = .508
#ki = .00001
#kd = .0897
# Disnmore Herrington Tuning
kp = .210
ki = .015
kd = .044
# Kevin Tuning
kp = .295
ki = .0015
kd = 3.2
# Disnmore Herrington Tuning RollSimp
#kp = .05
#ki = .0
#kd = .000
# no overshoot
kpz = .04019
kiz = .107031
kdz = .010061
# debug message flag
dbgmsg = True # turn on for specific messages
dbgmsg2 = False # both true to turn on all messages
# filter cutoff frequency and sampling rate
fc = 6
fs = 100.0
order = 2
# run these only once to calculate the coefficients
coeffGyro = filterShawn.butterworth(order,"HPF",fc,fs)
coeffAcc = filterShawn.butterworth(order,"LPF",fc,fs)
kalmanObj = kalmanFilterPython.kalmanFilterShawn()
# create input buffers for gyro raw and accel roll data
gyroRawBuffer = bufferShawn.bufferShawn(order+1)
accelRollBuffer = bufferShawn.bufferShawn(order+1)
# create output buffers for filtered gyro raw and filtered accel roll data
gyroFiltBuffer = bufferShawn.bufferShawn(order+2)
accelFiltBuffer = bufferShawn.bufferShawn(order)
# create roll error buffer
rollErrorCurr = 0
rollErrorPrev = 0
rollErrorSum = 0
pitchErrorPrev = 0
pitchErrorSum = 0
timeStep = .01 # seconds (at 100 Hz)
# initialize vars for Simpsons rule integration
evenRoll = 0
oddRoll = 0
rollGyroSimp = 0
sinr1 = 0
sinr2 = 0
sinr3 = 0
sinr4 = 0
rollKalman = 0
rollGyro = 0
rollGyroRaw = 0
currentGyro = 0
previousGyro = 0
rollAccel = 0
Pyaw = 0
yawStep = False
yawRel = 0
yawOffset = 0
stepInput = False
counter = 0
# set up encoder variables, encoder0 is used to set the starting position to 0 degrees
rollEnc = 0
encoder0 = 0
if(excitation):
n = 0
A = .04
wn = 1
zeta = 1
kp = wn**2
kd = 2*zeta*wn
else:
zeta = 1
wn = 1
#B = 51.3341 # experimentally determined, in radians
B = 2941.2 # experimentally determined, in degrees
A = -1.5856 # experimental
wn = 4.96 # experimental
#kp = wn**2.0
#kd = 2.0*zeta*wn
# ----------------------------------------------------------------------
# Nic and Shawn's variables --------------------------------END---------
# ----------------------------------------------------------------------
# current_alt has been zeroed out to prevent the code from wigging out
current_alt = 0;
# Getting next available log file number and creating header#
gg = 0
while os.path.exists("Log_Files/datalog%s.csv" % gg):
gg+=1
#header_string = "rates, motor right, motoro left\n"
header_string = "Time, roll, rollr,motor right,motor left,msn1,msn2\n"
fh = open("Log_Files/datalog%s.csv" % gg,"a")
fh.write(header_string)
fh.close()
print ("Initializing ADC")
navio.util.check_apm()
adc = navio.adc.ADC()
analog = [0] * adc.channel_count
print ("Initializing Sensors")
imu = navio.mpu9250.MPU9250()
imu.initialize()
rcin = navio.rcinput.RCInput()
AHRS_data = attitude.AHRS(0.01) #100Hz sensor attitude estimation FIXED
## GPS Disabled
#GPS = GPS.U_blox()
#for ind in range(0, 10):
# GPS.enable_posllh()
led = navio.leds.Led()
baro = navio.ms5611.MS5611()
baro.initialize()
time.sleep(0.25)
baro.refreshPressure()
time.sleep(0.01) # Waiting for pressure data ready 10ms
baro.readPressure()
baro.refreshTemperature()
time.sleep(0.01) # Waiting for temperature data ready 10ms
baro.readTemperature()
baro.calculatePressureAndTemperature()
ground_alt = 0
target_alt = 1 #target alt experimentation.
if baro.PRES < 1013: # small check in case barometer pressure is invalid
ground_alt = 44330.77*(1-(baro.PRES*100/101326)**0.1902632)
print("altitude ground?:")
print(ground_alt)
target_alt = target_alt
print("altitude target?:")
print(target_alt)
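# Sanity check on the barometric formula above (standard-atmosphere values,
# for illustration only): with baro.PRES ~= 1013.25 hPa the ratio
# (PRES*100/101326) is ~1.0, so the computed altitude is ~0 m; with
# baro.PRES ~= 899 hPa it evaluates to roughly 1000 m.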
#ground_alt = 0
led.setColor('Red')
time.sleep(1)
if imu.testConnection():
print ("Connection established: True")
else:
sys.exit("Connection established: False")
accels, rates, m9m = imu.getMotion9()
if m9m[0] == 0:
print ("WARNING: Mag reading zeros, try rebooting Navio")
led.setColor('Magenta')
mag_calibration_flag = False # True means run calibration,
# otherwise use data in
# mag_calibration_constants.txt
# Run Magnetometer calibration if flag is TRUE
if mag_calibration_flag == True:
led.setColor('Yellow')
imu.mag_calibrate()
# Configure servo output
motor_front_pin = 0
motor_back_pin = 1
motor_left_pin = 2
motor_right_pin = 3
motor_front_pwm = navio.pwm.PWM(motor_front_pin)
motor_back_pwm = navio.pwm.PWM(motor_back_pin)
motor_left_pwm = navio.pwm.PWM(motor_left_pin)
motor_right_pwm = navio.pwm.PWM(motor_right_pin)
motor_front_pwm.initialize()
motor_back_pwm.initialize()
motor_left_pwm.initialize()
motor_right_pwm.initialize()
motor_front_pwm.set_period(200)
motor_back_pwm.set_period(200)
motor_left_pwm.set_period(200)
motor_right_pwm.set_period(200)
# Cycling the pwm commands to initialize the devices
motor_front_pwm.set_duty_cycle(1.000)
motor_back_pwm.set_duty_cycle(1.000)
motor_left_pwm.set_duty_cycle(1.000)
motor_right_pwm.set_duty_cycle(1.000)
time.sleep(0.5)
motor_front_pwm.set_duty_cycle(2.000)
motor_back_pwm.set_duty_cycle(2.000)
motor_left_pwm.set_duty_cycle(2.000)
motor_right_pwm.set_duty_cycle(2.000)
time.sleep(0.5)
motor_front_pwm.set_duty_cycle(1.000)
motor_back_pwm.set_duty_cycle(1.000)
motor_left_pwm.set_duty_cycle(1.000)
motor_right_pwm.set_duty_cycle(1.000)
time.sleep(0.5)
# Timers to maintain constant cycle times
timein = time.time()
prev_time = (time.time()-timein)*1000.0
timer_1hz = prev_time
timer_10hz = prev_time
timer_25hz = prev_time
timer_50hz = prev_time
timer_100hz = prev_time
baro_timer = 0
# Declaring variables for use in main loop
rc_data = rcin.read_all()
motor_front = 1.000
motor_back = 1.000
motor_left = 1.000
motor_right = 1.000
gyro_2 = 0
gyro_1 = 0
gyro_0 = 0
cur_time = 0
roll_angle_gyro = 0
gyro_2p = 0
gyro_1p = 0
gyro_0p = 0
pitch_angle_gyro = 0
rates2= [0, 0, 0]
accels2 = [0,0,0]
#------------------------------------------------#
### Declare global variables here ###
# ----------------------------perform RC calibration------------------------------
#doOver = True
#rcCal = int(input("RC Calibration, 1 or 0\n"))
#if(rcCal):
# while(doOver):
# doOver = False
# print("Calibrate RC Inputs")
# print("Minimum Values")
# print("rc_data[0]",rc_data[0],"rc_data[1]",rc_data[1],"rc_data[2]",rc_data[2],"rc_data[3]",rc_data[3],"rc_data[4]",rc_data[4])
# rc0[0] = rc_data[0]
# rc1[0] = rc_data[1]
# rc2[0] = rc_data[2]
# rc3[0] = rc_data[3]
# rc4[0] = rc_data[4]
# print("put all sticks in max position")
# dummy = int(input("press enter\n"))
# print("Maximum Values")
# print("rc_data[0]",rc_data[0],"rc_data[1]",rc_data[1],"rc_data[2]",rc_data[2],"rc_data[3]",rc_data[3],"rc_data[4]",rc_data[4])
# rc0[1] = rc_data[0]
# rc1[1] = rc_data[1]
# rc2[1] = rc_data[2]
# rc3[1] = rc_data[3]
# rc4[1] = rc_data[4]
# # these are the output limits for the RC inputs
# rc0out = [-90,90]
# rc1out = [0,1]
# rc2out = [1,10]
# rc3out = [0,1]
# rc4out = [0,1]
# # these are the coefficients for the RC input ranges
# rc0c = rangeCoeff(rc0,rc0out)
# rc1c = rangeCoeff(rc1,rc1out)
# rc2c = rangeCoeff(rc2,rc2out)
# rc3c = rangeCoeff(rc3,rc3out)
# rc4c = rangeCoeff(rc4,rc4out)
# doOver = int(input("THIS WILL ERASE ALL VALS\nto redo, type 1, otherwise type 0, then press enter\n"))
alts = 0
altitudeErrorSum = 0
altitudeErrorPrev = 0
print ("Starting main loop: here we go!")
while True:
current_time = (time.time()-timein)*1000.0
if m9m[0] == 0:
led.setColor('Magenta')
else:
led.setColor('Green')
if (current_time - timer_100hz) >=10.0: # 10 ms = 100Hz
#print ("In 100Hz loop!")
#### IMU/Attitude and GPS estimation: DO NOT TOUCH ####
for i in range (0, adc.channel_count):
analog[i] = adc.read(i)*0.001
accels, rates, m9m = imu.getMotion9()
accels2[0] = -accels[1]
accels2[1] = -accels[0]
accels2[2] = accels[2]
rates2[0] = rates[1]
rates2[1] = rates[0]
rates2[2] = -rates[2]
AHRS_data.update_imu(rates2, accels2)
yaw,roll,pitch = quat2euler(AHRS_data.quaternion,axes='rzxy')
baro_timer = baro_timer + 1
if (baro_timer == 1): baro.refreshPressure()
elif (baro_timer == 2): baro.readPressure()
elif (baro_timer == 3): baro.refreshTemperature()
elif (baro_timer == 4): baro.readTemperature()
elif (baro_timer == 5):
baro.calculatePressureAndTemperature()
baro_timer = 0
#print baro.PRES
if baro.PRES < 1013: # Only update if barometer is valid
alts = 44330.77*(1-(baro.PRES*100/101326)**0.1902632)
#print "altitude?:"
#print alts
#alts = 0
current_alt = alts - ground_alt
#print current_alt
#buffer = GPS.bus.xfer2([100])
## GPS is disabled ##
#for byt in buffer:
# GPS.scan_ubx(byt)
# if(GPS.mess_queue.empty() != True):
# GPS_data = GPS.parse_ubx()
#### End DO NOT TOUCH ####
#----------------------------------------------------#
#### BEGIN STUDENT SECTION ####
        # This section runs at 100Hz. You can add things for execution
# at other speeds. See the 1Hz loop for display examples. SD
# logging occurs at 10Hz. See the 10Hz if statement for details
# Output commands
# motor_front, motor_back, motor_left, motor_right are the 4 motor commands
# motor range is from 1.000 to 2.000 (1.000 is 0% power)
# R/C Input
# rc_data variable stores all RC inputs (range is [0]-[5])
# each rc_data channel varies between 1000 and 2000
# Sensor data
# yaw, roll, and pitch contain attitude info (float)
# rates[0 to 2] stores the three angular velocity components (rad/s)
# accels[0 to 2] stores the three linear accelerations
# analog[4] and [5] are the two analog inputs (value is in Volts)
        #   current_alt contains the current altitude (relative to
# start up) in meters (from the barometer)
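        # For example (illustrative values only, consistent with the ranges
        # described above): a centered stick reads rc_data[n] around 1500,
        # and setting motor_front = 1.500 commands roughly 50% power on the
        # front motor (1.000 is 0% power, 2.000 is full power).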
# -------------------------ENCODER ROLL-----------------------------------------
# if encoder0 == 0:
# encoder0 = analog[4]
# rollEnc = (analog[4]-encoder0)*(360/5)
# rollEnc = analog[4]*(360.0/5.0)
# -------------------------ENCODER ROLL-----------------------------------------
rc_data = rcin.read_all()
# -------------------------ATTITUDE EST-----------------------------------------
# put current gyro raw data on the head of the gyro input buffer
#gyroRawBuffer.put(rates[1])
#print(gyroRawBuffer.prev(0),rates[1])
#rollKalman = kalmanObj.kalmanFilt(rad2Deg(-math.atan2(accels[0], accels[2])),rad2Deg(rates[1]),timeStep)
# put the roll accelerometer angle on the head of the accel roll input buffer
#accelRollBuffer.put(-math.atan2(accels[0], accels[2]))
# filter gyro data
#gyroFiltBuffer.put(filterShawn.filterEval(coeffGyro,order,gyroRawBuffer,gyroFiltBuffer))
# apply trapezoid rule to the filtered gyro data
#rollGyro = rollGyro + numIntegration.numIntegration("Trap",timeStep,gyroFiltBuffer)
# apply trapezoid rule to the filtered gyro data
#rollGyroRaw = rollGyroRaw + numIntegration.numIntegration("Trap",timeStep,gyroRawBuffer)
# apply simpson's rule to the filtered gyro data
#if((-1.0)**i >=0):
# evenRoll = evenRoll + numIntegration.numIntegration("Simp",timeStep,gyroFiltBuffer)
# rollGyroSimp = evenRoll
#else:
# oddRoll = oddRoll + numIntegration.numIntegration("Simp",timeStep,gyroFiltBuffer)
# rollGyroSimp = oddRoll
# filter acc data using coefficients found earlier
#accelFiltBuffer.put(filterShawn.filterEval(coeffAcc,order,accelRollBuffer,accelFiltBuffer))
#rollAccel = accelFiltBuffer.prev(0)
# -------------------------ATTITUDE EST-----------------------------------------
# -------------------------STABILIZATION-----------------------------------------
# read desired roll from RC stick
#rollDes = rangeD(float(rc_data[0]),rc0c)
#pitchDes = rangeD(float(rc_data[1]),rc0c)
rollDes = 0
pitchDes = 0
#throttle = rangeD(float(rc_data[2]),rc2c) #uncomment for manual control
throttle = 1.0 #for testing Paltitude
#yawRateDes = rangeD(float(rc_data[4]),rc4c)
if rollDes < 7 and rollDes >-7:
rollDes = 0
if pitchDes < 7 and pitchDes > -7:
pitchDes = 0
if throttle < 1.1:
throttle = 1.0
if(yawStep and counter > 300):
Pyaw = 0.05 #### what is Pyaw?
if(stepInput and counter>500):
#print("this is happening")
rollDes = 0
if(stepInput and counter-1000 > 0):
rollDes = 0
if(stepInput and counter-1500 > 0):
rollDes = 0
if(stepInput and counter-2000 > 0):
rollDes = 0
counter = 0
# uncomment for Kalman roll
#rollError = rollDes - rollKalman
#print rollKalman
# uncomment for simpsons rule roll
#rollError = rollDes - rad2Deg(rollAccel-rollGyro)
# uncomment for onboard roll/pitch
rollError = rollDes - rad2Deg(roll)
pitchError = pitchDes - rad2Deg(pitch)
altitudeError = target_alt - current_alt
#print(current_alt)
#print(altitudeError)
#print(rc_data)
#wn = rangeD(float(rc_data[2]),rc2c)
# recalculate at each time step to account for changing wn
#kp = wn**2.0
#kd = 2.0*zeta*wn
#kd = rangeD(float(rc_data[2]),rc2c)
#print kd
#kd = rangeD(float(rc_data[5]),rc5c)
#kp = rangeD(float(rc_data[6]),rc6c)
#print kp, kd
if(not excitation):
# NDI control
#Proll = (kd*rad2Deg(float(-rates[1])))/(B*1.8)+(kp*(rollError))/B-(A*rad2Deg(float(rates[1])))/(B*1.4)
#yawProportional = kpy * rates[2]
#derivative = kd * deg2Rad((rollError - rollErrorPrev)/timeStep)
#yawDerivative = kdy * -rates[2]
#print(deg2Rad((rollError - rollErrorPrev)/timeStep))
#yawErrorSum = yawErrorSum + (yawError + yawErrorPrev)*(timeStep/2.0)
#print(rollErrorSum)
#yawIntegral = ki * deg2Rad(yawErrorSum)
rollProportional = kp * deg2Rad(rollError)
#derivative = kd * deg2Rad((rollError - rollErrorPrev)/timeStep)
rollDerivative = kd * -deg2Rad(rates[1])
#print(deg2Rad((rollError - rollErrorPrev)/timeStep))
rollErrorSum = rollErrorSum + (rollError + rollErrorPrev)*(timeStep/2.0)
#print(rollErrorSum)
rollIntegral = ki * deg2Rad(rollErrorSum)
pitchProportional = kp * deg2Rad(pitchError)
#derivative = kd * deg2Rad((rollError - rollErrorPrev)/timeStep)
pitchDerivative = kd * -deg2Rad(rates[0])
#print(deg2Rad((rollError - rollErrorPrev)/timeStep))
pitchErrorSum = pitchErrorSum + (pitchError + pitchErrorPrev)*(timeStep/2.0)
#print(rollErrorSum)
pitchIntegral = ki * deg2Rad(pitchErrorSum)
altitudePorportional = kpz * altitudeError
altitudeDerivative = kdz * -current_alt #NOT SURE IF THIS IS CORRECT
altitudeErrorSum = altitudeErrorSum + (altitudeError + altitudeErrorPrev)*(timeStep/2.0)
altitudeIntegral = kiz * altitudeErrorSum
# -------------------------Kill Switch------------------------------------
            # everything in here only happens when the switch is on (up)
if(float(rc_data[4]) > 1700.0): #what is rc_data[4] on controller?
#if(1):
timer = time.time() - timein
#sinr1=0.1*(.37*math.sin(1.5713+2*math.pi*0.2*timer) + .37*math.sin(4.5717+2*0.6*math.pi*timer) + .37*math.sin(1.2140+2*1.0*math.pi*timer) + .37*math.sin(1.0478+2*1.4*math.pi*timer) + .37*math.sin(3.9204+2*math.pi*1.8*timer) + .37*math.sin(4.0099+2*2.2*math.pi*timer) + .37*math.sin(3.4966+2*2.6*math.pi*timer))
#sinr2=0.1*(.37*math.sin(1.6146+2*math.pi*0.3*timer) + .37*math.sin(4.6867+2*0.7*math.pi*timer) + .37*math.sin(1.2267+2*1.1*math.pi*timer) + .37*math.sin(1.0671+2*1.5*math.pi*timer) + .37*math.sin(3.9664+2*math.pi*1.9*timer) + .37*math.sin(3.8699+2*2.3*math.pi*timer) + .37*math.sin(3.5712+2*2.7*math.pi*timer))
#sinr3=(.37*math.sin(1.9535+2*math.pi*0.4*timer) + .37*math.sin(5.2646+2*0.8*math.pi*timer) + .37*math.sin(2.0651+2*1.2*math.pi*timer) + .37*math.sin(2.4636+2*1.6*math.pi*timer) + .37*math.sin(5.6716+2*math.pi*2.0*timer) + .37*math.sin(5.7265+2*2.4*math.pi*timer) + .37*math.sin(5.7810+2*2.8*math.pi*timer))
#sinr4=(.37*math.sin(3.2771+2*math.pi*0.5*timer) + .37*math.sin(1.3417+2*0.9*math.pi*timer) + .37*math.sin(5.5561+2*1.3*math.pi*timer) + .37*math.sin(0.5030+2*1.7*math.pi*timer) + .37*math.sin(4.7331+2*math.pi*2.1*timer) + .37*math.sin(5.9415+2*2.5*math.pi*timer) + .37*math.sin(0.7460+2*2.9*math.pi*timer))
if(rollErrorSum > .5):
rollErrorSum = 0
if(not zeroed):
rollErrorSum = 0
yawOffset = yaw
Proll = rollProportional+rollIntegral+rollDerivative
Ppitch = pitchProportional+pitchIntegral+pitchDerivative
Paltitude = altitudePorportional+altitudeIntegral+altitudeDerivative
#print("this is Paltitude:" + Paltitude)
#print rad2Deg(yawRel)
counter = counter + 1
if(excitation):
timer=(time.time()-timein)
frequencySweep = math.sin(2*math.pi*timer*(.2+.001*n))
n=n+1
motor_right = throttle - A * frequencySweep
motor_left = throttle + A * frequencySweep
else:
#motor_right = 1.4 + sinr1
#motor_left = 1.4 + sinr2
#motor_front = 0
#motor_back = 0
#motor_right = throttle - Proll
motor_right = 0
#motor_right = Paltitude - Proll #might have to add 1.0 for Paltitude??
#print (motor_right)
#motor_left = throttle + Proll
motor_left = 0
#motor_left = Paltitude + Proll
#print (motor_left)
#motor_front = throttle + Ppitch
motor_front = 0
#motor_front = Paltitude + Ppitch
#print (motor_front)
#motor_back = throttle - Ppitch
motor_back = 0
#motor_back = Paltitude - Ppitch #use this for altitude control
#print (motor_back)
zeroed = True
else:
motor_right = 0
motor_left = 0
motor_front = 0
motor_back = 0
zeroed = False
Pyaw = 0
counter = 0
pitchErrorPrev = pitchError
rollErrorPrev = rollError
#altitudeErrorPrev = altitudeError
# LOG DATA
# RC Controller INPUT #
#print (x[4]*(180/math.pi),rollGyro*180/math.pi,x1[4]*(180/math.pi),rollAccel*180/math.pi)
### Data logging feature ###
# GPS is disabled, tab in fh and below to re-enable
#try:
# GPS_data
#except NameError:
# GPS_data = None
#if GPS_data is not None:
#fh = open("Log_Files/datalog%s.csv" % gg,"a")
#log_data = np.array([time.clock(), GPS_data.lat/10000000.0, GPS_data.lon/10000000.0,
#### LOGGING ####
# This is the data to be logged. The header (text at top of file) is edited at the top
# of the program. Add/subtract variables as needed.
#log_data = np.array([time.time()-timein,roll,rates[1],motor_right,motor_left,sinr1,sinr2])
#np.savetxt(fh, log_data.reshape(1,log_data.shape[0]), delimiter=',', fmt='%.6f')
#fh.close()
#### END STUDENT SECTION ####
#---------------------------------------------------#
motor_front_pwm.set_duty_cycle(motor_front)
motor_back_pwm.set_duty_cycle(motor_back)
motor_left_pwm.set_duty_cycle(motor_left)
motor_right_pwm.set_duty_cycle(motor_right)
timer_100hz = current_time # reset timer flag
# end of 100Hz section
if (current_time - timer_50hz) >= 20.0:
timer_50hz = current_time
# End of 50Hz section
if (current_time - timer_25hz) >= 40.0:
timer_25hz = current_time
# End of 25Hz section
if (current_time - timer_10hz) >= 100.0:
# # RC Controller INPUT #
# rc_data = rcin.read_all()
#
#if(dbgmsg and dbgmsg2):
# print("Gyro Filter Buffer Contents")
# print(gyroFiltBuffer)
#print(rad2Deg(rollAccel+rollGyroSimp))
#print(rad2Deg(accelRollBuffer.prev(0)),rad2Deg(accelFiltBuffer.prev(0)))
#print(rad2Deg(rollGyroRaw),rad2Deg(gyroFiltBuffer.prev(0)))
#print rc_data
#print(wn)
#print(rollError)
#print(float(rc_data[2]))
#print(rollAccel*(180/math.pi))
#print((rollGyro+rollAccel)*(180.0/math.pi),(rollGyroSimp+rollAccel)*(180/math.pi))
#print(rollEnc)
#
# #print (x[4]*(180/math.pi),rollGyro*180/math.pi,x1[4]*(180/math.pi),rollAccel*180/math.pi)
#
# ### Data logging feature ###
# # GPS is disabled, tab in fh and below to re-enable
# #try:
# # GPS_data
# #except NameError:
# # GPS_data = None
# #if GPS_data is not None:
# fh = open("Log_Files/datalog%s.csv" % gg,"a")
# #log_data = np.array([time.clock(), GPS_data.lat/10000000.0, GPS_data.lon/10000000.0,
# #### LOGGING ####
# # This is the data to be logged. The header (text at top of file) is edited at the top
# # of the program. Add/subtract variables as needed.
# log_data = np.array([time.clock(),current_alt, yaw, pitch, roll,
# accels[0], accels[1], accels[2], rates[0], rates[1], rates[2],
# m9m[0], m9m[1], m9m[2], rates[0], rates[1], rates[2],accels[0],accels[1],accels[2],rollGyro, rollAccel, rollEnc, (rollGyro+rollAccel)*(180/math.pi)])
# np.savetxt(fh, log_data.reshape(1,log_data.shape[0]), delimiter=',', fmt='%.6f')
#
# fh.close()
# #### END LOGGING ####
#
timer_10hz = current_time
# End of 10Hz section
if (current_time - timer_1hz) >= 1000.0:
# Customizable display message #
#print "Angles:", "{:+3.2f}".format(roll*57.32), "{:+3.2f}".format(pitch*57.32), "{:+3.2f}".format(yaw*57.32)
print('current alt:')
print(current_alt)
print('target alt:')
print(target_alt)
print('Ground alt:')
print(ground_alt)
print('Altitude Error:')
print(altitudeError)
print('rc_data[4] aka kill switch value:')
print(float(rc_data[4]))
print('Paltitude aka what we would replace throttle with if autonomous')
print(Paltitude)
#print "Analogs:", analog[0], analog[1], analog[2], analog[3], analog[4]
#print "Altitude:", current_alt
#print pitch_angle_gyro
#print roll_angle_acc
#print roll_angle_gyro
#if GPS_data is not None:
# print "Location:", "{:+3.6f}".format(GPS_data.lat/10000000.0), "{:+3.6f}".format(GPS_data.lon/10000000.0), "{:+4.1f}".format(GPS_data.heightSea/1000.0)
# print "Loc Accuracy:", "{:+3.3f}".format(GPS_data.horAcc/1000.0), "{:+3.3f}".format(GPS_data.verAcc/1000.0)
#print pitch_angle_gyro
#print accels
timer_1hz = current_time
# End of 1Hz section
| 29.467643
| 314
| 0.662758
|
7952083c0f33e4e80dd5e3eb10385a6f5c5a3e51
| 4,479
|
py
|
Python
|
lib/BatchNorm.py
|
bcrafton/ssdfa
|
661f9059184fde6ba7ad1ca710c5b5a1954c5ea6
|
[
"MIT"
] | 10
|
2019-01-23T17:07:59.000Z
|
2021-07-13T10:18:10.000Z
|
lib/BatchNorm.py
|
bcrafton/ssdfa
|
661f9059184fde6ba7ad1ca710c5b5a1954c5ea6
|
[
"MIT"
] | 1
|
2019-07-30T00:55:58.000Z
|
2019-09-17T13:31:48.000Z
|
lib/BatchNorm.py
|
bcrafton/ssdfa
|
661f9059184fde6ba7ad1ca710c5b5a1954c5ea6
|
[
"MIT"
] | 1
|
2019-07-28T17:28:30.000Z
|
2019-07-28T17:28:30.000Z
|
import tensorflow as tf
import numpy as np
import math
from tensorflow.python.ops import gen_nn_ops
from lib.Layer import Layer
class BatchNorm(Layer):
def __init__(self, input_size, name=None, load=None, train=True, eps=1e-3):
self.input_size = list(input_size)
if len(self.input_size) == 2:
self.dims = [0]
elif len(self.input_size) == 4:
self.dims = [0, 1, 2]
else:
assert(False)
self.size = self.input_size[-1]
self.name = name
self._train = train
self.eps = eps
self.num_parameters = np.prod(self.size) * 2
if load:
print ("Loading Weights: " + self.name)
weight_dict = np.load(load).item()
gamma = weight_dict[self.name + '_gamma']
beta = weight_dict[self.name + '_beta']
if np.shape(gamma) != (self.size,):
print (np.shape(gamma), self.size)
assert(np.shape(gamma) == (self.size,))
if np.shape(beta) != (self.size,):
print (np.shape(beta), self.size)
assert(np.shape(beta) == (self.size,))
else:
gamma = np.ones(shape=self.size)
beta = np.zeros(shape=self.size)
self.gamma = tf.Variable(gamma, dtype=tf.float32)
self.beta = tf.Variable(beta, dtype=tf.float32)
###################################################################
def get_weights(self):
return [(self.name + '_gamma', self.gamma), (self.name + '_beta', self.beta)]
def num_params(self):
return self.num_parameters
def output_shape(self):
if len(self.input_size) == 2:
return self.input_size[1]
elif len(self.input_size) == 4:
return self.input_size[1:4]
else:
assert(False)
###################################################################
def forward(self, X):
mean = tf.reduce_mean(X, axis=self.dims)
_, var = tf.nn.moments(X - mean, axes=self.dims)
A = tf.nn.batch_normalization(x=X, mean=mean, variance=var, offset=self.beta, scale=self.gamma, variance_epsilon=self.eps)
return {'aout':A, 'cache':{}}
def backward(self, AI, AO, DO, cache=None):
mean = tf.reduce_mean(AI, axis=self.dims)
_, var = tf.nn.moments(AI - mean, axes=self.dims)
ivar = 1. / tf.sqrt(self.eps + var)
if len(self.input_size) == 2:
AI = tf.reshape(AI, (self.input_size[0], 1, 1, self.input_size[1]))
            DO = tf.reshape(DO, (self.input_size[0], 1, 1, self.size))  # reshape the incoming gradient, not the activations
[DI, dgamma, dbeta, _, _] = gen_nn_ops.fused_batch_norm_grad_v2(y_backprop=DO, x=AI, scale=self.gamma, reserve_space_1=mean, reserve_space_2=ivar, epsilon=self.eps, is_training=True)
if len(self.input_size) == 2:
DI = tf.reshape(DI, (self.input_size[0], self.size))
return {'dout':DI, 'cache':{}}
def gv(self, AI, AO, DO, cache=None):
if not self._train:
return []
mean = tf.reduce_mean(AI, axis=self.dims)
_, var = tf.nn.moments(AI - mean, axes=self.dims)
ivar = 1. / tf.sqrt(self.eps + var)
if len(self.input_size) == 2:
AI = tf.reshape(AI, (self.input_size[0], 1, 1, self.input_size[1]))
            DO = tf.reshape(DO, (self.input_size[0], 1, 1, self.size))  # reshape the incoming gradient, not the activations
[DI, dgamma, dbeta, _, _] = gen_nn_ops.fused_batch_norm_grad_v2(y_backprop=DO, x=AI, scale=self.gamma, reserve_space_1=mean, reserve_space_2=ivar, epsilon=self.eps, is_training=True)
if len(self.input_size) == 2:
DI = tf.reshape(DI, (self.input_size[0], self.size))
return [(dgamma, self.gamma), (dbeta, self.beta)]
###################################################################
def dfa_backward(self, AI, AO, E, DO):
return self.backward(AI, AO, DO)
def dfa_gv(self, AI, AO, E, DO):
return self.gv(AI, AO, DO)
###################################################################
def lel_backward(self, AI, AO, E, DO, Y, cache):
return self.backward(AI, AO, DO, cache)
def lel_gv(self, AI, AO, E, DO, Y, cache):
return self.gv(AI, AO, DO, cache)
###################################################################
| 34.992188
| 190
| 0.509935
|
79520917eee40acaee7419bedfd4ffadcac052f8
| 11,327
|
py
|
Python
|
app.py
|
khanzaifa37/HR_Help
|
8c010e8f369028f306b5aa396137c1036af872f6
|
[
"MIT"
] | null | null | null |
app.py
|
khanzaifa37/HR_Help
|
8c010e8f369028f306b5aa396137c1036af872f6
|
[
"MIT"
] | null | null | null |
app.py
|
khanzaifa37/HR_Help
|
8c010e8f369028f306b5aa396137c1036af872f6
|
[
"MIT"
] | null | null | null |
import os, sys, shutil, time
import pickle
from flask import Flask, request, jsonify, render_template,send_from_directory
import pandas as pd
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import urllib.request
import json
from geopy.geocoders import Nominatim
from flask_mail import Mail,Message
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
app = Flask(__name__)
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = 'anantisdumb@gmail.com'
app.config['MAIL_PASSWORD'] = '1fuckanant'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail=Mail(app)
def func(X_res):
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats import randint
# prep
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.datasets import make_classification
from sklearn.preprocessing import binarize, LabelEncoder, MinMaxScaler
# models
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Validation libraries
from sklearn import metrics
from sklearn.metrics import accuracy_score, mean_squared_error, precision_recall_curve
from sklearn.model_selection import cross_val_score
#Neural Network
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import learning_curve
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
#Bagging
from sklearn.ensemble import BaggingClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
#Naive bayes
from sklearn.naive_bayes import GaussianNB
#Stacking
from sklearn.preprocessing import LabelEncoder
#reading in CSV's from a file path
train_df = pd.read_csv('trainms.csv')
#missing data
total = train_df.isnull().sum().sort_values(ascending=False)
percent = (train_df.isnull().sum()/train_df.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
#missing_data.head(20)
#print(missing_data)
train_df = train_df.drop(['comments'], axis= 1)
train_df = train_df.drop(['state'], axis= 1)
train_df = train_df.drop(['Timestamp'], axis= 1)
train_df.isnull().sum().max() #just checking that there's no missing data missing...
#train_df.head(5)
# Assign default values for each data type
defaultInt = 0
defaultString = 'NaN'
defaultFloat = 0.0
    # Create lists by data type
intFeatures = ['Age']
stringFeatures = ['Gender', 'Country', 'self_employed', 'family_history', 'treatment', 'work_interfere',
'no_employees', 'remote_work', 'tech_company', 'anonymity', 'leave', 'mental_health_consequence',
'phys_health_consequence', 'coworkers', 'supervisor', 'mental_health_interview', 'phys_health_interview',
'mental_vs_physical', 'obs_consequence', 'benefits', 'care_options', 'wellness_program',
'seek_help']
floatFeatures = []
# Clean the NaN's
for feature in train_df:
if feature in intFeatures:
train_df[feature] = train_df[feature].fillna(defaultInt)
elif feature in stringFeatures:
train_df[feature] = train_df[feature].fillna(defaultString)
elif feature in floatFeatures:
train_df[feature] = train_df[feature].fillna(defaultFloat)
# else:
# #print('Error: Feature %s not recognized.' % feature)
###########################################
#clean 'Gender'
    #Lower-case all the column's elements
gender = train_df['Gender'].str.lower()
#print(gender)
#Select unique elements
gender = train_df['Gender'].unique()
#Made gender groups
male_str = ["male", "m", "male-ish", "maile", "mal", "male (cis)", "make", "male ", "man","msle", "mail", "malr","cis man", "Cis Male", "cis male"]
trans_str = ["trans-female", "something kinda male?", "queer/she/they", "non-binary","nah", "all", "enby", "fluid", "genderqueer", "androgyne", "agender", "male leaning androgynous", "guy (-ish) ^_^", "trans woman", "neuter", "female (trans)", "queer", "ostensibly male, unsure what that really means"]
female_str = ["cis female", "f", "female", "woman", "femake", "female ","cis-female/femme", "female (cis)", "femail"]
for (row, col) in train_df.iterrows():
if str.lower(col.Gender) in male_str:
train_df['Gender'].replace(to_replace=col.Gender, value='male', inplace=True)
if str.lower(col.Gender) in female_str:
train_df['Gender'].replace(to_replace=col.Gender, value='female', inplace=True)
if str.lower(col.Gender) in trans_str:
train_df['Gender'].replace(to_replace=col.Gender, value='trans', inplace=True)
# Drop junk/non-answer entries
stk_list = ['A little about you', 'p']
train_df = train_df[~train_df['Gender'].isin(stk_list)]
###############################################
# Fill missing Age values with the median
train_df['Age'].fillna(train_df['Age'].median(), inplace = True)
# Replace out-of-range values (< 18 or > 120) with the median
s = pd.Series(train_df['Age'])
s[s<18] = train_df['Age'].median()
train_df['Age'] = s
s = pd.Series(train_df['Age'])
s[s>120] = train_df['Age'].median()
train_df['Age'] = s
#Ranges of Age
train_df['age_range'] = pd.cut(train_df['Age'], [0,20,30,65,100], labels=["0-20", "21-30", "31-65", "66-100"], include_lowest=True)
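# Illustrative only (not part of the original pipeline) - how pd.cut maps ages to the bins above:
#   pd.cut([15, 25, 40], [0, 20, 30, 65, 100],
#          labels=["0-20", "21-30", "31-65", "66-100"], include_lowest=True)
#   # -> ['0-20', '21-30', '31-65']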
train_df = train_df.drop(['Country'], axis= 1)
########################################################
#missing data
total = train_df.isnull().sum().sort_values(ascending=False)
percent = (train_df.isnull().sum()/train_df.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
#missing_data.head(20)
#print(missing_data)
######################################################
# # Scaling Age
# scaler = MinMaxScaler()
# train_df['Age'] = scaler.fit(train_df[['Age']])
# train_df['Age'] = scaler.transform(train_df[['Age']])
# X_res['Age']= scaler.transform(X_res[['Age']])
# Scaling Age
scaler = MinMaxScaler()
train_df['Age'] = scaler.fit_transform(train_df[['Age']])
X_res['Age']=scaler.transform(X_res[['Age']])
###################################################3
# define X and y
feature_cols = ['Age', 'Gender', 'family_history', 'benefits', 'care_options', 'anonymity', 'leave', 'work_interfere']
X = train_df[feature_cols]
y = train_df.treatment
# split X and y into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
le=LabelEncoder()
# Iterating over all the common columns in train and test
for col in X_test.columns.values:
# Encoding only categorical variables
if X_test[col].dtypes=='object':
# Using whole data to form an exhaustive list of levels
data=X_train[col].append(X_test[col])
le.fit(data.values)
X_train[col]=le.transform(X_train[col])
X_test[col]=le.transform(X_test[col])
X_res[col]=le.transform(X_res[col])
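# Illustrative example (not part of the original pipeline): fitting the encoder on the
# combined train/test values guarantees every category level seen at predict time has a code:
#   enc = LabelEncoder()
#   enc.fit(pd.concat([pd.Series(['Yes', 'No']), pd.Series(["Don't know"])]))
#   enc.transform(["Don't know"])  # -> array([0]); would raise if fitted on the train split only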
def tuningRandomizedSearchCV(model, param_dist):
#Searching multiple parameters simultaneously
# n_iter controls the number of searches
rand = RandomizedSearchCV(model, param_dist, cv=10, scoring='accuracy', n_iter=10, random_state=5)
rand.fit(X, y)
#rand.grid_scores_
# examine the best model
#print('Rand. Best Score: ', rand.best_score_)
#print('Rand. Best Params: ', rand.best_params_)
# run RandomizedSearchCV 20 times (with n_iter=10) and record the best score
best_scores = []
for _ in range(20):
rand = RandomizedSearchCV(model, param_dist, cv=10, scoring='accuracy', n_iter=10)
rand.fit(X, y)
best_scores.append(round(rand.best_score_, 3))
#print(best_scores)
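# Hypothetical usage of the helper above (kept commented out so nothing heavy runs on import):
#   log_dist = {"C": [0.001, 0.01, 0.1, 1, 10, 100], "solver": ["liblinear", "lbfgs"]}
#   tuningRandomizedSearchCV(LogisticRegression(max_iter=500), log_dist)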
#################################################
def treeClassifier(): # Calculating the best parameters
tree = DecisionTreeClassifier()
featuresSize = len(feature_cols)
param_dist = {"max_depth": [3, None],
"max_features": randint(1, featuresSize),
"min_samples_split": randint(2, 9),
"min_samples_leaf": randint(1, 9),
"criterion": ["gini", "entropy"]}
#tuningRandomizedSearchCV(tree, param_dist)
# train a decision tree model on the training set
tree = DecisionTreeClassifier(max_depth=3, min_samples_split=8, max_features=6, criterion='entropy', min_samples_leaf=7)
tree.fit(X_train, y_train)
#make class predictions for the testing set
y_pred = tree.predict(X_res)
print(y_pred)
return y_pred
#print('########### Tree classifier ###############')
#accuracy_score = evalClassModel(tree, y_test, y_pred_class, True)
#Data for final graph
#methodDict['Tree clas.'] = accuracy_score * 100
return treeClassifier()
'''*************************END*******************************'''
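# Expected call shape for the pipeline above (hypothetical values; `func` is assumed to be
# the wrapper that runs the steps above and returns treeClassifier()'s predictions):
#   sample = pd.DataFrame({"Age": [29], "Gender": ["male"], "family_history": ["Yes"],
#                          "benefits": ["Yes"], "care_options": ["No"], "anonymity": ["Yes"],
#                          "leave": ["Somewhat easy"], "work_interfere": ["Sometimes"]})
#   func(sample)  # -> array of 'Yes'/'No' treatment predictions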
@app.route('/')
def root():
msg=Message('Hello',sender='anantisdumb@gmail.com',recipients=['anantisdumb@gmail.com'])
msg.body="This is the email body"
mail.send(msg)
return render_template('home.html')
@app.route('/form1')
def form1():
return render_template('form1.html')
@app.route('/form2')
def form2():
return render_template('form2.html')
@app.route('/form3')
def form3():
return render_template('form3.html')
@app.route('/form4')
def form4():
return render_template('form4.html')
@app.route('/images/<Paasbaan>')
def download_file(Paasbaan):
return send_from_directory(app.config['images'], Paasbaan)
@app.route('/index.html')
def index():
return render_template('index.html')
@app.route('/work.html')
def work():
return render_template('work.html')
@app.route('/about.html')
def about():
return render_template('about.html')
@app.route('/contact.html')
def contact():
return render_template('contact.html')
@app.route('/result.html', methods = ['POST'])
def predict():
if request.method == 'POST':
age=request.form['age']
gender=request.form['gender']
history=request.form['History']
benefits=request.form['seek_help']
care_opt=request.form['care_options']
anonymity=request.form['anonymity']
leave=request.form['leave']
work=request.form['intf']
# print(age)
# print(gender)
# print(history)
# print(benefits)
# print(care_opt)
# print(anonymity)
# print(leave)
# print(work)
X_res= pd.DataFrame({"Age": [age],
"Gender":[gender],
"family_history":[history],
"benefits":[benefits],
"care_options":[care_opt],
"anonymity":[anonymity],
"leave":[leave],
"work_interfere":[work]})
# print(X_res)
y_res=func(X_res)
print("**********************************")
print("**********************************")
print(y_res)
print("**********************************")
print("**********************************")
if y_res == "Yes":
my_prediction='Treatment is required. Sanction leave.'
#print("ROBBERY")
else:
my_prediction='Treatment not needed'
#print("SAFE")
return render_template('result.html', prediction = my_prediction)
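# Hypothetical request against a local dev server (form field names match the lookups above):
#   curl -X POST http://127.0.0.1:5000/result.html \
#        -d "age=29" -d "gender=male" -d "History=Yes" -d "seek_help=Yes" \
#        -d "care_options=No" -d "anonymity=Yes" -d "leave=Somewhat easy" -d "intf=Sometimes"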
if __name__ == '__main__':
app.run(debug = True)
| 29.420779
| 314
| 0.681204
|
795209d2a1286ede6451d2143d46df546a1ec775
| 20,163
|
py
|
Python
|
du/denv/SshConfigManager.py
|
spiricn/DevUtils
|
58a035a08a7c58035c25f992c1b8aa33cc997cd2
|
[
"MIT"
] | 1
|
2021-12-21T13:18:08.000Z
|
2021-12-21T13:18:08.000Z
|
du/denv/SshConfigManager.py
|
spiricn/DevUtils
|
58a035a08a7c58035c25f992c1b8aa33cc997cd2
|
[
"MIT"
] | null | null | null |
du/denv/SshConfigManager.py
|
spiricn/DevUtils
|
58a035a08a7c58035c25f992c1b8aa33cc997cd2
|
[
"MIT"
] | null | null | null |
import logging
import re
import os
import sys
from subprocess import CalledProcessError
from du.denv.Command import Command
logger = logging.getLogger(__name__.split(".")[-1])
class SshConfigManager:
"""
Manages ssh configuration files on host and client side for given user
"""
# Path to the scp executable for both Linux and Win32 host platform
SCP_BINARY = (
"scp"
if sys.platform == Command.PLATFORM_OS_LINUX
else os.path.join(Command.WIN32_SYSTEM_PATH, "OpenSSH\\scp.exe")
)
# Path to the ssh-keygen executable for both Linux and Win32 host platform
SSH_KEYGEN_BINARY = (
"ssh-keygen -N '' -f {}"
if sys.platform == Command.PLATFORM_OS_LINUX
else os.path.join(
Command.WIN32_SYSTEM_PATH, 'OpenSSH\\ssh-keygen.exe -P "" -f {}'
)
)
# Path to ssh program data folder on Windows platform
WIN32_SSH_PROGRAM_DATA_PATH = (
os.path.join(
os.environ["AllUsersProfile"],
"ssh",
)
if sys.platform == Command.PLATFORM_OS_WIN32
else ""
)
# System ssh config file on Win32 OS
WIN32_SYSTEM_SSH_CONFIG_FILE = os.path.join(
WIN32_SSH_PROGRAM_DATA_PATH + os.sep, "ssh_config"
)
# Local user SSH root folder
LOCAL_USER_SSH_ROOT_FOLDER = os.path.join(os.path.expanduser("~") + os.sep, ".ssh")
# Local user SSH config file location on Linux OS
LOCAL_LINUX_USER_SSH_CONFIG_FILE = os.path.join(
LOCAL_USER_SSH_ROOT_FOLDER + os.sep, "config"
)
# Local user SSH config file location on Win32 OS
LOCAL_WIN32_USER_SSH_CONFIG_FILE = os.path.join(
LOCAL_USER_SSH_ROOT_FOLDER + os.sep, "config_overlay"
)
# Local user public and private key files
LOCAL_USER_SSH_IDENTITY_FILE_PUBLIC = os.path.join(
LOCAL_USER_SSH_ROOT_FOLDER + os.sep, "id_rsa.pub"
)
LOCAL_USER_SSH_IDENTITY_FILE_PRIVATE = os.path.join(
LOCAL_USER_SSH_ROOT_FOLDER + os.sep, "id_rsa"
)
# Remote side authorized_keys file location (Linux OS only for now)
REMOTE_LINUX_AUTH_KEYS_FILE = "/home/{}/.ssh/authorized_keys"
# Remote user SSH config file location (Linux OS only for now)
REMOTE_LINUX_USER_SSH_CONFIG_FILE = "/home/{}/.ssh/config"
# Docker container SSH config file location
DOCKER_CONTAINER_SSH_CONFIG_FILE = "/home/{}/.ssh/{}"
# OpenSSH default binary location on Windows 10
OPENSSH_WIN10_FILE_LOCATION = re.escape(
r"C:\\Windows\\System32\\{}".format(
re.escape(Command.WIN32_SSH_RELATIVE_EXE_PATH)
)
)
# Command string that is passed to remote shell in order to get the list of active_containers
SSH_DOCKER_CONTAINER_LIST_CMDLINE = 'active_containers=$(docker ps --format "{{ .Names }}" --filter "name=%(filter)s")'
# Line which is prepended to the user's ssh config file on Windows platform
SSH_CONFIG_OVERLAY_INCLUDE_STRING = "Include config_overlay"
# Default container template name
CONTAINER_NAME_TEMPLATE = "{}-denv"
# Windows host system ssh_config file template
WIN32_SSH_CONFIG_FILE_TEMPLATE = """Host *{}*
StrictHostKeyChecking no
UserKnownHostsFile NUL
""".format(
CONTAINER_NAME_TEMPLATE.format("")
)
def __init__(self, command):
"""
Constructor
@param command Command instance to be used
"""
self.__command = command
# Check if the system ssh_config is patched on Windows host platform
self.__sshWinConfigPatcher()
def sanitizeUserSshConfig(self, filter):
"""
Synchronize the user's ssh config file with the active docker container hosts
and generate global ssh configs for both Linux and Windows platforms
@param filter Filter string used to filter current user's docker containers
@return stdout of bash executed script
"""
# Set ssh config files for given user
configFileLinux = self.REMOTE_LINUX_USER_SSH_CONFIG_FILE.format(
self.__command.getUsername()
)
configFileWin = configFileLinux + "{}".format("_windows")
logger.debug(
"Sanitizing container ssh configs:\n\thost:\t{}\n\tuser:\t{}".format(
self.__command.getHost(), self.__command.getUsername()
)
)
cmdRet = None
# Prepare bash shell command which will update user ssh config
# files based on currently active docker containers
cmd = """
rm -rf %(configLinux)s
rm -rf %(configWin)s
%(dockerListContCommand)s
delete=1
for file in /home/%(username)s/.ssh/%(filter)s*
do
for container in $active_containers; do
if [ \"${file##*/\"$container\"}\" ]; then
delete=1
else
delete=0
break
fi
done
if [ "$delete" = "1" ]; then
rm -rf $file
else
cat $file >> /home/%(username)s/.ssh/config
fi
done
if [ -f "%(configLinux)s" ]; then
sed -e 's/localhost/jump-box/g' -e 's#/dev/null#NUL#g' \
-e %(winSshBinPath)s %(configLinux)s > %(configWin)s
fi
""" % {
"configLinux": configFileLinux,
"configWin": configFileWin,
"dockerListContCommand": self.__command.sshCommandStringConvert(
self.SSH_DOCKER_CONTAINER_LIST_CMDLINE % {"filter": filter}
),
"filter": filter,
"username": self.__command.getUsername(),
"winSshBinPath": "s/ssh/" + self.OPENSSH_WIN10_FILE_LOCATION + "/g",
}
# If host is local and Win32, skip this step
if not (
self.__command.getHostPlatform() is Command.PLATFORM_OS_WIN32
and self.__command.getHost() is None
):
cmdRet = self.__command.runCommand(cmd)
if not cmdRet:
# Copy final ssh config file back to host
self.__copyDockerSshConfigToHost()
return cmdRet
def createDockerContainerSshConfig(self, containerAddress, containerName):
"""
Creates a ssh config for given docker container name and IP address
This config is created with host acting as a jump-box for the spawned docker container's ssh connection
Generated config also disables Host Key Cheking for those ssh connections
@param containerAddress IP address of the docker container
@param containerName Name of the docker container
@return stdout of bash executed script
"""
# Set ssh docker container config file
dockerSshConfig = self.DOCKER_CONTAINER_SSH_CONFIG_FILE.format(
self.__command.getUsername(), containerName
)
logger.debug(
"Creating ssh config for:\n\tuser:\t{}\n\tcontainer:\t{}".format(
self.__command.getUsername(), containerName
)
)
# Prepare bash shell command which will create ssh config for given user and docker container
cmd = """
if [ ! -d "/home/%(username)s/.ssh" ]; then
mkdir "/home/%(username)s/.ssh"
fi
echo "Host localhost" | tee -a %(dockerConfig)s > /dev/null
echo " HostName %(host)s" | tee -a %(dockerConfig)s > /dev/null
echo " User %(username)s" | tee -a %(dockerConfig)s > /dev/null
echo " Port 22" | tee -a %(dockerConfig)s > /dev/null
echo | tee -a %(dockerConfig)s > /dev/null
echo "Host %(dockerName)s" | tee -a %(dockerConfig)s > /dev/null
echo " HostName %(dockerIp)s" | tee -a %(dockerConfig)s > /dev/null
echo " User %(username)s" | tee -a %(dockerConfig)s > /dev/null
echo " StrictHostKeyChecking no" | tee -a %(dockerConfig)s > /dev/null
echo " UserKnownHostsFile /dev/null" | tee -a %(dockerConfig)s > /dev/null
echo " ProxyCommand ssh -q -W %%h:%%p localhost" | tee -a %(dockerConfig)s > /dev/null
echo | tee -a %(dockerConfig)s > /dev/null
""" % {
"username": self.__command.getUsername(),
"dockerConfig": dockerSshConfig,
"dockerIp": containerAddress,
"dockerName": containerName,
"host": self.__command.getHost()
if self.__command.getHost()
else "localhost",
}
return self.__command.runCommand(cmd)
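# For reference, the per-container config produced above is expected to look roughly like
# this (hypothetical user, host and container address):
#
#   Host localhost
#       HostName my-remote-host
#       User alice
#       Port 22
#
#   Host alice-denv
#       HostName 172.17.0.2
#       User alice
#       StrictHostKeyChecking no
#       UserKnownHostsFile /dev/null
#       ProxyCommand ssh -q -W %h:%p localhost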
def copyLocalSshPubKeyToRemote(self):
"""
Copies the local user's identity file (e.g. ~/.ssh/id_rsa.pub) to the
remote host's authorized_keys file.
If the local identity file is not present, a new one will be generated
@return stdout of the executed remote command
"""
# 1. Check if local user has a generated local identity
# Usually it is id_rsa.pub file located in ~/.ssh folder
# If not present try to generate one
if not os.path.exists(self.LOCAL_USER_SSH_IDENTITY_FILE_PUBLIC):
logger.info(
"There is no local user's identity on this machine, we will create one"
)
# If we are on Windows host check if .ssh folder exists in local user's
# home directory and create it since the ssh keygeneration will fail otherwise
if (
self.__command.getHostPlatform() is Command.PLATFORM_OS_WIN32
and not os.path.exists(self.LOCAL_USER_SSH_ROOT_FOLDER)
):
logger.info(
"There is no .ssh folder in user's home direcotry on Windows host, we will creat one."
)
os.makedirs(self.LOCAL_USER_SSH_ROOT_FOLDER)
crateLocalUserPublicKeyCommand = self.SSH_KEYGEN_BINARY.format(
self.LOCAL_USER_SSH_IDENTITY_FILE_PRIVATE
)
self.__command.runCommand(crateLocalUserPublicKeyCommand, True)
# Also on Windows platform we need to create config file if it does not exist
# This file which will include config_overlay file which consists of container
# ssh jump-host configs
if self.__command.getHostPlatform() is Command.PLATFORM_OS_WIN32:
if not os.path.exists(self.LOCAL_LINUX_USER_SSH_CONFIG_FILE):
logger.info(
"There is no ssh config file, we will create one and patch it"
)
self.__fileLinePrepender(
self.LOCAL_LINUX_USER_SSH_CONFIG_FILE,
self.SSH_CONFIG_OVERLAY_INCLUDE_STRING,
True,
)
# If it exists we need to check if the config_overlay is already included
# If not, add that line at the begining of the file
else:
if not self.__fileLineSearch(
self.LOCAL_LINUX_USER_SSH_CONFIG_FILE,
self.SSH_CONFIG_OVERLAY_INCLUDE_STRING,
):
logger.info("ssh config file found but it will be patched")
self.__fileLinePrepender(
self.LOCAL_LINUX_USER_SSH_CONFIG_FILE,
self.SSH_CONFIG_OVERLAY_INCLUDE_STRING,
)
# Get the public key from the id_rsa.pub file
with open(self.LOCAL_USER_SSH_IDENTITY_FILE_PUBLIC, "r") as file:
publicKey = file.read().replace("\n", "")
logger.debug("User's public key: " + publicKey)
# 2. Check if authorized_keys file exists on remote side
# and create it if missing, check if user's public key
# is already there and append it if necessery
logger.debug("Transfering local user's public key to remote side if needed")
# Prepare bash shell command which will do the job
cmd = """
if [ ! -d "/home/%(username)s/.ssh" ]; then
mkdir "/home/%(username)s/.ssh"
fi
if [ -f "%(remoteAuthKeysFile)s" ]; then
echo "File authorized_keys exists, checking if user public key is already there"
if grep -Fxq "%(localUserPublicKey)s" "%(remoteAuthKeysFile)s"; then
echo "User public key found, do nothing"
else
echo "User public key not found, append it"
echo "%(localUserPublicKey)s" | tee -a "%(remoteAuthKeysFile)s" > /dev/null
fi
else
echo "File authorized_keys does not exist, create one and append user public key"
echo "%(localUserPublicKey)s" | tee -a "%(remoteAuthKeysFile)s" > /dev/null
fi
chmod 600 "%(remoteAuthKeysFile)s"
""" % {
"username": self.__command.getUsername(),
"remoteAuthKeysFile": self.REMOTE_LINUX_AUTH_KEYS_FILE.format(
self.__command.getUsername()
),
"localUserPublicKey": publicKey
if self.__command.getHostPlatform()
in (Command.PLATFORM_OS_LINUX, Command.PLATFORM_OS_MACOS)
else re.escape(publicKey),
}
return self.__command.runCommand(cmd)
def __copyDockerSshConfigToHost(self):
"""
Copies remote ssh config files to the local host
After this step the local host has the ssh config with jump-host
configuration to the remote docker containers
@return stdout of the executed commands
"""
# Set ssh config files for given user
remoteConfigFileLinux = self.REMOTE_LINUX_USER_SSH_CONFIG_FILE.format(
self.__command.getUsername()
)
localConfigFileLinux = self.LOCAL_LINUX_USER_SSH_CONFIG_FILE
remoteConfigFileWin = remoteConfigFileLinux + "{}".format("_windows")
# Determine local host and prepare copy commands accordingly
if (
self.__command.getHostPlatform() == Command.PLATFORM_OS_LINUX
or self.__command.getHostPlatform() == Command.PLATFORM_OS_MACOS
):
logger.debug("Prepare SSH config sync from remote to Linux host")
scpSshConfigCopyCommand = self.SCP_BINARY + " %(username)s@%(remoteHost)s:%(remoteConfigLinux)s %(localConfigLinux)s > \/dev\/null 2>&1" % {
"username": self.__command.getUsername(),
"userLocalWindowsSshConfig": self.LOCAL_WIN32_USER_SSH_CONFIG_FILE,
"remoteHost": self.__command.getHost(),
"remoteConfigLinux": remoteConfigFileLinux,
"localConfigLinux": localConfigFileLinux,
}
localSshConfigPath = localConfigFileLinux
elif self.__command.getHostPlatform() == Command.PLATFORM_OS_WIN32:
logger.debug("Prepare SSH config sync from remote to Win32 host")
scpSshConfigCopyCommand = self.SCP_BINARY + " %(remotePort)s %(username)s@%(remoteHost)s:%(configWin)s %(userLocalWindowsSshConfig)s" % {
"username": self.__command.getUsername(),
"userLocalWindowsSshConfig": self.LOCAL_WIN32_USER_SSH_CONFIG_FILE,
"remoteHost": self.__command.getHost(),
"remotePort": "-p {}".format(self.__command.getPort())
if self.__command.getPort()
else "",
"configWin": remoteConfigFileWin,
}
localSshConfigPath = self.LOCAL_WIN32_USER_SSH_CONFIG_FILE
# Copy the remote ssh config files to local host
scpSshCopyCmdParams = {"command": scpSshConfigCopyCommand, "local": True}
localSshConfigPathParams = {"path": localSshConfigPath}
command_list = [
(self.__command.runCommand, scpSshCopyCmdParams, CalledProcessError),
(os.remove, localSshConfigPathParams, FileNotFoundError),
]
result = None
for action, params, ex in command_list:
try:
result = action(**params)
break
except CalledProcessError as ex:
logger.debug(
"Remote SSH config file missing or some other error - do local cleanup. Return code is {}".format(
ex.returncode
)
)
continue
except FileNotFoundError as ex:
logger.debug(
"Local SSH config file missing or some other error. Strerror: {}, error number: {}".format(
ex.strerror, ex.errno
)
)
return result
def __sshWinConfigPatcher(self):
"""
Patches the ssh_config file on Win32 platform to disable StrictHostChecking option
for containers started by this tool
Call to this function needs to be done from within administrative context
This patching is not needed on Linux platform
"""
# Check if system ssh_config file exists
if self.__command.getHostPlatform() is Command.PLATFORM_OS_WIN32:
if not os.path.exists(self.WIN32_SYSTEM_SSH_CONFIG_FILE):
logger.info("There is no system ssh_config on this Windows host")
# Check for admin rights
if not self.__command.checkAdmin()[1]:
# Inform user that in order to patch the system ssh_config file
# the tool needs to be restarted from shell with admin privileges
logger.info(
"Please restart this tool from shell with admin privileges, so we can create and patch it"
)
sys.exit()
else:
# Create the file and apply the patch to the beginning of the file
self.__fileLinePrepender(
self.WIN32_SYSTEM_SSH_CONFIG_FILE,
self.WIN32_SSH_CONFIG_FILE_TEMPLATE,
True,
)
logger.info(
"We have admin rights... file is crated and patched successfully"
)
else:
if not self.__fileLineSearch(
self.WIN32_SYSTEM_SSH_CONFIG_FILE,
# Do search on the first line only, it is good enough
self.WIN32_SSH_CONFIG_FILE_TEMPLATE.partition("\n")[0],
):
logger.info(
"System ssh_config file found but it needs to be patched"
)
# Check for admin rights
if not self.__command.checkAdmin()[1]:
# Inform user that in order to patch the system ssh_config file
# the tool needs to be restarted from shell with admin privileges
logger.info(
"Please restart this tool from shell with admin privileges, so we can patch it"
)
sys.exit()
else:
# Append the patch to the beginning of the file
self.__fileLinePrepender(
self.WIN32_SYSTEM_SSH_CONFIG_FILE,
self.WIN32_SSH_CONFIG_FILE_TEMPLATE,
)
logger.info(
"We have admin rights... patching is finished successfully"
)
return
def __fileLinePrepender(self, filename, line, newFile=False):
"""
Adds a string line to the beginning of the file
@param filename File which will be modified (line prepended)
@param line String line
@param newFile If True it will create/overwrite the file. If False it will patch existing file
"""
with open(filename, "w+" if newFile else "r+") as f:
content = f.read()
f.seek(0, 0)
f.write(line.rstrip("\r\n") + "\n\n" + content)
def __fileLineSearch(self, filename, searchLine):
"""
Searches a string line in the file
@param filename File which will be used for search
@param searchLine String line that is being searched
@return True if line is found, False otherwise
"""
with open(filename, "r") as f:
for line in f:
line = line.rstrip() # remove '\n' at end of line
if searchLine == line:
return True
return False
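# Minimal usage sketch (illustrative only; the Command constructor arguments and the
# container name/address below are assumptions, not taken from this module):
#
#   from du.denv.Command import Command
#   from du.denv.SshConfigManager import SshConfigManager
#
#   command = Command(...)  # configured for the remote host/user
#   manager = SshConfigManager(command)
#   manager.copyLocalSshPubKeyToRemote()                                # ensure key-based login
#   manager.createDockerContainerSshConfig("172.17.0.2", "alice-denv")
#   manager.sanitizeUserSshConfig("alice-")                             # sync active containers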
| 40.487952
| 152
| 0.605118
|
79520c495cc5c2afa45dc2248247c7cc96790088
| 4,774
|
py
|
Python
|
simplesigner/simple_signer.py
|
Chainfire/simple-signer
|
86f85f8c17cc21586e6510d9ad7880bfbdd18761
|
[
"MIT"
] | 5
|
2021-03-29T23:12:08.000Z
|
2021-12-24T19:44:10.000Z
|
simplesigner/simple_signer.py
|
Chainfire/simple-signer
|
86f85f8c17cc21586e6510d9ad7880bfbdd18761
|
[
"MIT"
] | null | null | null |
simplesigner/simple_signer.py
|
Chainfire/simple-signer
|
86f85f8c17cc21586e6510d9ad7880bfbdd18761
|
[
"MIT"
] | 2
|
2021-05-08T19:06:24.000Z
|
2021-05-20T07:25:27.000Z
|
from typing import Optional, Any
from .exceptions import *
from .util import KEY_PRIVATE_TYPES, KEY_PUBLIC_TYPES, KEY_ALL_TYPES, is_private_key, is_public_key
class SimpleSigner:
VERSION = 2
MAGIC = 'CFSS'
class SignResult:
def __init__(self, mode: str, bytes_signed: int, signature: str, public_key: KEY_PUBLIC_TYPES, metadata: Optional[Any]):
self.mode = mode
"""Signer mode"""
self.bytes_signed = bytes_signed
"""Number of bytes processed in signature"""
self.signature = signature
"""Base64 of produced signature"""
self.public_key = public_key
"""Public key that can be used to verify the signature"""
self.metadata = metadata
"""Metadata included"""
class VerifyResult:
def __init__(self, mode: str, bytes_verified: int, signature: str, public_key: KEY_PUBLIC_TYPES, public_key_verified: bool, metadata: Optional[Any]):
self.mode = mode
"""Signer mode"""
self.bytes_verified = bytes_verified
"""Number of bytes processed in signature verification"""
self.signature = signature
"""Base64 of verified signature"""
self.public_key = public_key
"""Public key used to verify signature"""
self.public_key_verified = public_key_verified
"""Whether the public key is verified. If False, the key from the signature itself was used to verify,
which only proves the content of the checked bytes has not been modified since signing; this does not
prove authenticity as anyone could re-sign the file. If True, the public key from the signature matches
the public key provided to the SimpleSigner constructor - if that public key has been retrieved from the
original signer through a secondary route from the supplied file, this proves authenticity (provided the
original signer's private key has not been leaked and RSA/Ed25519 have not been cracked)"""
self.metadata = metadata
"""Metadata included"""
def __init__(self, key_or_fingerprint: Optional[KEY_ALL_TYPES]):
"""
An xxxPrivateKey is required to sign. To verify properly an xxxPublicKey or fingerprint str needs to be provided;
if omitted, the public key stored in the signature is used for verification, which only proves the checked
bytes have not been modified since signing; it does not prove the authenticity of the whole.
If an xxxPrivateKey is provided, its public part is also loaded automatically, so passing an xxxPrivateKey
allows you to both sign and properly verify.
:param key_or_fingerprint: RSAPrivateKey, Ed25519PrivateKey, EllipticCurvePrivateKey, RSAPublicKey, Ed25519PublicKey, EllipticCurvePublicKey, str (fingerprint), or None
"""
if key_or_fingerprint is None:
self._private_key = None # type: Optional[KEY_PRIVATE_TYPES]
self._public_key = None # type: Optional[KEY_PUBLIC_TYPES]
self._fingerprint = None # type: Optional[str]
elif isinstance(key_or_fingerprint, str):
self._private_key = None
self._public_key = None
self._fingerprint = key_or_fingerprint
elif is_private_key(key_or_fingerprint):
self._private_key = key_or_fingerprint
self._public_key = key_or_fingerprint.public_key()
self._fingerprint = None
elif is_public_key(key_or_fingerprint):
self._private_key = None
self._public_key = key_or_fingerprint
self._fingerprint = None
def _check_private_key(self):
if self._private_key is None:
raise PrivateKeyRequiredException()
def _check_public_key(self):
if self._public_key is None:
raise PublicKeyRequiredException()
def mode(self) -> str:
raise NotImplementedError()
def can_sign(self, infilename: str) -> bool:
raise NotImplementedError()
def can_verify(self, infilename: str) -> bool:
raise NotImplementedError()
def sign(self, infilename: str, outfilename: str, metadata: Optional[Any]=None) -> SignResult:
"""Sign infilename with private key and save to outfilename, the optional metadata should be json serializable.
Throws an exception unless successful."""
raise NotImplementedError()
def verify(self, infilename: str) -> VerifyResult:
"""Verify infilename with public key (if set, otherwise it uses the key stored next to the signature). Throws
an exception unless successful."""
raise NotImplementedError()
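# Usage sketch (illustrative; SimpleSigner is abstract, so a concrete subclass implementing
# mode/can_sign/can_verify/sign/verify is assumed here):
#
#   signer = SomeConcreteSigner(private_key)  # RSA/Ed25519/EC private key
#   signed = signer.sign("app.bin", "app-signed.bin", metadata={"build": 42})
#   checked = SomeConcreteSigner(private_key.public_key()).verify("app-signed.bin")
#   assert checked.public_key_verified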
| 44.616822
| 176
| 0.671764
|
79520c6fd12247ca558a7c69763f901ec63cc9e5
| 59
|
py
|
Python
|
tests/components/hassio/__init__.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
tests/components/hassio/__init__.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/hassio/__init__.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""Tests for Hass.io component."""
HASSIO_TOKEN = "123456"
| 19.666667
| 34
| 0.694915
|
79520e50dfc18af2b7c16a3b39e287584e628651
| 664
|
py
|
Python
|
adts/variant.py
|
jasonsbarr/python-adts
|
0bf079062b1356b2a7e6f044afa0a932340aac65
|
[
"MIT"
] | null | null | null |
adts/variant.py
|
jasonsbarr/python-adts
|
0bf079062b1356b2a7e6f044afa0a932340aac65
|
[
"MIT"
] | null | null | null |
adts/variant.py
|
jasonsbarr/python-adts
|
0bf079062b1356b2a7e6f044afa0a932340aac65
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, astuple
from typing import Any, Generic, TypeVar
from abc import ABC
T = TypeVar("T")
@dataclass
class Variant(ABC, Generic[T]):
def __new__(cls, *args, **kwargs):
if cls == Variant:
raise TypeError("Cannot instantiate Variant class directly")
return super().__new__(cls)
def __init__(self, value=None):
self.__dict__["_value"] = value
@property
def value(self):
return self._value
def __setattr__(self, __name: str, __value: Any) -> None:
raise ValueError("Cannot change values on a Variant type")
def __iter__(self):
return iter(astuple(self.value))
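# Illustrative usage (not part of the original module): concrete variants subclass Variant
# and wrap a single read-only value.
class _ExampleSome(Variant[int]):
    pass

_example = _ExampleSome(42)
assert _example.value == 42
# _example.value = 1  # would raise ValueError("Cannot change values on a Variant type")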
| 23.714286
| 72
| 0.661145
|
79520e831a30881486726da0d66f1447247b85ab
| 49,996
|
py
|
Python
|
tests/test_playlist.py
|
EdwardBetts/pyspotify
|
851c622e814dee59362d27d978251bd3891c59f7
|
[
"Apache-2.0"
] | 2
|
2016-11-18T08:49:26.000Z
|
2018-05-14T13:27:19.000Z
|
tests/test_playlist.py
|
EdwardBetts/pyspotify
|
851c622e814dee59362d27d978251bd3891c59f7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_playlist.py
|
EdwardBetts/pyspotify
|
851c622e814dee59362d27d978251bd3891c59f7
|
[
"Apache-2.0"
] | 1
|
2021-06-24T15:57:12.000Z
|
2021-06-24T15:57:12.000Z
|
# encoding: utf-8
from __future__ import unicode_literals
import collections
import unittest
import spotify
from spotify.playlist import _PlaylistCallbacks
import tests
from tests import mock
@mock.patch('spotify.playlist.lib', spec=spotify.lib)
class PlaylistTest(unittest.TestCase):
def setUp(self):
self.session = tests.create_session_mock()
def test_create_without_uri_or_sp_playlist_fails(self, lib_mock):
with self.assertRaises(AssertionError):
spotify.Playlist(self.session)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_create_from_uri(self, link_mock, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
link_instance_mock = link_mock.return_value
link_instance_mock._as_sp_playlist.return_value = sp_playlist
uri = 'spotify:playlist:foo'
result = spotify.Playlist(self.session, uri=uri)
link_mock.assert_called_once_with(self.session, uri)
link_instance_mock._as_sp_playlist.assert_called_once_with()
self.assertEqual(link_instance_mock.as_playlist.call_count, 0)
self.assertEqual(lib_mock.sp_playlist_add_ref.call_count, 0)
self.assertEqual(result._sp_playlist, sp_playlist)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_create_from_uri_is_cached(self, link_mock, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
link_instance_mock = link_mock.return_value
link_instance_mock._as_sp_playlist.return_value = sp_playlist
uri = 'spotify:playlist:foo'
result = spotify.Playlist(self.session, uri=uri)
self.assertEqual(self.session._cache[sp_playlist], result)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_create_from_uri_fail_raises_error(self, link_mock, lib_mock):
link_instance_mock = link_mock.return_value
link_instance_mock._as_sp_playlist.return_value = None
uri = 'spotify:playlist:foo'
with self.assertRaises(spotify.Error):
spotify.Playlist(self.session, uri=uri)
def test_life_cycle(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
sp_playlist = playlist._sp_playlist
lib_mock.sp_playlist_add_ref.assert_called_with(sp_playlist)
# Callbacks are only added when someone registers a Python event
# handler on the playlist:
lib_mock.sp_playlist_add_callbacks.assert_not_called()
playlist.on(spotify.PlaylistEvent.TRACKS_ADDED, lambda *args: None)
lib_mock.sp_playlist_add_callbacks.assert_called_with(
sp_playlist, mock.ANY, mock.ANY)
playlist = None # noqa
tests.gc_collect()
# Callbacks are removed when the playlist is GC-ed:
lib_mock.sp_playlist_remove_callbacks.assert_called_with(
sp_playlist, mock.ANY, mock.ANY)
# FIXME Won't be called because lib_mock has references to the
# sp_playlist object, and it thus won't be GC-ed.
# lib_mock.sp_playlist_release.assert_called_with(sp_playlist)
def test_cached_playlist(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
result1 = spotify.Playlist._cached(self.session, sp_playlist)
result2 = spotify.Playlist._cached(self.session, sp_playlist)
self.assertIsInstance(result1, spotify.Playlist)
self.assertIs(result1, result2)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_repr(self, link_mock, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 1
link_instance_mock = link_mock.return_value
link_instance_mock.uri = 'foo'
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = repr(playlist)
self.assertEqual(result, 'Playlist(%r)' % 'foo')
@mock.patch('spotify.Link', spec=spotify.Link)
def test_repr_if_unloaded(self, link_mock, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = repr(playlist)
self.assertEqual(result, 'Playlist(<not loaded>)')
@mock.patch('spotify.Link', spec=spotify.Link)
def test_repr_if_link_creation_fails(self, link_mock, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 1
link_mock.side_effect = spotify.Error('error message')
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = repr(playlist)
self.assertEqual(result, 'Playlist(<error: error message>)')
def test_eq(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist1 = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist2 = spotify.Playlist(self.session, sp_playlist=sp_playlist)
self.assertTrue(playlist1 == playlist2)
self.assertFalse(playlist1 == 'foo')
def test_ne(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist1 = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist2 = spotify.Playlist(self.session, sp_playlist=sp_playlist)
self.assertFalse(playlist1 != playlist2)
def test_hash(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist1 = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist2 = spotify.Playlist(self.session, sp_playlist=sp_playlist)
self.assertEqual(hash(playlist1), hash(playlist2))
def test_is_loaded(self, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 1
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.is_loaded
lib_mock.sp_playlist_is_loaded.assert_called_once_with(sp_playlist)
self.assertTrue(result)
@mock.patch('spotify.utils.load')
def test_load(self, load_mock, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.load(10)
load_mock.assert_called_with(self.session, playlist, timeout=10)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_tracks(self, track_lib_mock, lib_mock):
sp_track = spotify.ffi.cast('sp_track *', 43)
lib_mock.sp_playlist_num_tracks.return_value = 1
lib_mock.sp_playlist_track.return_value = sp_track
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
self.assertEqual(lib_mock.sp_playlist_add_ref.call_count, 1)
result = playlist.tracks
self.assertEqual(lib_mock.sp_playlist_add_ref.call_count, 2)
self.assertEqual(len(result), 1)
lib_mock.sp_playlist_num_tracks.assert_called_with(sp_playlist)
item = result[0]
self.assertIsInstance(item, spotify.Track)
self.assertEqual(item._sp_track, sp_track)
self.assertEqual(lib_mock.sp_playlist_track.call_count, 1)
lib_mock.sp_playlist_track.assert_called_with(sp_playlist, 0)
track_lib_mock.sp_track_add_ref.assert_called_with(sp_track)
def test_tracks_if_no_tracks(self, lib_mock):
lib_mock.sp_playlist_num_tracks.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.tracks
self.assertEqual(len(result), 0)
lib_mock.sp_playlist_num_tracks.assert_called_with(sp_playlist)
self.assertEqual(lib_mock.sp_playlist_track.call_count, 0)
def test_tracks_if_unloaded(self, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.tracks
lib_mock.sp_playlist_is_loaded.assert_called_with(sp_playlist)
self.assertEqual(len(result), 0)
def test_tracks_is_a_mutable_sequence(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
self.assertIsInstance(playlist.tracks, collections.MutableSequence)
def test_tracks_setitem(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=5)
playlist.remove_tracks = mock.Mock()
playlist.add_tracks = mock.Mock()
tracks[0] = mock.sentinel.track
playlist.add_tracks.assert_called_with(mock.sentinel.track, index=0)
playlist.remove_tracks.assert_called_with(1)
def test_tracks_setitem_with_slice(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=5)
playlist.remove_tracks = mock.Mock()
playlist.add_tracks = mock.Mock()
tracks[0:2] = [mock.sentinel.track1, mock.sentinel.track2]
playlist.add_tracks.assert_has_calls([
mock.call(mock.sentinel.track1, index=0),
mock.call(mock.sentinel.track2, index=1),
], any_order=False)
playlist.remove_tracks.assert_has_calls([
mock.call(3),
mock.call(2),
], any_order=False)
def test_tracks_setitem_with_slice_and_noniterable_value_fails(
self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=5)
with self.assertRaises(TypeError):
tracks[0:2] = mock.sentinel.track
def test_tracks_setitem_raises_index_error_on_negative_index(
self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=5)
with self.assertRaises(IndexError):
tracks[-1] = None
def test_tracks_setitem_raises_index_error_on_too_high_index(
self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=1)
with self.assertRaises(IndexError):
tracks[1] = None
def test_tracks_setitem_raises_type_error_on_non_integral_index(
self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=1)
with self.assertRaises(TypeError):
tracks['abc'] = None
def test_tracks_delitem(self, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=4)
del tracks[3]
lib_mock.sp_playlist_remove_tracks.assert_called_with(
sp_playlist, [3], 1)
def test_tracks_delitem_with_slice(self, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=3)
del tracks[0:2]
# Delete items in reverse order, so the indexes don't change
lib_mock.sp_playlist_remove_tracks.assert_has_calls([
mock.call(sp_playlist, [1], 1),
mock.call(sp_playlist, [0], 1),
], any_order=False)
def test_tracks_delitem_raises_index_error_on_negative_index(
self, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=1)
with self.assertRaises(IndexError):
del tracks[-1]
def test_tracks_delitem_raises_index_error_on_too_high_index(
self, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=1)
with self.assertRaises(IndexError):
del tracks[1]
def test_tracks_delitem_raises_type_error_on_non_integral_index(
self, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=1)
with self.assertRaises(TypeError):
del tracks['abc']
def test_tracks_insert(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=5)
playlist.add_tracks = mock.Mock()
tracks.insert(3, mock.sentinel.track)
playlist.add_tracks.assert_called_with(
mock.sentinel.track, index=3)
@mock.patch('spotify.playlist_track.lib', spec=spotify.lib)
def test_tracks_with_metadata(self, playlist_track_lib_mock, lib_mock):
lib_mock.sp_playlist_num_tracks.return_value = 1
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
# Created a Playlist with a ref to sp_playlist
self.assertEqual(lib_mock.sp_playlist_add_ref.call_count, 1)
self.assertEqual(
playlist_track_lib_mock.sp_playlist_add_ref.call_count, 0)
result = playlist.tracks_with_metadata
# Created a Sequence with a ref to sp_playlist
self.assertEqual(lib_mock.sp_playlist_add_ref.call_count, 2)
self.assertEqual(
playlist_track_lib_mock.sp_playlist_add_ref.call_count, 0)
self.assertEqual(len(result), 1)
lib_mock.sp_playlist_num_tracks.assert_called_with(sp_playlist)
item = result[0]
self.assertIsInstance(item, spotify.PlaylistTrack)
# Created a PlaylistTrack with a ref to sp_playlist
self.assertEqual(lib_mock.sp_playlist_add_ref.call_count, 2)
self.assertEqual(
playlist_track_lib_mock.sp_playlist_add_ref.call_count, 1)
def test_tracks_with_metadata_if_no_tracks(self, lib_mock):
lib_mock.sp_playlist_num_tracks.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.tracks_with_metadata
self.assertEqual(len(result), 0)
lib_mock.sp_playlist_num_tracks.assert_called_with(sp_playlist)
self.assertEqual(lib_mock.sp_playlist_track.call_count, 0)
def test_tracks_with_metadata_if_unloaded(self, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.tracks_with_metadata
lib_mock.sp_playlist_is_loaded.assert_called_with(sp_playlist)
self.assertEqual(len(result), 0)
def test_name(self, lib_mock):
lib_mock.sp_playlist_name.return_value = spotify.ffi.new(
'char[]', b'Foo Bar Baz')
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.name
lib_mock.sp_playlist_name.assert_called_once_with(sp_playlist)
self.assertEqual(result, 'Foo Bar Baz')
def test_name_is_none_if_unloaded(self, lib_mock):
lib_mock.sp_playlist_name.return_value = spotify.ffi.new('char[]', b'')
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.name
lib_mock.sp_playlist_name.assert_called_once_with(sp_playlist)
self.assertIsNone(result)
def test_rename(self, lib_mock):
lib_mock.sp_playlist_rename.return_value = int(spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.rename('Quux')
lib_mock.sp_playlist_rename.assert_called_with(sp_playlist, mock.ANY)
self.assertEqual(
spotify.ffi.string(lib_mock.sp_playlist_rename.call_args[0][1]),
b'Quux')
def test_rename_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_rename.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.rename('Quux')
def test_name_setter(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.rename = mock.Mock()
playlist.name = 'Quux'
playlist.rename.assert_called_with('Quux')
@mock.patch('spotify.user.lib', spec=spotify.lib)
def test_owner(self, user_lib_mock, lib_mock):
sp_user = spotify.ffi.cast('sp_user *', 43)
lib_mock.sp_playlist_owner.return_value = sp_user
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.owner
lib_mock.sp_playlist_owner.assert_called_with(sp_playlist)
self.assertIsInstance(result, spotify.User)
self.assertEqual(result._sp_user, sp_user)
user_lib_mock.sp_user_add_ref.assert_called_with(sp_user)
def test_is_collaborative(self, lib_mock):
lib_mock.sp_playlist_is_collaborative.return_value = 1
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.collaborative
lib_mock.sp_playlist_is_collaborative.assert_called_with(sp_playlist)
self.assertTrue(result)
def test_set_collaborative(self, lib_mock):
lib_mock.sp_playlist_set_collaborative.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.collaborative = False
lib_mock.sp_playlist_set_collaborative.assert_called_with(
sp_playlist, 0)
def test_set_collaborative_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_set_collaborative.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.collaborative = False
def test_set_autolink_tracks(self, lib_mock):
lib_mock.sp_playlist_set_autolink_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.set_autolink_tracks(True)
lib_mock.sp_playlist_set_autolink_tracks.assert_called_with(
sp_playlist, 1)
def test_set_autolink_tracks_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_set_autolink_tracks.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.set_autolink_tracks(True)
def test_description(self, lib_mock):
lib_mock.sp_playlist_get_description.return_value = spotify.ffi.new(
'char[]', b'Lorem ipsum')
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.description
lib_mock.sp_playlist_get_description.assert_called_with(sp_playlist)
self.assertEqual(result, 'Lorem ipsum')
def test_description_is_none_if_unset(self, lib_mock):
lib_mock.sp_playlist_get_description.return_value = spotify.ffi.NULL
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.description
lib_mock.sp_playlist_get_description.assert_called_with(sp_playlist)
self.assertIsNone(result)
@mock.patch('spotify.Image', spec=spotify.Image)
def test_image(self, image_mock, lib_mock):
image_id = b'image-id'
def func(sp_playlist, sp_image_id):
buf = spotify.ffi.buffer(sp_image_id)
buf[:len(image_id)] = image_id
return 1
lib_mock.sp_playlist_get_image.side_effect = func
sp_image = spotify.ffi.cast('sp_image *', 43)
lib_mock.sp_image_create.return_value = sp_image
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
image_mock.return_value = mock.sentinel.image
callback = mock.Mock()
result = playlist.image(callback=callback)
self.assertIs(result, mock.sentinel.image)
lib_mock.sp_playlist_get_image.assert_called_with(
sp_playlist, mock.ANY)
lib_mock.sp_image_create.assert_called_with(
self.session._sp_session, mock.ANY)
self.assertEqual(
spotify.ffi.string(lib_mock.sp_image_create.call_args[0][1]),
b'image-id')
# Since we *created* the sp_image, we already have a refcount of 1 and
# shouldn't increase the refcount when wrapping this sp_image in an
# Image object
image_mock.assert_called_with(
self.session, sp_image=sp_image, add_ref=False, callback=callback)
def test_image_is_none_if_no_image(self, lib_mock):
lib_mock.sp_playlist_get_image.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.image()
lib_mock.sp_playlist_get_image.assert_called_with(
sp_playlist, mock.ANY)
self.assertIsNone(result)
def test_has_pending_changes(self, lib_mock):
lib_mock.sp_playlist_has_pending_changes.return_value = 1
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.has_pending_changes
lib_mock.sp_playlist_has_pending_changes.assert_called_with(
sp_playlist)
self.assertTrue(result)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_add_tracks(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_add_tracks.return_value = int(
spotify.ErrorType.OK)
sp_track1 = spotify.ffi.new('int * ')
track1 = spotify.Track(self.session, sp_track=sp_track1)
sp_track2 = spotify.ffi.new('int * ')
track2 = spotify.Track(self.session, sp_track=sp_track2)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.add_tracks([track1, track2], index=4)
lib_mock.sp_playlist_add_tracks.assert_called_with(
sp_playlist, [sp_track1, sp_track2], 2, 4,
self.session._sp_session)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_add_tracks_without_index(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_add_tracks.return_value = int(
spotify.ErrorType.OK)
lib_mock.sp_playlist_num_tracks.return_value = 10
sp_track1 = spotify.ffi.new('int * ')
track1 = spotify.Track(self.session, sp_track=sp_track1)
sp_track2 = spotify.ffi.new('int * ')
track2 = spotify.Track(self.session, sp_track=sp_track2)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.add_tracks([track1, track2])
lib_mock.sp_playlist_add_tracks.assert_called_with(
sp_playlist, [sp_track1, sp_track2], 2, 10,
self.session._sp_session)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_add_tracks_with_a_single_track(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_add_tracks.return_value = int(
spotify.ErrorType.OK)
sp_track = spotify.ffi.new('int * ')
track = spotify.Track(self.session, sp_track=sp_track)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.add_tracks(track, index=7)
lib_mock.sp_playlist_add_tracks.assert_called_with(
sp_playlist, [sp_track], 1, 7, self.session._sp_session)
def test_add_tracks_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_add_tracks.return_value = int(
spotify.ErrorType.PERMISSION_DENIED)
lib_mock.sp_playlist_num_tracks.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.add_tracks([])
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_remove_tracks(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
index1 = 13
index2 = 17
playlist.remove_tracks([index1, index2])
lib_mock.sp_playlist_remove_tracks.assert_called_with(
sp_playlist, mock.ANY, 2)
self.assertIn(
index1, lib_mock.sp_playlist_remove_tracks.call_args[0][1])
self.assertIn(
index2, lib_mock.sp_playlist_remove_tracks.call_args[0][1])
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_remove_tracks_with_a_single_track(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
index = 17
playlist.remove_tracks(index)
lib_mock.sp_playlist_remove_tracks.assert_called_with(
sp_playlist, [index], 1)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_remove_tracks_with_duplicates(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
index = 17
playlist.remove_tracks([index, index])
lib_mock.sp_playlist_remove_tracks.assert_called_with(
sp_playlist, [index], 1)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_remove_tracks_fails_if_error(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.PERMISSION_DENIED)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
index = 17
with self.assertRaises(spotify.Error):
playlist.remove_tracks(index)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_reorder_tracks(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_reorder_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
position1 = 13
position2 = 17
playlist.reorder_tracks([position1, position2], 17)
lib_mock.sp_playlist_reorder_tracks.assert_called_with(
sp_playlist, mock.ANY, 2, 17)
self.assertIn(
position1, lib_mock.sp_playlist_reorder_tracks.call_args[0][1])
self.assertIn(
position2, lib_mock.sp_playlist_reorder_tracks.call_args[0][1])
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_reorder_tracks_with_a_single_track(
self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_reorder_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
position = 13
playlist.reorder_tracks(position, 17)
lib_mock.sp_playlist_reorder_tracks.assert_called_with(
sp_playlist, [position], 1, 17)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_reorder_tracks_with_duplicates(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_reorder_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
position = 13
playlist.reorder_tracks([position, position], 17)
lib_mock.sp_playlist_reorder_tracks.assert_called_with(
sp_playlist, [position], 1, 17)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_reorder_tracks_fails_if_error(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_reorder_tracks.return_value = int(
spotify.ErrorType.PERMISSION_DENIED)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
position = 13
with self.assertRaises(spotify.Error):
playlist.reorder_tracks(position, 17)
def test_num_subscribers(self, lib_mock):
lib_mock.sp_playlist_num_subscribers.return_value = 7
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.num_subscribers
lib_mock.sp_playlist_num_subscribers.assert_called_with(sp_playlist)
self.assertEqual(result, 7)
def test_subscribers(self, lib_mock):
sp_subscribers = spotify.ffi.new('sp_subscribers *')
sp_subscribers.count = 1
user_alice = spotify.ffi.new('char[]', b'alice')
sp_subscribers.subscribers = [user_alice]
lib_mock.sp_playlist_subscribers.return_value = sp_subscribers
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.subscribers
lib_mock.sp_playlist_subscribers.assert_called_with(sp_playlist)
tests.gc_collect()
lib_mock.sp_playlist_subscribers_free.assert_called_with(
sp_subscribers)
self.assertEqual(result, ['alice'])
def test_update_subscribers(self, lib_mock):
lib_mock.sp_playlist_update_subscribers.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.update_subscribers()
lib_mock.sp_playlist_update_subscribers.assert_called_with(
self.session._sp_session, sp_playlist)
def test_update_subscribers_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_update_subscribers.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.update_subscribers()
def test_is_in_ram(self, lib_mock):
lib_mock.sp_playlist_is_in_ram.return_value = 1
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.is_in_ram
lib_mock.sp_playlist_is_in_ram.assert_called_with(
self.session._sp_session, sp_playlist)
self.assertTrue(result)
def test_set_in_ram(self, lib_mock):
lib_mock.sp_playlist_set_in_ram.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.set_in_ram(False)
lib_mock.sp_playlist_set_in_ram.assert_called_with(
self.session._sp_session, sp_playlist, 0)
def test_set_in_ram_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_set_in_ram.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.set_in_ram(False)
def test_set_offline_mode(self, lib_mock):
lib_mock.sp_playlist_set_offline_mode.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.set_offline_mode(False)
lib_mock.sp_playlist_set_offline_mode.assert_called_with(
self.session._sp_session, sp_playlist, 0)
def test_set_offline_mode_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_set_offline_mode.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.set_offline_mode(False)
def test_offline_status(self, lib_mock):
lib_mock.sp_playlist_get_offline_status.return_value = 2
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.offline_status
lib_mock.sp_playlist_get_offline_status.assert_called_with(
self.session._sp_session, sp_playlist)
self.assertIs(result, spotify.PlaylistOfflineStatus.DOWNLOADING)
def test_offline_download_completed(self, lib_mock):
lib_mock.sp_playlist_get_offline_status.return_value = 2
lib_mock.sp_playlist_get_offline_download_completed.return_value = 73
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.offline_download_completed
lib_mock.sp_playlist_get_offline_download_completed.assert_called_with(
self.session._sp_session, sp_playlist)
self.assertEqual(result, 73)
def test_offline_download_completed_when_not_downloading(self, lib_mock):
lib_mock.sp_playlist_get_offline_status.return_value = 0
lib_mock.sp_playlist_get_offline_download_completed.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.offline_download_completed
self.assertEqual(
lib_mock.sp_playlist_get_offline_download_completed.call_count, 0)
self.assertIsNone(result)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_link_creates_link_to_playlist(self, link_mock, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
sp_link = spotify.ffi.cast('sp_link *', 43)
lib_mock.sp_link_create_from_playlist.return_value = sp_link
link_mock.return_value = mock.sentinel.link
result = playlist.link
link_mock.assert_called_once_with(
self.session, sp_link=sp_link, add_ref=False)
self.assertEqual(result, mock.sentinel.link)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_link_fails_if_playlist_not_loaded(
            self, link_mock, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 0
lib_mock.sp_link_create_from_playlist.return_value = spotify.ffi.NULL
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.link
# Condition is checked before link creation is tried
self.assertEqual(lib_mock.sp_link_create_from_playlist.call_count, 0)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_link_may_fail_if_playlist_has_not_been_in_ram(
self, link_mock, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 1
lib_mock.sp_link_create_from_playlist.return_value = spotify.ffi.NULL
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.link
# Condition is checked only if link creation returns NULL
lib_mock.sp_link_create_from_playlist.assert_called_with(sp_playlist)
lib_mock.sp_playlist_is_in_ram.assert_called_with(
self.session._sp_session, sp_playlist)
def test_first_on_call_adds_ref_to_obj_on_session(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACKS_ADDED, lambda *args: None)
self.assertIn(playlist, self.session._emitters)
def test_last_off_call_removes_ref_to_obj_from_session(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACKS_ADDED, lambda *args: None)
playlist.off(spotify.PlaylistEvent.TRACKS_ADDED)
self.assertNotIn(playlist, self.session._emitters)
def test_other_off_calls_keeps_ref_to_obj_on_session(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACKS_ADDED, lambda *args: None)
playlist.on(spotify.PlaylistEvent.TRACKS_MOVED, lambda *args: None)
playlist.off(spotify.PlaylistEvent.TRACKS_ADDED)
self.assertIn(playlist, self.session._emitters)
playlist.off(spotify.PlaylistEvent.TRACKS_MOVED)
self.assertNotIn(playlist, self.session._emitters)
@mock.patch('spotify.playlist.lib', spec=spotify.lib)
class PlaylistCallbacksTest(unittest.TestCase):
def setUp(self):
self.session = tests.create_session_mock()
spotify._session_instance = self.session
def tearDown(self):
spotify._session_instance = None
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_tracks_added_callback(self, track_lib_mock, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACKS_ADDED, callback)
sp_tracks = [
spotify.ffi.cast('sp_track *', 43),
spotify.ffi.cast('sp_track *', 44),
spotify.ffi.cast('sp_track *', 45),
]
index = 7
_PlaylistCallbacks.tracks_added(
sp_playlist, sp_tracks, len(sp_tracks), index, spotify.ffi.NULL)
callback.assert_called_once_with(playlist, mock.ANY, index)
tracks = callback.call_args[0][1]
self.assertEqual(len(tracks), len(sp_tracks))
self.assertIsInstance(tracks[0], spotify.Track)
self.assertEqual(tracks[0]._sp_track, sp_tracks[0])
track_lib_mock.sp_track_add_ref.assert_has_calls([
mock.call(sp_tracks[0]),
mock.call(sp_tracks[1]),
mock.call(sp_tracks[2]),
])
def test_tracks_removed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACKS_REMOVED, callback)
track_numbers = [43, 44, 45]
_PlaylistCallbacks.tracks_removed(
sp_playlist, track_numbers, len(track_numbers), spotify.ffi.NULL)
callback.assert_called_once_with(playlist, mock.ANY)
tracks = callback.call_args[0][1]
self.assertEqual(len(tracks), len(track_numbers))
self.assertEqual(tracks[0], 43)
def test_tracks_moved_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACKS_MOVED, callback)
track_numbers = [43, 44, 45]
index = 7
_PlaylistCallbacks.tracks_moved(
sp_playlist, track_numbers, len(track_numbers), index,
spotify.ffi.NULL)
callback.assert_called_once_with(playlist, mock.ANY, index)
tracks = callback.call_args[0][1]
self.assertEqual(len(tracks), len(track_numbers))
self.assertEqual(tracks[0], 43)
def test_playlist_renamed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.PLAYLIST_RENAMED, callback)
_PlaylistCallbacks.playlist_renamed(sp_playlist, spotify.ffi.NULL)
callback.assert_called_once_with(playlist)
def test_playlist_state_changed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.PLAYLIST_STATE_CHANGED, callback)
_PlaylistCallbacks.playlist_state_changed(
sp_playlist, spotify.ffi.NULL)
callback.assert_called_once_with(playlist)
def test_playlist_update_in_progress_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(
spotify.PlaylistEvent.PLAYLIST_UPDATE_IN_PROGRESS, callback)
done = True
_PlaylistCallbacks.playlist_update_in_progress(
sp_playlist, int(done), spotify.ffi.NULL)
callback.assert_called_once_with(playlist, done)
def test_playlist_metadata_updated_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.PLAYLIST_METADATA_UPDATED, callback)
_PlaylistCallbacks.playlist_metadata_updated(
sp_playlist, spotify.ffi.NULL)
callback.assert_called_once_with(playlist)
@mock.patch('spotify.user.lib', spec=spotify.lib)
def test_track_created_changed_callback(self, user_lib_mock, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACK_CREATED_CHANGED, callback)
index = 7
sp_user = spotify.ffi.cast('sp_user *', 43)
time = 123456789
_PlaylistCallbacks.track_created_changed(
sp_playlist, index, sp_user, time, spotify.ffi.NULL)
callback.assert_called_once_with(playlist, index, mock.ANY, time)
user = callback.call_args[0][2]
self.assertIsInstance(user, spotify.User)
self.assertEqual(user._sp_user, sp_user)
user_lib_mock.sp_user_add_ref.assert_called_with(sp_user)
def test_track_seen_changed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACK_SEEN_CHANGED, callback)
index = 7
seen = True
_PlaylistCallbacks.track_seen_changed(
sp_playlist, index, int(seen), spotify.ffi.NULL)
callback.assert_called_once_with(playlist, index, seen)
def test_description_changed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.DESCRIPTION_CHANGED, callback)
description = 'foo bar æøå'
desc = spotify.ffi.new('char[]', description.encode('utf-8'))
_PlaylistCallbacks.description_changed(
sp_playlist, desc, spotify.ffi.NULL)
callback.assert_called_once_with(playlist, description)
@mock.patch('spotify.image.lib', spec=spotify.lib)
def test_image_changed_callback(self, image_lib_mock, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.IMAGE_CHANGED, callback)
image_id = spotify.ffi.new('char[]', b'image-id')
sp_image = spotify.ffi.cast('sp_image *', 43)
lib_mock.sp_image_create.return_value = sp_image
image_lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
_PlaylistCallbacks.image_changed(
sp_playlist, image_id, spotify.ffi.NULL)
callback.assert_called_once_with(playlist, mock.ANY)
image = callback.call_args[0][1]
self.assertIsInstance(image, spotify.Image)
self.assertEqual(image._sp_image, sp_image)
lib_mock.sp_image_create.assert_called_once_with(
self.session._sp_session, image_id)
self.assertEqual(image_lib_mock.sp_image_add_ref.call_count, 0)
def test_track_message_changed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACK_MESSAGE_CHANGED, callback)
index = 7
message = 'foo bar æøå'
msg = spotify.ffi.new('char[]', message.encode('utf-8'))
_PlaylistCallbacks.track_message_changed(
sp_playlist, index, msg, spotify.ffi.NULL)
callback.assert_called_once_with(playlist, index, message)
def test_subscribers_changed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.SUBSCRIBERS_CHANGED, callback)
_PlaylistCallbacks.subscribers_changed(sp_playlist, spotify.ffi.NULL)
callback.assert_called_once_with(playlist)
class PlaylistOfflineStatusTest(unittest.TestCase):
def test_has_constants(self):
self.assertEqual(spotify.PlaylistOfflineStatus.NO, 0)
self.assertEqual(spotify.PlaylistOfflineStatus.DOWNLOADING, 2)
| 41.250825
| 79
| 0.696176
|
79520ed9d5f9fcf41be1b6d0afe1d9d86af3e90c
| 915
|
py
|
Python
|
oo/pessoa.py
|
anthonysilvaa/pythonbirds
|
776ef93ffdb3f3365967e87df6cff54020c1d65c
|
[
"MIT"
] | null | null | null |
oo/pessoa.py
|
anthonysilvaa/pythonbirds
|
776ef93ffdb3f3365967e87df6cff54020c1d65c
|
[
"MIT"
] | null | null | null |
oo/pessoa.py
|
anthonysilvaa/pythonbirds
|
776ef93ffdb3f3365967e87df6cff54020c1d65c
|
[
"MIT"
] | null | null | null |
class Pessoa:
olhos = 2
def __init__(self, *filhos, nome: str, idade=35, peso=35):
self.nome = nome
self.idade = idade
self.peso = peso
self.filhos = list(filhos)
def cumprimentar(self):
return f'Olá {id(self)}'
if __name__ == '__main__':
    usuario = Pessoa(nome='Bam')
#usuario.nome = 'Liane'
print(f'Nome: {usuario.nome} - Idade: {usuario.idade} - Peso: {usuario.peso}')
print(Pessoa.cumprimentar(usuario))
print(id(usuario))
print(usuario.cumprimentar())
amanda = Pessoa(nome='Amanda', idade=32, peso=67.52)
luciano = Pessoa(amanda, nome='Luciano')
for filho in luciano.filhos:
print(filho.nome)
luciano.sobrenome = 'Soares'
print(luciano.olhos)
luciano.olhos = 1
print(luciano.olhos)
print(Pessoa.olhos)
del luciano.filhos
print(luciano.__dict__)
print(amanda.__dict__)
| 20.333333
| 82
| 0.622951
|
79520f2079f590ca21b03e2c73c249d6d57ebac6
| 927
|
py
|
Python
|
src/zope/app/broken/interfaces.py
|
zopefoundation/zope.app.broken
|
828f59742e424871c53a5760851d7ce5a5c2f6cd
|
[
"ZPL-2.1"
] | null | null | null |
src/zope/app/broken/interfaces.py
|
zopefoundation/zope.app.broken
|
828f59742e424871c53a5760851d7ce5a5c2f6cd
|
[
"ZPL-2.1"
] | 6
|
2017-05-15T15:58:48.000Z
|
2021-05-06T09:00:31.000Z
|
src/zope/app/broken/interfaces.py
|
zopefoundation/zope.app.broken
|
828f59742e424871c53a5760851d7ce5a5c2f6cd
|
[
"ZPL-2.1"
] | 1
|
2015-04-03T07:30:37.000Z
|
2015-04-03T07:30:37.000Z
|
##############################################################################
#
# Copyright (c) 2007 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""zope.app.broken interfaces.
"""
from zope.deferredimport import deprecated
# BBB zope.app.broken 5.0: Names now moved to ZODB itself.
deprecated(
'Please import from ZODB.interfaces.'
' This module will go away in zope.app.broken 5.0.',
IBroken='ZODB.interfaces:IBroken',
)
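# Illustrative note (not part of the original module): with zope.deferredimport,
# the old name keeps working but emits a DeprecationWarning on access and
# resolves to the ZODB interface, e.g.:
#   from zope.app.broken.interfaces import IBroken  # warns; same object as
#   from ZODB.interfaces import IBroken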
| 37.08
| 78
| 0.621359
|
79520f2fb853c72cc43effea739b93bd0a3b20cc
| 154
|
py
|
Python
|
library/string_utils/base64/bench/metrics/main.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 2
|
2019-07-10T10:49:09.000Z
|
2020-06-19T11:40:04.000Z
|
library/string_utils/base64/bench/metrics/main.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 6
|
2020-02-18T22:12:29.000Z
|
2020-02-18T22:31:26.000Z
|
library/string_utils/base64/bench/metrics/main.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 1
|
2018-08-06T14:13:12.000Z
|
2018-08-06T14:13:12.000Z
|
import yatest.common as yc
def test_export_metrics(metrics):
metrics.set_benchmark(yc.execute_benchmark('library/string_utils/base64/bench/bench'))
| 25.666667
| 90
| 0.811688
|
795210398776484f32f8e6c5b9f3a563a744b5ab
| 6,671
|
py
|
Python
|
tests/test_auth_by_token.py
|
fpga-tom/mod_authnz_jwt
|
9e86777b8679df4f5e0b4ef56a77f57d1613e41a
|
[
"Apache-2.0"
] | 73
|
2016-09-01T00:01:48.000Z
|
2022-03-24T03:31:01.000Z
|
tests/test_auth_by_token.py
|
fpga-tom/mod_authnz_jwt
|
9e86777b8679df4f5e0b4ef56a77f57d1613e41a
|
[
"Apache-2.0"
] | 48
|
2016-10-27T22:01:58.000Z
|
2022-03-07T07:29:00.000Z
|
tests/test_auth_by_token.py
|
fpga-tom/mod_authnz_jwt
|
9e86777b8679df4f5e0b4ef56a77f57d1613e41a
|
[
"Apache-2.0"
] | 43
|
2016-10-17T20:53:48.000Z
|
2022-03-19T13:15:36.000Z
|
import time
import unittest
import json
from test_jwt import TestJWT
class TestAuthByToken(TestJWT):
@TestJWT.with_all_algorithms()
def test_login_with_urlencoded_should_success(self, alg, public_key, private_key, secured_url, login_url):
code, content, headers, cookies = self.http_post(login_url, {
self.USERNAME_FIELD: self.USERNAME,
self.PASSWORD_FIELD: self.PASSWORD
},
headers={"Content-Type": "application/x-www-form-urlencoded"})
self.assertEqual(code, 200)
@TestJWT.with_all_algorithms()
def test_login_with_json_should_fail(self, alg, public_key, private_key, secured_url, login_url):
code, content, headers, cookies = self.http_post(login_url, {
self.USERNAME_FIELD: self.USERNAME,
self.PASSWORD_FIELD: self.PASSWORD
},
headers={"Content-Type": "application/json"})
self.assertEqual(code, 415)
@TestJWT.with_all_algorithms()
def test_malformed_token_should_fail(self, alg, public_key, private_key, secured_url, login_url):
token = self.encode_jwt(
{"iss": self.JWT_ISS, "aud": self.JWT_AUD, "user": "toto", "iat": int(time.time()), "nbf": int(time.time()),
"exp": int(time.time()) + 1000}, private_key, alg)
# we replace . by # for the token to be malformed
token = token.replace('.', '#')
code, content, headers = self.http_get(secured_url, token=token)
self.assertEqual(code, 401)
self.assertEqual(headers["WWW-Authenticate"],
'Bearer realm="private area", error="invalid_token", '
'error_description="Token is malformed or signature is invalid"')
@TestJWT.with_all_algorithms()
def test_invalid_signature_should_fail(self, alg, public_key, private_key, secured_url, login_url):
token = self.encode_jwt(
{"iss": self.JWT_ISS, "aud": self.JWT_AUD, "user": "toto", "iat": int(time.time()), "nbf": int(time.time()),
"exp": int(time.time()) + 1000}, private_key, alg)
# we remove last 10 characters for the signature to be invalid
token = token[:-10]
code, content, headers = self.http_get(secured_url, token=token)
self.assertEqual(code, 401)
self.assertEqual(headers["WWW-Authenticate"],
'Bearer realm="private area", error="invalid_token", '
'error_description="Token is malformed or signature is invalid"')
@TestJWT.with_all_algorithms()
def test_invalid_iss_should_fail(self, alg, public_key, private_key, secured_url, login_url):
token = self.encode_jwt(
{"iss": "invalid", "aud": self.JWT_AUD, "user": "toto", "iat": int(time.time()), "nbf": int(time.time()),
"exp": int(time.time()) + 1000}, private_key, alg)
code, content, headers = self.http_get(secured_url, token=token)
self.assertEqual(code, 401)
self.assertEqual(headers["WWW-Authenticate"],
'Bearer realm="private area", error="invalid_token", '
'error_description="Issuer is not valid"')
@TestJWT.with_all_algorithms()
def test_invalid_aud_should_fail(self, alg, public_key, private_key, secured_url, login_url):
token = self.encode_jwt(
{"iss": self.JWT_ISS, "aud": "invalid", "user": "toto", "iat": int(time.time()), "nbf": int(time.time()),
"exp": int(time.time()) + 1000}, private_key, alg)
code, content, headers = self.http_get(secured_url, token=token)
self.assertEqual(code, 401)
self.assertEqual(headers["WWW-Authenticate"],
'Bearer realm="private area", error="invalid_token", '
'error_description="Audience is not valid"')
@TestJWT.with_all_algorithms()
def test_invalid_nbf_should_fail(self, alg, public_key, private_key, secured_url, login_url):
token = self.encode_jwt({"iss": self.JWT_ISS, "aud": self.JWT_AUD, "user": "toto", "iat": int(time.time()),
"nbf": int(time.time()) + 1000, "exp": int(time.time()) + 1000}, private_key, alg)
code, content, headers = self.http_get(secured_url, token=token)
self.assertEqual(code, 401)
self.assertEqual(headers["WWW-Authenticate"],
'Bearer realm="private area", error="invalid_token", '
'error_description="Token can\'t be processed now due to nbf field"')
@TestJWT.with_all_algorithms()
def test_invalid_exp_should_fail(self, alg, public_key, private_key, secured_url, login_url):
token = self.encode_jwt(
{"iss": self.JWT_ISS, "aud": self.JWT_AUD, "user": "toto", "iat": int(time.time()), "nbf": int(time.time()),
"exp": int(time.time()) - 1000}, private_key, alg)
code, content, headers = self.http_get(secured_url, token=token)
self.assertEqual(code, 401)
self.assertEqual(headers["WWW-Authenticate"],
'Bearer realm="private area", error="invalid_token", error_description="Token expired"')
@TestJWT.with_all_algorithms()
def test_token_exp_missing_should_success(self, alg, public_key, private_key, secured_url, login_url):
token = self.encode_jwt({"iss": self.JWT_ISS, "aud": self.JWT_AUD, "user": "toto", "iat": int(time.time()),
"nbf": int(time.time())}, private_key, alg)
code, content, headers = self.http_get(secured_url, token=token)
self.assertEqual(code, 200)
@TestJWT.with_all_algorithms()
def test_with_leeway_should_success(self, alg, public_key, private_key, secured_url, login_url):
token = self.encode_jwt(
{"iss": self.JWT_ISS, "aud": self.JWT_AUD, "user": "toto", "iat": int(time.time()) - 1000,
"nbf": int(time.time()) - 1000, "exp": int(time.time()) - self.JWT_LEEWAY + 1}, private_key, alg)
code, content, headers = self.http_get(secured_url, token=token)
self.assertEqual(code, 200)
@TestJWT.with_all_algorithms()
def test_should_success(self, alg, public_key, private_key, secured_url, login_url):
token = self.encode_jwt(
{"iss": self.JWT_ISS, "aud": self.JWT_AUD, "user": "toto", "iat": int(time.time()) - 1000,
"nbf": int(time.time()) - 1000, "exp": int(time.time()) + 10}, private_key, alg)
code, content, headers = self.http_get(secured_url, token=token)
self.assertEqual(code, 200)
| 57.017094
| 120
| 0.619997
|
79521051960b69ba2f821e42f7642af3cae86b74
| 510
|
py
|
Python
|
docassemble_webapp/docassemble/webapp/alembic/versions/77e8971ffcbf_first_alembic_revision.py
|
knod/docassemble
|
bd052b557743d098138a5f2129a9d3c2f68090a6
|
[
"MIT"
] | 568
|
2016-01-08T19:05:06.000Z
|
2022-03-30T19:44:47.000Z
|
docassemble_webapp/docassemble/webapp/alembic/versions/77e8971ffcbf_first_alembic_revision.py
|
knod/docassemble
|
bd052b557743d098138a5f2129a9d3c2f68090a6
|
[
"MIT"
] | 348
|
2016-01-25T02:17:36.000Z
|
2022-03-27T21:22:43.000Z
|
docassemble_webapp/docassemble/webapp/alembic/versions/77e8971ffcbf_first_alembic_revision.py
|
knod/docassemble
|
bd052b557743d098138a5f2129a9d3c2f68090a6
|
[
"MIT"
] | 262
|
2016-01-14T23:09:50.000Z
|
2022-03-23T15:06:08.000Z
|
"""first alembic revision
Revision ID: 77e8971ffcbf
Revises:
Create Date: 2017-08-13 09:07:33.368044
"""
from alembic import op
import sqlalchemy as sa
from docassemble.webapp.database import dbtableprefix
# revision identifiers, used by Alembic.
revision = '77e8971ffcbf'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.add_column(dbtableprefix + 'user', sa.Column('modified_at', sa.DateTime))
def downgrade():
op.drop_column(dbtableprefix + 'user', 'modified_at')
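# Example invocation (assumes Alembic's standard CLI and an alembic.ini
# configured for this project; not part of the original revision file):
#   alembic upgrade 77e8971ffcbf   # apply this revision
#   alembic downgrade base         # revert back past it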
| 22.173913
| 80
| 0.758824
|
79521082030489af477e06feab63c1d3f5a6065d
| 1,307
|
py
|
Python
|
setup.py
|
hyperair/flake8-expandtab
|
bf0f5cbd4398145dd8de6892605de4e297be1c17
|
[
"MIT"
] | null | null | null |
setup.py
|
hyperair/flake8-expandtab
|
bf0f5cbd4398145dd8de6892605de4e297be1c17
|
[
"MIT"
] | 1
|
2018-12-17T22:42:38.000Z
|
2018-12-18T11:33:40.000Z
|
setup.py
|
hyperair/flake8-expandtab
|
bf0f5cbd4398145dd8de6892605de4e297be1c17
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup
def get_long_description():
try:
import pypandoc
return pypandoc.convert("README.md", "rst")
except ImportError:
with open("README.md") as f:
return f.read()
setup(
name='flake8-expandtab',
version='0.3',
description='flake8 for tab junkies',
long_description=get_long_description(),
author='Chow Loong Jin',
author_email='hyperair@debian.org',
url='https://www.github.com/hyperair/flake8-expandtab',
license='MIT',
entry_points={
'flake8.extension': [
'expandtab = flake8_expandtab:TabExpander',
],
},
py_modules=['flake8_expandtab'],
data_files=['README.md'],
tests_require=['mock', 'flake8'],
test_suite='tests',
classifiers=[
"Framework :: Flake8",
"Intended Audience :: Developers",
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
"Operating System :: OS Independent"
]
)
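# Typical local workflow (a sketch, not part of the original setup.py):
#   pip install -e .      # install the plugin into the current environment
#   flake8 --version      # installed plugins, including expandtab, are listed here
#   flake8 some_module.py # run flake8 with the plugin active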
| 28.413043
| 71
| 0.609028
|
795210e275ea82f35f5b6c1ff50e7f8fc98fe93c
| 1,020
|
py
|
Python
|
tests/job.py
|
stamp711/DBx1000
|
928912dd7e005ce5a63ad94fdcde412ab893e678
|
[
"ISC"
] | null | null | null |
tests/job.py
|
stamp711/DBx1000
|
928912dd7e005ce5a63ad94fdcde412ab893e678
|
[
"ISC"
] | null | null | null |
tests/job.py
|
stamp711/DBx1000
|
928912dd7e005ce5a63ad94fdcde412ab893e678
|
[
"ISC"
] | null | null | null |
workloads = {}
workloads["YCSB+RH"] = {
"WORKLOAD": "YCSB",
"READ_PERC": 0.9,
"WRITE_PERC": 0.1,
}
workloads["YCSB+RW"] = {
"WORKLOAD": "YCSB",
"READ_PERC": 0.5,
"WRITE_PERC": 0.5,
}
workloads["YCSB+WH"] = {
"WORKLOAD": "YCSB",
"READ_PERC": 0.1,
"WRITE_PERC": 0.9,
}
workloads["TPCC+1WH"] = {"WORKLOAD": "TPCC", "NUM_WH": 1}
workloads["TPCC+4WH"] = {"WORKLOAD": "TPCC", "NUM_WH": 1}
workloads["TPCC+XWH"] = {"WORKLOAD": "TPCC", "NUM_WH": "THREAD_CNT"}
threads = {}
threads["01"] = {"THREAD_CNT": 1, "NUM_LOGGER": 1}
threads["02"] = {"THREAD_CNT": 2, "NUM_LOGGER": 2}
threads["04"] = {"THREAD_CNT": 4, "NUM_LOGGER": 4}
threads["06"] = {"THREAD_CNT": 6, "NUM_LOGGER": 4}
threads["08"] = {"THREAD_CNT": 8, "NUM_LOGGER": 4}
threads["10"] = {"THREAD_CNT": 10, "NUM_LOGGER": 4}
threads["12"] = {"THREAD_CNT": 12, "NUM_LOGGER": 4}
threads["14"] = {"THREAD_CNT": 14, "NUM_LOGGER": 4}
threads["16"] = {"THREAD_CNT": 16, "NUM_LOGGER": 4}
threads["18"] = {"THREAD_CNT": 18, "NUM_LOGGER": 4}
| 29.142857
| 68
| 0.583333
|
795210eeb5512c9005fe9124e88e91470d04acdb
| 3,097
|
py
|
Python
|
ganjoor/spiders/ganjoor.net/roodaki/scrapyrobaeer.py
|
amirmasoud/ganjoor-crawler
|
a86fe379955ce854765086ab7ba0a78513d052bd
|
[
"MIT"
] | null | null | null |
ganjoor/spiders/ganjoor.net/roodaki/scrapyrobaeer.py
|
amirmasoud/ganjoor-crawler
|
a86fe379955ce854765086ab7ba0a78513d052bd
|
[
"MIT"
] | null | null | null |
ganjoor/spiders/ganjoor.net/roodaki/scrapyrobaeer.py
|
amirmasoud/ganjoor-crawler
|
a86fe379955ce854765086ab7ba0a78513d052bd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
class scrapyrobaeerSpider(scrapy.Spider):
name = "scrapyrobaeer"
allowed_domains = ["ganjoor.net"]
if 38 == 1:
start_urls = ["https://ganjoor.net/roodaki/robaeer/sh"]
else:
start_urls = ["https://ganjoor.net/roodaki/robaeer/sh" + "1"]
order = 1
def parse(self, response):
index = 0
sh = dict()
sh["type"] = "robaee"
sh["text"] = dict()
for i, poem in enumerate(response.css("div.poem>article>div")):
if poem.css("p:first-child::text").extract_first() is None:
continue
if index == 0:
if 1 == 1:
sh["title"] = "رباعی" + " شماره " + str(self.order) + " - " + ''.join(poem.css("div.m1>p::text").extract()).strip()
elif 1 == 2:
sh["title"] = "رباعی" + " شماره " + str(self.order) + " - " + ''.join(poem.css("div.m2>p::text").extract()).strip()
elif 1 == 3:
sh["title"] = "رباعی" + " شماره " + str(self.order) + " - " + ''.join(response.css("div.poem>article>h2>a::text").extract()).strip() + ': ' + ''.join(poem.css("div.m1>p::text").extract()).strip()
elif 1 == 4:
sh["title"] = "رباعی" + " شماره " + str(self.order) + " - " + ''.join(response.css("div.poem>article>h2>a::text").extract()).strip() + ': ' + ''.join(poem.css("div.m2>p::text").extract()).strip()
else:
sh["title"] = response.css("div.poem>article>h2>a::text").extract_first()
if len(poem.css("div.m1>p")) == 1:
if poem.css("div.b"):
sh["text"][index] = dict([
("m1", ''.join(poem.css("div.m1>p::text").extract()).strip()),
("m2", ''.join(poem.css("div.m2>p::text").extract()).strip()),
])
else:
sh["text"][index] = dict([
("t1", ''.join(poem.css("p:first-child::text").extract()).strip()),
("t2", ''.join(poem.css("p:last-child::text").extract()).strip()),
])
else:
if poem.css("div.b2"):
sh["text"][index] = dict([
("t1", ''.join(poem.css("p:first-child::text").extract()).strip()),
("t2", ''.join(poem.css("p:last-child::text").extract()).strip()),
])
else:
sh['text'][index] = dict([
('p', ''.join(poem.css('p:first-child::text').extract()).strip())
])
index = index + 1
sh["order"] = self.order
self.order = self.order + 1
yield sh
# next_page = response.css("div.navigation>div.navleft>a::attr(href)").extract_first()
if self.order < (38 + 1):
next_page = response.urljoin("https://ganjoor.net/roodaki/robaeer/sh" + str(self.order))
yield scrapy.Request(next_page, callback=self.parse)
| 50.770492
| 215
| 0.454634
|
79521104346211d0c8462da6a03bbba73f04ba5d
| 2,010
|
py
|
Python
|
Electronics/ADC.py
|
KyleKing/Another_Raspberry_Pi_Guide
|
c1bf871411e8c51df069ba8374fdec49ad43f02b
|
[
"MIT"
] | 1
|
2016-06-29T14:12:49.000Z
|
2016-06-29T14:12:49.000Z
|
Electronics/ADC.py
|
KyleKing/Another_Raspberry_Pi_Guide
|
c1bf871411e8c51df069ba8374fdec49ad43f02b
|
[
"MIT"
] | 2
|
2016-06-25T16:37:52.000Z
|
2016-12-07T02:26:16.000Z
|
Electronics/ADC.py
|
KyleKing/Another_Raspberry_Pi_Guide
|
c1bf871411e8c51df069ba8374fdec49ad43f02b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Written by Limor "Ladyada" Fried for Adafruit Industries, (c) 2015
# This code is released into the public domain
# import time
# import os
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
DEBUG = 1
# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
if ((adcnum > 7) or (adcnum < 0)):
return -1
GPIO.output(cspin, True)
GPIO.output(clockpin, False) # start clock low
GPIO.output(cspin, False) # bring CS low
commandout = adcnum
commandout |= 0x18 # start bit + single-ended bit
commandout <<= 3 # we only need to send 5 bits here
for i in range(5):
if (commandout & 0x80):
GPIO.output(mosipin, True)
else:
GPIO.output(mosipin, False)
commandout <<= 1
GPIO.output(clockpin, True)
GPIO.output(clockpin, False)
adcout = 0
# read in one empty bit, one null bit and 10 ADC bits
for i in range(12):
GPIO.output(clockpin, True)
GPIO.output(clockpin, False)
adcout <<= 1
if (GPIO.input(misopin)):
adcout |= 0x1
GPIO.output(cspin, True)
adcout >>= 1 # first bit is 'null' so drop it
return adcout
# change these as desired - they're the pins connected from the
# SPI port on the ADC to the Cobbler
SPICLK = 18
SPIMISO = 23
SPIMOSI = 24
SPICS = 25
# set up the SPI interface pins
GPIO.setup(SPIMOSI, GPIO.OUT)
GPIO.setup(SPIMISO, GPIO.IN)
GPIO.setup(SPICLK, GPIO.OUT)
GPIO.setup(SPICS, GPIO.OUT)
while True:
# read the analog pin
CH0 = readadc(0, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH1 = readadc(1, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH2 = readadc(2, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH3 = readadc(3, SPICLK, SPIMOSI, SPIMISO, SPICS)
    print('{0},{1},{2},{3}'.format(CH0, CH1, CH2, CH3))
    if DEBUG:
        print(">> CH0: {0}".format(CH0))
        print("   CH1: {0}".format(CH1))
        print("   CH2: {0}".format(CH2))
        print("   CH3: {0}".format(CH3))
| 26.8
| 68
| 0.620896
|
795211b2a4b83601f223170208d015ff06a667e3
| 1,500
|
py
|
Python
|
tests/core/test_local.py
|
saucynosoy/opta
|
e96f335d43c849803c1a588a812ee98f52e3f92a
|
[
"Apache-2.0"
] | null | null | null |
tests/core/test_local.py
|
saucynosoy/opta
|
e96f335d43c849803c1a588a812ee98f52e3f92a
|
[
"Apache-2.0"
] | null | null | null |
tests/core/test_local.py
|
saucynosoy/opta
|
e96f335d43c849803c1a588a812ee98f52e3f92a
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
import unittest
from opta.core.local import Local
from opta.layer import Layer
class LocalTests(unittest.TestCase):
def setUp(self) -> None:
self.layer = Layer(
name="testname",
org_name="testorg",
providers={"local": {"path": "/tmp"}},
modules_data=[],
path="/tmp",
parent=None,
)
self.local = Local(self.layer)
self.local.tf_file = "/tmp/tfconfig"
self.local.config_file_path = "/tmp/localconfig"
with open(self.local.config_file_path, "w") as f:
json.dump({"a": "1"}, f)
with open(self.local.tf_file, "w") as f:
f.write("Some tf state for testing")
return super().setUp()
def tearDown(self) -> None:
if os.path.isfile("/tmp/localconfig"):
os.remove("/tmp/localconfig")
if os.path.isfile("/tmp/tfconfig"):
os.remove("/tmp/tfconfig")
return super().tearDown()
def test_get_remote_config(self) -> None:
assert self.local.get_remote_config() == {"a": "1"}
def test_upload_opta_config(self) -> None:
self.local.upload_opta_config()
        config = json.load(open(self.local.config_file_path, "r"))
        assert set(config.keys()) == set(["opta_version", "original_spec", "date"])
def test_delete_local_tf_state(self) -> None:
self.local.delete_local_tf_state()
assert os.path.isfile(self.local.tf_file) is False
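# Allows running this module directly (an addition; the repository presumably
# relies on a separate test runner for CI):
if __name__ == "__main__":
    unittest.main()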
| 31.25
| 81
| 0.592667
|
79521393a95f0c7642754b16fff3f920549ce635
| 3,919
|
py
|
Python
|
setup.py
|
bengranett/pypeline
|
33aba4dbb9743fb1227f4bfb15f1a97ea22afba4
|
[
"MIT"
] | null | null | null |
setup.py
|
bengranett/pypeline
|
33aba4dbb9743fb1227f4bfb15f1a97ea22afba4
|
[
"MIT"
] | null | null | null |
setup.py
|
bengranett/pypeline
|
33aba4dbb9743fb1227f4bfb15f1a97ea22afba4
|
[
"MIT"
] | null | null | null |
"""A setuptools based setup module for catstore.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pypeline',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.1',
description='configure classes with ease',
long_description=long_description,
# The project's main homepage.
url='https://github.com/bengranett/pypeline',
# Author details
author='Ben Granett',
author_email='ben.granett@brera.inaf.it',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='astronomy',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['configargparseb'],
dependency_links=[
'git+https://github.com/bengranett/ConfigArgParseB.git@release#egg=ConfigArgParseB',
]
# # List additional groups of dependencies here (e.g. development
# # dependencies). You can install these using the following syntax,
# # for example:
# # $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# # If there are data files included in your packages that need to be
# # installed, specify them here. If using Python 2.6 or less, then these
# # have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# # Although 'package_data' is the preferred approach, in some case you may
# # need to place data files outside of your packages. See:
# # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# # To provide executable scripts, use entry points in preference to the
# # "scripts" keyword. Entry points provide cross-platform support and allow
# # pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| 34.991071
| 96
| 0.667262
|
795213be0b24d02e4040510687ca1f3b419cc048
| 1,841
|
py
|
Python
|
nova/compute/task_states.py
|
russellb/nova
|
99c2e02b44a1012c8e26fc7658dc40ec4620a1ee
|
[
"Apache-2.0"
] | 1
|
2015-07-15T08:51:16.000Z
|
2015-07-15T08:51:16.000Z
|
nova/compute/task_states.py
|
russellb/nova
|
99c2e02b44a1012c8e26fc7658dc40ec4620a1ee
|
[
"Apache-2.0"
] | 1
|
2020-07-24T14:14:13.000Z
|
2020-07-24T14:14:13.000Z
|
nova/compute/task_states.py
|
russellb/nova
|
99c2e02b44a1012c8e26fc7658dc40ec4620a1ee
|
[
"Apache-2.0"
] | 2
|
2019-06-12T00:52:15.000Z
|
2020-07-24T10:35:29.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Possible task states for instances.
Compute instance task states represent what is happening to the instance at the
current moment. These tasks can be generic, such as 'spawning', or specific,
such as 'block_device_mapping'. These task states allow for a better view into
what an instance is doing and should be displayed to users/administrators as
necessary.
"""
SCHEDULING = 'scheduling'
BLOCK_DEVICE_MAPPING = 'block_device_mapping'
NETWORKING = 'networking'
SPAWNING = 'spawning'
IMAGE_SNAPSHOT = 'image_snapshot'
IMAGE_BACKUP = 'image_backup'
UPDATING_PASSWORD = 'updating_password'
RESIZE_PREP = 'resize_prep'
RESIZE_MIGRATING = 'resize_migrating'
RESIZE_MIGRATED = 'resize_migrated'
RESIZE_FINISH = 'resize_finish'
RESIZE_REVERTING = 'resize_reverting'
RESIZE_CONFIRMING = 'resize_confirming'
RESIZE_VERIFY = 'resize_verify'
REBOOTING = 'rebooting'
REBOOTING_HARD = 'rebooting_hard'
PAUSING = 'pausing'
UNPAUSING = 'unpausing'
SUSPENDING = 'suspending'
RESUMING = 'resuming'
POWERING_OFF = 'powering-off'
POWERING_ON = 'powering-on'
RESCUING = 'rescuing'
UNRESCUING = 'unrescuing'
DELETING = 'deleting'
STOPPING = 'stopping'
STARTING = 'starting'
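# Illustrative only (not part of the original module): code that consumes these
# constants typically compares an instance's current task_state against them,
# for example:
#   if instance['task_state'] in (RESIZE_PREP, RESIZE_MIGRATING, RESIZE_MIGRATED):
#       ...refuse to start another resize until this one settles...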
| 30.180328
| 79
| 0.766431
|
795213f23d853b29bbb8529a9ac176c93d7888d1
| 2,605
|
py
|
Python
|
2-9-1.py
|
WAETW/aima_exercise
|
63c7e8c4f0e995cd57c6540aa895598b97ade709
|
[
"MIT"
] | null | null | null |
2-9-1.py
|
WAETW/aima_exercise
|
63c7e8c4f0e995cd57c6540aa895598b97ade709
|
[
"MIT"
] | null | null | null |
2-9-1.py
|
WAETW/aima_exercise
|
63c7e8c4f0e995cd57c6540aa895598b97ade709
|
[
"MIT"
] | null | null | null |
import random
import sys
'''Initialize the environment'''
def dirt_placing():
    loc_A = random.choice(['Clean', 'Dirty'])
    loc_B = random.choice(['Clean', 'Dirty'])
    return loc_A, loc_B
'''Pick the agent's starting location'''
def init_loc():
    vacuum_init_loc = random.choice(['loc_A', 'loc_B'])
    return vacuum_init_loc
'''Scoring: moving left or right costs one point, sucking earns ten (based on the scoring example in the GitHub repo)'''
def socre(actions, loc_A, loc_B):
    your_score = 0
    vacuum_loc = ''
    if actions == 'Left':
        your_score = your_score - 1
        vacuum_loc = 'loc_A'
    elif actions == 'Right':
        your_score = your_score - 1
        vacuum_loc = 'loc_B'
    elif actions == 'Suck':
        if loc_A == 'Dirty':
            your_score = your_score + 10
            vacuum_loc = 'loc_A'
            loc_A = "Clean"
        elif loc_B == 'Dirty':
            your_score = your_score + 10
            vacuum_loc = 'loc_B'
            loc_B = "Clean"
    return your_score, vacuum_loc, loc_A, loc_B
'''Define the rules'''
def rule(location, loc_A, loc_B):
    action = ''
    enviroment_state = ''
    if location == 'loc_A':
        enviroment_state = loc_A
    elif location == 'loc_B':
        enviroment_state = loc_B
    if location == "loc_A":
        if enviroment_state == "Clean":
            action = "Right"
        else:
            action = "Suck"
    elif location == "loc_B":
        if enviroment_state == "Clean":
            action = "Left"
        else:
            action = "Suck"
    return action, loc_A, loc_B
'''Build the environment; episodes is the number of steps to run'''
def enviroment(episodes):
    performence_score = 0
    vacuum_init_loc = init_loc()
    vacuum_loc = ''
    loc_A, loc_B = dirt_placing()
    print("Vacuum starting location: " + vacuum_init_loc)
    print("Environment state: (" + loc_A + "," + loc_B + ")")
    for i in range(0, episodes):
        if i == 0:
            action, loc_A, loc_B = rule(vacuum_init_loc, loc_A, loc_B)
            temp_score, vacuum_loc, loc_A, loc_B = socre(action, loc_A, loc_B)
            print("Action " + str(i + 1) + ": " + action)
            performence_score = performence_score + temp_score
        else:
            action, loc_A, loc_B = rule(vacuum_loc, loc_A, loc_B)
            temp_score, vacuum_loc, loc_A, loc_B = socre(action, loc_A, loc_B)
            print("Action " + str(i + 1) + ": " + action)
            performence_score = performence_score + temp_score
    print("Total score: " + str(performence_score))
def main():
    try:
        episodes = int(sys.argv[1])
        enviroment(episodes)
    except (IndexError, ValueError):
        print("Please pass the number of episodes as a command-line argument!")
if __name__ == "__main__":
    main()
'''How to run: python3 2-9.py 5 (5 is the number of episodes and can be changed)'''
| 31.768293
| 74
| 0.575816
|
7952147a024e5fdf6e83507b6ebd939c1e81db31
| 21,023
|
py
|
Python
|
huskies-worker-optimize/run.py
|
dajinchu/battlecode18
|
688f845b06867b23d60cdeddb6ccb8d0b2e0620a
|
[
"MIT"
] | null | null | null |
huskies-worker-optimize/run.py
|
dajinchu/battlecode18
|
688f845b06867b23d60cdeddb6ccb8d0b2e0620a
|
[
"MIT"
] | null | null | null |
huskies-worker-optimize/run.py
|
dajinchu/battlecode18
|
688f845b06867b23d60cdeddb6ccb8d0b2e0620a
|
[
"MIT"
] | null | null | null |
import battlecode as bc
import random
import sys
import traceback
import collections as fast
import time
import math
print("pystarting")
# A GameController is the main type that you talk to the game with.
# Its constructor will connect to a running game.
gc = bc.GameController()
print("pystarted")
# It's a good idea to try to keep your bots deterministic, to make debugging easier.
# determinism isn't required, but it means that the same things will happen in every thing you run,
# aside from turns taking slightly different amounts of time due to noise.
random.seed(6137)
# let's start off with some research!
# we can queue as much as we want.
# gc.queue_research(bc.UnitType.Rocket)
gc.queue_research(bc.UnitType.Ranger)
gc.queue_research(bc.UnitType.Ranger)
gc.queue_research(bc.UnitType.Ranger)
# disable timing logs for production code
TIMING_DISABLED = False
# SPEC CONSTANTS
REPLICATE_COST = 15
# CODING CONSTANTS
MY_TEAM = gc.team()
ENEMY_TEAM = bc.Team.Red if MY_TEAM == bc.Team.Blue else bc.Team.Blue
ALL_DIRS = list(bc.Direction)
MOVE_DIRS = list(bc.Direction)
MOVE_DIRS.remove(bc.Direction.Center)
EARTHMAP = gc.starting_map(bc.Planet.Earth)
MARSMAP = gc.starting_map(bc.Planet.Mars)
THIS_PLANETMAP = gc.starting_map(gc.planet())
HEIGHT = THIS_PLANETMAP.height
WIDTH = THIS_PLANETMAP.width
MARS_WIDTH = MARSMAP.width
MARS_HEIGHT = MARSMAP.height
EARTH_WIDTH = EARTHMAP.width
EARTH_HEIGHT = EARTHMAP.height
# Instead of instantiating new MapLocations constantly, we make them ALL at the start and recycle them
# I AM NOT SURE IF THIS ACTUALLY SAVES TIME, (doesn't appear to hurt though)
EARTH_MAPLOCATIONS = [bc.MapLocation(bc.Planet.Earth,i%EARTH_WIDTH,int(i/EARTH_WIDTH)) for i in range(EARTH_WIDTH*EARTH_HEIGHT)]
MARS_MAPLOCATIONS = [bc.MapLocation(bc.Planet.Mars,i%MARS_WIDTH,int(i/MARS_WIDTH)) for i in range(MARS_WIDTH*MARS_HEIGHT)]
def MapLocation(planetEnum,x,y):
if planetEnum == bc.Planet.Earth:
return EARTH_MAPLOCATIONS[y*EARTH_WIDTH+x]
else:
return MARS_MAPLOCATIONS[y*MARS_WIDTH+x]
def getWalls(planetmap):
impass = set()
for x in range(planetmap.width):
for y in range(planetmap.height):
if not planetmap.is_passable_terrain_at(MapLocation(planetmap.planet,x,y)):
impass.add(y*planetmap.width+x)
return impass
WATER = getWalls(EARTHMAP)
ROCKY = getWalls(MARSMAP)
THIS_PLANET_WALLS = WATER if gc.planet()==bc.Planet.Earth else ROCKY
def occupiableDirections(loc):
ret = []
for d in MOVE_DIRS:
l = loc.add(d)
if 0<=l.x<WIDTH and 0<=l.y<HEIGHT and gc.is_occupiable(l):
ret.append(d)
return ret
def randMoveDir(unit):
dirs = occupiableDirections(unit.location.map_location())
if dirs:
return random.choice(dirs)
else:
return bc.Direction.Center
def wander(unit): # pick a random movable direction:
d = randMoveDir(unit)
tryMove(unit,d)
def tryMove(unit, d):
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, d):
gc.move_robot(unit.id, d)
def senseEnemies(loc,radius2):
return gc.sense_nearby_units_by_team(loc, radius2, ENEMY_TEAM)
def senseAdjacentEnemies(loc):
return senseEnemies(loc,2)
def senseAllEnemies(planet):
return senseEnemies(MapLocation(planet,0,0),1000)
def senseAllByType(planet,unitType):
return gc.sense_nearby_units_by_type(MapLocation(planet,0,0),1000,unitType)
# For worker try place factory blueprint
# spaces out the factories intelligently
def tryBlueprintFactory(unit):
if gc.karbonite() > bc.UnitType.Factory.blueprint_cost():
dirs = MOVE_DIRS
for d in dirs:
loc = unit.location.map_location().add(d)
# check that the place we're thinking about blueprinting is not adjacent to existing factories
adjacent = gc.sense_nearby_units_by_type(loc, 2, bc.UnitType.Factory)
if not adjacent and gc.can_blueprint(unit.id, bc.UnitType.Factory, d):
gc.blueprint(unit.id, bc.UnitType.Factory, d)
return True
return False
# For Worker, try to build on nearby factory blueprints.
# return true if we built and build is still in progress
def tryBuildFactory(unit):
# First try to build directly adjacent factories
adjacent = gc.sense_nearby_units_by_type(unit.location.map_location(), 2, bc.UnitType.Factory)
factoryToBuild = None
highestFactoryHealth = 0
for factory in adjacent:
# Build the factory if it isn't already finished
if not factory.structure_is_built() and factory.health > highestFactoryHealth:
factoryToBuild = factory
highestFactoryHealth = factory.health
if factoryToBuild and gc.can_build(unit.id,factory.id):
gc.build(unit.id, factoryToBuild.id)
# return true only if factory build is still in progress
return not factory.structure_is_built()
else:
return False
# For Worker, try to mine nearby karbonite
# return true if we mined and there is still more karbonite nearby
def tryMineKarbonite(unit):
karbs = []
for loc in gc.all_locations_within(unit.location.map_location(), 2):
if gc.karbonite_at(loc) > 0:
karbs.append(loc)
if karbs:
dirTo = unit.location.map_location().direction_to(karbs[0])
if gc.can_harvest(unit.id, dirTo):
gc.harvest(unit.id, dirTo)
if len(karbs)>1 or gc.karbonite_at(karbs[0]) > 0:
return True
return False
# @param goals is a list of MapLocations denoting goal locations (to be set to 0)
# @param walls is a Set denoting the places that can't be walked through
def dijkstraMap(goals,walls):
flen = 0
# instantiate initial grid with "infinite" values
grid = [[100 for i in range(HEIGHT)] for k in range(WIDTH)]
frontier = fast.deque()
for g in goals:
frontier.append(g)
grid[g[0]][g[1]] = g[2]
flen +=1
while flen:
# pop the first
curr = frontier.popleft()
flen -= 1
# set the value in the grid
grid[curr[0]][curr[1]]=curr[2]
# check cardinal directions for locations with higher v
# add the locations to frontier if they have higher
v = curr[2]
x = curr[0]+1
y = curr[1]
if 0<=x<WIDTH and 0<=y<HEIGHT and grid[x][y] > v+1 and not (y*WIDTH+x in walls):
grid[x][y]=v+1
frontier.append([x,y,v+1])
flen += 1
x = curr[0]-1
y = curr[1]
if 0<=x<WIDTH and 0<=y<HEIGHT and grid[x][y] > v+1 and not (y*WIDTH+x in walls):
grid[x][y]=v+1
frontier.append([x,y,v+1])
flen += 1
x = curr[0]
y = curr[1]+1
if 0<=x<WIDTH and 0<=y<HEIGHT and grid[x][y] > v+1 and not (y*WIDTH+x in walls):
grid[x][y]=v+1
frontier.append([x,y,v+1])
flen += 1
x = curr[0]
y = curr[1]-1
if 0<=x<WIDTH and 0<=y<HEIGHT and grid[x][y] > v+1 and not (y*WIDTH+x in walls):
grid[x][y]=v+1
frontier.append([x,y,v+1])
flen += 1
return grid
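# Usage sketch (added for illustration, kept commented out so nothing extra runs
# during a match): goals are [x, y, start_value] triples, so a single goal at
# (0, 0) with value 0 gives walking distances from every reachable square.
#   demo_map = dijkstraMap([[0, 0, 0]], THIS_PLANET_WALLS)
#   demo_dist = demo_map[3][4]  # steps from (3, 4) to the goal; 100 means unreachable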
# Move unit down a dijkstraMap
def walkDownMap(unit, grid):
l = unit.location.map_location()
x = l.x
y = l.y
smallestLoc = [x,y]
smallest = grid[x][y]
adjacents = [[x+1,y+1],[x+1,y],[x+1,y-1],[x,y-1],[x-1,y-1],[x-1,y],[x-1,y+1],[x,y+1]]
for loc in adjacents:
if 0<=loc[0]<WIDTH and 0<=loc[1]<HEIGHT and grid[loc[0]][loc[1]] <= smallest and gc.is_occupiable(MapLocation(l.planet,loc[0],loc[1])):
smallest = grid[loc[0]][loc[1]]
smallestLoc = loc
tryMove(unit,l.direction_to(MapLocation(l.planet,smallestLoc[0],smallestLoc[1])))
# Move unit up a dijkstraMap
def walkUpMap(unit,grid):
l = unit.location.map_location()
x = l.x
y = l.y
biggestLoc = [x,y]
biggest = grid[x][y]
adjacents = [[x+1,y+1],[x+1,y],[x+1,y-1],[x,y-1],[x-1,y-1],[x-1,y],[x-1,y+1],[x,y+1]]
for loc in adjacents:
if 0<=loc[0]<WIDTH and 0<=loc[1]<HEIGHT and grid[loc[0]][loc[1]] >= biggest and gc.is_occupiable(MapLocation(l.planet,loc[0],loc[1])):
biggest = grid[loc[0]][loc[1]]
biggestLoc = loc
tryMove(unit,l.direction_to(MapLocation(l.planet,biggestLoc[0],biggestLoc[1])))
# Move unit towards goal value on dijkstraMap
def walkToValue(unit,grid,goalValue):
loc = unit.location.map_location()
x = loc.x
y = loc.y
if grid[x][y]>goalValue:
walkDownMap(unit,grid)
elif grid[x][y]<goalValue:
walkUpMap(unit,grid)
#if currvalue is goalvalue, just don't move
# Takes list of maplocations, makes map to all spots adjacent to those locations
# useful for pathing buildings into places where they can work on blueprints
def adjacentToMap(mapLocs):
goals = []
for loc in mapLocs:
adjacent = gc.all_locations_within(loc,2)
for adjLoc in adjacent:
            if (adjLoc.y*WIDTH+adjLoc.x) not in THIS_PLANET_WALLS:# and not gc.is_occupiable(MapLocation(THIS_PLANETMAP.planet,adjLoc.x,adjLoc.y)):
goals.append([adjLoc.x,adjLoc.y,0])
return dijkstraMap(goals,THIS_PLANET_WALLS)
# Build map towards enemy
def mapToEnemy(planetMap):
s = time.time()
enemies = senseAllEnemies(planetMap.planet)
enemyLocs = []
walls = set()
for f in senseAllByType(planetMap.planet, bc.UnitType.Factory):
walls.add(f.location.map_location().y*WIDTH+f.location.map_location().x)
walls.update(THIS_PLANET_WALLS)
for e in enemies:
loc = e.location.map_location()
enemyLocs.append([loc.x,loc.y,0])
if not enemies:
enemyLocs=[[unit.location.map_location().x,unit.location.map_location().y,0] for unit in THIS_PLANETMAP.initial_units if unit.team!=MY_TEAM]
m = dijkstraMap(enemyLocs,walls)
# print('\n'.join([''.join(['{:4}'.format(item) for item in row])for row in m]))
# print("build enemy map took " + str(time.time()-s))
return m
# Build map for rangers where goals are in range
def rangerMap(planetMap, atkRange):
enemies = senseAllEnemies(planetMap.planet)
enemyLocs = []
walls = set()
for f in senseAllByType(planetMap.planet, bc.UnitType.Factory):
        walls.add(f.location.map_location().y*WIDTH+f.location.map_location().x)
walls.update(THIS_PLANET_WALLS)
for e in enemies:
loc = e.location.map_location()
enemyLocs.append([loc.x,loc.y,0])
# find distances to enemy, ignoring walls, because rangers can shoot over
distMap = dijkstraMap(enemyLocs,[])
goalLocs = []
realAtkRange = int(math.sqrt(atkRange))
# now find where the distance is right for rangers
for x,col in enumerate(distMap):
for y,cell in enumerate(col):
if cell == realAtkRange:
goalLocs.append([x,y,0])
# now pathfind to those sweet spot
rangerMap = dijkstraMap(goalLocs,walls)
return rangerMap
TOTAL_EARTH_KARBONITE = 0
KARBONITE_LOCS = []
EARTH_KARBONITE_MAP = []
# Iterate through all spots to find karbonite
# count total karbonite and record their locations and amounts
def initKarbonite():
global TOTAL_EARTH_KARBONITE
for x in range(WIDTH):
for y in range(HEIGHT):
k = EARTHMAP.initial_karbonite_at(MapLocation(bc.Planet.Earth,x,y))
if k >= 5:
KARBONITE_LOCS.append([x,y,int(-k/4)])
TOTAL_EARTH_KARBONITE += k
initKarbonite()
# Build a Dijkstra Map for earth's karbonite using vision and initial
def updateKarbonite():
global TOTAL_EARTH_KARBONITE
KARBONITE_LOCS[:] = [k for k in KARBONITE_LOCS if
not gc.can_sense_location(MapLocation(bc.Planet.Earth,k[0],k[1]))
or gc.karbonite_at(MapLocation(bc.Planet.Earth,k[0],k[1]))]
for k in KARBONITE_LOCS:
TOTAL_EARTH_KARBONITE += k[2]
return dijkstraMap(KARBONITE_LOCS,WATER)
class Benchmark:
canStart = True
def __init__(self, name):
self.name = name
def start(self):
if TIMING_DISABLED:
return
if self.canStart:
self.canStart = False
self.startt = time.time()
else:
print("CALLED BENCHMARK.START AGAIN BEFORE CALLING .END()")
def end(self):
if TIMING_DISABLED:
return
print(self.name, "took ", 1000*(time.time()-self.startt), "ms")
self.canStart = True
# Create benchmarks for different parts
turnBench = Benchmark("Full turn")
enemyMapBench = Benchmark("Creating enemy map")
rangerMapBench = Benchmark("Creating ranger map")
healerMapBench = Benchmark("Creating healer map")
factoryMapBench = Benchmark("Creating factory map")
karboniteMapBench = Benchmark("Creating karbonite map")
rangerBench = Benchmark("Handling rangers")
healerBench = Benchmark("Handling healers")
factoryBench = Benchmark("Handling factories")
workerBench = Benchmark("Handling workers")
while True:
ROUND = gc.round()
# We only support Python 3, which means brackets around print()
print('pyround:', gc.round(), 'time left:', gc.get_time_left_ms(), 'ms')
turnBench.start()
# frequent try/catches are a good idea
try:
# sort our units
factories = []
factoryBlueprints = []
workers = []
rangers = []
knights = []
mages = []
healers = []
for unit in gc.my_units():
type = unit.unit_type
if type == bc.UnitType.Factory:
factories.append(unit)
if not unit.structure_is_built():
factoryBlueprints.append(unit)
elif type == bc.UnitType.Worker:
workers.append(unit)
elif type == bc.UnitType.Ranger:
rangers.append(unit)
elif type == bc.UnitType.Knight:
knights.append(unit)
elif type == bc.UnitType.Mage:
mages.append(unit)
elif type == bc.UnitType.Healer:
healers.append(unit)
numWorkers = len(workers)
numFactories = len(factories) + len(factoryBlueprints)
numRangers = len(rangers)
numHealers = len(healers)
# update the ranger atkRange, because it can change with research.
# SLIGHTLY BETTER TO UPDATE THIS JUST WHEN RESEARCH FINISHES INSTEAD OF POLLING EVERY TURN
rangerAtkRange = 0 # This is a bit clumsy, but we need at least one living ranger to query the current attack range
if rangers:
rangerAtkRange = rangers[0].attack_range()
# Refresh enemy map
enemyMapBench.start()
ENEMY_MAP = mapToEnemy(THIS_PLANETMAP)
enemyMapBench.end()
RANGER_MAP = []
if rangers:
rangerMapBench.start()
RANGER_MAP = rangerMap(THIS_PLANETMAP,rangerAtkRange)
rangerMapBench.end()
# Healer map. Directs healers to get near rangers to heal them
HEALER_MAP = []
HURT_ALLIES = []
if healers:
healerMapBench.start()
goals = []
for ally in rangers:
if ally.health < ally.max_health:
loc = ally.location.map_location()
goals.append([loc.x, loc.y, ally.health - ally.max_health])
HURT_ALLIES.append(ally.id)
HEALER_MAP = dijkstraMap(goals, THIS_PLANET_WALLS)
healerMapBench.end()
# refresh units_wanted
WORKERS_WANTED = 8 + int(TOTAL_EARTH_KARBONITE/300)
FACTORIES_WANTED = 3 + int(gc.karbonite()/500)
# refresh factory map
FACTORY_MAP = []
if factoryBlueprints:
factoryMapBench.start()
factLocs = [f.location.map_location() for f in factoryBlueprints]
FACTORY_MAP = adjacentToMap(factLocs)
factoryMapBench.end()
# refresh karbonite map
if ROUND % 10 == 1:
EARTH_KARBONITE_MAP = updateKarbonite()
rangerBench.start()
for unit in rangers:
# Ranger logic
if unit.location.is_on_map():
walkDownMap(unit,RANGER_MAP)
enemies = senseEnemies(unit.location.map_location(),unit.attack_range())
for e in enemies:
if gc.is_attack_ready(unit.id) and gc.can_attack(unit.id,e.id):
gc.attack(unit.id,e.id)
rangerBench.end()
healerBench.start()
for unit in healers:
# Healer logic
if unit.location.is_on_map():
walkDownMap(unit, HEALER_MAP)
if gc.is_heal_ready(unit.id):
for ally_id in HURT_ALLIES:
if gc.can_heal(unit.id, ally_id):
gc.heal(unit.id,ally_id)
healerBench.end()
factoryBench.start()
# Factory logic
for unit in factories:
garrison = unit.structure_garrison()
if len(garrison) > 0:
d = randMoveDir(unit)
if gc.can_unload(unit.id, d):
#print('unloaded a knight!')
gc.unload(unit.id, d)
continue
elif numWorkers == 0 and gc.can_produce_robot(unit.id, bc.UnitType.Worker):
gc.produce_robot(unit.id, bc.UnitType.Worker)
numWorkers += 1
#elif numRangers > (1+numHealers) * 8 and gc.can_produce_robot(unit.id,bc.UnitType.Healer):
# gc.produce_robot(unit.id, bc.UnitType.Healer)
# numHealers += 1
elif gc.can_produce_robot(unit.id, bc.UnitType.Ranger):
gc.produce_robot(unit.id, bc.UnitType.Ranger)
numRangers += 1
continue
factoryBench.end()
workerBench.start()
# Worker logic
for unit in workers:
if unit.location.is_on_map():
d = randMoveDir(unit)
# 1. Replicate if needed
if numWorkers < WORKERS_WANTED and gc.karbonite() > REPLICATE_COST and gc.can_replicate(unit.id,d):
gc.replicate(unit.id,d)
numWorkers += 1
# 2. look for and work on blueprints
elif tryBuildFactory(unit):
# if we worked on factory, move on to next unit
#print("worked on factory")
continue
elif FACTORY_MAP and FACTORY_MAP[unit.location.map_location().x][unit.location.map_location().y]<4:
walkDownMap(unit,FACTORY_MAP)
continue
# 0. Place blueprints if needed
elif numFactories < FACTORIES_WANTED and tryBlueprintFactory(unit):
#print('blueprinted')
numFactories += 1
continue
# 3. Look for and mine Karbonite
elif tryMineKarbonite(unit):
#print("mined")
# we mined and there's still more, stay in place and move on
continue
# 4. Walk towards karbonite
elif EARTH_KARBONITE_MAP[unit.location.map_location().x][unit.location.map_location().y]<5:
#print("walked down")
walkDownMap(unit, EARTH_KARBONITE_MAP)
# 5. Wander
else:
#print("wandered")
tryMove(unit,d)
workerBench.end()
for unit in knights:
if unit.location.is_on_map():
# Attack in range enemies
adjacent = senseAdjacentEnemies(unit.location.map_location())
for other in adjacent:
if gc.is_attack_ready(unit.id) and gc.can_attack(unit.id, other.id):
#print('attacked a thing!')
gc.attack(unit.id, other.id)
break
# Move towards enemies
walkDownMap(unit, ENEMY_MAP)
'''nearby = gc.sense_nearby_units_by_team(unit.location.map_location(), 50, ENEMY_TEAM)
for other in nearby:
tryMove(unit,unit.location.map_location().direction_to(other.location.map_location()))
wander(unit.id)
'''
# okay, there weren't any dudes around
# wander(unit.id)
except Exception as e:
print('Error:', e)
# use this to show where the error was
traceback.print_exc()
turnBench.end()
# send the actions we've performed, and wait for our next turn.
gc.next_turn()
# these lines are not strictly necessary, but it helps make the logs make more sense.
# it forces everything we've written this turn to be written to the manager.
sys.stdout.flush()
sys.stderr.flush()
| 36.882456
| 148
| 0.615754
|
795214ac1b0f6c954937dce3570a81e56050b498
| 2,424
|
py
|
Python
|
aliyun-python-sdk-adb/aliyunsdkadb/request/v20190315/ModifyAuditLogConfigRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-adb/aliyunsdkadb/request/v20190315/ModifyAuditLogConfigRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-adb/aliyunsdkadb/request/v20190315/ModifyAuditLogConfigRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkadb.endpoint import endpoint_data
class ModifyAuditLogConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'adb', '2019-03-15', 'ModifyAuditLogConfig','ads')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_DBClusterId(self):
return self.get_query_params().get('DBClusterId')
def set_DBClusterId(self,DBClusterId):
self.add_query_param('DBClusterId',DBClusterId)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_AuditLogStatus(self):
return self.get_query_params().get('AuditLogStatus')
def set_AuditLogStatus(self,AuditLogStatus):
self.add_query_param('AuditLogStatus',AuditLogStatus)
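# Illustrative usage sketch (credentials, region and cluster id are placeholders, not part of this file);
# the request is sent through the core AcsClient:
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access_key_id>', '<access_key_secret>', '<region_id>')
#   request = ModifyAuditLogConfigRequest()
#   request.set_DBClusterId('<db_cluster_id>')
#   request.set_AuditLogStatus('on')
#   response = client.do_action_with_exception(request)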
| 35.647059
| 79
| 0.775578
|
795214fd3ec7d8c8571e0714c2a1aab5aa52d3c8
| 6,096
|
py
|
Python
|
workspace/models/train_classifier.py
|
s-arora-1987/Disaster-Response-App
|
2c2a74bea481d4c52520b35d92f9344d63528ee3
|
[
"MIT"
] | null | null | null |
workspace/models/train_classifier.py
|
s-arora-1987/Disaster-Response-App
|
2c2a74bea481d4c52520b35d92f9344d63528ee3
|
[
"MIT"
] | null | null | null |
workspace/models/train_classifier.py
|
s-arora-1987/Disaster-Response-App
|
2c2a74bea481d4c52520b35d92f9344d63528ee3
|
[
"MIT"
] | null | null | null |
import sys
import pandas as pd
from sqlalchemy import create_engine
import time
import pickle
# for tokenizing
import nltk
nltk.download(['punkt', 'wordnet'])
import re
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
# for feature extraction and modeling
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.pipeline import Pipeline
from xgboost import XGBClassifier
from sklearn.metrics import classification_report
def load_data(database_filepath):
"""
Read the input database to create and return separate numpy arrays for
messages, category values, and names of categories respectively
"""
# creating sqlite engine to interact with database
engine = create_engine('sqlite:///'+database_filepath)
df = pd.read_sql_table("disaster_message_categories",engine)
# separating training and labels data and list of category names
X = df.message.values
Y_df = df[['related', 'request', 'offer', 'aid_related', 'medical_help', \
'medical_products', 'search_and_rescue', 'security', 'military',\
'child_alone', 'water', 'food', 'shelter', 'clothing', 'money', \
'missing_people', 'refugees', 'death', 'other_aid', \
'infrastructure_related', 'transport', 'buildings', 'electricity'\
, 'tools', 'hospitals', 'shops', 'aid_centers', \
'other_infrastructure', 'weather_related', 'floods', 'storm', \
'fire', 'earthquake', 'cold', 'other_weather', 'direct_report'\
]]
Y = Y_df.values
category_names=Y_df.columns
return X,Y,category_names
def tokenize(text):
"""
Perform the following steps to tokenize the input text string:
- get list of all urls using regex
- replace each url in text string with placeholder
- tokenize text using nltk
- for each token, lemmatize, normalize case, and remove leading/trailing white space
"""
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
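# Rough example of what tokenize() produces (output is approximate and depends
# on the installed nltk models):
#   tokenize("Help needed at http://example.org NOW")
#   -> ['help', 'needed', 'at', 'urlplaceholder', 'now']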
def build_model():
"""
Create a ML pipeline with a tokenizer, a feature constructor,
and a multi output classifier with either Random Forest or
XGBoost.
The RandomForest pipeline is left commented out because the
XGBoost pipeline trains faster.
"""
# create pipeline with desired transformers and ML methods
# pipeline = Pipeline([
# ('vect', CountVectorizer(tokenizer=tokenize,
# max_df=0.75,
# ngram_range=(1, 2))),
# ('tfidf', TfidfTransformer()),
# ('clf', MultiOutputClassifier(\
# RandomForestClassifier(max_features=500,
# n_estimators=100))
# )
# ])
# alternative for faster training
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(\
XGBClassifier(max_depth = 4,
n_estimators = 100,
min_child_weight = 1,
gamma = 1,
subsample = 1.0,
colsample_bytree = 1.0)
))
])
return pipeline
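# Minimal usage sketch of the returned pipeline (mirrors main() below):
#   model = build_model()
#   model.fit(X_train, Y_train)
#   Y_pred = model.predict(X_test)  # one column per category listed in load_data()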
def evaluate_model(model, X_test, Y_test, category_names):
"""
Using the input test data X_test and ground truth Y_test:
- Report the f1 score, precision and recall of the input model
for each output category.
- Report the overall accuracy of the model.
"""
# make predictions for test set
y_pred = model.predict(X_test)
for i in range(0,len(Y_test[0,:])):
print(classification_report(Y_test[:,i], y_pred[:,i]))
print("...............................................................")
print("Overall Accuracy: %.3f" %((y_pred == Y_test).mean().mean()))
return None
def save_model(model, model_filepath):
""" save the model at a desired location """
pickle.dump(model, open(model_filepath, 'wb'))
return None
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
start_time = time.time()
model.fit(X_train, Y_train)
print("--- Training finished in %s seconds ---" % (time.time() - start_time))
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
| 36.285714
| 92
| 0.605479
|
7952153ee706807605f76c0c40838fa537f241c2
| 477
|
py
|
Python
|
Test_TagParser.py
|
whaleygeek/pyparsers
|
41e6e5c807959ff18c2f625f3d9b67e669132390
|
[
"MIT"
] | null | null | null |
Test_TagParser.py
|
whaleygeek/pyparsers
|
41e6e5c807959ff18c2f625f3d9b67e669132390
|
[
"MIT"
] | null | null | null |
Test_TagParser.py
|
whaleygeek/pyparsers
|
41e6e5c807959ff18c2f625f3d9b67e669132390
|
[
"MIT"
] | 1
|
2020-07-23T14:41:12.000Z
|
2020-07-23T14:41:12.000Z
|
# Test_TagParser.py (c) 2014 D.J.Whale
#
# A TagParser is a helper for SAXP (Simple Api For XML Processing)
# that makes it much easier for beginners to use.
# Just call the parse() method to parse a file.
#
# See other test and demo programs in this folder to find out
# how to actually process data using a TagParser
import sys
import parsers.TagParser as TagParser
FILENAME = "cars.xml"
if len(sys.argv) > 1:
FILENAME = sys.argv[1]
TagParser.parse(FILENAME)
# END
| 23.85
| 66
| 0.731656
|
79521580a244cc266d7f33807bf1aa6d35cb3070
| 10,651
|
py
|
Python
|
ndm/model_cnn12_bn_w2targs.py
|
oplatek/ndm
|
d32bd9d685902d9da52b7e7abd286fb5d9c7274a
|
[
"Apache-2.0"
] | 2
|
2016-01-26T15:29:30.000Z
|
2016-12-07T23:36:23.000Z
|
ndm/model_cnn12_bn_w2targs.py
|
oplatek/ndm
|
d32bd9d685902d9da52b7e7abd286fb5d9c7274a
|
[
"Apache-2.0"
] | 1
|
2018-05-10T08:03:32.000Z
|
2018-05-10T08:03:32.000Z
|
ndm/model_cnn12_bn_w2targs.py
|
oplatek/ndm
|
d32bd9d685902d9da52b7e7abd286fb5d9c7274a
|
[
"Apache-2.0"
] | 1
|
2016-02-25T14:35:47.000Z
|
2016-02-25T14:35:47.000Z
|
#!/usr/bin/env python3
import tensorflow as tf
from tfx.bricks import embedding, dense_to_one_hot, linear, dropout, reduce_max, batch_norm_lin, conv2d_bn, \
pow_1, softmax_2d
from model import ModelW2TArgs
class Model(ModelW2TArgs):
def __init__(self, data, FLAGS):
super(Model, self).__init__(data, FLAGS)
conv_mul = 2
histories_embedding_size = 16
histories_vocabulary_length = len(data.idx2word_history)
history_length = data.train_set['histories'].shape[1]
action_templates_vocabulary_length = len(data.idx2word_action_template)
action_templates_embedding_size = 8
num_actions_arguments = data.batch_actions_arguments.shape[2]
actions_arguments_vocabulary_length = len(data.idx2word_action_arguments)
with tf.name_scope('data'):
batch_histories = tf.Variable(data.batch_histories, name='histories',
trainable=False)
batch_actions_template = tf.Variable(data.batch_actions_template, name='actions',
trainable=False)
batch_action_arguments = tf.Variable(data.batch_actions_arguments, name='actions_arguments',
trainable=False)
histories = tf.gather(batch_histories, self.batch_idx)
actions_template = tf.gather(batch_actions_template, self.batch_idx)
actions_arguments = tf.gather(batch_action_arguments, self.batch_idx)
with tf.name_scope('model'):
encoder_embedding = embedding(
input=histories,
length=histories_vocabulary_length,
size=histories_embedding_size,
name='encoder_embedding'
)
with tf.name_scope("UtterancesEncoder"):
conv3 = encoder_embedding
# conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
conv3 = conv2d_bn(
input=conv3,
filter=[1, 3, conv3.size, conv3.size * conv_mul],
phase_train=self.phase_train,
name='conv_utt_size_3_layer_1'
)
encoded_utterances = reduce_max(conv3, [2], keep_dims=True)
with tf.name_scope("HistoryEncoder"):
conv3 = encoded_utterances
conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
conv3 = conv2d_bn(
input=conv3,
filter=[3, 1, conv3.size, conv3.size * conv_mul],
phase_train=self.phase_train,
name='conv_hist_size_3_layer_1'
)
conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
conv3 = conv2d_bn(
input=conv3,
filter=[3, 1, conv3.size, conv3.size * conv_mul],
phase_train=self.phase_train,
name='conv_hist_size_3_layer_2'
)
encoded_history = reduce_max(conv3, [1, 2])
with tf.name_scope("Decoder"):
second_to_last_user_utterance = encoded_utterances[:, history_length - 3, 0, :]
last_system_utterance = encoded_utterances[:, history_length - 2, 0, :]
last_user_utterance = encoded_utterances[:, history_length - 1, 0, :]
dialogue_state = tf.concat(
1,
[
encoded_history,
last_user_utterance,
last_system_utterance,
second_to_last_user_utterance,
],
name='dialogue_state'
)
dialogue_state_size = conv3.size + \
3 * histories_embedding_size * conv_mul
dialogue_state = tf.nn.relu(dialogue_state)
dialogue_state = dropout(dialogue_state, self.dropout_keep_prob)
# action prediction
projection = linear(
input=dialogue_state,
input_size=dialogue_state_size,
output_size=dialogue_state_size,
name='linear_projection_1'
)
projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train,
name='linear_projection_1_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_size,
output_size=dialogue_state_size,
name='linear_projection_2'
)
projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train,
name='linear_projection_2_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_size,
output_size=action_templates_vocabulary_length,
name='linear_projection_3_predictions_action'
)
self.predictions_action = tf.nn.softmax(projection, name="softmax_output_prediction_action")
# argument prediction
# first embed both the decoded (predicted) action template and the true action template;
# `choice` below selects the true embedding with probability use_inputs_prob (a scheduled-sampling style mix)
choice = tf.floor(tf.random_uniform([1], self.use_inputs_prob, 1 + self.use_inputs_prob, tf.float32))
prediction_action_argmax = tf.stop_gradient(tf.argmax(self.predictions_action, 1))
predicted_action_templates_embedding = embedding(
input=prediction_action_argmax,
length=action_templates_vocabulary_length,
size=action_templates_embedding_size,
name='action_templates_embedding'
)
true_action_template_embedding = tf.gather(predicted_action_templates_embedding.embedding_table, actions_template)
predicted_action_templates_embedding = tf.stop_gradient(predicted_action_templates_embedding)
action_templates_embedding = choice * true_action_template_embedding + (1.0 - choice) * predicted_action_templates_embedding
dialogue_state_action_template = tf.concat(
1,
[
dialogue_state,
action_templates_embedding
],
name='dialogue_state_action_template'
)
dialogue_state_action_template_size = (
dialogue_state_size +
action_templates_embedding_size
)
# condition on the dialogue state and the decoded template
projection = linear(
input=dialogue_state_action_template,
input_size=dialogue_state_action_template_size,
output_size=dialogue_state_action_template_size,
name='linear_projection_1_predictions_arguments'
)
projection = batch_norm_lin(projection, dialogue_state_action_template_size, self.phase_train,
name='linear_projection_1_predictions_arguments_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_action_template_size,
output_size=dialogue_state_action_template_size,
name='linear_projection_2_predictions_arguments'
)
projection = batch_norm_lin(projection, dialogue_state_action_template_size, self.phase_train,
name='linear_projection_2_predictions_arguments_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_action_template_size,
output_size=num_actions_arguments * actions_arguments_vocabulary_length,
name='linear_projection_3_predictions_arguments'
)
self.predictions_arguments = softmax_2d(
input=projection,
n_classifiers=num_actions_arguments,
n_classes=actions_arguments_vocabulary_length,
name="softmax_2d_predictions_arguments")
if FLAGS.print_variables:
for v in tf.trainable_variables():
print(v.name)
with tf.name_scope('loss'):
one_hot_labels_action = dense_to_one_hot(actions_template, action_templates_vocabulary_length)
one_hot_labels_arguments = dense_to_one_hot(actions_arguments, actions_arguments_vocabulary_length)
loss_action = tf.reduce_mean(
- one_hot_labels_action * tf.log(tf.clip_by_value(self.predictions_action, 1e-10, 1.0)),
name='loss'
)
loss_arguments = tf.reduce_mean(
- one_hot_labels_arguments * tf.log(tf.clip_by_value(self.predictions_arguments, 1e-10, 1.0)),
name='loss'
)
self.loss = loss_action + loss_arguments
tf.scalar_summary('loss', self.loss)
with tf.name_scope('accuracy'):
correct_prediction_action = tf.equal(
tf.argmax(one_hot_labels_action, 1),
tf.argmax(self.predictions_action, 1)
)
self.accuracy_action = tf.reduce_mean(tf.cast(correct_prediction_action, 'float'))
tf.scalar_summary('accuracy_action', self.accuracy_action)
correct_prediction_arguments = tf.equal(tf.argmax(one_hot_labels_arguments, 2),
tf.argmax(self.predictions_arguments, 2))
self.accuracy_arguments = tf.reduce_mean(tf.cast(correct_prediction_arguments, 'float'))
tf.scalar_summary('accuracy_arguments', self.accuracy_arguments)
| 46.714912
| 140
| 0.579007
|
795216003cde59d0016ce71e1c30b2cc72b7e0e2
| 1,404
|
py
|
Python
|
poretools/fasta.py
|
orivej/poretools
|
567e8cd99aa5112bc77f7ee8ebdd3bfbfa142a09
|
[
"MIT"
] | null | null | null |
poretools/fasta.py
|
orivej/poretools
|
567e8cd99aa5112bc77f7ee8ebdd3bfbfa142a09
|
[
"MIT"
] | null | null | null |
poretools/fasta.py
|
orivej/poretools
|
567e8cd99aa5112bc77f7ee8ebdd3bfbfa142a09
|
[
"MIT"
] | null | null | null |
import Fast5File
import sys
def run(parser, args):
for fast5 in Fast5File.Fast5FileSet(args.files, args.group, args.basecaller_name):
if args.start_time or args.end_time:
read_start_time = fast5.get_start_time()
read_end_time = fast5.get_end_time()
if args.start_time and args.start_time > read_start_time:
fast5.close()
continue
if args.end_time and args.end_time < read_end_time:
fast5.close()
continue
fas = fast5.get_fastas(args.type)
# High-quality 2D: there are more nanopore events on the
# complement strand than on the template strand. We also
# require a 2D base-called sequence from Metrichor to be present.
if args.high_quality:
if (fast5.get_complement_events_count() <= \
fast5.get_template_events_count()) or not fast5.has_2D():
fast5.close()
continue
# Normal-quality 2D: there are fewer (or the same number of) nanopore
# events on the complement strand than on the template strand.
# We also require a 2D base-called sequence from Metrichor to be present.
if args.normal_quality:
if (fast5.get_complement_events_count() > \
fast5.get_template_events_count()) or not fast5.has_2D():
fast5.close()
continue
for fa in fas:
if fa is None or \
len(fa.seq) < args.min_length or \
(len(fa.seq) > args.max_length and \
args.max_length > 0):
continue
print fa
fast5.close()
| 28.653061
| 83
| 0.708689
|
79521738195a4d45f5ab5cefe43ea3797d2ac9eb
| 17,636
|
py
|
Python
|
src/SettingsWidget.py
|
Essyer/PoETiS
|
94560dcb2de6bb87523ed7ec08c5dffcba136315
|
[
"MIT"
] | 22
|
2020-06-13T14:08:31.000Z
|
2022-01-02T10:52:56.000Z
|
src/SettingsWidget.py
|
Essyer/PoETiS
|
94560dcb2de6bb87523ed7ec08c5dffcba136315
|
[
"MIT"
] | 9
|
2020-06-14T09:21:12.000Z
|
2022-02-14T07:44:32.000Z
|
src/SettingsWidget.py
|
Essyer/PoETiS
|
94560dcb2de6bb87523ed7ec08c5dffcba136315
|
[
"MIT"
] | 6
|
2020-06-13T17:03:16.000Z
|
2022-02-12T10:09:01.000Z
|
import os
import xml.etree.ElementTree as ElementTree
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from src.PainterWidget import PainterWidget
from src.DragWidget import DragWidget
from src.Slider import Slider
from src.utils import log_method_name, prepare_cfg, load_styles, default_league_name, xml_indent
from src.ModsContainer import CONFIG_PATH, FILTER_DIR, DEFAULT_FILTER_PATH
slider_colors = ["brown", "green", "blue", "yellow", "white"]
stash_default_text = "Add your stash here..."
class SettingsWidget(DragWidget):
configuration_changed = pyqtSignal(dict)
stash_item_change_already_called = False # Guard flag: without this check, editing the text of the last (default) row
# sends the program into an infinite loop between _process_stash_item_changed and
# _add_stash_item_to_table, probably because insertRow() in the latter fires the itemChanged signal again.
# No better solution for now; the league starts in 2 days.
active_stash = ["", 0, "normal"]
def __init__(self, painter_widget: PainterWidget):
self.painter_geometry = None
super(SettingsWidget, self).__init__()
log_method_name()
self.painter_widget = painter_widget
self.painter_widget.colors = slider_colors
self._create_slider() # Need to create if before loading configuration file to set tiles colors
self._load_cfg()
self._setup_ui()
self.painter_widget.geometry_changed.connect(self.painter_geometry_changed)
def painter_geometry_changed(self, geometry: QRect) -> None:
self.painter_geometry = geometry
self.save_cfg()
def _setup_ui(self) -> None:
log_method_name()
self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
load_styles(self)
layout_main = QVBoxLayout()
button_stashes = QPushButton("Add/remove stashes")
layout_main.addWidget(button_stashes)
self._prepare_stashes_window()
button_stashes.clicked.connect(self.window_stashes.show)
label_mod_config = QLabel("Mod Filter")
layout_main.addWidget(label_mod_config)
self.combo_mod_file = QComboBox()
self._update_mod_file_combo()
self.combo_mod_file.activated.connect(self.save_cfg)
self.combo_mod_file.installEventFilter(self)
layout_main.addWidget(self.combo_mod_file)
label_base_league_name = QLabel("League base name")
layout_main.addWidget(label_base_league_name)
self.edit_base_league_name = QLineEdit(self.league_base_name)
self.edit_base_league_name.textChanged.connect(self._update_leagues_combo)
self.edit_base_league_name.textChanged.connect(self.save_cfg)
layout_main.addWidget(self.edit_base_league_name)
label_league = QLabel("League")
layout_main.addWidget(label_league)
self.combo_league = QComboBox()
self._update_leagues_combo()
self.combo_league.currentTextChanged.connect(self.save_cfg)
layout_main.addWidget(self.combo_league)
self.button_show_account_session = QPushButton("Show/hide account name and session")
layout_main.addWidget(self.button_show_account_session)
self.label_account_name = QLabel("Account name")
layout_main.addWidget(self.label_account_name)
self.edit_account_name = QLineEdit(self.account_name)
self.edit_account_name.textChanged.connect(self.save_cfg)
layout_main.addWidget(self.edit_account_name)
self.label_session = QLabel("Session ID")
layout_main.addWidget(self.label_session)
self.edit_session = QLineEdit(self.session_id)
self.edit_session.textChanged.connect(self.save_cfg)
layout_main.addWidget(self.edit_session)
# Hide account name and session ID if any of them was provided before
if self.account_name or self.session_id:
self.hide_account_session(True)
self.button_show_account_session.clicked.connect(self.hide_account_session)
btn_adjust_net = QPushButton("Adjust net position and size")
btn_adjust_net.clicked.connect(self.painter_widget.show_hide_config)
layout_main.addWidget(btn_adjust_net)
label_session = QLabel("Item tiers to detect")
layout_main.addWidget(label_session)
layout_slider = QHBoxLayout()
self.slider.set_range(1, 5)
self.slider.set_value(self.slider_value)
load_styles(self.slider)
layout_slider.addWidget(self.slider)
layout_main.addLayout(layout_slider)
self.slider.on_value_changed_call(self.save_cfg)
self.btn_hide = QPushButton("Close")
self.btn_hide.clicked.connect(self.close)
layout_main.addWidget(self.btn_hide)
self.setLayout(layout_main)
def _prepare_stashes_window(self):
self.window_stashes = DragWidget()
self.window_stashes.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint)
load_styles(self.window_stashes)
self.window_stashes.setFixedWidth(400)  # maybe some day I will make it resize to the columns' width...
self.widget_stashes = QTableWidget(0, 2)
self.widget_stashes.itemChanged.connect(self._process_stash_item_changed)
self.widget_stashes.setHorizontalHeaderLabels(["Name", "Normal/Quad"])
self.widget_stashes.verticalHeader().hide()
header = self.widget_stashes.horizontalHeader()
header.setSectionResizeMode(0, QHeaderView.ResizeToContents)
header.setSectionResizeMode(1, QHeaderView.ResizeToContents)
self.widget_stashes.adjustSize()
self.stashes_ui = []
for stash in self.stashes:
if stash["name"]:
self._add_stash_item_to_table(stash["name"], stash["type"])
self._add_stash_item_to_table(stash_default_text, "normal")
load_styles(self.widget_stashes)
layout = QVBoxLayout()
layout.addWidget(self.widget_stashes)
close_button = QPushButton("Close")
close_button.clicked.connect(self.window_stashes.hide)
layout.addWidget(close_button)
self.window_stashes.setLayout(layout)
# I miss C++ lambdas
def _add_stash_item_to_table(self, name: str, stash_type: str) -> None:
row = self.widget_stashes.rowCount()
self.widget_stashes.insertRow(row)
table_item = QTableWidgetItem(name)
self.widget_stashes.setItem(row, 0, table_item)
group_box = QGroupBox()
box_layout = QHBoxLayout()
radio1 = QRadioButton()
radio2 = QRadioButton()
if stash_type == "normal":
radio1.setChecked(True)
else:
radio2.setChecked(True)
radio1.clicked.connect(self._process_stash_item_changed)
radio2.clicked.connect(self._process_stash_item_changed)
box_layout.addWidget(radio1)
box_layout.addStretch(1)
box_layout.addWidget(radio2)
group_box.setLayout(box_layout)
self.widget_stashes.setCellWidget(row, 1, group_box)
self.stashes_ui.append([table_item, radio1, radio2])
def _process_stash_item_changed(self):
if hasattr(self, "edit_account_name"): # verify if _setup_ui finished
self.stashes = []
stashes_to_remove = []
for index, item in enumerate(self.stashes_ui):
name = item[0].text()
stash_type = "normal" if item[1].isChecked() else "quad"
if name == "":
stashes_to_remove.append(index)
if name != stash_default_text and name != "":
self.stashes.append({"name": name, "type": stash_type})
for index in stashes_to_remove:
self.stashes_ui.remove(self.stashes_ui[index])
self.widget_stashes.removeRow(index)
if len(self.stashes_ui) == 2:
self.active_stash = [self.stashes_ui[0][0].text(), 0,
"normal" if self.stashes_ui[0][1].isChecked() else "quad"]
if self.stashes_ui:
stash_text = self.stashes_ui[-1][0].text()
if stash_text != stash_default_text and not self.stash_item_change_already_called:
self.stash_item_change_already_called = True
self._add_stash_item_to_table(stash_default_text, "normal")
self.stash_item_change_already_called = False
self.save_cfg()
def eventFilter(self, target, event) -> bool:
if target == self.combo_mod_file and event.type() == QEvent.MouseButtonPress:
self._update_mod_file_combo()
return False
def _create_slider(self) -> None:
self.slider = Slider()
def close(self) -> None:
self.hide()
self.painter_widget.hide_modification_mode()
def _update_leagues_combo(self) -> None:
self.combo_league.clear()
main_name = self.edit_base_league_name.text()
index = 0
for name in [main_name, main_name + " HC", "SSF " + main_name, "SSF " + main_name + " HC", "Standard",
"Hardcore", "SSF Hardcore", "SSF Standard"]:
self.combo_league.insertItem(index, name)
index += 1
self.combo_league.setCurrentText(self.league)
def _update_mod_file_combo(self) -> None:
text_bak = None
if self.combo_mod_file.currentText():
text_bak = self.combo_mod_file.currentText()
self.combo_mod_file.clear()
filters = os.listdir(FILTER_DIR)
# there is probably a better way to exclude gitignore...
ignore = ["mods_empty.xml", ".gitignore"]
for f in filters:
if f not in ignore and os.path.isfile(FILTER_DIR + f):
self.combo_mod_file.addItem(f)
if text_bak:
print(text_bak)
self.combo_mod_file.setCurrentText(os.path.basename(text_bak))
else:
self.combo_mod_file.setCurrentText(os.path.basename(self.mod_file))
def _load_cfg(self) -> None:
log_method_name()
if not os.path.isfile(CONFIG_PATH):
prepare_cfg(CONFIG_PATH)
tree = ElementTree.parse(CONFIG_PATH)
root = tree.getroot()
self.account_name = self._cfg_load_or_default(root, "account_name")
stashes_nodes = self._cfg_load_stashes(root)
self.stashes = []
for stash in stashes_nodes:
self.stashes.append(stash)
if self.stashes:
self.active_stash = [self.stashes[0]["name"], 0, self.stashes[0]["type"]]
# mod_file should probably be validated upon loading (for existence)
self.mod_file = self._cfg_load_or_default(root, "mod_file", DEFAULT_FILTER_PATH)
self.league = self._cfg_load_or_default(root, "league")
self.league_base_name = self._cfg_load_or_default(root, "league_base_name", default_league_name)
self.session_id = self._cfg_load_or_default(root, "session_id")
self.stash_type = self._cfg_load_or_default(root, "stash_type", "quad")
self.painter_widget.stash_type = self.stash_type
self._set_values_from_cfg()
def _set_values_from_cfg(self) -> None:
tree = ElementTree.parse(CONFIG_PATH)
root = tree.getroot()
slider_color1 = self._cfg_load_or_default(root, "slider_color1", "brown")
slider_color2 = self._cfg_load_or_default(root, "slider_color2", "blue")
slider_color3 = self._cfg_load_or_default(root, "slider_color3", "green")
slider_color4 = self._cfg_load_or_default(root, "slider_color4", "yellow")
slider_color5 = self._cfg_load_or_default(root, "slider_color5", "white")
colors = [slider_color1, slider_color2, slider_color3, slider_color4, slider_color5]
self.painter_widget.colors = colors
self.slider.set_colors(colors)
self.slider_value = int(self._cfg_load_or_default(root, "slider_value", "1"))
# maybe just read this from settings widget at run time?
self.painter_widget.number_of_mods_to_draw = self.slider_value
self.main_widget_y = int(self._cfg_load_or_default(root, "main_widget_y", "0"))
painter_x = int(self._cfg_load_or_default(root, "painter_x", "250"))
painter_y = int(self._cfg_load_or_default(root, "painter_y", "250"))
painter_w = int(self._cfg_load_or_default(root, "painter_w", "500"))
painter_h = int(self._cfg_load_or_default(root, "painter_h", "500"))
painter_geometry = QRect(painter_x, painter_y, painter_w, painter_h)
self.painter_widget.setGeometry(painter_geometry)
self.painter_widget.setFixedWidth(painter_geometry.width())
self.painter_widget.setFixedHeight(painter_geometry.height())
def save_cfg(self) -> None:
log_method_name()
tree = ElementTree.parse(CONFIG_PATH)
root = tree.getroot()
self._cfg_set_or_create(root, "account_name", self.edit_account_name.text())
self._cfg_save_stashes(root)
self._cfg_set_or_create(root, "mod_file", FILTER_DIR + self.combo_mod_file.currentText())
self._cfg_set_or_create(root, "league_base_name", self.edit_base_league_name.text())
self._cfg_set_or_create(root, "league", self.combo_league.currentText())
self._cfg_set_or_create(root, "session_id", self.edit_session.text())
self._cfg_set_or_create(root, "main_widget_y", str(self.main_widget_y))
if hasattr(self, "slider"):
self._cfg_set_or_create(root, "slider_value", str(self.slider.value))
if self.painter_geometry:
self._cfg_set_or_create(root, "painter_x", str(self.painter_geometry.x()))
self._cfg_set_or_create(root, "painter_y", str(self.painter_geometry.y()))
self._cfg_set_or_create(root, "painter_w", str(self.painter_geometry.width()))
self._cfg_set_or_create(root, "painter_h", str(self.painter_geometry.height()))
xml_indent(root)
tree.write(CONFIG_PATH)
# Painter already notifies us about size/position changes through signal,
# I don't know how to do bidirectional signaling so I'm setting values by reference
self.painter_widget.stash_type = self.stashes[0]["type"] if self.stashes else "normal"
if hasattr(self, "slider"):
self.painter_widget.number_of_mods_to_draw = self.slider.value
self.painter_widget.update()
if self.stashes and not self.active_stash[0]:
self.active_stash = [self.stashes[0]["name"], 0, self.stashes[0]["type"]]
self.configuration_changed.emit(self.get_settings_for_requester()) # Notify Requester
@staticmethod
def _cfg_set_or_create(root: ElementTree, match: str, new_value: str) -> None:
ele = root.find(match)
if ele is None:
ele = ElementTree.SubElement(root, match)
ele.text = new_value
@staticmethod
def _cfg_load_or_default(root: ElementTree, match: str, default="") -> str:
ele = root.find(match)
if ele is None:
return default
return ele.text
@staticmethod
def _cfg_load_stashes(root: ElementTree) -> list:
stashes = root.find('stashes')
if stashes is None:
return [{'name': "", 'type': "normal"}]
return [{'name': x.text, 'type': x.attrib['type']} for x in list(stashes)]
def _cfg_save_stashes(self, root: ElementTree) -> None:
stashes = root.find('stashes')
if stashes is None:
stashes = ElementTree.SubElement(root, "stashes")
for child in list(stashes):
stashes.remove(child)
for stash in self.stashes:
if stash["name"]:
node = ElementTree.SubElement(stashes, "stash")
node.text = stash["name"]
node.set("type", stash["type"])
def get_settings_for_requester(self) -> dict:
return {
"account_name": self.edit_account_name.text(),
"stash_name": self.active_stash[0],
"league": self.combo_league.currentText(),
"session_id": self.edit_session.text(),
"mod_file": FILTER_DIR + self.combo_mod_file.currentText()
}
def hide_account_session(self, force_hide=False):
if self.edit_account_name.isVisible() or force_hide:
self.edit_account_name.hide()
self.label_account_name.hide()
self.edit_session.hide()
self.label_session.hide()
self.adjustSize()
else:
self.edit_account_name.show()
self.label_account_name.show()
self.edit_session.show()
self.label_session.show()
self.adjustSize()
def set_next_active_stash(self):
index = self.active_stash[1]+1
if index < len(self.stashes):
self.active_stash = [self.stashes[index]["name"], index, self.stashes[index]["type"]]
else:
self.active_stash = [self.stashes[0]["name"], 0, self.stashes[0]["type"]]
self.painter_widget.stash_type = self.active_stash[2]
def set_prev_active_stash(self):
index = self.active_stash[1] - 1
if index >= 0:
self.active_stash = [self.stashes[index]["name"], index, self.stashes[index]["type"]]
else:
index = len(self.stashes) - 1
self.active_stash = [self.stashes[index]["name"], index, self.stashes[index]["type"]]
self.painter_widget.stash_type = self.active_stash[2]
| 44.761421
| 120
| 0.668009
|
79521858b55aa4286dd34289236842bf4db7125b
| 392
|
py
|
Python
|
espnApi/wsgi.py
|
thexdesk/ESPN-API
|
debaf328d385c688f90dbb96703244f87da3c100
|
[
"MIT"
] | null | null | null |
espnApi/wsgi.py
|
thexdesk/ESPN-API
|
debaf328d385c688f90dbb96703244f87da3c100
|
[
"MIT"
] | 3
|
2020-06-05T17:12:59.000Z
|
2021-06-10T18:09:18.000Z
|
espnApi/wsgi.py
|
thexdesk/ESPN-API
|
debaf328d385c688f90dbb96703244f87da3c100
|
[
"MIT"
] | 1
|
2020-02-09T08:17:18.000Z
|
2020-02-09T08:17:18.000Z
|
"""
WSGI config for espnApi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "espnApi.settings")
application = get_wsgi_application()
| 23.058824
| 78
| 0.785714
|
79521861e9154a4b637c14aaedcd2a85bfae066d
| 6,834
|
py
|
Python
|
muddery/worlddata/editor/form_view.py
|
noahzaozao/muddery
|
294da6fb73cb04c62e5ba6eefe49b595ca76832a
|
[
"BSD-3-Clause"
] | null | null | null |
muddery/worlddata/editor/form_view.py
|
noahzaozao/muddery
|
294da6fb73cb04c62e5ba6eefe49b595ca76832a
|
[
"BSD-3-Clause"
] | null | null | null |
muddery/worlddata/editor/form_view.py
|
noahzaozao/muddery
|
294da6fb73cb04c62e5ba6eefe49b595ca76832a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This file checks the user's edit actions and puts the changes into the db.
"""
from django import http
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.conf import settings
from evennia.utils import logger
from muddery.utils.exception import MudderyError
from worlddata import forms
class FormView(object):
"""
This object deal with common forms and views.
"""
def __init__(self, form_name, request):
"""
Set form name and request.
Args:
form_name: model form's name
request: http request
Returns:
None
"""
self.form_name = form_name
self.request = request
# get request data
if self.request.POST:
self.request_data = self.request.POST
else:
self.request_data = self.request.GET
self.files = self.request.FILES
# initialize values
self.valid = None
self.form_class = None
self.form = None
self.page = None
self.record = None
self.key = None
self.template_file = None
self.error = None
def is_valid(self):
"""
Validate the request.
Returns:
boolean: is valid
"""
if self.valid is None:
# If the request has not been parsed, parse it.
self.valid = self.parse_request()
return self.valid
def parse_request(self):
"""
Parse request data.
Returns:
boolean: Parse success.
"""
# record's page and id
self.page = self.request_data.get("_page", None)
self.record = self.request_data.get("_record", None)
# Get form's class.
try:
self.form_class = forms.Manager.get_form(self.form_name)
except Exception, e:
logger.log_tracemsg("Invalid form %s: %s." % (self.form_name, e))
self.error = "Invalid form: %s." % self.form_name
return False
# Get template file's name.
self.template_file = getattr(self.form_class.Meta, "form_template", settings.DEFUALT_FORM_TEMPLATE)
return True
def query_view_data(self):
"""
Get db instance for view.
Returns:
None
"""
if not self.valid:
raise MudderyError("Invalid form: %s." % self.form_name)
self.form = None
self.key = None
if self.record:
try:
# Query record's data.
instance = self.form_class.Meta.model.objects.get(pk=self.record)
self.form = self.form_class(instance=instance)
self.key = getattr(instance, "key", None)
except Exception, e:
self.form = None
if not self.form:
# Get empty data.
self.form = self.form_class()
def get_context(self):
"""
Get render context.
Returns:
context
"""
if not self.valid:
raise MudderyError("Invalid form: %s." % self.form_name)
# Query data.
if not self.form:
self.query_view_data()
verbose_name = self.form_class.Meta.model._meta.verbose_name_plural
context = {"data": self.form,
"title": verbose_name,
"desc": getattr(self.form_class.Meta, "desc", verbose_name),
"can_delete": (self.record is not None)}
if self.record:
context["record"] = self.record
if self.page:
context["page"] = self.page
return context
def view_form(self):
"""
Show the edit form of a record.
Returns:
HttpResponse
"""
if not self.valid:
raise MudderyError("Invalid form: %s." % self.form_name)
context = self.get_context()
return render(self.request, self.template_file, context)
def query_submit_data(self):
"""
Get db instance to submit a record.
Returns:
None
"""
if not self.valid:
raise MudderyError("Invalid form: %s." % self.form_name)
self.form = None
self.key = None
if self.record:
try:
# Query existing record's data.
instance = self.form_class.Meta.model.objects.get(pk=self.record)
self.form = self.form_class(self.request_data, self.files, instance=instance)
self.key = getattr(instance, "key", None)
except Exception, e:
self.form = None
if not self.form:
# Create new data.
self.form = self.form_class(self.request_data, self.files)
def submit_form(self):
"""
Edit a record.
Returns:
HttpResponse
"""
if not self.is_valid():
raise MudderyError("Invalid form: %s." % self.form_name)
# Query data.
if not self.form:
self.query_submit_data()
# Save data
if self.form.is_valid():
instance = self.form.save()
self.record = instance.pk
try:
self.key = instance.key
except Exception, e:
pass
if "_save" in self.request_data:
# Save and quit.
return self.quit_form()
# Save and continue editing.
return self.view_form()
def add_form(self):
"""
Add a record.
Returns:
HttpResponse
"""
return self.view_form()
def quit_form(self):
"""
Quit a form without saving.
Returns:
HttpResponse
"""
self.parse_request()
try:
# Back to record list.
# Parse list's url from the request path.
pos = self.request.path.rfind("/")
if pos > 0:
url = self.request.path[:pos] + "/list.html"
if self.page:
url += "?_page=" + str(self.page)
return HttpResponseRedirect(url)
except Exception, e:
logger.log_tracemsg("Quit form error: %s" % e)
raise http.Http404
def delete_form(self):
"""
Delete a record.
Returns:
HttpResponse
"""
if not self.is_valid():
raise MudderyError("Invalid form: %s." % self.form_name)
# Delete record.
if self.record:
try:
instance = self.form_class.Meta.model.objects.get(pk=self.record)
instance.delete()
except Exception, e:
pass
return self.quit_form()
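# Illustrative wiring from a Django view function (the view name is hypothetical,
# not defined in this module):
#   def edit_record(request, form_name):
#       view = FormView(form_name, request)
#       if not view.is_valid():
#           raise http.Http404
#       return view.submit_form() if request.POST else view.view_form()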
| 25.984791
| 107
| 0.530436
|
79521937d550d38bcbb25bfdfcfbad0eca232ff5
| 47
|
py
|
Python
|
exercicio9.py
|
llucaslopes/infosatc-lp-avaliativo-02
|
91708c7ec519bbde591ccd713002f80d37ec77c5
|
[
"MIT"
] | null | null | null |
exercicio9.py
|
llucaslopes/infosatc-lp-avaliativo-02
|
91708c7ec519bbde591ccd713002f80d37ec77c5
|
[
"MIT"
] | null | null | null |
exercicio9.py
|
llucaslopes/infosatc-lp-avaliativo-02
|
91708c7ec519bbde591ccd713002f80d37ec77c5
|
[
"MIT"
] | null | null | null |
lista = [9,2,3,4,7,10,66]
print(sorted(lista))
| 15.666667
| 25
| 0.638298
|
795219e1ad630d9943a74dae1b73450301411b15
| 607
|
py
|
Python
|
examples/ColorExample.py
|
melopero/Melopero_APDS-9960
|
9006454704abb7bea47435b58448133dc888d34f
|
[
"MIT"
] | null | null | null |
examples/ColorExample.py
|
melopero/Melopero_APDS-9960
|
9006454704abb7bea47435b58448133dc888d34f
|
[
"MIT"
] | null | null | null |
examples/ColorExample.py
|
melopero/Melopero_APDS-9960
|
9006454704abb7bea47435b58448133dc888d34f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Leonardo La Rocca
"""
import time
import melopero_apds9960 as mp
def main():
device = mp.APDS_9960()
device.reset()
device.enable_als_engine()
device.set_als_integration_time(450)
saturation = device.get_saturation()
device.wake_up()
while True:
time.sleep(.5)
color = device.get_color_data()
color = map(lambda val : val / saturation * 255, color)
print(f"Alfa: {next(color)} Red: {next(color)} Green: {next(color)} Blue: {next(color)}")
if __name__ == "__main__":
main()
| 20.931034
| 100
| 0.62603
|
79521ada66eaa6fade9aac51bed690b64154e3dc
| 4,387
|
py
|
Python
|
source/libLinear.py
|
networkdynamics/attribution-extraction
|
5f4420769ea74f1c358ff8a2423fa8ca9083b4fc
|
[
"MIT"
] | 3
|
2018-03-08T21:38:42.000Z
|
2020-05-01T14:14:22.000Z
|
source/libLinear.py
|
networkdynamics/attribution-extraction
|
5f4420769ea74f1c358ff8a2423fa8ca9083b4fc
|
[
"MIT"
] | null | null | null |
source/libLinear.py
|
networkdynamics/attribution-extraction
|
5f4420769ea74f1c358ff8a2423fa8ca9083b4fc
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import os
from sklearn import linear_model
from sklearn.cross_validation import train_test_split
from time import time
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import operator
from sklearn.externals import joblib
import sys
data_dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'data/'))
def testData():
logreg = joblib.load('sourceTrainer2.pkl')
#df = pd.read_csv(os.path.join(data_dir, "sourceEntityTrainingSet1.csv"), header=None, usecols=range(34))
#df = pd.read_csv(os.path.join(data_dir, "PARCdevSourceSpanFeatures.csv"), header=None, usecols=range(34))
df = pd.read_csv(os.path.join(data_dir, "testSpanFeatures7.csv"), header=None, usecols=range(35))
#df = pd.read_csv(os.path.join(data_dir, "sourceEntityMatchDev1.csv"), header=None, usecols=range(38))
#df = pd.read_csv(os.path.join(data_dir, "xab"), header=None, usecols=range(38))
labels = 'containsPerson,featNextPunctE,featNextPunctQ,featNextQuoteE,featNextSpeakerE,featNextSpeakerQ,featNextVerbE,featNextVerbQ,featPrevPunctE,featPrevPunctQ,featPrevQuoteE,featPrevSpeakerE,featPrevSpeakerQ,featPrevVerbE,featPrevVerbQ,isNSubj,isNSubjVC,numEntityBetween,numMentionsPrevPar,numMentionsThisPar,numQuotesAttributed,numQuotesBetween,numQuotesOtherSpeakers,numQuotesParag,numQuotesPrev9Parag,numWordsBetween,numWordsParag,numWordsPrev9Parag,otherSpeakerMentioned,quoteDistanceFromParag,sameSentence,thisSpeakerMentioned,wordLengthQuote'
df[0] = pd.Categorical(df[0]).codes
df[1] = df[1].map({'Y': 1, 'N': 0})
print labels
print df
df = df.sort_values(by = 0, ascending=False)
df = df.values
newFeats = np.split(df, np.where(np.diff(df[:,0]))[0]+1)
y_test = [[feat[:, 1]] for feat in newFeats]
x_test = newFeats
print logreg.coef_
maxcoef = np.argmax(logreg.coef_[0])
print maxcoef
labels = labels.split(',')
print "most important feat"
print labels[maxcoef]
print "all feats sorted"
indicesSorted = np.argsort(logreg.coef_[0])
print indicesSorted
for index in indicesSorted:
print labels[index]
flatXtest = np.array([item for sublist in x_test for item in sublist])
print flatXtest[:3]
flatYtest = flatXtest[:,1]
flatXtest = np.delete(flatXtest, [0,1], 1)
t0 = time()
print("Predicting entity")
y_pred = logreg.predict(flatXtest)
print y_pred[:10]
print("done in %0.3fs" % (time() - t0))
print(classification_report(flatYtest, y_pred))
print(confusion_matrix(flatYtest, y_pred))
print x_test[0]
print y_test[0]
print len(x_test[0])
print len(y_test[0])
total = 0
correct = 0
for i, elem in enumerate(x_test):
elem = np.delete(elem, [0,1], 1)
arrayProbas = logreg.predict_proba(elem)
# ... compute some result based on item ...
positics = np.delete(arrayProbas, 0, 1)
maxval = np.argmax(positics)
pred = y_test[i][0][maxval]
if pred == 1:
correct = correct + 1
total = total + 1
print correct
print total
print float(correct)/float(total)
# find a way to print the actual prediction score
# for how many quotes the top prediction score is correct
def trainData():
#df = pd.read_csv(os.path.join(data_dir, "sourceEntityTrainingSet1.csv"), header=None, usecols=range(34))
#df = pd.read_csv(os.path.join(data_dir, "PARCtrainSourceSpanFeatures.csv"), header=None, usecols=range(34))
df = pd.read_csv(os.path.join(data_dir, "trainSpanFeatures7.csv"), header=None, usecols=range(35))
#df = pd.read_csv(os.path.join(data_dir, "xaa"), header=None, usecols=range(38))
df[0] = pd.Categorical(df[0]).codes
print 'here'
df[1] = df[1].map({'Y': 1, 'N': 0})
print df
df = df.sort_values(by = 0, ascending=False)
df = df.values
newFeats = np.split(df, np.where(np.diff(df[:,0]))[0]+1)
y_train = [[feat[:, 1]] for feat in newFeats]
x_train = newFeats
flatXtrain = np.array([item for sublist in x_train for item in sublist])
flatYtrain = flatXtrain[:,1]
flatXtrain = np.delete(flatXtrain, [0,1], 1)
logreg = linear_model.LogisticRegression()
logreg = logreg.fit(flatXtrain, flatYtrain)
joblib.dump(logreg, 'sourceTrainer2.pkl')
def main():
print sys.argv
if sys.argv[1] == '-test':
testData()
elif sys.argv[1] == '-train':
trainData()
else:
print 'Use of this command line is: python source/libLinearTests.py -test or -train'
#labelData()
if __name__ == '__main__':
main()
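# Command-line usage implied by main() (paths are examples only):
#   python source/libLinear.py -train   # fit and pickle the logistic regression
#   python source/libLinear.py -test    # load the pickle and score the test spans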
| 26.75
| 552
| 0.731707
|
79521aee37539c48a19e90939a05815c894b46e4
| 270
|
py
|
Python
|
ExamBankSpider/ExamBankSpider/items.py
|
Cary123/ExamSimulation
|
2d3f475457fed817063f7ab1c2b347a2dd2f0b07
|
[
"MIT"
] | null | null | null |
ExamBankSpider/ExamBankSpider/items.py
|
Cary123/ExamSimulation
|
2d3f475457fed817063f7ab1c2b347a2dd2f0b07
|
[
"MIT"
] | null | null | null |
ExamBankSpider/ExamBankSpider/items.py
|
Cary123/ExamSimulation
|
2d3f475457fed817063f7ab1c2b347a2dd2f0b07
|
[
"MIT"
] | null | null | null |
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class ExambankspiderItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| 20.769231
| 53
| 0.722222
|
79521b2f6ec460168c86a05be6eef2befb906eae
| 1,284
|
py
|
Python
|
sandbox/compare_hypnograms_second_rater.py
|
skjerns/NT1-HRV
|
cb6de312f6b2710c4d059bb2a4638b053617c2f7
|
[
"MIT"
] | 1
|
2022-03-06T03:32:15.000Z
|
2022-03-06T03:32:15.000Z
|
sandbox/compare_hypnograms_second_rater.py
|
skjerns/NT1-HRV
|
cb6de312f6b2710c4d059bb2a4638b053617c2f7
|
[
"MIT"
] | null | null | null |
sandbox/compare_hypnograms_second_rater.py
|
skjerns/NT1-HRV
|
cb6de312f6b2710c4d059bb2a4638b053617c2f7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 14:48:42 2020
@author: skjerns
"""
import config as cfg
import ospath
import shutil
import sleep_utils
import numpy as np
from sklearn.metrics import cohen_kappa_score
import matplotlib.pyplot as plt
files = ospath.list_files(cfg.folder_edf, exts=['hypno'])
accuracy = []
kohen = []
a=[]
b=[]
for file in files:
if ospath.exists(file.replace('.hypno', '.txt')):
hypno1 = sleep_utils.read_hypnogram(file)
hypno2 = sleep_utils.read_hypnogram(file.replace('.hypno', '.txt'))
minlen = min(len(hypno1), len(hypno2))
hypno1 = hypno1[:minlen]
hypno2 = hypno2[:minlen]
accuracy.append(np.mean(hypno1==hypno2))
kohen.append(cohen_kappa_score(hypno1, hypno2))
hypno1[0]=5
labels = {0: 'W', 4: 'REM', 1: 'S1', 2: 'S2', 3: 'SWS', 5: 'A'}
if accuracy[-1]>0.65:continue
a.append(accuracy[-1])
b.append(kohen[-1])
fig, axs = plt.subplots(2,1)
sleep_utils.plot_hypnogram(hypno1, ax=axs[0], labeldict=labels)
sleep_utils.plot_hypnogram(hypno2, ax=axs[1], labeldict=labels)
plt.title('Second rater')
plt.suptitle(f'{file} acc {accuracy[-1]:.2f}, kohen {kohen[-1]:.2f}, {ospath.basename(file)}')
| 27.913043
| 102
| 0.623832
|
79521b3e91a75839a6404eae8d5077983c23d7e9
| 152
|
py
|
Python
|
packages/core/minos-microservice-common/minos/common/injections/__init__.py
|
bhardwajRahul/minos-python
|
bad7a280ad92680abdeab01d1214688279cf6316
|
[
"MIT"
] | 247
|
2022-01-24T14:55:30.000Z
|
2022-03-25T12:06:17.000Z
|
packages/core/minos-microservice-common/minos/common/injections/__init__.py
|
bhardwajRahul/minos-python
|
bad7a280ad92680abdeab01d1214688279cf6316
|
[
"MIT"
] | 168
|
2022-01-24T14:54:31.000Z
|
2022-03-31T09:31:09.000Z
|
packages/core/minos-microservice-common/minos/common/injections/__init__.py
|
bhardwajRahul/minos-python
|
bad7a280ad92680abdeab01d1214688279cf6316
|
[
"MIT"
] | 21
|
2022-02-06T17:25:58.000Z
|
2022-03-27T04:50:29.000Z
|
from .decorators import (
Inject,
Injectable,
)
from .injectors import (
DependencyInjector,
)
from .mixins import (
InjectableMixin,
)
| 13.818182
| 25
| 0.684211
|
79521be3865602edec02b7c9f4447ca2d6e364e4
| 686
|
py
|
Python
|
app/core/migrations/0003_ingredient.py
|
aadnekar/recipe-app-api
|
c1c524a7dbd3fb73e00e90effc455c3fc66155fe
|
[
"MIT"
] | null | null | null |
app/core/migrations/0003_ingredient.py
|
aadnekar/recipe-app-api
|
c1c524a7dbd3fb73e00e90effc455c3fc66155fe
|
[
"MIT"
] | null | null | null |
app/core/migrations/0003_ingredient.py
|
aadnekar/recipe-app-api
|
c1c524a7dbd3fb73e00e90effc455c3fc66155fe
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2019-12-30 00:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.583333
| 118
| 0.618076
|
79521c64a3cca8100e056ee252d71787844a41f0
| 1,458
|
py
|
Python
|
src/perf/data_pipe_perf/py_curio_queue_perf.py
|
random-python/data_pipe
|
e64fbbdb04f9cd43f7f3e58688c4ac1e2c2bbb45
|
[
"Apache-2.0"
] | 14
|
2020-02-08T06:27:09.000Z
|
2021-06-02T07:35:09.000Z
|
src/perf/data_pipe_perf/py_curio_queue_perf.py
|
random-python/data_pipe
|
e64fbbdb04f9cd43f7f3e58688c4ac1e2c2bbb45
|
[
"Apache-2.0"
] | null | null | null |
src/perf/data_pipe_perf/py_curio_queue_perf.py
|
random-python/data_pipe
|
e64fbbdb04f9cd43f7f3e58688c4ac1e2c2bbb45
|
[
"Apache-2.0"
] | null | null | null |
"""
"""
import gc
import time
import curio
count = int(1e5) # number of objects to transfer
async def buffer_perf():
gc.collect() # start with clean memory
source = [index for index in range(count)] # pre-allocate data source
target = [None for index in range(count)] # pre-allocate data target
async def producer(queue):
for value in source:
await queue.put(value)
await queue.put(None)
async def consumer(queue):
index = 0
while True:
value = await queue.get()
if value is None:
break
target[index] = value
index += 1
async def transfer():
queue = curio.Queue(maxsize=256)
writer = await curio.spawn(producer, queue)
reader = await curio.spawn(consumer, queue)
await writer.join()
await reader.join()
time_start = time.time()
await transfer()
time_finish = time.time()
time_diff = time_finish - time_start
assert source == target # verify data integrity
return time_diff
def invoke_perf(session_size:int=3):
for session in range(session_size):
print(f"--- session={session} ---")
time_diff = curio.run(buffer_perf) # total test time
time_unit = int(1e6 * time_diff / count) # per-unit test time, microseconds
print(f"count={count} time_diff={time_diff:.3f} time_unit={time_unit} micro")
invoke_perf()
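# Usage note (sketch): `invoke_perf(session_size=5)` runs more benchmark sessions;
# the module-level `count` above controls how many objects are pushed through the
# curio.Queue per session.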
| 25.137931
| 85
| 0.616598
|
79521cc5e4138c18652fe17a25095056ea6ce387
| 2,025
|
py
|
Python
|
platformio/builder/scripts/frameworks/energia.py
|
eiginn/platformio
|
33502f82f26e731f5bdc38c1ea6b17d1565dedd3
|
[
"Apache-2.0"
] | null | null | null |
platformio/builder/scripts/frameworks/energia.py
|
eiginn/platformio
|
33502f82f26e731f5bdc38c1ea6b17d1565dedd3
|
[
"Apache-2.0"
] | null | null | null |
platformio/builder/scripts/frameworks/energia.py
|
eiginn/platformio
|
33502f82f26e731f5bdc38c1ea6b17d1565dedd3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014-2016 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Energia
Energia framework enables pretty much anyone to start easily creating
microcontroller-based projects and applications. Its easy-to-use libraries
and functions allow developers of all experience levels to start
blinking LEDs, buzzing buzzers and sensing sensors more quickly than ever
before.
http://energia.nu/reference/
"""
from os.path import join
from SCons.Script import DefaultEnvironment
env = DefaultEnvironment()
env.Replace(
PLATFORMFW_DIR=join("$PIOPACKAGES_DIR", "framework-energia${PLATFORM[2:]}")
)
ENERGIA_VERSION = int(
open(join(env.subst("$PLATFORMFW_DIR"),
"version.txt")).read().replace(".", "").strip())
# include board variant
env.VariantDirWrap(
join("$BUILD_DIR", "FrameworkEnergiaVariant"),
join("$PLATFORMFW_DIR", "variants", "${BOARD_OPTIONS['build']['variant']}")
)
env.Append(
CPPDEFINES=[
"ARDUINO=101",
"ENERGIA=%d" % ENERGIA_VERSION
],
CPPPATH=[
join("$BUILD_DIR", "FrameworkEnergia"),
join("$BUILD_DIR", "FrameworkEnergiaVariant")
]
)
if env.get("BOARD_OPTIONS", {}).get("build", {}).get("core") == "lm4f":
env.Append(
LINKFLAGS=["-Wl,--entry=ResetISR"]
)
#
# Target: Build Core Library
#
libs = []
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "FrameworkEnergia"),
join("$PLATFORMFW_DIR", "cores", "${BOARD_OPTIONS['build']['core']}")
))
env.Append(LIBS=libs)
| 27
| 79
| 0.700741
|
79521e2c42f18ed882ceaf860c93f1b40b871469
| 10,746
|
py
|
Python
|
pyscf/cc/gccsd_rdm.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | null | null | null |
pyscf/cc/gccsd_rdm.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | null | null | null |
pyscf/cc/gccsd_rdm.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
# Jun Yang <junyang4711@gmail.com>
#
import time
import numpy
from pyscf import lib
from pyscf.lib import logger
#einsum = numpy.einsum
einsum = lib.einsum
def _gamma1_intermediates(mycc, t1, t2, l1, l2):
doo =-einsum('ie,je->ij', l1, t1)
doo -= einsum('imef,jmef->ij', l2, t2) * .5
dvv = einsum('ma,mb->ab', t1, l1)
dvv += einsum('mnea,mneb->ab', t2, l2) * .5
xt1 = einsum('mnef,inef->mi', l2, t2) * .5
xt2 = einsum('mnfa,mnfe->ae', t2, l2) * .5
xt2 += einsum('ma,me->ae', t1, l1)
dvo = einsum('imae,me->ai', t2, l1)
dvo -= einsum('mi,ma->ai', xt1, t1)
dvo -= einsum('ie,ae->ai', t1, xt2)
dvo += t1.T
dov = l1
return doo, dov, dvo, dvv
# gamma2 intermediates in Chemist's notation
# When computing intermediates, the convention
# dm2[q,p,s,r] = <p^\dagger r^\dagger s q> is assumed in this function.
# It changes to dm2[p,q,r,s] = <p^\dagger r^\dagger s q> in _make_rdm2
def _gamma2_intermediates(mycc, t1, t2, l1, l2):
tau = t2 + einsum('ia,jb->ijab', t1, t1) * 2
miajb = einsum('ikac,kjcb->iajb', l2, t2)
goovv = 0.25 * (l2.conj() + tau)
tmp = einsum('kc,kica->ia', l1, t2)
goovv += einsum('ia,jb->ijab', tmp, t1)
tmp = einsum('kc,kb->cb', l1, t1)
goovv += einsum('cb,ijca->ijab', tmp, t2) * .5
tmp = einsum('kc,jc->kj', l1, t1)
goovv += einsum('kiab,kj->ijab', tau, tmp) * .5
tmp = numpy.einsum('ldjd->lj', miajb)
goovv -= einsum('lj,liba->ijab', tmp, tau) * .25
tmp = numpy.einsum('ldlb->db', miajb)
goovv -= einsum('db,jida->ijab', tmp, tau) * .25
goovv -= einsum('ldia,ljbd->ijab', miajb, tau) * .5
tmp = einsum('klcd,ijcd->ijkl', l2, tau) * .25**2
goovv += einsum('ijkl,klab->ijab', tmp, tau)
goovv = goovv.conj()
gvvvv = einsum('ijab,ijcd->abcd', tau, l2) * 0.125
goooo = einsum('klab,ijab->klij', l2, tau) * 0.125
gooov = einsum('jkba,ib->jkia', tau, l1) * -0.25
gooov += einsum('iljk,la->jkia', goooo, t1)
tmp = numpy.einsum('icjc->ij', miajb) * .25
gooov -= einsum('ij,ka->jkia', tmp, t1)
gooov += einsum('icja,kc->jkia', miajb, t1) * .5
gooov = gooov.conj()
gooov += einsum('jkab,ib->jkia', l2, t1) * .25
govvo = einsum('ia,jb->ibaj', l1, t1)
govvo += numpy.einsum('iajb->ibaj', miajb)
govvo -= einsum('ikac,jc,kb->ibaj', l2, t1, t1)
govvv = einsum('ja,ijcb->iacb', l1, tau) * .25
govvv += einsum('bcad,id->iabc', gvvvv, t1)
tmp = numpy.einsum('kakb->ab', miajb) * .25
govvv += einsum('ab,ic->iacb', tmp, t1)
govvv += einsum('kaib,kc->iabc', miajb, t1) * .5
govvv = govvv.conj()
govvv += einsum('ijbc,ja->iabc', l2, t1) * .25
dovov = goovv.transpose(0,2,1,3) - goovv.transpose(0,3,1,2)
dvvvv = gvvvv.transpose(0,2,1,3) - gvvvv.transpose(0,3,1,2)
doooo = goooo.transpose(0,2,1,3) - goooo.transpose(0,3,1,2)
dovvv = govvv.transpose(0,2,1,3) - govvv.transpose(0,3,1,2)
dooov = gooov.transpose(0,2,1,3) - gooov.transpose(1,2,0,3)
dovvo = govvo.transpose(0,2,1,3)
dovov =(dovov + dovov.transpose(2,3,0,1)) * .5
dvvvv = dvvvv + dvvvv.transpose(1,0,3,2).conj()
doooo = doooo + doooo.transpose(1,0,3,2).conj()
dovvo =(dovvo + dovvo.transpose(3,2,1,0).conj()) * .5
doovv = None # = -dovvo.transpose(0,3,2,1)
dvvov = None
return (dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov)
def make_rdm1(mycc, t1, t2, l1, l2, ao_repr=False):
r'''
One-particle density matrix in the molecular spin-orbital representation
(the occupied-virtual blocks from the orbital response contribution are
not included).
dm1[p,q] = <q^\dagger p> (p,q are spin-orbitals)
The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
d1 = _gamma1_intermediates(mycc, t1, t2, l1, l2)
return _make_rdm1(mycc, d1, with_frozen=True, ao_repr=ao_repr)
def make_rdm2(mycc, t1, t2, l1, l2, ao_repr=False):
r'''
Two-particle density matrix in the molecular spin-orbital representation
dm2[p,q,r,s] = <p^\dagger r^\dagger s q>
where p,q,r,s are spin-orbitals. p,q correspond to one particle and r,s
correspond to another particle. The contraction between ERIs (in
Chemist's notation) and rdm2 is
E = einsum('pqrs,pqrs', eri, rdm2)
'''
d1 = _gamma1_intermediates(mycc, t1, t2, l1, l2)
d2 = _gamma2_intermediates(mycc, t1, t2, l1, l2)
return _make_rdm2(mycc, d1, d2, with_dm1=True, with_frozen=True,
ao_repr=ao_repr)
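# Example usage (sketch, mirroring the __main__ block below): given a converged
# GCCSD object `mycc` with amplitudes t1, t2 and lambda amplitudes l1, l2,
#     dm1 = make_rdm1(mycc, t1, t2, l1, l2)
#     dm2 = make_rdm2(mycc, t1, t2, l1, l2)
#     e1 = numpy.einsum('ij,ji', h1, dm1) + .5 * numpy.einsum('ijkl,ijkl', eri, dm2)
# where h1 and eri are the one- and two-electron integrals in the same spin-orbital
# basis (add the nuclear repulsion for the total energy).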
def _make_rdm1(mycc, d1, with_frozen=True, ao_repr=False):
r'''
One-particle density matrix in the molecular spin-orbital representation
(the occupied-virtual blocks from the orbital response contribution are
not included).
dm1[p,q] = <q^\dagger p> (p,q are spin-orbitals)
The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
doo, dov, dvo, dvv = d1
nocc, nvir = dov.shape
nmo = nocc + nvir
dm1 = numpy.empty((nmo,nmo), dtype=doo.dtype)
dm1[:nocc,:nocc] = doo + doo.conj().T
dm1[:nocc,nocc:] = dov + dvo.conj().T
dm1[nocc:,:nocc] = dm1[:nocc,nocc:].conj().T
dm1[nocc:,nocc:] = dvv + dvv.conj().T
dm1 *= .5
dm1[numpy.diag_indices(nocc)] += 1
    if with_frozen and not (mycc.frozen == 0 or mycc.frozen is None):
nmo = mycc.mo_occ.size
nocc = numpy.count_nonzero(mycc.mo_occ > 0)
rdm1 = numpy.zeros((nmo,nmo), dtype=dm1.dtype)
rdm1[numpy.diag_indices(nocc)] = 1
moidx = numpy.where(mycc.get_frozen_mask())[0]
rdm1[moidx[:,None],moidx] = dm1
dm1 = rdm1
if ao_repr:
mo = mycc.mo_coeff
dm1 = lib.einsum('pi,ij,qj->pq', mo, dm1, mo.conj())
return dm1
def _make_rdm2(mycc, d1, d2, with_dm1=True, with_frozen=True, ao_repr=False):
r'''
dm2[p,q,r,s] = <p^\dagger r^\dagger s q>
Note the contraction between ERIs (in Chemist's notation) and rdm2 is
E = einsum('pqrs,pqrs', eri, rdm2)
'''
dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = d2
nocc, nvir = dovov.shape[:2]
nmo = nocc + nvir
dm2 = numpy.empty((nmo,nmo,nmo,nmo), dtype=doooo.dtype)
dovov = numpy.asarray(dovov)
dm2[:nocc,nocc:,:nocc,nocc:] = dovov
dm2[nocc:,:nocc,nocc:,:nocc] = dm2[:nocc,nocc:,:nocc,nocc:].transpose(1,0,3,2).conj()
dovov = None
dovvo = numpy.asarray(dovvo)
dm2[:nocc,:nocc,nocc:,nocc:] =-dovvo.transpose(0,3,2,1)
dm2[nocc:,nocc:,:nocc,:nocc] =-dovvo.transpose(2,1,0,3)
dm2[:nocc,nocc:,nocc:,:nocc] = dovvo
dm2[nocc:,:nocc,:nocc,nocc:] = dovvo.transpose(1,0,3,2).conj()
dovvo = None
dm2[nocc:,nocc:,nocc:,nocc:] = dvvvv
dm2[:nocc,:nocc,:nocc,:nocc] = doooo
dovvv = numpy.asarray(dovvv)
dm2[:nocc,nocc:,nocc:,nocc:] = dovvv
dm2[nocc:,nocc:,:nocc,nocc:] = dovvv.transpose(2,3,0,1)
dm2[nocc:,nocc:,nocc:,:nocc] = dovvv.transpose(3,2,1,0).conj()
dm2[nocc:,:nocc,nocc:,nocc:] = dovvv.transpose(1,0,3,2).conj()
dovvv = None
dooov = numpy.asarray(dooov)
dm2[:nocc,:nocc,:nocc,nocc:] = dooov
dm2[:nocc,nocc:,:nocc,:nocc] = dooov.transpose(2,3,0,1)
dm2[:nocc,:nocc,nocc:,:nocc] = dooov.transpose(1,0,3,2).conj()
dm2[nocc:,:nocc,:nocc,:nocc] = dooov.transpose(3,2,1,0).conj()
    if with_frozen and not (mycc.frozen == 0 or mycc.frozen is None):
nmo, nmo0 = mycc.mo_occ.size, nmo
nocc = numpy.count_nonzero(mycc.mo_occ > 0)
rdm2 = numpy.zeros((nmo,nmo,nmo,nmo), dtype=dm2.dtype)
moidx = numpy.where(mycc.get_frozen_mask())[0]
idx = (moidx.reshape(-1,1) * nmo + moidx).ravel()
lib.takebak_2d(rdm2.reshape(nmo**2,nmo**2),
dm2.reshape(nmo0**2,nmo0**2), idx, idx)
dm2 = rdm2
if with_dm1:
dm1 = _make_rdm1(mycc, d1, with_frozen)
dm1[numpy.diag_indices(nocc)] -= 1
for i in range(nocc):
# Be careful with the convention of dm1 and the transpose of dm2 at the end
dm2[i,i,:,:] += dm1
dm2[:,:,i,i] += dm1
dm2[:,i,i,:] -= dm1
dm2[i,:,:,i] -= dm1.T
for i in range(nocc):
for j in range(nocc):
dm2[i,i,j,j] += 1
dm2[i,j,j,i] -= 1
# dm2 was computed as dm2[p,q,r,s] = < p^\dagger r^\dagger s q > in the
# above. Transposing it so that it be contracted with ERIs (in Chemist's
# notation):
# E = einsum('pqrs,pqrs', eri, rdm2)
dm2 = dm2.transpose(1,0,3,2)
if ao_repr:
from pyscf.cc import ccsd_rdm
dm2 = ccsd_rdm._rdm2_mo2ao(dm2, mycc.mo_coeff)
return dm2
if __name__ == '__main__':
from functools import reduce
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf.cc import gccsd
from pyscf.cc import addons
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.spin = 2
mol.build()
mf = scf.UHF(mol).run(conv_tol=1.)
mf = scf.addons.convert_to_ghf(mf)
mycc = gccsd.GCCSD(mf)
ecc, t1, t2 = mycc.kernel()
l1, l2 = mycc.solve_lambda()
dm1 = make_rdm1(mycc, t1, t2, l1, l2)
dm2 = make_rdm2(mycc, t1, t2, l1, l2)
nao = mol.nao_nr()
mo_a = mf.mo_coeff[:nao]
mo_b = mf.mo_coeff[nao:]
nmo = mo_a.shape[1]
eri = ao2mo.kernel(mf._eri, mo_a+mo_b, compact=False).reshape([nmo]*4)
orbspin = mf.mo_coeff.orbspin
sym_forbid = (orbspin[:,None] != orbspin)
eri[sym_forbid,:,:] = 0
eri[:,:,sym_forbid] = 0
hcore = scf.RHF(mol).get_hcore()
h1 = reduce(numpy.dot, (mo_a.T.conj(), hcore, mo_a))
h1+= reduce(numpy.dot, (mo_b.T.conj(), hcore, mo_b))
e1 = numpy.einsum('ij,ji', h1, dm1)
e1+= numpy.einsum('ijkl,ijkl', eri, dm2) * .5
e1+= mol.energy_nuc()
print(e1 - mycc.e_tot)
#TODO: test 1pdm, 2pdm against FCI
| 36.181818
| 89
| 0.605807
|
79521f077daf92d426584dd12f539ddb5231bf52
| 42,493
|
py
|
Python
|
pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8.py
|
ZW7436/PycQED_py3
|
dcc19dbaedd226112a2f98a7985dcf2bab2c9734
|
[
"MIT"
] | 1
|
2019-07-05T13:41:51.000Z
|
2019-07-05T13:41:51.000Z
|
pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8.py
|
ball199578/PycQED_py3
|
dcc19dbaedd226112a2f98a7985dcf2bab2c9734
|
[
"MIT"
] | null | null | null |
pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8.py
|
ball199578/PycQED_py3
|
dcc19dbaedd226112a2f98a7985dcf2bab2c9734
|
[
"MIT"
] | null | null | null |
import time
import logging
import os
import numpy as np
from . import zishell_NH as zs
from qcodes.utils import validators as vals
from .ZI_base_instrument import ZI_base_instrument
from qcodes.instrument.parameter import ManualParameter
from zlib import crc32
import ctypes
from ctypes.wintypes import MAX_PATH
class ZI_HDAWG8(ZI_base_instrument):
"""
    This is the PycQED/QCoDeS driver for the Zurich Instruments HD AWG-8.
Parameter files are generated from the python API of the instrument
using the "create_parameter_file" method in the ZI_base_instrument class.
These are used to add parameters to the instrument.
"""
def __init__(self, name, device: str,
server: str='localhost', port=8004,
num_codewords: int = 32, **kw):
'''
Input arguments:
name: (str) name of the instrument as seen by the user
device (str) the name of the device e.g., "dev8008"
server (str) the ZI data server
port (int) the port to connect to
'''
t0 = time.time()
self._num_codewords = num_codewords
if os.name == 'nt':
dll = ctypes.windll.shell32
buf = ctypes.create_unicode_buffer(MAX_PATH + 1)
if dll.SHGetSpecialFolderPathW(None, buf, 0x0005, False):
_basedir = buf.value
            else:
                logging.warning('Could not extract my documents folder')
                _basedir = os.path.expanduser('~')  # fall back to the home directory
else:
_basedir = os.path.expanduser('~')
self.lab_one_webserver_path = os.path.join(
_basedir, 'Zurich Instruments', 'LabOne', 'WebServer')
super().__init__(name=name, **kw)
self._devname = device
self._dev = zs.ziShellDevice()
self._dev.connect_server(server, port)
print("Trying to connect to device {}".format(self._devname))
self._dev.connect_device(self._devname, '1GbE')
dir_path = os.path.dirname(os.path.abspath(__file__))
base_fn = os.path.join(dir_path, 'zi_parameter_files')
try:
self.add_parameters_from_file(
filename=os.path.join(base_fn, 'node_doc_HDAWG8.json'))
except FileNotFoundError:
logging.warning("parameter file for data parameters"
" {} not found".format(self._d_file_name))
self.add_ZIshell_device_methods_to_instrument()
dev_type = self.get('features_devtype')
if dev_type == 'HDAWG8':
self._num_channels = 8
elif dev_type == 'HDAWG4':
self._num_channels = 4
else:
self._num_channels = 8
logging.warning('Unknown device type. Assuming eight channels.')
self._add_codeword_parameters()
self._add_extra_parameters()
self.connect_message(begin_time=t0)
def _add_extra_parameters(self):
self.add_parameter('timeout', unit='s',
initial_value=10,
parameter_class=ManualParameter)
self.add_parameter(
'cfg_num_codewords', label='Number of used codewords', docstring=(
'This parameter is used to determine how many codewords to '
'upload in "self.upload_codeword_program".'),
initial_value=self._num_codewords,
            # N.B. I have commented out numbers larger than self._num_codewords
# see also issue #358
vals=vals.Enum(2, 4, 8, 16, 32), # , 64, 128, 256, 1024),
parameter_class=ManualParameter)
self.add_parameter(
'cfg_codeword_protocol', initial_value='identical',
vals=vals.Enum('identical', 'microwave', 'flux'), docstring=(
'Used in the configure codeword method to determine what DIO'
' pins are used in for which AWG numbers.'),
parameter_class=ManualParameter)
for i in range(4):
self.add_parameter(
'awgs_{}_sequencer_program_crc32_hash'.format(i),
parameter_class=ManualParameter,
initial_value=0, vals=vals.Ints())
def snapshot_base(self, update=False, params_to_skip_update=None):
if params_to_skip_update is None:
params_to_skip_update = self._params_to_skip_update
snap = super().snapshot_base(
update=update, params_to_skip_update=params_to_skip_update)
return snap
def add_ZIshell_device_methods_to_instrument(self):
"""
Some methods defined in the zishell are convenient as public
methods of the instrument. These are added here.
"""
self.reconnect = self._dev.reconnect
self.restart_device = self._dev.restart_device
self.poll = self._dev.poll
self.sync = self._dev.sync
self.read_from_scope = self._dev.read_from_scope
self.restart_scope_module = self._dev.restart_scope_module
self.restart_awg_module = self._dev.restart_awg_module
def configure_awg_from_string(self, awg_nr: int, program_string: str,
timeout: float=15):
"""
Uploads a program string to one of the AWGs of the AWG8.
        Also stores a CRC32 hash of the program string for change tracking.
"""
self._dev.configure_awg_from_string(awg_nr=awg_nr,
program_string=program_string,
timeout=timeout)
hash = crc32(program_string.encode('utf-8'))
self.set('awgs_{}_sequencer_program_crc32_hash'.format(awg_nr),
hash)
def get_idn(self):
# FIXME, update using new parameters for this purpose
idn_dict = {'vendor': 'ZurichInstruments',
'model': self._dev.daq.getByte(
'/{}/features/devtype'.format(self._devname)),
'serial': self._devname,
'firmware': self._dev.geti('system/fwrevision'),
'fpga_firmware': self._dev.geti('system/fpgarevision')
}
return idn_dict
def stop(self):
"""
        Stops the program on all AWGs that are part of this AWG8 unit
"""
for i in range(int(self._num_channels/2)):
self.set('awgs_{}_enable'.format(i), 0)
def start(self):
"""
        Starts the program on all AWGs that are part of this AWG8 unit
"""
for i in range(int(self._num_channels/2)):
self.set('awgs_{}_enable'.format(i), 1)
# def _check_protocol(self, awg):
# # TODO: Add docstrings and make this more clear (Oct 2017)
# mask = self._dev.geti('awgs/' + str(awg) + '/dio/mask/value') << self._dev.geti(
# 'awgs/' + str(awg) + '/dio/mask/shift')
# strobe = 1 << self._dev.geti('awgs/' + str(awg) + '/dio/strobe/index')
# valid = 1 << self._dev.geti('awgs/' + str(awg) + '/dio/valid/index')
# #print('Codeword mask: {:08x}'.format(mask))
# #print('Strobe mask : {:08x}'.format(strobe))
# #print('Valid mask : {:08x}'.format(valid))
# got_bits = 0
# got_strobe_re = False
# got_strobe_fe = False
# got_valid_re = False
# got_valid_fe = False
# last_strobe = None
# last_valid = None
# strobe_low = 0
# strobe_high = 0
# count_low = False
# count_high = False
# count_ok = True
# ok = 0
# data = self._dev.getv('awgs/' + str(awg) + '/dio/data')
# for n, d in enumerate(data):
# curr_strobe = (d & strobe) != 0
# curr_valid = (d & valid) != 0
# if count_high:
# if curr_strobe:
# strobe_high += 1
# else:
# if (strobe_low > 0) and (strobe_low != strobe_high):
# count_ok = False
# if count_low:
# if not curr_strobe:
# strobe_low += 1
# else:
# if (strobe_high > 0) and (strobe_low != strobe_high):
# count_ok = False
# if (last_strobe != None):
# if (curr_strobe and not last_strobe):
# got_strobe_re = True
# strobe_high = 0
# count_high = True
# count_low = False
# index_high = n
# elif (not curr_strobe and last_strobe):
# got_strobe_fe = True
# strobe_low = 0
# count_low = True
# count_high = False
# index_low = n
# if (last_valid != None) and (curr_valid and not last_valid):
# got_valid_re = True
# if (last_valid != None) and (not curr_valid and last_valid):
# got_valid_fe = True
# got_bits |= (d & mask)
# last_strobe = curr_strobe
# last_valid = curr_valid
# if got_bits != mask:
# ok |= 1
# if not got_strobe_re or not got_strobe_fe:
# ok |= 2
# if not got_valid_re or not got_valid_fe:
# ok |= 4
# if not count_ok:
# ok |= 8
# return ok
# def _print_check_protocol_error_message(self, ok):
# if ok & 1:
# print('Did not see all codeword bits toggling')
# if ok & 2:
# print('Did not see toggle bit toggling')
# if ok & 4:
# print('Did not see valid bit toggling')
# if ok & 8:
# print('Toggle bit is not symmetrical')
# def calibrate_dio(self):
# # TODO: add docstrings to make it more clear
# ok = True
# print("Switching to internal clock")
# self.set('system_extclk', 0)
# time.sleep(1)
# print("Switching to external clock")
# self.set('system_extclk', 1)
# time.sleep(1)
# print("Calibrating internal DIO delays...")
# for i in [1, 0, 2, 3]: # strange order is necessary
# print(' Calibrating AWG {} '.format(i), end='')
# self._dev.seti('raw/awgs/*/dio/calib/start', 2)
# time.sleep(1)
# status = self._dev.geti('raw/awgs/' + str(i) + '/dio/calib/status')
# if (status != 0):
# print('[FAILURE]')
# ok = False
# else:
# print('[SUCCESS]')
# return ok
# def calibrate_dio_protocol(self):
# # TODO: add docstrings to make it more clear
# print("Checking DIO protocol...")
# done = 4*[False]
# timeout = 5
# while not all(done):
# ok = True
# for i in range(4):
# print(' Checking AWG {} '.format(i), end='')
# protocol = self._check_protocol(i)
# if protocol != 0:
# ok = False
# done[i] = False
# print('[FAILURE]')
# self._print_check_protocol_error_message(protocol)
# else:
# done[i] = True
# print('[SUCCESS]')
# if not ok:
# print(
# " A problem was detected with the protocol. Will try to reinitialize the clock as it sometimes helps.")
# self._dev.seti('awgs/*/enable', 0)
# self._dev.seti('system/extclk', 0)
# time.sleep(1)
# self._dev.seti('system/extclk', 1)
# time.sleep(1)
# self._dev.seti('awgs/*/enable', 1)
# timeout -= 1
# if timeout <= 0:
# print(" Too many retries, aborting!")
# return False
# ok = True
# print("Calibrating DIO protocol delays...")
# for i in range(4):
# print(' Calibrating AWG {} '.format(i), end='')
# self._dev.seti('raw/awgs/' + str(i) + '/dio/calib/start', 1)
# time.sleep(1)
# status = self._dev.geti('raw/awgs/' + str(i) + '/dio/calib/status')
# protocol = self._check_protocol(i)
# if (status != 0) or (protocol != 0):
# print('[FAILURE]')
# self._print_check_protocol_error_message(protocol)
# ok = False
# else:
# print('[SUCCESS]')
# return ok
def _add_codeword_parameters(self):
"""
        Adds the parameters that are used for uploading codewords.
        It also contains initial values for each codeword to ensure
        that "upload_codeword_program" can be run right away.
"""
        docst = ('Specifies a waveform for a specific codeword. ' +
'The waveforms must be uploaded using ' +
'"upload_codeword_program". The channel number corresponds' +
' to the channel as indicated on the device (1 is lowest).')
self._params_to_skip_update = []
for ch in range(self._num_channels):
for cw in range(self._num_codewords):
parname = 'wave_ch{}_cw{:03}'.format(ch+1, cw)
self.add_parameter(
parname,
label='Waveform channel {} codeword {:03}'.format(
ch+1, cw),
vals=vals.Arrays(), # min_value, max_value = unknown
set_cmd=self._gen_write_csv(parname),
get_cmd=self._gen_read_csv(parname),
docstring=docst)
self._params_to_skip_update.append(parname)
def _gen_write_csv(self, wf_name):
def write_func(waveform):
            # The length of AWG8 waveforms should be a multiple of 8 samples.
if (len(waveform) % 8) != 0:
extra_zeros = 8-(len(waveform) % 8)
waveform = np.concatenate([waveform, np.zeros(extra_zeros)])
return self._write_csv_waveform(
wf_name=wf_name, waveform=waveform)
return write_func
def _gen_read_csv(self, wf_name):
def read_func():
return self._read_csv_waveform(
wf_name=wf_name)
return read_func
def _write_csv_waveform(self, wf_name: str, waveform):
filename = os.path.join(
self.lab_one_webserver_path, 'awg', 'waves',
self._devname+'_'+wf_name+'.csv')
        with open(filename, 'w') as f:
            np.savetxt(f, waveform, delimiter=",")
def _read_csv_waveform(self, wf_name: str):
filename = os.path.join(
self.lab_one_webserver_path, 'awg', 'waves',
self._devname+'_'+wf_name+'.csv')
try:
return np.genfromtxt(filename, delimiter=',')
except OSError as e:
# if the waveform does not exist yet dont raise exception
logging.warning(e)
print(e)
return None
# Note: This was added for debugging by NielsH.
# If we do not need it for a few days we should remove it. (2/10/2017)
# def stop_awg(self):
# test_program = """
# // 'Counting' waveform
# const N = 80;
# setWaveDIO(0, ones(N), -ones(N));
# setWaveDIO(1, ones(N), -ones(N));
# setWaveDIO(2, -ones(N), ones(N));
# setWaveDIO(3, ones(N), ones(N));
# setWaveDIO(4, -blackman(N, 1.0, 0.2), -blackman(N, 1.0, 0.2));
# setWaveDIO(5, blackman(N, 1.0, 0.2), -blackman(N, 1.0, 0.2));
# setWaveDIO(6, -blackman(N, 1.0, 0.2), blackman(N, 1.0, 0.2));
# setWaveDIO(7, blackman(N, 1.0, 0.2), blackman(N, 1.0, 0.2));
# """
# for awg_nr in range(4):
# print('Configuring AWG {} with dummy program'.format(awg_nr))
# # disable all AWG channels
# self.set('awgs_{}_enable'.format(awg_nr), 0)
# self.configure_awg_from_string(awg_nr, test_program, self.timeout())
# self.set('awgs_{}_single'.format(awg_nr), 0)
# self.set('awgs_{}_enable'.format(awg_nr), 1)
# print('Waiting...')
# time.sleep(1)
# for awg_nr in range(4):
# # disable all AWG channels
# self.set('awgs_{}_enable'.format(awg_nr), 0)
def initialze_all_codewords_to_zeros(self):
"""
Generates all zeros waveforms for all codewords
"""
t0 = time.time()
wf = np.zeros(32)
waveform_params = [value for key, value in self.parameters.items()
if 'wave_ch' in key.lower()]
for par in waveform_params:
par(wf)
t1 = time.time()
print('Set all zeros waveforms in {:.1f} s'.format(t1-t0))
def upload_waveform_realtime(self, w0, w1, awg_nr: int, wf_nr: int =1):
"""
Warning! This method should be used with care.
        Uploads a waveform to the awg in realtime, note that this gets
overwritten if a new program is uploaded.
Arguments:
w0 (array): waveform for ch0 of the awg pair.
w1 (array): waveform for ch1 of the awg pair.
awg_nr (int): awg_nr indicating what awg pair to use.
wf_nr (int): waveform in memory to overwrite, default is 1.
There are a few important notes when using this method
- w0 and w1 must be of the same length
- any parts of a waveform longer than w0/w1 will not be overwritten.
- loading speed depends on the size of w0 and w1 and is ~80ms for 20us.
"""
# these two attributes are added for debugging purposes.
# they allow checking what the realtime loaded waveforms are.
self._realtime_w0 = w0
self._realtime_w1 = w1
        c = np.vstack((w0, w1)).reshape((-1,), order='F')  # interleave w0/w1 samples
self._dev.seti('awgs/{}/waveform/index'.format(awg_nr), wf_nr)
self._dev.setv('awgs/{}/waveform/data'.format(awg_nr), c)
self._dev.seti('awgs/{}/enable'.format(awg_nr), wf_nr)
def upload_codeword_program(self, awgs=np.arange(4)):
"""
Generates a program that plays the codeword waves for each channel.
awgs (array): the awg numbers to which to upload the codeword program.
By default uploads to all channels but can be specific to
speed up the process.
"""
# Type conversion to ensure lists do not produce weird results
awgs = np.array(awgs)
# because awg_channels come in pairs and are numbered from 1-8 in API
awg_channels = awgs*2+1
for awg_nr in awgs:
# disable all AWG channels
self.set('awgs_{}_enable'.format(int(awg_nr)), 0)
codeword_mode_snippet = (
'while (1) { \n '
'\t// Wait for a trigger on the DIO interface\n'
'\twaitDIOTrigger();\n'
'\t// Play a waveform from the table based on the DIO code-word\n'
'\tplayWaveDIO(); \n'
'}')
if self.cfg_codeword_protocol() != 'flux':
for ch in awg_channels:
waveform_table = '// Define the waveform table\n'
for cw in range(self.cfg_num_codewords()):
wf0_name = '{}_wave_ch{}_cw{:03}'.format(
self._devname, ch, cw)
wf1_name = '{}_wave_ch{}_cw{:03}'.format(
self._devname, ch+1, cw)
waveform_table += 'setWaveDIO({}, "{}", "{}");\n'.format(
cw, wf0_name, wf1_name)
program = waveform_table + codeword_mode_snippet
                # N.B. awg_nr goes from 0 to 3 in API while in LabOne
# it is 1 to 4
awg_nr = ch//2 # channels are coupled in pairs of 2
self.configure_awg_from_string(awg_nr=int(awg_nr),
program_string=program,
timeout=self.timeout())
else: # if protocol is flux
for ch in awg_channels:
waveform_table = '//Flux mode\n// Define the waveform table\n'
mask_0 = 0b000111 # AWGx_ch0 uses lower bits for CW
mask_1 = 0b111000 # AWGx_ch1 uses higher bits for CW
# for cw in range(2**6):
for cw in range(8):
cw0 = cw & mask_0
cw1 = (cw & mask_1) >> 3
# FIXME: this is a hack because not all AWG8 channels support
# amp mode. It forces all AWG8's of a pair to behave identical.
cw1 = cw0
# if both wfs are triggered play both
if (cw0 != 0) and (cw1 != 0):
# if both waveforms exist, upload
wf0_cmd = '"{}_wave_ch{}_cw{:03}"'.format(
self._devname, ch, cw0)
wf1_cmd = '"{}_wave_ch{}_cw{:03}"'.format(
self._devname, ch+1, cw1)
# if single wf is triggered fill the other with zeros
elif (cw0 == 0) and (cw1 != 0):
wf0_cmd = 'zeros({})'.format(len(self.get(
'wave_ch{}_cw{:03}'.format(ch, cw1))))
wf1_cmd = '"{}_wave_ch{}_cw{:03}"'.format(
self._devname, ch+1, cw1)
elif (cw0 != 0) and (cw1 == 0):
wf0_cmd = '"{}_wave_ch{}_cw{:03}"'.format(
self._devname, ch, cw0)
wf1_cmd = 'zeros({})'.format(len(self.get(
'wave_ch{}_cw{:03}'.format(ch, cw0))))
# if no wfs are triggered play only zeros
else:
wf0_cmd = 'zeros({})'.format(42)
wf1_cmd = 'zeros({})'.format(42)
waveform_table += 'setWaveDIO({}, {}, {});\n'.format(
cw, wf0_cmd, wf1_cmd)
program = waveform_table + codeword_mode_snippet
                # N.B. awg_nr goes from 0 to 3 in API while in LabOne it
# is 1 to 4
awg_nr = ch//2 # channels are coupled in pairs of 2
self.configure_awg_from_string(awg_nr=int(awg_nr),
program_string=program,
timeout=self.timeout())
self.configure_codeword_protocol()
def configure_codeword_protocol(self, default_dio_timing: bool=False):
"""
This method configures the AWG-8 codeword protocol.
It includes specifying what bits are used to specify codewords
as well as setting the delays on the different bits.
        The protocol uses several parts to specify the DIO configuration.
        These parameters are specific to each AWG-8 channel and depend on
        the function the AWG8 has in the setup.
The parameter "cfg_codeword_protocol" defines what protocol is used.
There are three options:
identical : all AWGs have the same configuration
microwave : AWGs 0 and 1 share bits
flux : Each AWG pair is responsible for 2 flux channels.
this also affects the "codeword_program" and
setting "wave_chX_cwXXX" parameters.
Protocol definition:
protocol
- mask/value -> some bits are masked to allow using only a few bits
to specify a codeword.
- mask/shift -> all acquired bits are shifted to allow specifying
which bits should be used.
The parameters below are global to all AWG channels.
- strobe/index -> this specifies which bit is the toggle/strobe bit
        - strobe/slope -> check for codewords on rising/falling or both
          edges of the toggle bit.
- valid/index -> specifies the codeword valid bit
- valid/slope -> specifies the slope of the valid bit
Delay configuration
In this part the DIO delay indices are set. These should be
identical for each AWG channel.
- dio/delay/index -> selects which delay to change next
- dio/delay/value -> specifies an individual delay
        Turn on device
            The final step enables the signal output of each AWG and sets
it to the right mode.
"""
####################################################
# Protocol definition
####################################################
# Configure the DIO interface for triggering on
for awg_nr in range(int(self._num_channels/2)):
# This is the bit index of the valid bit,
self.set('awgs_{}_dio_valid_index'.format(awg_nr), 31)
# Valid polarity is 'high' (hardware value 2),
# 'low' (hardware value 1), 'no valid needed' (hardware value 0)
self.set('awgs_{}_dio_valid_polarity'.format(awg_nr), 2)
# This is the bit index of the strobe signal (toggling signal),
self.set('awgs_{}_dio_strobe_index'.format(awg_nr), 30)
# Configure the DIO interface for triggering on the both edges of
# the strobe/toggle bit signal.
# 1: rising edge, 2: falling edge or 3: both edges
self.set('awgs_{}_dio_strobe_slope'.format(awg_nr), 3)
# the mask determines how many bits will be used in the protocol
# e.g., mask 3 will mask the bits with bin(3) = 00000011 using
# only the 2 Least Significant Bits.
# N.B. cfg_num_codewords must be a power of 2
self.set('awgs_{}_dio_mask_value'.format(awg_nr),
self.cfg_num_codewords()-1)
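            # Example (sketch): with cfg_num_codewords() == 32 the mask becomes
            # 31 = 0b11111, so only the 5 least significant (shifted) DIO bits
            # select the codeword.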
if self.cfg_codeword_protocol() == 'identical':
# In the identical protocol all bits are used to trigger
# the same codewords on all AWG's
# N.B. The shift is applied before the mask
# The relevant bits can be selected by first shifting them
# and then masking them.
self.set('awgs_{}_dio_mask_shift'.format(awg_nr), 0)
# In the mw protocol bits [0:7] -> CW0 and bits [(8+1):15] -> CW1
# N.B. DIO bit 8 (first of 2nd byte) not connected in AWG8!
elif self.cfg_codeword_protocol() == 'microwave':
if awg_nr in [0, 1]:
self.set('awgs_{}_dio_mask_shift'.format(awg_nr), 0)
elif awg_nr in [2, 3]:
self.set('awgs_{}_dio_mask_shift'.format(awg_nr), 9)
elif self.cfg_codeword_protocol() == 'flux':
# bits[0:3] for awg0_ch0, bits[4:6] for awg0_ch1 etc.
# self.set('awgs_{}_dio_mask_value'.format(awg_nr), 2**6-1)
# self.set('awgs_{}_dio_mask_shift'.format(awg_nr), awg_nr*6)
# FIXME: this is a protocol that does identical flux pulses
# on each channel.
self.set('awgs_{}_dio_mask_value'.format(awg_nr), 2**3-1)
# self.set('awgs_{}_dio_mask_shift'.format(awg_nr), 3)
self.set('awgs_{}_dio_mask_shift'.format(awg_nr), 0)
####################################################
# Turn on device
####################################################
time.sleep(.05)
self._dev.daq.setInt('/' + self._dev.device +
'/awgs/*/enable', 1)
# Turn on all outputs
self._dev.daq.setInt('/' + self._dev.device + '/sigouts/*/on', 1)
# Disable all function generators
self._dev.daq.setInt('/' + self._dev.device +
'/sigouts/*/enables/*', 0)
# when doing flux pulses, set everything to amp mode
if self.cfg_codeword_protocol() == 'flux':
for ch in range(8):
self.set('sigouts_{}_direct'.format(ch), 0)
self.set('sigouts_{}_range'.format(ch), 5)
# Switch all outputs into direct mode when not using flux pulses
else:
for ch in range(8):
self.set('sigouts_{}_direct'.format(ch), 1)
self.set('sigouts_{}_range'.format(ch), .8)
def _find_valid_delays(self, awg, expected_sequence, verbose=False):
"""
The function loops through the possible delay settings on the DIO interface
and records and analyzes DIO data for each setting. It then determines whether
a given delay setting results in valid DIO protocol data being recorded.
In order for data to be correct, two conditions must be satisfied: First,
no timing violations are allowed, and, second, the sequence of codewords
detected on the interface must match the expected sequence.
"""
if verbose:
print("INFO : Finding valid delays for AWG {}".format(awg))
vld_mask = 1 << self._dev.geti('awgs/{}/dio/valid/index'.format(awg))
vld_polarity = self._dev.geti('awgs/{}/dio/valid/polarity'.format(awg))
strb_mask = 1 << self._dev.geti('awgs/{}/dio/strobe/index'.format(awg))
strb_slope = self._dev.geti('awgs/{}/dio/strobe/slope'.format(awg))
cw_mask = self._dev.geti('awgs/{}/dio/mask/value'.format(awg))
cw_shift = self._dev.geti('awgs/{}/dio/mask/shift'.format(awg))
if verbose:
print('INFO : vld_mask = 0x{:08x}'.format(vld_mask))
print('INFO : vld_polarity =', vld_polarity)
print('INFO : strb_mask = 0x{:08x}'.format(strb_mask))
print('INFO : strb_slope =', strb_slope)
print('INFO : cw_mask = 0x{:08x}'.format(cw_mask))
print('INFO : cw_shift =', cw_shift)
valid_delays = []
for delay in range(0, 7):
if verbose:
print("INFO : Testing delay {} on AWG {}...".format(delay, awg))
self._set_dio_delay(awg, strb_mask,
(cw_mask << cw_shift) | vld_mask, delay)
data = self._dev.getv('awgs/' + str(awg) + '/dio/data')
codewords, timing_violations = _analyze_dio_data(
data, strb_mask, strb_slope, vld_mask, vld_polarity, cw_mask, cw_shift)
timeout_cnt = 0
while (cw_mask != 0) and len(codewords) == 0:
if timeout_cnt > 5:
break
if verbose:
print("WARNING: No codewords detected, trying again!")
data = self._dev.getv('awgs/' + str(awg) + '/dio/data')
codewords, timing_violations = _analyze_dio_data(
data, strb_mask, strb_slope, vld_mask, vld_polarity, cw_mask, cw_shift)
timeout_cnt += 1
# Compare codewords against sequence
if (cw_mask != 0) and len(codewords) == 0:
if verbose:
print(
"WARNING: No codewords detected on AWG {} for delay {}".format(awg, delay))
continue
            # Can't do anything with timing violations
if timing_violations:
if verbose:
print("WARNING: Timing violation detected on AWG {} for delay {}!".format(
awg, delay))
continue
# Check against expected sequence
valid_sequence = True
for n, codeword in enumerate(codewords):
if n == 0:
if codeword not in expected_sequence:
if verbose:
print("WARNING: Codeword {} with value {} not in expected sequence {}!".format(
n, codeword, expected_sequence))
if verbose:
print(
"INFO : Detected codeword sequence: {}".format(codewords))
valid_sequence = False
break
else:
index = expected_sequence.index(codeword)
else:
last_index = index
index = (index + 1) % len(expected_sequence)
if codeword != expected_sequence[index]:
if verbose:
print("WARNING: Codeword {} with value {} not expected to follow codeword {} in expected sequence {}!".format(
n, codeword, expected_sequence[last_index], expected_sequence))
if verbose:
print(
"INFO : Detected codeword sequence: {}".format(codewords))
valid_sequence = False
break
# If we get to this point the delay is valid
if valid_sequence:
valid_delays.append(delay)
if verbose:
print("INFO : Found valid delays of {}".format(list(valid_delays)))
return set(valid_delays)
def _set_dio_delay(self, awg, strb_mask, data_mask, delay):
"""
The function sets the DIO delay for a given FPGA. The valid delay range is
0 to 6. The delays are created by either delaying the data bits or the strobe
bit. The data_mask input represents all bits that are part of the codeword or
        the valid bit. The strb_mask input represents the bit that defines the strobe.
"""
        if delay < 0:
            print('WARNING: Clamping delay to 0')
            delay = 0
if delay > 6:
print('WARNING: Clamping delay to 6')
delay = 6
strb_delay = 0
data_delay = 0
if delay > 3:
strb_delay = delay-3
else:
data_delay = 3-delay
for i in range(32):
self._dev.seti('awgs/{}/dio/delay/index'.format(awg), i)
if strb_mask & (1 << i):
self._dev.seti(
'awgs/{}/dio/delay/value'.format(awg), strb_delay)
elif data_mask & (1 << i):
self._dev.seti(
'awgs/{}/dio/delay/value'.format(awg), data_delay)
else:
self._dev.seti('awgs/{}/dio/delay/value'.format(awg), 0)
def ensure_symmetric_strobe(self, verbose=False):
done = False
good_shots = 0
bad_shots = 0
strb_bits = []
for awg in range(0, 4):
strb_bits.append(self._dev.geti(
'awgs/{}/dio/strobe/index'.format(awg)))
strb_bits = list(set(strb_bits))
if verbose:
print('INFO : Analyzing strobe bits {}'.format(strb_bits))
while not done:
data = self._dev.getv('raw/dios/0/data')
if _is_dio_strb_symmetric(data, strb_bits):
if verbose:
print('INFO : Found good shot')
bad_shots = 0
good_shots += 1
if good_shots > 5:
done = True
else:
if verbose:
print('INFO : Strobe bit(s) are not sampled symmetrically')
if verbose:
print("INFO : Disabling AWG's")
enables = 4*[0]
for awg in range(0, 4):
enables[awg] = self._dev.geti('awgs/{}/enable'.format(awg))
self._dev.seti('awgs/{}/enable'.format(awg), 0)
if verbose:
print("INFO : Switching to internal clock")
self.system_clocks_referenceclock_source(0)
time.sleep(5)
if verbose:
print("INFO : Switching to external clock")
self.system_clocks_referenceclock_source(1)
time.sleep(5)
if verbose:
print("INFO : Enabling AWG's")
for awg in range(0, 4):
self._dev.seti('awgs/{}/enable'.format(awg), enables[awg])
good_shots = 0
bad_shots += 1
if bad_shots > 5:
done = True
return (good_shots > 0) and (bad_shots == 0)
def calibrate_dio_protocol(self, awgs_and_sequences, verbose=False):
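        # `awgs_and_sequences` is an iterable of (awg_nr, expected_codeword_sequence)
        # pairs, e.g. [(0, [1, 2, 3]), (1, [1, 2, 3])] (hypothetical sequence).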
if verbose:
print("INFO : Calibrating DIO delays")
if not self.ensure_symmetric_strobe(verbose):
if verbose:
print("ERROR : Strobe is not symmetric!")
return False
else:
if verbose:
print("INFO : Strobe is symmetric")
all_valid_delays = []
for awg, sequence in awgs_and_sequences:
valid_delays = self._find_valid_delays(awg, sequence, verbose)
if valid_delays:
all_valid_delays.append(valid_delays)
else:
if verbose:
print(
"ERROR : Unable to find valid delays for AWG {}!".format(awg))
return False
# Figure out which delays are valid
combined_valid_delays = set.intersection(*all_valid_delays)
max_valid_delay = max(combined_valid_delays)
# Print information
if verbose:
print("INFO : Valid delays are {}".format(combined_valid_delays))
if verbose:
print("INFO : Setting delay to {}".format(max_valid_delay))
# And configure the delays
for awg, _ in awgs_and_sequences:
vld_mask = 1 << self._dev.geti(
'awgs/{}/dio/valid/index'.format(awg))
strb_mask = 1 << self._dev.geti(
'awgs/{}/dio/strobe/index'.format(awg))
cw_mask = self._dev.geti('awgs/{}/dio/mask/value'.format(awg))
cw_shift = self._dev.geti('awgs/{}/dio/mask/shift'.format(awg))
if verbose:
print("INFO : Setting delay of AWG {}".format(awg))
self._set_dio_delay(
awg, strb_mask,
(cw_mask << cw_shift) | vld_mask, max_valid_delay)
return True
##############################################################################
#
##############################################################################
def _get_edges(value, last_value, mask):
"""
Given two integer values representing a current and a past value,
and a bit mask, this function will return two
integer values representing the bits with rising (re) and falling (fe)
edges.
"""
changed = value ^ last_value
re = changed & value & mask
fe = changed & ~value & mask
return re, fe
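# Worked example (sketch): for value=0b0011, last_value=0b0101 and mask=0b1111,
# changed = 0b0110, so re = 0b0010 (bit 1 rose) and fe = 0b0100 (bit 2 fell).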
def _is_dio_strb_symmetric(data, bits):
count_ok = True
for bit in bits:
strobe_mask = 1 << bit
count_low = False
count_high = False
strobe_low = 0
strobe_high = 0
last_strobe = None
for n, d in enumerate(data):
curr_strobe = (d & strobe_mask) != 0
if count_high:
if curr_strobe:
strobe_high += 1
else:
if (strobe_low > 0) and (strobe_low != strobe_high):
count_ok = False
break
if count_low:
if not curr_strobe:
strobe_low += 1
else:
if (strobe_high > 0) and (strobe_low != strobe_high):
count_ok = False
break
if (last_strobe != None):
if (curr_strobe and not last_strobe):
strobe_high = 0
count_high = True
count_low = False
elif (not curr_strobe and last_strobe):
strobe_low = 0
count_low = True
count_high = False
last_strobe = curr_strobe
if not count_ok:
break
return count_ok
def _analyze_dio_data(data, strb_mask, strb_slope, vld_mask, vld_polarity, cw_mask, cw_shift):
"""
Analyzes a list of integer values that represent samples recorded on the DIO interface.
The function needs information about the protocol used on the DIO interface. Based
on this information the function will return two lists: the detected codewords
and the positions where 'timing violations' are found. The codewords are sampled
according to the protocol configuration. Timing violations occur when a codeword
bit or the valid bit changes value at the same time as the strobe signal.
"""
timing_violations = []
codewords = []
last_d = None
for n, d in enumerate(data):
if n > 0:
strb_re = False
strb_fe = False
if strb_slope == 0:
strb_re = True
strb_fe = True
elif strb_slope == 1:
strb_re, _ = _get_edges(d, last_d, strb_mask)
elif strb_slope == 2:
_, strb_fe = _get_edges(d, last_d, strb_mask)
else:
strb_re, strb_fe = _get_edges(d, last_d, strb_mask)
vld_re = False
vld_fe = False
if vld_polarity != 0:
vld_re, vld_fe = _get_edges(d, last_d, vld_mask)
d_re = False
d_fe = False
if cw_mask != 0:
d_re, d_fe = _get_edges(d, last_d, cw_mask << cw_shift)
vld_active = ((vld_polarity & 1) and ((d & vld_mask) == 0)) or (
(vld_polarity & 2) and ((d & vld_mask) != 0))
codeword = (d >> cw_shift) & cw_mask
# Check for timing violation on vld
if (strb_re or strb_fe) and (vld_re or vld_fe):
timing_violations.append(n)
elif (strb_re or strb_fe) and (d_re or d_fe):
timing_violations.append(n)
# Get the codewords
if (strb_re or strb_fe) and vld_active:
codewords.append(codeword)
last_d = d
return codewords, timing_violations
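# Example (sketch, hypothetical capture): with strb_slope=3 (both edges),
# vld_polarity=2 (valid high), vld_mask=1 << 31, cw_mask=0x1f and cw_shift=0,
# every strobe-edge sample whose valid bit is set contributes its lower 5 bits
# to `codewords`; samples where the codeword or valid bits change on the same
# strobe edge are recorded in `timing_violations`.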
| 41.98913
| 138
| 0.528911
|
79521fcbfb967e650cb7e2059d63cc45c8045a9e
| 1,202
|
py
|
Python
|
config.py
|
marcdjulien/govindex
|
18740206e54aecfb4193e910e5076ee504229779
|
[
"MIT"
] | null | null | null |
config.py
|
marcdjulien/govindex
|
18740206e54aecfb4193e910e5076ee504229779
|
[
"MIT"
] | null | null | null |
config.py
|
marcdjulien/govindex
|
18740206e54aecfb4193e910e5076ee504229779
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
import os
from decouple import config
class Config(object):
basedir = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = config('SECRET_KEY', default='sdnfogui3wn')
SQLALCHEMY_DATABASE_URI = config('SQLALCHEMY_DATABASE_URI', default='sqlite:///test-3.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
DEBUG = False
# Security
SESSION_COOKIE_HTTPONLY = True
REMEMBER_COOKIE_HTTPONLY = True
REMEMBER_COOKIE_DURATION = 3600
# PostgreSQL database
SQLALCHEMY_DATABASE_URI = '{}://{}:{}@{}:{}/{}'.format(
config('DB_ENGINE' , default='postgresql' ),
config('DB_USERNAME' , default='appseed' ),
config('DB_PASS' , default='pass' ),
config('DB_HOST' , default='localhost' ),
config('DB_PORT' , default=5432 ),
config('DB_NAME' , default='appseed-flask' )
)
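    # Example (sketch): with the defaults above this resolves to
    # 'postgresql://appseed:pass@localhost:5432/appseed-flask'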
class DebugConfig(Config):
DEBUG = True
SQLALCHEMY_ECHO = True
# Load all possible configurations
config_dict = {
'Production': ProductionConfig,
'Debug' : DebugConfig
}
| 28.619048
| 94
| 0.633943
|
79521fcd997a54511714792481bb583f27065d3a
| 4,103
|
py
|
Python
|
grr/server/grr_response_server/rdfvalues/hunts.py
|
billstackpole/grr
|
203a0a99990a2d4004aed84a5cd822cbda2b418c
|
[
"Apache-2.0"
] | 1
|
2019-03-28T07:09:41.000Z
|
2019-03-28T07:09:41.000Z
|
grr/server/grr_response_server/rdfvalues/hunts.py
|
gingogo/grr
|
203a0a99990a2d4004aed84a5cd822cbda2b418c
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/rdfvalues/hunts.py
|
gingogo/grr
|
203a0a99990a2d4004aed84a5cd822cbda2b418c
|
[
"Apache-2.0"
] | 1
|
2018-08-30T14:50:24.000Z
|
2018-08-30T14:50:24.000Z
|
#!/usr/bin/env python
"""RDFValue implementations for hunts."""
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import flows_pb2
from grr_response_proto import jobs_pb2
from grr_response_server import foreman_rules
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import objects as rdf_objects
from grr_response_server.rdfvalues import output_plugin as rdf_output_plugin
class HuntNotification(rdf_structs.RDFProtoStruct):
protobuf = jobs_pb2.HuntNotification
rdf_deps = [
rdf_client.ClientURN,
rdfvalue.SessionID,
]
class HuntContext(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.HuntContext
rdf_deps = [
rdf_client_stats.ClientResources,
rdf_stats.ClientResourcesStats,
rdfvalue.RDFDatetime,
rdfvalue.SessionID,
]
class FlowLikeObjectReference(rdf_structs.RDFProtoStruct):
"""A reference to a flow or a hunt."""
protobuf = flows_pb2.FlowLikeObjectReference
rdf_deps = [
rdf_objects.FlowReference,
rdf_objects.HuntReference,
]
@classmethod
def FromHuntId(cls, hunt_id):
res = FlowLikeObjectReference()
res.object_type = "HUNT_REFERENCE"
res.hunt_reference = rdf_objects.HuntReference(hunt_id=hunt_id)
return res
@classmethod
def FromFlowIdAndClientId(cls, flow_id, client_id):
res = FlowLikeObjectReference()
res.object_type = "FLOW_REFERENCE"
res.flow_reference = rdf_objects.FlowReference(
flow_id=flow_id, client_id=client_id)
return res
class HuntRunnerArgs(rdf_structs.RDFProtoStruct):
"""Hunt runner arguments definition."""
protobuf = flows_pb2.HuntRunnerArgs
rdf_deps = [
rdfvalue.Duration,
foreman_rules.ForemanClientRuleSet,
rdf_output_plugin.OutputPluginDescriptor,
rdfvalue.RDFURN,
FlowLikeObjectReference,
]
def __init__(self, initializer=None, **kwargs):
super(HuntRunnerArgs, self).__init__(initializer=initializer, **kwargs)
if initializer is None:
if not self.HasField("crash_limit"):
self.crash_limit = config.CONFIG["Hunt.default_crash_limit"]
if not self.HasField("avg_results_per_client_limit"):
self.avg_results_per_client_limit = config.CONFIG[
"Hunt.default_avg_results_per_client_limit"]
if not self.HasField("avg_cpu_seconds_per_client_limit"):
self.avg_cpu_seconds_per_client_limit = config.CONFIG[
"Hunt.default_avg_cpu_seconds_per_client_limit"]
if not self.HasField("avg_network_bytes_per_client_limit"):
self.avg_network_bytes_per_client_limit = config.CONFIG[
"Hunt.default_avg_network_bytes_per_client_limit"]
def Validate(self):
if self.HasField("client_rule_set"):
self.client_rule_set.Validate()
class HuntError(rdf_structs.RDFProtoStruct):
"""An RDFValue class representing a hunt error."""
protobuf = jobs_pb2.HuntError
rdf_deps = [
rdf_client.ClientURN,
]
class GenericHuntArgs(rdf_structs.RDFProtoStruct):
"""Arguments to the generic hunt."""
protobuf = flows_pb2.GenericHuntArgs
rdf_deps = [
rdf_flow_runner.FlowRunnerArgs,
rdf_output_plugin.OutputPluginDescriptor,
]
def Validate(self):
self.flow_runner_args.Validate()
self.flow_args.Validate()
def GetFlowArgsClass(self):
if self.flow_runner_args.flow_name:
flow_cls = registry.AFF4FlowRegistry.FlowClassByName(
self.flow_runner_args.flow_name)
# The required protobuf for this class is in args_type.
return flow_cls.args_type
class CreateGenericHuntFlowArgs(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.CreateGenericHuntFlowArgs
rdf_deps = [
GenericHuntArgs,
HuntRunnerArgs,
]
| 31.083333
| 76
| 0.763344
|
79521fd588437a1051879e55297993fc552cd60a
| 7,143
|
py
|
Python
|
checkout/views.py
|
lucaslamounier/django-ecommerce
|
c988909f0e1eb7e8de533fde141c14947de116fe
|
[
"CC0-1.0"
] | null | null | null |
checkout/views.py
|
lucaslamounier/django-ecommerce
|
c988909f0e1eb7e8de533fde141c14947de116fe
|
[
"CC0-1.0"
] | null | null | null |
checkout/views.py
|
lucaslamounier/django-ecommerce
|
c988909f0e1eb7e8de533fde141c14947de116fe
|
[
"CC0-1.0"
] | null | null | null |
# coding=utf-8
import logging
import json
from pagseguro import PagSeguro
from paypal.standard.forms import PayPalPaymentsForm
from paypal.standard.models import ST_PP_COMPLETED
from paypal.standard.ipn.signals import valid_ipn_received
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import (
RedirectView, TemplateView, ListView, DetailView, View
)
from django.forms import modelformset_factory
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.urlresolvers import reverse
from django.conf import settings
from django.http import HttpResponse
from catalog.models import Product
from .models import CartItem, Order
# create logging
logger = logging.getLogger('checkout.views')
class CreateCartItemView(View):
def get(self, request, *args, **kwargs):
product = get_object_or_404(Product, slug=self.kwargs['slug'])
# create message log
logger.debug('Produto %s adicionado ao carrinho' % product)
if self.request.session.session_key is None:
self.request.session.save()
cart_item, created = CartItem.objects.add_item(
self.request.session.session_key, product
)
if created:
message = 'Produto adicionado com sucesso'
else:
message = 'Produto atualizado com sucesso'
# if request is ajax, return json
if request.is_ajax():
return HttpResponse(
json.dumps({'message': message}),
content_type='application/javascript'
)
messages.success(request, message)
return redirect('checkout:cart_item')
class CartItemView(TemplateView):
template_name = 'checkout/cart.html'
def get_formset(self, clear=False):
CartItemFormSet = modelformset_factory(
CartItem, fields=('quantity',), can_delete=True, extra=0
)
session_key = self.request.session.session_key
if session_key:
if clear:
formset = CartItemFormSet(
queryset=CartItem.objects.filter(cart_key=session_key)
)
else:
formset = CartItemFormSet(
queryset=CartItem.objects.filter(cart_key=session_key),
data=self.request.POST or None
)
else:
formset = CartItemFormSet(queryset=CartItem.objects.none())
return formset
def get_context_data(self, **kwargs):
context = super(CartItemView, self).get_context_data(**kwargs)
context['formset'] = self.get_formset()
return context
def post(self, request, *args, **kwargs):
formset = self.get_formset()
context = self.get_context_data(**kwargs)
if formset.is_valid():
formset.save()
messages.success(request, 'Carrinho atualizado com sucesso')
context['formset'] = self.get_formset(clear=True)
return self.render_to_response(context)
class CheckoutView(LoginRequiredMixin, TemplateView):
template_name = 'checkout/checkout.html'
def get(self, request, *args, **kwargs):
session_key = request.session.session_key
if session_key and CartItem.objects.filter(cart_key=session_key).exists():
cart_items = CartItem.objects.filter(cart_key=session_key)
order = Order.objects.create_order(
user=request.user, cart_items=cart_items
)
cart_items.delete()
else:
messages.info(request, 'Não há itens no carrinho de compras')
return redirect('checkout:cart_item')
response = super(CheckoutView, self).get(request, *args, **kwargs)
response.context_data['order'] = order
return response
class OrderListView(LoginRequiredMixin, ListView):
template_name = 'checkout/order_list.html'
paginate_by = 10
def get_queryset(self):
return Order.objects.filter(user=self.request.user).order_by('-pk')
class OrderDetailView(LoginRequiredMixin, DetailView):
template_name = 'checkout/order_detail.html'
def get_queryset(self):
return Order.objects.filter(user=self.request.user)
class PagSeguroView(LoginRequiredMixin, RedirectView):
def get_redirect_url(self, *args, **kwargs):
order_pk = self.kwargs.get('pk')
order = get_object_or_404(
Order.objects.filter(user=self.request.user), pk=order_pk
)
pg = order.pagseguro()
pg.redirect_url = self.request.build_absolute_uri(
reverse('checkout:order_detail', args=[order.pk])
)
pg.notification_url = self.request.build_absolute_uri(
reverse('checkout:pagseguro_notification')
)
response = pg.checkout()
return response.payment_url
class PaypalView(LoginRequiredMixin, TemplateView):
template_name = 'checkout/paypal.html'
def get_context_data(self, **kwargs):
context = super(PaypalView, self).get_context_data(**kwargs)
order_pk = self.kwargs.get('pk')
order = get_object_or_404(
Order.objects.filter(user=self.request.user), pk=order_pk
)
paypal_dict = order.paypal()
paypal_dict['return_url'] = self.request.build_absolute_uri(
reverse('checkout:order_list')
)
paypal_dict['cancel_return'] = self.request.build_absolute_uri(
reverse('checkout:order_list')
)
paypal_dict['notify_url'] = self.request.build_absolute_uri(
reverse('paypal-ipn')
)
context['form'] = PayPalPaymentsForm(initial=paypal_dict)
return context
@csrf_exempt
def pagseguro_notification(request):
notification_code = request.POST.get('notificationCode', None)
if notification_code:
pg = PagSeguro(
email=settings.PAGSEGURO_EMAIL, token=settings.PAGSEGURO_TOKEN,
config={'sandbox': settings.PAGSEGURO_SANDBOX}
)
notification_data = pg.check_notification(notification_code)
status = notification_data.status
reference = notification_data.reference
try:
order = Order.objects.get(pk=reference)
except Order.DoesNotExist:
pass
else:
order.pagseguro_update_status(status)
return HttpResponse('OK')
def paypal_notification(sender, **kwargs):
ipn_obj = sender
if ipn_obj.payment_status == ST_PP_COMPLETED and \
ipn_obj.receiver_email == settings.PAYPAL_EMAIL:
try:
order = Order.objects.get(pk=ipn_obj.invoice)
order.complete()
except Order.DoesNotExist:
pass
valid_ipn_received.connect(paypal_notification)
create_cartitem = CreateCartItemView.as_view()
cart_item = CartItemView.as_view()
checkout = CheckoutView.as_view()
order_list = OrderListView.as_view()
order_detail = OrderDetailView.as_view()
pagseguro_view = PagSeguroView.as_view()
paypal_view = PaypalView.as_view()
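# Illustrative URL wiring for the view callables exported above (a sketch only;
# route paths and app_name are assumptions, while the route names match the
# reverse() calls used in this module):
#
# # checkout/urls.py
# from django.urls import path
# from . import views
#
# app_name = 'checkout'
# urlpatterns = [
#     path('carrinho/adicionar/<slug:slug>/', views.create_cartitem, name='create_cartitem'),
#     path('carrinho/', views.cart_item, name='cart_item'),
#     path('', views.checkout, name='checkout'),
#     path('pedidos/', views.order_list, name='order_list'),
#     path('pedidos/<int:pk>/', views.order_detail, name='order_detail'),
#     path('pagseguro/<int:pk>/', views.pagseguro_view, name='pagseguro_view'),
#     path('paypal/<int:pk>/', views.paypal_view, name='paypal_view'),
#     path('pagseguro/notificacao/', views.pagseguro_notification, name='pagseguro_notification'),
# ]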
| 33.535211
| 82
| 0.667087
|
7952201b41df41d08ba357edeaf6269d29692f02
| 4,637
|
py
|
Python
|
demo/python/rhino_demo_file.py
|
hellow554/rhino
|
bbea49a0671841e4577902c4455904572b49612f
|
[
"Apache-2.0"
] | 472
|
2018-10-29T16:51:52.000Z
|
2022-03-29T05:45:29.000Z
|
demo/python/rhino_demo_file.py
|
hellow554/rhino
|
bbea49a0671841e4577902c4455904572b49612f
|
[
"Apache-2.0"
] | 89
|
2018-12-24T04:13:56.000Z
|
2022-03-25T17:18:07.000Z
|
demo/python/rhino_demo_file.py
|
hellow554/rhino
|
bbea49a0671841e4577902c4455904572b49612f
|
[
"Apache-2.0"
] | 70
|
2018-11-03T02:39:11.000Z
|
2022-03-17T00:05:38.000Z
|
#
# Copyright 2018-2021 Picovoice Inc.
#
# You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
# file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import argparse
import struct
import wave
import pvrhino
def read_file(file_name, sample_rate):
wav_file = wave.open(file_name, mode="rb")
channels = wav_file.getnchannels()
num_frames = wav_file.getnframes()
if wav_file.getframerate() != sample_rate:
raise ValueError("Audio file should have a sample rate of %d. got %d" % (sample_rate, wav_file.getframerate()))
samples = wav_file.readframes(num_frames)
wav_file.close()
frames = struct.unpack('h' * num_frames * channels, samples)
if channels == 2:
print("Picovoice processes single-channel audio but stereo file is provided. Processing left channel only.")
return frames[::channels]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--input_audio_path', help='Absolute path to input audio file.',
required=True)
parser.add_argument('--access_key',
help='AccessKey obtained from Picovoice Console (https://picovoice.ai/console/)',
required=True)
parser.add_argument('--context_path', help="Absolute path to context file.", required=True)
parser.add_argument('--library_path', help='Absolute path to dynamic library.', default=pvrhino.LIBRARY_PATH)
parser.add_argument(
'--model_path',
help='Absolute path to the file containing model parameters.',
default=pvrhino.MODEL_PATH)
parser.add_argument(
'--sensitivity',
help="Inference sensitivity. It should be a number within [0, 1]. A higher sensitivity value results in " +
"fewer misses at the cost of (potentially) increasing the erroneous inference rate.",
type=float,
default=0.5)
parser.add_argument(
'--require_endpoint',
help="If set to `False`, Rhino does not require an endpoint (chunk of silence) before finishing inference.",
default='True',
choices=['True', 'False'])
args = parser.parse_args()
if args.require_endpoint.lower() == 'false':
require_endpoint = False
else:
require_endpoint = True
try:
rhino = pvrhino.create(
access_key=args.access_key,
library_path=args.library_path,
model_path=args.model_path,
context_path=args.context_path,
sensitivity=args.sensitivity,
require_endpoint=require_endpoint)
except pvrhino.RhinoInvalidArgumentError as e:
        print(f"One or more arguments provided to Rhino are invalid: {args}")
print(f"If all other arguments seem valid, ensure that '{args.access_key}' is a valid AccessKey")
raise e
except pvrhino.RhinoActivationError as e:
print("AccessKey activation error")
raise e
except pvrhino.RhinoActivationLimitError as e:
        print(f"AccessKey '{args.access_key}' has reached its temporary device limit")
raise e
except pvrhino.RhinoActivationRefusedError as e:
print(f"AccessKey '{args.access_key}' refused")
raise e
except pvrhino.RhinoActivationThrottledError as e:
print(f"AccessKey '{args.access_key}' has been throttled")
raise e
except pvrhino.RhinoError as e:
        print("Failed to initialize Rhino")
raise e
audio = read_file(args.input_audio_path, rhino.sample_rate)
num_frames = len(audio) // rhino.frame_length
for i in range(num_frames):
frame = audio[i * rhino.frame_length:(i + 1) * rhino.frame_length]
is_finalized = rhino.process(frame)
if is_finalized:
inference = rhino.get_inference()
if inference.is_understood:
print('{')
print(" intent : '%s'" % inference.intent)
print(' slots : {')
for slot, value in inference.slots.items():
print(" %s : '%s'" % (slot, value))
print(' }')
print('}')
else:
print("Didn't understand the command.")
break
rhino.delete()
if __name__ == '__main__':
main()
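# Example invocation (file paths and the AccessKey are placeholders, not shipped
# with this demo):
#
#   python3 rhino_demo_file.py \
#       --access_key ${ACCESS_KEY} \
#       --context_path /path/to/context.rhn \
#       --input_audio_path /path/to/command.wav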
| 35.669231
| 119
| 0.64848
|
795221a12826c986c883c95a2eec38675fca1169
| 948
|
py
|
Python
|
iaso/utils/models/soft_deletable.py
|
ekhalilbsq/iaso
|
e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee
|
[
"MIT"
] | 29
|
2020-12-26T07:22:19.000Z
|
2022-03-07T13:40:09.000Z
|
iaso/utils/models/soft_deletable.py
|
ekhalilbsq/iaso
|
e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee
|
[
"MIT"
] | 150
|
2020-11-09T15:03:27.000Z
|
2022-03-07T15:36:07.000Z
|
iaso/utils/models/soft_deletable.py
|
ekhalilbsq/iaso
|
e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee
|
[
"MIT"
] | 4
|
2020-11-09T10:38:13.000Z
|
2021-10-04T09:42:47.000Z
|
from django.db import models
from django.db.models.functions import Now
class DefaultSoftDeletableManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(deleted_at=None)
class IncludeDeletedSoftDeletableManager(models.Manager):
def get_queryset(self):
return super().get_queryset()
class OnlyDeletedSoftDeletableManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(deleted_at__isnull=False)
class SoftDeletableModel(models.Model):
class Meta:
abstract = True
deleted_at = models.DateTimeField(default=None, blank=True, null=True)
def delete_hard(self, using=None, keep_parents=False):
return super().delete(using, keep_parents)
def delete(self, using=None, keep_parents=False):
self.deleted_at = Now()
self.save()
def restore(self):
self.deleted_at = None
self.save()
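# Usage sketch (illustrative only, not part of this module): a concrete model
# opts in by subclassing SoftDeletableModel and exposing the managers above.
#
# class Article(SoftDeletableModel):
#     title = models.CharField(max_length=200)
#
#     objects = DefaultSoftDeletableManager()            # hides soft-deleted rows
#     objects_include_deleted = IncludeDeletedSoftDeletableManager()
#     objects_only_deleted = OnlyDeletedSoftDeletableManager()
#
# article.delete()       # sets deleted_at via Now(); row disappears from Article.objects
# article.restore()      # clears deleted_at; row is visible again
# article.delete_hard()  # permanently removes the row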
| 26.333333
| 74
| 0.712025
|
7952222427b78b6a8fc84cf31d52ce6d433b0f22
| 288
|
py
|
Python
|
PacoteDownload/Mundo 3 do curso Parte 2/numeros.py
|
Gabriel-ER/CursoEmVideodoYoutube-Python-
|
0733ce05f28f8f87603270ef5ab7cb51c8f2c5ac
|
[
"MIT"
] | null | null | null |
PacoteDownload/Mundo 3 do curso Parte 2/numeros.py
|
Gabriel-ER/CursoEmVideodoYoutube-Python-
|
0733ce05f28f8f87603270ef5ab7cb51c8f2c5ac
|
[
"MIT"
] | null | null | null |
PacoteDownload/Mundo 3 do curso Parte 2/numeros.py
|
Gabriel-ER/CursoEmVideodoYoutube-Python-
|
0733ce05f28f8f87603270ef5ab7cb51c8f2c5ac
|
[
"MIT"
] | null | null | null |
import uteis # My uteis module lives in my own folder
'''
Every Python file is considered a module
Every folder is considered a package
There is special syntax for packages
'''
num = int(input('Número: '))
print(f'O fatorial de {num} é {uteis.fatorial(num)} e o dobro é {uteis.dobro(num)}.')
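# The imported uteis module is not included here; a plausible minimal sketch of
# what it provides (the function names come from the calls above, the bodies are
# assumptions):
#
# # uteis/__init__.py
# def fatorial(n):
#     f = 1
#     for c in range(n, 0, -1):
#         f *= c
#     return f
#
# def dobro(n):
#     return n * 2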
| 26.181818
| 85
| 0.729167
|
79522315f193b903ef3bbd29728cc41d8fe3599d
| 16,936
|
py
|
Python
|
cloningR.py
|
yketa/active_work
|
99107a0d4935296b673f67469c1e2bd258954b9b
|
[
"MIT"
] | 1
|
2022-01-19T14:24:46.000Z
|
2022-01-19T14:24:46.000Z
|
cloningR.py
|
yketa/active_work
|
99107a0d4935296b673f67469c1e2bd258954b9b
|
[
"MIT"
] | null | null | null |
cloningR.py
|
yketa/active_work
|
99107a0d4935296b673f67469c1e2bd258954b9b
|
[
"MIT"
] | 1
|
2021-02-09T16:36:15.000Z
|
2021-02-09T16:36:15.000Z
|
"""
Module cloningR launches cloning simulations of rotors and provides classes to
read output data files from these simulations.
Bias is chosen with environment variable `CLONING_BIAS':
(0) order parameter,
(1) squared order parameter.
(see https://yketa.github.io/DAMTP_MSC_2019_Wiki/#Brownian%20rotors%20cloning%20algorithm)
"""
import numpy as np
from numpy import random
from os import path
from shutil import rmtree as rmr
from shutil import move
import sys
from subprocess import Popen, DEVNULL, PIPE
import pickle
from active_work.read import _Read
from active_work.init import get_env, get_env_list, mkdir
from active_work.exponents import float_to_letters
from active_work.maths import mean_sterr
# FUNCTIONS AND CLASSES
class CloningOutput:
"""
Read and analyse aggregated data from cloning simulations launched with
active_work.cloningR.
"""
def __init__(self, filename):
"""
Get data.
Parameters
----------
filename : string
Path to data file.
"""
self.filename = filename
with open(self.filename, 'rb') as input:
(self.exec_path, # executable path (can help discriminate controlled dynamics method)
self.tmax, # dimensionless time simulated
self.nc, # number of clones
self.nRuns, # number of different runs
self.initSim, # number of initial elementary number of iterations to "randomise"
self.bias, # cloning bias
self.sValues, # biasing parameters
self.seed, # master random seed of master random seeds
self.seeds, # master random seeds
self.N, # number of rotors in the system
self.Dr, # rotational diffusivity
self._tau, # elementary number of steps
self.dt, # time step
self.tSCGF, # array of different measurements of the time scaled CGF per value of the biasing parameter
self.orderParameter, # array of different measurements of the order parameter per value of the biasing parameter
self.orderParameterSq, # array of different measurements of the squared order parameter per value of the biasing parameter
self.walltime # array of different running time per value of the biasing parameter
) = pickle.load(input)
        self.order = [self.orderParameter, self.orderParameterSq][self.bias]    # order parameter which biases the trajectories
self.SCGF = self.tSCGF/self.N # scaled cumulant generating function
self.tau = self._tau*self.dt # dimensionless elementary time
self.tinit = self.tau*self.initSim # dimensionless initial simulation time
self.I = np.empty((self.sValues.size, self.nRuns, 2)) # rate function
for i in range(self.sValues.size):
sValue = self.sValues[i]
for j, SCGF, order in zip(
range(self.nRuns), self.SCGF[i], self.order[i]):
self.I[i, j, 0] = order
self.I[i, j, 1] = -sValue*order - SCGF
def meanSterr(self, remove=False, max=None):
"""
Returns array of mean and standard error of measured data.
Parameters
----------
remove : bool
Remove inf and -inf as well as nan. (default: False)
NOTE: A warning will be issued if remove == False and such objects are
encountered.
max : float or None
Remove data which is strictly above max in absolute value.
(default: None)
NOTE: max != None will trigger remove = True.
Returns
-------
SCGF : (self.sValues.size, 3) float Numpy array
Scaled cumulant generating function.
orderParameter : (self.sValues.size, 3) float Numpy array.
Order parameter.
orderParameterSq : (self.sValues.size, 3) float Numpy array.
Squared order parameter.
NOTE: (0) Biasing parameter.
(1) Mean.
(2) Standard error.
I : (self.sValues.size, 4) float Numpy array
Rate function.
NOTE: (0) (Squared) order parameter.
(1) Standard error on (squared) order parameter.
(2) Rate function.
(3) Standard error on rate function.
"""
SCGF = np.empty((self.sValues.size, 3))
orderParameter = np.empty((self.sValues.size, 3))
orderParameterSq = np.empty((self.sValues.size, 3))
I = np.empty((self.sValues.size, 4))
for i in range(self.sValues.size):
SCGF[i] = [
self.sValues[i],
*mean_sterr(self.SCGF[i], remove=remove, max=max)]
orderParameter[i] = [
self.sValues[i],
*mean_sterr(self.orderParameter[i], remove=remove, max=max)]
orderParameterSq[i] = [
self.sValues[i],
*mean_sterr(self.orderParameterSq[i], remove=remove, max=max)]
I[i] = [
*mean_sterr(self.I[i, :, 0], remove=remove, max=max),
*mean_sterr(self.I[i, :, 1], remove=remove, max=max)]
return SCGF, orderParameter, orderParameterSq, I
class _CloningOutput(_Read):
"""
Read data from a single cloning simulation.
"""
def __init__(self, filename):
"""
Get data.
Parameters
----------
filename : string
Path to data file.
"""
# FILE
super().__init__(filename)
# HEADER INFORMATION
self.tmax = self._read('d') # dimensionless time simulated
self.nc = self._read('i') # number of clones
self.sValue = self._read('d') # biasing parameter
self.seed = self._read('i') # master random seed
self.nRuns = self._read('i') # number of different runs
self.cloneMethod = self._read('i') # cloning method
self.initSim = self._read('i') # number of initial elementary number of iterations to "randomise" the systems
self.N = self._read('i') # number of rotors in the system
self.Dr = self._read('d') # rotational diffusivity
self.tau = self._read('i') # elementary number of steps
self.dt = self._read('d') # time step
self.bias = self._read('i') # cloning bias
# FILE PARTS LENGTHS
self.headerLength = self.file.tell() # length of header in bytes
self.runLength = 4*self._bpe('d') # length the data of a run takes
# FILE CORRUPTION CHECK
if self.fileSize != self.headerLength + self.nRuns*self.runLength:
raise ValueError("Invalid data file size.")
# MEASUREMENTS
self.tSCGF = np.empty((self.nRuns,)) # time scaled cumulant generating function
self.orderParameter = np.empty((self.nRuns,)) # order parameter
self.orderParameterSq = np.empty((self.nRuns,)) # squared order parameter
self.walltime = np.empty((self.nRuns,)) # time taken for each run
for i in range(self.nRuns):
self.tSCGF[i] = self._read('d')
self.orderParameter[i] = self._read('d')
self.orderParameterSq[i] = self._read('d')
self.walltime[i] = self._read('d')
def filename(N, Dr, nc, bias, launch):
"""
Name of simulation output directory.
Parameters
----------
N : int
Number of rotors in the system.
Dr : float
Rotational diffusivity.
nc : int
Number of clones.
bias : int
Cloning bias.
launch : int
Launch identifier.
Returns
-------
name : str
File name.
"""
return 'N%s_R%s_NC%s_B%s_E%s' % tuple(map(float_to_letters,
(N, Dr, nc, bias, launch)))
# DEFAULT PARAMETERS
_tmax = 1 # default dimensionless time to simulate
_nc = 10 # default number of clones
_seed = random.randint(1e7) # default master random seed
_nRuns = 1 # default number of different runs
_initSim = 1 # default number of initial elementary number of iterations to "randomise" the systems
_bias = 0 # default cloning bias
_sMin = -0.1 # default minimum value of the biasing parameter
_sMax = 0.1 # default maximum value of the biasing parameter
_sNum = 10 # default number of values of the biasing parameter
_threads = -1 # [openMP] default number of threads
_N = 100 # default number of rotors in the system
_Dr = 1./2. # default rotational diffusivity
_tau = 100 # default elementary number of steps
_dt = 0.001 # default time step
_launch = 0 # default launch identifier
_exec_dir = path.join(path.dirname(path.realpath(__file__)), 'build') # default executable directory
_exec_name = { # default executable name
0: ('cloningR_B0', 'cloningR_B0'), # cloning bias `0' without and with control
1: ('cloningR_B1', 'cloningR_B1_C')} # cloning bias `1' without and with control
_slurm_path = path.join(path.dirname(path.realpath(__file__)), 'slurm.sh') # Slurm submitting script
_out_dir = _exec_dir # default simulation output directory
# SCRIPT
if __name__ == '__main__':
# VARIABLE DEFINITIONS
# CLONING PARAMETERS
tmax = get_env('TMAX', default=_tmax, vartype=float) # dimensionless time to simulate
nc = get_env('NC', default=_nc, vartype=int) # number of clones
nRuns = get_env('NRUNS', default=_nRuns, vartype=int) # number of different runs
initSim = get_env('INITSIM', default=_initSim, vartype=int) # number of initial elementary number of iterations to "randomise" the systems
bias = get_env('CLONING_BIAS', default=_bias, vartype=int) # cloning bias
# BIASING PARAMETERS
sMin = get_env('SMIN', default=_sMin, vartype=float) # minimum value of the biasing parameter
sMax = get_env('SMAX', default=_sMax, vartype=float) # maximum value of the biasing parameter
sNum = get_env('SNUM', default=_sNum, vartype=int) # number of values of the biasing parameter
sValues = np.linspace(sMin, sMax, sNum, endpoint=True) # array of values of the biasing parameter
# RANDOM SEEDS
seed = get_env('SEED', default=_seed, vartype=int) # master random seed of master random seeds
random.seed(seed) # set seed
seeds = random.randint(1e7, size=(sNum,)) # master random seeds
# OPENMP PARAMETERS
threads = get_env('THREADS', default=_threads, vartype=int) # number of threads
# SLURM PARAMETERS
slurm = get_env('SLURM', default=False, vartype=bool) # use Slurm job scheduler (see active_work/slurm.sh)
slurm_partition = get_env('SLURM_PARTITION', vartype=str) # partition for the ressource allocation
slurm_ntasks = get_env('SLURM_NTASKS', vartype=int) # number of MPI ranks running per node
slurm_time = get_env('SLURM_TIME', vartype=str) # required time
slurm_chain = get_env_list('SLURM_CHAIN', vartype=int) # execute after these jobs ID have completed (order has to be the same as sValues)
# PHYSICAL PARAMETERS
N = get_env('N', default=_N, vartype=int) # number of rotors in the system
Dr = get_env('DR', default=_Dr, vartype=float) # rotational diffusivity
# SIMULATION PARAMETERS
tau = get_env('TAU', default=_tau, vartype=int) # elementary number of steps
dt = get_env('DT', default=_dt, vartype=float) # time step
# EXECUTABLE PARAMETERS
exec_dir = get_env('EXEC_DIR', default=_exec_dir, vartype=str) # executable directory
exec_name = get_env('EXEC_NAME', # executable name
default=_exec_name[bias][
get_env('CONTROLLED_DYNAMICS', default=False, vartype=bool)],
vartype=str)
exec_path = path.join(exec_dir, exec_name) # executable path
# OUTPUT FILES PARAMETERS
launch = get_env('LAUNCH', default=_launch, vartype=float) # launch identifier
out_dir = get_env('OUT_DIR', default=_out_dir, vartype=str) # output directory
sim_name = filename(N, Dr, nc, bias, launch) # simulation output name
sim_dir = path.join(out_dir, sim_name) # simulation output directory name
mkdir(sim_dir, replace=True)
tmp_dir = path.join(sim_dir, 'tmp') # temporary files directory
mkdir(tmp_dir, replace=True)
tmp_template = '%010d.cloning.out' # template of temporary files
out_file = path.join(sim_dir, sim_name + '.cloR') # simulation output file name
# LAUNCH
env = lambda i: { # environment variables for cloning executables as function of sValues index
'TMAX': str(tmax), 'NC': str(nc), 'SVALUE': str(sValues[i]),
'SEED': str(seeds[i]), 'NRUNS': str(nRuns),
'INITSIM': str(initSim),
'THREADS': str(threads),
'N': str(N), 'DR': str(Dr),
'TAU': str(tau), 'DT': str(dt),
'FILE': path.join(tmp_dir,
tmp_template % i)}
if slurm: # using Slurm job scheduler
slurm_launch = ['bash', _slurm_path, '-w'] # commands to submit Slurm job
if slurm_partition != None: slurm_launch += ['-p', slurm_partition]
if slurm_ntasks != None: slurm_launch += ['-r', str(slurm_ntasks)]
if slurm_time != None: slurm_launch += ['-t', slurm_time]
# LAUNCH
procs, jobsID = [], []
for i in range(sNum):
procs += [
Popen(
['%s \"{ %s %s; }\"' %
(str(' ').join(slurm_launch # Slurm submitting script
+ ['-j', '\'' + exec_path.split('/')[-1]
+ ' %04i %s\'' % (i, env(i)['SVALUE'])]
+ ([] if slurm_chain == []
else ['-c', str(slurm_chain[i])])),
str(' ').join(['%s=%s' % (key, env(i)[key]) # environment variables
for key in env(i)]),
exec_path)], # cloning executable
stdout=PIPE, shell=True)]
getJobID = Popen( # get submitted job ID
['head', '-n1'],
stdin=procs[-1].stdout, stdout=PIPE)
getJobID.wait()
jobID = getJobID.communicate()[0].decode().split()
            if jobID[:-1] == ['Submitted', 'batch', 'job']: # job has been successfully submitted to Slurm
jobsID += [jobID[-1]]
else: # failed to submit job
raise ValueError("Job ID not returned.")
sys.stdout.write(':'.join(jobsID) + '\n') # print jobs ID to stdout with syntax compatible with active_work.init.get_env_list
sys.stdout.flush()
else: # not using Slurm job scheduler
# LAUNCH
procs = [
Popen(['{ %s; }' % exec_path],
stdout=DEVNULL, shell=True, env=env(i))
for i in range(sNum)]
for proc in procs: proc.wait() # wait for them to finish
# CLONING OUTPUT FILE
# LOAD TEMPORARY FILES
tmp_out = []
for i in range(sNum):
tmp_out += [_CloningOutput(
path.join(tmp_dir, tmp_template % i))]
# ARRAYS OF DATA
tSCGF = np.array(
[tmp_out[i].tSCGF for i in range(sNum)])
orderParameter = np.array(
[tmp_out[i].orderParameter for i in range(sNum)])
orderParameterSq = np.array(
[tmp_out[i].orderParameterSq for i in range(sNum)])
walltime = np.array(
[tmp_out[i].walltime for i in range(sNum)])
# OUT
with open(out_file, 'wb') as output:
pickle.dump([
exec_path,
tmax, nc, nRuns, initSim, bias, sValues,
seed, seeds,
N, Dr,
tau, dt,
tSCGF, orderParameter, orderParameterSq, walltime],
output)
# CLEAN
if get_env('CLEAN', default=True, vartype=bool):
move(out_file, path.join(out_dir, sim_name + '.cloR')) # move output file to output directory
rmr(sim_dir, ignore_errors=True) # delete simulation directory
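# Illustrative helper (not part of the original module): how the aggregated
# output written above is typically read back.
def _example_read_output(filename):
    """Load an aggregated .cloR file and print the mean and standard error of
    the SCGF for each biasing parameter (filename is a placeholder)."""
    output = CloningOutput(filename)
    SCGF, orderParameter, orderParameterSq, I = output.meanSterr()
    for s, mean, sterr in SCGF:
        print('s = %g: SCGF = %g +/- %g' % (s, mean, sterr))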
| 42.34
| 146
| 0.57676
|
79522389fec074014b75e90887d3a346720ca616
| 8,639
|
py
|
Python
|
open-hackathon-server/src/hackathon/hack/azure_cert_manager.py
|
pcwl/open-hackathon
|
cbd0afbbf0b4047af17086a0d9c0a9c031bd3595
|
[
"MIT"
] | null | null | null |
open-hackathon-server/src/hackathon/hack/azure_cert_manager.py
|
pcwl/open-hackathon
|
cbd0afbbf0b4047af17086a0d9c0a9c031bd3595
|
[
"MIT"
] | null | null | null |
open-hackathon-server/src/hackathon/hack/azure_cert_manager.py
|
pcwl/open-hackathon
|
cbd0afbbf0b4047af17086a0d9c0a9c031bd3595
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
import sys
sys.path.append('..')
import os
from os.path import dirname, realpath, abspath, isfile
import commands
from hackathon.hazure.cloud_service_adapter import CloudServiceAdapter
from hackathon.hmongo.models import Hackathon, AzureKey, Experiment
from hackathon import RequiredFeature, Component, Context, internal_server_error
from hackathon.hackathon_response import ok, bad_request
from hackathon.constants import FILE_TYPE
__all__ = ["AzureCertManager"]
class AzureCertManager(Component):
"""Manage azure/mooncake certificates for the hackathon
Note that certificate is actually certificate pair: a cer file and a pem file.
The cer file is for user to download from our site and then upload to the azure/mooncake account.
    The pem file is for the azure python SDK. Every request from OHP to azure should include the pem cert.
    The cer file will be saved in different places according to the implementation of storage. The pem file is
    encrypted and saved to storage as well, and recovered to the local certificates dir whenever the SDK needs it.
"""
storage = RequiredFeature("storage")
def create_certificate(self, subscription_id, management_host, hackathon):
"""Create certificate for specific subscription and hackathon
1. check certificate dir
2. generate pem file
3. generate cert file
4. add azure key to db
5. add hackathon azure key to db
:param subscription_id:
:param management_host:
:param hackathon:
:return:
"""
base_url = '%s/%s' % (self.CERT_BASE, subscription_id)
pem_url = base_url + '.pem'
cert_url = base_url + '.cer'
# avoid duplicate pem generation
if not os.path.isfile(pem_url):
pem_command = 'openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout %s -out %s -batch' % \
(pem_url, pem_url)
commands.getstatusoutput(pem_command)
else:
self.log.debug('%s exists' % pem_url)
# avoid duplicate cert generation
if not os.path.isfile(cert_url):
cert_command = 'openssl x509 -inform pem -in %s -outform der -out %s' % (pem_url, cert_url)
commands.getstatusoutput(cert_command)
else:
self.log.debug('%s exists' % cert_url)
azure_key = AzureKey.objects(subscription_id=subscription_id, management_host=management_host).first()
if azure_key is None:
azure_key = AzureKey(
cert_url=base_url + '.cer',
pem_url=base_url + '.pem',
subscription_id=subscription_id,
management_host=management_host,
verified=False
)
azure_key.save()
hackathon.azure_keys.append(azure_key)
hackathon.save()
else:
self.log.debug('azure key exists')
if not (azure_key in hackathon.azure_keys):
hackathon.azure_keys.append(azure_key)
hackathon.save()
else:
self.log.debug('hackathon azure key exists')
# store cer file
cer_context = Context(
hackathon_name=hackathon.name,
file_name=subscription_id + '.cer',
file_type=FILE_TYPE.AZURE_CERT,
content=file(cert_url)
)
        self.log.debug("saving cer file [%s] to azure" % cer_context.file_name)
cer_context = self.storage.save(cer_context)
azure_key.cert_url = cer_context.url
# store pem file
# encrypt certification file before upload to storage
encrypted_pem_url = self.__encrypt_content(pem_url)
pem_contex = Context(
hackathon_name=hackathon.name,
file_name=subscription_id + '.pem',
file_type=FILE_TYPE.AZURE_CERT,
content=file(encrypted_pem_url)
)
self.log.debug("saving pem file [%s] to azure" % pem_contex.file_name)
pem_contex = self.storage.save(pem_contex)
os.remove(encrypted_pem_url)
azure_key.pem_url = pem_contex.url
azure_key.save()
return azure_key.dic()
def get_certificates_by_expr(self, expr_id):
"""Get certificates by experiment id
"""
        expr = Experiment.objects(id=expr_id).first()
        azure_keys = Hackathon.objects(id=expr.hackathon_id).first().azure_keys
        if not azure_keys:
            raise Exception("no azure key configured")
        return azure_keys
def get_certificates(self, hackathon):
"""Get certificates by hackathon
:type hackathon: Hackathon
:param hackathon: instance of hackathon to search certificates
:rtype list
:return a list of AzureKey
"""
hackathon_azure_keys = [a.dic() for a in hackathon.azure_keys]
if len(hackathon_azure_keys) == 0:
# if no certificates added before, return 404
return []
return hackathon_azure_keys
def delete_certificate(self, certificate_id, hackathon):
"""Delete certificate by azureKey.id and hackathon
        Remove the azureKey from the given hackathon and delete its stored cert/pem files.
:type certificate_id: int
:param certificate_id: id of AzureKey
:type hackathon: Hackathon
:param hackathon: instance of Hackathon
"""
        azure_key = AzureKey.objects(id=certificate_id).first()
        # if the key belongs to this hackathon, delete its stored files and detach it
if azure_key in hackathon.azure_keys:
try:
if isfile(azure_key.cert_url):
os.remove(azure_key.cert_url)
else:
self.storage.delete(azure_key.cert_url)
if isfile(azure_key.pem_url):
os.remove(azure_key.pem_url)
else:
self.storage.delete(azure_key.pem_url)
except Exception as e:
self.log.error(e)
hackathon.azure_keys.remove(azure_key)
hackathon.save()
return ok(True)
def check_sub_id(self, subscription_id):
azure_key = AzureKey.objects(subscription_id=subscription_id).first()
if self.util.is_local():
if azure_key is not None:
azure_key.verified = True
azure_key.save()
return ok("success")
if azure_key is None:
return internal_server_error("No available azure key on the server side.")
sms = CloudServiceAdapter(azure_key.subscription_id,
azure_key.get_local_pem_url(),
host=azure_key.management_host)
if sms.ping():
azure_key.verified = True
azure_key.save()
else:
return bad_request("Subscription id is not valid, check whether subscription id is valid and upload the right cer file to azure")
return ok("success")
def __init__(self):
self.CERT_BASE = self.util.get_config('azure.cert_base')
self.__ensure_certificates_dir()
def __ensure_certificates_dir(self):
"""Ensure that the directory to store azure certificate exists"""
if not self.CERT_BASE:
self.CERT_BASE = abspath("%s/../certificates" % dirname(realpath(__file__)))
if not os.path.exists(self.CERT_BASE):
os.makedirs(self.CERT_BASE)
def __encrypt_content(self, pem_url):
encrypted_pem_url = pem_url + ".encrypted"
cryptor = RequiredFeature("cryptor")
cryptor.encrypt(pem_url, encrypted_pem_url)
return encrypted_pem_url
def get_local_pem_url(self, pem_url):
local_pem_url = self.CERT_BASE + "/" + pem_url.split("/")[-1]
if not isfile(local_pem_url):
self.log.debug("Recover local %s.pem file from azure storage %s" % (local_pem_url, pem_url))
cryptor = RequiredFeature("cryptor")
cryptor.recover_local_file(pem_url, local_pem_url)
return local_pem_url
# recover a pem file from azure
def get_local_pem_url(azureKey):
azure_cert_manager = RequiredFeature("azure_cert_manager")
return azure_cert_manager.get_local_pem_url(azureKey.pem_url)
AzureKey.get_local_pem_url = get_local_pem_url
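# Illustrative call sequence (the subscription id and management host below are
# placeholders; a registered "storage" feature and a hackathon instance are assumed):
#
# acm = AzureCertManager()
# key_dict = acm.create_certificate('00000000-0000-0000-0000-000000000000',
#                                   'management.core.windows.net',
#                                   hackathon)
# acm.check_sub_id('00000000-0000-0000-0000-000000000000')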
| 35.405738
| 141
| 0.639079
|
795223919443326b228d8bfb883162b8a1c06e84
| 23,553
|
py
|
Python
|
python/ray/tune/syncer.py
|
andenrx/ray
|
0ad4113e22aa4ec40f87d4a16e474a452f9c8af7
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/syncer.py
|
andenrx/ray
|
0ad4113e22aa4ec40f87d4a16e474a452f9c8af7
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/syncer.py
|
andenrx/ray
|
0ad4113e22aa4ec40f87d4a16e474a452f9c8af7
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Callable, Dict, List, TYPE_CHECKING, Type, Union, Optional
import distutils
import logging
import os
import time
from dataclasses import dataclass
import warnings
from inspect import isclass
from shlex import quote
import ray
import yaml
from ray.tune import TuneError
from ray.tune.callback import Callback
from ray.tune.checkpoint_manager import _TuneCheckpoint
from ray.tune.result import NODE_IP
from ray.util import get_node_ip_address
from ray.util.debug import log_once
from ray.tune.cluster_info import get_ssh_key, get_ssh_user
from ray.tune.sync_client import (
CommandBasedClient,
get_sync_client,
get_cloud_sync_client,
NOOP,
SyncClient,
)
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
from ray.tune.trial import Trial
logger = logging.getLogger(__name__)
# Syncing period for syncing checkpoints between nodes or to cloud.
SYNC_PERIOD = 300
CLOUD_CHECKPOINTING_URL = (
"https://docs.ray.io/en/master/tune/user-guide.html#using-cloud-storage"
)
_log_sync_warned = False
_syncers = {}
def wait_for_sync():
for syncer in _syncers.values():
syncer.wait()
def set_sync_periods(sync_config: "SyncConfig"):
"""Sets sync period from config."""
global SYNC_PERIOD
SYNC_PERIOD = int(sync_config.sync_period)
def validate_sync_config(sync_config: "SyncConfig"):
if sync_config.node_sync_period >= 0 or sync_config.cloud_sync_period >= 0:
# Until fully deprecated, try to consolidate
if sync_config.node_sync_period >= 0 and sync_config.cloud_sync_period >= 0:
sync_period = min(
sync_config.node_sync_period, sync_config.cloud_sync_period
)
else:
sync_period = max(
sync_config.node_sync_period, sync_config.cloud_sync_period
)
sync_config.sync_period = sync_period
sync_config.node_sync_period = -1
sync_config.cloud_sync_period = -1
warnings.warn(
"The `node_sync_period` and "
"`cloud_sync_period` properties of `tune.SyncConfig` are "
"deprecated. Pass the `sync_period` property instead. "
"\nFor now, the lower of the two values (if provided) will "
f"be used as the sync_period. This value is: {sync_period}",
DeprecationWarning,
)
if sync_config.sync_to_cloud or sync_config.sync_to_driver:
if bool(sync_config.upload_dir):
syncer = sync_config.sync_to_cloud
help = "set"
else:
syncer = sync_config.sync_to_driver
help = "not set"
sync_config.syncer = syncer
sync_config.sync_to_cloud = None
sync_config.sync_to_driver = None
warnings.warn(
"The `sync_to_cloud` and `sync_to_driver` properties of "
"`tune.SyncConfig` are deprecated. Pass the `syncer` property "
"instead. Presence of an `upload_dir` decides if checkpoints "
"are synced to cloud or not. Syncing to driver is "
"automatically disabled if an `upload_dir` is given."
f"\nFor now, as the upload dir is {help}, the respective "
f"syncer is used. This value is: {syncer}",
DeprecationWarning,
)
def log_sync_template(options: str = ""):
"""Template enabling syncs between driver and worker when possible.
Requires ray cluster to be started with the autoscaler. Also requires
rsync to be installed.
Args:
options: Additional rsync options.
Returns:
Sync template with source and target parameters. None if rsync
unavailable.
"""
if not distutils.spawn.find_executable("rsync"):
if log_once("tune:rsync"):
logger.error("Log sync requires rsync to be installed.")
return None
global _log_sync_warned
ssh_key = get_ssh_key()
if ssh_key is None:
if not _log_sync_warned:
logger.debug("Log sync requires cluster to be setup with `ray up`.")
_log_sync_warned = True
return None
rsh = "ssh -i {ssh_key} -o ConnectTimeout=120s -o StrictHostKeyChecking=no"
rsh = rsh.format(ssh_key=quote(ssh_key))
options += " --exclude='checkpoint_tmp*'"
template = "rsync {options} -savz -e {rsh} {{source}} {{target}}"
return template.format(options=options, rsh=quote(rsh))
@PublicAPI
@dataclass
class SyncConfig:
"""Configuration object for syncing.
If an ``upload_dir`` is specified, both experiment and trial checkpoints
will be stored on remote (cloud) storage. Synchronization then only
happens via this remote storage.
Args:
upload_dir: Optional URI to sync training results and checkpoints
to (e.g. ``s3://bucket``, ``gs://bucket`` or ``hdfs://path``).
Specifying this will enable cloud-based checkpointing.
syncer: Function for syncing the local_dir to and
from remote storage. If string, then it must be a string template
that includes ``{source}`` and ``{target}`` for the syncer to run.
If not provided, it defaults to rsync for non cloud-based storage,
and to standard S3, gsutil or HDFS sync commands for cloud-based
storage.
If set to ``None``, no syncing will take place.
Defaults to ``"auto"`` (auto detect).
sync_on_checkpoint: Force sync-down of trial checkpoint to
driver (only non cloud-storage).
If set to False, checkpoint syncing from worker to driver
is asynchronous and best-effort. This does not affect persistent
storage syncing. Defaults to True.
sync_period: Syncing period for syncing between nodes.
"""
upload_dir: Optional[str] = None
syncer: Union[None, str] = "auto"
sync_on_checkpoint: bool = True
sync_period: int = 300
# Deprecated arguments
sync_to_cloud: Any = None
sync_to_driver: Any = None
node_sync_period: int = -1
cloud_sync_period: int = -1
def __post_init__(self):
validate_sync_config(self)
class Syncer:
def __init__(self, local_dir: str, remote_dir: str, sync_client: SyncClient = NOOP):
"""Syncs between two directories with the sync_function.
Arguments:
local_dir: Directory to sync. Uniquely identifies the syncer.
remote_dir: Remote directory to sync with.
sync_client: Client for syncing between local_dir and
remote_dir. Defaults to a Noop.
"""
self._local_dir = os.path.join(local_dir, "") if local_dir else local_dir
self._remote_dir = remote_dir
self.last_sync_up_time = float("-inf")
self.last_sync_down_time = float("-inf")
self.sync_client = sync_client
    def sync_up_if_needed(self, sync_period: int, exclude: Optional[List] = None):
        """Syncs up if time since last sync up is greater than sync_period.
Args:
sync_period: Time period between subsequent syncs.
exclude: Pattern of files to exclude, e.g.
``["*/checkpoint_*]`` to exclude trial checkpoints.
"""
if time.time() - self.last_sync_up_time > sync_period:
self.sync_up(exclude)
    def sync_down_if_needed(self, sync_period: int, exclude: Optional[List] = None):
        """Syncs down if time since last sync down is greater than sync_period.
Args:
sync_period: Time period between subsequent syncs.
exclude: Pattern of files to exclude, e.g.
``["*/checkpoint_*]`` to exclude trial checkpoints.
"""
if time.time() - self.last_sync_down_time > sync_period:
self.sync_down(exclude)
def sync_up(self, exclude: Optional[List] = None):
"""Attempts to start the sync-up to the remote path.
Args:
exclude: Pattern of files to exclude, e.g.
``["*/checkpoint_*]`` to exclude trial checkpoints.
Returns:
Whether the sync (if feasible) was successfully started.
"""
result = False
if self.validate_hosts(self._local_dir, self._remote_path):
try:
result = self.sync_client.sync_up(
self._local_dir, self._remote_path, exclude=exclude
)
self.last_sync_up_time = time.time()
except Exception:
logger.exception("Sync execution failed.")
return result
def sync_down(self, exclude: Optional[List] = None):
"""Attempts to start the sync-down from the remote path.
Args:
exclude: Pattern of files to exclude, e.g.
``["*/checkpoint_*]`` to exclude trial checkpoints.
Returns:
Whether the sync (if feasible) was successfully started.
"""
result = False
if self.validate_hosts(self._local_dir, self._remote_path):
try:
result = self.sync_client.sync_down(
self._remote_path, self._local_dir, exclude=exclude
)
self.last_sync_down_time = time.time()
except Exception:
logger.exception("Sync execution failed.")
return result
def validate_hosts(self, source, target):
if not (source and target):
logger.debug(
"Source or target is empty, skipping log sync for "
"{}".format(self._local_dir)
)
return False
return True
def wait(self):
"""Waits for the sync client to complete the current sync."""
self.sync_client.wait()
def reset(self):
self.last_sync_up_time = float("-inf")
self.last_sync_down_time = float("-inf")
self.sync_client.reset()
def close(self):
self.sync_client.close()
@property
def _remote_path(self):
return self._remote_dir
class CloudSyncer(Syncer):
"""Syncer for syncing files to/from the cloud."""
def __init__(self, local_dir, remote_dir, sync_client):
super(CloudSyncer, self).__init__(local_dir, remote_dir, sync_client)
def sync_up_if_needed(self, exclude: Optional[List] = None):
return super(CloudSyncer, self).sync_up_if_needed(SYNC_PERIOD, exclude=exclude)
def sync_down_if_needed(self, exclude: Optional[List] = None):
return super(CloudSyncer, self).sync_down_if_needed(
SYNC_PERIOD, exclude=exclude
)
class NodeSyncer(Syncer):
"""Syncer for syncing files to/from a remote dir to a local dir."""
def __init__(self, local_dir, remote_dir, sync_client):
self.local_ip = get_node_ip_address()
self.worker_ip = None
super(NodeSyncer, self).__init__(local_dir, remote_dir, sync_client)
def set_worker_ip(self, worker_ip):
"""Sets the worker IP to sync logs from."""
self.worker_ip = worker_ip
def has_remote_target(self):
"""Returns whether the Syncer has a remote target."""
if not self.worker_ip:
logger.debug("Worker IP unknown, skipping sync for %s", self._local_dir)
return False
if self.worker_ip == self.local_ip:
logger.debug("Worker IP is local IP, skipping sync for %s", self._local_dir)
return False
return True
def sync_up_if_needed(self, exclude: Optional[List] = None):
if not self.has_remote_target():
return True
return super(NodeSyncer, self).sync_up_if_needed(SYNC_PERIOD, exclude=exclude)
def sync_down_if_needed(self, exclude: Optional[List] = None):
if not self.has_remote_target():
return True
return super(NodeSyncer, self).sync_down_if_needed(SYNC_PERIOD, exclude=exclude)
def sync_up_to_new_location(self, worker_ip):
if worker_ip != self.worker_ip:
logger.debug("Setting new worker IP to %s", worker_ip)
self.set_worker_ip(worker_ip)
self.reset()
if not self.sync_up():
logger.warning(
"Sync up to new location skipped. This should not occur."
)
else:
logger.warning("Sync attempted to same IP %s.", worker_ip)
def sync_up(self, exclude: Optional[List] = None):
if not self.has_remote_target():
return True
return super(NodeSyncer, self).sync_up(exclude=exclude)
def sync_down(self, exclude: Optional[List] = None):
if not self.has_remote_target():
return True
logger.debug("Syncing from %s to %s", self._remote_path, self._local_dir)
return super(NodeSyncer, self).sync_down(exclude=exclude)
@property
def _remote_path(self):
ssh_user = get_ssh_user()
global _log_sync_warned
if not self.has_remote_target():
return None
if ssh_user is None:
if not _log_sync_warned:
logger.error("Syncer requires cluster to be setup with `ray up`.")
_log_sync_warned = True
return None
return "{}@{}:{}/".format(ssh_user, self.worker_ip, self._remote_dir)
def get_cloud_syncer(
local_dir: str,
remote_dir: Optional[str] = None,
sync_function: Optional[Union[Callable, str]] = None,
) -> CloudSyncer:
"""Returns a Syncer.
This syncer is in charge of syncing the local_dir with upload_dir.
If no ``remote_dir`` is provided, it will return a no-op syncer.
If a ``sync_function`` is provided, it will return a CloudSyncer using
a custom SyncClient initialized by the sync function. Otherwise it will
return a CloudSyncer with default templates for s3/gs/hdfs.
Args:
local_dir: Source directory for syncing.
remote_dir: Target directory for syncing. If not provided, a
no-op Syncer is returned.
sync_function: Function for syncing the local_dir to
remote_dir. If string, then it must be a string template for
syncer to run. If not provided, it defaults
to standard S3, gsutil or HDFS sync commands.
Raises:
ValueError if malformed remote_dir.
"""
key = (local_dir, remote_dir)
if key in _syncers:
return _syncers[key]
if not remote_dir:
_syncers[key] = CloudSyncer(local_dir, remote_dir, NOOP)
return _syncers[key]
if sync_function == "auto":
sync_function = None # Auto-detect
# Maybe get user-provided sync client here
client = get_sync_client(sync_function)
if client:
# If the user provided a sync template or function
_syncers[key] = CloudSyncer(local_dir, remote_dir, client)
else:
# Else, get default cloud sync client (e.g. S3 syncer)
sync_client = get_cloud_sync_client(remote_dir)
_syncers[key] = CloudSyncer(local_dir, remote_dir, sync_client)
return _syncers[key]
def get_node_syncer(
local_dir: str,
remote_dir: Optional[str] = None,
sync_function: Optional[Union[Callable, str, bool]] = None,
):
"""Returns a NodeSyncer.
Args:
local_dir: Source directory for syncing.
remote_dir: Target directory for syncing. If not provided, a
noop Syncer is returned.
sync_function: Function for syncing the local_dir to
remote_dir. If string, then it must be a string template for
            syncer to run. If True or not provided, it defaults to rsync. If
False, a noop Syncer is returned.
"""
if sync_function == "auto":
sync_function = None # Auto-detect
key = (local_dir, remote_dir)
if key in _syncers:
return _syncers[key]
elif isclass(sync_function) and issubclass(sync_function, Syncer):
_syncers[key] = sync_function(local_dir, remote_dir, None)
return _syncers[key]
elif not remote_dir or sync_function is False:
sync_client = NOOP
elif sync_function and sync_function is not True:
sync_client = get_sync_client(sync_function)
else:
sync = log_sync_template()
if sync:
sync_client = CommandBasedClient(sync, sync)
sync_client.set_logdir(local_dir)
else:
sync_client = NOOP
_syncers[key] = NodeSyncer(local_dir, remote_dir, sync_client)
return _syncers[key]
class SyncerCallback(Callback):
def __init__(self, sync_function: Union[None, bool, Callable]):
self._sync_function = sync_function
self._syncers: Dict["Trial", NodeSyncer] = {}
def _get_trial_syncer(self, trial: "Trial"):
if trial not in self._syncers:
self._syncers[trial] = self._create_trial_syncer(trial)
return self._syncers[trial]
def _create_trial_syncer(self, trial: "Trial"):
return get_node_syncer(
trial.logdir, remote_dir=trial.logdir, sync_function=self._sync_function
)
def _sync_trial_checkpoint(self, trial: "Trial", checkpoint: _TuneCheckpoint):
if checkpoint.storage == _TuneCheckpoint.MEMORY:
return
trial_syncer = self._get_trial_syncer(trial)
# If the sync_function is False, syncing to driver is disabled.
        # In every other case (valid values include None, True, Callable,
# NodeSyncer) syncing to driver is enabled.
if trial.sync_on_checkpoint and self._sync_function is not False:
try:
# Wait for any other syncs to finish. We need to sync again
# after this to handle checkpoints taken mid-sync.
trial_syncer.wait()
except TuneError as e:
# Errors occurring during this wait are not fatal for this
# checkpoint, so it should just be logged.
logger.error(
f"Trial {trial}: An error occurred during the "
f"checkpoint pre-sync wait: {e}"
)
# Force sync down and wait before tracking the new checkpoint.
try:
if trial_syncer.sync_down():
trial_syncer.wait()
else:
logger.error(
f"Trial {trial}: Checkpoint sync skipped. "
f"This should not happen."
)
except TuneError as e:
if trial.uses_cloud_checkpointing:
# Even though rsync failed the trainable can restore
# from remote durable storage.
logger.error(f"Trial {trial}: Sync error: {e}")
else:
# If the trainable didn't have remote storage to upload
# to then this checkpoint may have been lost, so we
# shouldn't track it with the checkpoint_manager.
raise e
if not trial.uses_cloud_checkpointing:
if not os.path.exists(checkpoint.value):
raise TuneError(
"Trial {}: Checkpoint path {} not "
"found after successful sync down. "
"Are you running on a Kubernetes or "
"managed cluster? rsync will not function "
"due to a lack of SSH functionality. "
"You'll need to use cloud-checkpointing "
"if that's the case, see instructions "
"here: {} .".format(
trial, checkpoint.value, CLOUD_CHECKPOINTING_URL
)
)
def on_trial_start(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
self._get_trial_syncer(trial)
def on_trial_result(
self,
iteration: int,
trials: List["Trial"],
trial: "Trial",
result: Dict,
**info,
):
trial_syncer = self._get_trial_syncer(trial)
trial_syncer.set_worker_ip(result.get(NODE_IP))
trial_syncer.sync_down_if_needed()
def on_trial_complete(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
trial_syncer = self._get_trial_syncer(trial)
if NODE_IP in trial.last_result:
trainable_ip = trial.last_result[NODE_IP]
else:
trainable_ip = ray.get(trial.runner.get_current_ip.remote())
trial_syncer.set_worker_ip(trainable_ip)
trial_syncer.sync_down_if_needed()
trial_syncer.close()
def on_checkpoint(
self,
iteration: int,
trials: List["Trial"],
trial: "Trial",
checkpoint: _TuneCheckpoint,
**info,
):
self._sync_trial_checkpoint(trial, checkpoint)
def detect_cluster_syncer(
sync_config: Optional[SyncConfig],
cluster_config_file: str = "~/ray_bootstrap_config.yaml",
) -> Union[bool, Type, NodeSyncer]:
"""Detect cluster Syncer given SyncConfig.
Returns False if cloud checkpointing is enabled (when upload dir is
set).
Else, returns sync config syncer if manually specified.
Else, detects cluster environment (e.g. Docker, Kubernetes) and returns
syncer accordingly.
"""
from ray.tune.integration.docker import DockerSyncer
sync_config = sync_config or SyncConfig()
if bool(sync_config.upload_dir) or sync_config.syncer is None:
# No sync to driver for cloud checkpointing or if manually disabled
return False
_syncer = sync_config.syncer
if _syncer == "auto":
_syncer = None
if isinstance(_syncer, Type):
return _syncer
# Else: True or None. Auto-detect.
cluster_config_file = os.path.expanduser(cluster_config_file)
if not os.path.exists(cluster_config_file):
return _syncer
with open(cluster_config_file, "rt") as fp:
config = yaml.safe_load(fp.read())
if config.get("docker"):
        logger.debug(
            "Detected docker autoscaling environment. Using `DockerSyncer` "
            "as sync client. If this is not correct or leads to errors, "
            "please pass a `syncer` parameter in the `SyncConfig` to "
            "`tune.run()` to manually configure syncing behavior."
)
return DockerSyncer
if config.get("provider", {}).get("type", "") == "kubernetes":
from ray.tune.integration.kubernetes import (
NamespacedKubernetesSyncer,
try_import_kubernetes,
)
if not try_import_kubernetes():
logger.warning(
"Detected Ray autoscaling environment on Kubernetes, "
"but Kubernetes Python CLI is not installed. "
"Checkpoint syncing may not work properly across "
"multiple pods. Be sure to install 'kubernetes' on "
"each container."
)
namespace = config["provider"].get("namespace", "ray")
        logger.debug(
            f"Detected Ray autoscaling environment on Kubernetes. Using "
            f"`NamespacedKubernetesSyncer` with namespace `{namespace}` "
            f"as sync client. If this is not correct or leads to errors, "
            f"please pass a `syncer` parameter in the `SyncConfig` "
            f"to `tune.run()` to manually configure syncing behavior."
)
return NamespacedKubernetesSyncer(namespace)
return _syncer
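# Minimal usage sketch (not from the Ray docs; the bucket name is a placeholder):
# enabling cloud checkpointing means syncing happens via the upload_dir only.
def _example_sync_config() -> SyncConfig:
    """Illustrative only: checkpoints go to an assumed S3 bucket, synced at most
    every 600 seconds."""
    return SyncConfig(upload_dir="s3://my-bucket/ray-results", sync_period=600)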
| 36.179724
| 88
| 0.627691
|
795224271191eb5321529833cfb1cc0dba6aa105
| 557
|
py
|
Python
|
src/parse_md.py
|
peter88213/yw2md
|
0a8ffa4ac9aed4d7e7abb0b22ac41fcbde86ccd1
|
[
"MIT"
] | null | null | null |
src/parse_md.py
|
peter88213/yw2md
|
0a8ffa4ac9aed4d7e7abb0b22ac41fcbde86ccd1
|
[
"MIT"
] | null | null | null |
src/parse_md.py
|
peter88213/yw2md
|
0a8ffa4ac9aed4d7e7abb0b22ac41fcbde86ccd1
|
[
"MIT"
] | null | null | null |
"""Save the result of Markdown-to-yWriter conversion
Just an auxiliary script for development,
to see the result of MdFile.convert_to_yw.
"""
from md_yw import MdFile
import sys
if __name__ == '__main__':
sourceFile = sys.argv[1]
with open(sourceFile, encoding='utf-8') as f:
mdText = f.read()
dummy = MdFile('', False, False)
text = MdFile.convert_to_yw(dummy, mdText)
lines = (text).split('\n')
text = '\n'.join(lines)
with open('test.md', 'w', encoding='utf-8') as f:
f.write(text)
| 24.217391
| 54
| 0.621185
|
795224f070a6535185cfa557cb11a443eccda961
| 1,459
|
py
|
Python
|
indra/tools/live_curation/util.py
|
djinnome/indra
|
382b7f236e0b1422c96a268ef873530b5e92d48f
|
[
"BSD-2-Clause"
] | null | null | null |
indra/tools/live_curation/util.py
|
djinnome/indra
|
382b7f236e0b1422c96a268ef873530b5e92d48f
|
[
"BSD-2-Clause"
] | null | null | null |
indra/tools/live_curation/util.py
|
djinnome/indra
|
382b7f236e0b1422c96a268ef873530b5e92d48f
|
[
"BSD-2-Clause"
] | null | null | null |
import json
import logging
from indra.statements import Statement
logger = logging.getLogger(__name__)
def _json_loader(fpath):
logger.info('Loading json file %s' % fpath)
with open(fpath, 'r') as f:
return json.load(f)
def _json_dumper(jsonobj, fpath):
try:
logger.info('Saving json object to file %s' % fpath)
with open(fpath, 'w') as f:
json.dump(obj=jsonobj, fp=f, indent=1)
return True
except Exception as e:
logger.error('Could not save json')
logger.exception(e)
return False
def _json_to_stmts_dict(stmt_jsons):
"""Return dict of statements keyed by uuid's from json statements
This function is the inverse of _stmts_dict_to_json()
Parameters
----------
stmt_jsons : list(json)
A list of json statements
Returns
-------
dict
Dict with statements keyed by their uuid's: {uuid: stmt}
"""
loaded_stmts = [Statement._from_json(s) for s in stmt_jsons]
return {s.uuid: s for s in loaded_stmts}
def _stmts_dict_to_json(stmt_dict):
"""Make a json representation from dict of statements
This function is the inverse of _json_to_stmts_dict()
Parameters
----------
stmt_dict : dict
Dict with statements keyed by their uuid's: {uuid: stmt}
Returns
-------
list(json)
A list of json statements
"""
return [s.to_json() for _, s in stmt_dict.items()]
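# Illustrative round trip (not part of the original module): the two helpers
# above invert each other.
def _example_roundtrip(stmt_jsons):
    """Key json statements by uuid and serialize them back to json."""
    stmts_by_uuid = _json_to_stmts_dict(stmt_jsons)
    return _stmts_dict_to_json(stmts_by_uuid)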
| 23.918033
| 69
| 0.638108
|
7952285817860c2caf36ae8c14a47dc5b74e783b
| 27,023
|
py
|
Python
|
sdk/python/pulumi_aws/ec2/volume_attachment.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 260
|
2018-06-18T14:57:00.000Z
|
2022-03-29T11:41:03.000Z
|
sdk/python/pulumi_aws/ec2/volume_attachment.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,154
|
2018-06-19T20:38:20.000Z
|
2022-03-31T19:48:16.000Z
|
sdk/python/pulumi_aws/ec2/volume_attachment.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 115
|
2018-06-28T03:20:27.000Z
|
2022-03-29T11:41:06.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['VolumeAttachmentArgs', 'VolumeAttachment']
@pulumi.input_type
class VolumeAttachmentArgs:
def __init__(__self__, *,
device_name: pulumi.Input[str],
instance_id: pulumi.Input[str],
volume_id: pulumi.Input[str],
force_detach: Optional[pulumi.Input[bool]] = None,
skip_destroy: Optional[pulumi.Input[bool]] = None,
stop_instance_before_detaching: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a VolumeAttachment resource.
:param pulumi.Input[str] device_name: The device name to expose to the instance (for
example, `/dev/sdh` or `xvdh`). See [Device Naming on Linux Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html#available-ec2-device-names) and [Device Naming on Windows Instances](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/device_naming.html#available-ec2-device-names) for more information.
:param pulumi.Input[str] instance_id: ID of the Instance to attach to
:param pulumi.Input[str] volume_id: ID of the Volume to be attached
:param pulumi.Input[bool] force_detach: Set to `true` if you want to force the
volume to detach. Useful if previous attempts failed, but use this option only
as a last resort, as this can result in **data loss**. See
[Detaching an Amazon EBS Volume from an Instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) for more information.
:param pulumi.Input[bool] skip_destroy: Set this to true if you do not wish
to detach the volume from the instance to which it is attached at destroy
time, and instead just remove the attachment from this provider state. This is
useful when destroying an instance which has volumes created by some other
means attached.
:param pulumi.Input[bool] stop_instance_before_detaching: Set this to true to ensure that the target instance is stopped
before trying to detach the volume. Stops the instance, if it is not already stopped.
"""
pulumi.set(__self__, "device_name", device_name)
pulumi.set(__self__, "instance_id", instance_id)
pulumi.set(__self__, "volume_id", volume_id)
if force_detach is not None:
pulumi.set(__self__, "force_detach", force_detach)
if skip_destroy is not None:
pulumi.set(__self__, "skip_destroy", skip_destroy)
if stop_instance_before_detaching is not None:
pulumi.set(__self__, "stop_instance_before_detaching", stop_instance_before_detaching)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> pulumi.Input[str]:
"""
The device name to expose to the instance (for
example, `/dev/sdh` or `xvdh`). See [Device Naming on Linux Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html#available-ec2-device-names) and [Device Naming on Windows Instances](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/device_naming.html#available-ec2-device-names) for more information.
"""
return pulumi.get(self, "device_name")
@device_name.setter
def device_name(self, value: pulumi.Input[str]):
pulumi.set(self, "device_name", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Input[str]:
"""
ID of the Instance to attach to
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> pulumi.Input[str]:
"""
ID of the Volume to be attached
"""
return pulumi.get(self, "volume_id")
@volume_id.setter
def volume_id(self, value: pulumi.Input[str]):
pulumi.set(self, "volume_id", value)
@property
@pulumi.getter(name="forceDetach")
def force_detach(self) -> Optional[pulumi.Input[bool]]:
"""
Set to `true` if you want to force the
volume to detach. Useful if previous attempts failed, but use this option only
as a last resort, as this can result in **data loss**. See
[Detaching an Amazon EBS Volume from an Instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) for more information.
"""
return pulumi.get(self, "force_detach")
@force_detach.setter
def force_detach(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force_detach", value)
@property
@pulumi.getter(name="skipDestroy")
def skip_destroy(self) -> Optional[pulumi.Input[bool]]:
"""
Set this to true if you do not wish
to detach the volume from the instance to which it is attached at destroy
time, and instead just remove the attachment from this provider state. This is
useful when destroying an instance which has volumes created by some other
means attached.
"""
return pulumi.get(self, "skip_destroy")
@skip_destroy.setter
def skip_destroy(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "skip_destroy", value)
@property
@pulumi.getter(name="stopInstanceBeforeDetaching")
def stop_instance_before_detaching(self) -> Optional[pulumi.Input[bool]]:
"""
Set this to true to ensure that the target instance is stopped
before trying to detach the volume. Stops the instance, if it is not already stopped.
"""
return pulumi.get(self, "stop_instance_before_detaching")
@stop_instance_before_detaching.setter
def stop_instance_before_detaching(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stop_instance_before_detaching", value)
@pulumi.input_type
class _VolumeAttachmentState:
def __init__(__self__, *,
device_name: Optional[pulumi.Input[str]] = None,
force_detach: Optional[pulumi.Input[bool]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
skip_destroy: Optional[pulumi.Input[bool]] = None,
stop_instance_before_detaching: Optional[pulumi.Input[bool]] = None,
volume_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering VolumeAttachment resources.
:param pulumi.Input[str] device_name: The device name to expose to the instance (for
example, `/dev/sdh` or `xvdh`). See [Device Naming on Linux Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html#available-ec2-device-names) and [Device Naming on Windows Instances](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/device_naming.html#available-ec2-device-names) for more information.
:param pulumi.Input[bool] force_detach: Set to `true` if you want to force the
volume to detach. Useful if previous attempts failed, but use this option only
as a last resort, as this can result in **data loss**. See
[Detaching an Amazon EBS Volume from an Instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) for more information.
:param pulumi.Input[str] instance_id: ID of the Instance to attach to
:param pulumi.Input[bool] skip_destroy: Set this to true if you do not wish
to detach the volume from the instance to which it is attached at destroy
time, and instead just remove the attachment from this provider state. This is
useful when destroying an instance which has volumes created by some other
means attached.
:param pulumi.Input[bool] stop_instance_before_detaching: Set this to true to ensure that the target instance is stopped
               before trying to detach the volume. Stops the instance if it is not already stopped.
:param pulumi.Input[str] volume_id: ID of the Volume to be attached
"""
if device_name is not None:
pulumi.set(__self__, "device_name", device_name)
if force_detach is not None:
pulumi.set(__self__, "force_detach", force_detach)
if instance_id is not None:
pulumi.set(__self__, "instance_id", instance_id)
if skip_destroy is not None:
pulumi.set(__self__, "skip_destroy", skip_destroy)
if stop_instance_before_detaching is not None:
pulumi.set(__self__, "stop_instance_before_detaching", stop_instance_before_detaching)
if volume_id is not None:
pulumi.set(__self__, "volume_id", volume_id)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> Optional[pulumi.Input[str]]:
"""
The device name to expose to the instance (for
example, `/dev/sdh` or `xvdh`). See [Device Naming on Linux Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html#available-ec2-device-names) and [Device Naming on Windows Instances](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/device_naming.html#available-ec2-device-names) for more information.
"""
return pulumi.get(self, "device_name")
@device_name.setter
def device_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "device_name", value)
@property
@pulumi.getter(name="forceDetach")
def force_detach(self) -> Optional[pulumi.Input[bool]]:
"""
Set to `true` if you want to force the
volume to detach. Useful if previous attempts failed, but use this option only
as a last resort, as this can result in **data loss**. See
[Detaching an Amazon EBS Volume from an Instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) for more information.
"""
return pulumi.get(self, "force_detach")
@force_detach.setter
def force_detach(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force_detach", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of the Instance to attach to
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="skipDestroy")
def skip_destroy(self) -> Optional[pulumi.Input[bool]]:
"""
Set this to true if you do not wish
to detach the volume from the instance to which it is attached at destroy
time, and instead just remove the attachment from this provider state. This is
useful when destroying an instance which has volumes created by some other
means attached.
"""
return pulumi.get(self, "skip_destroy")
@skip_destroy.setter
def skip_destroy(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "skip_destroy", value)
@property
@pulumi.getter(name="stopInstanceBeforeDetaching")
def stop_instance_before_detaching(self) -> Optional[pulumi.Input[bool]]:
"""
Set this to true to ensure that the target instance is stopped
        before trying to detach the volume. Stops the instance if it is not already stopped.
"""
return pulumi.get(self, "stop_instance_before_detaching")
@stop_instance_before_detaching.setter
def stop_instance_before_detaching(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stop_instance_before_detaching", value)
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of the Volume to be attached
"""
return pulumi.get(self, "volume_id")
@volume_id.setter
def volume_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "volume_id", value)
class VolumeAttachment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
device_name: Optional[pulumi.Input[str]] = None,
force_detach: Optional[pulumi.Input[bool]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
skip_destroy: Optional[pulumi.Input[bool]] = None,
stop_instance_before_detaching: Optional[pulumi.Input[bool]] = None,
volume_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Provides an AWS EBS Volume Attachment as a top-level resource, to attach and
        detach volumes from AWS Instances.
        > **NOTE on EBS block devices:** If you use `ebs_block_device` on an `ec2.Instance`, this provider will assume management over the full set of non-root EBS block devices for the instance and will treat additional block devices as drift. For this reason, `ebs_block_device` cannot be mixed with external `ebs.Volume` + `aws_ebs_volume_attachment` resources for a given instance.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
web = aws.ec2.Instance("web",
ami="ami-21f78e11",
availability_zone="us-west-2a",
instance_type="t2.micro",
tags={
"Name": "HelloWorld",
})
example = aws.ebs.Volume("example",
availability_zone="us-west-2a",
size=1)
ebs_att = aws.ec2.VolumeAttachment("ebsAtt",
device_name="/dev/sdh",
volume_id=example.id,
instance_id=web.id)
```
## Import
EBS Volume Attachments can be imported using `DEVICE_NAME:VOLUME_ID:INSTANCE_ID`, e.g.
```sh
$ pulumi import aws:ec2/volumeAttachment:VolumeAttachment example /dev/sdh:vol-049df61146c4d7901:i-12345678
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] device_name: The device name to expose to the instance (for
example, `/dev/sdh` or `xvdh`). See [Device Naming on Linux Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html#available-ec2-device-names) and [Device Naming on Windows Instances](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/device_naming.html#available-ec2-device-names) for more information.
:param pulumi.Input[bool] force_detach: Set to `true` if you want to force the
volume to detach. Useful if previous attempts failed, but use this option only
as a last resort, as this can result in **data loss**. See
[Detaching an Amazon EBS Volume from an Instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) for more information.
:param pulumi.Input[str] instance_id: ID of the Instance to attach to
:param pulumi.Input[bool] skip_destroy: Set this to true if you do not wish
to detach the volume from the instance to which it is attached at destroy
time, and instead just remove the attachment from this provider state. This is
useful when destroying an instance which has volumes created by some other
means attached.
:param pulumi.Input[bool] stop_instance_before_detaching: Set this to true to ensure that the target instance is stopped
               before trying to detach the volume. Stops the instance if it is not already stopped.
:param pulumi.Input[str] volume_id: ID of the Volume to be attached
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VolumeAttachmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Provides an AWS EBS Volume Attachment as a top-level resource, to attach and
        detach volumes from AWS Instances.
        > **NOTE on EBS block devices:** If you use `ebs_block_device` on an `ec2.Instance`, this provider will assume management over the full set of non-root EBS block devices for the instance and will treat additional block devices as drift. For this reason, `ebs_block_device` cannot be mixed with external `ebs.Volume` + `aws_ebs_volume_attachment` resources for a given instance.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
web = aws.ec2.Instance("web",
ami="ami-21f78e11",
availability_zone="us-west-2a",
instance_type="t2.micro",
tags={
"Name": "HelloWorld",
})
example = aws.ebs.Volume("example",
availability_zone="us-west-2a",
size=1)
ebs_att = aws.ec2.VolumeAttachment("ebsAtt",
device_name="/dev/sdh",
volume_id=example.id,
instance_id=web.id)
```
## Import
EBS Volume Attachments can be imported using `DEVICE_NAME:VOLUME_ID:INSTANCE_ID`, e.g.
```sh
$ pulumi import aws:ec2/volumeAttachment:VolumeAttachment example /dev/sdh:vol-049df61146c4d7901:i-12345678
```
:param str resource_name: The name of the resource.
:param VolumeAttachmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VolumeAttachmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
device_name: Optional[pulumi.Input[str]] = None,
force_detach: Optional[pulumi.Input[bool]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
skip_destroy: Optional[pulumi.Input[bool]] = None,
stop_instance_before_detaching: Optional[pulumi.Input[bool]] = None,
volume_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VolumeAttachmentArgs.__new__(VolumeAttachmentArgs)
if device_name is None and not opts.urn:
raise TypeError("Missing required property 'device_name'")
__props__.__dict__["device_name"] = device_name
__props__.__dict__["force_detach"] = force_detach
if instance_id is None and not opts.urn:
raise TypeError("Missing required property 'instance_id'")
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["skip_destroy"] = skip_destroy
__props__.__dict__["stop_instance_before_detaching"] = stop_instance_before_detaching
if volume_id is None and not opts.urn:
raise TypeError("Missing required property 'volume_id'")
__props__.__dict__["volume_id"] = volume_id
super(VolumeAttachment, __self__).__init__(
'aws:ec2/volumeAttachment:VolumeAttachment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
device_name: Optional[pulumi.Input[str]] = None,
force_detach: Optional[pulumi.Input[bool]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
skip_destroy: Optional[pulumi.Input[bool]] = None,
stop_instance_before_detaching: Optional[pulumi.Input[bool]] = None,
volume_id: Optional[pulumi.Input[str]] = None) -> 'VolumeAttachment':
"""
Get an existing VolumeAttachment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
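        For example (a minimal, illustrative sketch; the `id` value is a placeholder for the
        provider-assigned ID of an existing attachment):
        ```python
        import pulumi
        import pulumi_aws as aws
        existing = aws.ec2.VolumeAttachment.get("existing", id="<attachment provider ID>")
        pulumi.export("attachedInstanceId", existing.instance_id)
        ```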
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] device_name: The device name to expose to the instance (for
example, `/dev/sdh` or `xvdh`). See [Device Naming on Linux Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html#available-ec2-device-names) and [Device Naming on Windows Instances](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/device_naming.html#available-ec2-device-names) for more information.
:param pulumi.Input[bool] force_detach: Set to `true` if you want to force the
volume to detach. Useful if previous attempts failed, but use this option only
as a last resort, as this can result in **data loss**. See
[Detaching an Amazon EBS Volume from an Instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) for more information.
:param pulumi.Input[str] instance_id: ID of the Instance to attach to
:param pulumi.Input[bool] skip_destroy: Set this to true if you do not wish
to detach the volume from the instance to which it is attached at destroy
time, and instead just remove the attachment from this provider state. This is
useful when destroying an instance which has volumes created by some other
means attached.
:param pulumi.Input[bool] stop_instance_before_detaching: Set this to true to ensure that the target instance is stopped
               before trying to detach the volume. Stops the instance if it is not already stopped.
:param pulumi.Input[str] volume_id: ID of the Volume to be attached
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _VolumeAttachmentState.__new__(_VolumeAttachmentState)
__props__.__dict__["device_name"] = device_name
__props__.__dict__["force_detach"] = force_detach
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["skip_destroy"] = skip_destroy
__props__.__dict__["stop_instance_before_detaching"] = stop_instance_before_detaching
__props__.__dict__["volume_id"] = volume_id
return VolumeAttachment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> pulumi.Output[str]:
"""
The device name to expose to the instance (for
example, `/dev/sdh` or `xvdh`). See [Device Naming on Linux Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html#available-ec2-device-names) and [Device Naming on Windows Instances](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/device_naming.html#available-ec2-device-names) for more information.
"""
return pulumi.get(self, "device_name")
@property
@pulumi.getter(name="forceDetach")
def force_detach(self) -> pulumi.Output[Optional[bool]]:
"""
Set to `true` if you want to force the
volume to detach. Useful if previous attempts failed, but use this option only
as a last resort, as this can result in **data loss**. See
[Detaching an Amazon EBS Volume from an Instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) for more information.
"""
return pulumi.get(self, "force_detach")
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Output[str]:
"""
ID of the Instance to attach to
"""
return pulumi.get(self, "instance_id")
@property
@pulumi.getter(name="skipDestroy")
def skip_destroy(self) -> pulumi.Output[Optional[bool]]:
"""
Set this to true if you do not wish
to detach the volume from the instance to which it is attached at destroy
time, and instead just remove the attachment from this provider state. This is
useful when destroying an instance which has volumes created by some other
means attached.
"""
return pulumi.get(self, "skip_destroy")
@property
@pulumi.getter(name="stopInstanceBeforeDetaching")
def stop_instance_before_detaching(self) -> pulumi.Output[Optional[bool]]:
"""
Set this to true to ensure that the target instance is stopped
        before trying to detach the volume. Stops the instance if it is not already stopped.
"""
return pulumi.get(self, "stop_instance_before_detaching")
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> pulumi.Output[str]:
"""
ID of the Volume to be attached
"""
return pulumi.get(self, "volume_id")
| 52.067437
| 382
| 0.667913
|
79522904f8f6e4268e91e7b9a8ad17fb96273750
| 3,241
|
py
|
Python
|
test/functional/wallet_create_tx.py
|
bvbfan/ain
|
71e3b3456f90a858d1325f612bd44393789d74d2
|
[
"MIT"
] | null | null | null |
test/functional/wallet_create_tx.py
|
bvbfan/ain
|
71e3b3456f90a858d1325f612bd44393789d74d2
|
[
"MIT"
] | null | null | null |
test/functional/wallet_create_tx.py
|
bvbfan/ain
|
71e3b3456f90a858d1325f612bd44393789d74d2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import DefiTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.blocktools import (
TIME_GENESIS_BLOCK,
)
class CreateTxWalletTest(DefiTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info('Create some old blocks')
self.nodes[0].setmocktime(TIME_GENESIS_BLOCK+1)
self.nodes[0].generate(200)
self.nodes[0].setmocktime(0)
self.test_anti_fee_sniping()
self.test_tx_size_too_large()
def test_anti_fee_sniping(self):
self.log.info('Check that we have some (old) blocks and that anti-fee-sniping is disabled')
assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
assert_equal(tx['locktime'], 0)
self.log.info('Check that anti-fee-sniping is enabled when we mine a recent block')
self.nodes[0].generate(1)
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
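        # With a recent tip, the wallet's anti-fee-sniping logic sets the transaction's nLockTime
        # at (or, with some randomness, a little below) the current block height; after the block
        # mined above the height is 201, hence the range asserted below.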
assert 0 < tx['locktime'] <= 201
def test_tx_size_too_large(self):
# More than 10kB of outputs, so that we hit -maxtxfee with a high feerate
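        # (Rough arithmetic: 400 bech32 P2WPKH outputs at ~31 bytes each is over 12 kB, so at a
        # 0.01-per-kB fee rate the fee lands above 0.1, the upstream Bitcoin Core default for
        # -maxtxfee; this assumes that default is unchanged in this codebase.)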
outputs = {self.nodes[0].getnewaddress(address_type='bech32'): 0.000025 for i in range(400)}
raw_tx = self.nodes[0].createrawtransaction(inputs=[], outputs=outputs)
for fee_setting in ['-minrelaytxfee=0.01', '-mintxfee=0.01', '-paytxfee=0.01']:
self.log.info('Check maxtxfee in combination with {}'.format(fee_setting))
self.restart_node(0, extra_args=[fee_setting])
assert_raises_rpc_error(
-6,
"Fee exceeds maximum configured by -maxtxfee",
lambda: self.nodes[0].sendmany(dummy="", amounts=outputs),
)
assert_raises_rpc_error(
-4,
"Fee exceeds maximum configured by -maxtxfee",
lambda: self.nodes[0].fundrawtransaction(hexstring=raw_tx),
)
self.log.info('Check maxtxfee in combination with settxfee')
self.restart_node(0)
self.nodes[0].settxfee(0.01)
assert_raises_rpc_error(
-6,
"Fee exceeds maximum configured by -maxtxfee",
lambda: self.nodes[0].sendmany(dummy="", amounts=outputs),
)
assert_raises_rpc_error(
-4,
"Fee exceeds maximum configured by -maxtxfee",
lambda: self.nodes[0].fundrawtransaction(hexstring=raw_tx),
)
self.nodes[0].settxfee(0)
if __name__ == '__main__':
CreateTxWalletTest().main()
| 39.048193
| 100
| 0.650108
|
795229277f998b8ce46733de9c5726d940c18fc1
| 641
|
py
|
Python
|
acrobot.py
|
patrick-kidger/FasterNeuralDiffEq
|
510ca2aa3f9864d0190640d8d8c37b3f208cdfc9
|
[
"Apache-2.0"
] | 67
|
2020-09-22T10:17:53.000Z
|
2022-02-16T10:24:17.000Z
|
acrobot.py
|
patrick-kidger/FasterNeuralDiffEq
|
510ca2aa3f9864d0190640d8d8c37b3f208cdfc9
|
[
"Apache-2.0"
] | 1
|
2021-11-02T06:51:09.000Z
|
2021-11-02T17:55:42.000Z
|
acrobot.py
|
patrick-kidger/FasterNeuralDiffEq
|
510ca2aa3f9864d0190640d8d8c37b3f208cdfc9
|
[
"Apache-2.0"
] | 6
|
2020-09-30T08:21:18.000Z
|
2022-03-02T20:17:14.000Z
|
import datasets
import models
import train
def main(device, norm, rtol=1e-4, atol=1e-4):
batch_size = 256
name = 'acrobot/' + '-'.join(map(str, [norm, rtol, atol])).replace('.', '-')
save = True
max_epochs = 50
lr = 1e-3
weight_decay = 0.01
times, train_dataloader, val_dataloader, test_dataloader = datasets.acrobot(batch_size)
model = models.SymODE(times, norm, rtol, atol)
return train.main(name, train_dataloader, val_dataloader, test_dataloader, device, model, save, max_epochs, lr,
weight_decay)
def full(device, norm):
for _ in range(5):
main(device, norm)
| 25.64
| 115
| 0.648986
|
795229bc0a4a461da005a7d053801f77c20f2e01
| 144
|
py
|
Python
|
isc_dhcp_leases/__init__.py
|
cacoyle/python-isc-dhcp-leases
|
dc513ea124565d77a7a5bde742ec0a8ffb877fd4
|
[
"MIT"
] | 111
|
2015-02-11T21:36:40.000Z
|
2022-03-18T13:36:12.000Z
|
isc_dhcp_leases/__init__.py
|
cacoyle/python-isc-dhcp-leases
|
dc513ea124565d77a7a5bde742ec0a8ffb877fd4
|
[
"MIT"
] | 36
|
2015-05-05T12:04:07.000Z
|
2021-06-17T12:58:30.000Z
|
isc_dhcp_leases/__init__.py
|
cacoyle/python-isc-dhcp-leases
|
dc513ea124565d77a7a5bde742ec0a8ffb877fd4
|
[
"MIT"
] | 52
|
2015-05-02T19:31:20.000Z
|
2022-03-18T13:36:29.000Z
|
from __future__ import absolute_import
from .iscdhcpleases import IscDhcpLeases, Lease, Lease6
__author__ = 'Martijn Braam <martijn@brixit.nl>'
| 36
| 55
| 0.826389
|
79522a218258c8f22606988d269a2f2892659528
| 4,152
|
py
|
Python
|
scripts/floyd/floyd_distance_nb_cuda.py
|
linyuehzzz/hedetniemi_distance
|
e1e3378f7f7013e0e36f4026da1342beac725a98
|
[
"Apache-2.0"
] | 1
|
2020-08-03T11:30:28.000Z
|
2020-08-03T11:30:28.000Z
|
scripts/floyd/floyd_distance_nb_cuda.py
|
linyuehzzz/hedetniemi_distance
|
e1e3378f7f7013e0e36f4026da1342beac725a98
|
[
"Apache-2.0"
] | null | null | null |
scripts/floyd/floyd_distance_nb_cuda.py
|
linyuehzzz/hedetniemi_distance
|
e1e3378f7f7013e0e36f4026da1342beac725a98
|
[
"Apache-2.0"
] | null | null | null |
from timeit import default_timer
from numba import cuda, njit, float32
import numpy as np
import math
import timeout_decorator
##******** Read graph data ********##
## Number of nodes (100/1,000/10,000/100,000/1,000,000)
nodes = [100, 1000, 10000, 100000, 1000000]
print('Nodes: ', nodes)
## Total degree
degree = [3, 4, 5]
print('Degree: ', degree)
for i in nodes:
for j in degree:
locals()['data_n' + str(i) + '_d' + str(j)] = []
with open('graph_n' + str(i) + '_d' + str(j) + '.txt', 'r') as f:
lines = f.read().splitlines()
for line in lines:
l = line.split()
item = [int(l[0]), int(l[1]), float(l[2])]
locals()['data_n' + str(i) + '_d' + str(j)].append(item)
##******** Configure CUDA ********##
# number of threads per block: 4*4, 8*8, 16*16, 32*32
NUM_THREADS = [4, 8, 16, 32]
def get_cuda_execution_config(n, tpb):
dimBlock = (tpb, tpb)
dimGrid = (math.ceil(n / tpb), math.ceil(n / tpb))
return dimGrid, dimBlock
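# Worked example: get_cuda_execution_config(1000, 16) returns ((63, 63), (16, 16)),
# i.e. ceil(1000 / 16) = 63 blocks along each grid dimension, with 16x16 threads per block.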
##******** Construct distance matrix ********##
@cuda.jit
def graph2dist(graph, dist_mtx, n):
stride = cuda.gridDim.x * cuda.blockDim.x
## initialize distance matrix
x, y = cuda.grid(2)
if x < n and y < n:
dist_mtx[x,y] = np.inf
## calculate distance matrix
for i in range(x, graph.shape[0], stride):
a = int(graph[i,0]) - 1
b = int(graph[i,1]) - 1
d = graph[i,2]
dist_mtx[a,b] = d
dist_mtx[b,a] = d
## set diagonal to 0
if x < n:
dist_mtx[x,x] = 0.0
@timeout_decorator.timeout(10800)
def distance_matrix(graph, n):
## copy data to device
graph_device = cuda.to_device(graph)
dist_mtx_device = cuda.device_array(shape=(n,n))
## calculate distance matrix
graph2dist[dimGrid, dimBlock](graph_device, dist_mtx_device, n)
## copy data to host
dist_mtx_host = dist_mtx_device.copy_to_host()
return dist_mtx_host
##******** Floyd–Warshall distance ********##
@cuda.jit
def all_pair_floyd(matrix, k, n):
x, y = cuda.grid(2)
if x < n and y < n:
matrix[x,y] = min(matrix[x,y], matrix[x,k] + matrix[k,y])
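# Note on the parallel update above: for a fixed k, row k and column k are never changed during
# iteration k (matrix[k,k] == 0, so min(d[x,k], d[x,k] + d[k,k]) leaves d[x,k] as it is), which is
# why the per-k kernel launch can relax every (x, y) cell in parallel.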
@timeout_decorator.timeout(10800)
def floyd_distance(matrix, n):
## copy data to device
matrix_device = cuda.to_device(matrix)
    ## calculate Floyd–Warshall distance
for k in range(n):
all_pair_floyd[dimGrid, dimBlock](matrix_device, k, n)
## copy data to host
matrix_host = matrix_device.copy_to_host()
return matrix_host
##******** Compile ********##
d = [[1, 2, 30], [1, 4, 30], [1, 9, 40],
[2, 3, 25], [2, 4, 40], [3, 4, 50],
[4, 5, 30], [4, 6, 20], [5, 7, 25],
[6, 7, 20], [6, 9, 20], [7, 8, 25],
[8, 9, 20]]
n = 9
dimGrid, dimBlock = get_cuda_execution_config(n, 3)
dist_mtx = distance_matrix(np.array(d), n)
mtx_a_t = floyd_distance(dist_mtx, n)
##******** Main ********##
with open('floyd_results_nb_cuda.csv', 'w') as fw:
fw.write('bpg,tpb,nodes,degree,nb_cuda_t1,nb_cuda_t2\n')
fw.flush()
for k in NUM_THREADS:
for i in nodes:
dimGrid, dimBlock = get_cuda_execution_config(i, k)
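            # CUDA caps the grid's y (and, on older GPUs, x) dimension at 65535 blocks,
            # so skip configurations that would exceed that limit.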
if dimGrid[0] > 65535:
continue
for j in degree:
data = locals()['data_n' + str(i) + '_d' + str(j)]
## distance matrix
try:
start = default_timer()
dist_mtx = distance_matrix(np.array(data), i)
stop = default_timer()
cuda_t1 = stop - start
except:
cuda_t1 = float('inf')
## floyd distance
try:
start = default_timer()
mtx_a_t_floyd = floyd_distance(dist_mtx, i)
stop = default_timer()
cuda_floyd_t2 = stop - start
## print shortest path matrix
with open('floyd_dist_nb_cuda' + '_n' + str(i) + '_d' + str(j) + '_tpb' + str(k) + '.txt', 'w') as f:
f.write('\n'.join(['\t'.join([str(round(cell,2)) for cell in row]) for row in mtx_a_t_floyd.tolist()]))
except:
cuda_floyd_t2 = float('inf')
fw.write(str(dimGrid) + ',' + str(dimBlock) + ',' + str(i) + ',' + str(j) + ',' + str(cuda_t1) + ',' + str(cuda_floyd_t2) + '\n')
fw.flush()
| 25.95
| 137
| 0.570568
|
79522a5bb5e8c3f38baa9fae104ace2cce30cc55
| 67,697
|
py
|
Python
|
mmtbx/refinement/tst_occupancy_selections.py
|
mphancock/cctbx_project
|
ec8a239c5bcee9c9b2d1c6c95dc3fff2580bbb85
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2021-03-18T12:31:57.000Z
|
2022-03-14T06:27:06.000Z
|
mmtbx/refinement/tst_occupancy_selections.py
|
mphancock/cctbx_project
|
ec8a239c5bcee9c9b2d1c6c95dc3fff2580bbb85
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/refinement/tst_occupancy_selections.py
|
mphancock/cctbx_project
|
ec8a239c5bcee9c9b2d1c6c95dc3fff2580bbb85
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-03-26T12:52:30.000Z
|
2021-03-26T12:52:30.000Z
|
from __future__ import absolute_import, division, print_function
from mmtbx.monomer_library import pdb_interpretation
from mmtbx.refinement.occupancies import occupancy_selections
from mmtbx.command_line import fmodel
import mmtbx.model
from iotbx import file_reader
import iotbx.pdb
import iotbx.phil
from libtbx.test_utils import approx_equal, Exception_expected
from libtbx.utils import format_cpu_times, null_out, Sorry
import libtbx.load_env
from six.moves import cStringIO as StringIO
import os
import sys
from six.moves import zip
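# Helper: convert occupancy constraint groups expressed as i_seq indices into groups of the
# corresponding atom serial numbers, so results can be compared against hand-written answers.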
def extract_serials(atoms, occ_groups):
r = []
# for atom in atoms:
# assert atom.serial == atom.i_seq, "%s %d" % (atom.serial, atom.i_seq)
for i in occ_groups:
ri = []
for j in i:
ri.append([int(atoms[k].serial) for k in j])
r.append(ri)
return r
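# Helper: wrap bare selection-string lists in minimal objects exposing a .selection attribute,
# mimicking the constrained-group parameter objects passed to occupancy_selections.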
def make_up_other_constrained_groups_obj(selections):
result = []
class foo:
def __init__(self, selection):
self.selection=selection
for sel in selections:
result.append( foo(selection = sel) )
return result
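# Helpers: build an mmtbx model manager from a PDB file (or from a raw PDB string below), with
# atom sorting disabled so the input atom order, and hence the serial numbers used in the
# expected answers, is preserved.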
def get_model(file_name, log):
pdb_interpretation_params = iotbx.phil.parse(
input_string=pdb_interpretation.grand_master_phil_str, process_includes=True).extract()
pdb_interpretation_params.pdb_interpretation.sort_atoms=False
pdb_inp = iotbx.pdb.input(file_name=file_name)
return mmtbx.model.manager(
model_input = pdb_inp,
process_input = True,
pdb_interpretation_params=pdb_interpretation_params,
stop_for_unknowns = False,
log=log)
def get_model_str(strings, log):
pdb_interpretation_params = iotbx.phil.parse(
input_string=pdb_interpretation.grand_master_phil_str, process_includes=True).extract()
pdb_interpretation_params.pdb_interpretation.sort_atoms=False
pdb_inp = iotbx.pdb.input(lines=strings, source_info=None)
return mmtbx.model.manager(
model_input = pdb_inp,
process_input = True,
pdb_interpretation_params=pdb_interpretation_params,
stop_for_unknowns = False,
log=log)
def exercise_00(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/gocr.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
base = [ [[2],[3]], [[6,7,8,9,10],[11,12,13,14,15]], [[16],[17]], [[24,25,26,27],[28,29,30,31]] ]
# default
res = occupancy_selections(
model = model,
as_flex_arrays = False)
res = extract_serials(model.pdb_atoms, res)
target = base[:]
target.insert(3, [[21]])
target.insert(4, [[23]])
assert approx_equal(res, target)
# default + add water
res = occupancy_selections(
model = model,
add_water = True,
as_flex_arrays = False)
res = extract_serials(model.pdb_atoms, res)
base_21_23 = target[:]
target.extend([[[18]], [[19]], [[20]], [[22]]])
assert approx_equal(res, target)
# 1
res = occupancy_selections(
model = model,
as_flex_arrays = False,
other_individual_selection_strings = ['resseq 0 and not (altloc A or altloc B)'])
res = extract_serials(model.pdb_atoms, res)
target = base_21_23[:]
target.extend([[[0]], [[1]], [[4]], [[5]]])
assert approx_equal(res, target)
res = occupancy_selections(
model = model,
add_water = True,
as_flex_arrays = False,
other_individual_selection_strings = ['resseq 0 and not (altloc A or altloc B)'])
res = extract_serials(model.pdb_atoms, res)
target.extend([[[18]], [[19]], [[20]], [[22]]])
assert approx_equal(res, target)
# 2
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [ ['resseq 0 and (name S or name O1)'], ['resseq 0 and (name O3 or name O4)'] ])
res = occupancy_selections(
model = model,
as_flex_arrays = False,
other_constrained_groups = other_constrained_groups)
res = extract_serials(model.pdb_atoms, res)
target = base_21_23[:]
target.extend([[[0, 1]], [[4, 5]]])
assert approx_equal(res, target)
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [ ['resseq 0 and (name S or name O1)'], ['resseq 0 and (name O3 or name O4)'] ])
res = occupancy_selections(
model = model,
add_water = True,
as_flex_arrays = False,
other_constrained_groups = other_constrained_groups)
res = extract_serials(model.pdb_atoms, res)
target.extend([[[18]], [[19]], [[20]], [[22]]])
assert approx_equal(res, target)
# 3
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [ ['resseq 0 and (name O3 or name O4)'] ])
res = occupancy_selections(
model = model,
as_flex_arrays = False,
other_individual_selection_strings = ['resseq 0 and (name S or name O1)'],
other_constrained_groups = other_constrained_groups)
res = extract_serials(model.pdb_atoms, res)
target = base_21_23[:]
target.extend([[[0]], [[1]], [[4, 5]]])
assert approx_equal(res, target)
def exercise_01(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/ala_h.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
#
base = [ [[0,1,2,3,4,10,12,14,16,18,20,22], [5,6,7,8,9,11,13,15,17,19,21,23]] ]
res = occupancy_selections(
model = model,
as_flex_arrays = False)
res = extract_serials(model.pdb_atoms, res)
assert approx_equal(res, base)
def exercise_02(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/occ_mix1.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
#
base = [ [[0,1,2,3,4,5,6,7,8,9,10,11,12], [14,15,16,17,18,19,20,21,22,23,24,25,26]], [[13],[27]] ]
res = occupancy_selections(
model = model,
as_flex_arrays = False)
res = extract_serials(model.pdb_atoms, res)
assert approx_equal(res, base)
def exercise_03(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/ala_hd.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
#
base = [ [[7]], [[8]], [[9],[12]], [[10],[13]], [[11],[14]] ]
res = occupancy_selections(
model = model,
as_flex_arrays = False)
res = extract_serials(model.pdb_atoms, res)
assert approx_equal(res, base)
def exercise_05(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/ala_lys_arg_ser_tyr_neutron_hd.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
#
base = [ [[9],[12]], [[10],[13]], [[11],[14]], [[33],[37]], [[34],[38]],
[[35],[39]], [[36],[40]], [[59],[65]], [[60],[66]], [[61],[67]],
[[62],[68]], [[63],[69]], [[64],[70]], [[80],[82]], [[81],[83]],
[[103],[105]], [[104],[106]]]
res = occupancy_selections(
model = model,
as_flex_arrays = False)
res = extract_serials(model.pdb_atoms, res)
assert approx_equal(res, base)
def exercise_06(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/NAD_594_HD.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
#
base = [ [[62]], [[113]], [[65],[77]], [[66],[78]], [[67],[79]], [[68],[80]],
[[69],[81]], [[70],[82]], [[71],[83]], [[72],[84]],
[[73],[85]], [[74],[86]], [[75],[87]], [[76],[88]],
[[124],[127]],[[125],[128]],[[126],[129]]]
res = occupancy_selections(
model = model,
as_flex_arrays = False)
assert approx_equal(res, base)
def exercise_07(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/gocr_1.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
answer = [ [[0, 1, 2, 3, 4]] ]
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [ ['resseq 0'] ])
result = occupancy_selections(
model = model,
other_constrained_groups = other_constrained_groups,
as_flex_arrays = False)
assert approx_equal(result, answer)
def exercise_08(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/gocr_2.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
answers = [
[ [[6,7,8,9,10],[11,12,13,14,15]], [[16],[17]], [[21]], [[23]], [[24,25,26,27],[28,29,30,31]], [[0,1,2,3,4,5]] ],
[ [[4],[5]], [[16],[17]], [[21]], [[23]], [[24,25,26,27],[28,29,30,31]], [[6,7,8,9,10,11,12,13,14,15]] ],
[ [[4],[5]], [[6,7,8,9,10],[11,12,13,14,15]], [[21]], [[23]], [[24,25,26,27],[28,29,30,31]], [[16,17]] ],
[ [[4],[5]], [[6,7,8,9,10],[11,12,13,14,15]], [[16],[17]], [[21]], [[23]], [[24,25,26,27],[28,29,30,31]], [[18,19,20]] ],
[ [[4],[5]], [[6,7,8,9,10],[11,12,13,14,15]], [[16],[17]], [[23]], [[24,25,26,27],[28,29,30,31]], [[21]] ],
[ [[4],[5]], [[6,7,8,9,10],[11,12,13,14,15]], [[16],[17]], [[21]], [[23]], [[24,25,26,27],[28,29,30,31]], [[22]] ],
[ [[4],[5]], [[6,7,8,9,10],[11,12,13,14,15]], [[16],[17]], [[21]], [[23,24,25,26,27,28,29,30,31]] ],
[ [[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]] ]
]
group_selections = ['resseq 0',
'resseq 1',
'resseq 2',
'resseq 3',
'resseq 4',
'resseq 5',
'resseq 6',
'resseq 0:6']
for group_selection, answer in zip(group_selections, answers):
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [ [group_selection] ])
result = occupancy_selections(
model = model,
other_constrained_groups = other_constrained_groups,
as_flex_arrays = False)
assert approx_equal(result, answer)
def exercise_09(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/gocr_2.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
answers = [
[ [[6,7,8,9,10],[11,12,13,14,15]], [[16],[17]], [[21]], [[23]], [[24,25,26,27],[28,29,30,31]], [[0]], [[1]], [[2]], [[3]], [[4]], [[5]] ],
[ [[4],[5]], [[16],[17]], [[21]], [[23]], [[24,25,26,27],[28,29,30,31]], [[6]], [[7]], [[8]], [[9]], [[10]], [[11]], [[12]], [[13]], [[14]], [[15]] ],
[ [[4],[5]], [[6,7,8,9,10],[11,12,13,14,15]], [[21]], [[23]], [[24,25,26,27],[28,29,30,31]], [[16]], [[17]] ],
[ [[4],[5]], [[6,7,8,9,10],[11,12,13,14,15]], [[16],[17]], [[21]], [[23]], [[24,25,26,27],[28,29,30,31]], [[18]], [[19]], [[20]] ],
[ [[4],[5]], [[6,7,8,9,10],[11,12,13,14,15]], [[16],[17]], [[23]], [[24,25,26,27],[28,29,30,31]], [[21]] ],
[ [[4],[5]], [[6,7,8,9,10],[11,12,13,14,15]], [[16],[17]], [[21]], [[23]], [[24,25,26,27],[28,29,30,31]], [[22]] ],
[ [[4],[5]], [[6,7,8,9,10],[11,12,13,14,15]], [[16],[17]], [[21]], [[23]], [[24]], [[25]], [[26]], [[27]], [[28]], [[29]], [[30]], [[31]] ]
]
individual_selections = ['resseq 0',
'resseq 1',
'resseq 2',
'resseq 3',
'resseq 4',
'resseq 5',
'resseq 6',
'resseq 0:6']
for individual_selection, answer in zip(individual_selections, answers):
result = occupancy_selections(
model = model,
other_individual_selection_strings = [individual_selection],
as_flex_arrays = False)
assert approx_equal(result, answer)
def exercise_10(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/gocr.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
e = None
try:
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [ ['resseq 0'] ])
result = occupancy_selections(
model = model,
other_constrained_groups = other_constrained_groups,
other_individual_selection_strings = ['resseq 0'],
as_flex_arrays = False)
except Exception as e:
assert str(e) == "Duplicate selection: same atoms selected for individual and group occupancy refinement."
def exercise_11(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/gocr.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
e = None
try:
result = occupancy_selections(
model = model,
remove_selection = ['resseq 0'],
other_individual_selection_strings = ['resseq 0'],
as_flex_arrays = False)
except Exception as e:
assert str(e) == "Duplicate selection: occupancies of same atoms selected to be fixed and to be refined."
e = None
try:
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [ ['resseq 0'] ])
result = occupancy_selections(
model = model,
other_constrained_groups = other_constrained_groups,
remove_selection = ['resseq 0'],
as_flex_arrays = False)
except Exception as e:
assert str(e) == "Duplicate selection: occupancies of same atoms selected to be fixed and to be refined."
def exercise_12(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/gocr_2.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
answer = [ [[4],[5]], [[16],[17]], [[21]], [[23,24,25,26,27,28,29,30,31]] ]
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [ ['resseq 6'] ])
result = occupancy_selections(
model = model,
remove_selection = ['resseq 1'],
other_constrained_groups = other_constrained_groups,
as_flex_arrays = False)
assert approx_equal(result, answer)
#
answer = [ [[4],[5]], [[16],[17]], [[21]], [[23]], [[24]], [[25]], [[26]], [[27]], [[28]], [[29]], [[30]], [[31]] ]
result = occupancy_selections(
model = model,
remove_selection = ['resseq 1'],
other_individual_selection_strings = ['resseq 6'],
as_flex_arrays = False)
assert approx_equal(result, answer)
def exercise_13(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/lys_1.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
answer = [ [[8],[9]], [[10]], [[0],[1]], [[2],[3]] ]
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [ ['chain A and resseq 1 and name N','chain A and resseq 1 and name CA'],
['chain A and resseq 1 and name C','chain A and resseq 1 and name O'] ]
)
result = occupancy_selections(
model = model,
other_constrained_groups = other_constrained_groups,
as_flex_arrays = False)
assert approx_equal(result, answer)
def exercise_14(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/lys_1.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
answer = [ [[8],[9]], [[10]], [[0,1,2],[3,4]], [[5],[6]], [[7]] ]
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [ ['chain A and resseq 1 and (name N or name CA or name C)', 'chain A and resseq 1 and (name O or name CB)'],
['chain A and resseq 1 and name CG','chain A and resseq 1 and name CD'],
['chain A and resseq 1 and name CE'] ]
)
result = occupancy_selections(
model = model,
other_constrained_groups = other_constrained_groups,
as_flex_arrays = False)
assert approx_equal(result, answer)
def exercise_15(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/lys_1.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
answer = [ [[8],[9]], [[0,1,2],[10]], [[5,7]] ]
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [ ['chain A and resseq 1 and (name N or name CA or name C)', 'chain S and resseq 1'],
['chain A and resseq 1 and name CG or chain A and resseq 1 and name CE'] ]
)
result = occupancy_selections(
model = model,
other_constrained_groups = other_constrained_groups,
as_flex_arrays = False)
assert approx_equal(result, answer)
def exercise_16(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/lys_1.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
answer = [ [[8],[9],[10]] ]
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [
['chain A and resseq 1 and name NZ and altloc A', 'chain A and resseq 1 and name NZ and altloc B', 'chain S and resseq 1'] ]
)
result = occupancy_selections(
model = model,
other_constrained_groups = other_constrained_groups,
as_flex_arrays = False)
assert approx_equal(result, answer)
def exercise_17(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/lys_1.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
answer = [ [[8,9,10]] ]
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [
['chain A and resseq 1 and name NZ and altloc A or chain A and resseq 1 and name NZ and altloc B or chain S and resseq 1'] ]
)
result = occupancy_selections(
model = model,
other_constrained_groups = other_constrained_groups,
as_flex_arrays = False)
assert approx_equal(result, answer)
def exercise_18(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/lys_2.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
answer = [ [[8],[9],[10]] ]
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [
['chain A and resseq 1 and name NZ and altloc A','chain A and resseq 1 and name NZ and altloc B','chain S and resseq 1 and altloc C']]
)
result = occupancy_selections(
model = model,
other_constrained_groups = other_constrained_groups,
as_flex_arrays = False)
assert approx_equal(result, answer)
def exercise_19(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/lys_1.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
answer = [ [[8],[9],[10]] ]
tmp = "chain A and resseq 1 and name XX and altloc A"
other_constrained_groups = make_up_other_constrained_groups_obj(
selections = [[
tmp,
'chain A and resseq 1 and name NZ and altloc B',
'chain S and resseq 1']])
try:
result = occupancy_selections(
model = model,
other_constrained_groups = other_constrained_groups,
as_flex_arrays = False)
except Exception as e:
assert str(e) == \
'Selection string results in empty selection (selects no atoms): "%s"' \
% tmp
def exercise_20(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/ile_2conf_h.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
answer = [ [[4,5,6,7,8,9,10,11,12,13,14,15,16,17,18], [19,20,21,22,23,24,25,26,27,28,29,30,31,32,33]] ]
result = occupancy_selections(
model = model,
as_flex_arrays = False)
assert approx_equal(result, answer)
def exercise_21(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/gocr_3.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
#
base = [[[2], [3]],
[[6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
[[16], [17]],
[[21]],
[[23]],
[[24, 25, 26, 27], [28, 29, 30, 31]],
[[36]],
[[47]],
[[48]],
[[49]],
[[50]],
[[51]],
[[53]],
[[56, 57, 58, 59]],
[[60, 61, 62, 63]],
[[64, 65, 66, 67, 68]],
[[37], [40]],
[[38], [41]],
[[39], [42]],
[[43, 44, 45, 46]]]
res = occupancy_selections(
model = model,
as_flex_arrays = False)
res = extract_serials(model.pdb_atoms, res)
assert approx_equal(res, base)
def exercise_22(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/gocr_4.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
#
base = [[[0, 1, 2, 3, 8, 9, 10, 11, 12], [4, 5, 6, 7, 13, 14, 15, 16, 17]]]
res = occupancy_selections(
model = model,
as_flex_arrays = False)
res = extract_serials(model.pdb_atoms, res)
assert approx_equal(res, base)
def exercise_23(verbose):
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/gocr_5.pdb",
test=os.path.isfile)
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model(pdb_file, log)
#
base = [[[1, 2, 3, 4, 5, 6]], [[7, 8, 9, 10, 11], [12, 13, 14, 15, 16]]]
res = occupancy_selections(
model = model,
as_flex_arrays = False)
res = extract_serials(model.pdb_atoms, res)
assert approx_equal(res, base)
def exercise_24(verbose):
pdb_str1="""\
CRYST1 10.707 11.101 13.552 90.00 90.00 90.00 P 1
ATOM 0 N AALA A 9 3.452 6.807 3.508 0.19 9.33 A N
ATOM 1 CA AALA A 9 4.572 6.204 4.211 0.19 9.82 A C
ATOM 2 C AALA A 9 4.165 5.990 5.664 0.19 10.34 A C
ATOM 3 O AALA A 9 3.000 6.165 6.021 0.19 10.96 A O
ATOM 4 CB AALA A 9 5.792 7.098 4.116 0.19 10.31 A C
ATOM 5 H AALA A 9 3.466 7.667 3.487 0.19 8.78 A H
ATOM 6 HA AALA A 9 4.802 5.351 3.810 0.19 9.23 A H
ATOM 7 HB1AALA A 9 6.533 6.686 4.588 0.19 9.91 A H
ATOM 8 HB2AALA A 9 6.031 7.221 3.184 0.19 9.91 A H
ATOM 9 HB3AALA A 9 5.594 7.960 4.515 0.19 9.91 A H
ATOM 10 N BALA A 9 3.348 6.697 3.518 0.28 8.28 A N
ATOM 11 CA BALA A 9 4.461 6.052 4.195 0.28 9.14 A C
ATOM 12 C BALA A 9 4.138 5.964 5.683 0.28 9.84 A C
ATOM 13 O BALA A 9 3.003 6.215 6.089 0.28 10.68 A O
ATOM 14 CB BALA A 9 5.726 6.829 3.952 0.28 9.20 A C
ATOM 15 H BALA A 9 3.422 7.551 3.454 0.28 8.78 A H
ATOM 16 HA BALA A 9 4.597 5.156 3.849 0.28 9.23 A H
ATOM 17 HB1BALA A 9 6.465 6.395 4.406 0.28 9.91 A H
ATOM 18 HB2BALA A 9 5.907 6.863 3.000 0.28 9.91 A H
ATOM 19 HB3BALA A 9 5.623 7.731 4.294 0.28 9.91 A H
ATOM 20 N CALA A 9 3.608 6.763 3.402 0.28 8.32 A N
ATOM 21 CA CALA A 9 4.617 6.060 4.177 0.28 9.56 A C
ATOM 22 C CALA A 9 4.219 6.081 5.651 0.28 10.15 A C
ATOM 23 O CALA A 9 3.126 6.528 6.006 0.28 10.64 A O
ATOM 24 CB CALA A 9 5.981 6.684 3.973 0.28 10.39 A C
ATOM 25 H CALA A 9 3.801 7.579 3.210 0.28 8.78 A H
ATOM 26 HA CALA A 9 4.671 5.139 3.876 0.28 9.23 A H
ATOM 27 HB1CALA A 9 6.639 6.202 4.497 0.28 9.91 A H
ATOM 28 HB2CALA A 9 6.220 6.639 3.034 0.28 9.91 A H
ATOM 29 HB3CALA A 9 5.959 7.611 4.257 0.28 9.91 A H
ATOM 30 N DALA A 9 3.518 6.930 3.530 0.25 8.78 A N
ATOM 31 CA DALA A 9 4.639 6.333 4.232 0.25 9.23 A C
ATOM 32 C DALA A 9 4.203 6.093 5.674 0.25 10.10 A C
ATOM 33 O DALA A 9 3.051 6.346 6.031 0.25 10.72 A O
ATOM 34 CB DALA A 9 5.837 7.255 4.177 0.25 9.91 A C
ATOM 35 H DALA A 9 3.490 7.789 3.568 0.25 8.78 A H
ATOM 36 HA DALA A 9 4.898 5.494 3.819 0.25 9.23 A H
ATOM 37 HB1DALA A 9 6.581 6.848 4.648 0.25 9.91 A H
ATOM 38 HB2DALA A 9 6.086 7.408 3.252 0.25 9.91 A H
ATOM 39 HB3DALA A 9 5.614 8.101 4.595 0.25 9.91 A H
ATOM 40 N VAL A 10 5.119 5.606 6.502 1.00 11.13 A N
ATOM 41 CA VAL A 10 4.846 5.470 7.925 1.00 12.50 A C
ATOM 42 C VAL A 10 4.347 6.801 8.520 1.00 11.26 A C
ATOM 43 O VAL A 10 4.763 7.871 8.095 1.00 11.53 A O
ATOM 44 HA VAL A 10 4.118 4.835 8.017 1.00 12.50 A H
ATOM 45 CB AVAL A 10 5.994 4.806 8.722 0.21 14.17 A C
ATOM 46 CG1AVAL A 10 6.640 3.699 7.889 0.21 14.17 A C
ATOM 47 CG2AVAL A 10 7.005 5.815 9.197 0.21 15.20 A C
ATOM 48 H AVAL A 10 5.926 5.421 6.269 0.19 11.13 A H
ATOM 49 HB AVAL A 10 5.616 4.404 9.520 0.21 14.91 A H
ATOM 50 HG11AVAL A 10 7.358 3.289 8.396 0.21 16.29 A H
ATOM 51 HG12AVAL A 10 5.975 3.028 7.671 0.21 16.29 A H
ATOM 52 HG13AVAL A 10 6.998 4.077 7.070 0.21 16.29 A H
ATOM 53 HG21AVAL A 10 7.707 5.363 9.691 0.21 15.63 A H
ATOM 54 HG22AVAL A 10 7.391 6.271 8.433 0.21 15.63 A H
ATOM 55 HG23AVAL A 10 6.570 6.462 9.774 0.21 15.63 A H
ATOM 56 CB BVAL A 10 6.135 4.987 8.645 0.79 14.91 A C
ATOM 57 CG1BVAL A 10 6.081 5.228 10.144 0.79 16.28 A C
ATOM 58 CG2BVAL A 10 6.351 3.507 8.360 0.79 15.63 A C
ATOM 59 H BVAL A 10 5.928 5.441 6.263 0.28 11.13 A H
ATOM 60 HB BVAL A 10 6.879 5.504 8.299 0.79 14.91 A H
ATOM 61 HG11BVAL A 10 6.902 4.913 10.552 0.79 16.29 A H
ATOM 62 HG12BVAL A 10 5.978 6.177 10.316 0.79 16.29 A H
ATOM 63 HG13BVAL A 10 5.328 4.748 10.522 0.79 16.29 A H
ATOM 64 HG21BVAL A 10 7.156 3.205 8.809 0.79 15.63 A H
ATOM 65 HG22BVAL A 10 5.590 3.000 8.685 0.79 15.63 A H
ATOM 66 HG23BVAL A 10 6.445 3.372 7.404 0.79 15.63 A H
ATOM 67 H CVAL A 10 5.907 5.353 6.270 0.28 11.13 A H
ATOM 68 H DVAL A 10 5.903 5.349 6.260 0.25 11.13 A H
TER
END
"""
pdb_str2="""\
CRYST1 10.707 11.101 13.552 90.00 90.00 90.00 P 1
ATOM 0 N AALA A 9 3.452 6.807 3.508 0.19 9.33 A N
ATOM 1 CA AALA A 9 4.572 6.204 4.211 0.19 9.82 A C
ATOM 2 C AALA A 9 4.165 5.990 5.664 0.19 10.34 A C
ATOM 3 O AALA A 9 3.000 6.165 6.021 0.19 10.96 A O
ATOM 4 CB AALA A 9 5.792 7.098 4.116 0.19 10.31 A C
ATOM 5 D AALA A 9 3.466 7.667 3.487 0.19 8.78 A D
ATOM 6 DA AALA A 9 4.802 5.351 3.810 0.19 9.23 A D
ATOM 7 DB1AALA A 9 6.533 6.686 4.588 0.19 9.91 A D
ATOM 8 DB2AALA A 9 6.031 7.221 3.184 0.19 9.91 A D
ATOM 9 DB3AALA A 9 5.594 7.960 4.515 0.19 9.91 A D
ATOM 10 N BALA A 9 3.348 6.697 3.518 0.28 8.28 A N
ATOM 11 CA BALA A 9 4.461 6.052 4.195 0.28 9.14 A C
ATOM 12 C BALA A 9 4.138 5.964 5.683 0.28 9.84 A C
ATOM 13 O BALA A 9 3.003 6.215 6.089 0.28 10.68 A O
ATOM 14 CB BALA A 9 5.726 6.829 3.952 0.28 9.20 A C
ATOM 15 D BALA A 9 3.422 7.551 3.454 0.28 8.78 A D
ATOM 16 DA BALA A 9 4.597 5.156 3.849 0.28 9.23 A D
ATOM 17 DB1BALA A 9 6.465 6.395 4.406 0.28 9.91 A D
ATOM 18 DB2BALA A 9 5.907 6.863 3.000 0.28 9.91 A D
ATOM 19 DB3BALA A 9 5.623 7.731 4.294 0.28 9.91 A D
ATOM 20 N CALA A 9 3.608 6.763 3.402 0.28 8.32 A N
ATOM 21 CA CALA A 9 4.617 6.060 4.177 0.28 9.56 A C
ATOM 22 C CALA A 9 4.219 6.081 5.651 0.28 10.15 A C
ATOM 23 O CALA A 9 3.126 6.528 6.006 0.28 10.64 A O
ATOM 24 CB CALA A 9 5.981 6.684 3.973 0.28 10.39 A C
ATOM 25 D CALA A 9 3.801 7.579 3.210 0.28 8.78 A D
ATOM 26 DA CALA A 9 4.671 5.139 3.876 0.28 9.23 A D
ATOM 27 DB1CALA A 9 6.639 6.202 4.497 0.28 9.91 A D
ATOM 28 DB2CALA A 9 6.220 6.639 3.034 0.28 9.91 A D
ATOM 29 DB3CALA A 9 5.959 7.611 4.257 0.28 9.91 A D
ATOM 30 N DALA A 9 3.518 6.930 3.530 0.25 8.78 A N
ATOM 31 CA DALA A 9 4.639 6.333 4.232 0.25 9.23 A C
ATOM 32 C DALA A 9 4.203 6.093 5.674 0.25 10.10 A C
ATOM 33 O DALA A 9 3.051 6.346 6.031 0.25 10.72 A O
ATOM 34 CB DALA A 9 5.837 7.255 4.177 0.25 9.91 A C
ATOM 35 D DALA A 9 3.490 7.789 3.568 0.25 8.78 A D
ATOM 36 DA DALA A 9 4.898 5.494 3.819 0.25 9.23 A D
ATOM 37 DB1DALA A 9 6.581 6.848 4.648 0.25 9.91 A D
ATOM 38 DB2DALA A 9 6.086 7.408 3.252 0.25 9.91 A D
ATOM 39 DB3DALA A 9 5.614 8.101 4.595 0.25 9.91 A D
ATOM 40 N VAL A 10 5.119 5.606 6.502 1.00 11.13 A N
ATOM 41 CA VAL A 10 4.846 5.470 7.925 1.00 12.50 A C
ATOM 42 C VAL A 10 4.347 6.801 8.520 1.00 11.26 A C
ATOM 43 O VAL A 10 4.763 7.871 8.095 1.00 11.53 A O
ATOM 44 HA VAL A 10 4.118 4.835 8.017 1.00 12.50 A D
ATOM 45 CB AVAL A 10 5.994 4.806 8.722 0.21 14.17 A C
ATOM 46 CG1AVAL A 10 6.640 3.699 7.889 0.21 14.17 A C
ATOM 47 CG2AVAL A 10 7.005 5.815 9.197 0.21 15.20 A C
ATOM 48 D AVAL A 10 5.926 5.421 6.269 0.19 11.13 A D
ATOM 49 DB AVAL A 10 5.616 4.404 9.520 0.21 14.91 A D
ATOM 50 DG11AVAL A 10 7.358 3.289 8.396 0.21 16.29 A D
ATOM 51 DG12AVAL A 10 5.975 3.028 7.671 0.21 16.29 A D
ATOM 52 DG13AVAL A 10 6.998 4.077 7.070 0.21 16.29 A D
ATOM 53 DG21AVAL A 10 7.707 5.363 9.691 0.21 15.63 A D
ATOM 54 DG22AVAL A 10 7.391 6.271 8.433 0.21 15.63 A D
ATOM 55 DG23AVAL A 10 6.570 6.462 9.774 0.21 15.63 A D
ATOM 56 CB BVAL A 10 6.135 4.987 8.645 0.79 14.91 A C
ATOM 57 CG1BVAL A 10 6.081 5.228 10.144 0.79 16.28 A C
ATOM 58 CG2BVAL A 10 6.351 3.507 8.360 0.79 15.63 A C
ATOM 59 D BVAL A 10 5.928 5.441 6.263 0.28 11.13 A D
ATOM 60 DB BVAL A 10 6.879 5.504 8.299 0.79 14.91 A D
ATOM 61 DG11BVAL A 10 6.902 4.913 10.552 0.79 16.29 A D
ATOM 62 DG12BVAL A 10 5.978 6.177 10.316 0.79 16.29 A D
ATOM 63 DG13BVAL A 10 5.328 4.748 10.522 0.79 16.29 A D
ATOM 64 DG21BVAL A 10 7.156 3.205 8.809 0.79 15.63 A D
ATOM 65 DG22BVAL A 10 5.590 3.000 8.685 0.79 15.63 A D
ATOM 66 DG23BVAL A 10 6.445 3.372 7.404 0.79 15.63 A D
ATOM 67 D CVAL A 10 5.907 5.353 6.270 0.28 11.13 A D
ATOM 68 D DVAL A 10 5.903 5.349 6.260 0.25 11.13 A D
TER
END
"""
if (verbose): log = sys.stdout
else: log = StringIO()
for pdb_str in [pdb_str1, pdb_str2]:
model = get_model_str(pdb_str, log)
res = occupancy_selections(
model = model,
as_flex_arrays = False)
answer = \
[[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 48],
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 59],
[20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 67],
[30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 68]],
[[45, 46, 47, 49, 50, 51, 52, 53, 54, 55],
[56, 57, 58, 60, 61, 62, 63, 64, 65, 66]]]
assert approx_equal(res, answer)
def exercise_25(verbose):
pdb_str="""\
CRYST1 10.707 11.101 13.552 90.00 90.00 90.00 P 1
ATOM 0 N ALA A 9 3.452 6.807 3.508 1.00 9.33 A N
ATOM 1 CA ALA A 9 4.572 6.204 4.211 1.00 9.82 A C
ATOM 2 C ALA A 9 4.165 5.990 5.664 1.00 10.34 A C
ATOM 3 O ALA A 9 3.000 6.165 6.021 1.00 10.96 A O
ATOM 4 CB ALA A 9 5.792 7.098 4.116 1.00 10.31 A C
ATOM 5 HA ALA A 9 4.802 5.351 3.810 1.00 9.23 A H
ATOM 6 HB1 ALA A 9 6.533 6.686 4.588 1.00 9.91 A H
ATOM 7 HB2 ALA A 9 6.031 7.221 3.184 1.00 9.91 A H
ATOM 8 HB3 ALA A 9 5.594 7.960 4.515 1.00 9.91 A H
ATOM 9 H AALA A 9 3.466 7.667 3.487 0.40 8.78 A H
ATOM 10 D BALA A 9 3.466 7.667 3.487 0.60 8.78 A D
ATOM 11 N VAL A 10 5.119 5.606 6.502 1.00 11.13 A N
ATOM 12 CA VAL A 10 4.846 5.470 7.925 1.00 12.50 A C
ATOM 13 C VAL A 10 4.347 6.801 8.520 1.00 11.26 A C
ATOM 14 O VAL A 10 4.763 7.871 8.095 1.00 11.53 A O
ATOM 15 HA VAL A 10 4.118 4.835 8.017 1.00 12.50 A H
ATOM 16 CB VAL A 10 5.994 4.806 8.722 1.00 14.17 A C
ATOM 17 CG1 VAL A 10 6.640 3.699 7.889 1.00 14.17 A C
ATOM 18 CG2 VAL A 10 7.005 5.815 9.197 1.00 15.20 A C
ATOM 19 HB VAL A 10 5.616 4.404 9.520 1.00 14.91 A H
ATOM 20 HG11 VAL A 10 7.358 3.289 8.396 1.00 16.29 A H
ATOM 21 HG12 VAL A 10 5.975 3.028 7.671 1.00 16.29 A H
ATOM 22 HG13 VAL A 10 6.998 4.077 7.070 1.00 16.29 A H
ATOM 23 HG21 VAL A 10 7.707 5.363 9.691 1.00 15.63 A H
ATOM 24 HG22 VAL A 10 7.391 6.271 8.433 1.00 15.63 A H
ATOM 25 HG23 VAL A 10 6.570 6.462 9.774 1.00 15.63 A H
ATOM 26 H AVAL A 10 5.926 5.421 6.269 0.30 11.13 A H
ATOM 27 D BVAL A 10 5.926 5.421 6.269 0.70 11.13 A D
TER
END
"""
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model_str(pdb_str, log)
res = occupancy_selections(
model = model,
as_flex_arrays = False)
answer = [ [[9],[10]], [[26],[27]] ]
assert approx_equal(res, answer)
def exercise_26(verbose):
pdb_str="""\
CRYST1 71.040 72.017 72.362 90.00 100.48 90.00 C 1 2 1
ATOM 96 N PRO L 5 2.689 13.877 15.387 1.00 13.65 N
ATOM 97 CA PRO L 5 1.824 14.762 14.572 1.00 17.31 C
ATOM 98 C PRO L 5 0.338 14.432 14.641 1.00 20.79 C
ATOM 99 O PRO L 5 -0.466 15.376 14.642 1.00 20.37 O
ATOM 100 CB PRO L 5 2.330 14.534 13.143 1.00 20.71 C
ATOM 101 CG PRO L 5 3.772 14.184 13.326 1.00 20.25 C
ATOM 102 CD PRO L 5 3.871 13.403 14.633 1.00 16.57 C
ATOM 103 HA PRO L 5 1.981 15.805 14.846 1.00 17.31 H
ATOM 104 HB2 PRO L 5 1.780 13.709 12.691 1.00 20.71 H
ATOM 105 HB3 PRO L 5 2.220 15.447 12.558 1.00 20.71 H
ATOM 106 HG2 PRO L 5 4.103 13.567 12.492 1.00 20.25 H
ATOM 107 HG3 PRO L 5 4.363 15.098 13.382 1.00 20.25 H
ATOM 108 HD2 PRO L 5 3.805 12.331 14.446 1.00 16.57 H
ATOM 109 HD3 PRO L 5 4.791 13.666 15.154 1.00 16.57 H
ATOM 110 N LEU L 6 -0.052 13.175 14.677 1.00 13.93 N
ATOM 111 CA LEU L 6 -1.446 12.769 14.667 1.00 15.53 C
ATOM 112 C LEU L 6 -2.079 12.634 16.029 1.00 17.57 C
ATOM 113 O LEU L 6 -3.268 12.311 16.111 1.00 18.17 O
ATOM 114 CB LEU L 6 -1.648 11.435 13.889 1.00 17.76 C
ATOM 115 CG LEU L 6 -1.291 11.544 12.396 1.00 18.22 C
ATOM 116 CD1 LEU L 6 -1.474 10.257 11.651 1.00 18.93 C
ATOM 117 CD2 LEU L 6 -2.125 12.629 11.689 1.00 22.55 C
ATOM 118 HA LEU L 6 -2.017 13.534 14.144 1.00 15.53 H
ATOM 119 HB2 LEU L 6 -1.011 10.669 14.331 1.00 17.76 H
ATOM 120 HB3 LEU L 6 -2.693 11.135 13.959 1.00 17.76 H
ATOM 121 HG LEU L 6 -0.242 11.827 12.310 1.00 18.22 H
ATOM 122 HD11 LEU L 6 -0.750 10.210 10.838 1.00 18.93 H
ATOM 123 HD12 LEU L 6 -1.319 9.426 12.338 1.00 18.93 H
ATOM 124 HD13 LEU L 6 -2.488 10.221 11.252 1.00 18.93 H
ATOM 125 HD21 LEU L 6 -2.084 12.462 10.613 1.00 22.55 H
ATOM 126 HD22 LEU L 6 -3.156 12.565 12.037 1.00 22.55 H
ATOM 127 HD23 LEU L 6 -1.712 13.609 11.929 1.00 22.55 H
ATOM 128 H ALEU L 6 0.595 12.387 14.715 0.50 13.93 H
ATOM 129 D BLEU L 6 0.595 12.387 14.715 0.50 13.93 D
"""
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model_str(pdb_str, log)
res = occupancy_selections(
model = model,
as_flex_arrays = False)
answer = [ [[32], [33]] ]
assert approx_equal(res, answer)
def exercise_27(verbose):
pdb_str="""\
CRYST1 64.714 39.225 38.645 90.00 117.38 90.00 C 1 2 1
ATOM 0 N SER A -1 20.605 9.913 24.660 1.00 32.98 N
ATOM 1 CA SER A -1 21.415 10.057 23.431 1.00 25.22 C
ATOM 2 C SER A -1 20.514 10.247 22.233 1.00 25.05 C
ATOM 3 O SER A -1 19.332 9.926 22.266 1.00 28.08 O
ATOM 4 CB SER A -1 22.253 8.810 23.194 1.00 28.97 C
ATOM 5 OG SER A -1 21.417 7.708 22.900 1.00 37.21 O
ATOM 6 H1 SER A -1 19.896 10.449 24.612 1.00 38.17 H
ATOM 7 H2 SER A -1 20.335 9.069 24.737 1.00 27.38 H
ATOM 8 H3 SER A -1 21.098 10.134 25.368 1.00 38.75 H
ATOM 9 HA SER A -1 21.997 10.829 23.514 1.00 12.22 H
ATOM 10 HB2 SER A -1 22.844 8.970 22.440 1.00 22.78 H
ATOM 11 HB3 SER A -1 22.771 8.614 23.990 1.00 30.47 H
ATOM 12 HG SER A -1 21.872 7.007 22.826 1.00 42.35 H
ATOM 13 N AMET A 0 21.097 10.723 21.147 0.49 20.67 N
ATOM 14 CA AMET A 0 20.340 10.870 19.929 0.49 21.49 C
ATOM 15 C AMET A 0 21.236 10.795 18.720 0.49 18.70 C
ATOM 16 O AMET A 0 22.394 11.216 18.750 0.49 19.47 O
ATOM 17 CB AMET A 0 19.569 12.183 19.945 0.49 22.62 C
ATOM 18 CG AMET A 0 20.423 13.414 20.138 0.49 24.87 C
ATOM 19 SD AMET A 0 19.580 14.932 19.650 0.49 29.00 S
ATOM 20 CE AMET A 0 17.946 14.760 20.377 0.49 36.23 C
ATOM 21 H AMET A 0 21.920 10.964 21.095 0.49 28.25 H
ATOM 22 HA AMET A 0 19.697 10.146 19.870 0.49 7.25 H
ATOM 23 HB2AMET A 0 19.093 12.280 19.105 0.49 13.51 H
ATOM 24 HB3AMET A 0 18.941 12.141 20.681 0.49 7.62 H
ATOM 25 HG2AMET A 0 20.671 13.490 21.072 0.49 26.02 H
ATOM 26 HG3AMET A 0 21.219 13.333 19.589 0.49 30.87 H
ATOM 27 HE1AMET A 0 17.284 14.819 19.669 0.49 20.79 H
ATOM 28 HE2AMET A 0 17.863 13.908 20.829 0.49 8.45 H
ATOM 29 HE3AMET A 0 17.812 15.481 21.012 0.49 30.25 H
ATOM 30 N BMET A 0 21.082 10.809 21.171 0.51 21.19 N
ATOM 31 CA BMET A 0 20.368 11.023 19.923 0.51 23.13 C
ATOM 32 C BMET A 0 21.273 10.654 18.766 0.51 21.10 C
ATOM 33 O BMET A 0 22.496 10.703 18.893 0.51 19.93 O
ATOM 34 CB BMET A 0 19.961 12.488 19.782 0.51 27.15 C
ATOM 35 CG BMET A 0 19.070 12.993 20.889 0.51 29.67 C
ATOM 36 SD BMET A 0 18.685 14.739 20.684 0.51 41.63 S
ATOM 37 CE BMET A 0 17.734 15.043 22.171 0.51 35.23 C
ATOM 38 HA BMET A 0 19.568 10.476 19.897 0.51 36.28 H
ATOM 39 HB2BMET A 0 20.762 13.035 19.778 0.51 8.59 H
ATOM 40 HB3BMET A 0 19.485 12.602 18.945 0.51 27.25 H
ATOM 41 HG2BMET A 0 18.236 12.497 20.877 0.51 21.33 H
ATOM 42 HG3BMET A 0 19.519 12.877 21.741 0.51 34.36 H
ATOM 43 HE1BMET A 0 17.141 15.795 22.018 0.51 42.08 H
ATOM 44 HE2BMET A 0 17.217 14.249 22.380 0.51 22.21 H
ATOM 45 HE3BMET A 0 18.343 15.241 22.899 0.51 40.99 H
"""
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model_str(pdb_str, log)
res = occupancy_selections(
model = model,
as_flex_arrays = False)
answer = [[[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45]]]
assert approx_equal(res, answer)
def exercise_28(verbose):
pdb_str="""\
CRYST1 64.360 64.360 46.038 90.00 90.00 120.00 P 63
ATOM 0 N ASP A 48 8.896 25.394 -7.791 1.00 8.05 N
ATOM 1 CA ASP A 48 8.495 26.452 -6.936 1.00 8.42 C
ATOM 2 C ASP A 48 8.287 26.047 -5.477 1.00 8.20 C
ATOM 3 O ASP A 48 8.309 26.881 -4.579 1.00 10.68 O
ATOM 4 CB ASP A 48 7.216 27.151 -7.426 1.00 9.40 C
ATOM 5 CG ASP A 48 7.457 27.744 -8.791 1.00 10.91 C
ATOM 6 OD1 ASP A 48 8.234 28.729 -8.836 1.00 16.64 O
ATOM 7 OD2 ASP A 48 6.845 27.293 -9.764 1.00 12.53 O
ATOM 8 HA ASP A 48 9.193 27.122 -6.935 1.00 8.42 H
ATOM 9 HB2 ASP A 48 6.494 26.507 -7.490 1.00 9.40 H
ATOM 10 HB3 ASP A 48 6.981 27.867 -6.815 1.00 9.40 H
ATOM 11 H AASP A 48 8.303 25.156 -8.367 0.50 8.04 H
ATOM 12 H BASP A 48 8.242 25.041 -8.223 0.50 8.04 H
ATOM 13 N ALEU A 49 8.083 24.740 -5.245 0.79 7.34 N
ATOM 14 CA ALEU A 49 7.817 24.239 -3.906 0.79 6.67 C
ATOM 15 C ALEU A 49 8.124 22.738 -3.941 0.79 5.81 C
ATOM 16 O ALEU A 49 7.880 22.074 -4.958 0.79 6.71 O
ATOM 17 CB ALEU A 49 6.385 24.559 -3.494 0.79 7.19 C
ATOM 18 CG ALEU A 49 5.914 24.092 -2.111 0.79 7.07 C
ATOM 19 CD1ALEU A 49 4.885 25.059 -1.536 0.79 8.84 C
ATOM 20 CD2ALEU A 49 5.323 22.713 -2.192 0.79 7.46 C
ATOM 21 H ALEU A 49 8.095 24.131 -5.852 0.79 7.25 H
ATOM 22 HA ALEU A 49 8.421 24.661 -3.275 0.79 7.14 H
ATOM 23 HB2ALEU A 49 6.277 25.523 -3.518 0.79 9.16 H
ATOM 24 HB3ALEU A 49 5.791 24.158 -4.147 0.79 9.16 H
ATOM 25 HG ALEU A 49 6.673 24.062 -1.508 0.79 6.91 H
ATOM 26 HD11ALEU A 49 4.592 24.730 -0.672 0.79 9.95 H
ATOM 27 HD12ALEU A 49 5.294 25.933 -1.437 0.79 9.95 H
ATOM 28 HD13ALEU A 49 4.130 25.113 -2.143 0.79 9.95 H
ATOM 29 HD21ALEU A 49 4.960 22.476 -1.324 0.79 8.29 H
ATOM 30 HD22ALEU A 49 4.616 22.710 -2.856 0.79 8.29 H
ATOM 31 HD23ALEU A 49 6.015 22.082 -2.442 0.79 8.29 H
ATOM 32 N BLEU A 49 7.975 24.768 -5.242 0.21 7.25 N
ATOM 33 CA BLEU A 49 7.654 24.205 -3.941 0.21 7.15 C
ATOM 34 C BLEU A 49 8.003 22.716 -3.887 0.21 7.83 C
ATOM 35 O BLEU A 49 7.689 22.025 -4.858 0.21 5.06 O
ATOM 36 CB BLEU A 49 6.162 24.365 -3.605 0.21 9.16 C
ATOM 37 CG BLEU A 49 5.681 23.652 -2.331 0.21 6.91 C
ATOM 38 CD1BLEU A 49 6.301 24.276 -1.095 0.21 9.95 C
ATOM 39 CD2BLEU A 49 4.156 23.640 -2.248 0.21 8.29 C
ATOM 40 H BLEU A 49 7.943 24.178 -5.867 0.21 7.25 H
ATOM 41 HA BLEU A 49 8.173 24.662 -3.262 0.21 7.14 H
ATOM 42 HB2BLEU A 49 5.975 25.310 -3.494 0.21 9.16 H
ATOM 43 HB3BLEU A 49 5.645 24.021 -4.346 0.21 9.16 H
ATOM 44 HG BLEU A 49 5.963 22.725 -2.358 0.21 6.91 H
ATOM 45 HD11BLEU A 49 6.470 23.579 -0.443 0.21 9.95 H
ATOM 46 HD12BLEU A 49 7.132 24.697 -1.346 0.21 9.95 H
ATOM 47 HD13BLEU A 49 5.691 24.937 -0.731 0.21 9.95 H
ATOM 48 HD21BLEU A 49 3.888 23.174 -1.441 0.21 8.29 H
ATOM 49 HD22BLEU A 49 3.834 24.555 -2.225 0.21 8.29 H
ATOM 50 HD23BLEU A 49 3.802 23.184 -3.027 0.21 8.29 H
ATOM 51 N VAL A 50 8.616 22.239 -2.807 1.00 5.93 N
ATOM 52 CA VAL A 50 8.845 20.793 -2.609 1.00 5.53 C
ATOM 53 C VAL A 50 7.981 20.307 -1.457 1.00 5.75 C
ATOM 54 O VAL A 50 7.971 20.912 -0.389 1.00 6.63 O
ATOM 55 CB VAL A 50 10.325 20.527 -2.343 1.00 6.31 C
ATOM 56 CG1 VAL A 50 10.556 19.043 -2.072 1.00 7.62 C
ATOM 57 CG2 VAL A 50 11.170 20.998 -3.512 1.00 7.52 C
ATOM 58 HA VAL A 50 8.593 20.305 -3.404 1.00 5.53 H
ATOM 59 HB VAL A 50 10.599 21.022 -1.555 1.00 6.31 H
ATOM 60 HG11 VAL A 50 11.507 18.860 -2.118 1.00 7.62 H
ATOM 61 HG12 VAL A 50 10.221 18.824 -1.188 1.00 7.62 H
ATOM 62 HG13 VAL A 50 10.087 18.523 -2.744 1.00 7.62 H
ATOM 63 HG21 VAL A 50 12.097 20.765 -3.345 1.00 7.52 H
ATOM 64 HG22 VAL A 50 10.860 20.562 -4.321 1.00 7.52 H
ATOM 65 HG23 VAL A 50 11.081 21.960 -3.600 1.00 7.52 H
ATOM 66 H AVAL A 50 8.830 22.718 -2.125 0.79 5.93 H
ATOM 67 H BVAL A 50 8.914 22.729 -2.166 0.21 5.93 H
TER
END
"""
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model_str(pdb_str, log)
res = occupancy_selections(
model = model,
as_flex_arrays = False)
answer = [ [[11],[12]],
[[13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,66],
[32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,67]]]
assert approx_equal(res, answer)
def exercise_29(verbose):
pdb_str="""\
CRYST1 148.270 44.010 47.390 90.00 101.57 90.00 C 1 2 1
ATOM 0 N GLY A 285 -41.269 16.430 -4.458 1.00 18.77 N
ATOM 1 CA GLY A 285 -41.021 16.772 -5.854 1.00 20.45 C
ATOM 2 H GLY A 285 -42.080 16.182 -4.313 1.00 22.53 H
ATOM 3 C AGLY A 285 -41.133 18.291 -6.119 0.85 20.52 C
ATOM 4 O AGLY A 285 -41.030 18.770 -7.258 0.85 22.89 O
ATOM 5 HA2AGLY A 285 -40.130 16.482 -6.104 0.85 24.54 H
ATOM 6 HA3AGLY A 285 -41.663 16.314 -6.418 0.85 24.54 H
ATOM 7 C BGLY A 285 -40.556 18.155 -6.113 0.15 20.45 C
ATOM 8 O BGLY A 285 -39.925 18.445 -7.127 0.15 21.06 O
ATOM 9 HA2BGLY A 285 -40.352 16.166 -6.208 0.15 24.54 H
ATOM 10 HA3BGLY A 285 -41.839 16.638 -6.357 0.15 24.54 H
ATOM 11 N AASN A 286 -41.375 19.070 -5.066 0.75 20.63 N
ATOM 12 CA AASN A 286 -41.558 20.524 -5.179 0.75 21.34 C
ATOM 13 C AASN A 286 -40.921 21.176 -3.941 0.75 19.76 C
ATOM 14 O AASN A 286 -41.136 20.695 -2.825 0.75 18.94 O
ATOM 15 CB AASN A 286 -43.061 20.822 -5.246 0.75 23.19 C
ATOM 16 CG AASN A 286 -43.390 22.293 -5.087 0.75 24.76 C
ATOM 17 OD1AASN A 286 -43.580 22.784 -3.975 0.75 25.15 O
ATOM 18 ND2AASN A 286 -43.491 22.996 -6.206 0.75 26.38 N
ATOM 19 H AASN A 286 -41.441 18.778 -4.260 0.75 24.76 H
ATOM 20 HA AASN A 286 -41.121 20.863 -5.988 0.75 25.61 H
ATOM 21 HB2AASN A 286 -43.400 20.532 -6.107 0.75 27.82 H
ATOM 22 HB3AASN A 286 -43.509 20.338 -4.535 0.75 27.82 H
ATOM 23 HD21AASN A 286 -43.371 22.614 -6.967 0.75 31.65 H
ATOM 24 HD22AASN A 286 -43.677 23.835 -6.171 0.75 31.65 H
ATOM 25 N BASN A 286 -40.878 19.026 -5.184 0.25 20.30 N
ATOM 26 CA BASN A 286 -40.589 20.401 -5.396 0.25 20.20 C
ATOM 27 C BASN A 286 -40.224 21.016 -4.085 0.25 18.88 C
ATOM 28 O BASN A 286 -40.136 20.364 -3.047 0.25 18.65 O
ATOM 29 CB BASN A 286 -41.798 21.088 -6.023 0.25 22.27 C
ATOM 30 CG BASN A 286 -42.950 21.238 -5.058 0.25 23.28 C
ATOM 31 OD1BASN A 286 -42.781 21.720 -3.938 0.25 23.18 O
ATOM 32 ND2BASN A 286 -44.137 20.828 -5.491 0.25 24.35 N
ATOM 33 H BASN A 286 -41.259 18.841 -4.435 0.25 24.36 H
ATOM 34 HA BASN A 286 -39.828 20.488 -6.007 0.25 24.24 H
ATOM 35 HB2BASN A 286 -41.538 21.974 -6.321 0.25 26.72 H
ATOM 36 HB3BASN A 286 -42.105 20.561 -6.777 0.25 26.72 H
ATOM 37 HD21BASN A 286 -44.216 20.499 -6.282 0.25 29.22 H
ATOM 38 HD22BASN A 286 -44.826 20.891 -4.981 0.25 29.22 H
ATOM 39 CA GLU A 287 -39.388 22.905 -3.000 1.00 16.67 C
ATOM 40 C GLU A 287 -40.376 23.372 -1.952 1.00 15.65 C
ATOM 41 O GLU A 287 -40.132 23.201 -0.755 1.00 14.31 O
ATOM 42 CB GLU A 287 -38.514 24.074 -3.481 1.00 17.80 C
ATOM 43 CG GLU A 287 -37.273 23.645 -4.302 1.00 19.41 C
ATOM 44 CD GLU A 287 -36.290 24.789 -4.558 1.00 20.84 C
ATOM 45 OE1 GLU A 287 -36.554 25.925 -4.128 1.00 21.26 O
ATOM 46 OE2 GLU A 287 -35.220 24.552 -5.185 1.00 22.93 O
ATOM 47 HB2 GLU A 287 -39.052 24.654 -4.041 1.00 21.36 H
ATOM 48 HB3 GLU A 287 -38.200 24.566 -2.707 1.00 21.36 H
ATOM 49 HG2 GLU A 287 -36.801 22.949 -3.818 1.00 23.29 H
ATOM 50 HG3 GLU A 287 -37.568 23.308 -5.163 1.00 23.29 H
ATOM 51 N AGLU A 287 -40.109 22.235 -4.122 0.02 18.26 N
ATOM 52 H AGLU A 287 -39.954 22.592 -4.889 0.02 21.91 H
ATOM 53 HA AGLU A 287 -38.796 22.250 -2.576 0.02 20.01 H
ATOM 54 N BGLU A 287 -40.017 22.305 -4.119 0.98 18.44 N
ATOM 55 H BGLU A 287 -40.228 22.836 -4.762 0.98 22.13 H
ATOM 56 HA BGLU A 287 -38.799 22.245 -2.580 0.98 20.01 H
TER
END
"""
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model_str(pdb_str, log)
res = occupancy_selections(
model = model,
as_flex_arrays = False)
answer = [ [[3,4,5,6,19],
[7,8,9,10,33]],
[[11,12,13,14,15,16,17,18,20,21,22,23,24,52],
[25,26,27,28,29,30,31,32,34,35,36,37,38,55]],
[[51,53],
[54,56]]]
assert approx_equal(res, answer)
def exercise_30(verbose):
pdb_str="""\
CRYST1 42.198 121.958 37.277 90.00 90.00 90.00 P 21 21 2
ATOM 0 CG GLU A 115 30.700 22.521 0.401 0.55 25.56 C
ATOM 1 CD GLU A 115 31.809 23.320 -0.265 1.00 25.96 C
ATOM 2 OE1 GLU A 115 32.842 22.797 -0.723 1.00 24.92 O
ATOM 3 OE2 GLU A 115 31.621 24.544 -0.376 1.00 27.30 O
ATOM 4 N AGLU A 115 27.819 20.841 -1.012 0.44 19.61 N
ATOM 5 CA AGLU A 115 28.757 21.222 -0.004 0.44 20.79 C
ATOM 6 C AGLU A 115 28.192 21.930 1.203 0.44 19.50 C
ATOM 7 O AGLU A 115 27.475 22.922 1.098 0.44 20.38 O
ATOM 8 CB AGLU A 115 29.799 22.079 -0.601 0.44 23.59 C
ATOM 9 N BGLU A 115 27.018 20.969 -0.446 0.56 27.49 N
ATOM 10 CA BGLU A 115 28.194 21.387 0.311 0.56 26.06 C
ATOM 11 C BGLU A 115 27.541 21.859 1.611 0.56 25.00 C
ATOM 12 O BGLU A 115 26.660 22.715 1.640 0.56 26.43 O
ATOM 13 CB BGLU A 115 29.189 22.459 -0.356 0.56 26.03 C
ATOM 14 N AVAL A 116 28.585 21.407 2.363 0.53 19.29 N
ATOM 15 CA AVAL A 116 28.181 21.931 3.670 0.53 18.27 C
ATOM 16 C AVAL A 116 29.427 21.990 4.589 0.53 17.81 C
ATOM 17 O AVAL A 116 30.464 21.420 4.280 0.53 17.67 O
ATOM 18 CB AVAL A 116 27.090 21.046 4.342 0.53 20.31 C
ATOM 19 CG1AVAL A 116 25.743 21.168 3.633 0.53 22.78 C
ATOM 20 CG2AVAL A 116 27.498 19.598 4.395 0.53 20.85 C
ATOM 21 H AVAL A 116 29.104 20.724 2.421 0.53 23.15 H
ATOM 22 HA AVAL A 116 27.827 22.838 3.564 0.53 21.92 H
ATOM 23 HB AVAL A 116 26.967 21.353 5.264 0.53 24.37 H
ATOM 24 N BVAL A 116 27.987 21.231 2.690 0.47 21.87 N
ATOM 25 CA BVAL A 116 27.614 21.560 4.041 0.47 19.86 C
ATOM 26 C BVAL A 116 28.915 21.857 4.746 0.47 19.34 C
ATOM 27 O BVAL A 116 29.983 21.603 4.213 0.47 18.81 O
ATOM 28 CB BVAL A 116 26.938 20.336 4.707 0.47 19.81 C
ATOM 29 CG1BVAL A 116 25.591 20.061 4.058 0.47 21.33 C
ATOM 30 CG2BVAL A 116 27.825 19.086 4.627 0.47 19.25 C
ATOM 31 H BVAL A 116 28.539 20.573 2.651 0.47 26.24 H
ATOM 32 HA BVAL A 116 27.021 22.340 4.070 0.47 23.83 H
ATOM 33 HB BVAL A 116 26.782 20.535 5.654 0.47 23.76 H
TER
END
"""
if (verbose): log = sys.stdout
else: log = StringIO()
model = get_model_str(pdb_str, log)
res = occupancy_selections(
model = model,
as_flex_arrays = False)
answer = [ [[0]],
[[4, 5, 6, 7, 8, 21],
[9, 10, 11, 12, 13, 31]],
[[14, 15, 16, 17, 18, 19, 20, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 32, 33]] ]
assert approx_equal(res, answer)
def prepare_correlated_occupancy_inputs(
prefix="tst_group_correlated_occupancy",
create_mtz=False,
d_min=1.0):
pdb_raw = """\
CRYST1 21.937 4.866 23.477 90.00 107.08 90.00 P 1 21 1
SCALE1 0.045585 0.000000 0.014006 0.00000
SCALE2 0.000000 0.205508 0.000000 0.00000
SCALE3 0.000000 0.000000 0.044560 0.00000
ATOM 1 N GLY A 1 -9.056 4.638 6.050 1.00 16.77 N
ATOM 2 CA GLY A 1 -9.058 4.194 4.668 1.00 16.57 C
ATOM 3 C GLY A 1 -7.993 3.144 4.430 1.00 16.16 C
ATOM 4 O GLY A 1 -7.521 2.511 5.374 1.00 16.78 O
ATOM 5 N ASN A 2 -7.616 2.953 3.169 1.00 15.02 N
ATOM 6 CA ASN A 2 -6.526 2.044 2.840 1.00 14.10 C
ATOM 7 C ASN A 2 -5.216 2.527 3.434 1.00 13.13 C
ATOM 8 O ASN A 2 -4.943 3.727 3.466 1.00 11.91 O
ATOM 9 CB ASN A 2 -6.382 1.888 1.330 1.00 15.38 C
ATOM 10 CG ASN A 2 -7.632 1.344 0.685 1.00 14.08 C
ATOM 11 OD1 ASN A 2 -8.042 0.216 0.957 1.00 17.46 O
ATOM 12 ND2 ASN A 2 -8.247 2.142 -0.178 1.00 11.72 N
ATOM 13 N ASN A 3 -4.405 1.583 3.898 1.00 12.26 N
ATOM 14 CA ASN A 3 -3.172 1.915 4.595 1.00 11.74 C
ATOM 15 C ASN A 3 -1.922 1.362 3.915 1.00 11.10 C
ATOM 16 O ASN A 3 -1.816 0.158 3.672 1.00 10.42 O
ATOM 17 CB ASN A 3 -3.243 1.409 6.039 1.00 12.15 C
ATOM 18 CG ASN A 3 -2.000 1.749 6.841 1.00 12.82 C
ATOM 19 OD1 ASN A 3 -1.705 2.920 7.082 1.00 15.05 O
ATOM 20 ND2 ASN A 3 -1.272 0.724 7.270 1.00 13.48 N
ATOM 21 N GLN A 4 -0.987 2.256 3.598 1.00 10.29 N
ATOM 22 CA GLN A 4 0.361 1.860 3.201 1.00 10.53 C
ATOM 23 C GLN A 4 1.398 2.605 4.031 1.00 10.24 C
ATOM 24 O GLN A 4 1.454 3.834 4.025 1.00 8.86 O
ATOM 25 CB GLN A 4 0.626 2.117 1.712 1.00 9.80 C
ATOM 26 CG GLN A 4 1.924 1.459 1.221 1.00 10.25 C
ATOM 27 CD GLN A 4 2.465 2.050 -0.073 1.00 12.43 C
ATOM 28 OE1 GLN A 4 2.674 3.260 -0.178 1.00 14.62 O
ATOM 29 NE2 GLN A 4 2.708 1.192 -1.059 1.00 9.05 N
ATOM 30 N AGLN A 5 2.202 1.848 4.775 0.62 10.38 N
ATOM 31 CA AGLN A 5 3.288 2.419 5.569 0.62 11.39 C
ATOM 32 C AGLN A 5 4.638 1.844 5.123 0.62 11.52 C
ATOM 33 O AGLN A 5 4.824 0.625 5.095 0.62 12.05 O
ATOM 34 CB AGLN A 5 3.046 2.170 7.063 0.62 11.96 C
ATOM 35 CG AGLN A 5 1.854 2.946 7.622 0.62 10.81 C
ATOM 36 CD AGLN A 5 1.361 2.406 8.951 0.62 13.10 C
ATOM 37 OE1AGLN A 5 0.800 1.312 9.019 0.62 10.65 O
ATOM 38 NE2AGLN A 5 1.562 3.175 10.016 0.62 12.30 N
ATOM 39 N BGLN A 5 2.239 1.858 4.725 0.38 10.38 N
ATOM 40 CA BGLN A 5 3.326 2.476 5.450 0.38 11.39 C
ATOM 41 C BGLN A 5 4.639 1.850 5.057 0.38 11.52 C
ATOM 42 O BGLN A 5 4.814 0.627 5.020 0.38 12.05 O
ATOM 43 CB BGLN A 5 3.110 2.331 6.919 0.38 11.96 C
ATOM 44 CG BGLN A 5 2.695 0.980 7.141 0.38 10.81 C
ATOM 45 CD BGLN A 5 2.882 0.618 8.479 0.38 13.10 C
ATOM 46 OE1BGLN A 5 2.538 1.369 9.406 0.38 10.65 O
ATOM 47 NE2BGLN A 5 3.380 -0.597 8.664 0.38 12.30 N
ATOM 48 N ASN A 6 5.565 2.732 4.753 1.00 11.99 N
ATOM 49 CA ASN A 6 6.868 2.339 4.280 1.00 12.30 C
ATOM 50 C ASN A 6 7.881 2.785 5.302 1.00 13.40 C
ATOM 51 O ASN A 6 8.262 3.954 5.351 1.00 13.92 O
ATOM 52 CB ASN A 6 7.133 2.954 2.915 1.00 12.13 C
ATOM 53 CG ASN A 6 5.988 2.721 1.955 1.00 12.77 C
ATOM 54 OD1 ASN A 6 5.795 1.608 1.466 1.00 14.27 O
ATOM 55 ND2 ASN A 6 5.211 3.764 1.690 1.00 10.07 N
ATOM 56 N ATYR A 7 8.304 1.849 6.146 0.59 14.70 N
ATOM 57 CA ATYR A 7 9.167 2.166 7.280 0.59 15.18 C
ATOM 58 C ATYR A 7 10.622 2.326 6.868 0.59 15.91 C
ATOM 59 O ATYR A 7 11.054 1.799 5.844 0.59 15.76 O
ATOM 60 CB ATYR A 7 9.044 1.086 8.356 0.59 15.35 C
ATOM 61 CG ATYR A 7 7.640 0.946 8.887 0.59 14.45 C
ATOM 62 CD1ATYR A 7 6.759 0.027 8.335 0.59 15.68 C
ATOM 63 CD2ATYR A 7 7.187 1.750 9.924 0.59 14.80 C
ATOM 64 CE1ATYR A 7 5.469 -0.098 8.810 0.59 13.46 C
ATOM 65 CE2ATYR A 7 5.899 1.633 10.407 0.59 14.33 C
ATOM 66 CZ ATYR A 7 5.044 0.707 9.845 0.59 15.09 C
ATOM 67 OH ATYR A 7 3.759 0.583 10.319 0.59 14.39 O
ATOM 68 OXTATYR A 7 11.394 2.990 7.558 0.59 17.49 O
ATOM 70 N BTYR A 7 8.323 1.843 6.116 0.41 14.70 N
ATOM 71 CA BTYR A 7 9.149 2.183 7.247 0.41 15.18 C
ATOM 72 C BTYR A 7 10.629 2.316 6.861 0.41 15.91 C
ATOM 73 O BTYR A 7 11.084 1.756 5.864 0.41 15.76 O
ATOM 74 CB BTYR A 7 8.954 1.147 8.348 0.41 15.35 C
ATOM 75 CG BTYR A 7 9.942 1.356 9.417 0.41 14.45 C
ATOM 76 CD1BTYR A 7 9.807 2.381 10.320 0.41 15.68 C
ATOM 77 CD2BTYR A 7 11.054 0.580 9.473 0.41 14.80 C
ATOM 78 CE1BTYR A 7 10.746 2.569 11.248 0.41 13.46 C
ATOM 79 CE2BTYR A 7 11.968 0.749 10.405 0.41 14.33 C
ATOM 80 CZ BTYR A 7 11.858 1.724 11.252 0.41 15.09 C
ATOM 81 OH BTYR A 7 12.921 1.747 12.113 0.41 14.39 O
ATOM 82 OXTBTYR A 7 11.408 3.001 7.529 0.41 17.49 O
TER
HETATM 83 O HOH A 8 -6.471 5.227 7.124 1.00 22.62 O
HETATM 84 O HOH A 9 10.431 1.858 3.216 1.00 19.71 O
HETATM 85 O HOH A 10 -11.286 1.756 -1.468 1.00 17.08 O
HETATM 86 O AHOH A 11 11.808 4.179 9.970 0.60 23.99 O
HETATM 87 O HOH A 12 13.605 1.327 9.198 1.00 26.17 O
HETATM 88 O HOH A 13 -2.749 3.429 10.024 1.00 39.15 O
HETATM 89 O HOH A 14 -1.500 0.682 10.967 1.00 43.49 O
TER
"""
pdb_in = "%s_in.pdb" % prefix
open(pdb_in, "w").write(pdb_raw)
if (create_mtz):
args = [
pdb_in,
"high_resolution=%g" % d_min,
"type=real",
"label=F",
"add_sigmas=True",
"r_free_flags_fraction=0.1",
"random_seed=12345",
"output.file_name=%s.mtz" % prefix,
]
fmodel.run(args=args, log=null_out())
pdb_file = file_reader.any_file(pdb_in)
hierarchy = pdb_file.file_object.hierarchy
xrs = pdb_file.file_object.xray_structure_simple()
for atom in hierarchy.atoms():
atom.b = 5
if (atom.occ < 1.0):
atom.occ = 0.5
open("%s_start.pdb" % prefix, "w").write(
hierarchy.as_pdb_string(crystal_symmetry=xrs))
def exercise_regroup_3d(verbose):
if (verbose): log = sys.stdout
else: log = StringIO()
prepare_correlated_occupancy_inputs()
# File #1 (with homogenized occupancies) should work
# File #2 should fail due to inconsistent occupancies
pdb_files = [
"tst_group_correlated_occupancy_start.pdb",
"tst_group_correlated_occupancy_in.pdb",
]
for i_file, pdb_file in enumerate(pdb_files):
model = get_model(pdb_file, log)
try :
constraint_groups = occupancy_selections(
model = model,
constrain_correlated_3d_groups=True,
log=null_out())
except Sorry as s :
if (i_file == 0):
raise
else :
assert ("Inconsistent occupancies" in str(s)), str(s)
else :
if (i_file == 1):
raise Exception_expected
else :
assert (len(constraint_groups) == 1)
def run():
verbose = "--verbose" in sys.argv[1:]
exercise_00(verbose=verbose)
exercise_01(verbose=verbose)
exercise_02(verbose=verbose)
exercise_03(verbose=verbose)
exercise_05(verbose=verbose)
exercise_06(verbose=verbose)
exercise_07(verbose=verbose)
exercise_08(verbose=verbose)
exercise_09(verbose=verbose)
exercise_10(verbose=verbose)
exercise_11(verbose=verbose)
exercise_12(verbose=verbose)
exercise_13(verbose=verbose)
exercise_14(verbose=verbose)
exercise_15(verbose=verbose)
exercise_16(verbose=verbose)
exercise_17(verbose=verbose)
exercise_18(verbose=verbose)
exercise_19(verbose=verbose)
exercise_20(verbose=verbose)
exercise_21(verbose=verbose)
exercise_22(verbose=verbose)
exercise_23(verbose=verbose)
exercise_24(verbose=verbose)
exercise_25(verbose=verbose)
exercise_26(verbose=verbose)
exercise_27(verbose=verbose)
exercise_28(verbose=verbose)
exercise_29(verbose=verbose)
exercise_30(verbose=verbose)
exercise_regroup_3d(verbose=verbose)
print(format_cpu_times())
if (__name__ == "__main__"):
run()
| 51.994624
| 154
| 0.525326
|
79522ab9346408865a82ade12f54cd00c6577a2a
| 2,339
|
py
|
Python
|
kaggledatasets/structured/newyork_airbnb_opendata.py
|
kaggledatasets/kaggledatasets
|
685d16590667443546f7ad2c31dfac8ad6be6ca2
|
[
"Apache-2.0"
] | 48
|
2019-11-23T17:39:42.000Z
|
2022-02-11T16:38:12.000Z
|
kaggledatasets/structured/newyork_airbnb_opendata.py
|
kaggledatasets/kaggledatasets
|
685d16590667443546f7ad2c31dfac8ad6be6ca2
|
[
"Apache-2.0"
] | 5
|
2019-11-30T14:20:18.000Z
|
2019-12-10T12:07:54.000Z
|
kaggledatasets/structured/newyork_airbnb_opendata.py
|
kaggledatasets/kaggledatasets
|
685d16590667443546f7ad2c31dfac8ad6be6ca2
|
[
"Apache-2.0"
] | 6
|
2019-11-30T15:44:43.000Z
|
2022-02-11T16:38:14.000Z
|
# Copyright 2019 Omkar Prabhu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""New York City Airbnb Open Dataset"""
from kaggledatasets.core.dataset import Dataset
class NewYorkAirbnbOpenData(Dataset):
r"""
New York City Airbnb Open Dataset
Airbnb listings and metrics in NYC, NY, USA (2019)
"""
author = "dgomonov"
slug = "new-york-city-airbnb-open-data"
title = "New York City Airbnb Open Data"
files = ["AB_NYC_2019.csv"]
def __init__(self, root=None, download=False):
r"""
Initializer for the New York City Airbnb Open Dataset
"""
super(NewYorkAirbnbOpenData, self).__init__(author=self.author, slug=self.slug, \
title=self.title, root=root, download=download)
def __getitem__(self, idx):
r"""
This will return one sample of data
"""
raise NotImplementedError
def __len__(self):
r"""
This denotes the total number of samples
"""
raise NotImplementedError
def data_frame(self):
r"""
        This will return a pandas data frame for use in scikit-learn or raw processing
"""
import pandas as pd # pylint: disable=import-error
location = self.get_files()
return pd.read_csv(location[0])
def load(self, batch_size=1):
r"""
        This will return a tf.data dataset for TensorFlow 2.0
"""
import tensorflow as tf # pylint: disable=import-error
location = self.get_files()
return tf.data.experimental.make_csv_dataset(
location[0],
batch_size=batch_size,
num_epochs=1,
ignore_errors=True)
def data_loader(self):
r"""
This will return data loader for PyTorch
"""
# import torch # pylint: disable=import-error
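# A minimal usage sketch (illustrative only; assumes Kaggle API credentials are configured
# so the files can be downloaded, and that the optional pandas/TensorFlow dependencies are
# installed):
#   dataset = NewYorkAirbnbOpenData(download=True)
#   df = dataset.data_frame()            # pandas DataFrame built from AB_NYC_2019.csv
#   tf_ds = dataset.load(batch_size=32)  # tf.data dataset for TensorFlow 2.x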
| 28.180723
| 89
| 0.64814
|
79522bf319980d74a78c34c31426ca3fa49ec229
| 988
|
py
|
Python
|
login/login/urls.py
|
fga-eps-mds/2018.2-FGAPP-login
|
942ba0e7b3164733f9a9e2a9c1c601f22289e65c
|
[
"MIT"
] | 1
|
2020-07-26T03:28:55.000Z
|
2020-07-26T03:28:55.000Z
|
login/login/urls.py
|
bonfimjustino7/2018.2-FGAPP-login
|
942ba0e7b3164733f9a9e2a9c1c601f22289e65c
|
[
"MIT"
] | 13
|
2018-09-21T22:52:48.000Z
|
2018-12-11T14:50:07.000Z
|
login/login/urls.py
|
bonfimjustino7/2018.2-FGAPP-login
|
942ba0e7b3164733f9a9e2a9c1c601f22289e65c
|
[
"MIT"
] | 1
|
2020-05-10T13:18:03.000Z
|
2020-05-10T13:18:03.000Z
|
from django.contrib import admin
from django.urls import include, path
from django.conf.urls import url
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework_jwt.views import refresh_jwt_token
from rest_framework_jwt.views import verify_jwt_token
from django.contrib.auth import views as auth_views
from django.contrib.sites.models import Site
from login.settings.development import LOGIN_DEFAULT_DOMAIN
from .views import status
urlpatterns = [
path('', status),
path('admin/', admin.site.urls),
path('api/', include('api.urls')),
url(r'^api/auth-token/', obtain_jwt_token),
url(r'^api/token-refresh/', refresh_jwt_token),
url(r'^api/token-verify/', verify_jwt_token),
url('^', include('django.contrib.auth.urls')),
]
try:
site = Site.objects.get(id=1)
site.name = LOGIN_DEFAULT_DOMAIN
site.domain = LOGIN_DEFAULT_DOMAIN
site.save()
current_domain = LOGIN_DEFAULT_DOMAIN
except:
print('please re-run the server')
| 32.933333
| 59
| 0.752024
|
79522cac960717d425fa70c0e6ff58239437a81f
| 1,064
|
py
|
Python
|
var/spack/repos/builtin/packages/r-affxparser/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2018-11-27T03:39:44.000Z
|
2021-09-06T15:50:35.000Z
|
var/spack/repos/builtin/packages/r-affxparser/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-01-11T20:11:52.000Z
|
2019-01-11T20:11:52.000Z
|
var/spack/repos/builtin/packages/r-affxparser/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-10-14T14:20:17.000Z
|
2020-10-14T14:20:17.000Z
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAffxparser(RPackage):
"""Package for parsing Affymetrix files (CDF, CEL, CHP, BPMAP, BAR).
It provides methods for fast and memory efficient parsing of
Affymetrix files using the Affymetrix' Fusion SDK. Both ASCII-
and binary-based files are supported. Currently, there are methods
for reading chip definition file (CDF) and a cell intensity file (CEL).
These files can be read either in full or in part. For example,
probe signals from a few probesets can be extracted very quickly
from a set of CEL files into a convenient list structure."""
homepage = "https://www.bioconductor.org/packages/affxparser/"
git = "https://git.bioconductor.org/packages/affxparser.git"
version('1.48.0', commit='2461ea88f310b59c4a9a997a4b3dadedbd65a4aa')
depends_on('r@3.4.0:3.4.9', when='@1.48.0')
| 42.56
| 75
| 0.734023
|
79522cec803b3145ececdde954556c68e18686dc
| 4,206
|
py
|
Python
|
patent_system/settings.py
|
zypangpang/patent_management_system
|
5c9307359ac2fbb7fdfa2beff63f58f441932f86
|
[
"Apache-2.0"
] | null | null | null |
patent_system/settings.py
|
zypangpang/patent_management_system
|
5c9307359ac2fbb7fdfa2beff63f58f441932f86
|
[
"Apache-2.0"
] | null | null | null |
patent_system/settings.py
|
zypangpang/patent_management_system
|
5c9307359ac2fbb7fdfa2beff63f58f441932f86
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for patent_system project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r*%p^!_ynvi!qxow&ca($l8w92-qw0*4twc$sg2+hi-xp2v73u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1','localhost','166.111.83.46','10.0.2.4']
# Application definition
INSTALLED_APPS = [
'account.apps.AccountConfig',
'main.apps.MainConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'patent_system.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'patent_system.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'NAME': 'mydatabase',
'USER': 'django',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT='/var/media/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'django_file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '/var/log/django_log/info.log',
},
'main_file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '/var/log/django_log/main_info.log',
},
},
'loggers': {
        ## Enable this handler when deploying
#'django': {
# 'handlers': ['django_file'],
# 'level': 'INFO',
# 'propagate': True,
#},
'main':{
'handlers': ['main_file'],
'level': 'INFO',
'propagate': True,
},
},
}
| 26.124224
| 91
| 0.638849
|
79522e7fac28cbf45ac1cb1a9fec3f0eaea6c8e7
| 5,179
|
py
|
Python
|
instances/fcn/voc.fcn32s.R101_v1c/train.py
|
dontLoveBugs/MyTorch
|
d14bd1a231bde7f2e05282f86c640bcce4a55baf
|
[
"MIT"
] | 1
|
2020-02-25T00:35:00.000Z
|
2020-02-25T00:35:00.000Z
|
instances/fcn/voc.fcn32s.R101_v1c/train.py
|
dontLoveBugs/MyTorch
|
d14bd1a231bde7f2e05282f86c640bcce4a55baf
|
[
"MIT"
] | null | null | null |
instances/fcn/voc.fcn32s.R101_v1c/train.py
|
dontLoveBugs/MyTorch
|
d14bd1a231bde7f2e05282f86c640bcce4a55baf
|
[
"MIT"
] | null | null | null |
from __future__ import division
import os.path as osp
import sys
import argparse
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torch.nn.parallel import DistributedDataParallel
from config import config
from dataloader import get_train_loader
from network import FCN
from datasets import VOC
from utils.init_func import init_weight, group_weight
from engine.lr_policy import PolyLR
from engine.engine import Engine
from seg_opr.sync_bn import DataParallelModel, Reduce, BatchNorm2d
try:
from third_libs.parallel import SyncBatchNorm
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex .")
torch.manual_seed(config.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(config.seed)
parser = argparse.ArgumentParser()
with Engine(custom_parser=parser) as engine:
args = parser.parse_args()
cudnn.benchmark = True
if engine.distributed:
torch.cuda.set_device(engine.local_rank)
# data loader
train_loader, train_sampler = get_train_loader(engine, VOC)
# config network and criterion
criterion = nn.CrossEntropyLoss(reduction='mean',
ignore_index=255)
if engine.distributed:
BatchNorm2d = SyncBatchNorm
else:
BatchNorm2d = BatchNorm2d
model = FCN(config.num_classes, criterion=criterion,
pretrained_model=config.pretrained_model,
norm_layer=BatchNorm2d)
init_weight(model.business_layer, nn.init.kaiming_normal_,
BatchNorm2d, config.bn_eps, config.bn_momentum,
mode='fan_out', nonlinearity='relu')
# group weight and config optimizer
base_lr = config.lr
if engine.distributed:
base_lr = config.lr * engine.world_size
params_list = []
params_list = group_weight(params_list, model,
BatchNorm2d, base_lr)
optimizer = torch.optim.SGD(params_list,
lr=base_lr,
momentum=config.momentum,
weight_decay=config.weight_decay)
# config lr policy
total_iteration = config.nepochs * config.niters_per_epoch
lr_policy = PolyLR(base_lr, config.lr_power, total_iteration)
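    # A poly policy typically decays the rate as base_lr * (1 - iter / total_iteration) ** lr_power;
    # see engine.lr_policy for the exact form used here.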
if engine.distributed:
if torch.cuda.is_available():
model.cuda()
model = DistributedDataParallel(model,
device_ids=[engine.local_rank],
output_device=engine.local_rank)
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = DataParallelModel(model, engine.devices)
model.to(device)
engine.register_state(dataloader=train_loader, model=model,
optimizer=optimizer)
if engine.continue_state_object:
engine.restore_checkpoint()
optimizer.zero_grad()
model.train()
for epoch in range(engine.state.epoch, config.nepochs):
if engine.distributed:
train_sampler.set_epoch(epoch)
bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'
pbar = tqdm(range(config.niters_per_epoch), file=sys.stdout,
bar_format=bar_format)
dataloader = iter(train_loader)
for idx in pbar:
engine.update_iteration(epoch, idx)
minibatch = dataloader.next()
imgs = minibatch['data']
gts = minibatch['label']
imgs = imgs.cuda(non_blocking=True)
gts = gts.cuda(non_blocking=True)
loss = model(imgs, gts)
# reduce the whole loss over multi-gpu
if engine.distributed:
dist.all_reduce(loss, dist.ReduceOp.SUM)
loss = loss / engine.world_size
else:
loss = Reduce.apply(*loss) / len(loss)
optimizer.zero_grad()
current_idx = epoch * config.niters_per_epoch + idx
lr = lr_policy.get_lr(current_idx)
for i in range(0, len(optimizer.param_groups)):
optimizer.param_groups[i]['lr'] = lr
loss.backward()
optimizer.step()
print_str = 'Epoch{}/{}'.format(epoch, config.nepochs) \
+ ' Iter{}/{}:'.format(idx + 1, config.niters_per_epoch) \
+ ' lr=%.2e' % lr \
+ ' loss=%.2f' % loss.item()
pbar.set_description(print_str, refresh=False)
if epoch % config.snapshot_iter == 0:
if engine.distributed and (engine.local_rank == 0):
engine.save_and_link_checkpoint(config.snapshot_dir,
config.log_dir,
config.log_dir_link)
elif not engine.distributed:
engine.save_and_link_checkpoint(config.snapshot_dir,
config.log_dir,
config.log_dir_link)
| 35.231293
| 82
| 0.603591
|
79522f1372f3cd0428507a0a68a60f9495aa571a
| 1,220
|
py
|
Python
|
models/G2G.py
|
GCL-staging/PyGCL
|
6cf2f4475053c631c6db1b8a2412bd811b586275
|
[
"Apache-2.0"
] | null | null | null |
models/G2G.py
|
GCL-staging/PyGCL
|
6cf2f4475053c631c6db1b8a2412bd811b586275
|
[
"Apache-2.0"
] | null | null | null |
models/G2G.py
|
GCL-staging/PyGCL
|
6cf2f4475053c631c6db1b8a2412bd811b586275
|
[
"Apache-2.0"
] | null | null | null |
import torch
import GCL.augmentors as A
from torch import nn
from typing import Optional, Tuple
from torch_geometric.nn import global_add_pool
class G2G(nn.Module):
def __init__(self, encoder: torch.nn.Module,
augmentor: Tuple[A.Augmentor, A.Augmentor],
loss,
hidden_dim: int, proj_dim: int):
super(G2G, self).__init__()
self.encoder = encoder
self.augmentor = augmentor
self.loss = loss
self.num_hidden = hidden_dim
def forward(self, x: torch.Tensor, batch: torch.Tensor,
edge_index: torch.Tensor, edge_weight: Optional[torch.Tensor] = None)\
-> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
aug1, aug2 = self.augmentor
x1, edge_index1, edge_weight1 = aug1(x, edge_index, edge_weight)
x2, edge_index2, edge_weight2 = aug2(x, edge_index, edge_weight)
z = self.encoder(x, edge_index, edge_weight)
z1 = self.encoder(x1, edge_index1, edge_weight1)
z2 = self.encoder(x2, edge_index2, edge_weight2)
g1 = global_add_pool(z1, batch)
g2 = global_add_pool(z2, batch)
return z, z1, z2, g1, g2
| 34.857143
| 91
| 0.641803
|
7952305a3f519ed35602ba78cda5315c43f22312
| 2,355
|
py
|
Python
|
debug_cqlnml_converter.py
|
garrettwrong/cql3d_f90
|
15ad9094ff2e2a29f6f5c3810b5b8c4fc724f2f8
|
[
"DOC"
] | 1
|
2019-06-23T12:11:16.000Z
|
2019-06-23T12:11:16.000Z
|
debug_cqlnml_converter.py
|
garrettwrong/cql3d_f90
|
15ad9094ff2e2a29f6f5c3810b5b8c4fc724f2f8
|
[
"DOC"
] | 38
|
2019-06-06T20:30:25.000Z
|
2019-12-13T17:05:56.000Z
|
debug_cqlnml_converter.py
|
garrettwrong/cql3d_f90
|
15ad9094ff2e2a29f6f5c3810b5b8c4fc724f2f8
|
[
"DOC"
] | null | null | null |
#!/usr/bin/env python
'''
In the process of running TRANSP, it can dump a debug nml.
At the time of writing this dump is enabled for MPI runs, to assist transition.
Developers may make this optional, perhaps only for debugging, in the future.
This namelist comes _from within cql3d_f90_ and so uses f90 derived type syntax.
The standalone xcql3d expects the legacy f77 syntax NML by request of Bob@CompX.
You can use this tool to convert to the legacy style.
Note: methods to read, write, and print both styles are implemented in the code.
Future developers may change to the new style if they wish.
You convert like so, assuming your dump is called debug_new:
./debug_cqlnml_converter.py debug_new cqlinput
I would also recommend that you set VERBOSE = 1, to re-enable debug prints.
Good luck.
'''
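# For illustration, a hedged sketch of the transformation (the namelist variable below is
# made up, not taken from a real cqlinput): a derived-type dump containing
#   &SETUP_NML
#    SETUP%NSTOP = 100
#   &END
# becomes, after convert(), the legacy f77-style form
#   &SETUP
#   NSTOP = 100
#   &END
# i.e. the "_NML" suffix is dropped from the section name and the "SECTION%" prefix is
# stripped from each variable line; an empty &FRSETUP block is appended if none is present.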
from __future__ import print_function
import sys
NML_SECTIONS = ['SETUP0%',
'SETUP%',
'TRSETUP%',
'SOUSETUP%',
'EQSETUP%',
'RFSETUP%',
'FRSETUP%']
def convert(in_fn, out_fn=None):
if not out_fn:
out_fn = in_fn
with open(in_fn, 'r') as f:
lines = f.readlines()
res = []
missing_freya = True
for line in lines:
        # if we don't have a freya section, we will need to add it later
if 'frsetup' in line.lower():
missing_freya = False
# remove the _NML from section name
line = line.replace('_NML','')
        # remove the derived-type prefix
for sec in NML_SECTIONS:
# though we could just partition,
# this check is slightly more robust, since
# we could conceivably have % in a txt field
if sec.lower() in line.lower():
_,_,line = line.partition('%')
break # only going to be one nml section...
res.append(line)
if missing_freya:
#add an empty freya block
res.append('\n\n')
res.append('&FRSETUP\n')
res.append('&END\n\n')
with open(out_fn, 'w') as f:
f.write(''.join(res))
if __name__ == "__main__":
out_fn = None
if len(sys.argv)==3:
out_fn = sys.argv[2]
elif len(sys.argv) != 2:
print("{x} input_filename <output_filename>".format(x=sys.argv[0]))
exit(1)
convert(sys.argv[1], out_fn)
| 27.383721
| 80
| 0.613588
|
795231b20e223ea6419a12a5d89fa53b912efe92
| 149
|
py
|
Python
|
test/test_login.py
|
DmitriyNeurov/python_training_mantis
|
db94fad4e01e7d29a962d80791c984ddcacf1033
|
[
"Apache-2.0"
] | null | null | null |
test/test_login.py
|
DmitriyNeurov/python_training_mantis
|
db94fad4e01e7d29a962d80791c984ddcacf1033
|
[
"Apache-2.0"
] | null | null | null |
test/test_login.py
|
DmitriyNeurov/python_training_mantis
|
db94fad4e01e7d29a962d80791c984ddcacf1033
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
def test_login(app):
app.session.login("administrator", "root")
assert app.session.is_logget_in_as("administrator")
| 24.833333
| 55
| 0.684564
|
795231d674d2da24e3def8b22bf4ddf5c425e00c
| 9,667
|
py
|
Python
|
style_transfer.py
|
Ashiroes/flask_api_dl
|
2a39b653ad2de7fd5da48735b4711d791ce62053
|
[
"Apache-2.0"
] | null | null | null |
style_transfer.py
|
Ashiroes/flask_api_dl
|
2a39b653ad2de7fd5da48735b4711d791ce62053
|
[
"Apache-2.0"
] | null | null | null |
style_transfer.py
|
Ashiroes/flask_api_dl
|
2a39b653ad2de7fd5da48735b4711d791ce62053
|
[
"Apache-2.0"
] | null | null | null |
# =======================================================================================================================
# STYLE TRANSFER
# =======================================================================================================================
import base64
import json
import time
from io import BytesIO
import IPython.display as display
import PIL.Image
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras import backend as K
tf.config.experimental_run_functions_eagerly(True)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# UTILITY FUNCTIONS
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def chargement_image(chemin_image):
max_dim = 512
    # Load the image
    image = tf.io.read_file(chemin_image)
    # Determine the image type (jpg, png, ...) and convert it to a Tensor
    image = tf.image.decode_image(image, channels=3)
    print(" Original image dimensions (w, h, channels): " + str(image.shape))
    # Convert each pixel to floating point
    image = tf.image.convert_image_dtype(image, tf.float32)
    # Resize the image tensor so that its longest side is 512 pixels
shape = tf.cast(tf.shape(image)[:-1], tf.float32)
long_dim = max(shape)
scale = max_dim / long_dim
new_shape = tf.cast(shape * scale, tf.int32)
image = tf.image.resize(image, new_shape)
image = image[tf.newaxis, :]
    print(" TensorFlow tensor for the image (nbImages, w, h, channels): " + str(image.shape))
return image
def conversion_tenseur_vers_image(tenseur):
tenseur = tenseur * 255
    # Convert the tensor values into an array of 8-bit integers
tenseur = np.array(tenseur, dtype=np.uint8)
if (np.ndim(tenseur) > 3):
assert tenseur.shape[0] == 1
tenseur = tenseur[0]
pil_img = PIL.Image.fromarray(tenseur)
buff = BytesIO()
pil_img.save(buff, format="JPEG")
new_image_string = base64.b64encode(buff.getvalue()).decode("utf-8")
    # the Pillow library is used to turn the array into an image
return (new_image_string)
def affichage_image(image, titre=None):
    # If the image has more than 3 dimensions, drop axis 0
if (len(image.shape) > 3):
image = tf.squeeze(image, axis=0)
    # Display with matplotlib
plt.imshow(image)
if titre:
plt.title(titre)
def clip_0_1(image):
return tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# GENERATION FUNCTIONS
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def creation_nouveau_modele(modele, couches_selectionnees):
modele.trainable = False
couches = [modele.get_layer(name).output for name in couches_selectionnees]
    # Build a model whose input follows the VGG format (224 x 224 images)
    # but which only contains the selected layers
modele = tf.keras.Model([modele.input], couches)
return modele
def matrice_de_Gram(tenseur):
resultat = tf.linalg.einsum('bijc,bijd->bcd', tenseur, tenseur)
format = tf.shape(tenseur)
nombre_positions = tf.cast(format[1] * format[2], tf.float32)
return resultat / (nombre_positions)
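# For example (shapes only, to make the einsum above explicit): a style-layer activation
# of shape (1, 64, 64, 128) -- batch, height, width, channels -- yields a Gram matrix of
# shape (1, 128, 128), normalized by the 64 * 64 spatial positions.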
class Extracteur_Style_Contenu(tf.keras.models.Model):
def __init__(self, modele, couches_style, couches_contenu):
super(Extracteur_Style_Contenu, self).__init__()
        # Build a VGG model containing the style layers and the content layers
self.vgg = creation_nouveau_modele(modele, couches_style + couches_contenu)
self.couches_styles = couches_style
self.couches_contenu = couches_contenu
self.nb_couches_style = len(couches_style)
        # Do not allow the weights to be updated during backpropagation
self.vgg.trainable = False
    # Extract the layer values for an image (input)
def call(self, inputs):
"Expects float input in [0,1]"
inputs = inputs * 255.0
        # The images are converted from RGB to BGR, then each color channel is zero-centered with respect to the ImageNet dataset.
preprocessed_input = tf.keras.applications.vgg19.preprocess_input(inputs)
        # Run the VGG network with the configured layers to compute their values
valeurs_couches = self.vgg(preprocessed_input)
        # Split the layer values into style and content.
valeurs_couches_style, valeurs_couches_contenu = (
valeurs_couches[:self.nb_couches_style], valeurs_couches[self.nb_couches_style:])
        # Compute the Gram matrix for each style layer. This matrix replaces the raw values
        # of the style output layers
valeurs_couches_style = [matrice_de_Gram(valeur_couche_style)
for valeur_couche_style in valeurs_couches_style]
contenu_dictionnaire = {content_name: value
for content_name, value
in zip(self.couches_contenu, valeurs_couches_contenu)}
style_dictionnaire = {style_name: value
for style_name, value
in zip(self.couches_styles, valeurs_couches_style)}
return {'contenu': contenu_dictionnaire, 'style': style_dictionnaire}
def calcul_du_cout(valeurs_couches, cible_contenu, cible_style, poids_style, nb_couches_style, poids_contenu,
nb_couches_contenu):
valeurs_couches_style = valeurs_couches['style']
valeurs_couches_contenu = valeurs_couches['contenu']
    # -- STYLE LOSS
    # Error (generated output vs. target) = MSE
fonction_cout_style = tf.add_n([tf.reduce_mean((valeurs_couches_style[name] - cible_style[name]) ** 2)
for name in valeurs_couches_style.keys()])
    # Apply a weight
fonction_cout_style *= poids_style / nb_couches_style
    # -- CONTENT LOSS
    # Content error (generated output vs. target) = MSE
fonction_cout_contenu = tf.add_n([tf.reduce_mean((valeurs_couches_contenu[name] - cible_contenu[name]) ** 2)
for name in valeurs_couches_contenu.keys()])
fonction_cout_contenu *= poids_contenu / nb_couches_contenu
cout = fonction_cout_style + fonction_cout_contenu
return cout
@tf.function()
def etape_generation(image, optimiseur, extracteur, cible_contenu, cible_style, poids_style, nb_couches_style,
poids_contenu, nb_couches_contenu, poids_filtres_hf):
    # Create an execution pipeline (gradient tape)
with tf.GradientTape() as pipeline:
        # Compute the values of the content and style layers
valeurs_couches = extracteur(image)
        # Compute the total loss
cout = calcul_du_cout(valeurs_couches, cible_contenu, cible_style, poids_style, nb_couches_style, poids_contenu,
nb_couches_contenu)
        # Reduce the high frequencies in the image (soften the edges)
cout += poids_filtres_hf * tf.image.total_variation(image)
    # Compute the gradient
grad = pipeline.gradient(cout, image)
optimiseur.apply_gradients([(grad, image)])
    # Clip the image values to the range [0, 1]
    # and replace the image received as a parameter...
image.assign(clip_0_1(image))
return image
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# APPLICATION
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def style_transfer(chemin_image_source, chemin_image_style) -> str:
chemin_image_composite = "generation-image-"+str(time.time())+".jpg"
image_source = chargement_image(chemin_image_source)
image_style = chargement_image(chemin_image_style)
couche_VGG19_contenu = ['block5_conv2']
nombre_couches_contenu = len(couche_VGG19_contenu)
couche_VGG19_style = ['block1_conv2', 'block2_conv2', 'block3_conv2', 'block4_conv2', 'block5_conv2']
nombre_couches_style = len(couche_VGG19_style)
optimiseur = tf.optimizers.Adam(learning_rate=0.02, beta_1=0.99, epsilon=1e-1)
poids_du_style_beta = 1e-2
poids_du_contenu_alpha = 1e4
    # high-frequency suppression
poids_filtres_hf = 30
    # load the VGG19 model without its classification head
vgg19 = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
    # define the targets to reach
extracteur = Extracteur_Style_Contenu(modele=vgg19, couches_contenu=couche_VGG19_contenu,
couches_style=couche_VGG19_style)
cible_style = extracteur(image_style)['style']
cible_contenu = extracteur(image_source)['contenu']
image = tf.Variable(image_source)
epochs = 1
etapes_generation_epoch = 2
    # Generate the image
DEBUT = time.time()
step = 0
for n in range(epochs):
print("Epoch :" + str(n))
for n in range(etapes_generation_epoch):
step += 1
etape_generation(image, optimiseur, extracteur, cible_contenu, cible_style, poids_du_style_beta,
nombre_couches_style, poids_du_contenu_alpha, nombre_couches_contenu, poids_filtres_hf)
print(".", end="")
display.clear_output(wait=True)
FIN = time.time()
#conversion_tenseur_vers_image(image).save(chemin_image_composite)
K.clear_session()
return conversion_tenseur_vers_image(image)
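# A minimal usage sketch (file names are placeholders, not real assets):
#   encoded = style_transfer("content.jpg", "style.jpg")
#   # 'encoded' is a base64-encoded JPEG string of the generated image, ready to be
#   # returned from a Flask endpoint or decoded and written to disk.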
| 38.059055
| 146
| 0.642702
|
795232c30179af89fbb4c7c519cdd51a417c7b5d
| 602
|
py
|
Python
|
site-root/.py/model/map.py
|
TED-996/krait-twostones
|
51b27793b9cd536d680fb9a6785c57473d35cac1
|
[
"MIT"
] | null | null | null |
site-root/.py/model/map.py
|
TED-996/krait-twostones
|
51b27793b9cd536d680fb9a6785c57473d35cac1
|
[
"MIT"
] | null | null | null |
site-root/.py/model/map.py
|
TED-996/krait-twostones
|
51b27793b9cd536d680fb9a6785c57473d35cac1
|
[
"MIT"
] | null | null | null |
import json
import krait
import os
class Map(object):
def __init__(self, mapId, source_file):
self.id = mapId
self.source_file = source_file
self.height = 0
self.width = 0
self.data = []
self.parse_map()
def parse_map(self):
fd = open(os.path.join(krait.site_root, self.source_file or "map/medievil.json"))
map_json = json.load(fd)
self.height = map_json['height']
self.width = map_json['width']
self.data = map_json['layers'][0]['data']
self.data = [d-1 for d in self.data]
fd.close()
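# Note on the expected input (inferred from parse_map above, not from a documented schema):
# the JSON file looks like a Tiled-style export with top-level "height", "width" and
# "layers" keys, e.g.
#   {"height": 20, "width": 30, "layers": [{"data": [1, 1, 2, ...]}]}
# Tile ids are shifted down by one (d - 1), so an id of 0 becomes -1.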
| 25.083333
| 89
| 0.586379
|
7952340753498b4f82d22f5cf08643b00e4f23cb
| 244
|
py
|
Python
|
src/ps_collector/__init__.py
|
djw8605/ps-collector
|
34d5fe8bd3db231157140f3fbfee7dd46f35be2a
|
[
"Apache-2.0"
] | null | null | null |
src/ps_collector/__init__.py
|
djw8605/ps-collector
|
34d5fe8bd3db231157140f3fbfee7dd46f35be2a
|
[
"Apache-2.0"
] | null | null | null |
src/ps_collector/__init__.py
|
djw8605/ps-collector
|
34d5fe8bd3db231157140f3fbfee7dd46f35be2a
|
[
"Apache-2.0"
] | null | null | null |
import sharedrabbitmq
# Shared RabbitMQ
shared_rabbitmq = None
def get_rabbitmq_connection(cp):
global shared_rabbitmq
    if shared_rabbitmq is None:
shared_rabbitmq = sharedrabbitmq.SharedRabbitMQ(cp)
return shared_rabbitmq
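# Usage note: get_rabbitmq_connection() lazily creates a single SharedRabbitMQ instance on
# the first call and hands back that same object on every later call, e.g.
#   conn = get_rabbitmq_connection(cp)   # cp: the collector's config object (assumption)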
| 22.181818
| 59
| 0.77459
|
79523424bd4038cb5a99d435355827070b1c0505
| 643
|
py
|
Python
|
backend/manage.py
|
crowdbotics-apps/morning-waterfall-30060
|
45d08f37cc5bd66fb3b494a3162bb2273523f91c
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/manage.py
|
crowdbotics-apps/morning-waterfall-30060
|
45d08f37cc5bd66fb3b494a3162bb2273523f91c
|
[
"FTL",
"AML",
"RSA-MD"
] | 32
|
2021-08-23T23:51:20.000Z
|
2022-03-20T15:31:58.000Z
|
backend/manage.py
|
crowdbotics-apps/morning-waterfall-30060
|
45d08f37cc5bd66fb3b494a3162bb2273523f91c
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'morning_waterfall_30060.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.227273
| 87
| 0.690513
|
79523499776726250bf489f010945bdb7bc69d4b
| 4,571
|
py
|
Python
|
autodist/kernel/synchronization/synchronizer.py
|
big-data-lab-umbc/autodist
|
c8514b27cf5608f35254b63c4ac8093c7295a8e7
|
[
"Apache-2.0"
] | null | null | null |
autodist/kernel/synchronization/synchronizer.py
|
big-data-lab-umbc/autodist
|
c8514b27cf5608f35254b63c4ac8093c7295a8e7
|
[
"Apache-2.0"
] | null | null | null |
autodist/kernel/synchronization/synchronizer.py
|
big-data-lab-umbc/autodist
|
c8514b27cf5608f35254b63c4ac8093c7295a8e7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Petuum. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Synchronizer."""
from abc import ABC, abstractmethod
from tensorflow.python import ops
from autodist.kernel.common.utils import get_op_name, update_consumers, update_control_consumers, replica_prefix, \
strip_replica_prefix, get_index_from_tensor_name
class Synchronizer(ABC):
"""
Synchronizer.
Given a variable, can modify the TF Graph to synchronize its
gradients in either an in-graph or a between-graph fashion.
- In-graph means the synchronization happens in one `tf.Graph`
- Between-graph means the synchronization happens across
multiple `tf.Graphs` (e.g., each worker has its own graph)
"""
def __init__(self):
self.num_workers = None
self.num_replicas = None
self.worker_device = None
self.worker_id = None
self.var_op_to_agg_grad = None
self.var_op_to_accum_apply_op = None
self.is_chief = None
self.all_canonical_replica_devices = None
# pylint: disable=too-many-arguments
def assign_cluster_information(self,
num_workers,
num_replicas,
worker_device,
worker_id,
canonical_replica_devices,
is_chief=False):
"""Store cluster information in the synchronizer."""
self.num_workers = num_workers
self.num_replicas = num_replicas
self.worker_device = worker_device # local worker device
self.worker_id = worker_id # local worker id
self.all_canonical_replica_devices = canonical_replica_devices
self.is_chief = is_chief
return self
@abstractmethod
def in_graph_apply(self, graph_item, var_name):
"""
Apply in-graph synchronization to the grad and target in the graph.
Args:
graph_item (graph_item.GraphItem): The graph to put the new ops in.
var_name (str): The variable name w/o the replica prefix.
Returns:
graph_item.GraphItem
"""
return
@abstractmethod
def between_graph_apply(self, graph_item, var_name):
"""
Apply between-graph synchronization to the target ops in the graph.
Args:
graph_item (graph_item.GraphItem): The graph to put the new ops in.
var_name (str): The variable name w/o the replica prefix.
Returns:
graph_item.GraphItem
"""
return
@classmethod
def create(cls, name, *args, **kwargs):
"""
Create new Synchronizer instance given subclass name.
Args:
name: Name of the Synchronizer subclass (e.g. PSSynchronizer).
*args: Any args for the subclass constructor.
**kwargs: Any kwargs for the subclass constructor.
Returns:
Synchronizer
"""
subclass = next(subclass for subclass in cls.__subclasses__() if subclass.__name__ == name)
return subclass(*args, **kwargs)
@staticmethod
def _update_gradient_consumers(new_graph_item, consumer_ops, control_consumer_ops,
old_tensor_name, new_tensor):
"""Make gradient's consumers consume the aggregated gradient instead of the original one of replica_0."""
# Get the original tensor (the one from replica 0) to replace
old_op_name = strip_replica_prefix(get_op_name(old_tensor_name))
replica_0_op_name = ops.prepend_name_scope(old_op_name, replica_prefix(0))
replica_0_op = new_graph_item.graph.get_operation_by_name(replica_0_op_name)
output_idx = get_index_from_tensor_name(old_tensor_name)
replica_0_tensor = replica_0_op.outputs[output_idx]
update_consumers(consumer_ops, replica_0_tensor, new_tensor)
update_control_consumers(control_consumer_ops, replica_0_tensor.op, new_tensor.op)
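# --- Editor's illustrative sketch (not part of the original autodist file) ---
# Synchronizer.create() above resolves a concrete synchronizer by matching the
# requested name against cls.__subclasses__(). The minimal standalone example
# below reproduces that lookup pattern with made-up subclass names (PSSync,
# AllReduceSync); it only illustrates the mechanism and is not the autodist API.
from abc import ABC, abstractmethod
class _Sync(ABC):
    @classmethod
    def create(cls, name, *args, **kwargs):
        # Pick the direct subclass whose class name matches `name`.
        subclass = next(sub for sub in cls.__subclasses__() if sub.__name__ == name)
        return subclass(*args, **kwargs)
    @abstractmethod
    def apply(self):
        ...
class PSSync(_Sync):
    def apply(self):
        return "parameter-server style"
class AllReduceSync(_Sync):
    def apply(self):
        return "all-reduce style"
assert _Sync.create("PSSync").apply() == "parameter-server style"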
| 38.411765
| 115
| 0.660906
|
795234a535a4b848e922d273d92232252bd9ca5b
| 666
|
py
|
Python
|
test/unit/test_utils.py
|
kzawisto/arima_python
|
251b3ce28dedfe3118befeb7b008c4e7c91e33cf
|
[
"MIT"
] | 4
|
2020-04-07T07:18:07.000Z
|
2021-12-13T10:09:13.000Z
|
test/unit/test_utils.py
|
kzawisto/arima_python
|
251b3ce28dedfe3118befeb7b008c4e7c91e33cf
|
[
"MIT"
] | null | null | null |
test/unit/test_utils.py
|
kzawisto/arima_python
|
251b3ce28dedfe3118befeb7b008c4e7c91e33cf
|
[
"MIT"
] | 2
|
2021-12-02T15:06:09.000Z
|
2022-01-15T18:09:30.000Z
|
from nose.tools import *
from arima.utils import *
from hamcrest import *
def test_np_shift():
assert_that(np_shift([1, 2, 3, 4], 2, -1), only_contains(-1, -1, 1, 2))
assert_that(np_shift([1, 2, 3, 4], -2, -1), only_contains(3, 4, -1, -1))
assert_that(np_shift([1, 2, 3, 4], 0, -1), only_contains(1, 2, 3, 4))
def test_shift_stack():
assert_that(shift_and_stack([1, 2, 3, 4], 2)[
:, 0], only_contains(0, 1, 2, 3))
assert_that(shift_and_stack([1, 2, 3, 4], 2)[
:, 1], only_contains(0, 0, 1, 2))
def test_ols():
assert_that(ols_0dim([1, 2, 3, 4, 5], [1, 0, 2, 1, 3]),
close_to(0.47, 0.01))
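# --- Editor's illustrative sketch (not part of the original test file) ---
# The assertions above pin down np_shift's behaviour: a positive shift moves
# values to the right and pads the front with the fill value, a negative shift
# moves them left and pads the back, and a zero shift is a no-op. The helper
# below is one possible implementation consistent with those tests; the real
# arima.utils.np_shift may be written differently.
import numpy as np
def np_shift_sketch(arr, shift, fill_value):
    arr = np.asarray(arr)
    out = np.full_like(arr, fill_value)
    if shift > 0:
        out[shift:] = arr[:-shift]
    elif shift < 0:
        out[:shift] = arr[-shift:]
    else:
        out[:] = arr
    return out
assert list(np_shift_sketch([1, 2, 3, 4], 2, -1)) == [-1, -1, 1, 2]
assert list(np_shift_sketch([1, 2, 3, 4], -2, -1)) == [3, 4, -1, -1]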
| 30.272727
| 76
| 0.555556
|
795234d3acc9261e8dd3b2994c4e3a04b7105bf0
| 8,187
|
py
|
Python
|
statsig.py
|
jensengroup/statsig
|
f59754c66eb6c12de3e6563c08d4e12a2ec0d6ee
|
[
"MIT"
] | 2
|
2017-02-19T10:55:03.000Z
|
2018-01-09T11:57:49.000Z
|
statsig.py
|
jensengroup/statsig
|
f59754c66eb6c12de3e6563c08d4e12a2ec0d6ee
|
[
"MIT"
] | 2
|
2017-01-09T18:49:18.000Z
|
2017-02-21T20:08:20.000Z
|
statsig.py
|
jensengroup/statsig
|
f59754c66eb6c12de3e6563c08d4e12a2ec0d6ee
|
[
"MIT"
] | 3
|
2016-12-15T08:16:46.000Z
|
2021-04-24T10:54:01.000Z
|
import numpy as np
check = "rmse"
#check = "pearson"
def correl(X,Y):
N, = X.shape
if N < 9:
print "not enough points. {} datapoints given. at least 9 is required".format(n)
return
r = np.corrcoef(X,Y)[0][1]
r_sig = 1.96/np.sqrt(N-2+1.96**2)
F_plus = 0.5*np.log((1+r)/(1-r))+r_sig
F_minus = 0.5*np.log((1+r)/(1-r))-r_sig
le = r - (np.exp(2*F_minus)-1)/(np.exp(2*F_minus)+1)
ue = (np.exp(2*F_plus)-1)/(np.exp(2*F_plus)+1) - r
return r, le, ue
def rmse(X, Y):
"""
Root-Mean-Square Error
Lower Error = RMSE \left( 1- \sqrt{ 1- \frac{1.96\sqrt{2}}{\sqrt{N-1}} } \right )
Upper Error = RMSE \left( \sqrt{ 1+ \frac{1.96\sqrt{2}}{\sqrt{N-1}} } - 1 \right )
This only works for N >= 8.6832, otherwise the lower error will be
imaginary.
Parameters:
X -- One dimensional Numpy array of floats
Y -- One dimensional Numpy array of floats
Returns:
rmse -- Root-mean-square error between X and Y
le -- Lower error on the RMSE value
ue -- Upper error on the RMSE value
"""
N, = X.shape
if N < 9:
print "Not enough points. {} datapoints given. At least 9 is required".format(N)
return
diff = X - Y
diff = diff**2
rmse = np.sqrt(diff.mean())
le = rmse * (1.0 - np.sqrt(1-1.96*np.sqrt(2.0)/np.sqrt(N-1)))
ue = rmse * (np.sqrt(1 + 1.96*np.sqrt(2.0)/np.sqrt(N-1))-1)
return rmse, le, ue
def mae(X, Y):
"""
Mean Absolute Error (MAE)
Lower Error = MAE_X \left( 1- \sqrt{ 1- \frac{1.96\sqrt{2}}{\sqrt{N-1}} } \right )
Upper Error = MAE_X \left( \sqrt{ 1+ \frac{1.96\sqrt{2}}{\sqrt{N-1}} }-1 \right )
Parameters:
X -- One dimensional Numpy array of floats
Y -- One dimensional Numpy array of floats
Returns:
mae -- Mean-absolute error between X and Y
le -- Lower error on the MAE value
ue -- Upper error on the MAE value
"""
N, = X.shape
mae = np.abs(X - Y)
mae = mae.mean()
le = mae * (1 - np.sqrt(1 - 1.96*np.sqrt(2)/np.sqrt(N-1) ) )
ue = mae * ( np.sqrt(1 + 1.96*np.sqrt(2)/np.sqrt(N-1) ) -1 )
return mae, le, ue
def me(X, Y):
"""
mean error (ME)
L_X = U_X = \frac{1.96 s_N}{\sqrt{N}}
where sN is the standard population deviation (e.g. STDEVP in Excel).
Parameters:
X -- One dimensional Numpy array of floats
Y -- One dimensional Numpy array of floats
Returns:
mae -- Mean error between X and Y
e -- Upper and Lower error on the ME
"""
N, = X.shape
error = X - Y
me = error.mean()
s_N = stdevp(error, me, N)
e = 1.96*s_N/np.sqrt(N)
return me, e
def stdevp(X, X_hat, N):
"""
Parameters:
X -- One dimensional Numpy array of floats
X_hat -- Float
N -- Integer
Returns:
Calculates standard deviation based on the entire population given as
arguments. The standard deviation is a measure of how widely values are
dispersed from the average value (the mean).
"""
return np.sqrt(np.sum((X-X_hat)**2)/N)
if __name__ == '__main__':
import sys
import numpy as np
import matplotlib.pyplot as plt
if len(sys.argv) < 2:
exit("usage: python example.py example_input.csv")
filename = sys.argv[1]
f = open(filename, "r")
data = np.genfromtxt(f, delimiter=',', names=True)
f.close()
try:
ref = data['REF']
except:
ref = data['\xef\xbb\xbfREF']
n = len(ref)
methods = data.dtype.names
methods = methods[1:]
nm = len(methods)
rmse_list = []
rmse_lower = []
rmse_upper = []
mae_list = []
mae_lower = []
mae_upper = []
me_list = []
me_lower = []
me_upper = []
r_list = []
r_lower = []
r_upper = []
for method in methods:
mdata = data[method]
# RMSE
mrmse, mle, mue = rmse(mdata, ref)
rmse_list.append(mrmse)
rmse_lower.append(mle)
rmse_upper.append(mue)
# MAD
mmae, maele, maeue = mae(mdata, ref)
mae_list.append(mmae)
mae_lower.append(maele)
mae_upper.append(maeue)
# ME
mme, mmee = me(mdata, ref)
me_list.append(mme)
me_lower.append(mmee)
me_upper.append(mmee)
# r
r, rle, rue = correl(mdata, ref)
r_list.append(r)
r_lower.append(rle)
r_upper.append(rue)
print "Method_A Method_B RMSE_A RMSE_B RMSE_A-RMSE_B Comp Err same?"
ps = "{:10s} "*2 + "{:8.3f} "*2 + "{:8.3f}" + "{:15.3f}" + " {:}"
if check == "pearson":
measure = r_list
upper_error = r_upper
lower_error = r_lower
else:
measure = rmse_list
upper_error = rmse_upper
lower_error = rmse_lower
# measure = mae_list
# upper_error = mae_upper
# lower_error = mae_lower
for i in xrange(nm):
for j in xrange(i+1, nm):
m_i = methods[i]
m_j = methods[j]
rmse_i = measure[i]
rmse_j = measure[j]
r_ij = np.corrcoef(data[m_i], data[m_j])[0][1]
if rmse_i > rmse_j:
lower = lower_error[i]
upper = upper_error[j]
else:
lower = lower_error[j]
upper = upper_error[i]
comp_error = np.sqrt(upper**2 + lower**2 - 2.0*r_ij*upper*lower)
significance = abs(rmse_i - rmse_j) < comp_error
print ps.format(m_i, m_j, rmse_i, rmse_j, rmse_i-rmse_j, comp_error, significance)
print
print "\\begin{table}[]"
print "\centering"
print "\caption{}"
print "\label{}"
print "\\begin{tabular}{l" + nm*"c" + "}"
print "\midrule"
print "& " + " & ".join(methods) + "\\\\"
print "\midrule"
# for i in xrange(nm-1):
# print '%.1f $\pm$ %.1f/%.1f &'%(rmse_list[i],lower_error[i],rmse_upper[i]),
# print '%.1f $\pm$ %.1f/%.1f'%(rmse_list[-1],lower_error[-1],rmse_upper[-1])
print "RMSE & " + " & ".join(format(x, "3.1f") for x in rmse_list) + "\\\\"
temp_list = [ i+"/"+j for i,j in zip([format(x, "3.1f") for x in rmse_upper],[format(x, "3.1f") for x in rmse_lower])]
print "95 \% conf & $\pm$ " + " & $\pm$ ".join(temp_list) + "\\\\"
temp_list = [ i+" $\pm$ "+j for i,j in zip([format(x, "3.1f") for x in me_list],[format(x, "3.1f") for x in me_upper])]
print "ME & " + " & ".join(temp_list) + "\\\\"
print "$r$ & " + " & ".join(format(x, "3.2f") for x in r_list) + "\\\\"
temp_list = [ i+"/"+j for i,j in zip([format(x, "3.2f") for x in r_upper],[format(x, "3.2f") for x in r_lower])]
print "95 \% conf & $\pm$ " + " & $\pm$ ".join(temp_list) + "\\\\"
print "\midrule"
print "\end{tabular}"
print "\end{table}"
# Create x-axis
x = range(len(methods))
# Errorbar (upper and lower)
asymmetric_error = [rmse_lower, rmse_upper]
# Add errorbar for RMSE
plt.errorbar(x, rmse_list, yerr=asymmetric_error, fmt='o')
# change x-axis to method names and rotate the ticks 30 degrees
plt.xticks(x, methods, rotation=30)
# Pad margins so that markers don't get clipped by the axes
plt.margins(0.2)
# Tweak spacing to prevent clipping of tick-labels
plt.subplots_adjust(bottom=0.15)
# Add grid to plot
plt.grid(True)
# Set plot title
plt.title('Root-mean-square error')
# Save plot to PNG format
plt.savefig('example_rmsd.png')
# Clear figure
plt.clf()
# MAE plot
asymmetric_error = [mae_lower, mae_upper]
plt.errorbar(x, mae_list, yerr=asymmetric_error, fmt='o')
plt.xticks(x, methods, rotation=30)
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
plt.grid(True)
plt.title('Mean Absolute Error')
plt.savefig('example_mae.png')
# Clear figure
plt.clf()
# ME plot
asymmetric_error = [me_lower, me_upper]
plt.errorbar(x, me_list, yerr=asymmetric_error, fmt='o')
plt.xticks(x, methods, rotation=30)
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
plt.grid(True)
plt.title('Mean Error')
plt.savefig('example_me.png')
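# --- Editor's illustrative sketch (standalone, not part of the original script) ---
# The error bars computed above follow the formulas quoted in the docstrings:
# the RMSE bounds use RMSE*(1 - sqrt(1 - 1.96*sqrt(2)/sqrt(N-1))) and
# RMSE*(sqrt(1 + 1.96*sqrt(2)/sqrt(N-1)) - 1), and the Pearson-r bounds come
# from a Fisher z-transform with half-width 1.96/sqrt(N-2+1.96**2). The demo
# below recomputes both on synthetic data (values are made up for illustration).
import numpy as np
def _demo_error_bars():
    rng = np.random.RandomState(0)
    ref = np.linspace(0.0, 10.0, 25)
    pred = ref + rng.normal(scale=0.5, size=ref.shape)
    N = ref.size
    # RMSE and its asymmetric 95% confidence interval.
    rmse_val = np.sqrt(np.mean((pred - ref) ** 2))
    lower = rmse_val * (1.0 - np.sqrt(1.0 - 1.96 * np.sqrt(2.0) / np.sqrt(N - 1)))
    upper = rmse_val * (np.sqrt(1.0 + 1.96 * np.sqrt(2.0) / np.sqrt(N - 1)) - 1.0)
    # Pearson r and its Fisher-z based 95% confidence interval
    # (tanh(z +/- half) is the same back-transform as (exp(2F)-1)/(exp(2F)+1)).
    r = np.corrcoef(pred, ref)[0][1]
    half = 1.96 / np.sqrt(N - 2 + 1.96 ** 2)
    z = 0.5 * np.log((1 + r) / (1 - r))
    return rmse_val, (lower, upper), r, (np.tanh(z - half), np.tanh(z + half))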
| 25.425466
| 123
| 0.562233
|
7952366c676a7a29d90e5013ad86e14768a5dd11
| 2,023
|
py
|
Python
|
L-System.py
|
Moonire/L-system_python
|
b21f2d7a7b9dd571f47c95fda3cad5e8cff2e00b
|
[
"MIT"
] | null | null | null |
L-System.py
|
Moonire/L-system_python
|
b21f2d7a7b9dd571f47c95fda3cad5e8cff2e00b
|
[
"MIT"
] | null | null | null |
L-System.py
|
Moonire/L-system_python
|
b21f2d7a7b9dd571f47c95fda3cad5e8cff2e00b
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
"""
Non-stochastic, context-free L-system implementation for Python 3.x.
Conforms to the description given in 'The Algorithmic Beauty of Plants' by Lindenmayer (algorithmicbotany.org/papers/abop/abop.pdf),
i.e. the angles are in degrees and grow clockwise.
supports edge and node rewriting
supports branching
all lower cases : forward line
all upper cases : ignored in the drawing process
_ : forward without drawing
[ , ] : respectively saving and popping a state
h : initial heading, for manual correction of the orientation of the figure (in degrees)
Displayed using matplotlib (quicker and smoother than turtle)
"""
def L_system( axiom, rule , angle, iterations=2, h=0 ):
d, h, axiom_ = angle*np.pi/180, h*np.pi/180, axiom
R = { '-': [ [np.cos( d), -np.sin( d)],[np.sin( d), np.cos( d)] ] ,
'+': [ [np.cos(-d), -np.sin(-d)],[np.sin(-d), np.cos(-d)] ] }
for i in range(iterations):
sequence =''
for i in axiom_ :
try :
sequence += rule[i]
except :
sequence += i
axiom_ = sequence
print(axiom_)
a, k = (0,0), []
r = [ [np.cos(h), -np.sin(h)],[np.sin(h), np.cos(h)] ]
for i in axiom_ :
if i.islower() :
b = np.transpose(np.add(a, np.matmul(r, np.transpose([0,1]) )))
plt.plot([ a[0],b[0] ], [ a[1],b[1] ], color='g')
a = b
elif i in R:
r = np.transpose(np.matmul(R[i], np.transpose(r) ))
elif i=='_':
a = np.transpose(np.add(a, np.matmul(r, np.transpose([0,1]) )))
elif i=='[':
k.extend([a,r])
elif i==']':
r, a = k.pop(), k.pop()
plt.suptitle("n=%s, angle=%s°, axiom=%s" %(iterations, angle, axiom))
plt.title(str(rule).replace("'",""))
# example (d), page 25
axiom = 'X'
rule = { 'f':'ff' , 'X':'f[+X]f[-X]+X' }
angle = 20
iterations = 7
L_system(axiom, rule, angle, iterations)
plt.axis('off')
plt.axis('equal')
plt.savefig( '%s_%s' %(axiom,iterations) )
plt.show()
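# --- Editor's illustrative sketch (not part of the original file) ---
# The heart of the L-system above is the rewriting step: each symbol of the
# axiom is replaced by its production (or kept as-is when no rule matches) and
# the result is fed back in for the next iteration. The helper below isolates
# that step for the rule set used in the example; it mirrors the inner loop of
# L_system() but is a standalone sketch, not a drop-in replacement.
def rewrite(axiom, rule, iterations):
    for _ in range(iterations):
        axiom = "".join(rule.get(symbol, symbol) for symbol in axiom)
    return axiom
# After one step, 'X' with the rule above expands to 'f[+X]f[-X]+X'.
assert rewrite('X', {'f': 'ff', 'X': 'f[+X]f[-X]+X'}, 1) == 'f[+X]f[-X]+X'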
| 30.651515
| 134
| 0.587741
|
7952367810c53905c83c957437db2994c42cb727
| 17,004
|
py
|
Python
|
pymatgen/io/abinitio/abiinspect.py
|
ctoher/pymatgen
|
54df358f61fbe60417e90850811b75c1a9e2e230
|
[
"MIT"
] | null | null | null |
pymatgen/io/abinitio/abiinspect.py
|
ctoher/pymatgen
|
54df358f61fbe60417e90850811b75c1a9e2e230
|
[
"MIT"
] | null | null | null |
pymatgen/io/abinitio/abiinspect.py
|
ctoher/pymatgen
|
54df358f61fbe60417e90850811b75c1a9e2e230
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
This module provides objects to inspect the status of the Abinit tasks at run-time.
by extracting information from the main output file (text format).
"""
from __future__ import unicode_literals, division, print_function
import collections
import numpy as np
import yaml
import six
from six.moves import cStringIO, map, zip
from prettytable import PrettyTable
from pymatgen.util.plotting_utils import add_fig_kwargs
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
def _magic_parser(stream, magic):
"""
Parse the section with the SCF cycle
Returns:
dict where the keys are the column names and
the values are lists of numbers. None if no section was found.
.. warning::
The parser is very fragile and should be replaced by YAML.
"""
#Example (SCF cycle, similar format is used for phonons):
#
# iter Etot(hartree) deltaE(h) residm vres2
# ETOT 1 -8.8604027880849 -8.860E+00 2.458E-02 3.748E+00
# At SCF step 5 vres2 = 3.53E-08 < tolvrs= 1.00E-06 =>converged.
in_doc, fields = 0, None
for line in stream:
line = line.strip()
if line.startswith(magic):
keys = line.split()
fields = collections.OrderedDict((k, []) for k in keys)
if fields is not None:
#print(line)
in_doc += 1
if in_doc == 1:
continue
# End of the section.
if not line: break
tokens = list(map(float, line.split()[1:]))
assert len(tokens) == len(keys)
for l, v in zip(fields.values(), tokens):
l.append(v)
return fields
def plottable_from_outfile(filepath):
"""
Factory function that returns a plottable object by inspecting the main output file of abinit
Returns None if it is not able to detect the class to instantiate.
"""
# TODO
# Figure out how to detect the type of calculations
# without having to parse the input. Possible approach: YAML doc
#with YamlTokenizer(filepath) as r:
# doc = r.next_doc_with_tag("!CalculationType")
# d = yaml.load(doc.text_notag)
# calc_type = d["calculation_type"]
#ctype2class = {
# "Ground State": GroundStateScfCycle,
# "Phonon": PhononScfCycle,
# "Relaxation": Relaxation,
#}
#obj = ctype2class.get(calc_type, None)
obj = GroundStateScfCycle
if obj is not None:
return obj.from_file(filepath)
else:
return None
class ScfCycle(collections.Mapping):
"""
It essentially consists of a dictionary mapping string
to list of floats containing the data at the different iterations.
"""
def __init__(self, fields):
self.fields = fields
#print(fields)
all_lens = [len(lst) for lst in self.values()]
self.num_iterations = all_lens[0]
assert all(n == self.num_iterations for n in all_lens)
def __getitem__(self, slice):
return self.fields.__getitem__(slice)
def __iter__(self):
return self.fields.__iter__()
def __len__(self):
return len(self.fields)
def __str__(self):
"""String representation."""
table = PrettyTable([list(self.fields.keys())])
for it in range(self.num_iterations):
row = list(map(str, (self[k][it] for k in self.keys())))
table.add_row(row)
stream = cStringIO()
print(table, file=stream)
stream.seek(0)
return "".join(stream)
@property
def last_iteration(self):
"""Returns a dictionary with the values of the last iteration."""
return {k: v[-1] for k, v in self.items()}
@classmethod
def from_file(cls, filepath):
"""Read the first occurrence of ScfCycle from file."""
with open(filepath, "r") as stream:
return cls.from_stream(stream)
@classmethod
def from_stream(cls, stream):
"""
Read the first occurrence of ScfCycle from stream.
Returns:
None if no `ScfCycle` entry is found.
"""
fields = _magic_parser(stream, magic=cls.MAGIC)
if fields:
fields.pop("iter")
return cls(fields)
else:
return None
@add_fig_kwargs
def plot(self, **kwargs):
"""
Uses matplotlib to plot the evolution of the SCF cycle. Return `matplotlib` figure
"""
import matplotlib.pyplot as plt
# Build grid of plots.
num_plots, ncols, nrows = len(self), 1, 1
if num_plots > 1:
ncols = 2
nrows = (num_plots//ncols) + (num_plots % ncols)
fig, ax_list = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, squeeze=False)
ax_list = ax_list.ravel()
iter_num = np.array(list(range(self.num_iterations)))
for ((key, values), ax) in zip(self.items(), ax_list):
ax.grid(True)
ax.set_xlabel('Iteration')
ax.set_xticks(iter_num, minor=False)
ax.set_ylabel(key)
xx, yy = iter_num, values
if self.num_iterations > 1:
# Don't show the first iteration since it's not very useful.
xx, yy = xx[1:] + 1, values[1:]
#print("xx ",xx, "yy ",yy)
ax.plot(xx, yy, "-o", lw=2.0)
# Get around a bug in matplotlib.
if (num_plots % ncols) != 0:
ax_list[-1].plot(xx, yy, lw=0.0)
ax_list[-1].axis('off')
return fig
class GroundStateScfCycle(ScfCycle):
"""Result of the Ground State self-consistent cycle."""
#yaml_tag = '!GroundStateScfCycle'
MAGIC = "iter Etot(hartree)"
@property
def last_etotal(self):
"""The total energy at the last iteration."""
return self["Etot(hartree)"][-1]
class PhononScfCycle(ScfCycle):
"""Result of the Phonon self-consistent cycle."""
#yaml_tag = '!PhononScfCycle'
MAGIC = "iter 2DEtotal(Ha)"
@property
def last_etotal(self):
"""The 2-nd order derivative of the energy at the last iteration."""
return self["2DEtotal(Ha)"][-1]
class Relaxation(collections.Iterable):
"""
A list of :class:`GroundStateScfCycle` objects.
.. note::
Forces, stresses and crystal structures are missing.
Solving this problem would require the standardization
of the Abinit output file (YAML).
"""
def __init__(self, cycles):
self.cycles = cycles
def __iter__(self):
return self.cycles.__iter__()
def __len__(self):
return self.cycles.__len__()
def __str__(self):
"""String representation."""
lines = []
app = lines.append
for i, cycle in enumerate(self):
app("")
app("RELAXATION STEP: %d" % i)
app(str(cycle))
app("")
return "\n".join(lines)
@classmethod
def from_file(cls, filepath):
"""Initialize the object from the Abinit main output file."""
with open(filepath, "r") as stream:
return cls.from_stream(stream)
@classmethod
def from_stream(cls, stream):
"""
Extract data from stream. Returns None if some error occurred.
"""
cycles = []
while True:
scf_cycle = GroundStateScfCycle.from_stream(stream)
if scf_cycle is None: break
cycles.append(scf_cycle)
return cls(cycles) if cycles else None
@property
def history(self):
"""
Dictionary of lists with the evolution of the data as function of the relaxation step.
"""
try:
return self._history
except AttributeError:
self._history = history = collections.defaultdict(list)
for cycle in self:
d = cycle.last_iteration
for k, v in d.items():
history[k].append(v)
return self._history
@add_fig_kwargs
def plot(self, **kwargs):
"""
Uses matplotlib to plot the evolution of the structural relaxation.
Returns:
`matplotlib` figure
"""
import matplotlib.pyplot as plt
history = self.history
#print(history)
relax_step = list(range(len(self)))
# Build grid of plots.
num_plots, ncols, nrows = len(list(history.keys())), 1, 1
if num_plots > 1:
ncols = 2
nrows = (num_plots//ncols) + (num_plots % ncols)
fig, ax_list = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, squeeze=False)
ax_list = ax_list.ravel()
if (num_plots % ncols) != 0:
ax_list[-1].axis('off')
for (key, values), ax in zip(history.items(), ax_list):
ax.grid(True)
ax.set_xlabel('Relaxation Step')
ax.set_xticks(relax_step, minor=False)
ax.set_ylabel(key)
ax.plot(relax_step, values, "-o", lw=2.0)
return fig
# TODO
#class HaydockIterations(collections.Iterable):
# """This object collects info on the different steps of the Haydock technique used in the Bethe-Salpeter code"""
# @classmethod
# def from_file(cls, filepath):
# """Initialize the object from file."""
# with open(filepath, "r") as stream:
# return cls.from_stream(stream)
#
# @classmethod
# def from_stream(cls, stream):
# """Extract data from stream. Returns None if some error occurred."""
# cycles = []
# while True:
# scf_cycle = GroundStateScfCycle.from_stream(stream)
# if scf_cycle is None: break
# cycles.append(scf_cycle)
#
# return cls(cycles) if cycles else None
#
# #def __init__(self):
#
# def plot(self, **kwargs):
# """
# Uses matplotlib to plot the evolution of the structural relaxation.
# ============== ==============================================================
# kwargs Meaning
# ============== ==============================================================
# title Title of the plot (Default: None).
# show True to show the figure (Default).
# savefig 'abc.png' or 'abc.eps'* to save the figure to a file.
# ============== ==============================================================
# Returns:
# `matplotlib` figure
# """
# import matplotlib.pyplot as plt
# title = kwargs.pop("title", None)
# show = kwargs.pop("show", True)
# savefig = kwargs.pop("savefig", None)
# if title: fig.suptitle(title)
# if savefig is not None: fig.savefig(savefig)
# if show: plt.show()
# return fig
class YamlTokenizerError(Exception):
"""Exceptions raised by :class:`YamlTokenizer`."""
class YamlTokenizer(collections.Iterator):
"""
Provides context-manager support so you can use it in a with statement.
"""
Error = YamlTokenizerError
def __init__(self, filename):
# The position inside the file.
self.linepos = 0
self.stream = open(filename, "r")
def __iter__(self):
return self
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __del__(self):
self.close()
def close(self):
try:
self.stream.close()
except:
print("Exception in YAMLTokenizer.close()")
print(straceback())
def seek(self, offset, whence=0):
"""
seek(offset[, whence]) -> None. Move to new file position.
Argument offset is a byte count. Optional argument whence defaults to
0 (offset from start of file, offset should be >= 0); other values are 1
(move relative to current position, positive or negative), and 2 (move
relative to end of file, usually negative, although many platforms allow
seeking beyond the end of a file). If the file is opened in text mode,
only offsets returned by tell() are legal. Use of other offsets causes
undefined behavior.
Note that not all file objects are seekable.
"""
assert offset == 0
self.linepos = 0
return self.stream.seek(offset, whence)
# Python 3 compatibility
def __next__(self):
return self.next()
def next(self):
"""
Returns the first YAML document in stream.
.. warning::
Assumes that the YAML documents are closed explicitly with the sentinel '...'
"""
in_doc, lines, doc_tag = None, [], None
for i, line in enumerate(self.stream):
self.linepos += 1
#print(i, line)
if line.startswith("---"):
# Include only lines in the form:
# "--- !tag"
# "---"
# Other lines are spurious.
in_doc = False
l = line[3:].strip()
if l.startswith("!"):
# "--- !tag"
doc_tag = l
in_doc = True
elif not l:
# "---"
in_doc = True
doc_tag = None
if in_doc:
lineno = self.linepos
if in_doc:
lines.append(line)
if in_doc and line.startswith("..."):
return YamlDoc(text="".join(lines), lineno=lineno, tag=doc_tag)
raise StopIteration("Cannot find next YAML document")
def all_yaml_docs(self):
"""
Returns a list with all the YAML docs found in stream.
Seek the stream before returning.
.. warning::
Assume that all the YAML docs (with the exception of the last one)
are closed explicitly with the sentinel '...'
"""
docs = [doc for doc in self]
self.seek(0)
return docs
def next_doc_with_tag(self, doc_tag):
"""
Returns the next document with the specified tag; raises StopIteration if no such document is found.
"""
while True:
try:
doc = six.advance_iterator(self)
if doc.tag == doc_tag:
return doc
except StopIteration:
raise
def all_docs_with_tag(self, doc_tag):
"""
Returns all the documents with the specified tag.
"""
docs = []
while True:
try:
doc = self.next_doc_with_tag(doc_tag)
docs.append(doc)
except StopIteration:
break
self.seek(0)
return docs
def yaml_read_kpoints(filename, doc_tag="!Kpoints"):
"""Read the K-points from file."""
with YamlTokenizer(filename) as r:
doc = r.next_doc_with_tag(doc_tag)
d = yaml.load(doc.text_notag)
return np.array(d["reduced_coordinates_of_qpoints"])
def yaml_read_irred_perts(filename, doc_tag="!IrredPerts"):
"""Read the list of irreducible perturbations from file."""
with YamlTokenizer(filename) as r:
doc = r.next_doc_with_tag(doc_tag)
d = yaml.load(doc.text_notag)
return d["irred_perts"]
class YamlDoc(object):
"""
Handy object that stores the YAML document, its main tag and the
position inside the file.
"""
__slots__ = [
"text",
"lineno",
"tag",
]
def __init__(self, text, lineno, tag=None):
"""
Args:
text: String with the YAML document.
lineno: The line number where the document is located.
tag: The YAML tag associated with the document.
"""
# Sanitize strings: use "ignore" to skip invalid characters in .encode/.decode.
if isinstance(text, bytes):
text = text.decode("utf-8", "ignore")
text = text.rstrip().lstrip()
self.text = text
self.lineno = lineno
if isinstance(tag, bytes):
tag = tag.decode("utf-8", "ignore")
self.tag = tag
def __str__(self):
return self.text
def __eq__(self, other):
if other is None: return False
return (self.text == other.text and
self.lineno == other.lineno and
self.tag == other.tag)
def __ne__(self, other):
return not self == other
@property
def text_notag(self):
"""
Returns the YAML text without the tag.
Useful if we don't have any constructor registered for the tag
(we used the tag just to locate the document).
"""
if self.tag is not None:
return self.text.replace(self.tag, "")
else:
return self.text
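# --- Editor's illustrative sketch (not part of the original pymatgen file) ---
# _magic_parser() above scans the main output stream for a header line that
# starts with the magic string and then collects the numeric rows below it into
# an OrderedDict of columns, stopping at the first blank line. The helper below
# feeds it a tiny hand-written block shaped like the SCF example quoted in its
# docstring; the numbers and spacing are illustrative only.
def _demo_magic_parser():
    from io import StringIO
    sample = StringIO(
        "iter Etot(hartree) deltaE(h) residm vres2\n"
        "ETOT 1 -8.8604027880849 -8.860E+00 2.458E-02 3.748E+00\n"
        "\n"
    )
    fields = _magic_parser(sample, magic="iter Etot(hartree)")
    # fields maps each column name to one value per iteration, e.g.
    # fields["Etot(hartree)"] == [-8.8604027880849]
    return fields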
| 29.317241
| 116
| 0.563985
|