blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9ce14fca10d2ddb156b0712bfbb6af3a9ece4b33 | 5ca4a0d91f5bd119e80478b5bd3d43ed30133a42 | /film20/core/forms.py | adb00b502ae71fb5993bc1d567b86ff73d69687b | [] | no_license | thuvh/filmmaster | 1fc81377feef5a9e13f792b329ef90f840404ec5 | dd6a2ee5a4951b2397170d5086c000169bf91350 | refs/heads/master | 2021-01-17T16:10:54.682908 | 2012-04-29T18:19:52 | 2012-04-29T18:19:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,150 | py | #-------------------------------------------------------------------------------
# Filmaster - a social web network and recommendation engine
# Copyright (c) 2009 Filmaster (Borys Musielak, Adam Zielinski).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
from django.utils.translation import gettext_lazy as _
from django import forms
from django.forms.util import ErrorList
from django.utils.translation import string_concat
from film20.core.search_helper import *
from film20.utils.slughifi import slughifi
import logging
logger = logging.getLogger(__name__)
def get_related_people_as_comma_separated_string(related_people):
    """Render an iterable of person objects as a comma separated string."""
    joined = "".join(unicode(person) + ", " for person in related_people)
    # Drop the trailing separator (and any stray trailing comma/space chars).
    return joined.rstrip(", ")
def do_clean_related_person(self, related_person_str='related_person'):
    """Form-cleaning helper (mixin style: *self* is a Django form instance).

    Parses the comma separated names in
    ``self.cleaned_data[related_person_str]`` and resolves each one to a
    person via the search index.  Names that cannot be resolved add a
    message to ``self._errors``.  Returns the list of resolved people, or
    the raw value when it is already a list/tuple or empty.
    """
    related = []
    related_person_form_data = self.cleaned_data[related_person_str]
    if isinstance(related_person_form_data, (list, tuple)):
        # Already cleaned elsewhere (e.g. a multiple-choice field): pass through.
        return related_person_form_data
    if len(related_person_form_data) == 0:
        # BUGFIX: this line used to read ``... == ""`` -- a no-op comparison
        # instead of the intended normalizing assignment.
        self.cleaned_data[related_person_str] = ""
        return self.cleaned_data[related_person_str]
    else:
        # Normalize ", " separators to bare commas, then split into names.
        related_person_form_data = related_person_form_data.replace(", ", ",").rstrip(", ")
        related_people = related_person_form_data.rstrip(",").split(",")
        for related_person in related_people:
            related_permalink = slughifi(related_person)
            namesurname = related_person.split(" ")
            person_name = None
            person_surname = None
            if len(namesurname) != 1:
                # Last token is the surname; everything before it is the name.
                person_surname = namesurname[-1]
                namesurname = namesurname[:-1]
                person_name = " ".join(namesurname)
            else:
                person_surname = namesurname[0]
            search_helper = Search_Helper()
            search_results = search_helper.search_person_by_phrase(related_person)
            best_results = list(search_results['best_results'])
            other_results = list(search_results['results'])
            people = best_results + other_results
            if people:
                names = ""
                for person in people:
                    person_permalink = slughifi(unicode(person))
                    if related_person != unicode(person):
                        # Not an exact match: accumulate suggestions for the error.
                        names = names + ", " + unicode(person)
                        msg = string_concat(_('Person'), " '", unicode(related_person), "' ", _('is not present in the database!'), " ", _('Maybe you were looking for these people:'), names)
                        self._errors[related_person_str] = ErrorList([msg])
                    else:
                        related.append(person)
                        break
            else:
                msg = string_concat(_('Person is not present in the database:'), unicode(related_person))
                self._errors[related_person_str] = ErrorList([msg])
    logger.debug("Related: " + str(related))
    return related
def comma_split(s):
    """Generator splitting *s* on commas while honouring backslash escapes.

    ``\\,`` yields a literal comma and ``\\\\`` a literal backslash (the
    inverse of :func:`comma_escape`).  Items are stripped of surrounding
    whitespace and empty items are dropped.

    BUGFIX: the original tracked the previous raw character and never reset
    it after consuming an escape, so an escaped backslash left the parser in
    escape mode and swallowed a following separator (``"a\\\\,b"`` parsed as
    one item instead of two).  An explicit boolean escape state fixes that.
    """
    out = ''
    escaped = False
    for c in s:
        if escaped:
            # Previous char was an unconsumed backslash: take this literally.
            out += c
            escaped = False
        elif c == '\\':
            escaped = True
        elif c == ',':
            out = out.strip()
            if out:
                yield out
            out = ''
        else:
            out += c
    out = out.strip()
    if out:
        yield out
def comma_escape(s):
    """Escape *s* for comma_split: prefix each backslash and comma with a backslash."""
    return ''.join('\\' + c if c in '\\,' else c for c in s)
| [
"email@ibrahimcesar.com"
] | email@ibrahimcesar.com |
79ccd0a82b25f9eaa786743f1ddefc8eaeb949f1 | 238e46a903cf7fac4f83fa8681094bf3c417d22d | /output/python37/Lib/email/errors.py | 95aa000352daf17b4eed229313953cfe30607bb5 | [
"bzip2-1.0.6",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-newlib-historical",
"OpenSSL",
"Python-2.0",
"TCL",
"BSD-3-Clause"
] | permissive | baojunli/FastCAE | da1277f90e584084d461590a3699b941d8c4030b | a3f99f6402da564df87fcef30674ce5f44379962 | refs/heads/master | 2023-02-25T20:25:31.815729 | 2021-02-01T03:17:33 | 2021-02-01T03:17:33 | 268,390,180 | 1 | 0 | BSD-3-Clause | 2020-06-01T00:39:31 | 2020-06-01T00:39:31 | null | UTF-8 | Python | false | false | 3,642 | py | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""email package exception classes."""
# Exception hierarchy: MessageError is the package root; all parsing
# failures derive from MessageParseError so callers can catch them at once.
class MessageError(Exception):
    """Base class for errors in the email package."""
class MessageParseError(MessageError):
    """Base class for message parsing errors."""
class HeaderParseError(MessageParseError):
    """Error while parsing headers."""
class BoundaryError(MessageParseError):
    """Couldn't find terminating boundary."""
# Also a TypeError so existing type-based handlers keep working.
class MultipartConversionError(MessageError, TypeError):
    """Conversion to a multipart is prohibited."""
class CharsetError(MessageError):
    """An illegal charset was given."""
# These are parsing defects which the parser was able to work around.
class MessageDefect(ValueError):
    """Base class for a message defect."""
    def __init__(self, line=None):
        # Only forward ``line`` to ValueError when present so that str() of
        # a defect without context stays empty.
        if line is not None:
            super().__init__(line)
        self.line = line  # offending input line, or None
# Concrete defects the parser records on a message instead of raising.
class NoBoundaryInMultipartDefect(MessageDefect):
    """A message claimed to be a multipart but had no boundary parameter."""
class StartBoundaryNotFoundDefect(MessageDefect):
    """The claimed start boundary was never found."""
class CloseBoundaryNotFoundDefect(MessageDefect):
    """A start boundary was found, but not the corresponding close boundary."""
class FirstHeaderLineIsContinuationDefect(MessageDefect):
    """A message had a continuation line as its first header line."""
class MisplacedEnvelopeHeaderDefect(MessageDefect):
    """A 'Unix-from' header was found in the middle of a header block."""
class MissingHeaderBodySeparatorDefect(MessageDefect):
    """Found line with no leading whitespace and no colon before blank line."""
# XXX: backward compatibility, just in case (it was never emitted).
MalformedHeaderDefect = MissingHeaderBodySeparatorDefect
class MultipartInvariantViolationDefect(MessageDefect):
    """A message claimed to be a multipart but no subparts were found."""
class InvalidMultipartContentTransferEncodingDefect(MessageDefect):
    """An invalid content transfer encoding was set on the multipart itself."""
class UndecodableBytesDefect(MessageDefect):
    """Header contained bytes that could not be decoded"""
class InvalidBase64PaddingDefect(MessageDefect):
    """base64 encoded sequence had an incorrect length"""
class InvalidBase64CharactersDefect(MessageDefect):
    """base64 encoded sequence had characters not in base64 alphabet"""
# These errors are specific to header parsing.
class HeaderDefect(MessageDefect):
    """Base class for a header defect."""
    def __init__(self, *args, **kw):
        # No extra behavior yet; kept as a stable subclassing point.
        super().__init__(*args, **kw)
class InvalidHeaderDefect(HeaderDefect):
    """Header is not valid, message gives details."""
class HeaderMissingRequiredValue(HeaderDefect):
    """A header that must have a value had none"""
class NonPrintableDefect(HeaderDefect):
    """ASCII characters outside the ascii-printable range found"""

    def __init__(self, non_printables):
        super().__init__(non_printables)
        # Keep the offending characters around for later inspection.
        self.non_printables = non_printables

    def __str__(self):
        return ("the following ASCII non-printables found in header: "
                f"{self.non_printables}")
# Remaining header defects: syntax accepted only for backward compatibility.
class ObsoleteHeaderDefect(HeaderDefect):
    """Header uses syntax declared obsolete by RFC 5322"""
class NonASCIILocalPartDefect(HeaderDefect):
    """local_part contains non-ASCII characters"""
    # This defect only occurs during unicode parsing, not when
    # parsing messages decoded from binary.
| [
"l”ibaojunqd@foxmail.com“"
] | l”ibaojunqd@foxmail.com“ |
e80c0c25ac94b096af9c8a3e75349f48c170d549 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_347/ch11_2020_03_03_17_26_22_152978.py | 5f675aae94174bd2c5adb96335721c2230d4f7f2 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | def distancia_euclidiana(x1,x2,y1,y2):
d = ((x1 + x2)/2), ((y1 + y2)/2)
return dx | [
"you@example.com"
] | you@example.com |
d68c7916badbfb576b18ac6ccf18e9336831c7fd | b05b89e1f6378905bbb62e2a2bf2d4f8e3187932 | /nonDuplicateNumber.py | 47a770f039bc1c3e8aa91571be7ab8bc35b4f9b9 | [
"MIT"
] | permissive | anishmo99/Daily-Interview-Pro | c959cd336209132aebad67a409df685e654cfdfc | d8724e8feec558ab1882d22c9ca63b850b767753 | refs/heads/master | 2023-04-10T08:09:46.089227 | 2021-04-27T07:27:38 | 2021-04-27T07:27:38 | 269,157,996 | 1 | 1 | MIT | 2020-06-08T07:09:19 | 2020-06-03T17:57:21 | C++ | UTF-8 | Python | false | false | 233 | py | def nonDuplicateNumber(nums):
nums.sort()
i=0
while(i<len(nums)-1):
if nums[i]!=nums[i+1]:
return nums[i]
i+=2
return nums[len(nums)-1]
# Demo (Python 2 print statement).
print nonDuplicateNumber([4, 3, 3, 2, 1, 2, 1])
# 4  -- the only element without a partner (the original "# 1" note was wrong)
| [
"ani10sh@gmail.com"
] | ani10sh@gmail.com |
d0200942933649d20982e8d0c1ba94566e8a5c5e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/1767.py | 32d55ab5e1dd3b425a76fa09552b6e7c540fce0e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py | def getRow( s ):
return [
s[0], s[1], s[2], s[3]
]
def checkCell(s, scores):
    """Fold one board cell into the running ``(O, X)`` tally.

    'O' and 'X' credit their own side, 'T' credits both players, and any
    other cell (e.g. '.') leaves the tally unchanged.
    """
    o_count, x_count = scores
    if s in ('O', 'T'):
        o_count += 1
    if s in ('X', 'T'):
        x_count += 1
    return (o_count, x_count)
def findWinner( board ):
    """Decide the state of a 4x4 Tic-Tac-Toe-Tomek board.

    ``board`` is indexed as board[row][col]; 'T' counts for both players.
    Returns 'X won', 'O won', 'Draw', or 'Game has not completed'.
    """
    j = 0
    dots = 0
    #horizontal
    # (empty cells are counted only here, since this pass visits every cell)
    while j < 4:
        i = 0
        (O, X) = (0, 0)
        while i < 4:
            (O, X) = checkCell( board[j][i], (O, X))
            if board[j][i] == '.': dots += 1
            i += 1
        if X == 4: return 'X won'
        if O == 4: return 'O won'
        j += 1
    j=0
    #vertical
    while j < 4:
        i = 0
        (O, X) = (0, 0)
        while i < 4:
            (O, X) = checkCell( board[i][j], (O, X))
            i += 1
        if X == 4: return 'X won'
        if O == 4: return 'O won'
        j += 1
    i=0
    #diagonal
    # main diagonal (top-left to bottom-right)
    (O, X) = (0, 0)
    while i < 4:
        (O, X) = checkCell( board[i][i], (O, X))
        i += 1
    if X == 4: return 'X won'
    if O == 4: return 'O won'
    (O, X) = (0, 0)
    i=0
    # anti-diagonal (bottom-left to top-right)
    while i < 4:
        (O, X) = checkCell( board[3-i][i], (O, X))
        i += 1
    if X == 4: return 'X won'
    if O == 4: return 'O won'
    if dots == 0:
        return 'Draw'
    else:
        return 'Game has not completed'
# Driver: read the Code Jam input file, decide each 4x4 board, and write
# the answers to output.txt in "Case #k: <verdict>" form.
input = open('A-large.in','r')  # NOTE: shadows the builtin ``input``
n = int(input.readline())
i = n
ret = ''
while i > 0:
    case = []
    j = 4
    while j > 0:
        case.append(getRow(input.readline()))
        j -= 1
    input.readline()  # skip the blank line between test cases
    i -= 1
    ret += 'Case #'+str(n-i)+': '+findWinner(case)+'\n'
input.close()
output = open('output.txt', 'w')
output.write(ret)
output.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
4141d7cc2fbc369307bee0785e1cf1982a1e9a81 | f5652eab2e9efa7ec26cade6c5ecabdd5a067929 | /src/lib/Bcfg2/Options/OptionGroups.py | 70cb5d0dda8a4f57a39a8a38bbe8e52a03f9b0da | [
"BSD-2-Clause",
"mpich2",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dhutty/bcfg2 | 949f3053f3f54beb304fad50182d8c12c72f73ca | fdf47ccf128645bd099f7da80487320e086d17fe | refs/heads/master | 2020-12-24T17:44:06.213854 | 2013-08-12T13:40:08 | 2013-08-12T13:40:08 | 1,522,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,578 | py | """ Option grouping classes """
import re
import copy
import fnmatch
from Options import Option # pylint: disable=W0403
from itertools import chain
__all__ = ["OptionGroup", "ExclusiveOptionGroup", "Subparser",
"WildcardSectionGroup"]
class OptionContainer(list):
    """ Parent class of all option groups """

    def list_options(self):
        """ Get a list of all options contained in this group,
        including options contained in option groups in this group,
        and so on. """
        return list(chain.from_iterable(o.list_options() for o in self))

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, list.__repr__(self))

    def add_to_parser(self, parser):
        """ Add this option group to a :class:`Bcfg2.Options.Parser`
        object. """
        for opt in self:
            opt.add_to_parser(parser)
class OptionGroup(OptionContainer):
    """ Generic option group that is used only to organize options.
    This uses :meth:`argparse.ArgumentParser.add_argument_group`
    behind the scenes. """

    def __init__(self, *items, **kwargs):
        r"""
        :param \*args: Child options
        :type \*args: Bcfg2.Options.Option
        :param title: The title of the option group
        :type title: string
        :param description: A longer description of the option group
        :param description: string
        """
        # ``title`` is mandatory; ``description`` is optional.
        self.title = kwargs.pop('title')
        self.description = kwargs.pop('description', None)
        OptionContainer.__init__(self, items)

    def add_to_parser(self, parser):
        arggroup = parser.add_argument_group(self.title, self.description)
        OptionContainer.add_to_parser(self, arggroup)
class ExclusiveOptionGroup(OptionContainer):
    """ Option group that ensures that only one argument in the group
    is present. This uses
    :meth:`argparse.ArgumentParser.add_mutually_exclusive_group`
    behind the scenes."""

    def __init__(self, *items, **kwargs):
        r"""
        :param \*args: Child options
        :type \*args: Bcfg2.Options.Option
        :param required: Exactly one argument in the group *must* be
                         specified.
        :type required: boolean
        """
        self.required = kwargs.pop('required', False)
        OptionContainer.__init__(self, items)

    def add_to_parser(self, parser):
        exclusive = parser.add_mutually_exclusive_group(required=self.required)
        OptionContainer.add_to_parser(self, exclusive)
class Subparser(OptionContainer):
    """ Option group that adds options in it to a subparser. This
    uses a lot of functionality tied to `argparse Sub-commands
    <http://docs.python.org/dev/library/argparse.html#sub-commands>`_.
    The subcommand string itself is stored in the
    :attr:`Bcfg2.Options.setup` namespace as ``subcommand``.
    This is commonly used with :class:`Bcfg2.Options.Subcommand`
    groups.
    """
    #: Class-level registry mapping each parser to its subparsers action,
    #: because argparse allows add_subparsers() only once per parser and
    #: several Subparser groups may target the same parser.
    _subparsers = dict()
    def __init__(self, *items, **kwargs):
        r"""
        :param \*args: Child options
        :type \*args: Bcfg2.Options.Option
        :param name: The name of the subparser. Required.
        :type name: string
        :param help: A help message for the subparser
        :param help: string
        """
        self.name = kwargs.pop('name')
        self.help = kwargs.pop('help', None)
        OptionContainer.__init__(self, items)
    def __repr__(self):
        return "%s %s(%s)" % (self.__class__.__name__,
                              self.name,
                              list.__repr__(self))
    def add_to_parser(self, parser):
        # Create (or reuse) the parser's subparsers action, register a
        # sub-parser under our name, and attach all child options to it.
        if parser not in self._subparsers:
            self._subparsers[parser] = parser.add_subparsers(dest='subcommand')
        subparser = self._subparsers[parser].add_parser(self.name,
                                                        help=self.help)
        OptionContainer.add_to_parser(self, subparser)
class WildcardSectionGroup(OptionContainer, Option):
    """ WildcardSectionGroups contain options that may exist in
    several different sections of the config that match a glob.  It
    works by creating options on the fly to match the sections
    described in the glob.  For example, consider:
    .. code-block:: python
        options = [
            Bcfg2.Options.WildcardSectionGroup(
                Bcfg2.Options.Option(cf=("myplugin:*", "number"), type=int),
                Bcfg2.Options.Option(cf=("myplugin:*", "description"))]
    If the config file contained ``[myplugin:foo]`` and
    ``[myplugin:bar]`` sections, then this would automagically create
    options for each of those.  The end result would be:
    .. code-block:: python
        >>> Bcfg2.Options.setup
        Namespace(myplugin_bar_description='Bar description', myplugin_bar_number=2, myplugin_foo_description='Foo description', myplugin_foo_number=1, myplugin_sections=['myplugin:foo', 'myplugin:bar'])
    All options must have the same section glob.
    The options are stored in an automatically-generated destination
    given by::
        <prefix><section>_<destination>
    ``<destination>`` is the original `dest
    <http://docs.python.org/dev/library/argparse.html#dest>`_ of the
    option. ``<section>`` is the section that it's found in.
    ``<prefix>`` is automatically generated from the section glob by
    replacing all consecutive characters disallowed in Python variable
    names into underscores.  (This can be overridden with the
    constructor.)
    This group stores an additional option, the sections themselves,
    in an option given by ``<prefix>sections``.
    """
    #: Regex to automatically get a destination for this option
    _dest_re = re.compile(r'(\A(_|[^A-Za-z])+)|((_|[^A-Za-z0-9])+)')
    def __init__(self, *items, **kwargs):
        r"""
        :param \*args: Child options
        :type \*args: Bcfg2.Options.Option
        :param prefix: The prefix to use for options generated by this
                       option group.  By default this is generated
                       automatically from the config glob; see above
                       for details.
        :type prefix: string
        :param dest: The destination for the list of known sections
                     that match the glob.
        :param dest: string
        """
        OptionContainer.__init__(self, [])
        # All templates must share one section glob; take it from the first.
        self._section_glob = items[0].cf[0]
        # get a default destination
        self._prefix = kwargs.get("prefix",
                                  self._dest_re.sub('_', self._section_glob))
        Option.__init__(self, dest=kwargs.get('dest',
                                              self._prefix + "sections"))
        self._options = items
    def list_options(self):
        # Include this pseudo-option (the section list) plus the children
        # generated so far from the config.
        return [self] + OptionContainer.list_options(self)
    def from_config(self, cfp):
        # For every config section matching the glob, instantiate a copy of
        # each template option bound to that section, register it with all
        # parsers, and return the list of matching section names.
        sections = []
        for section in cfp.sections():
            if fnmatch.fnmatch(section, self._section_glob):
                sections.append(section)
                newopts = []
                for opt_tmpl in self._options:
                    option = copy.deepcopy(opt_tmpl)
                    option.cf = (section, option.cf[1])
                    option.dest = self._prefix + section + "_" + option.dest
                    newopts.append(option)
                self.extend(newopts)
                for parser in self.parsers:
                    parser.add_options(newopts)
        return sections
    def add_to_parser(self, parser):
        Option.add_to_parser(self, parser)
        OptionContainer.add_to_parser(self, parser)
| [
"chris.a.st.pierre@gmail.com"
] | chris.a.st.pierre@gmail.com |
d32468db483cb91e5c3d3e4cfbbe931b6e5f991c | 92237641f61e9b35ff6af6294153a75074757bec | /Machine Learning/수업 자료/3주차_기계학습 알고리즘/14일차_로지스틱회귀분석/mnistNeuralNet03.py | 77b742b01f40b34c1e9f6e4e0735237777105fdf | [] | no_license | taepd/study | 8ded115765c4f804813e255d9272b727bf41ec80 | 846d3f2a5a4100225b750f00f992a640e9287d9c | refs/heads/master | 2023-03-08T13:56:57.366577 | 2022-05-08T15:24:35 | 2022-05-08T15:24:35 | 245,838,600 | 0 | 1 | null | 2023-03-05T23:54:41 | 2020-03-08T15:25:15 | JavaScript | UTF-8 | Python | false | false | 2,587 | py | # mnistNeuralNet01.py
from tensorflow.python.keras.datasets import mnist
from keras.utils import to_categorical
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense
import matplotlib.pyplot as plt
image_row, image_col, image_dim = 28, 28, 28*28
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train[1])
x_train = x_train.reshape(60000, image_dim)
x_train = x_train.astype('float') / 255.0
print(x_train[1])
x_test = x_test.reshape(10000, image_dim)
x_test = x_test.astype('float') / 255.0
# one-hot encoding
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print('y_train[0]:', y_train[0])
# 모델 생성
model = Sequential()
# one-hot encoding 이후 이므로 컬럼수로 정답수 계산. np.unique()하면 2 나옴(0,1뿐이므로)
NB_CLASSES = y_train.shape[1]
print('nb: ', NB_CLASSES)
HIDDEN_LAYER_1 = 512
model.add(Dense(units=HIDDEN_LAYER_1, input_shape=(image_dim,), activation='relu'))
model.add(Dense(units=NB_CLASSES, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
print('model.fit() 중입니다.')
hist = model.fit(x_train, y_train, validation_split=0.3, epochs=5, batch_size=64, verbose=1)
print('히스토리 목록 보기')
print(hist.history.keys())
print('-'*30)
for key, value in hist.history.items():
print(f'키: {key}, 값: {value}')
print('-'*30)
print('model.evaluate 실행중')
score = model.evaluate(x_test, y_test, verbose=1)
print(f'test_acc: {score[1]: .4f}')
print('-'*30)
print(f'test_loss: {score[0]: .4f}')
print('-'*30)
# # 모델의 정확도에 대한 히스토리를 시각화
# plt.title('model accuracy')
# plt.xlabel('epoch')
# plt.ylabel('accuracy')
#
# accuracy = hist.history['accuracy']
# val_accuracy = hist.history['val_accuracy']
#
# plt.plot(accuracy)
# plt.plot(val_accuracy)
#
# # plot 이후에 legend 설정해야 한다?
# plt.legend(['train', 'test'], loc='upper left')
#
# filename = 'mnistNeuralNet01_01.png'
# plt.savefig(filename)
# print(filename + ' 파일 저장됨')
#
# # 모델의 손실(비용)함수에 대한 히스토리를 시각화
#
# plt.figure()
# plt.title('model loss')
# plt.xlabel('epoch')
# plt.ylabel('loss')
#
# accuracy = hist.history['loss']
# val_accuracy = hist.history['val_loss']
#
# plt.plot(accuracy)
# plt.plot(val_accuracy)
#
# # plot 이후에 legend 설정해야 한다?
# plt.legend(['train', 'test'], loc='best')
#
# filename = 'mnistNeuralNet01_02.png'
# plt.savefig(filename)
# print(filename + ' 파일 저장됨') | [
"taepd1@gmail.com"
] | taepd1@gmail.com |
3bb1ce23ad811a88a750d495850bd3be33a763b0 | 19fb971011a0d3977abfde325f77eedbff180b23 | /kf/kf/doctype/statement_of_account_for_gl/test_statement_of_account_for_gl.py | 93ab173a19461e78fccded89a5ae3a74472b26b3 | [
"MIT"
] | permissive | sagar30051991/KF-HR | a489174181869c0300f4c659e41162fcb84ce80b | 182c52563243fd609473bb5411ad61f789e9441e | refs/heads/master | 2021-01-18T23:33:26.702772 | 2016-05-16T05:20:14 | 2016-05-16T05:20:14 | 58,905,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Statement of Account for GL')
class TestStatementofAccountforGL(unittest.TestCase):
    # Placeholder: no tests written yet for this doctype; ``pass`` keeps the
    # module importable by the test runner.
    pass
| [
"sagarshiragawakar@gmail.com"
] | sagarshiragawakar@gmail.com |
4e09cc6f747a652d23a1385e5d090c163e840bc7 | cb2882bd79c4af7a145f0639a5c7f473b1d22456 | /python/tvm/topi/cumsum.py | 2013a352874d76955b69a5223da7f6bb6280377c | [
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] | permissive | vinx13/tvm | 8e37dd7735eeadc476596ba96f683a93f44a26c3 | fe398bf206d01b54a2d74603e6bc9c012d63b2c9 | refs/heads/master | 2023-08-30T17:50:49.337568 | 2021-02-17T08:16:26 | 2021-02-17T08:16:26 | 141,384,391 | 4 | 0 | Apache-2.0 | 2022-09-21T18:53:08 | 2018-07-18T05:16:49 | Python | UTF-8 | Python | false | false | 4,159 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Cumsum operator"""
from ..tir import decl_buffer, ir_builder
from ..te import extern
from .utils import prod, get_const_int
from .math import cast
def cumsum(data, axis=None, dtype=None, exclusive=None):
    """Numpy style cumsum op. Return the cumulative sum of the elements along a given axis.

    Parameters
    ----------
    data : tvm.te.Tensor
        The input data to the operator.

    axis : int, optional
        Axis along which the cumulative sum is computed. The default (None) is to compute
        the cumsum over the flattened array.

    dtype : string, optional
        Type of the returned array and of the accumulator in which the elements are summed.
        If dtype is not specified, it defaults to the dtype of data.

    exclusive : int, optional
        If set to 1 will return exclusive sum in which the first element is not
        included. In other terms, if set to 1, the j-th output element would be
        the sum of the first (j-1) elements. Otherwise, it would be the sum of
        the first j elements.

    Returns
    -------
    result : tvm.te.Tensor
        The result has the same size as data, and the same shape as data if axis is not None.
        If axis is None, the result is a 1-d array.
    """
    if dtype is None or dtype == "":
        dtype = data.dtype
    def maybe_cast(x):
        # Only insert a cast when the accumulator dtype differs from the input.
        if dtype != data.dtype:
            return cast(x, dtype)
        return x
    # Products of the dimensions before/after the cumsum axis; together they
    # enumerate every independent scan line of the tensor.
    axis_mul_before = 1
    axis_mul_after = 1
    if axis is None:
        # Flattened cumsum: treat the whole tensor as a single axis.
        axis = 0
        cumsum_axis_len = prod(data.shape)
        shape = (cumsum_axis_len,)
    else:
        if not isinstance(axis, int):
            axis = get_const_int(axis)
        shape = data.shape
        cumsum_axis_len = shape[axis]
        if axis < 0:
            axis = len(shape) + axis
        for i, value in enumerate(shape, 0):
            if i < axis:
                axis_mul_before *= value
            elif i > axis:
                axis_mul_after *= value
    if exclusive is None:
        exclusive = 0
    def gen_ir(data_buf, out_buf):
        # Lower to TIR: one parallel loop over all (before, after) index
        # pairs, each performing a sequential scan along the cumsum axis.
        ib = ir_builder.create()
        data_buf = ib.buffer_ptr(data_buf)
        out_buf = ib.buffer_ptr(out_buf)
        with ib.for_range(0, axis_mul_before * axis_mul_after, "fused", kind="parallel") as fused:
            i = fused // axis_mul_after
            j = fused % axis_mul_after
            base_idx = i * cumsum_axis_len * axis_mul_after + j
            if exclusive == 0:
                out_buf[base_idx] = maybe_cast(data_buf[base_idx])
            else:
                # Exclusive scan starts from the additive identity.
                out_buf[base_idx] = cast(0, dtype)
            with ib.for_range(0, cumsum_axis_len - 1, "_k") as _k:
                k = _k + 1
                cur_idx = base_idx + k * axis_mul_after
                prev_idx = base_idx + (k - 1) * axis_mul_after
                if exclusive == 0:
                    out_buf[cur_idx] = out_buf[prev_idx] + maybe_cast(data_buf[cur_idx])
                else:
                    # Exclusive: accumulate the *previous* input element.
                    out_buf[cur_idx] = out_buf[prev_idx] + maybe_cast(data_buf[prev_idx])
        return ib.get()
    out_buf = decl_buffer(shape, dtype, "out_buf")
    return extern(
        [shape],
        [data],
        lambda ins, outs: gen_ir(ins[0], outs[0]),
        dtype=dtype,
        out_buffers=[out_buf],
        name="cumsum_generic",
        tag="cumsum_generic",
    )
| [
"noreply@github.com"
] | vinx13.noreply@github.com |
c5c27117168845db40692da47f0e4b594df6e4e8 | 73e063b43d0890f13cf1936826e2a1833447806f | /sqlalchemy/query.py | 078159db8bdaceb69c6ad698741a117d24e62e41 | [] | no_license | sodewumi/hb-skills | f1814495ee5540243449b1df3c43f3ce62dae8fe | 502f398a75568804393272aa896621811eb7bacb | refs/heads/master | 2021-01-19T13:53:00.523056 | 2015-05-11T16:06:52 | 2015-05-11T16:06:52 | 34,186,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | # Note: this file will not run. It is only for recording answers.
# Part 2: Write queries
# Get the brand with the **id** of 8.
Brand.query.filter_by(id=8).one()

# Get all models with the **name** Corvette and the **brand_name** Chevrolet.
# (was ``Brand.query`` -- the question asks for models, so query Model)
Model.query.filter_by(name="Corvette", brand_name="Chevrolet").all()

# Get all models that are older than 1960.
# (the original line was left unfinished; assumes a ``Model.year``
# column -- TODO confirm against the model definitions)
Model.query.filter(Model.year < 1960).all()

# Get all brands that were founded after 1920.
# (assumes a ``Brand.founded`` column -- TODO confirm)
Brand.query.filter(Brand.founded > 1920).all()

# Get all models with names that begin with "Cor".
Model.query.filter(Model.name.like("Cor%")).all()

# Get all brands with that were founded in 1903 and that are not yet discontinued.
Brand.query.filter(Brand.founded == 1903, Brand.discontinued.is_(None)).all()

# Get all brands with that are either discontinued or founded before 1950.
Brand.query.filter(db.or_(Brand.discontinued.isnot(None),
                          Brand.founded < 1950)).all()

# Get any model whose brand_name is not Chevrolet.
Model.query.filter(Model.brand_name != "Chevrolet").all()
# Part 2.5: Advanced and Optional
def search_brands_by_name(mystr):
    """Return all brands whose name contains *mystr* (case-insensitive)."""
    return Brand.query.filter(Brand.name.ilike("%" + mystr + "%")).all()

def get_models_between(start_year, end_year):
    """Return models whose year is in [start_year, end_year).

    Assumes a ``Model.year`` column -- TODO confirm against the models module.
    """
    return Model.query.filter(Model.year >= start_year,
                              Model.year < end_year).all()
# Part 3: Discussion Questions
# 1. What is the returned value and datatype of ``Brand.query.filter_by(name='Ford')``?
# 2. In your own words, what is an association table, and what *type* of relationship
# does an association table manage?
| [
"info@hackbrightacademy.com"
] | info@hackbrightacademy.com |
628e23f96ff86b40610fd3627ca993c1abd63e56 | 4137167a68f487343ef5cfd4e99e730fb39efa56 | /polyfile/fileutils.py | c43d5b9f09ccb380558b05c773a85496fcaecf95 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | chrismattmann/polyfile | b57ccc72ce2fcfd67c885815204b40d61924c0cb | 6b86b2d91c9af19abc1520c1339935569b2cc964 | refs/heads/master | 2020-09-24T08:42:10.991623 | 2019-11-25T22:16:29 | 2019-11-25T22:16:29 | 225,717,204 | 2 | 1 | Apache-2.0 | 2019-12-03T21:14:40 | 2019-12-03T21:14:39 | null | UTF-8 | Python | false | false | 7,221 | py | import mmap
import os
import tempfile as tf
import sys
def make_stream(path_or_stream, mode='rb', close_on_exit=None):
    """Return *path_or_stream* unchanged when it is already a FileStream,
    otherwise wrap it in a new FileStream."""
    if isinstance(path_or_stream, FileStream):
        return path_or_stream
    return FileStream(path_or_stream, mode=mode, close_on_exit=close_on_exit)
class Tempfile:
    """Context manager that materialises *contents* as a named temporary
    file on disk, yields its path, and removes the file on exit."""

    def __init__(self, contents, prefix=None, suffix=None):
        self._temp = None
        self._data = contents
        self._prefix = prefix
        self._suffix = suffix

    def __enter__(self):
        handle = tf.NamedTemporaryFile(prefix=self._prefix,
                                       suffix=self._suffix,
                                       delete=False)
        handle.write(self._data)
        handle.flush()
        handle.close()
        self._temp = handle
        return handle.name

    def __exit__(self, exc_type, exc_value, tb):
        if self._temp is not None:
            os.unlink(self._temp.name)
            self._temp = None
class PathOrStdin:
    """Context manager yielding *path* itself, or -- when path is '-' -- the
    path of a temporary file filled with everything read from stdin."""

    def __init__(self, path):
        self._path = path
        # Stdin is consumed eagerly at construction time for the '-' case.
        self._tempfile = Tempfile(sys.stdin.buffer.read()) if path == '-' else None

    def __enter__(self):
        if self._tempfile is None:
            return self._path
        return self._tempfile.__enter__()

    def __exit__(self, *args, **kwargs):
        if self._tempfile is not None:
            return self._tempfile.__exit__(*args, **kwargs)
class FileStream:
    def __init__(self, path_or_stream, start=0, length=None, mode='rb', close_on_exit=None):
        # Wraps either a filesystem path (opened here) or an existing
        # seekable, readable stream -- including another FileStream, which
        # produces a nested sub-window view [start, start+length).
        if isinstance(path_or_stream, str):
            self._stream = open(path_or_stream, mode)
            if close_on_exit is None:
                # We opened it, so we own it by default.
                close_on_exit = True
        else:
            if not path_or_stream.seekable():
                raise ValueError('FileStream can only wrap streams that are seekable')
            elif not path_or_stream.readable():
                raise ValueError('FileStream can only wrap streams that are readable')
            self._stream = path_or_stream
        if isinstance(path_or_stream, FileStream):
            # Clamp this window to the parent FileStream's extent.
            if length is None:
                self._length = len(path_or_stream) - start
            else:
                self._length = min(length, len(path_or_stream))
        else:
            filesize = os.path.getsize(self._stream.name)
            if length is None:
                self._length = filesize - start
            else:
                self._length = min(filesize, length) - start
            if close_on_exit is None:
                close_on_exit = False
        self._name = self._stream.name
        self.start = start
        self.close_on_exit = close_on_exit
        self._entries = 0
        self._listeners = []  # callables invoked as listener(self, pos) on read()
        self._root = None  # lazily-cached bottom-most raw stream (see ``root``)
    def __len__(self):
        # Length of this window, not of the underlying file.
        return self._length
    def add_listener(self, listener):
        # ``listener`` is called as listener(stream, pos) on every read().
        self._listeners.append(listener)
    def remove_listener(self, listener):
        # Remove *all* occurrences; iterate backwards so deletion is safe.
        ret = False
        for i in reversed(range(len(self._listeners))):
            if self._listeners[i] == listener:
                del self._listeners[i]
                ret = True
        return ret
    def seekable(self):
        return True
    def writable(self):
        return False
    def readable(self):
        return True
    @property
    def name(self):
        return self._name
    @property
    def root(self):
        # Bottom-most wrapped stream (unwraps nested FileStreams); cached.
        if self._root is None:
            if isinstance(self._stream, FileStream):
                self._root = self._stream.root
            else:
                self._root = self._stream
        return self._root
def save_pos(self):
f = self
class SP:
def __init__(self):
self.pos = f.root.tell()
def __enter__(self, *args, **kwargs):
return f
def __exit__(self, *args, **kwargs):
f.root.seek(self.pos)
return SP()
def fileno(self):
return self._stream.fileno()
def offset(self):
if isinstance(self._stream, FileStream):
return self._stream.offset() + self.start
else:
return self.start
def seek(self, offset, from_what=0):
if from_what == 1:
offset = self.tell() + offset
elif from_what == 2:
offset = len(self) + offset
if offset - self.start > self._length:
raise IndexError(f"{self!r} is {len(self)} bytes long, but seek was requested for byte {offset}")
self._stream.seek(self.start + offset)
def tell(self):
return min(max(self._stream.tell() - self.start, 0), self._length)
def read(self, n=None, update_listeners=True):
if self._stream.tell() - self.start < 0:
# another context moved the position, so move it back to our zero index:
self.seek(0)
pos = 0
else:
pos = self.tell()
if update_listeners:
for listener in self._listeners:
listener(self, pos)
ls = len(self)
if pos >= ls:
return b''
elif n is None:
return self._stream.read()[:ls - pos]
else:
return self._stream.read(min(n, ls - pos))
def contains_all(self, *args):
if args:
with mmap.mmap(self.fileno(), 0, access=mmap.ACCESS_READ) as filecontent:
for string in args:
if filecontent.find(string, self.offset(), self.offset() + len(self)) < 0:
return False
return True
def tempfile(self, prefix=None, suffix=None):
class FSTempfile:
def __init__(self, file_stream):
self._temp = None
self._fs = file_stream
def __enter__(self):
self._temp = tf.NamedTemporaryFile(prefix=prefix, suffix=suffix, delete=False)
self._fs.seek(0)
self._temp.write(self._fs.read(len(self._fs)))
self._temp.flush()
self._temp.close()
return self._temp.name
def __exit__(self, type, value, traceback):
if self._temp is not None:
os.unlink(self._temp.name)
self._temp = None
return FSTempfile(self)
def __getitem__(self, index):
if isinstance(index, int):
self.seek(index)
return self.read(1)
elif not isinstance(index, slice):
raise ValueError(f"unexpected argument {index}")
if index.step is not None and index.step != 1:
raise ValueError(f"Invalid slice step: {index}")
length=None
if index.stop is not None:
if index.stop < 0:
length = len(self) + index.stop - index.start
else:
length = len(self) - (index.stop - index.start)
return FileStream(self, start=index.start, length=length, close_on_exit=False)
def __enter__(self):
self._entries += 1
return self
def __exit__(self, type, value, traceback):
self._entries -= 1
assert self._entries >= 0
if self._entries == 0 and self.close_on_exit:
self.close_on_exit = False
self._stream.close()
| [
"evan.sultanik@trailofbits.com"
] | evan.sultanik@trailofbits.com |
a21d90c8123d243d0d9f25a6bb5b26fef8402688 | f139a99d51cfa01a7892f0ac5bbb022c0cee0664 | /Pythonlogy/ShareYourSystem/Standards/Itemizers/Commander/draft/__init__ copy 4.py | d4d6123f0377e98d87a1ecf7f40eb6d9814659cc | [
"MIT"
] | permissive | Ledoux/ShareYourSystem | 90bb2e6be3088b458348afa37ace68c93c4b6a7a | 3a2ffabf46f1f68b2c4fd80fa6edb07ae85fa3b2 | refs/heads/master | 2021-01-25T12:14:34.118295 | 2017-01-12T14:44:31 | 2017-01-12T14:44:31 | 29,198,670 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,768 | py | # -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Commander gather Variables to set them with an UpdateList.
The command process can be AllSetsForEach (ie a map of the update succesively for each)
or a EachSetForAll (ie each set is a map of each).
NOTE : the walk and self attributes are always resetted to False after a call of command.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Itemizers.Pather"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
from ShareYourSystem.Standards.Itemizers import Getter,Setter
#</ImportSpecificModules>
#<DefineLocals>
CommandPrefixStr="--"
CommandWalkStr="..."
CommandSelfStr="/"
CommandAddStr="+"
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class CommanderClass(BaseClass):
	def default_init(
				self,
				_CommandTopDeriveCommanderRigidVariable=None,
				_CommandingKeyVariable=None,
				_CommandingSetVariable=None,
				_CommandingOrderStr="AllSetsForEachGet",
				_CommandingBeforeWalkRigidBool=False,
				_CommandingAfterWalkRigidBool=False,
				_CommandingBeforeSelfRigidBool=False,
				_CommandingAfterSelfRigidBool=False,
				_CommandingGetRigidBool=True,
				_CommandingSetRigidBool=True,
				_CommandingSetAttrOrCallRigidBool=False,
				_CommandingExtraKeyVariable=None,
				_CommandedValueVariablesList=None,
				_CommandedSetVariablesList=None,
				_CommandedExtraValueVariablesList=None,
				**_KwargVariablesDict
			):
		"""Declare the Commander's instance attributes.

		Per the framework's 'Do' convention, each leading-underscore keyword
		argument becomes a same-named instance attribute; construction is
		then delegated to the base Pather class.
		"""
		#Call the parent __init__ method
		BaseClass.__init__(self,**_KwargVariablesDict)
def do_command(self):
""" """
#/####################/#
# Determine the top Commander
#
#debug
'''
self.debug(
[
'First determine the CommandTopDeriveCommanderRigidVariable',
('self.',self,['CommandTopDeriveCommanderRigidVariable'])
]
)
'''
#Check
if self.CommandTopDeriveCommanderRigidVariable==None:
self.CommandTopDeriveCommanderRigidVariable=self
#/####################/#
# Adapt maybe the type for getting things to command
#
#Check
if self.CommandingGetRigidBool:
#debug
'''
self.debug(
[
'Adapt the type for getting things to command',
("self.",self,[
'CommandingKeyVariable',
'CommandingSetVariable',
'CommandingBeforeWalkRigidBool',
'CommandingBeforeSelfRigidBool'
])
]
)
'''
#init
self.CommandedValueVariablesList=SYS.GetList(
self.CommandingKeyVariable,
self
)
#init
self.CommandedExtraValueVariablesList=SYS.GetList(
self.CommandingExtraKeyVariable,
self
)
else:
#init
self.CommandedValueVariablesList=self.CommandingKeyVariable
#init
self.CommandedExtraValueVariablesList=self.CommandingExtraKeyVariable
#debug
'''
self.debug(
[
('self.',self,['CommandingKeyVariable']),
'in the end, self.CommandedValueVariablesList is ',
SYS._str(self.CommandedValueVariablesList)
]
)
'''
#/###################/#
# Inform the getted values who is the top
#
#debug
'''
self.debug(
[
'We inform the commanded values who is the top commander'
]
)
'''
#map
map(
lambda __CommandedValueVariable:
setattr(
__CommandedValueVariable,
'CommandTopDeriveCommanderRigidVariable',
self.CommandTopDeriveCommanderRigidVariable
),
self.CommandedValueVariablesList
)
#/###################/#
# Check if we have to walk before
#
#Check
if self.CommandingBeforeWalkRigidBool:
#debug
'''
self.debug(
[
'we are going to walk before the command',
'before we setCommand'
]
)
'''
#set
self.setCommand()
#Debug
'''
for __CommandedValueVariable in CommandedValueVariablesList:
#debug
self.debug(
'__CommandedValueVariable is '+SYS._str( __CommandedValueVariable)
)
#set
__CommandedValueVariable.set(
'GettingNewBool',False
).command(
*self.getDoing().values()
).set(
'GettingNewBool',True
)
'''
#debug
'''
self.debug(
[
'Ok we can setAttr now'
]
)
'''
#map the recursion but pay watch to not set new things to walk in...it is an infinite walk either !
map(
lambda __CommandedValueVariable:
__CommandedValueVariable.setAttr(
'GettingNewBool',False
).command(
).setAttr(
'GettingNewBool',True
),
self.CommandedValueVariablesList+self.CommandedExtraValueVariablesList
)
#/####################/#
# Adapt maybe the type for setting things in the commanded variables
#
#Check
if self.CommandingSetRigidBool:
#debug
'''
self.debug(
[
'Adapt the type for setting things in the commanded variables',
("self.",self,['CommandingSetVariable'])
]
)
'''
#inits
self.CommandedSetVariablesList=SYS.SetList(self.CommandingSetVariable)
else:
#alias direct
self.CommandedSetVariablesList=self.CommandingSetVariable
#debug
'''
self.debug(
[
'in the end, CommandedSetVariablesList is ',
SYS._str(CommandedSetVariablesList)
]
)
'''
#/###################/#
# Ok now we command locally
#
#Check
if self.CommandingBeforeSelfRigidBool:
#debug
'''
self.debug(
[
'We command before self here',
('self.',self,[
'CommandingSetRigidBool',
'CommandingSetAttrOrCallRigidBool'
])
]
)
'''
#Check
if self.CommandingSetAttrOrCallRigidBool==False:
#add
self.mapSet(
self.CommandedSetVariablesList
)
else:
#add
map(
lambda __ElementVariable:
self.setAttrOrCall(
__ElementVariable
),
self.CommandedSetVariablesList
)
#Check for the order
if self.CommandingOrderStr=="AllSetsForEachGet":
#debug
'''
self.debug(
[
'Ok now we do a AllSetsForEachGet'
]
)
'''
#Debug
"""
for __CommandedValueVariable in CommandedValueVariablesList:
#debug
self.debug(
[
'__CommandedValueVariable is ',
SYS._str(__CommandedValueVariable),
'CommandedSetVariablesList is ',
SYS._str(CommandedSetVariablesList)
]
)
#map
map(
lambda __CommandedSetVariable:
__CommandedValueVariable.set(
*__CommandedSetVariable
),
CommandedSetVariablesList
)
"""
#Check
if self.CommandingSetAttrOrCallRigidBool:
#debug
'''
self.debug(
[
'map a SetAttrOrCallBool',
('self.',self,[
'CommandedValueVariablesList',
'CommandedSetVariablesList'
])
]
)
'''
#map
map(
lambda __CommandedValueVariable:
map(
lambda __CommandedSetVariable:
__CommandedValueVariable.setAttrOrCall(
__CommandedSetVariable
),
self.CommandedSetVariablesList
),
self.CommandedValueVariablesList
)
#debug
'''
self.debug(
[
'Ok end of SetAttrOrCallBool'
]
)
'''
else:
#debug
'''
self.debug(
[
'We call a map set',
('self.',self,[
'CommandedValueVariablesList',
'CommandedSetVariablesList'
])
]
)
'''
#map
map(
lambda __CommandedValueVariable:
map(
lambda __CommandedSetVariable:
__CommandedValueVariable.set(
*__CommandedSetVariable
)
if hasattr(__CommandedValueVariable,'set')
else None,
self.CommandedSetVariablesList
),
self.CommandedValueVariablesList
)
elif self.CommandingOrderStr=="EachSetForAllGets":
#Check
if self.CommandingSetAttrOrCallRigidBool:
#map
map(
lambda __CommandedSetVariable:
map(
lambda __CommandedValueVariable:
__CommandedValueVariable.setAttrOrCall(
__CommandedSetVariable
),
self.CommandedValueVariablesList
),
self.CommandedSetVariablesList
)
else:
#map
map(
lambda __CommandedSetVariable:
map(
lambda __CommandedValueVariable:
__CommandedValueVariable.set(
*__CommandedSetVariable
),
self.CommandedValueVariablesList
),
self.CommandedSetVariablesList
)
#Check
if self.CommandingAfterSelfRigidBool:
#debug
self.debug(
[
'We command after self here'
]
)
#Check
if self.CommandingSetAttrOrCallRigidBool==False:
#add
self.mapSet(
self.CommandedSetVariablesList
)
else:
#add
map(
lambda __ElementVariable:
self.setAttrOrCall(
__ElementVariable
),
self.CommandedSetVariablesList
)
#/###################/#
# And we check for a walk after
#
#Check
if self.CommandingAfterWalkRigidBool:
#debug
'''
self.debug(
[
'we are going to walk after the command',
#'self.CommandedValueVariablesList is '+SYS._str(
# self.CommandedValueVariablesList),
#('self.',self,['CommandingKeyVariable']),
'We have to determine the things to propagate',
'CommandingKeyVariable and CommandingSetVariable notably ',
'if it is None in the commanded value"
]
)
'''
#set
self.setCommand()
#Debug
'''
for __CommandedValueVariable in CommandedValueVariablesList:
#debug
self.debug(
'__CommandedValueVariable is '+SYS._str( __CommandedValueVariable)
)
#set
__CommandedValueVariable.set(
'GettingNewBool',False
).command(
*self.getDoing().values()
).set(
'GettingNewBool',True
)
'''
#debug
self.debug(
[
'Ok we can command now',
('self.',self,[
'CommandedValueVariablesList',
'CommandedExtraValueVariablesList'
])
]
)
#map the recursion but pay watch to not set new things to walk in...it is an infinite walk either !
map(
lambda __CommandedValueVariable:
__CommandedValueVariable.setAttr(
'GettingNewBool',False
).command(
).setAttr(
'GettingNewBool',True
)
if hasattr(
__CommandedValueVariable,
'command'
)
else None,
self.CommandedValueVariablesList+self.CommandedExtraValueVariablesList
)
#/#######################/#
# Reset always these values to False
#
#set
self.CommandingBeforeWalkRigidBool=False
self.CommandingAfterWalkRigidBool=False
self.CommandingBeforeSelfRigidBool=False
self.CommandingAfterSelfRigidBool=False
self.CommandingSetAttrOrCallRigidBool=False
self.CommandingGetRigidBool=True
self.CommandingSetRigidBool=True
self.CommandTopDeriveCommanderRigidVariable=None
#debug
'''
self.debug(
[
'End of the command'
]
)
'''
	def setCommand(self):
		"""Propagate the current command settings onto every commanded child.

		Children that already define CommandingKeyVariable /
		CommandingSetVariable keep their own values; only unset (None) slots
		inherit from self.  The remaining command flags are copied verbatim.

		NOTE(review): relies on Python 2 semantics -- map/zip/dict.keys()
		returning lists and map being used for its side effects.
		"""
		#/##############/#
		# Get all the commanding attributes
		#
		#set
		CommandedOrderedDict=self.getDoing(
				SYS.CommanderClass
			)
		CommandedOrderedDict['CommandingBeforeSelfRigidBool']=False
		CommandedLiargVariablesList=CommandedOrderedDict.values()
		#/##############/#
		# Special get for KeyVariable and SetVariable
		#
		#get
		CommandedNewKeyVariable=CommandedLiargVariablesList[0]
		#get
		CommandedNewSetVariable=CommandedLiargVariablesList[1]
		#get (the remaining doing attributes as (key, value) pairs)
		CommandedNewTuplesList=zip(
				CommandedOrderedDict.keys()[2:],
				CommandedLiargVariablesList[2:]
			)
		#/##############/#
		# Map a setAttr
		#
		#map (inherit the key variable only where the child has none)
		map(
				lambda __CommandedValueVariable:
				__CommandedValueVariable.setAttr(
					'CommandingKeyVariable',
					CommandedNewKeyVariable
				)
				if __CommandedValueVariable.CommandingKeyVariable==None
				else None,
				self.CommandedValueVariablesList
			)
		#map (inherit the set variable only where the child has none)
		map(
				lambda __CommandedValueVariable:
				__CommandedValueVariable.setAttr(
					'CommandingSetVariable',
					CommandedNewSetVariable
				)
				if __CommandedValueVariable.CommandingSetVariable==None
				else None,
				self.CommandedValueVariablesList
			)
		#map (copy the remaining command flags verbatim)
		map(
				lambda __CommandedValueVariable:
				__CommandedValueVariable.mapSetAttr(
					CommandedNewTuplesList
				),
				self.CommandedValueVariablesList
			)
	def mimic_get(self):
		"""Override of the base get hook.

		A string key like '+a+b' maps a get over each '+'-separated part and
		stores their SYS.sum in GettedValueVariable; any other key defers to
		the base class get.
		"""
		#debug
		'''
		self.debug(
			('self.',self,[
					'GettingKeyVariable',
				])
		)
		'''
		#Check
		if type(self.GettingKeyVariable)==str:
			#Check (does the key start with the '+' command-add prefix?)
			if self.GettingKeyVariable.startswith(CommandAddStr):
				#split (drop the empty leading element before the first '+')
				AddGetKeyStrsList=self.GettingKeyVariable.split(CommandAddStr)[1:]
				#debug
				'''
				self.debug(
					[
						'We map get',
						'AddGetKeyStrsList is '+str(AddGetKeyStrsList)
					]
				)
				'''
				#map get
				AddVariablesList=self[
					Getter.GetMapStr
				](*AddGetKeyStrsList).ItemizedMapValueVariablesList
				#debug
				'''
				self.debug(
					[
						'We sum now',
						'AddVariablesList is '+SYS._str(AddVariablesList)
					]
				)
				'''
				#map get (aggregate the fetched variables)
				self.GettedValueVariable=SYS.sum(AddVariablesList)
				#return (stop the base get hook from running)
				return {'HookingIsBool':False}
		#return
		return BaseClass.get(self)
def mimic_set(self):
#debug
'''
self.debug(
('self.',self,[
'SettingKeyVariable',
'SettingValueVariable'
])
)
'''
#Check
if type(self.SettingKeyVariable)==str:
#Check
if self.SettingKeyVariable.startswith(
CommandPrefixStr
):
#debug
'''
self.debug(
'We command here'
)
'''
#deprefix
CommandGetKeyStr=SYS.deprefix(
self.SettingKeyVariable,
CommandPrefixStr
)
#Check
if CommandGetKeyStr.startswith(CommandWalkStr):
#debug
'''
self.debug(
'We command-walk here'
)
'''
#command
self.command(
SYS.deprefix(
CommandGetKeyStr,
CommandWalkStr
),
self.SettingValueVariable,
_AfterWalkRigidBool=True
)
#stop the setting
return {'HookingIsBool':False}
elif CommandGetKeyStr.startswith(CommandSelfStr+CommandWalkStr):
#debug
'''
self.debug(
'We command-self-walk here'
)
'''
#command
self.command(
SYS.deprefix(
CommandGetKeyStr,
CommandSelfStr+CommandWalkStr
),
self.SettingValueVariable,
_AfterWalkRigidBool=True,
_SelfBool=True
)
#stop the setting
return {'HookingIsBool':False}
else:
#command
self.command(
CommandGetKeyStr,
self.SettingValueVariable
)
#stop the setting
return {'HookingIsBool':False}
#Check
elif self.SettingKeyVariable.startswith(
CommandWalkStr
):
#debug
'''
self.debug(
'We walk-command here'
)
'''
CommandGetKeyStr=SYS.deprefix(
self.SettingKeyVariable,
CommandWalkStr
)
#Check
if CommandGetKeyStr.startswith(CommandPrefixStr):
#command
self.command(
SYS.deprefix(
CommandGetKeyStr,
CommandPrefixStr
),
self.SettingValueVariable,
_BeforeWalkRigidBool=True
)
#stop the setting
return {'HookingIsBool':False}
elif CommandGetKeyStr.startswith(CommandSelfStr):
#command
self.command(
SYS.deprefix(
CommandGetKeyStr,
CommandSelfStr+CommandPrefixStr
),
self.SettingValueVariable,
_BeforeWalkRigidBool=True,
_AfterSelfRigidBool=True
)
#stop the setting
return {'HookingIsBool':False}
#Check
elif self.SettingKeyVariable.startswith(
CommandSelfStr+CommandWalkStr+CommandPrefixStr
):
#command
self.command(
SYS.deprefix(
self.SettingKeyVariable,
CommandSelfStr+CommandWalkStr+CommandPrefixStr
),
self.SettingValueVariable,
_BeforeWalkRigidBool=True,
_BeforeSelfRigidBool=True
)
#stop the setting
return {'HookingIsBool':False}
#debug
'''
self.debug(
[
'Call the base set method',
'BaseClass is '+str(BaseClass),
('self.',self,['SettingKeyVariable'])
]
)
'''
#Call the base method
BaseClass.set(self)
#</DefineClass>
#</DefinePrint>
CommanderClass.PrintingClassSkipKeyStrsList.extend(
[
'CommandTopDeriveCommanderRigidVariable',
'CommandingKeyVariable',
'CommandingSetVariable',
'CommandingOrderStr',
'CommandingBeforeWalkRigidBool',
'CommandingAfterWalkRigidBool',
'CommandingBeforeSelfRigidBool',
'CommandingAfterSelfRigidBool',
'CommandingGetRigidBool',
'CommandingSetRigidBool',
'CommandingSetAttrOrCallRigidBool',
'CommandingExtraKeyVariable',
'CommandedValueVariablesList',
'CommandedSetVariablesList',
'CommandedLiargVariablesList',
'CommandedExtraValueVariablesList'
]
)
#<DefinePrint> | [
"erwan.ledoux@ens.fr"
] | erwan.ledoux@ens.fr |
19fbe3ae6d843d850ff23562c5b8b31594a1e2a0 | 4b64dd47fa9321b50875e96298a5f0766ffe97c9 | /leetcode/p122.py | 162b1bac0c661d114c05ccc4dc65293548240690 | [] | no_license | choupi/puzzle | 2ce01aa85201660da41378c6df093036fa2d3a19 | 736964767717770fe786197aecdf7b170d421c8e | refs/heads/master | 2021-07-23T13:17:45.086526 | 2021-07-20T11:06:28 | 2021-07-20T11:06:28 | 13,580,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | class Solution:
def maxProfit(self, prices: List[int]) -> int:
trans = False
buy_price = None
profit = 0
for i in range(len(prices)):
if trans:
if i == len(prices)-1 or prices[i+1]<prices[i]:
profit += prices[i] - buy_price
trans = False
else:
if i<len(prices)-1 and prices[i+1]>prices[i]:
buy_price = prices[i]
trans = True
return profit
| [
"chromosome460@gmail.com"
] | chromosome460@gmail.com |
1bd82cb13ed2585404f2436b99d32f708c8e9d82 | d96ffbadf4526db6c30a3278f644c1bc25ff4054 | /src/storage/cluster_storage.py | d972d9137b5369a66d792d63c71147b0a0d4d5b5 | [
"MIT"
] | permissive | dballesteros7/master-thesis-2015 | 07c03726f6ceb66e6d706ffe06e4e5eb37dcda75 | 8c0bf9a6eef172fc8167a30780ae0666f8ea2d88 | refs/heads/master | 2021-05-03T11:22:28.333473 | 2016-04-26T14:00:30 | 2016-04-26T14:00:30 | 44,601,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,148 | py | import logging
from pymongo import MongoClient
class ClusterStorage:
    """MongoDB persistence for photo clusters (``flickrdata.clusters``).

    Cluster documents are keyed by (city_name, bandwidth) -- the bandwidth
    being the clustering parameter the clusters were computed with.
    """
    def __init__(self):
        # Connect to a MongoDB server on localhost with default settings.
        client = MongoClient()
        self.collection = client.flickrdata.clusters
    def get_clusters(self, city_name, bandwidth):
        # All clusters stored for the given city/bandwidth combination.
        result = self.collection.find({
            'city_name': city_name,
            'bandwidth': bandwidth
        })
        return list(result)
    def get_cluster(self, cluster_id):
        # Single cluster document by its MongoDB _id (None if missing).
        return self.collection.find_one({
            '_id': cluster_id
        })
    def insert_clusters(self, city_name, bandwidth, entries, cluster_centers, cluster_labels):
        """Build cluster documents from clustering output and bulk-insert them.

        As used below: `entries` are photo documents with '_id' and 'owner'
        fields, `cluster_centers` are (latitude, longitude) pairs, and
        `cluster_labels` holds one cluster index per entry (-1 = noise).
        Returns the list of inserted cluster documents.
        """
        logging.info('Collecting cluster data.')
        clusters = []
        unique_users_per_cluster = []
        for cluster_center in cluster_centers:
            clusters.append({
                'latitude': cluster_center[0],
                'longitude': cluster_center[1],
                'photos': [],
                'unique_users': 0,
                'number_of_photos': 0,
                'city_name': city_name,
                'bandwidth': bandwidth
            })
            unique_users_per_cluster.append(set())
        for cluster_label, entry in zip(cluster_labels, entries):
            if cluster_label == -1:
                # Noise points (label -1) belong to no cluster.
                continue
            clusters[cluster_label]['photos'].append(entry['_id'])
            clusters[cluster_label]['number_of_photos'] += 1
            unique_users_per_cluster[cluster_label].add(entry['owner'])
        for cluster, unique_users in zip(clusters, unique_users_per_cluster):
            cluster['unique_users'] = len(unique_users)
        self.collection.insert_many(clusters, ordered=False)
        return clusters
    def get_cluster_for_photo(self, photo_id, city_name, bandwidth):
        # The cluster (if any) whose 'photos' array contains photo_id.
        return self.collection.find_one({
            'photos': photo_id,
            'city_name': city_name,
            'bandwidth': bandwidth
        })
    def get_top_ten_clusters(self, city_name, bandwidth):
        # Ten largest clusters, ordered by photo count, then distinct users.
        return self.collection.find({
            'city_name': city_name,
            'bandwidth': bandwidth
        }, sort=[('number_of_photos', -1), ('unique_users', -1)], limit=10)
| [
"diegob@student.ethz.ch"
] | diegob@student.ethz.ch |
22fb657a6dc5caa60f4275c099a2c55e2f160222 | fc678a0a5ede80f593a29ea8f43911236ed1b862 | /206-ReverseLinkedList.py | f14547c884d1681052b51980876ae39bad92b593 | [] | no_license | dq-code/leetcode | 4be0b1b154f8467aa0c07e08b5e0b6bd93863e62 | 14dcf9029486283b5e4685d95ebfe9979ade03c3 | refs/heads/master | 2020-12-13T15:57:30.171516 | 2017-11-07T17:43:19 | 2017-11-07T17:43:19 | 35,846,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # Definition for singly-linked list.
class ListNode(object):
    # Singly-linked list node: a payload plus a pointer to the next node.
    def __init__(self, x):
        self.val = x      # node payload
        self.next = None  # successor node (None at the tail)
class Solution(object):
    """LeetCode 206: reverse a singly linked list."""

    def reverseList(self, head):
        """Reverse the list iteratively in O(n) time and O(1) extra space.

        :type head: ListNode
        :rtype: ListNode (new head; None for an empty list)

        Improvement over the original: no dummy ListNode is allocated, which
        also removes the unnecessary coupling to the ListNode class.
        """
        prev = None
        node = head
        while node:
            nxt = node.next   # remember the rest of the list
            node.next = prev  # reverse this link
            prev = node
            node = nxt
        return prev
"dengqianwork@gmail.com"
] | dengqianwork@gmail.com |
9a11d37952046077e55c37b647b7389092a639fb | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/jinja2-2.6/ext/djangojinja2.py | 9f36ff8d409bc951b6171990891cd526725d0f79 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/jinja2-2.6/ext/djangojinja2.py | [
"ron.y.kagan@gmail.com"
] | ron.y.kagan@gmail.com |
24f48f4e7877569bac60585779454cfe233aaefd | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/133/usersdata/216/56457/submittedfiles/al15.py | 335df9423439a593dc804db11f003f83a6d2d251 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | # -*- coding: utf-8 -*-
for i in range(1000,9999,1):
if
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8616f51bef26951ccf5dd36eea282b9742d4f87d | 930309163b930559929323647b8d82238724f392 | /arc109_a.py | e8f0ea60a8a44793ef14ac47d27ac9e2ebf69e2c | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py |
SA,SB,X,Y=map(int,input().split())
SA-=1;
SB-=1;
N=202
G = [[]*N for _ in range(N)]
A = [a for a in range(100)]
B = [b+100 for b in range(100)]
for i in range(len(A)-1):
G[A[i]].append( (A[i+1], Y) )
G[A[i+1]].append( (A[i], Y) )
for i in range(len(B)-1):
G[B[i]].append( (B[i+1], Y) )
G[B[i+1]].append( (B[i], Y) )
for i in range(0, len(A)):
G[B[i]].append( (A[i], X) )
G[A[i]].append( (B[i], X) )
for i in range(1, len(A)):
G[B[i-1]].append( (A[i], X) )
G[A[i]].append( (B[i-1], X) )
import collections
import heapq
Entity = collections.namedtuple("Entity", ["node", "w"])
Entity.__lt__ = lambda self, other: self.w <= other.w
def dijkstra(start) -> "List":
    """Dijkstra's shortest paths over the module-level graph ``G`` (``N`` nodes).

    Returns a list of shortest distances from ``start``; nodes never reached
    keep the sentinel value -1.
    """
    dist = [-1 for _ in range(N)]
    dist[start] = 0
    que = []
    heapq.heappush(que, Entity(start, 0))
    done = [False for _ in range(N)]
    while que:
        i, w = heapq.heappop(que)
        # Skip nodes whose distance was already finalized.
        if done[i]:
            continue
        done[i] = True
        for j, c in G[i]:
            # Relax: explore edges whose cost is still unknown or can be
            # improved, and push the candidate distance onto the heap.
            if dist[j] == -1 or dist[j] > dist[i] + c:
                dist[j] = dist[i] + c
                heapq.heappush(que, Entity(j, dist[j]))
    return dist
dist = dijkstra(SA)
#print(dist)
print(dist[SB+100])
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
6290eefa14764641f8055e2653a7a53a963ac2a6 | 807305b8aefbd7aac4f44c67deed06c059ca02d9 | /src/stk/ea/crossover/crossers/molecule/__init__.py | 6bdb934722095014d51048ac26fccec745cce7c2 | [
"MIT"
] | permissive | supramolecular-toolkit/stk | c40103b4820c67d110cbddc7be30d9b58d85f7af | 46f70cd000890ca7c2312cc0fdbab306565f1400 | refs/heads/master | 2022-11-27T18:22:25.187588 | 2022-11-16T13:23:11 | 2022-11-16T13:23:11 | 129,884,045 | 22 | 5 | MIT | 2019-08-19T18:16:41 | 2018-04-17T09:58:28 | Python | UTF-8 | Python | false | false | 76 | py | from .crosser import * # noqa
from .genetic_recombination import * # noqa
| [
"noreply@github.com"
] | supramolecular-toolkit.noreply@github.com |
acc8718c446ff2f61372886aa27513bf83191698 | 6aa8fd438e12e4e285d9b89be15e211e607821e0 | /.metadata/.plugins/org.eclipse.core.resources/.history/cc/604d9aa93aac00141484e17924c72bfe | 220dd8bf723cd98eacd7547909b0124e9522e153 | [] | no_license | phoenixproject/python | 2aa251c9fe9a3a665043d5f3d29d48c0f95b9273 | f8171d31d1d33a269d29374e7605a8f5bce6b5d6 | refs/heads/master | 2021-03-12T19:15:01.611936 | 2015-02-04T08:25:27 | 2015-02-04T08:25:27 | 30,287,884 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | #!/usr/bin/python3
from br.edu.ifes.poo2.adapter.cdp.FiltroAguaNatural import FiltroAguaNatural
from br.edu.ifes.poo2.adapter.cdp.FiltroAguaGelada import FiltroAguaGelada
from br.edu.ifes.poo2.adapter.cdp.FiltroAguaESaiGelo import FiltroAguaESaiGelo
def main():
    """Exercise the water-filter adapters: natural and chilled water."""
    filtro = FiltroAguaNatural()
    filtro.FiltrarAgua()
    filtro = FiltroAguaGelada()
    filtro.FiltrarAgua()
    # Fixed: the '#' was misplaced ("f#iltro2..."), leaving a bare undefined
    # name `f` that raised NameError at runtime.  The ice-dispensing adapter
    # remains disabled:
    # filtro2 = FiltroAguaESaiGelo()
    # filtro2.FiltrarAgua()
if __name__ == "__main__" : main() | [
"phoenixproject.erp@gmail.com"
] | phoenixproject.erp@gmail.com | |
a2a09b8055dfde80a01fcb05669f1e1078e5b234 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /constrained_language_typology/sigtyp_reader_main.py | c3641d426176bf96df9381cb5e3c2f02b22c2a70 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 5,528 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Reader for the format provided by SIGTYP 2020 Shared Task.
More information on the format is available here:
https://sigtyp.github.io/st2020.html
Example:
--------
Clone the GitHub data to ST2020_DIR. Then run:
> ST2020_DIR=...
> python3 sigtyp_reader_main.py --sigtyp_dir ${ST2020_DIR}/data \
--output_dir ${OUTPUT_DIR}
The above will create "train.csv", "dev.csv" and "test_blinded.csv" files
converted from the format provided by SIGTYP. Our models should be able to
ingest these csv files. Along with each of the above files, an accompanying
"data_train_*.json.gz" file is generated that contains metainformation on
various features and their values.
TODO:
-----
Following needs to be done:
- Latitude and longitude need to be on a point on a unit sphere? Keep as is
and add three further columns for (x,y,z)?
- Country codes are *several*.
- Other types of SOMs.
- Use BaseMap for visualizations?
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import tempfile
from absl import app
from absl import flags
from absl import logging
import constants as const
import data_info as data_lib
import sigtyp_reader as sigtyp
flags.DEFINE_string(
"sigtyp_dir", "",
"Directory containing SIGTYP original training and development.")
flags.DEFINE_string(
"output_dir", "",
"Output directory for preprocessed files.")
flags.DEFINE_bool(
"categorical_as_ints", False,
"Encode all the categorical features as ints.")
FLAGS = flags.FLAGS
def _write_dict(data, file_type, output_filename):
  """Writes a data-info dictionary of the given type into the output directory."""
  target_name = "".join(
      [output_filename, "_", file_type, data_lib.FILE_EXTENSION])
  target_path = os.path.join(FLAGS.output_dir, target_name)
  data_lib.write_data_info(target_path, data)
def _process_file(filename, base_dir=None):
  """Preprocesses a SIGTYP csv file.

  Reads the csv, writes its data-info dictionary and the preprocessed
  '|'-separated csv into FLAGS.output_dir.

  Args:
    filename: Base name of the csv file (without extension).
    base_dir: Directory containing the file; defaults to FLAGS.sigtyp_dir.

  Returns:
    The data-info dictionary produced by the reader.
  """
  if not base_dir:
    base_dir = FLAGS.sigtyp_dir
  full_path = os.path.join(base_dir, filename + ".csv")
  _, df, data_info = sigtyp.read(
      full_path, categorical_as_ints=FLAGS.categorical_as_ints)
  _write_dict(data_info, filename, const.DATA_INFO_FILENAME)
  # Save preprocessed data frames to a csv.
  output_file = os.path.join(FLAGS.output_dir, filename + ".csv")
  logging.info("Saving preprocessed data to \"%s\" ...", output_file)
  df.to_csv(output_file, sep="|", index=False, float_format="%g")
  return data_info
def _write_combined_data(file_types, output_file_name):
  """Combines data from multiple files.

  Concatenates the SIGTYP csvs named in `file_types` (keeping a single
  header), sorts the rows by WALS code, and runs the combined file through
  the regular preprocessing.

  Args:
    file_types: Base names of the csv files to combine (e.g. ["train", "dev"]).
    output_file_name: Base name for the combined output file.
  """
  with tempfile.TemporaryDirectory() as temp_dir:
    temp_file = os.path.join(temp_dir, output_file_name + ".csv")
    with open(temp_file, "w", encoding=const.ENCODING) as out_f:
      header = None
      all_lines = []
      for file_type in file_types:
        input_path = os.path.join(FLAGS.sigtyp_dir, file_type + ".csv")
        with open(input_path, "r", encoding=const.ENCODING) as in_f:
          lines = in_f.readlines()
          if not header:
            header = lines[0]
          lines.pop(0)  # Remove header.
          all_lines.extend(lines)
      # Sort the lines by the WALS code and dump them.
      all_lines = sorted(all_lines, key=lambda x: x.split("|")[0])
      all_lines.insert(0, header)
      out_f.write("".join(all_lines))
    _process_file(output_file_name, base_dir=temp_dir)
def _process_files():
  """Processes all input files (individual splits plus combined datasets)."""
  # Process training and development files individually.
  _process_file(const.TRAIN_FILENAME)
  _process_file(const.DEV_FILENAME)
  _process_file(const.TEST_GOLD_FILENAME)
  test_data_info = _process_file(const.TEST_BLIND_FILENAME)
  # Save features requested for prediction in the test set.
  features_to_predict = test_data_info[const.DATA_KEY_FEATURES_TO_PREDICT]
  if not features_to_predict:
    raise ValueError("No features requested for prediction!")
  predict_dict_path = os.path.join(FLAGS.output_dir,
                                   const.FEATURES_TO_PREDICT_FILENAME + ".json")
  logging.info("Saving features for prediction in \"%s\" ...",
               predict_dict_path)
  with open(predict_dict_path, "w", encoding=const.ENCODING) as f:
    json.dump(features_to_predict, f)
  # Process the combined datasets (train+dev and train+dev+test).
  _write_combined_data([const.TRAIN_FILENAME, const.DEV_FILENAME],
                       const.TRAIN_DEV_FILENAME)
  _write_combined_data([const.TRAIN_FILENAME, const.DEV_FILENAME,
                        const.TEST_BLIND_FILENAME],
                       const.TRAIN_DEV_TEST_FILENAME)
def main(unused_argv):
  """Entry point: validates the required flags and runs the pipeline."""
  # Check flags.
  if not FLAGS.sigtyp_dir:
    raise ValueError("Specify --sigtyp_dir for input data!")
  if not FLAGS.output_dir:
    raise ValueError("Specify --output_dir for preprocessed data!")
  _process_files()
_process_files()
if __name__ == "__main__":
app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
eb023275d736cbac3ff00334ca4f12d84d44429a | f6e78129c6669e8f46a65a3d7c45cf10cca083b9 | /scripts/inject_powerloss.py | 1c4d01f0ff71e14aeb4c309ca1dea09dbfc96d04 | [] | no_license | realraum/door_and_sensors | 339259c16ed27d2466f3cf9d5a51a93bfc7b326c | 8c70cdc4a28eabb04ce1e2df6aab0fb86c5b4f28 | refs/heads/master | 2023-08-30T08:53:28.419146 | 2023-08-07T22:16:21 | 2023-08-07T22:16:21 | 31,234,025 | 1 | 0 | null | 2023-08-11T11:46:30 | 2015-02-23T22:45:47 | Go | UTF-8 | Python | false | false | 983 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from __future__ import with_statement
import paho.mqtt.client as mqtt
import json
import time
import sys
######## r3 ############
def sendR3Message(client, structname, datadict):
    """Publish *datadict* as a JSON payload on the MQTT topic *structname*."""
    payload = json.dumps(datadict)
    client.publish(structname, payload)
# Open the MQTT connection used to publish / forward sensor data.
client = mqtt.Client()
client.connect("mqtt.realraum.at", 1883, 60)

# With no topic/payload arguments, publish a canned powerloss test event;
# otherwise publish argv[1] as the topic and argv[2] as the payload verbatim.
if len(sys.argv) < 3:
    sendR3Message(client, "realraum/backdoorcx/powerloss",
                  {"Ts": int(time.time()),
                   "OnBattery": bool(True),
                   "PercentBattery": float(42.0),
                   "LineVoltage": float(2904.0),
                   "LoadPercent": float(0815.0)
                   })
else:
    client.publish(sys.argv[1], sys.argv[2])
# Pump the network loop once so the queued message actually goes out.
client.loop(timeout=1.0, max_packets=1)
client.disconnect()
# {“OnBattery”:bool, PercentBattery:float, LineVoltage: float, LoadPercent: float,
| [
"dev@2904.cc"
] | dev@2904.cc |
9fd094b78b8730d2a64729c0e9348ca22cbac3c0 | e01c5d1ee81cc4104b248be375e93ae29c4b3572 | /Sequence5/CTCI/2-Linked-List/1remove-dup.py | 17fed028ad554d4eca493329a1f98a574741788a | [] | no_license | lalitzz/DS | 7de54281a34814601f26ee826c722d123ee8bd99 | 66272a7a8c20c0c3e85aa5f9d19f29e0a3e11db1 | refs/heads/master | 2021-10-14T09:47:08.754570 | 2018-12-29T11:00:25 | 2018-12-29T11:00:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | import singlell
def remove_duplicate(S):
    """Remove nodes whose data was already seen, using a set as a buffer."""
    seen = set()
    prev = None
    node = S.head
    while node is not None:
        if node.data in seen:
            # Unlink the duplicate node.
            if prev is not None:
                prev.next = node.next
            else:
                S.head = None
            if node.next is None:
                # Dropped the last node: the tail moves back to prev.
                S.tail = prev
        else:
            seen.add(node.data)
            prev = node
        node = node.next
def remove_dup_notmp(S):
    """Remove duplicates in place with no extra buffer (O(n^2) runner scan)."""
    anchor = S.head
    while anchor is not None:
        scanner = anchor
        while scanner.next is not None:
            if anchor.data == scanner.next.data:
                # Bypass a later duplicate of the current anchor node.
                scanner.next = scanner.next.next
            else:
                scanner = scanner.next
        anchor = anchor.next
# Demo: build a list via appendLeft (one 1 and six 2s), deduplicate it
# in place without a buffer, then print the surviving nodes.
S = singlell.SingleLinkList()
S.appendLeft(1)
S.appendLeft(2)
S.appendLeft(2)
S.appendLeft(2)
S.appendLeft(2)
S.appendLeft(2)
S.appendLeft(2)
remove_dup_notmp(S)
S.print_list()
"lalit.slg007@gmail.com"
] | lalit.slg007@gmail.com |
58fab79e5b39430682a825af92f73644698e4c5c | 64a296ffabb013ad8c8a55380718fcc629bc7755 | /cry1ac/src/pb_d_major_subset.py | f941b371c4ba31b579b7b1e815e7bf8cb87a6720 | [] | no_license | maxwshen/evoracle-dataprocessinganalysis | 3ed16fc20ff52d4be81bb171893e64562a81d5d7 | ed400f6ddfd7b7bba161dd3a06254013a2c90770 | refs/heads/master | 2022-04-30T07:50:07.486482 | 2020-04-27T18:21:08 | 2020-04-27T18:21:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,397 | py | #
from __future__ import division
import _config
import sys, os, fnmatch, datetime, subprocess
sys.path.append('/home/unix/maxwshen/')
import numpy as np
from collections import defaultdict
from mylib import util, compbio
import pandas as pd
# Default params
inp_dir = _config.OUT_PLACE + f'pb_c_convert/'
NAME = util.get_fn(__file__)
out_dir = _config.OUT_PLACE + NAME + '/'
util.ensure_dir_exists(out_dir)
exp_design = pd.read_csv(_config.DATA_DIR + f'Badran2015_SraRunTable.csv')
pacbio_nms = exp_design[exp_design['Instrument'] == 'PacBio RS II']['Library Name']
pacbio_nms = sorted(pacbio_nms)
params = {
# 21
'major_positions': [
-76,
-73,
# -69,
15,
# 61,
68,
198,
286,
304,
332,
344,
347,
361,
363,
384,
404,
417,
461,
463,
515,
582,
],
}
pos_to_ref = {
-76: 'A',
-73: 'M',
# -69: 'G',
15: 'C',
# 61: 'V',
68: 'F',
198: 'R',
286: 'G',
304: 'T',
332: 'E',
344: 'A',
347: 'Q',
361: 'T',
363: 'S',
384: 'D',
404: 'S',
417: 'N',
461: 'E',
463: 'N',
515: 'E',
582: 'S',
}
ordered_time_strings = [
'0hrs',
'12hrs',
'24hrs',
'36hrs',
'48hrs',
'60hrs',
'72hrs',
'84hrs',
'96hrs',
'108hrs',
'120hrs',
'132hrs',
'144hrs',
'156hrs',
'168hrs',
'180hrs',
'192hrs',
'204hrs',
'216hrs',
'228hrs',
'240hrs',
'264hrs',
'276hrs',
'300hrs',
'324hrs',
'348hrs',
'372hrs',
'396hrs',
'408hrs',
'432hrs',
'456hrs',
'480hrs',
'504hrs',
'528hrs',
]
##
# Functions
##
def get_short_genotypes(dfs):
    '''
    Collapse each read's observed mutations into a short genotype string.

    For every read in `dfs`, builds one character per entry of
    params['major_positions']: the mutated amino acid if the read carries
    a mutation there, '.' otherwise. Genotypes containing the amino-acid
    code 'e' (a deletion marker) are filtered out.
    '''
    short_gts = []
    # Group once instead of re-filtering the whole frame for every read:
    # the original `dfs[dfs['Read name'] == nm]` mask was O(reads * rows)
    # and iterated an unordered set, so output order varied across runs.
    for read_nm, df in dfs.groupby('Read name'):
        obs_pos_to_mut = dict(zip(df['Position'], df['Mutated amino acid']))
        short_gt = ''.join([obs_pos_to_mut.get(pos, '.') for pos in params['major_positions']])
        short_gts.append(short_gt)

    # Filter genotypes with amino acid 'e' representing a deletion
    print(f'Found {len(short_gts)} genotypes')
    short_gts = [s for s in short_gts if 'e' not in s]
    print(f'Filtered out e, leaving {len(short_gts)} genotypes')
    return short_gts
##
# Primary
##
def major_subset():
    '''
    Build genotype-frequency tables over the PacBio timepoints.

    Reads each converted PacBio library, keeps only mutations at
    params['major_positions'], collapses reads to short genotype strings,
    then writes three CSVs: long-format counts/frequencies, a
    genotype x timepoint frequency pivot, and the same pivot restricted
    to genotypes exceeding 1% frequency at any timepoint (renormalized).
    '''
    # Library names look like '<x>_<y>_<time>_...'; field 2 is the timepoint.
    get_time_from_nm = lambda nm: nm.split('_')[2]
    dd = defaultdict(list)
    for nm in pacbio_nms:
        print(nm)
        df = pd.read_csv(inp_dir + f'{nm}.csv', index_col = 0)
        dfs = df[df['Position'].isin(params['major_positions'])]
        short_gts = get_short_genotypes(dfs)
        time = get_time_from_nm(nm)
        dd['Abbrev genotype'] += short_gts
        dd['Timepoint'] += [time] * len(short_gts)
    df = pd.DataFrame(dd)

    # Add stats
    # One row per read; summing 'Read count' per (genotype, timepoint)
    # yields the per-timepoint genotype counts and frequencies.
    df['Read count'] = 1
    dfs = df.groupby(['Abbrev genotype', 'Timepoint']).agg(sum).reset_index()
    sums = dfs.groupby(['Timepoint'])['Read count'].sum()
    time_to_sum = {time: ct for time, ct in zip(sums.index, list(sums))}
    dfs['Total count'] = [time_to_sum[t] for t in dfs['Timepoint']]
    dfs['Frequency'] = dfs['Read count'] / dfs['Total count']
    dfs.to_csv(out_dir + f'badran_pacbio.csv')

    # Wide table: rows = genotypes, columns = timepoints in temporal order.
    pv_df = dfs.pivot(index = 'Abbrev genotype', columns = 'Timepoint', values = 'Frequency')
    pv_df = pv_df.fillna(value = 0)
    pv_df = pv_df[ordered_time_strings]
    pv_df.to_csv(out_dir + f'badran_pacbio_pivot.csv')

    # Subset to > 1% fq and renormalize
    t = pv_df.apply(max, axis = 'columns')
    gt_to_max_fq = {gt: max_fq for gt, max_fq in zip(t.index, list(t))}  # NOTE(review): unused
    keep_gts = [gt for gt, max_fq in zip(t.index, list(t)) if max_fq > 0.01]
    print(f'Filtered {len(pv_df)} to {len(keep_gts)} genotypes with >1% fq in any timepoint')

    # Normalize
    # After dropping rare genotypes, rescale each column to sum to 1.
    pv_df = pv_df.loc[keep_gts]
    pv_df /= pv_df.apply(sum)
    pv_df = pv_df.sort_values(by = '528hrs', ascending = False)
    pv_df.to_csv(out_dir + f'badran_pacbio_pivot_1pct.csv')
    return
##
# qsub
##
def gen_qsubs():
    """Write one qsub shell script per PacBio library plus a launcher file."""
    # Generate qsub shell scripts and commands for easy parallelization
    print('Generating qsub scripts...')
    qsubs_dir = _config.QSUBS_DIR + NAME + '/'
    util.ensure_dir_exists(qsubs_dir)
    qsub_commands = []

    pacbio_nms = exp_design[exp_design['Instrument'] == 'PacBio RS II']['Library Name']

    num_scripts = 0
    for nm in pacbio_nms:
        command = f'python {NAME}.py {nm}'
        script_id = NAME.split('_')[0]

        # Write shell scripts
        sh_fn = qsubs_dir + f'q_{script_id}_{nm}.sh'
        with open(sh_fn, 'w') as f:
            f.write(f'#!/bin/bash\n{command}\n')
        num_scripts += 1

        # Write qsub commands
        qsub_commands.append(f'qsub -V -P regevlab -l h_rt=4:00:00,h_vmem=8G -wd {_config.SRC_DIR} {sh_fn} &')

    # Save commands
    # The launcher file runs every per-library script in the background.
    commands_fn = qsubs_dir + '_commands.sh'
    with open(commands_fn, 'w') as f:
        f.write('\n'.join(qsub_commands))
    subprocess.check_output(f'chmod +x {commands_fn}', shell = True)
    print(f'Wrote {num_scripts} shell scripts to {qsubs_dir}')
    return
##
# Main
##
@util.time_dec
def main():
    """Entry point: run the PacBio major-position subsetting pipeline."""
    print(NAME)

    # Function calls
    major_subset()

    return


if __name__ == '__main__':
    # if len(sys.argv) > 1:
    #   main(sys.argv[1:])
    # else:
    #   gen_qsubs()
    main()
"maxwshen@gmail.com"
] | maxwshen@gmail.com |
4558cc6e46898c6d04ac0c115bcdacc63c8181fa | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_34/619.py | 42c863876e3fa0be7bf9a9744685f50aab0ed2a3 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | #!/usr/bin/env python
import re
import sys
def match(words, pattern):
    """Count the words that fully match *pattern*.

    The pattern uses '(abc)' groups meaning "any one of these letters";
    groups are translated into regex character classes. The regex is
    compiled once (instead of once per word) and anchored with \\Z so a
    pattern can never match a mere prefix of a longer word.
    """
    regex = re.compile(pattern.replace("(", "[").replace(")", "]") + r"\Z")
    count = 0
    for word in words:
        if regex.match(word):
            count += 1
    return count
def main():
    """Read the Code Jam input from stdin and print one line per case."""
    readline = sys.stdin.readline
    # First line: word length, dictionary size, number of patterns.
    l, d, n = [int(x) for x in readline().split(" ", 2)]
    words = []
    for i in range(d):
        # Strip the trailing newline from each dictionary word.
        words.append(readline()[:-1])
    for i in range(n):
        pattern = readline()[:-1]
        print "Case #%s: %s" % (i+1, match(words, pattern))

if __name__ == "__main__":
    main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
680b2b82bf857e26d0faafe6e4775c0ed3a62c5a | 56f1bb713f0651ac63391349deb81790df14e4b5 | /Mirror Images/mirror.py | 2f0d48862fb5ca30e5ed19cfae2c329da4a5f92f | [
"CC0-1.0"
] | permissive | rajitbanerjee/kattis | 4cd46a2fe335120b8f53ca71544fc0681474118b | 3a5dd4c84c07e21f09ef45ebd9c1bad2a0adc6ad | refs/heads/master | 2022-05-05T03:19:28.744660 | 2020-08-12T18:48:55 | 2020-08-12T18:48:55 | 192,208,120 | 4 | 2 | CC0-1.0 | 2022-04-15T05:50:16 | 2019-06-16T15:38:15 | Python | UTF-8 | Python | false | false | 643 | py | """https://open.kattis.com/problems/mirror"""
T = int(input())
ans = []
def doubleMirror(image, R, C):
    """Return the R x C image flipped both vertically and horizontally,
    rendered as newline-joined strings."""
    rows = []
    for i in range(R - 1, -1, -1):
        # Walk each source row backwards to mirror it left-right.
        rows.append("".join(image[i][j] for j in range(C - 1, -1, -1)))
    return "\n".join(rows)
# Read each test case (dimensions, then R rows of pixels), mirror it,
# and collect the answers before printing them in order.
for _ in range(T):
    R, C = map(int, input().split())
    image = []
    for _ in range(R):
        row = list(input())
        image.append(row)
    ans.append(doubleMirror(image, R, C))

for i, a in enumerate(ans):
    print(f"Test {i + 1}")
    print(a)
"rajit.banerjee@ucdconnect.ie"
] | rajit.banerjee@ucdconnect.ie |
eafc46e1ba8143c22bc2a42c20e428126f928e52 | a7cf2d5a062ccb93285f034baa472703599336fb | /lec03-loop1/D2.py | 7307318b9a03803ea5bf315a8edb09cd9185df9f | [] | no_license | webturing/PythonProgramming_20DS123 | 5538535e039a87e20752549bb0fa75d32b022cf9 | d971277b0401e342adad637ebfac5f88f321abe6 | refs/heads/master | 2023-02-16T17:02:07.385325 | 2021-01-20T09:53:28 | 2021-01-20T09:53:28 | 297,176,647 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | n = 10
if n % 2 == 1:
print(((n + 1) // 2) ** 2)
else:
print(n ** 2 // 4)
| [
"zj@webturing.com"
] | zj@webturing.com |
f6892953b61e5bd8d916108ba1c4c6a22883b75e | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/sum-of-multiples/6cd4a956dd254262aaa0ea881b44e3fe.py | 689a350a7f9eb5a21bf4e588871116ce841d8f22 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 676 | py | class SumOfMultiples:
def __init__(self, *args):
if not args:
self.nums = (3, 5)
else:
self.nums = args
# @profile
def to_old(self, num):
ans = []
for i in range(num):
for j in self.nums:
if i % j == 0:
ans.append(i)
return sum(set(ans))
# @profile
def to(self, num):
ans = set()
for j in self.nums:
temp = [x for x in range(j, num, j)]
ans.update(temp)
return sum(ans)
if __name__ == "__main__":
print(SumOfMultiples(3, 5, 7, 2).to(10**6))
print(SumOfMultiples(43, 47).to_old(10**6))
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
6212ce1b008a751d592f27035ba0f6e02bc76f93 | 706dd00183f5f4a3ccb80e78efc35c9173a7d88b | /backend/prototype/migrations/0023_auto_20210827_2116.py | 70b299cf4cbc9978bb85a7710a1eb4e33178f229 | [] | no_license | jiaweioss/2021_Summer_Project | d82e89e431c500cde07201b150a4390ecf09ce6f | 136f007f1a4449710659b7424025d15402b7344a | refs/heads/main | 2023-07-14T14:17:39.239612 | 2021-08-29T12:59:03 | 2021-08-29T12:59:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # Generated by Django 3.1.7 on 2021-08-27 13:16
from django.db import migrations, models
# Auto-generated migration: alter Questionnaire.showNumbers so it
# defaults to True and accepts NULL.
class Migration(migrations.Migration):

    dependencies = [
        ('prototype', '0022_questionnaire_shownumbers'),
    ]

    operations = [
        migrations.AlterField(
            model_name='questionnaire',
            name='showNumbers',
            field=models.BooleanField(default=True, null=True),
        ),
    ]
| [
"455436082@qq.com"
] | 455436082@qq.com |
9508aa89ecf08ec9d6866cda6052c802a93eb274 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_320/ch40_2019_08_26_19_13_34_255376.py | daf09fb1bcba4e7885a4e0cdad8525c5c25316da | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | def fatorial(numero):
fat = 1
while numero > 0:
fat *= numero
numero -= 1
return fat
print(fatorial(5)) | [
"you@example.com"
] | you@example.com |
40a45937067dd33457d82b9d157f6a37f2980933 | 25faa623b069a9423e040903f4f2c5c123f53825 | /src/Sparrow/Python/setup.py | 8ec59326f11aa2e3e8e06d300befe7899648e147 | [
"BSD-3-Clause"
] | permissive | DockBio/sparrow | 99d3eb316426351312e74397c5cc4bb962118306 | f82cf86584e9edfc6f2c78af4896dc6f2ee8a455 | refs/heads/master | 2022-07-14T11:44:21.748779 | 2020-04-27T20:41:58 | 2020-04-27T20:41:58 | 257,099,197 | 0 | 0 | BSD-3-Clause | 2020-04-27T20:42:00 | 2020-04-19T20:48:28 | null | UTF-8 | Python | false | false | 1,000 | py | import setuptools
# Read README.md for the long description
with open("README.md", "r") as fh:
    long_description = fh.read()

# Define the setup
setuptools.setup(
    name="scine_sparrow",
    version="2.0.0",
    author="ETH Zurich, Laboratory for Physical Chemistry, Reiher Group",
    author_email="scine@phys.chem.ethz.ch",
    description="Open source semi-empirical quantum chemistry implementations.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://www.scine.ethz.ch",
    packages=["scine_sparrow"],
    # Ship the prebuilt extension module alongside the Python package.
    package_data={"scine_sparrow": ["scine_sparrow.so"]},
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: C++",
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Topic :: Scientific/Engineering :: Chemistry"
    ],
    # The package bundles a compiled shared object, so it cannot be
    # imported from a zip archive.
    zip_safe=False,
    test_suite='pytest',
    tests_require=['pytest']
)
| [
"scine@phys.chem.ethz.ch"
] | scine@phys.chem.ethz.ch |
b40323c2dade3b11429fceddf7181bb8297ac62b | 025230a618b49c5f255c34e4389f87064df32a6f | /hypertools/tools/cluster.py | 88b0ff6d482d9bc04b10998d88acccca77304cd5 | [
"MIT"
] | permissive | shannonyu/hypertools | 18b44b502992e7748c8eabdab188b41e0120bf08 | 8134d46b6031169bb12d03e49357802c923a175f | refs/heads/master | 2021-01-25T05:50:54.314315 | 2017-02-01T22:16:57 | 2017-02-01T22:16:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | #!/usr/bin/env python
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import numpy as np
from .._shared.helpers import *
def cluster(x, n_clusters=8, ndims=None):
    """
    Cluster the rows of one or more arrays with k-means.

    The previous docstring was copy-pasted from an alignment function and
    described hyperalignment, which this function does not perform; this
    one documents the actual behavior.

    Parameters
    ----------
    x : Numpy array, Pandas DataFrame, or a list of them
        Input data. Lists are stacked row-wise before clustering.

    n_clusters : int, optional
        Number of k-means clusters to fit (default 8).

    ndims : int or None, optional
        If given, reduce the data to this many dimensions with PCA before
        clustering.

    Returns
    ----------
    cluster_labels : list
        One integer cluster label per row of the (stacked) input.
    """
    x = format_data(x)

    if isinstance(x, list):
        x = np.vstack(x)

    if ndims:
        x = PCA(n_components=ndims).fit_transform(x)

    kmeans = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)
    kmeans.fit(x)

    return list(kmeans.labels_)
| [
"andrew.heusser@gmail.com"
] | andrew.heusser@gmail.com |
f4876c464f794477270f2c5c04c1902be29d18ef | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02785/s998943177.py | af54361e577f369c4e98be5c6ef60ce49c5108ea | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | n,k=map(int,input().split())
h=list(map(int,input().split()))
h.sort(reverse=True)
if n-k<=0:
print(0)
else:
print(sum(h[k:])) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4212e86c28029d266b8be09cb40da53ac4ceb49a | ee7e74fa14f176c7f1e9cff57cae14092a0baacf | /HomePlugPWN/plcmon.py | a5a3bbc1ddf4d810d49c224543e56c8f7383fe9c | [] | no_license | Cloudxtreme/powerline-arsenal | f720b047cc4fe24ceb44588456ad0fc1ce627202 | f62f9ea8b27b8079c7f52b7b81d6a8bf5e4baa1a | refs/heads/master | 2021-05-29T01:02:14.512647 | 2015-03-31T10:25:24 | 2015-03-31T10:25:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | #!/usr/bin/en python2
import sys
import binascii
from layerscapy.HomePlugAV import *
from optparse import OptionParser
from genDAK import *
# CCo MAC address -> derived DAK, filled in as stations are sniffed.
dictio = {}

def appendindic(pkt):
    """Per-packet sniff callback: record newly seen CCo stations."""
    # Take the 6 MAC bytes at offset 0x0e of the raw payload and format
    # them as aa:bb:cc:dd:ee:ff (pairs of hex digits joined by ':').
    macad = iter(binascii.hexlify(pkt.load[0xe:0xe+6]))
    macad = ':'.join(a+b for a,b in zip(macad, macad))
    if macad not in dictio.keys() and macad != "00:00:00:00:00:00":
        # First sighting of this CCo: derive its DAK from the MAC.
        dictio[macad] = DAKgen(macad).generate()
        print "\t Found CCo: %s (DAK: %s)" % (macad, dictio[macad])
if __name__ == "__main__":
usage = "usage: %prog [options] arg"
parser = OptionParser(usage)
parser.add_option("-i", "--iface", dest="iface", default="eth0",
help="select an interface to Enable sniff mode and sniff indicates packets", metavar="INTERFACE")
parser.add_option("-s", "--source", dest="sourcemac", default="00:c4:ff:ee:00:00",
help="source MAC address to use", metavar="SOURCEMARC")
(options, args) = parser.parse_args()
print "[+] Enabling sniff mode"
pkt = Ether(src=options.sourcemac)/HomePlugAV()/SnifferRequest(SnifferControl=1) # We enable Sniff mode here
sendp(pkt, iface=options.iface)
print "[+] Listening for CCo station..."
sniff(prn=appendindic, lfilter=lambda pkt:pkt.haslayer(HomePlugAV)) # capture the signal
| [
"oleg.kupreev@gmail.com"
] | oleg.kupreev@gmail.com |
eeb5f9a896f8ab7a065ad51028e0e523de125320 | 54ddb3f38cd09ac25213a7eb8743376fe778fee8 | /topic_12_exceptions/examples/3_raise.py | 440f4bf353deb4f9fa1bc82e675a5b93f5c32774 | [] | no_license | ryndovaira/leveluppythonlevel1_300321 | dbfd4ee41485870097ee490f652751776ccbd7ab | 0877226e6fdb8945531775c42193a90ddb9c8a8b | refs/heads/master | 2023-06-06T07:44:15.157913 | 2021-06-18T11:53:35 | 2021-06-18T11:53:35 | 376,595,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | # вводим 1 затем 0
a, b = int(input()), int(input())
if b == 0:
raise ZeroDivisionError
| [
"ryndovaira@gmail.com"
] | ryndovaira@gmail.com |
62389e69298b9990655be9065e2afbfd0fc50a59 | 88b667f671beb285e567f7648e92247d644a3abd | /python-sc2/sc2/ids/buff_id.py | 6fca1be9afe79564a40d5b94423931ebe6af54c1 | [
"MIT"
] | permissive | august-k/sharpy-paul | e7fbd6ac181d7e81e1f60eb34016063227590a64 | 6f4462208842db6dd32b30d5c1ebf90e79e975a0 | refs/heads/master | 2022-07-17T15:40:31.603339 | 2021-02-26T02:56:34 | 2021-02-26T02:56:34 | 257,317,358 | 1 | 1 | MIT | 2022-06-22T02:07:06 | 2020-04-20T15:05:13 | Python | UTF-8 | Python | false | false | 9,266 | py | # DO NOT EDIT!
# This file was automatically generated by "generate_id_constants_from_stableid.py"
import enum
class BuffId(enum.Enum):
NULL = 0
RADAR25 = 1
TAUNTB = 2
DISABLEABILS = 3
TRANSIENTMORPH = 4
GRAVITONBEAM = 5
GHOSTCLOAK = 6
BANSHEECLOAK = 7
POWERUSERWARPABLE = 8
VORTEXBEHAVIORENEMY = 9
CORRUPTION = 10
QUEENSPAWNLARVATIMER = 11
GHOSTHOLDFIRE = 12
GHOSTHOLDFIREB = 13
LEECH = 14
LEECHDISABLEABILITIES = 15
EMPDECLOAK = 16
FUNGALGROWTH = 17
GUARDIANSHIELD = 18
SEEKERMISSILETIMEOUT = 19
TIMEWARPPRODUCTION = 20
ETHEREAL = 21
NEURALPARASITE = 22
NEURALPARASITEWAIT = 23
STIMPACKMARAUDER = 24
SUPPLYDROP = 25
_250MMSTRIKECANNONS = 26
STIMPACK = 27
PSISTORM = 28
CLOAKFIELDEFFECT = 29
CHARGING = 30
AIDANGERBUFF = 31
VORTEXBEHAVIOR = 32
SLOW = 33
TEMPORALRIFTUNIT = 34
SHEEPBUSY = 35
CONTAMINATED = 36
TIMESCALECONVERSIONBEHAVIOR = 37
BLINDINGCLOUDSTRUCTURE = 38
COLLAPSIBLEROCKTOWERCONJOINEDSEARCH = 39
COLLAPSIBLEROCKTOWERRAMPDIAGONALCONJOINEDSEARCH = 40
COLLAPSIBLETERRANTOWERCONJOINEDSEARCH = 41
COLLAPSIBLETERRANTOWERRAMPDIAGONALCONJOINEDSEARCH = 42
DIGESTERCREEPSPRAYVISION = 43
INVULNERABILITYSHIELD = 44
MINEDRONECOUNTDOWN = 45
MOTHERSHIPSTASIS = 46
MOTHERSHIPSTASISCASTER = 47
MOTHERSHIPCOREENERGIZEVISUAL = 48
ORACLEREVELATION = 49
GHOSTSNIPEDOT = 50
NEXUSPHASESHIFT = 51
NEXUSINVULNERABILITY = 52
ROUGHTERRAINSEARCH = 53
ROUGHTERRAINSLOW = 54
ORACLECLOAKFIELD = 55
ORACLECLOAKFIELDEFFECT = 56
SCRYERFRIENDLY = 57
SPECTRESHIELD = 58
VIPERCONSUMESTRUCTURE = 59
RESTORESHIELDS = 60
MERCENARYCYCLONEMISSILES = 61
MERCENARYSENSORDISH = 62
MERCENARYSHIELD = 63
SCRYER = 64
STUNROUNDINITIALBEHAVIOR = 65
BUILDINGSHIELD = 66
LASERSIGHT = 67
PROTECTIVEBARRIER = 68
CORRUPTORGROUNDATTACKDEBUFF = 69
BATTLECRUISERANTIAIRDISABLE = 70
BUILDINGSTASIS = 71
STASIS = 72
RESOURCESTUN = 73
MAXIMUMTHRUST = 74
CHARGEUP = 75
CLOAKUNIT = 76
NULLFIELD = 77
RESCUE = 78
BENIGN = 79
LASERTARGETING = 80
ENGAGE = 81
CAPRESOURCE = 82
BLINDINGCLOUD = 83
DOOMDAMAGEDELAY = 84
EYESTALK = 85
BURROWCHARGE = 86
HIDDEN = 87
MINEDRONEDOT = 88
MEDIVACSPEEDBOOST = 89
EXTENDBRIDGEEXTENDINGBRIDGENEWIDE8OUT = 90
EXTENDBRIDGEEXTENDINGBRIDGENWWIDE8OUT = 91
EXTENDBRIDGEEXTENDINGBRIDGENEWIDE10OUT = 92
EXTENDBRIDGEEXTENDINGBRIDGENWWIDE10OUT = 93
EXTENDBRIDGEEXTENDINGBRIDGENEWIDE12OUT = 94
EXTENDBRIDGEEXTENDINGBRIDGENWWIDE12OUT = 95
PHASESHIELD = 96
PURIFY = 97
VOIDSIPHON = 98
ORACLEWEAPON = 99
ANTIAIRWEAPONSWITCHCOOLDOWN = 100
ARBITERMPSTASISFIELD = 101
IMMORTALOVERLOAD = 102
CLOAKINGFIELDTARGETED = 103
LIGHTNINGBOMB = 104
ORACLEPHASESHIFT = 105
RELEASEINTERCEPTORSCOOLDOWN = 106
RELEASEINTERCEPTORSTIMEDLIFEWARNING = 107
RELEASEINTERCEPTORSWANDERDELAY = 108
RELEASEINTERCEPTORSBEACON = 109
ARBITERMPCLOAKFIELDEFFECT = 110
PURIFICATIONNOVA = 111
CORRUPTIONBOMBDAMAGE = 112
CORSAIRMPDISRUPTIONWEB = 113
DISRUPTORPUSH = 114
LIGHTOFAIUR = 115
LOCKON = 116
OVERCHARGE = 117
OVERCHARGEDAMAGE = 118
OVERCHARGESPEEDBOOST = 119
SEEKERMISSILE = 120
TEMPORALFIELD = 121
VOIDRAYSWARMDAMAGEBOOST = 122
VOIDMPIMMORTALREVIVESUPRESSED = 123
DEVOURERMPACIDSPORES = 124
DEFILERMPCONSUME = 125
DEFILERMPDARKSWARM = 126
DEFILERMPPLAGUE = 127
QUEENMPENSNARE = 128
ORACLESTASISTRAPTARGET = 129
SELFREPAIR = 130
AGGRESSIVEMUTATION = 131
PARASITICBOMB = 132
PARASITICBOMBUNITKU = 133
PARASITICBOMBSECONDARYUNITSEARCH = 134
ADEPTDEATHCHECK = 135
LURKERHOLDFIRE = 136
LURKERHOLDFIREB = 137
TIMESTOPSTUN = 138
SLAYNELEMENTALGRABSTUN = 139
PURIFICATIONNOVAPOST = 140
DISABLEINTERCEPTORS = 141
BYPASSARMORDEBUFFONE = 142
BYPASSARMORDEBUFFTWO = 143
BYPASSARMORDEBUFFTHREE = 144
CHANNELSNIPECOMBAT = 145
TEMPESTDISRUPTIONBLASTSTUNBEHAVIOR = 146
GRAVITONPRISON = 147
INFESTORDISEASE = 148
SS_LIGHTNINGPROJECTOR = 149
PURIFIERPLANETCRACKERCHARGE = 150
SPECTRECLOAKING = 151
WRAITHCLOAK = 152
PSYTROUSOXIDE = 153
BANSHEECLOAKCROSSSPECTRUMDAMPENERS = 154
SS_BATTLECRUISERHUNTERSEEKERTIMEOUT = 155
SS_STRONGERENEMYBUFF = 156
SS_TERRATRONARMMISSILETARGETCHECK = 157
SS_MISSILETIMEOUT = 158
SS_LEVIATHANBOMBCOLLISIONCHECK = 159
SS_LEVIATHANBOMBEXPLODETIMER = 160
SS_LEVIATHANBOMBMISSILETARGETCHECK = 161
SS_TERRATRONCOLLISIONCHECK = 162
SS_CARRIERBOSSCOLLISIONCHECK = 163
SS_CORRUPTORMISSILETARGETCHECK = 164
SS_INVULNERABLE = 165
SS_LEVIATHANTENTACLEMISSILETARGETCHECK = 166
SS_LEVIATHANTENTACLEMISSILETARGETCHECKINVERTED = 167
SS_LEVIATHANTENTACLETARGETDEATHDELAY = 168
SS_LEVIATHANTENTACLEMISSILESCANSWAPDELAY = 169
SS_POWERUPDIAGONAL2 = 170
SS_BATTLECRUISERCOLLISIONCHECK = 171
SS_TERRATRONMISSILESPINNERMISSILELAUNCHER = 172
SS_TERRATRONMISSILESPINNERCOLLISIONCHECK = 173
SS_TERRATRONMISSILELAUNCHER = 174
SS_BATTLECRUISERMISSILELAUNCHER = 175
SS_TERRATRONSTUN = 176
SS_VIKINGRESPAWN = 177
SS_WRAITHCOLLISIONCHECK = 178
SS_SCOURGEMISSILETARGETCHECK = 179
SS_SCOURGEDEATH = 180
SS_SWARMGUARDIANCOLLISIONCHECK = 181
SS_FIGHTERBOMBMISSILEDEATH = 182
SS_FIGHTERDRONEDAMAGERESPONSE = 183
SS_INTERCEPTORCOLLISIONCHECK = 184
SS_CARRIERCOLLISIONCHECK = 185
SS_MISSILETARGETCHECKVIKINGDRONE = 186
SS_MISSILETARGETCHECKVIKINGSTRONG1 = 187
SS_MISSILETARGETCHECKVIKINGSTRONG2 = 188
SS_POWERUPHEALTH1 = 189
SS_POWERUPHEALTH2 = 190
SS_POWERUPSTRONG = 191
SS_POWERUPMORPHTOBOMB = 192
SS_POWERUPMORPHTOHEALTH = 193
SS_POWERUPMORPHTOSIDEMISSILES = 194
SS_POWERUPMORPHTOSTRONGERMISSILES = 195
SS_CORRUPTORCOLLISIONCHECK = 196
SS_SCOUTCOLLISIONCHECK = 197
SS_PHOENIXCOLLISIONCHECK = 198
SS_SCOURGECOLLISIONCHECK = 199
SS_LEVIATHANCOLLISIONCHECK = 200
SS_SCIENCEVESSELCOLLISIONCHECK = 201
SS_TERRATRONSAWCOLLISIONCHECK = 202
SS_LIGHTNINGPROJECTORCOLLISIONCHECK = 203
SHIFTDELAY = 204
BIOSTASIS = 205
PERSONALCLOAKINGFREE = 206
EMPDRAIN = 207
MINDBLASTSTUN = 208
_330MMBARRAGECANNONS = 209
VOODOOSHIELD = 210
SPECTRECLOAKINGFREE = 211
ULTRASONICPULSESTUN = 212
IRRADIATE = 213
NYDUSWORMLAVAINSTANTDEATH = 214
PREDATORCLOAKING = 215
PSIDISRUPTION = 216
MINDCONTROL = 217
QUEENKNOCKDOWN = 218
SCIENCEVESSELCLOAKFIELD = 219
SPORECANNONMISSILE = 220
ARTANISTEMPORALRIFTUNIT = 221
ARTANISCLOAKINGFIELDEFFECT = 222
ARTANISVORTEXBEHAVIOR = 223
INCAPACITATED = 224
KARASSPSISTORM = 225
DUTCHMARAUDERSLOW = 226
JUMPSTOMPSTUN = 227
JUMPSTOMPFSTUN = 228
RAYNORMISSILETIMEDLIFE = 229
PSIONICSHOCKWAVEHEIGHTANDSTUN = 230
SHADOWCLONE = 231
AUTOMATEDREPAIR = 232
SLIMED = 233
RAYNORTIMEBOMBMISSILE = 234
RAYNORTIMEBOMBUNIT = 235
TYCHUSCOMMANDOSTIMPACK = 236
VIRALPLASMA = 237
NAPALM = 238
BURSTCAPACITORSDAMAGEBUFF = 239
COLONYINFESTATION = 240
DOMINATION = 241
EMPBURST = 242
HYBRIDCZERGYROOTS = 243
HYBRIDFZERGYROOTS = 244
LOCKDOWNB = 245
SPECTRELOCKDOWNB = 246
VOODOOLOCKDOWN = 247
ZERATULSTUN = 248
BUILDINGSCARAB = 249
VORTEXBEHAVIORERADICATOR = 250
GHOSTBLAST = 251
HEROICBUFF03 = 252
CANNONRADAR = 253
SS_MISSILETARGETCHECKVIKING = 254
SS_MISSILETARGETCHECK = 255
SS_MAXSPEED = 256
SS_MAXACCELERATION = 257
SS_POWERUPDIAGONAL1 = 258
WATER = 259
DEFENSIVEMATRIX = 260
TESTATTRIBUTE = 261
TESTVETERANCY = 262
SHREDDERSWARMDAMAGEAPPLY = 263
CORRUPTORINFESTING = 264
MERCGROUNDDROPDELAY = 265
MERCGROUNDDROP = 266
MERCAIRDROPDELAY = 267
SPECTREHOLDFIRE = 268
SPECTREHOLDFIREB = 269
ITEMGRAVITYBOMBS = 270
CARRYMINERALFIELDMINERALS = 271
CARRYHIGHYIELDMINERALFIELDMINERALS = 272
CARRYHARVESTABLEVESPENEGEYSERGAS = 273
CARRYHARVESTABLEVESPENEGEYSERGASPROTOSS = 274
CARRYHARVESTABLEVESPENEGEYSERGASZERG = 275
PERMANENTLYCLOAKED = 276
RAVENSCRAMBLERMISSILE = 277
RAVENSHREDDERMISSILETIMEOUT = 278
RAVENSHREDDERMISSILETINT = 279
RAVENSHREDDERMISSILEARMORREDUCTION = 280
CHRONOBOOSTENERGYCOST = 281
NEXUSSHIELDRECHARGEONPYLONBEHAVIOR = 282
NEXUSSHIELDRECHARGEONPYLONBEHAVIORSECONDARYONTARGET = 283
INFESTORENSNARE = 284
INFESTORENSNAREMAKEPRECURSORREHEIGHTSOURCE = 285
NEXUSSHIELDOVERCHARGE = 286
PARASITICBOMBDELAYTIMEDLIFE = 287
TRANSFUSION = 288
ACCELERATIONZONETEMPORALFIELD = 289
ACCELERATIONZONEFLYINGTEMPORALFIELD = 290
INHIBITORZONEFLYINGTEMPORALFIELD = 291
INHIBITORZONETEMPORALFIELD = 292
RESONATINGGLAIVESPHASESHIFT = 293
AMORPHOUSARMORCLOUD = 294
RAVENSHREDDERMISSILEARMORREDUCTIONUISUBTRUCT = 295
def __repr__(self):
return f"BuffId.{self.name}"
# Re-export every buff as a module-level constant so callers can write
# e.g. `STIMPACK` instead of `BuffId.STIMPACK`; the assert guards against
# a generated name colliding with something already defined here.
for item in BuffId:
    assert not item.name in globals()
    globals()[item.name] = item
| [
"august.kaplan@gmail.com"
] | august.kaplan@gmail.com |
e54038178c4fa4d44718db97ca7343f349b2592c | d1de9fdc4a444ff1c322e09c684ccb5247c22164 | /OpenElectrophy/classes/neo/io/asciisignalio.py | 3e15feb9068b0f9c35d8cb45b0849551f6806123 | [] | no_license | AntoineValera/SynaptiQs | a178ddf5aa3269fe677afa68f6838db219763a65 | b44a27ba01262e68d74488f98502083c9d681eb6 | refs/heads/master | 2021-01-18T21:12:16.543581 | 2016-05-12T14:52:27 | 2016-05-12T14:52:27 | 40,709,264 | 0 | 0 | null | 2015-10-07T11:32:01 | 2015-08-14T10:17:16 | Python | UTF-8 | Python | false | false | 8,028 | py | # -*- coding: utf-8 -*-
"""
Class for reading/writing analog signals in a text file.
Covers many case when part of a file can be viewed as a CSV format.
Supported : Read/Write
@author : sgarcia
"""
from baseio import BaseIO
#from neo.core import *
from ..core import *
import numpy
from numpy import *
from baseio import BaseIO
from numpy import *
import csv
class AsciiSignalIO(BaseIO):
    """
    Class for reading/writing analog signals in a text file.

    Covers many cases when part of a file can be viewed as a CSV format:
    each signal is one column, optionally with one column holding the
    time vector.

    **Example**
        # read a file
        io = AsciiSignalIO(filename = 'myfile.txt')
        seg = io.read() # read the entire file
        seg.get_analogsignals() # return all AnalogSignals

        # write a file
        io = AsciiSignalIO(filename = 'myfile.txt')
        seg = Segment()
        io.write(seg)
    """

    # Capability flags consumed by the neo BaseIO framework.
    is_readable = True
    is_writable = True

    supported_objects = [Segment , AnalogSignal]
    readable_objects = [Segment]
    writeable_objects = [Segment]

    has_header = False
    is_streameable = False

    # Parameter descriptions (name -> default/choices) used to build
    # read dialogs for a Segment.
    read_params = {
        Segment : [
            ('delimiter' , {'value' : '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
            ('usecols' , { 'value' : None , 'type' : int } ),
            ('skiprows' , { 'value' :0 } ),
            ('timecolumn' , { 'value' : None, 'type' : int } ) ,
            ('samplerate' , { 'value' : 1000., } ),
            ('t_start' , { 'value' : 0., } ),
            ('method' , { 'value' : 'homemade', 'possible' : ['genfromtxt' , 'csv' , 'homemade' ] }) ,
            ]
        }
    # Parameter descriptions for writing a Segment.
    write_params = {
        Segment : [
            ('delimiter' , {'value' : '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
            ('timecolumn' , { 'value' : None, 'type' : int } ) ,
            ]
        }

    name = None
    extensions = [ 'txt' ]
    mode = 'file'

    def __init__(self , filename = None) :
        """
        Create an IO object bound to one text file.

        **Arguments**
            filename : the filename to read/write
        """
        BaseIO.__init__(self)
        self.filename = filename

    def read(self , **kargs):
        """
        Read the file.
        Return a neo.Segment.
        See read_segment for detail.
        """
        return self.read_segment( **kargs)
    def read_segment(self,
                        delimiter = '\t',
                        usecols = None,
                        skiprows =0,
                        timecolumn = None,
                        samplerate = 1000.,
                        t_start = 0.,
                        method = 'genfromtxt',
                        ):
        """
        Parse the whole text file and return a Block holding one Segment
        (and AnalogSignal) per signal column.

        **Arguments**
            delimiter : column delimiter in the file: '\t', one space,
                two spaces, ',' or ';'
            usecols : None to take all columns, otherwise a list of
                selected column indices
            skiprows : number of leading lines to skip (header lines)
            timecolumn : None, or the index of the column holding the
                time vector
            samplerate : sampling rate of the signals; ignored when
                timecolumn is given (the rate is derived from the times)
            t_start : time of the first sample; ignored when timecolumn
                is given
            method : 'genfromtxt', 'csv' or 'homemade' parsing backend;
                in case of bugs you can try another one.
                'genfromtxt' uses numpy.genfromtxt,
                'csv' uses the csv module,
                'homemade' is an intuitive, more robust but slow parser.
        """
        #loadtxt
        if method == 'genfromtxt' :
            sig = genfromtxt(self.filename,
                            delimiter = delimiter,
                            usecols = usecols ,
                            skiprows = skiprows,
                            dtype = 'f4')
            # A single-column file comes back 1-D; force a 2-D shape.
            if len(sig.shape) ==1:
                sig = sig[:,newaxis]
        elif method == 'csv' :
            tab = [l for l in csv.reader( open(self.filename,'rU') , delimiter = delimiter ) ]
            tab = tab[skiprows:]
            sig = array( tab , dtype = 'f4')
        elif method == 'homemade' :
            fid = open(self.filename,'rU')
            for l in range(skiprows):
                fid.readline()
            tab = [ ]
            for line in fid.readlines():
                # Strip line endings, split on the delimiter, and drop
                # empty fields caused by repeated delimiters.
                line = line.replace('\r','')
                line = line.replace('\n','')
                l = line.split(delimiter)
                while '' in l :
                    l.remove('')
                tab.append(l)
            sig = array( tab , dtype = 'f4')

        # A real time column overrides the samplerate/t_start arguments.
        if timecolumn is not None:
            samplerate = 1./mean(diff(sig[:,timecolumn]))
            t_start = sig[0,timecolumn]

        #TODO :
        #Add channel support here
        blck=Block()
        for i in xrange(sig.shape[1]) :
            seg = Segment()
            # Skip the time column: it is not a signal.
            if usecols is not None :
                if timecolumn == usecols[i] :
                    # time comlumn not a signal
                    continue
            else :
                if timecolumn == i :
                    continue
            #print 'lkjjlkj', len(sig[:,i])
            analogSig = AnalogSignal( signal = sig[:,i] ,
                                    sampling_rate = samplerate,
                                    t_start = t_start)
            analogSig.channel = i
            analogSig.name = 'SynaptiQsImport'
            seg._analogsignals.append( analogSig )
            blck._segments.append(seg)
        return blck
def write(self , *args , **kargs):
"""
Write segment in a raw file.
See write_segment for detail.
"""
self.write_segment(*args , **kargs)
def write_segment(self, segment,
delimiter = '\t',
skiprows =0,
timecolumn = None,
):
"""
Write a segment and AnalogSignal in a text file.
**Arguments**
delimiter : columns delimiter in file '\t' or one space or two space or ',' or ';'
skiprows : skip n first lines in case they contains header informations
timecolumn : None or a valid int that point the time vector
"""
sigs = None
for analogSig in segment.get_analogsignals():
if sigs is None :
sigs = analogSig.signal[:,newaxis]
else :
sigs = concatenate ((sigs, analogSig.signal[:,newaxis]) , axis = 1 )
if timecolumn is not None:
t = segment.get_analogsignals()[0].t()
print sigs.shape , t.shape
sigs = concatenate ((sigs, t[:,newaxis]*nan) , axis = 1 )
sigs[:,timecolumn+1:] = sigs[:,timecolumn:-1].copy()
sigs[:,timecolumn] = t
savetxt(self.filename , sigs , delimiter = delimiter)
| [
"a.valera@ucl.ac.uk"
] | a.valera@ucl.ac.uk |
7981081268ce40b43f61b4b1ac9d555a9fc68f34 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2464487_1/Python/axelbrz/a.py | 994845f27594079c0fb1112cac19471ac08a4d95 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | def paintRC(r, n):
return 2*n*r + n + 2*(n*(n-1))
def solve(m):
r, t = m.split(" ")
r, t = int(r), int(t)
d = 1
while paintRC(r, d) <= t:
d *= 2
i = 0
m = (d+i)/2
_m = m-1
while True:
if m == _m: return m
_m = m
#print i, m, d
p = paintRC(r, m)
if p < t: i = m
elif p > t: d = m
else: return m
m = (d+i)/2
return m
prob = "a"
f = open(prob+".in","r")
d = f.read()[:-1]
f.close()
f = open(prob+".out","w")
ms = "\n".join(d.split("\n")[1:]).split("\n")
T = 1
for m in ms:
S = "Case #%d: %s" % (T, solve(m))
print S
f.write(S + "\n")
T += 1
f.close()
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
3f1b9b261e4a4d694620e0ce8d59268a346bd906 | 286f8399eff95792886a2a0fdef88f87a9f6b8a4 | /demo/basics/enumerate_demo.py | f4feaf12ef1fe91d9c949ba01d74fad930702bef | [] | no_license | srikanthpragada/PYTHON_30_APR_2020 | 9e09537bc77e5ce8247b60f333267a94413bc32f | 72645c6644f00beca3c10b30e103073aa1212ccd | refs/heads/master | 2022-10-20T04:41:15.450004 | 2020-06-15T02:29:37 | 2020-06-15T02:29:37 | 260,595,703 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | l = [31, 20, 10]
for i,v in enumerate(l):
print(i,v)
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
7bfe7a5915f7160ca7a1a6ceb0804daf9b43cdb7 | 49296c69348c743f234807ff6390687079b6b5d9 | /client/server_lib/omero_model_Pixels_ice.py | 8b5361393d5f0753382ee4e9d47806981f6cb73f | [] | no_license | crs4/omero.biobank-docker | 2cb4b00f37115916d5b140cbdaf24c12bd8be9ef | e332a6eccad44384cd6a3a12e6da17eb89a6cd96 | refs/heads/master | 2023-09-02T04:36:21.401597 | 2014-12-07T17:34:27 | 2014-12-07T17:34:27 | 26,125,831 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91,688 | py | # **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
#
# Ice version 3.4.2
#
# <auto-generated>
#
# Generated from file `Pixels.ice'
#
# Warning: do not edit this file.
#
# </auto-generated>
#
import Ice, IcePy, __builtin__
import omero_model_IObject_ice
import omero_RTypes_ice
import omero_System_ice
import omero_Collections_ice
# Included module omero
_M_omero = Ice.openModule('omero')
# Included module omero.model
_M_omero.model = Ice.openModule('omero.model')
# Included module Ice
_M_Ice = Ice.openModule('Ice')
# Included module omero.sys
_M_omero.sys = Ice.openModule('omero.sys')
# Included module omero.api
_M_omero.api = Ice.openModule('omero.api')
# Start of module omero
__name__ = 'omero'
# Start of module omero.model
__name__ = 'omero.model'
if not _M_omero.model.__dict__.has_key('Image'):
_M_omero.model._t_Image = IcePy.declareClass('::omero::model::Image')
_M_omero.model._t_ImagePrx = IcePy.declareProxy('::omero::model::Image')
if not _M_omero.model.__dict__.has_key('Pixels'):
_M_omero.model._t_Pixels = IcePy.declareClass('::omero::model::Pixels')
_M_omero.model._t_PixelsPrx = IcePy.declareProxy('::omero::model::Pixels')
if not _M_omero.model.__dict__.has_key('PixelsType'):
_M_omero.model._t_PixelsType = IcePy.declareClass('::omero::model::PixelsType')
_M_omero.model._t_PixelsTypePrx = IcePy.declareProxy('::omero::model::PixelsType')
if not _M_omero.model.__dict__.has_key('DimensionOrder'):
_M_omero.model._t_DimensionOrder = IcePy.declareClass('::omero::model::DimensionOrder')
_M_omero.model._t_DimensionOrderPrx = IcePy.declareProxy('::omero::model::DimensionOrder')
if not _M_omero.model.__dict__.has_key('PlaneInfo'):
_M_omero.model._t_PlaneInfo = IcePy.declareClass('::omero::model::PlaneInfo')
_M_omero.model._t_PlaneInfoPrx = IcePy.declareProxy('::omero::model::PlaneInfo')
if not _M_omero.model.__dict__.has_key('PixelsOriginalFileMap'):
_M_omero.model._t_PixelsOriginalFileMap = IcePy.declareClass('::omero::model::PixelsOriginalFileMap')
_M_omero.model._t_PixelsOriginalFileMapPrx = IcePy.declareProxy('::omero::model::PixelsOriginalFileMap')
if not _M_omero.model.__dict__.has_key('OriginalFile'):
_M_omero.model._t_OriginalFile = IcePy.declareClass('::omero::model::OriginalFile')
_M_omero.model._t_OriginalFilePrx = IcePy.declareProxy('::omero::model::OriginalFile')
if not _M_omero.model.__dict__.has_key('Channel'):
_M_omero.model._t_Channel = IcePy.declareClass('::omero::model::Channel')
_M_omero.model._t_ChannelPrx = IcePy.declareProxy('::omero::model::Channel')
if not _M_omero.model.__dict__.has_key('RenderingDef'):
_M_omero.model._t_RenderingDef = IcePy.declareClass('::omero::model::RenderingDef')
_M_omero.model._t_RenderingDefPrx = IcePy.declareProxy('::omero::model::RenderingDef')
if not _M_omero.model.__dict__.has_key('Thumbnail'):
_M_omero.model._t_Thumbnail = IcePy.declareClass('::omero::model::Thumbnail')
_M_omero.model._t_ThumbnailPrx = IcePy.declareProxy('::omero::model::Thumbnail')
if not _M_omero.model.__dict__.has_key('PixelsAnnotationLink'):
_M_omero.model._t_PixelsAnnotationLink = IcePy.declareClass('::omero::model::PixelsAnnotationLink')
_M_omero.model._t_PixelsAnnotationLinkPrx = IcePy.declareProxy('::omero::model::PixelsAnnotationLink')
if not _M_omero.model.__dict__.has_key('Annotation'):
_M_omero.model._t_Annotation = IcePy.declareClass('::omero::model::Annotation')
_M_omero.model._t_AnnotationPrx = IcePy.declareProxy('::omero::model::Annotation')
if not _M_omero.model.__dict__.has_key('Details'):
_M_omero.model._t_Details = IcePy.declareClass('::omero::model::Details')
_M_omero.model._t_DetailsPrx = IcePy.declareProxy('::omero::model::Details')
if not _M_omero.model.__dict__.has_key('_t_PixelsPlaneInfoSeq'):
_M_omero.model._t_PixelsPlaneInfoSeq = IcePy.defineSequence('::omero::model::PixelsPlaneInfoSeq', (), _M_omero.model._t_PlaneInfo)
if not _M_omero.model.__dict__.has_key('_t_PixelsPixelsFileMapsSeq'):
_M_omero.model._t_PixelsPixelsFileMapsSeq = IcePy.defineSequence('::omero::model::PixelsPixelsFileMapsSeq', (), _M_omero.model._t_PixelsOriginalFileMap)
if not _M_omero.model.__dict__.has_key('_t_PixelsLinkedOriginalFileSeq'):
_M_omero.model._t_PixelsLinkedOriginalFileSeq = IcePy.defineSequence('::omero::model::PixelsLinkedOriginalFileSeq', (), _M_omero.model._t_OriginalFile)
if not _M_omero.model.__dict__.has_key('_t_PixelsChannelsSeq'):
_M_omero.model._t_PixelsChannelsSeq = IcePy.defineSequence('::omero::model::PixelsChannelsSeq', (), _M_omero.model._t_Channel)
if not _M_omero.model.__dict__.has_key('_t_PixelsSettingsSeq'):
_M_omero.model._t_PixelsSettingsSeq = IcePy.defineSequence('::omero::model::PixelsSettingsSeq', (), _M_omero.model._t_RenderingDef)
if not _M_omero.model.__dict__.has_key('_t_PixelsThumbnailsSeq'):
_M_omero.model._t_PixelsThumbnailsSeq = IcePy.defineSequence('::omero::model::PixelsThumbnailsSeq', (), _M_omero.model._t_Thumbnail)
if not _M_omero.model.__dict__.has_key('_t_PixelsAnnotationLinksSeq'):
_M_omero.model._t_PixelsAnnotationLinksSeq = IcePy.defineSequence('::omero::model::PixelsAnnotationLinksSeq', (), _M_omero.model._t_PixelsAnnotationLink)
if not _M_omero.model.__dict__.has_key('_t_PixelsLinkedAnnotationSeq'):
_M_omero.model._t_PixelsLinkedAnnotationSeq = IcePy.defineSequence('::omero::model::PixelsLinkedAnnotationSeq', (), _M_omero.model._t_Annotation)
if not _M_omero.model.__dict__.has_key('Pixels'):
_M_omero.model.Pixels = Ice.createTempClass()
class Pixels(_M_omero.model.IObject):
def __init__(self, _id=None, _details=None, _loaded=False, _version=None, _image=None, _relatedTo=None, _pixelsType=None, _sizeX=None, _sizeY=None, _sizeZ=None, _sizeC=None, _sizeT=None, _sha1=None, _dimensionOrder=None, _physicalSizeX=None, _physicalSizeY=None, _physicalSizeZ=None, _waveStart=None, _waveIncrement=None, _timeIncrement=None, _methodology=None, _planeInfoSeq=None, _planeInfoLoaded=False, _pixelsFileMapsSeq=None, _pixelsFileMapsLoaded=False, _pixelsFileMapsCountPerOwner=None, _channelsSeq=None, _channelsLoaded=False, _settingsSeq=None, _settingsLoaded=False, _thumbnailsSeq=None, _thumbnailsLoaded=False, _annotationLinksSeq=None, _annotationLinksLoaded=False, _annotationLinksCountPerOwner=None):
if __builtin__.type(self) == _M_omero.model.Pixels:
raise RuntimeError('omero.model.Pixels is an abstract class')
_M_omero.model.IObject.__init__(self, _id, _details, _loaded)
self._version = _version
self._image = _image
self._relatedTo = _relatedTo
self._pixelsType = _pixelsType
self._sizeX = _sizeX
self._sizeY = _sizeY
self._sizeZ = _sizeZ
self._sizeC = _sizeC
self._sizeT = _sizeT
self._sha1 = _sha1
self._dimensionOrder = _dimensionOrder
self._physicalSizeX = _physicalSizeX
self._physicalSizeY = _physicalSizeY
self._physicalSizeZ = _physicalSizeZ
self._waveStart = _waveStart
self._waveIncrement = _waveIncrement
self._timeIncrement = _timeIncrement
self._methodology = _methodology
self._planeInfoSeq = _planeInfoSeq
self._planeInfoLoaded = _planeInfoLoaded
self._pixelsFileMapsSeq = _pixelsFileMapsSeq
self._pixelsFileMapsLoaded = _pixelsFileMapsLoaded
self._pixelsFileMapsCountPerOwner = _pixelsFileMapsCountPerOwner
self._channelsSeq = _channelsSeq
self._channelsLoaded = _channelsLoaded
self._settingsSeq = _settingsSeq
self._settingsLoaded = _settingsLoaded
self._thumbnailsSeq = _thumbnailsSeq
self._thumbnailsLoaded = _thumbnailsLoaded
self._annotationLinksSeq = _annotationLinksSeq
self._annotationLinksLoaded = _annotationLinksLoaded
self._annotationLinksCountPerOwner = _annotationLinksCountPerOwner
def ice_ids(self, current=None):
return ('::Ice::Object', '::omero::model::IObject', '::omero::model::Pixels')
def ice_id(self, current=None):
return '::omero::model::Pixels'
def ice_staticId():
return '::omero::model::Pixels'
ice_staticId = staticmethod(ice_staticId)
def getVersion(self, current=None):
pass
def setVersion(self, theVersion, current=None):
pass
def getImage(self, current=None):
pass
def setImage(self, theImage, current=None):
pass
def getRelatedTo(self, current=None):
pass
def setRelatedTo(self, theRelatedTo, current=None):
pass
def getPixelsType(self, current=None):
pass
def setPixelsType(self, thePixelsType, current=None):
pass
def getSizeX(self, current=None):
pass
def setSizeX(self, theSizeX, current=None):
pass
def getSizeY(self, current=None):
pass
def setSizeY(self, theSizeY, current=None):
pass
def getSizeZ(self, current=None):
pass
def setSizeZ(self, theSizeZ, current=None):
pass
def getSizeC(self, current=None):
pass
def setSizeC(self, theSizeC, current=None):
pass
def getSizeT(self, current=None):
pass
def setSizeT(self, theSizeT, current=None):
pass
def getSha1(self, current=None):
pass
def setSha1(self, theSha1, current=None):
pass
def getDimensionOrder(self, current=None):
pass
def setDimensionOrder(self, theDimensionOrder, current=None):
pass
def getPhysicalSizeX(self, current=None):
pass
def setPhysicalSizeX(self, thePhysicalSizeX, current=None):
pass
def getPhysicalSizeY(self, current=None):
pass
def setPhysicalSizeY(self, thePhysicalSizeY, current=None):
pass
def getPhysicalSizeZ(self, current=None):
pass
def setPhysicalSizeZ(self, thePhysicalSizeZ, current=None):
pass
def getWaveStart(self, current=None):
pass
def setWaveStart(self, theWaveStart, current=None):
pass
def getWaveIncrement(self, current=None):
pass
def setWaveIncrement(self, theWaveIncrement, current=None):
pass
def getTimeIncrement(self, current=None):
pass
def setTimeIncrement(self, theTimeIncrement, current=None):
pass
def getMethodology(self, current=None):
pass
def setMethodology(self, theMethodology, current=None):
pass
def unloadPlaneInfo(self, current=None):
pass
def sizeOfPlaneInfo(self, current=None):
pass
def copyPlaneInfo(self, current=None):
pass
def addPlaneInfo(self, target, current=None):
pass
def addAllPlaneInfoSet(self, targets, current=None):
pass
def removePlaneInfo(self, theTarget, current=None):
pass
def removeAllPlaneInfoSet(self, targets, current=None):
pass
def clearPlaneInfo(self, current=None):
pass
def reloadPlaneInfo(self, toCopy, current=None):
pass
def unloadPixelsFileMaps(self, current=None):
pass
def sizeOfPixelsFileMaps(self, current=None):
pass
def copyPixelsFileMaps(self, current=None):
pass
def addPixelsOriginalFileMap(self, target, current=None):
pass
def addAllPixelsOriginalFileMapSet(self, targets, current=None):
pass
def removePixelsOriginalFileMap(self, theTarget, current=None):
pass
def removeAllPixelsOriginalFileMapSet(self, targets, current=None):
pass
def clearPixelsFileMaps(self, current=None):
pass
def reloadPixelsFileMaps(self, toCopy, current=None):
pass
def getPixelsFileMapsCountPerOwner(self, current=None):
pass
def linkOriginalFile(self, addition, current=None):
pass
def addPixelsOriginalFileMapToBoth(self, link, bothSides, current=None):
pass
def findPixelsOriginalFileMap(self, removal, current=None):
pass
def unlinkOriginalFile(self, removal, current=None):
pass
def removePixelsOriginalFileMapFromBoth(self, link, bothSides, current=None):
pass
def linkedOriginalFileList(self, current=None):
pass
def unloadChannels(self, current=None):
pass
def sizeOfChannels(self, current=None):
pass
def copyChannels(self, current=None):
pass
def addChannel(self, target, current=None):
pass
def addAllChannelSet(self, targets, current=None):
pass
def removeChannel(self, theTarget, current=None):
pass
def removeAllChannelSet(self, targets, current=None):
pass
def clearChannels(self, current=None):
pass
def reloadChannels(self, toCopy, current=None):
pass
def getChannel(self, index, current=None):
pass
def setChannel(self, index, theElement, current=None):
pass
def getPrimaryChannel(self, current=None):
pass
def setPrimaryChannel(self, theElement, current=None):
pass
def unloadSettings(self, current=None):
pass
def sizeOfSettings(self, current=None):
pass
def copySettings(self, current=None):
pass
def addRenderingDef(self, target, current=None):
pass
def addAllRenderingDefSet(self, targets, current=None):
pass
def removeRenderingDef(self, theTarget, current=None):
pass
def removeAllRenderingDefSet(self, targets, current=None):
pass
def clearSettings(self, current=None):
pass
def reloadSettings(self, toCopy, current=None):
pass
def unloadThumbnails(self, current=None):
pass
def sizeOfThumbnails(self, current=None):
pass
def copyThumbnails(self, current=None):
pass
def addThumbnail(self, target, current=None):
pass
def addAllThumbnailSet(self, targets, current=None):
pass
def removeThumbnail(self, theTarget, current=None):
pass
def removeAllThumbnailSet(self, targets, current=None):
pass
def clearThumbnails(self, current=None):
pass
def reloadThumbnails(self, toCopy, current=None):
pass
def unloadAnnotationLinks(self, current=None):
pass
def sizeOfAnnotationLinks(self, current=None):
pass
def copyAnnotationLinks(self, current=None):
pass
def addPixelsAnnotationLink(self, target, current=None):
pass
def addAllPixelsAnnotationLinkSet(self, targets, current=None):
pass
def removePixelsAnnotationLink(self, theTarget, current=None):
pass
def removeAllPixelsAnnotationLinkSet(self, targets, current=None):
pass
def clearAnnotationLinks(self, current=None):
pass
def reloadAnnotationLinks(self, toCopy, current=None):
pass
def getAnnotationLinksCountPerOwner(self, current=None):
pass
def linkAnnotation(self, addition, current=None):
pass
def addPixelsAnnotationLinkToBoth(self, link, bothSides, current=None):
pass
def findPixelsAnnotationLink(self, removal, current=None):
pass
def unlinkAnnotation(self, removal, current=None):
pass
def removePixelsAnnotationLinkFromBoth(self, link, bothSides, current=None):
pass
def linkedAnnotationList(self, current=None):
pass
def __str__(self):
return IcePy.stringify(self, _M_omero.model._t_Pixels)
__repr__ = __str__
_M_omero.model.PixelsPrx = Ice.createTempClass()
class PixelsPrx(_M_omero.model.IObjectPrx):
def getVersion(self, _ctx=None):
return _M_omero.model.Pixels._op_getVersion.invoke(self, ((), _ctx))
def begin_getVersion(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getVersion.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getVersion(self, _r):
return _M_omero.model.Pixels._op_getVersion.end(self, _r)
def setVersion(self, theVersion, _ctx=None):
return _M_omero.model.Pixels._op_setVersion.invoke(self, ((theVersion, ), _ctx))
def begin_setVersion(self, theVersion, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setVersion.begin(self, ((theVersion, ), _response, _ex, _sent, _ctx))
def end_setVersion(self, _r):
return _M_omero.model.Pixels._op_setVersion.end(self, _r)
def getImage(self, _ctx=None):
return _M_omero.model.Pixels._op_getImage.invoke(self, ((), _ctx))
def begin_getImage(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getImage.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getImage(self, _r):
return _M_omero.model.Pixels._op_getImage.end(self, _r)
def setImage(self, theImage, _ctx=None):
return _M_omero.model.Pixels._op_setImage.invoke(self, ((theImage, ), _ctx))
def begin_setImage(self, theImage, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setImage.begin(self, ((theImage, ), _response, _ex, _sent, _ctx))
def end_setImage(self, _r):
return _M_omero.model.Pixels._op_setImage.end(self, _r)
def getRelatedTo(self, _ctx=None):
return _M_omero.model.Pixels._op_getRelatedTo.invoke(self, ((), _ctx))
def begin_getRelatedTo(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getRelatedTo.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getRelatedTo(self, _r):
return _M_omero.model.Pixels._op_getRelatedTo.end(self, _r)
def setRelatedTo(self, theRelatedTo, _ctx=None):
return _M_omero.model.Pixels._op_setRelatedTo.invoke(self, ((theRelatedTo, ), _ctx))
def begin_setRelatedTo(self, theRelatedTo, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setRelatedTo.begin(self, ((theRelatedTo, ), _response, _ex, _sent, _ctx))
def end_setRelatedTo(self, _r):
return _M_omero.model.Pixels._op_setRelatedTo.end(self, _r)
def getPixelsType(self, _ctx=None):
return _M_omero.model.Pixels._op_getPixelsType.invoke(self, ((), _ctx))
def begin_getPixelsType(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getPixelsType.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getPixelsType(self, _r):
return _M_omero.model.Pixels._op_getPixelsType.end(self, _r)
def setPixelsType(self, thePixelsType, _ctx=None):
return _M_omero.model.Pixels._op_setPixelsType.invoke(self, ((thePixelsType, ), _ctx))
def begin_setPixelsType(self, thePixelsType, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setPixelsType.begin(self, ((thePixelsType, ), _response, _ex, _sent, _ctx))
def end_setPixelsType(self, _r):
return _M_omero.model.Pixels._op_setPixelsType.end(self, _r)
def getSizeX(self, _ctx=None):
return _M_omero.model.Pixels._op_getSizeX.invoke(self, ((), _ctx))
def begin_getSizeX(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getSizeX.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getSizeX(self, _r):
return _M_omero.model.Pixels._op_getSizeX.end(self, _r)
def setSizeX(self, theSizeX, _ctx=None):
return _M_omero.model.Pixels._op_setSizeX.invoke(self, ((theSizeX, ), _ctx))
def begin_setSizeX(self, theSizeX, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setSizeX.begin(self, ((theSizeX, ), _response, _ex, _sent, _ctx))
def end_setSizeX(self, _r):
return _M_omero.model.Pixels._op_setSizeX.end(self, _r)
def getSizeY(self, _ctx=None):
return _M_omero.model.Pixels._op_getSizeY.invoke(self, ((), _ctx))
def begin_getSizeY(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getSizeY.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getSizeY(self, _r):
return _M_omero.model.Pixels._op_getSizeY.end(self, _r)
def setSizeY(self, theSizeY, _ctx=None):
return _M_omero.model.Pixels._op_setSizeY.invoke(self, ((theSizeY, ), _ctx))
def begin_setSizeY(self, theSizeY, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setSizeY.begin(self, ((theSizeY, ), _response, _ex, _sent, _ctx))
def end_setSizeY(self, _r):
return _M_omero.model.Pixels._op_setSizeY.end(self, _r)
def getSizeZ(self, _ctx=None):
return _M_omero.model.Pixels._op_getSizeZ.invoke(self, ((), _ctx))
def begin_getSizeZ(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getSizeZ.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getSizeZ(self, _r):
return _M_omero.model.Pixels._op_getSizeZ.end(self, _r)
def setSizeZ(self, theSizeZ, _ctx=None):
return _M_omero.model.Pixels._op_setSizeZ.invoke(self, ((theSizeZ, ), _ctx))
def begin_setSizeZ(self, theSizeZ, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setSizeZ.begin(self, ((theSizeZ, ), _response, _ex, _sent, _ctx))
def end_setSizeZ(self, _r):
return _M_omero.model.Pixels._op_setSizeZ.end(self, _r)
def getSizeC(self, _ctx=None):
return _M_omero.model.Pixels._op_getSizeC.invoke(self, ((), _ctx))
def begin_getSizeC(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getSizeC.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getSizeC(self, _r):
return _M_omero.model.Pixels._op_getSizeC.end(self, _r)
def setSizeC(self, theSizeC, _ctx=None):
return _M_omero.model.Pixels._op_setSizeC.invoke(self, ((theSizeC, ), _ctx))
def begin_setSizeC(self, theSizeC, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setSizeC.begin(self, ((theSizeC, ), _response, _ex, _sent, _ctx))
def end_setSizeC(self, _r):
return _M_omero.model.Pixels._op_setSizeC.end(self, _r)
def getSizeT(self, _ctx=None):
return _M_omero.model.Pixels._op_getSizeT.invoke(self, ((), _ctx))
def begin_getSizeT(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getSizeT.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getSizeT(self, _r):
return _M_omero.model.Pixels._op_getSizeT.end(self, _r)
def setSizeT(self, theSizeT, _ctx=None):
return _M_omero.model.Pixels._op_setSizeT.invoke(self, ((theSizeT, ), _ctx))
def begin_setSizeT(self, theSizeT, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setSizeT.begin(self, ((theSizeT, ), _response, _ex, _sent, _ctx))
def end_setSizeT(self, _r):
return _M_omero.model.Pixels._op_setSizeT.end(self, _r)
def getSha1(self, _ctx=None):
return _M_omero.model.Pixels._op_getSha1.invoke(self, ((), _ctx))
def begin_getSha1(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getSha1.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getSha1(self, _r):
return _M_omero.model.Pixels._op_getSha1.end(self, _r)
def setSha1(self, theSha1, _ctx=None):
return _M_omero.model.Pixels._op_setSha1.invoke(self, ((theSha1, ), _ctx))
def begin_setSha1(self, theSha1, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setSha1.begin(self, ((theSha1, ), _response, _ex, _sent, _ctx))
def end_setSha1(self, _r):
return _M_omero.model.Pixels._op_setSha1.end(self, _r)
def getDimensionOrder(self, _ctx=None):
return _M_omero.model.Pixels._op_getDimensionOrder.invoke(self, ((), _ctx))
def begin_getDimensionOrder(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getDimensionOrder.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getDimensionOrder(self, _r):
return _M_omero.model.Pixels._op_getDimensionOrder.end(self, _r)
def setDimensionOrder(self, theDimensionOrder, _ctx=None):
return _M_omero.model.Pixels._op_setDimensionOrder.invoke(self, ((theDimensionOrder, ), _ctx))
def begin_setDimensionOrder(self, theDimensionOrder, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setDimensionOrder.begin(self, ((theDimensionOrder, ), _response, _ex, _sent, _ctx))
def end_setDimensionOrder(self, _r):
return _M_omero.model.Pixels._op_setDimensionOrder.end(self, _r)
def getPhysicalSizeX(self, _ctx=None):
return _M_omero.model.Pixels._op_getPhysicalSizeX.invoke(self, ((), _ctx))
def begin_getPhysicalSizeX(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getPhysicalSizeX.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getPhysicalSizeX(self, _r):
return _M_omero.model.Pixels._op_getPhysicalSizeX.end(self, _r)
def setPhysicalSizeX(self, thePhysicalSizeX, _ctx=None):
return _M_omero.model.Pixels._op_setPhysicalSizeX.invoke(self, ((thePhysicalSizeX, ), _ctx))
def begin_setPhysicalSizeX(self, thePhysicalSizeX, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setPhysicalSizeX.begin(self, ((thePhysicalSizeX, ), _response, _ex, _sent, _ctx))
def end_setPhysicalSizeX(self, _r):
return _M_omero.model.Pixels._op_setPhysicalSizeX.end(self, _r)
def getPhysicalSizeY(self, _ctx=None):
return _M_omero.model.Pixels._op_getPhysicalSizeY.invoke(self, ((), _ctx))
def begin_getPhysicalSizeY(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getPhysicalSizeY.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getPhysicalSizeY(self, _r):
return _M_omero.model.Pixels._op_getPhysicalSizeY.end(self, _r)
def setPhysicalSizeY(self, thePhysicalSizeY, _ctx=None):
return _M_omero.model.Pixels._op_setPhysicalSizeY.invoke(self, ((thePhysicalSizeY, ), _ctx))
def begin_setPhysicalSizeY(self, thePhysicalSizeY, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setPhysicalSizeY.begin(self, ((thePhysicalSizeY, ), _response, _ex, _sent, _ctx))
def end_setPhysicalSizeY(self, _r):
return _M_omero.model.Pixels._op_setPhysicalSizeY.end(self, _r)
def getPhysicalSizeZ(self, _ctx=None):
return _M_omero.model.Pixels._op_getPhysicalSizeZ.invoke(self, ((), _ctx))
def begin_getPhysicalSizeZ(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getPhysicalSizeZ.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getPhysicalSizeZ(self, _r):
return _M_omero.model.Pixels._op_getPhysicalSizeZ.end(self, _r)
def setPhysicalSizeZ(self, thePhysicalSizeZ, _ctx=None):
return _M_omero.model.Pixels._op_setPhysicalSizeZ.invoke(self, ((thePhysicalSizeZ, ), _ctx))
def begin_setPhysicalSizeZ(self, thePhysicalSizeZ, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setPhysicalSizeZ.begin(self, ((thePhysicalSizeZ, ), _response, _ex, _sent, _ctx))
def end_setPhysicalSizeZ(self, _r):
return _M_omero.model.Pixels._op_setPhysicalSizeZ.end(self, _r)
def getWaveStart(self, _ctx=None):
return _M_omero.model.Pixels._op_getWaveStart.invoke(self, ((), _ctx))
def begin_getWaveStart(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getWaveStart.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getWaveStart(self, _r):
return _M_omero.model.Pixels._op_getWaveStart.end(self, _r)
def setWaveStart(self, theWaveStart, _ctx=None):
return _M_omero.model.Pixels._op_setWaveStart.invoke(self, ((theWaveStart, ), _ctx))
def begin_setWaveStart(self, theWaveStart, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setWaveStart.begin(self, ((theWaveStart, ), _response, _ex, _sent, _ctx))
def end_setWaveStart(self, _r):
return _M_omero.model.Pixels._op_setWaveStart.end(self, _r)
def getWaveIncrement(self, _ctx=None):
return _M_omero.model.Pixels._op_getWaveIncrement.invoke(self, ((), _ctx))
def begin_getWaveIncrement(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getWaveIncrement.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getWaveIncrement(self, _r):
return _M_omero.model.Pixels._op_getWaveIncrement.end(self, _r)
def setWaveIncrement(self, theWaveIncrement, _ctx=None):
return _M_omero.model.Pixels._op_setWaveIncrement.invoke(self, ((theWaveIncrement, ), _ctx))
def begin_setWaveIncrement(self, theWaveIncrement, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setWaveIncrement.begin(self, ((theWaveIncrement, ), _response, _ex, _sent, _ctx))
def end_setWaveIncrement(self, _r):
return _M_omero.model.Pixels._op_setWaveIncrement.end(self, _r)
def getTimeIncrement(self, _ctx=None):
return _M_omero.model.Pixels._op_getTimeIncrement.invoke(self, ((), _ctx))
def begin_getTimeIncrement(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getTimeIncrement.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getTimeIncrement(self, _r):
return _M_omero.model.Pixels._op_getTimeIncrement.end(self, _r)
def setTimeIncrement(self, theTimeIncrement, _ctx=None):
return _M_omero.model.Pixels._op_setTimeIncrement.invoke(self, ((theTimeIncrement, ), _ctx))
def begin_setTimeIncrement(self, theTimeIncrement, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setTimeIncrement.begin(self, ((theTimeIncrement, ), _response, _ex, _sent, _ctx))
def end_setTimeIncrement(self, _r):
return _M_omero.model.Pixels._op_setTimeIncrement.end(self, _r)
def getMethodology(self, _ctx=None):
return _M_omero.model.Pixels._op_getMethodology.invoke(self, ((), _ctx))
def begin_getMethodology(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_getMethodology.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getMethodology(self, _r):
return _M_omero.model.Pixels._op_getMethodology.end(self, _r)
def setMethodology(self, theMethodology, _ctx=None):
return _M_omero.model.Pixels._op_setMethodology.invoke(self, ((theMethodology, ), _ctx))
def begin_setMethodology(self, theMethodology, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.Pixels._op_setMethodology.begin(self, ((theMethodology, ), _response, _ex, _sent, _ctx))
def end_setMethodology(self, _r):
return _M_omero.model.Pixels._op_setMethodology.end(self, _r)
# --- planeInfo collection operations (generated Ice proxy code) -----------
# Standard OMERO collection protocol for the Pixels->PlaneInfo 1:N link:
# unload/sizeOf/copy/add/addAll/remove/removeAll/clear/reload, each with its
# AMI begin_/end_ pair delegating to a shared IcePy.Operation.
def unloadPlaneInfo(self, _ctx=None):
    return _M_omero.model.Pixels._op_unloadPlaneInfo.invoke(self, ((), _ctx))
def begin_unloadPlaneInfo(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_unloadPlaneInfo.begin(self, ((), _response, _ex, _sent, _ctx))
def end_unloadPlaneInfo(self, _r):
    return _M_omero.model.Pixels._op_unloadPlaneInfo.end(self, _r)
def sizeOfPlaneInfo(self, _ctx=None):
    return _M_omero.model.Pixels._op_sizeOfPlaneInfo.invoke(self, ((), _ctx))
def begin_sizeOfPlaneInfo(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_sizeOfPlaneInfo.begin(self, ((), _response, _ex, _sent, _ctx))
def end_sizeOfPlaneInfo(self, _r):
    return _M_omero.model.Pixels._op_sizeOfPlaneInfo.end(self, _r)
def copyPlaneInfo(self, _ctx=None):
    return _M_omero.model.Pixels._op_copyPlaneInfo.invoke(self, ((), _ctx))
def begin_copyPlaneInfo(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_copyPlaneInfo.begin(self, ((), _response, _ex, _sent, _ctx))
def end_copyPlaneInfo(self, _r):
    return _M_omero.model.Pixels._op_copyPlaneInfo.end(self, _r)
# Single-element and bulk mutators (target/targets are PlaneInfo instances).
def addPlaneInfo(self, target, _ctx=None):
    return _M_omero.model.Pixels._op_addPlaneInfo.invoke(self, ((target, ), _ctx))
def begin_addPlaneInfo(self, target, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addPlaneInfo.begin(self, ((target, ), _response, _ex, _sent, _ctx))
def end_addPlaneInfo(self, _r):
    return _M_omero.model.Pixels._op_addPlaneInfo.end(self, _r)
def addAllPlaneInfoSet(self, targets, _ctx=None):
    return _M_omero.model.Pixels._op_addAllPlaneInfoSet.invoke(self, ((targets, ), _ctx))
def begin_addAllPlaneInfoSet(self, targets, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addAllPlaneInfoSet.begin(self, ((targets, ), _response, _ex, _sent, _ctx))
def end_addAllPlaneInfoSet(self, _r):
    return _M_omero.model.Pixels._op_addAllPlaneInfoSet.end(self, _r)
def removePlaneInfo(self, theTarget, _ctx=None):
    return _M_omero.model.Pixels._op_removePlaneInfo.invoke(self, ((theTarget, ), _ctx))
def begin_removePlaneInfo(self, theTarget, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removePlaneInfo.begin(self, ((theTarget, ), _response, _ex, _sent, _ctx))
def end_removePlaneInfo(self, _r):
    return _M_omero.model.Pixels._op_removePlaneInfo.end(self, _r)
def removeAllPlaneInfoSet(self, targets, _ctx=None):
    return _M_omero.model.Pixels._op_removeAllPlaneInfoSet.invoke(self, ((targets, ), _ctx))
def begin_removeAllPlaneInfoSet(self, targets, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removeAllPlaneInfoSet.begin(self, ((targets, ), _response, _ex, _sent, _ctx))
def end_removeAllPlaneInfoSet(self, _r):
    return _M_omero.model.Pixels._op_removeAllPlaneInfoSet.end(self, _r)
def clearPlaneInfo(self, _ctx=None):
    return _M_omero.model.Pixels._op_clearPlaneInfo.invoke(self, ((), _ctx))
def begin_clearPlaneInfo(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_clearPlaneInfo.begin(self, ((), _response, _ex, _sent, _ctx))
def end_clearPlaneInfo(self, _r):
    return _M_omero.model.Pixels._op_clearPlaneInfo.end(self, _r)
def reloadPlaneInfo(self, toCopy, _ctx=None):
    return _M_omero.model.Pixels._op_reloadPlaneInfo.invoke(self, ((toCopy, ), _ctx))
def begin_reloadPlaneInfo(self, toCopy, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_reloadPlaneInfo.begin(self, ((toCopy, ), _response, _ex, _sent, _ctx))
def end_reloadPlaneInfo(self, _r):
    return _M_omero.model.Pixels._op_reloadPlaneInfo.end(self, _r)
# --- pixelsFileMaps collection operations (generated Ice proxy code) ------
# Collection protocol for the Pixels<->OriginalFile association entity
# (PixelsOriginalFileMap), plus the per-owner count map. Same mechanical
# sync + begin_/end_ delegation pattern as the other collections.
def unloadPixelsFileMaps(self, _ctx=None):
    return _M_omero.model.Pixels._op_unloadPixelsFileMaps.invoke(self, ((), _ctx))
def begin_unloadPixelsFileMaps(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_unloadPixelsFileMaps.begin(self, ((), _response, _ex, _sent, _ctx))
def end_unloadPixelsFileMaps(self, _r):
    return _M_omero.model.Pixels._op_unloadPixelsFileMaps.end(self, _r)
def sizeOfPixelsFileMaps(self, _ctx=None):
    return _M_omero.model.Pixels._op_sizeOfPixelsFileMaps.invoke(self, ((), _ctx))
def begin_sizeOfPixelsFileMaps(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_sizeOfPixelsFileMaps.begin(self, ((), _response, _ex, _sent, _ctx))
def end_sizeOfPixelsFileMaps(self, _r):
    return _M_omero.model.Pixels._op_sizeOfPixelsFileMaps.end(self, _r)
def copyPixelsFileMaps(self, _ctx=None):
    return _M_omero.model.Pixels._op_copyPixelsFileMaps.invoke(self, ((), _ctx))
def begin_copyPixelsFileMaps(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_copyPixelsFileMaps.begin(self, ((), _response, _ex, _sent, _ctx))
def end_copyPixelsFileMaps(self, _r):
    return _M_omero.model.Pixels._op_copyPixelsFileMaps.end(self, _r)
def addPixelsOriginalFileMap(self, target, _ctx=None):
    return _M_omero.model.Pixels._op_addPixelsOriginalFileMap.invoke(self, ((target, ), _ctx))
def begin_addPixelsOriginalFileMap(self, target, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addPixelsOriginalFileMap.begin(self, ((target, ), _response, _ex, _sent, _ctx))
def end_addPixelsOriginalFileMap(self, _r):
    return _M_omero.model.Pixels._op_addPixelsOriginalFileMap.end(self, _r)
def addAllPixelsOriginalFileMapSet(self, targets, _ctx=None):
    return _M_omero.model.Pixels._op_addAllPixelsOriginalFileMapSet.invoke(self, ((targets, ), _ctx))
def begin_addAllPixelsOriginalFileMapSet(self, targets, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addAllPixelsOriginalFileMapSet.begin(self, ((targets, ), _response, _ex, _sent, _ctx))
def end_addAllPixelsOriginalFileMapSet(self, _r):
    return _M_omero.model.Pixels._op_addAllPixelsOriginalFileMapSet.end(self, _r)
def removePixelsOriginalFileMap(self, theTarget, _ctx=None):
    return _M_omero.model.Pixels._op_removePixelsOriginalFileMap.invoke(self, ((theTarget, ), _ctx))
def begin_removePixelsOriginalFileMap(self, theTarget, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removePixelsOriginalFileMap.begin(self, ((theTarget, ), _response, _ex, _sent, _ctx))
def end_removePixelsOriginalFileMap(self, _r):
    return _M_omero.model.Pixels._op_removePixelsOriginalFileMap.end(self, _r)
def removeAllPixelsOriginalFileMapSet(self, targets, _ctx=None):
    return _M_omero.model.Pixels._op_removeAllPixelsOriginalFileMapSet.invoke(self, ((targets, ), _ctx))
def begin_removeAllPixelsOriginalFileMapSet(self, targets, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removeAllPixelsOriginalFileMapSet.begin(self, ((targets, ), _response, _ex, _sent, _ctx))
def end_removeAllPixelsOriginalFileMapSet(self, _r):
    return _M_omero.model.Pixels._op_removeAllPixelsOriginalFileMapSet.end(self, _r)
def clearPixelsFileMaps(self, _ctx=None):
    return _M_omero.model.Pixels._op_clearPixelsFileMaps.invoke(self, ((), _ctx))
def begin_clearPixelsFileMaps(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_clearPixelsFileMaps.begin(self, ((), _response, _ex, _sent, _ctx))
def end_clearPixelsFileMaps(self, _r):
    return _M_omero.model.Pixels._op_clearPixelsFileMaps.end(self, _r)
def reloadPixelsFileMaps(self, toCopy, _ctx=None):
    return _M_omero.model.Pixels._op_reloadPixelsFileMaps.invoke(self, ((toCopy, ), _ctx))
def begin_reloadPixelsFileMaps(self, toCopy, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_reloadPixelsFileMaps.begin(self, ((toCopy, ), _response, _ex, _sent, _ctx))
def end_reloadPixelsFileMaps(self, _r):
    return _M_omero.model.Pixels._op_reloadPixelsFileMaps.end(self, _r)
def getPixelsFileMapsCountPerOwner(self, _ctx=None):
    return _M_omero.model.Pixels._op_getPixelsFileMapsCountPerOwner.invoke(self, ((), _ctx))
def begin_getPixelsFileMapsCountPerOwner(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_getPixelsFileMapsCountPerOwner.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getPixelsFileMapsCountPerOwner(self, _r):
    return _M_omero.model.Pixels._op_getPixelsFileMapsCountPerOwner.end(self, _r)
# --- OriginalFile link management (generated Ice proxy code) --------------
# Higher-level link helpers over the PixelsOriginalFileMap association:
# link/unlink/find plus both-sides add/remove and the linked-object listing.
def linkOriginalFile(self, addition, _ctx=None):
    return _M_omero.model.Pixels._op_linkOriginalFile.invoke(self, ((addition, ), _ctx))
def begin_linkOriginalFile(self, addition, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_linkOriginalFile.begin(self, ((addition, ), _response, _ex, _sent, _ctx))
def end_linkOriginalFile(self, _r):
    return _M_omero.model.Pixels._op_linkOriginalFile.end(self, _r)
# bothSides is a bool flag passed through to the server-side operation.
def addPixelsOriginalFileMapToBoth(self, link, bothSides, _ctx=None):
    return _M_omero.model.Pixels._op_addPixelsOriginalFileMapToBoth.invoke(self, ((link, bothSides), _ctx))
def begin_addPixelsOriginalFileMapToBoth(self, link, bothSides, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addPixelsOriginalFileMapToBoth.begin(self, ((link, bothSides), _response, _ex, _sent, _ctx))
def end_addPixelsOriginalFileMapToBoth(self, _r):
    return _M_omero.model.Pixels._op_addPixelsOriginalFileMapToBoth.end(self, _r)
def findPixelsOriginalFileMap(self, removal, _ctx=None):
    return _M_omero.model.Pixels._op_findPixelsOriginalFileMap.invoke(self, ((removal, ), _ctx))
def begin_findPixelsOriginalFileMap(self, removal, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_findPixelsOriginalFileMap.begin(self, ((removal, ), _response, _ex, _sent, _ctx))
def end_findPixelsOriginalFileMap(self, _r):
    return _M_omero.model.Pixels._op_findPixelsOriginalFileMap.end(self, _r)
def unlinkOriginalFile(self, removal, _ctx=None):
    return _M_omero.model.Pixels._op_unlinkOriginalFile.invoke(self, ((removal, ), _ctx))
def begin_unlinkOriginalFile(self, removal, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_unlinkOriginalFile.begin(self, ((removal, ), _response, _ex, _sent, _ctx))
def end_unlinkOriginalFile(self, _r):
    return _M_omero.model.Pixels._op_unlinkOriginalFile.end(self, _r)
def removePixelsOriginalFileMapFromBoth(self, link, bothSides, _ctx=None):
    return _M_omero.model.Pixels._op_removePixelsOriginalFileMapFromBoth.invoke(self, ((link, bothSides), _ctx))
def begin_removePixelsOriginalFileMapFromBoth(self, link, bothSides, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removePixelsOriginalFileMapFromBoth.begin(self, ((link, bothSides), _response, _ex, _sent, _ctx))
def end_removePixelsOriginalFileMapFromBoth(self, _r):
    return _M_omero.model.Pixels._op_removePixelsOriginalFileMapFromBoth.end(self, _r)
def linkedOriginalFileList(self, _ctx=None):
    return _M_omero.model.Pixels._op_linkedOriginalFileList.invoke(self, ((), _ctx))
def begin_linkedOriginalFileList(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_linkedOriginalFileList.begin(self, ((), _response, _ex, _sent, _ctx))
def end_linkedOriginalFileList(self, _r):
    return _M_omero.model.Pixels._op_linkedOriginalFileList.end(self, _r)
# --- channels collection operations (generated Ice proxy code) ------------
# Same collection protocol as above for Pixels->Channel, plus positional
# access (get/setChannel by index, primary-channel accessors) because the
# channel list is ordered.
def unloadChannels(self, _ctx=None):
    return _M_omero.model.Pixels._op_unloadChannels.invoke(self, ((), _ctx))
def begin_unloadChannels(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_unloadChannels.begin(self, ((), _response, _ex, _sent, _ctx))
def end_unloadChannels(self, _r):
    return _M_omero.model.Pixels._op_unloadChannels.end(self, _r)
def sizeOfChannels(self, _ctx=None):
    return _M_omero.model.Pixels._op_sizeOfChannels.invoke(self, ((), _ctx))
def begin_sizeOfChannels(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_sizeOfChannels.begin(self, ((), _response, _ex, _sent, _ctx))
def end_sizeOfChannels(self, _r):
    return _M_omero.model.Pixels._op_sizeOfChannels.end(self, _r)
def copyChannels(self, _ctx=None):
    return _M_omero.model.Pixels._op_copyChannels.invoke(self, ((), _ctx))
def begin_copyChannels(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_copyChannels.begin(self, ((), _response, _ex, _sent, _ctx))
def end_copyChannels(self, _r):
    return _M_omero.model.Pixels._op_copyChannels.end(self, _r)
def addChannel(self, target, _ctx=None):
    return _M_omero.model.Pixels._op_addChannel.invoke(self, ((target, ), _ctx))
def begin_addChannel(self, target, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addChannel.begin(self, ((target, ), _response, _ex, _sent, _ctx))
def end_addChannel(self, _r):
    return _M_omero.model.Pixels._op_addChannel.end(self, _r)
def addAllChannelSet(self, targets, _ctx=None):
    return _M_omero.model.Pixels._op_addAllChannelSet.invoke(self, ((targets, ), _ctx))
def begin_addAllChannelSet(self, targets, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addAllChannelSet.begin(self, ((targets, ), _response, _ex, _sent, _ctx))
def end_addAllChannelSet(self, _r):
    return _M_omero.model.Pixels._op_addAllChannelSet.end(self, _r)
def removeChannel(self, theTarget, _ctx=None):
    return _M_omero.model.Pixels._op_removeChannel.invoke(self, ((theTarget, ), _ctx))
def begin_removeChannel(self, theTarget, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removeChannel.begin(self, ((theTarget, ), _response, _ex, _sent, _ctx))
def end_removeChannel(self, _r):
    return _M_omero.model.Pixels._op_removeChannel.end(self, _r)
def removeAllChannelSet(self, targets, _ctx=None):
    return _M_omero.model.Pixels._op_removeAllChannelSet.invoke(self, ((targets, ), _ctx))
def begin_removeAllChannelSet(self, targets, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removeAllChannelSet.begin(self, ((targets, ), _response, _ex, _sent, _ctx))
def end_removeAllChannelSet(self, _r):
    return _M_omero.model.Pixels._op_removeAllChannelSet.end(self, _r)
def clearChannels(self, _ctx=None):
    return _M_omero.model.Pixels._op_clearChannels.invoke(self, ((), _ctx))
def begin_clearChannels(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_clearChannels.begin(self, ((), _response, _ex, _sent, _ctx))
def end_clearChannels(self, _r):
    return _M_omero.model.Pixels._op_clearChannels.end(self, _r)
def reloadChannels(self, toCopy, _ctx=None):
    return _M_omero.model.Pixels._op_reloadChannels.invoke(self, ((toCopy, ), _ctx))
def begin_reloadChannels(self, toCopy, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_reloadChannels.begin(self, ((toCopy, ), _response, _ex, _sent, _ctx))
def end_reloadChannels(self, _r):
    return _M_omero.model.Pixels._op_reloadChannels.end(self, _r)
# Positional access into the ordered channel list.
def getChannel(self, index, _ctx=None):
    return _M_omero.model.Pixels._op_getChannel.invoke(self, ((index, ), _ctx))
def begin_getChannel(self, index, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_getChannel.begin(self, ((index, ), _response, _ex, _sent, _ctx))
def end_getChannel(self, _r):
    return _M_omero.model.Pixels._op_getChannel.end(self, _r)
def setChannel(self, index, theElement, _ctx=None):
    return _M_omero.model.Pixels._op_setChannel.invoke(self, ((index, theElement), _ctx))
def begin_setChannel(self, index, theElement, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_setChannel.begin(self, ((index, theElement), _response, _ex, _sent, _ctx))
def end_setChannel(self, _r):
    return _M_omero.model.Pixels._op_setChannel.end(self, _r)
def getPrimaryChannel(self, _ctx=None):
    return _M_omero.model.Pixels._op_getPrimaryChannel.invoke(self, ((), _ctx))
def begin_getPrimaryChannel(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_getPrimaryChannel.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getPrimaryChannel(self, _r):
    return _M_omero.model.Pixels._op_getPrimaryChannel.end(self, _r)
def setPrimaryChannel(self, theElement, _ctx=None):
    return _M_omero.model.Pixels._op_setPrimaryChannel.invoke(self, ((theElement, ), _ctx))
def begin_setPrimaryChannel(self, theElement, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_setPrimaryChannel.begin(self, ((theElement, ), _response, _ex, _sent, _ctx))
def end_setPrimaryChannel(self, _r):
    return _M_omero.model.Pixels._op_setPrimaryChannel.end(self, _r)
# --- settings (RenderingDef) collection operations (generated) ------------
# Collection protocol for Pixels->RenderingDef ("settings"); element
# mutators are named after RenderingDef rather than the collection.
def unloadSettings(self, _ctx=None):
    return _M_omero.model.Pixels._op_unloadSettings.invoke(self, ((), _ctx))
def begin_unloadSettings(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_unloadSettings.begin(self, ((), _response, _ex, _sent, _ctx))
def end_unloadSettings(self, _r):
    return _M_omero.model.Pixels._op_unloadSettings.end(self, _r)
def sizeOfSettings(self, _ctx=None):
    return _M_omero.model.Pixels._op_sizeOfSettings.invoke(self, ((), _ctx))
def begin_sizeOfSettings(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_sizeOfSettings.begin(self, ((), _response, _ex, _sent, _ctx))
def end_sizeOfSettings(self, _r):
    return _M_omero.model.Pixels._op_sizeOfSettings.end(self, _r)
def copySettings(self, _ctx=None):
    return _M_omero.model.Pixels._op_copySettings.invoke(self, ((), _ctx))
def begin_copySettings(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_copySettings.begin(self, ((), _response, _ex, _sent, _ctx))
def end_copySettings(self, _r):
    return _M_omero.model.Pixels._op_copySettings.end(self, _r)
def addRenderingDef(self, target, _ctx=None):
    return _M_omero.model.Pixels._op_addRenderingDef.invoke(self, ((target, ), _ctx))
def begin_addRenderingDef(self, target, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addRenderingDef.begin(self, ((target, ), _response, _ex, _sent, _ctx))
def end_addRenderingDef(self, _r):
    return _M_omero.model.Pixels._op_addRenderingDef.end(self, _r)
def addAllRenderingDefSet(self, targets, _ctx=None):
    return _M_omero.model.Pixels._op_addAllRenderingDefSet.invoke(self, ((targets, ), _ctx))
def begin_addAllRenderingDefSet(self, targets, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addAllRenderingDefSet.begin(self, ((targets, ), _response, _ex, _sent, _ctx))
def end_addAllRenderingDefSet(self, _r):
    return _M_omero.model.Pixels._op_addAllRenderingDefSet.end(self, _r)
def removeRenderingDef(self, theTarget, _ctx=None):
    return _M_omero.model.Pixels._op_removeRenderingDef.invoke(self, ((theTarget, ), _ctx))
def begin_removeRenderingDef(self, theTarget, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removeRenderingDef.begin(self, ((theTarget, ), _response, _ex, _sent, _ctx))
def end_removeRenderingDef(self, _r):
    return _M_omero.model.Pixels._op_removeRenderingDef.end(self, _r)
def removeAllRenderingDefSet(self, targets, _ctx=None):
    return _M_omero.model.Pixels._op_removeAllRenderingDefSet.invoke(self, ((targets, ), _ctx))
def begin_removeAllRenderingDefSet(self, targets, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removeAllRenderingDefSet.begin(self, ((targets, ), _response, _ex, _sent, _ctx))
def end_removeAllRenderingDefSet(self, _r):
    return _M_omero.model.Pixels._op_removeAllRenderingDefSet.end(self, _r)
def clearSettings(self, _ctx=None):
    return _M_omero.model.Pixels._op_clearSettings.invoke(self, ((), _ctx))
def begin_clearSettings(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_clearSettings.begin(self, ((), _response, _ex, _sent, _ctx))
def end_clearSettings(self, _r):
    return _M_omero.model.Pixels._op_clearSettings.end(self, _r)
def reloadSettings(self, toCopy, _ctx=None):
    return _M_omero.model.Pixels._op_reloadSettings.invoke(self, ((toCopy, ), _ctx))
def begin_reloadSettings(self, toCopy, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_reloadSettings.begin(self, ((toCopy, ), _response, _ex, _sent, _ctx))
def end_reloadSettings(self, _r):
    return _M_omero.model.Pixels._op_reloadSettings.end(self, _r)
# --- thumbnails collection operations (generated Ice proxy code) ----------
# Collection protocol for Pixels->Thumbnail; same sync + begin_/end_
# delegation pattern as the other collections.
def unloadThumbnails(self, _ctx=None):
    return _M_omero.model.Pixels._op_unloadThumbnails.invoke(self, ((), _ctx))
def begin_unloadThumbnails(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_unloadThumbnails.begin(self, ((), _response, _ex, _sent, _ctx))
def end_unloadThumbnails(self, _r):
    return _M_omero.model.Pixels._op_unloadThumbnails.end(self, _r)
def sizeOfThumbnails(self, _ctx=None):
    return _M_omero.model.Pixels._op_sizeOfThumbnails.invoke(self, ((), _ctx))
def begin_sizeOfThumbnails(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_sizeOfThumbnails.begin(self, ((), _response, _ex, _sent, _ctx))
def end_sizeOfThumbnails(self, _r):
    return _M_omero.model.Pixels._op_sizeOfThumbnails.end(self, _r)
def copyThumbnails(self, _ctx=None):
    return _M_omero.model.Pixels._op_copyThumbnails.invoke(self, ((), _ctx))
def begin_copyThumbnails(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_copyThumbnails.begin(self, ((), _response, _ex, _sent, _ctx))
def end_copyThumbnails(self, _r):
    return _M_omero.model.Pixels._op_copyThumbnails.end(self, _r)
def addThumbnail(self, target, _ctx=None):
    return _M_omero.model.Pixels._op_addThumbnail.invoke(self, ((target, ), _ctx))
def begin_addThumbnail(self, target, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addThumbnail.begin(self, ((target, ), _response, _ex, _sent, _ctx))
def end_addThumbnail(self, _r):
    return _M_omero.model.Pixels._op_addThumbnail.end(self, _r)
def addAllThumbnailSet(self, targets, _ctx=None):
    return _M_omero.model.Pixels._op_addAllThumbnailSet.invoke(self, ((targets, ), _ctx))
def begin_addAllThumbnailSet(self, targets, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addAllThumbnailSet.begin(self, ((targets, ), _response, _ex, _sent, _ctx))
def end_addAllThumbnailSet(self, _r):
    return _M_omero.model.Pixels._op_addAllThumbnailSet.end(self, _r)
def removeThumbnail(self, theTarget, _ctx=None):
    return _M_omero.model.Pixels._op_removeThumbnail.invoke(self, ((theTarget, ), _ctx))
def begin_removeThumbnail(self, theTarget, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removeThumbnail.begin(self, ((theTarget, ), _response, _ex, _sent, _ctx))
def end_removeThumbnail(self, _r):
    return _M_omero.model.Pixels._op_removeThumbnail.end(self, _r)
def removeAllThumbnailSet(self, targets, _ctx=None):
    return _M_omero.model.Pixels._op_removeAllThumbnailSet.invoke(self, ((targets, ), _ctx))
def begin_removeAllThumbnailSet(self, targets, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removeAllThumbnailSet.begin(self, ((targets, ), _response, _ex, _sent, _ctx))
def end_removeAllThumbnailSet(self, _r):
    return _M_omero.model.Pixels._op_removeAllThumbnailSet.end(self, _r)
def clearThumbnails(self, _ctx=None):
    return _M_omero.model.Pixels._op_clearThumbnails.invoke(self, ((), _ctx))
def begin_clearThumbnails(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_clearThumbnails.begin(self, ((), _response, _ex, _sent, _ctx))
def end_clearThumbnails(self, _r):
    return _M_omero.model.Pixels._op_clearThumbnails.end(self, _r)
def reloadThumbnails(self, toCopy, _ctx=None):
    return _M_omero.model.Pixels._op_reloadThumbnails.invoke(self, ((toCopy, ), _ctx))
def begin_reloadThumbnails(self, toCopy, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_reloadThumbnails.begin(self, ((toCopy, ), _response, _ex, _sent, _ctx))
def end_reloadThumbnails(self, _r):
    return _M_omero.model.Pixels._op_reloadThumbnails.end(self, _r)
# --- annotationLinks collection operations (generated Ice proxy code) -----
# Collection protocol for the PixelsAnnotationLink association, plus the
# per-owner count map. Same sync + begin_/end_ delegation pattern.
def unloadAnnotationLinks(self, _ctx=None):
    return _M_omero.model.Pixels._op_unloadAnnotationLinks.invoke(self, ((), _ctx))
def begin_unloadAnnotationLinks(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_unloadAnnotationLinks.begin(self, ((), _response, _ex, _sent, _ctx))
def end_unloadAnnotationLinks(self, _r):
    return _M_omero.model.Pixels._op_unloadAnnotationLinks.end(self, _r)
def sizeOfAnnotationLinks(self, _ctx=None):
    return _M_omero.model.Pixels._op_sizeOfAnnotationLinks.invoke(self, ((), _ctx))
def begin_sizeOfAnnotationLinks(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_sizeOfAnnotationLinks.begin(self, ((), _response, _ex, _sent, _ctx))
def end_sizeOfAnnotationLinks(self, _r):
    return _M_omero.model.Pixels._op_sizeOfAnnotationLinks.end(self, _r)
def copyAnnotationLinks(self, _ctx=None):
    return _M_omero.model.Pixels._op_copyAnnotationLinks.invoke(self, ((), _ctx))
def begin_copyAnnotationLinks(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_copyAnnotationLinks.begin(self, ((), _response, _ex, _sent, _ctx))
def end_copyAnnotationLinks(self, _r):
    return _M_omero.model.Pixels._op_copyAnnotationLinks.end(self, _r)
def addPixelsAnnotationLink(self, target, _ctx=None):
    return _M_omero.model.Pixels._op_addPixelsAnnotationLink.invoke(self, ((target, ), _ctx))
def begin_addPixelsAnnotationLink(self, target, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addPixelsAnnotationLink.begin(self, ((target, ), _response, _ex, _sent, _ctx))
def end_addPixelsAnnotationLink(self, _r):
    return _M_omero.model.Pixels._op_addPixelsAnnotationLink.end(self, _r)
def addAllPixelsAnnotationLinkSet(self, targets, _ctx=None):
    return _M_omero.model.Pixels._op_addAllPixelsAnnotationLinkSet.invoke(self, ((targets, ), _ctx))
def begin_addAllPixelsAnnotationLinkSet(self, targets, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addAllPixelsAnnotationLinkSet.begin(self, ((targets, ), _response, _ex, _sent, _ctx))
def end_addAllPixelsAnnotationLinkSet(self, _r):
    return _M_omero.model.Pixels._op_addAllPixelsAnnotationLinkSet.end(self, _r)
def removePixelsAnnotationLink(self, theTarget, _ctx=None):
    return _M_omero.model.Pixels._op_removePixelsAnnotationLink.invoke(self, ((theTarget, ), _ctx))
def begin_removePixelsAnnotationLink(self, theTarget, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removePixelsAnnotationLink.begin(self, ((theTarget, ), _response, _ex, _sent, _ctx))
def end_removePixelsAnnotationLink(self, _r):
    return _M_omero.model.Pixels._op_removePixelsAnnotationLink.end(self, _r)
def removeAllPixelsAnnotationLinkSet(self, targets, _ctx=None):
    return _M_omero.model.Pixels._op_removeAllPixelsAnnotationLinkSet.invoke(self, ((targets, ), _ctx))
def begin_removeAllPixelsAnnotationLinkSet(self, targets, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removeAllPixelsAnnotationLinkSet.begin(self, ((targets, ), _response, _ex, _sent, _ctx))
def end_removeAllPixelsAnnotationLinkSet(self, _r):
    return _M_omero.model.Pixels._op_removeAllPixelsAnnotationLinkSet.end(self, _r)
def clearAnnotationLinks(self, _ctx=None):
    return _M_omero.model.Pixels._op_clearAnnotationLinks.invoke(self, ((), _ctx))
def begin_clearAnnotationLinks(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_clearAnnotationLinks.begin(self, ((), _response, _ex, _sent, _ctx))
def end_clearAnnotationLinks(self, _r):
    return _M_omero.model.Pixels._op_clearAnnotationLinks.end(self, _r)
def reloadAnnotationLinks(self, toCopy, _ctx=None):
    return _M_omero.model.Pixels._op_reloadAnnotationLinks.invoke(self, ((toCopy, ), _ctx))
def begin_reloadAnnotationLinks(self, toCopy, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_reloadAnnotationLinks.begin(self, ((toCopy, ), _response, _ex, _sent, _ctx))
def end_reloadAnnotationLinks(self, _r):
    return _M_omero.model.Pixels._op_reloadAnnotationLinks.end(self, _r)
def getAnnotationLinksCountPerOwner(self, _ctx=None):
    return _M_omero.model.Pixels._op_getAnnotationLinksCountPerOwner.invoke(self, ((), _ctx))
def begin_getAnnotationLinksCountPerOwner(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_getAnnotationLinksCountPerOwner.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getAnnotationLinksCountPerOwner(self, _r):
    return _M_omero.model.Pixels._op_getAnnotationLinksCountPerOwner.end(self, _r)
# --- Annotation link management (generated Ice proxy code) ----------------
# Higher-level helpers over PixelsAnnotationLink: link/unlink/find, the
# both-sides add/remove variants, and the linked-annotation listing —
# structurally parallel to the OriginalFile link helpers above.
def linkAnnotation(self, addition, _ctx=None):
    return _M_omero.model.Pixels._op_linkAnnotation.invoke(self, ((addition, ), _ctx))
def begin_linkAnnotation(self, addition, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_linkAnnotation.begin(self, ((addition, ), _response, _ex, _sent, _ctx))
def end_linkAnnotation(self, _r):
    return _M_omero.model.Pixels._op_linkAnnotation.end(self, _r)
# bothSides is a bool flag passed through to the server-side operation.
def addPixelsAnnotationLinkToBoth(self, link, bothSides, _ctx=None):
    return _M_omero.model.Pixels._op_addPixelsAnnotationLinkToBoth.invoke(self, ((link, bothSides), _ctx))
def begin_addPixelsAnnotationLinkToBoth(self, link, bothSides, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_addPixelsAnnotationLinkToBoth.begin(self, ((link, bothSides), _response, _ex, _sent, _ctx))
def end_addPixelsAnnotationLinkToBoth(self, _r):
    return _M_omero.model.Pixels._op_addPixelsAnnotationLinkToBoth.end(self, _r)
def findPixelsAnnotationLink(self, removal, _ctx=None):
    return _M_omero.model.Pixels._op_findPixelsAnnotationLink.invoke(self, ((removal, ), _ctx))
def begin_findPixelsAnnotationLink(self, removal, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_findPixelsAnnotationLink.begin(self, ((removal, ), _response, _ex, _sent, _ctx))
def end_findPixelsAnnotationLink(self, _r):
    return _M_omero.model.Pixels._op_findPixelsAnnotationLink.end(self, _r)
def unlinkAnnotation(self, removal, _ctx=None):
    return _M_omero.model.Pixels._op_unlinkAnnotation.invoke(self, ((removal, ), _ctx))
def begin_unlinkAnnotation(self, removal, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_unlinkAnnotation.begin(self, ((removal, ), _response, _ex, _sent, _ctx))
def end_unlinkAnnotation(self, _r):
    return _M_omero.model.Pixels._op_unlinkAnnotation.end(self, _r)
def removePixelsAnnotationLinkFromBoth(self, link, bothSides, _ctx=None):
    return _M_omero.model.Pixels._op_removePixelsAnnotationLinkFromBoth.invoke(self, ((link, bothSides), _ctx))
def begin_removePixelsAnnotationLinkFromBoth(self, link, bothSides, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_removePixelsAnnotationLinkFromBoth.begin(self, ((link, bothSides), _response, _ex, _sent, _ctx))
def end_removePixelsAnnotationLinkFromBoth(self, _r):
    return _M_omero.model.Pixels._op_removePixelsAnnotationLinkFromBoth.end(self, _r)
def linkedAnnotationList(self, _ctx=None):
    return _M_omero.model.Pixels._op_linkedAnnotationList.invoke(self, ((), _ctx))
def begin_linkedAnnotationList(self, _response=None, _ex=None, _sent=None, _ctx=None):
    return _M_omero.model.Pixels._op_linkedAnnotationList.begin(self, ((), _response, _ex, _sent, _ctx))
def end_linkedAnnotationList(self, _r):
    return _M_omero.model.Pixels._op_linkedAnnotationList.end(self, _r)
@staticmethod
def checkedCast(proxy, facetOrCtx=None, _ctx=None):
    """Downcast *proxy* to a PixelsPrx, verifying the remote type.

    Performs a remote ice_isA check against '::omero::model::Pixels';
    facetOrCtx selects a facet or supplies the context, _ctx the context.
    """
    return _M_omero.model.PixelsPrx.ice_checkedCast(proxy, '::omero::model::Pixels', facetOrCtx, _ctx)

@staticmethod
def uncheckedCast(proxy, facet=None):
    """Downcast *proxy* to a PixelsPrx locally, with no remote type check."""
    return _M_omero.model.PixelsPrx.ice_uncheckedCast(proxy, facet)
# Ice run-time type registration (generated): the proxy type and the concrete
# class layout. Each member tuple is (field name, metadata, field type); the
# order here must match the Slice definition's marshaling order.
_M_omero.model._t_PixelsPrx = IcePy.defineProxy('::omero::model::Pixels', PixelsPrx)
_M_omero.model._t_Pixels = IcePy.defineClass('::omero::model::Pixels', Pixels, (), True, _M_omero.model._t_IObject, (), (
('_version', (), _M_omero._t_RInt),
('_image', (), _M_omero.model._t_Image),
('_relatedTo', (), _M_omero.model._t_Pixels),
('_pixelsType', (), _M_omero.model._t_PixelsType),
('_sizeX', (), _M_omero._t_RInt),
('_sizeY', (), _M_omero._t_RInt),
('_sizeZ', (), _M_omero._t_RInt),
('_sizeC', (), _M_omero._t_RInt),
('_sizeT', (), _M_omero._t_RInt),
('_sha1', (), _M_omero._t_RString),
('_dimensionOrder', (), _M_omero.model._t_DimensionOrder),
('_physicalSizeX', (), _M_omero._t_RDouble),
('_physicalSizeY', (), _M_omero._t_RDouble),
('_physicalSizeZ', (), _M_omero._t_RDouble),
('_waveStart', (), _M_omero._t_RInt),
('_waveIncrement', (), _M_omero._t_RInt),
('_timeIncrement', (), _M_omero._t_RDouble),
('_methodology', (), _M_omero._t_RString),
('_planeInfoSeq', (), _M_omero.model._t_PixelsPlaneInfoSeq),
('_planeInfoLoaded', (), IcePy._t_bool),
('_pixelsFileMapsSeq', (), _M_omero.model._t_PixelsPixelsFileMapsSeq),
('_pixelsFileMapsLoaded', (), IcePy._t_bool),
('_pixelsFileMapsCountPerOwner', (), _M_omero.sys._t_CountMap),
('_channelsSeq', (), _M_omero.model._t_PixelsChannelsSeq),
('_channelsLoaded', (), IcePy._t_bool),
('_settingsSeq', (), _M_omero.model._t_PixelsSettingsSeq),
('_settingsLoaded', (), IcePy._t_bool),
('_thumbnailsSeq', (), _M_omero.model._t_PixelsThumbnailsSeq),
('_thumbnailsLoaded', (), IcePy._t_bool),
('_annotationLinksSeq', (), _M_omero.model._t_PixelsAnnotationLinksSeq),
('_annotationLinksLoaded', (), IcePy._t_bool),
('_annotationLinksCountPerOwner', (), _M_omero.sys._t_CountMap)
))
# Generated operation table for omero::model::Pixels: one IcePy.Operation per
# Slice operation, describing (name, invocation modes, AMD flag, metadata,
# in-params, out-params, return type, exceptions) for the Ice marshaler.
Pixels._ice_type = _M_omero.model._t_Pixels
# Scalar accessors (version, image, relatedTo, pixelsType, sizes, sha1, ...).
Pixels._op_getVersion = IcePy.Operation('getVersion', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RInt, ())
Pixels._op_setVersion = IcePy.Operation('setVersion', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RInt),), (), None, ())
Pixels._op_getImage = IcePy.Operation('getImage', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_Image, ())
Pixels._op_setImage = IcePy.Operation('setImage', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Image),), (), None, ())
Pixels._op_getRelatedTo = IcePy.Operation('getRelatedTo', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_Pixels, ())
Pixels._op_setRelatedTo = IcePy.Operation('setRelatedTo', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Pixels),), (), None, ())
Pixels._op_getPixelsType = IcePy.Operation('getPixelsType', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_PixelsType, ())
Pixels._op_setPixelsType = IcePy.Operation('setPixelsType', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsType),), (), None, ())
Pixels._op_getSizeX = IcePy.Operation('getSizeX', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RInt, ())
Pixels._op_setSizeX = IcePy.Operation('setSizeX', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RInt),), (), None, ())
Pixels._op_getSizeY = IcePy.Operation('getSizeY', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RInt, ())
Pixels._op_setSizeY = IcePy.Operation('setSizeY', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RInt),), (), None, ())
Pixels._op_getSizeZ = IcePy.Operation('getSizeZ', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RInt, ())
Pixels._op_setSizeZ = IcePy.Operation('setSizeZ', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RInt),), (), None, ())
Pixels._op_getSizeC = IcePy.Operation('getSizeC', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RInt, ())
Pixels._op_setSizeC = IcePy.Operation('setSizeC', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RInt),), (), None, ())
Pixels._op_getSizeT = IcePy.Operation('getSizeT', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RInt, ())
Pixels._op_setSizeT = IcePy.Operation('setSizeT', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RInt),), (), None, ())
Pixels._op_getSha1 = IcePy.Operation('getSha1', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RString, ())
Pixels._op_setSha1 = IcePy.Operation('setSha1', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RString),), (), None, ())
Pixels._op_getDimensionOrder = IcePy.Operation('getDimensionOrder', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_DimensionOrder, ())
Pixels._op_setDimensionOrder = IcePy.Operation('setDimensionOrder', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_DimensionOrder),), (), None, ())
Pixels._op_getPhysicalSizeX = IcePy.Operation('getPhysicalSizeX', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RDouble, ())
Pixels._op_setPhysicalSizeX = IcePy.Operation('setPhysicalSizeX', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RDouble),), (), None, ())
Pixels._op_getPhysicalSizeY = IcePy.Operation('getPhysicalSizeY', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RDouble, ())
Pixels._op_setPhysicalSizeY = IcePy.Operation('setPhysicalSizeY', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RDouble),), (), None, ())
Pixels._op_getPhysicalSizeZ = IcePy.Operation('getPhysicalSizeZ', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RDouble, ())
Pixels._op_setPhysicalSizeZ = IcePy.Operation('setPhysicalSizeZ', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RDouble),), (), None, ())
Pixels._op_getWaveStart = IcePy.Operation('getWaveStart', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RInt, ())
Pixels._op_setWaveStart = IcePy.Operation('setWaveStart', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RInt),), (), None, ())
Pixels._op_getWaveIncrement = IcePy.Operation('getWaveIncrement', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RInt, ())
Pixels._op_setWaveIncrement = IcePy.Operation('setWaveIncrement', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RInt),), (), None, ())
Pixels._op_getTimeIncrement = IcePy.Operation('getTimeIncrement', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RDouble, ())
Pixels._op_setTimeIncrement = IcePy.Operation('setTimeIncrement', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RDouble),), (), None, ())
Pixels._op_getMethodology = IcePy.Operation('getMethodology', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RString, ())
Pixels._op_setMethodology = IcePy.Operation('setMethodology', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RString),), (), None, ())
# PlaneInfo collection management.
Pixels._op_unloadPlaneInfo = IcePy.Operation('unloadPlaneInfo', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), None, ())
Pixels._op_sizeOfPlaneInfo = IcePy.Operation('sizeOfPlaneInfo', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), IcePy._t_int, ())
Pixels._op_copyPlaneInfo = IcePy.Operation('copyPlaneInfo', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_PixelsPlaneInfoSeq, ())
Pixels._op_addPlaneInfo = IcePy.Operation('addPlaneInfo', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PlaneInfo),), (), None, ())
Pixels._op_addAllPlaneInfoSet = IcePy.Operation('addAllPlaneInfoSet', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsPlaneInfoSeq),), (), None, ())
Pixels._op_removePlaneInfo = IcePy.Operation('removePlaneInfo', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PlaneInfo),), (), None, ())
Pixels._op_removeAllPlaneInfoSet = IcePy.Operation('removeAllPlaneInfoSet', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsPlaneInfoSeq),), (), None, ())
Pixels._op_clearPlaneInfo = IcePy.Operation('clearPlaneInfo', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), None, ())
Pixels._op_reloadPlaneInfo = IcePy.Operation('reloadPlaneInfo', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Pixels),), (), None, ())
# Pixels <-> OriginalFile map management.
Pixels._op_unloadPixelsFileMaps = IcePy.Operation('unloadPixelsFileMaps', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), None, ())
Pixels._op_sizeOfPixelsFileMaps = IcePy.Operation('sizeOfPixelsFileMaps', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), IcePy._t_int, ())
Pixels._op_copyPixelsFileMaps = IcePy.Operation('copyPixelsFileMaps', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_PixelsPixelsFileMapsSeq, ())
Pixels._op_addPixelsOriginalFileMap = IcePy.Operation('addPixelsOriginalFileMap', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsOriginalFileMap),), (), None, ())
Pixels._op_addAllPixelsOriginalFileMapSet = IcePy.Operation('addAllPixelsOriginalFileMapSet', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsPixelsFileMapsSeq),), (), None, ())
Pixels._op_removePixelsOriginalFileMap = IcePy.Operation('removePixelsOriginalFileMap', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsOriginalFileMap),), (), None, ())
Pixels._op_removeAllPixelsOriginalFileMapSet = IcePy.Operation('removeAllPixelsOriginalFileMapSet', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsPixelsFileMapsSeq),), (), None, ())
Pixels._op_clearPixelsFileMaps = IcePy.Operation('clearPixelsFileMaps', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), None, ())
Pixels._op_reloadPixelsFileMaps = IcePy.Operation('reloadPixelsFileMaps', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Pixels),), (), None, ())
Pixels._op_getPixelsFileMapsCountPerOwner = IcePy.Operation('getPixelsFileMapsCountPerOwner', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.sys._t_CountMap, ())
Pixels._op_linkOriginalFile = IcePy.Operation('linkOriginalFile', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_OriginalFile),), (), _M_omero.model._t_PixelsOriginalFileMap, ())
Pixels._op_addPixelsOriginalFileMapToBoth = IcePy.Operation('addPixelsOriginalFileMapToBoth', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsOriginalFileMap), ((), IcePy._t_bool)), (), None, ())
Pixels._op_findPixelsOriginalFileMap = IcePy.Operation('findPixelsOriginalFileMap', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_OriginalFile),), (), _M_omero.model._t_PixelsPixelsFileMapsSeq, ())
Pixels._op_unlinkOriginalFile = IcePy.Operation('unlinkOriginalFile', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_OriginalFile),), (), None, ())
Pixels._op_removePixelsOriginalFileMapFromBoth = IcePy.Operation('removePixelsOriginalFileMapFromBoth', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsOriginalFileMap), ((), IcePy._t_bool)), (), None, ())
Pixels._op_linkedOriginalFileList = IcePy.Operation('linkedOriginalFileList', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_PixelsLinkedOriginalFileSeq, ())
# Channel collection management.
Pixels._op_unloadChannels = IcePy.Operation('unloadChannels', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), None, ())
Pixels._op_sizeOfChannels = IcePy.Operation('sizeOfChannels', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), IcePy._t_int, ())
Pixels._op_copyChannels = IcePy.Operation('copyChannels', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_PixelsChannelsSeq, ())
Pixels._op_addChannel = IcePy.Operation('addChannel', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Channel),), (), None, ())
Pixels._op_addAllChannelSet = IcePy.Operation('addAllChannelSet', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsChannelsSeq),), (), None, ())
Pixels._op_removeChannel = IcePy.Operation('removeChannel', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Channel),), (), None, ())
Pixels._op_removeAllChannelSet = IcePy.Operation('removeAllChannelSet', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsChannelsSeq),), (), None, ())
Pixels._op_clearChannels = IcePy.Operation('clearChannels', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), None, ())
Pixels._op_reloadChannels = IcePy.Operation('reloadChannels', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Pixels),), (), None, ())
Pixels._op_getChannel = IcePy.Operation('getChannel', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), IcePy._t_int),), (), _M_omero.model._t_Channel, ())
Pixels._op_setChannel = IcePy.Operation('setChannel', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), IcePy._t_int), ((), _M_omero.model._t_Channel)), (), _M_omero.model._t_Channel, ())
Pixels._op_getPrimaryChannel = IcePy.Operation('getPrimaryChannel', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_Channel, ())
Pixels._op_setPrimaryChannel = IcePy.Operation('setPrimaryChannel', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Channel),), (), _M_omero.model._t_Channel, ())
# RenderingDef (settings) collection management.
Pixels._op_unloadSettings = IcePy.Operation('unloadSettings', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), None, ())
Pixels._op_sizeOfSettings = IcePy.Operation('sizeOfSettings', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), IcePy._t_int, ())
Pixels._op_copySettings = IcePy.Operation('copySettings', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_PixelsSettingsSeq, ())
Pixels._op_addRenderingDef = IcePy.Operation('addRenderingDef', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_RenderingDef),), (), None, ())
Pixels._op_addAllRenderingDefSet = IcePy.Operation('addAllRenderingDefSet', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsSettingsSeq),), (), None, ())
Pixels._op_removeRenderingDef = IcePy.Operation('removeRenderingDef', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_RenderingDef),), (), None, ())
Pixels._op_removeAllRenderingDefSet = IcePy.Operation('removeAllRenderingDefSet', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsSettingsSeq),), (), None, ())
Pixels._op_clearSettings = IcePy.Operation('clearSettings', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), None, ())
Pixels._op_reloadSettings = IcePy.Operation('reloadSettings', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Pixels),), (), None, ())
# Thumbnail collection management.
Pixels._op_unloadThumbnails = IcePy.Operation('unloadThumbnails', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), None, ())
Pixels._op_sizeOfThumbnails = IcePy.Operation('sizeOfThumbnails', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), IcePy._t_int, ())
Pixels._op_copyThumbnails = IcePy.Operation('copyThumbnails', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_PixelsThumbnailsSeq, ())
Pixels._op_addThumbnail = IcePy.Operation('addThumbnail', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Thumbnail),), (), None, ())
Pixels._op_addAllThumbnailSet = IcePy.Operation('addAllThumbnailSet', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsThumbnailsSeq),), (), None, ())
Pixels._op_removeThumbnail = IcePy.Operation('removeThumbnail', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Thumbnail),), (), None, ())
Pixels._op_removeAllThumbnailSet = IcePy.Operation('removeAllThumbnailSet', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsThumbnailsSeq),), (), None, ())
Pixels._op_clearThumbnails = IcePy.Operation('clearThumbnails', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), None, ())
Pixels._op_reloadThumbnails = IcePy.Operation('reloadThumbnails', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Pixels),), (), None, ())
# Annotation link collection management.
Pixels._op_unloadAnnotationLinks = IcePy.Operation('unloadAnnotationLinks', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), None, ())
Pixels._op_sizeOfAnnotationLinks = IcePy.Operation('sizeOfAnnotationLinks', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), IcePy._t_int, ())
Pixels._op_copyAnnotationLinks = IcePy.Operation('copyAnnotationLinks', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_PixelsAnnotationLinksSeq, ())
Pixels._op_addPixelsAnnotationLink = IcePy.Operation('addPixelsAnnotationLink', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsAnnotationLink),), (), None, ())
Pixels._op_addAllPixelsAnnotationLinkSet = IcePy.Operation('addAllPixelsAnnotationLinkSet', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsAnnotationLinksSeq),), (), None, ())
Pixels._op_removePixelsAnnotationLink = IcePy.Operation('removePixelsAnnotationLink', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsAnnotationLink),), (), None, ())
Pixels._op_removeAllPixelsAnnotationLinkSet = IcePy.Operation('removeAllPixelsAnnotationLinkSet', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsAnnotationLinksSeq),), (), None, ())
Pixels._op_clearAnnotationLinks = IcePy.Operation('clearAnnotationLinks', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), None, ())
Pixels._op_reloadAnnotationLinks = IcePy.Operation('reloadAnnotationLinks', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Pixels),), (), None, ())
Pixels._op_getAnnotationLinksCountPerOwner = IcePy.Operation('getAnnotationLinksCountPerOwner', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.sys._t_CountMap, ())
Pixels._op_linkAnnotation = IcePy.Operation('linkAnnotation', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Annotation),), (), _M_omero.model._t_PixelsAnnotationLink, ())
Pixels._op_addPixelsAnnotationLinkToBoth = IcePy.Operation('addPixelsAnnotationLinkToBoth', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsAnnotationLink), ((), IcePy._t_bool)), (), None, ())
Pixels._op_findPixelsAnnotationLink = IcePy.Operation('findPixelsAnnotationLink', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Annotation),), (), _M_omero.model._t_PixelsAnnotationLinksSeq, ())
Pixels._op_unlinkAnnotation = IcePy.Operation('unlinkAnnotation', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_Annotation),), (), None, ())
Pixels._op_removePixelsAnnotationLinkFromBoth = IcePy.Operation('removePixelsAnnotationLinkFromBoth', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero.model._t_PixelsAnnotationLink), ((), IcePy._t_bool)), (), None, ())
Pixels._op_linkedAnnotationList = IcePy.Operation('linkedAnnotationList', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero.model._t_PixelsLinkedAnnotationSeq, ())
# Publish the generated types on the omero.model module object and drop the
# temporary local names (standard slice2py module epilogue).
_M_omero.model.Pixels = Pixels
del Pixels
_M_omero.model.PixelsPrx = PixelsPrx
del PixelsPrx
# End of module omero.model
__name__ = 'omero'
# End of module omero
| [
"gmauro@crs4.it"
] | gmauro@crs4.it |
0022e5af4b34bac0101c46ed56fdda12471bfbc2 | aba8e48577dba352eaebcfa6743bdf2e7e2de315 | /setup.py | 5c4e38b2e95bf9ce3d2ca27df97fa4822ae4d364 | [
"MIT"
] | permissive | tiagocoutinho/xkcd | 2ab019bcd4d5ac10ca638f268b0ed1d223ff47d0 | 33adc0bc1c15ae40e16a27575a710906e351e6d2 | refs/heads/master | 2021-01-22T21:41:10.590103 | 2017-03-19T11:09:51 | 2017-03-19T11:09:51 | 85,462,691 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # -*- coding: utf-8 -*-
#
# This file is part of the xkcd-get project
#
# Copyright (c) 2017 Tiago Coutinho
# Distributed under the MIT license. See LICENSE for more info.
import os
import sys
from setuptools import setup
# Third-party packages required at runtime by xkcd_get.
requirements = [
    "grequests",
    "bs4",
]

# Package metadata: a single-module distribution exposing one console script.
setup(
    name="xkcd-get",
    version="0.0.3",
    description="downloader of xkcd comics",
    author="Tiago Coutinho",
    author_email="coutinhotiago@gmail.com",
    url="https://github.com/tiagocoutinho/xkcd",
    keywords="xkcd",
    py_modules=["xkcd_get"],
    install_requires=requirements,
    zip_safe=False,
    # `xkcd-get` on the command line maps to xkcd_get.main().
    entry_points={
        "console_scripts": [
            "xkcd-get=xkcd_get:main",
        ],
    },
    classifiers=[
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
    ],
)
| [
"coutinhotiago@gmail.com"
] | coutinhotiago@gmail.com |
49372f57347b9b25f4a13f2f90ba51d3e00fb64d | 428b2789f055f35a3d7221dfdd35ef2a74262f76 | /백준_문제집/BFS/적록색약.py | 3c82e8fa43361ccdf165a1ba80a21b6b44552229 | [] | no_license | sinsomi/Coding-Test | eb9fcf9c9ef2b427287a8f9ea27320bf6616e49a | 881974b533dc8d1ba44e8734346e38a3e668fda8 | refs/heads/master | 2022-12-10T04:56:50.280532 | 2020-09-14T02:37:55 | 2020-09-14T02:37:55 | 287,198,959 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | import sys
# Board size and the n x n colour grid (one line of R/G/B characters per row),
# read from stdin. `list(s)` replaces the redundant `list([p for p in s])`.
n = int(input())
matrix = [list(sys.stdin.readline().strip()) for _ in range(n)]
# Shared visited grid; reset between the normal-vision and colour-blind passes.
visit = [[0] * n for _ in range(n)]
# 4-neighbourhood offsets: down, left, up, right.
dx = [1, 0, -1, 0]
dy = [0, -1, 0, 1]
def bfs(x, y, text):
    """Flood-fill from (x, y): mark every 4-connected cell whose colour equals
    `text` as visited in the global `visit` grid.

    Fixes: uses collections.deque so each dequeue is O(1) instead of the
    O(n) `list.pop(0)`, and marks the start cell itself as visited (the
    original relied on a neighbour re-enqueuing it).
    """
    from collections import deque

    queue = deque()
    queue.append((x, y))
    visit[x][y] = 1
    while queue:
        x, y = queue.popleft()
        for direction in range(4):
            nx, ny = x + dx[direction], y + dy[direction]
            if nx < 0 or nx >= n or ny < 0 or ny >= n:
                continue  # off the board
            if matrix[nx][ny] == text and visit[nx][ny] == 0:
                visit[nx][ny] = 1
                queue.append((nx, ny))
def bfs2(x, y, text, text2):
    """Flood-fill from (x, y), treating the two colours `text` and `text2` as
    one colour (red/green colour-blind view). Marks cells in the global
    `visit` grid.

    Fixes: collections.deque for O(1) dequeues instead of `list.pop(0)`,
    and the start cell is marked visited up front.
    """
    from collections import deque

    queue = deque()
    queue.append((x, y))
    visit[x][y] = 1
    while queue:
        x, y = queue.popleft()
        for direction in range(4):
            nx, ny = x + dx[direction], y + dy[direction]
            if nx < 0 or nx >= n or ny < 0 or ny >= n:
                continue  # off the board
            if matrix[nx][ny] in (text, text2) and visit[nx][ny] == 0:
                visit[nx][ny] = 1
                queue.append((nx, ny))
# First pass: count connected components per colour for normal vision.
r_cnt, g_cnt, b_cnt, rg_cnt = 0, 0, 0, 0
for row in range(n):
    for col in range(n):
        if visit[row][col] != 0:
            continue
        color = matrix[row][col]
        if color == "R":
            bfs(row, col, "R")
            r_cnt += 1
        elif color == "G":
            bfs(row, col, "G")
            g_cnt += 1
        elif color == "B":
            bfs(row, col, "B")
            b_cnt += 1

# Second pass: red/green colour-blind view — R and G merge into one colour,
# so reset the visited grid and count R/G components together.
visit = [[0] * n for _ in range(n)]
for row in range(n):
    for col in range(n):
        if visit[row][col] == 0 and matrix[row][col] in ("R", "G"):
            bfs2(row, col, "R", "G")
            rg_cnt += 1

# Output: component count for normal vision, then for red/green colour-blind.
print(r_cnt + b_cnt + g_cnt, rg_cnt + b_cnt)
"cindy960602@naver.com"
] | cindy960602@naver.com |
f0edcf01a50a21c0908381b011ca7b52dd8c04fb | 9e5eca27222871dd04e42c9106bb2fba07e598ff | /src/osxification/core_foundation/cf_number_type.py | f0cd9f2a858be41f18bf1d41f034984fe5d59fe2 | [] | no_license | jepebe/osxification | b2a68dec07cd0be3b7ebd519bd99d0bbd51e61c7 | c9a539f4dbeda9200e32a2eea2c955dd94e6f45e | refs/heads/master | 2016-09-03T06:35:41.659315 | 2015-05-19T18:00:23 | 2015-05-19T18:00:23 | 35,567,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | from osxification.c import Enum
# CoreFoundation CFNumberType constants exposed through the project's Enum
# base class. The `None` placeholders are rebound to concrete integer values
# by the CFNumberType.addEnum(...) calls that follow this class definition.
class CFNumberType(Enum):
# Fixed-width signed integer types.
kCFNumberSInt8Type = None
kCFNumberSInt16Type = None
kCFNumberSInt32Type = None
kCFNumberSInt64Type = None
# Fixed-width floating point types.
kCFNumberFloat32Type = None
kCFNumberFloat64Type = None
# C-native basic types.
kCFNumberCharType = None
kCFNumberShortType = None
kCFNumberIntType = None
kCFNumberLongType = None
kCFNumberLongLongType = None
kCFNumberFloatType = None
kCFNumberDoubleType = None
# Other (framework-defined) types.
kCFNumberCFIndexType = None
kCFNumberNSIntegerType = None
kCFNumberCGFloatType = None
kCFNumberMaxType = None
# Bind the numeric values defined by CoreFoundation's CFNumberType enum
# (CFNumber.h). Note kCFNumberMaxType deliberately aliases the highest value
# (16, same as kCFNumberCGFloatType), mirroring the C header.
CFNumberType.addEnum("kCFNumberSInt8Type", 1)
CFNumberType.addEnum("kCFNumberSInt16Type", 2)
CFNumberType.addEnum("kCFNumberSInt32Type", 3)
CFNumberType.addEnum("kCFNumberSInt64Type", 4)
CFNumberType.addEnum("kCFNumberFloat32Type", 5)
CFNumberType.addEnum("kCFNumberFloat64Type", 6)
CFNumberType.addEnum("kCFNumberCharType", 7)
CFNumberType.addEnum("kCFNumberShortType", 8)
CFNumberType.addEnum("kCFNumberIntType", 9)
CFNumberType.addEnum("kCFNumberLongType", 10)
CFNumberType.addEnum("kCFNumberLongLongType", 11)
CFNumberType.addEnum("kCFNumberFloatType", 12)
CFNumberType.addEnum("kCFNumberDoubleType", 13)
CFNumberType.addEnum("kCFNumberCFIndexType", 14)
CFNumberType.addEnum("kCFNumberNSIntegerType", 15)
CFNumberType.addEnum("kCFNumberCGFloatType", 16)
CFNumberType.addEnum("kCFNumberMaxType", 16)
# Register the completed enum under its CoreFoundation type name.
CFNumberType.registerEnum("CFNumberType")
| [
"jepebe@users.noreply.github.com"
] | jepebe@users.noreply.github.com |
47d464054b8f55b003a9117767310c2e27b0ee56 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/data_transformation_resp.py | 57c96c1b9a290c243df869933fe1713af14d0dc7 | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,705 | py | # coding: utf-8
import re
import six
class DataTransformationResp:
    """One data-transformation task entry in a DRS API response.

    OpenAPI-generated-model style class: ``openapi_types`` maps attribute
    names to their types and ``attribute_map`` maps attribute names to the
    JSON keys used on the wire; every field is exposed via a property pair.

    Fixes over the generated original: iteration no longer depends on the
    third-party ``six`` shim, and ``to_str`` falls back to the standard
    library ``json`` module when ``simplejson`` is not installed.
    """

    # Attribute names listed here are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'id': 'str',
        'status': 'str',
        'error_code': 'str',
        'error_msg': 'str'
    }

    attribute_map = {
        'id': 'id',
        'status': 'status',
        'error_code': 'error_code',
        'error_msg': 'error_msg'
    }

    def __init__(self, id=None, status=None, error_code=None, error_msg=None):
        """DataTransformationResp - a model defined in huaweicloud sdk

        :param id: task id
        :param status: task status
        :param error_code: error code, if the task failed
        :param error_msg: error message, if the task failed
        """
        self._id = None
        self._status = None
        self._error_code = None
        self._error_msg = None
        self.discriminator = None

        # Only assign through the setters when a value was actually given.
        if id is not None:
            self.id = id
        if status is not None:
            self.status = status
        if error_code is not None:
            self.error_code = error_code
        if error_msg is not None:
            self.error_msg = error_msg

    @property
    def id(self):
        """Gets the id of this DataTransformationResp.

        Task id.

        :return: The id of this DataTransformationResp.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this DataTransformationResp.

        Task id.

        :param id: The id of this DataTransformationResp.
        :type: str
        """
        self._id = id

    @property
    def status(self):
        """Gets the status of this DataTransformationResp.

        Task status.

        :return: The status of this DataTransformationResp.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this DataTransformationResp.

        Task status.

        :param status: The status of this DataTransformationResp.
        :type: str
        """
        self._status = status

    @property
    def error_code(self):
        """Gets the error_code of this DataTransformationResp.

        Error code.

        :return: The error_code of this DataTransformationResp.
        :rtype: str
        """
        return self._error_code

    @error_code.setter
    def error_code(self, error_code):
        """Sets the error_code of this DataTransformationResp.

        Error code.

        :param error_code: The error_code of this DataTransformationResp.
        :type: str
        """
        self._error_code = error_code

    @property
    def error_msg(self):
        """Gets the error_msg of this DataTransformationResp.

        Error message.

        :return: The error_msg of this DataTransformationResp.
        :rtype: str
        """
        return self._error_msg

    @error_msg.setter
    def error_msg(self, error_msg):
        """Sets the error_msg of this DataTransformationResp.

        Error message.

        :param error_msg: The error_msg of this DataTransformationResp.
        :type: str
        """
        self._error_msg = error_msg

    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested models."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the JSON string representation of the model."""
        try:
            import simplejson as json  # SDK default when available
        except ImportError:
            import json  # stdlib fallback; identical output for this payload
        return json.dumps(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DataTransformationResp):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
c671c9bc14faeca0c67ea4b50d76925245201787 | 1d7eec692553afc411ec1e7325634f71a2aed291 | /backend/git_real/helpers.py | bf3c422e05de22a0a2d2461619aa0a66fe26c830 | [] | no_license | Andy-Nkumane/Tilde | a41a2a65b3901b92263ae94d527de403f59a5caf | 80de97edaf99f4831ca8cb989b93e3be5e09fdd6 | refs/heads/develop | 2023-05-09T10:02:41.240517 | 2021-05-28T09:20:51 | 2021-05-28T09:20:51 | 299,501,586 | 0 | 0 | null | 2020-10-25T22:37:30 | 2020-09-29T04:10:48 | Python | UTF-8 | Python | false | false | 6,310 | py | import base64
from social_auth.github_api import Api
from git_real import models
from git_real.constants import GITHUB_DATETIME_FORMAT, GITHUB_DEFAULT_TIMEZONE
from timezone_helpers import timestamp_str_to_tz_aware_datetime
from django.http import Http404
def strp_github_standard_time(timestamp: str):
    """Parse a GitHub API timestamp string into a timezone-aware datetime.

    Returns None for a falsy timestamp (GitHub uses null for absent dates).
    """
    if not timestamp:
        return None
    return timestamp_str_to_tz_aware_datetime(
        timestamp=timestamp,
        dt_format=GITHUB_DATETIME_FORMAT,
        zone_name=GITHUB_DEFAULT_TIMEZONE,
    )
def upload_readme(api, repo_full_name, readme_text):
    """Create a README.md in the given repo if one does not already exist.

    Looks the file up first; only on a 404 (missing) is the base64-encoded
    content PUT to the contents API. Fails via assert on any non-2xx response.
    """
    readme_path = f"repos/{repo_full_name}/contents/README.md"
    try:
        response = api.request(readme_path, json=False)
    except Http404:
        # README is missing — create it. The contents API wants base64 text.
        encoded = base64.b64encode(readme_text.encode("utf-8")).decode("utf-8")
        response = api.put(
            readme_path,
            {"message": "Added README.md", "content": encoded},
            json=False,
        )
    assert str(response.status_code).startswith("2"), f"{response}\n {response.json()}"
def create_org_repo(api, repo_full_name, private=True, exists_ok=False, **post_kwargs):
    """Create a repository under a GitHub organisation and persist it locally.

    Extra keyword arguments are forwarded to the repo-creation POST. If the
    repo already exists, raise unless exists_ok is True. Returns the result
    of fetching and saving the (new or existing) repo.
    """
    org, repo_name = repo_full_name.split("/")
    payload = {
        "name": repo_name,
        "private": private,
    }
    payload.update(post_kwargs)

    result = api.post(f"orgs/{org}/repos", payload)
    if "errors" in result:
        message = result["errors"][0]["message"]
        if message != "name already exists on this account":
            # unhandled error
            print("===============")
            print(payload)
            print("================")
            raise Exception(result)
        if not exists_ok:
            raise Exception(result)
    return fetch_and_save_repo(repo_full_name=repo_full_name, api=api)
def _protection_settings(restrictions_users=None, restrictions_teams=None):
restrictions_users = restrictions_users or []
restrictions_teams = restrictions_teams or []
return {
"required_status_checks": None,
"enforce_admins": False,
"required_pull_request_reviews": {
"dismissal_restrictions": {},
"dismiss_stale_reviews": True,
"require_code_owner_reviews": False,
"required_approving_review_count": 2,
},
"dismissal_restrictions": {
"users": restrictions_users,
"teams": restrictions_teams,
},
# "restrictions": {"users": restrictions_users, "teams": restrictions_teams,},
"restrictions": None,
}
def protect_master(api, repo_full_name):
response = api.put(
f"repos/{repo_full_name}/branches/main/protection",
_protection_settings(),
headers={"Accept": "application/vnd.github.luke-cage-preview+json"},
)
# {'message': "If you would like to help us test the Require Multiple Reviewers API during its preview period, you must specify a custom media type in the 'Accept' header. Please see the docs for full details.", 'documentation_url': 'https://developer.github.com/v3/repos/branches/#update-branch-protection'}
if "errors" in response:
raise Exception(response)
def get_repo(repo_full_name, github_auth_login="", api=None, response404=None):
api = api or Api(github_auth_login)
return api.request(f"repos/{repo_full_name}", response404=response404)
def list_collaborators(api, repo_full_name):
"""queries gihub for a list of collaborator names associated with this repo"""
response = api.request(
f"repos/{repo_full_name}/collaborators",
json=True,
)
return [d["login"] for d in response]
def add_collaborator(api, repo_full_name, github_user_name, github_auth_login=None):
api = api or Api(github_auth_login)
# print(list_collaborators(api, repo_full_name))
response = api.put(
f"repos/{repo_full_name}/collaborators/{github_user_name}",
# {"permission": "push"},
headers={"accept": "application/vnd.github.v3+json"},
json=False,
data={},
)
# breakpoint()
if response.status_code == 404:
raise Exception(f"user or repo not found: {repo_full_name} {github_user_name}")
if response.status_code not in [201, 204]:
raise Exception(response.content)
# collaborators = get_collaborators(github_auth_login, repo_full_name, api=api)
# if github_user_name not in collaborators:
# EXCEPTION is always raised because collaborators is a list of dictionaries and github_user_name is a stringz
# raise Exception(f"Adding collaborator: {github_user_name} unsuccessful.")
def save_repo(repo: dict, user=None):
print(f"saving: {repo['full_name']}")
obj, created = models.Repository.objects.get_or_create(
ssh_url=repo["ssh_url"],
defaults={
"full_name": repo["full_name"],
"owner": repo["owner"]["login"],
"ssh_url": repo["ssh_url"],
"private": repo["private"],
"created_at": strp_github_standard_time(
repo["created_at"],
),
"archived": repo["archived"],
"user": user,
},
)
if not created:
obj.archived = obj.archived or repo["archived"]
obj.save()
return obj
def fetch_and_save_repo(api, repo_full_name):
repo_dict = get_repo(api=api, repo_full_name=repo_full_name, response404=404)
if repo_dict == 404:
return
o = save_repo(repo_dict)
assert o != None
return o
# def create_required_webhooks(api, repo_full_name, webhook_url):
# response = api.post(
# f"repos/{repo_full_name}/hooks",
# headers={"accept": "application/vnd.github.v3+json"},
# data={
# "config": {
# "url": webhook_url,
# "content_type": "json",
# "events": [
# # https://docs.github.com/en/developers/webhooks-and-events/github-event-types
# "PullRequestEvent",
# "PullRequestReviewCommentEvent",
# "PushEvent",
# ],
# }
# },
# json=False,
# )
# breakpoint()
# pass
| [
"sheena.oconnell@gmail.com"
] | sheena.oconnell@gmail.com |
0641e580798d05608958c806b1f8e45d5f9962c6 | a0db06c233d73b275c657b14ebc5e87dd91bc5e1 | /benchmark/egfrd/out_BD.py | e4918445bcc9c4c65cd550c9bb825413f0a53011 | [] | no_license | likr/ecell3-spatiocyte | 9a7cd258aa3fbd837ff3867a3cf8e9e99233a19e | 26a3231e9b022a239956938feabab9099baaee97 | refs/heads/master | 2021-01-14T14:16:35.291030 | 2013-03-07T04:27:28 | 2013-03-07T04:27:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | # dt_factor = 1e-05
data_BD = [
# T=1e-06, N=100, V=1e-12
# steps= 8001, steps/sec= 7113.922237, steps/N= 80.010000
# run_times = [1.089055061340332, 1.0960071086883545, 1.1246960163116455]
[10890550.61340332, 10960071.086883545, 11246960.163116455],
# T=3.33333e-07, N=300, V=1e-12
# steps= 2667, steps/sec= 2568.901297, steps/N= 8.890000
# run_times = [1.025061845779419, 1.037959098815918, 1.038187026977539]
[30751855.37338257, 31138772.96447754, 31145610.809326172],
# T=1e-07, N=1000, V=1e-12
# steps= 801, steps/sec= 730.918151, steps/N= 0.801000
# run_times = [1.1104531288146973, 1.108593225479126, 1.0958819389343262]
[111045312.88146971, 110859322.54791258, 109588193.8934326],
# T=3.33333e-08, N=3000, V=1e-12
# steps= 267, steps/sec= 228.073911, steps/N= 0.089000
# run_times = [1.1678550243377686, 1.1749780178070068, 1.170673131942749]
[350356507.30133057, 352493405.34210205, 351201939.5828247],
# T=1e-08, N=10000, V=1e-12
# steps= 81, steps/sec= 66.546508, steps/N= 0.008100
# run_times = [1.219210147857666, 1.2195098400115967, 1.217193841934204]
[1219210147.857666, 1219509840.0115967, 1217193841.934204],
# T=3.33333e-09, N=30000, V=1e-12
# steps= 27, steps/sec= 17.751455, steps/N= 0.000900
# run_times = [1.5134360790252686, 1.507871150970459, 1.5210020542144775]
[4540308237.075806, 4523613452.911377, 4563006162.643433],
# T=1e-09, N=100000, V=1e-12
# steps= 8, steps/sec= 3.700389, steps/N= 0.000080
# run_times = [2.140352964401245, 2.145677089691162, 2.1619350910186768]
[21403529644.01245, 21456770896.91162, 21619350910.186768],
]
| [
"satya.arjunan@gmail.com"
] | satya.arjunan@gmail.com |
adc2011e57740926b2a60ca255e2411fb52098be | d34f82e7aa5da1d535f30ba7c0be6f8efff18107 | /backend/spaceshooter_3559/settings.py | 1a6e5daa445b9e858f47516c4f4629810933b373 | [] | no_license | crowdbotics-apps/spaceshooter-3559 | cc5f5c7e45e7db8733de12cbd53de15b6a01ce1c | 49d2a7f8fbba710dc74ffc5f3add81d4f72a9cf5 | refs/heads/master | 2020-05-25T00:02:37.230248 | 2019-05-19T20:49:44 | 2019-05-19T20:49:44 | 187,526,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,588 | py | """
Django settings for spaceshooter_3559 project.
Generated by 'django-admin startproject' using Django 1.11.20.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8)x@grvzi66tmeplo^=4j405!7=ycfx_qj@ic2mp*u9zbpm+b5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'spaceshooter_3559.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'spaceshooter_3559.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
import environ
env = environ.Env()
ALLOWED_HOSTS = ['*']
SITE_ID = 1
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
2431b176a55f8c6554eaee9d4f08a9dd7b51f8d2 | f4f181f2c970a163801b4202fc8d6c92a4e8113d | /google-cloud-sdk/lib/googlecloudsdk/core/cache/exceptions.py | 2a53ffbf78a8f4a08d4a1c7348e5267837dd01bf | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | Sorsly/subtle | 7732a6cb910f5e2f4eed1ac0d3b5979001582340 | 718e79a3e04f1f57f39b6ebe90dec9e028e88d40 | refs/heads/master | 2021-05-24T01:21:39.218495 | 2017-10-28T01:33:58 | 2017-10-28T01:33:58 | 83,103,372 | 0 | 1 | MIT | 2020-07-25T11:21:05 | 2017-02-25T03:33:07 | Python | UTF-8 | Python | false | false | 1,730 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions for the Cloud SDK persistent cache module."""
class Error(Exception):
"""Base for all persistent cache exceptions."""
class CacheVersionMismatch(Error):
"""Cache version mismatch."""
def __init__(self, message, actual, requested):
super(CacheVersionMismatch, self).__init__(message)
self.actual = actual
self.requested = requested
class CacheInvalid(Error):
"""Cach object is invalid."""
class CacheNameInvalid(Error):
"""Name is not a valid cache name."""
class CacheNotFound(Error):
"""Cache not found."""
class CacheTableDeleted(Error):
"""Cache table deleted."""
class CacheTableExpired(Error):
"""Cache table expired."""
class CacheTableRestricted(Error):
"""Cache table is restricted."""
class CacheTableNameInvalid(Error):
"""Cache table invalid table name."""
class CacheTableColumnsInvalid(Error):
"""Cache table columns invalid."""
class CacheTableKeysInvalid(Error):
"""Cache table keys invalid."""
class CacheTableNotFound(Error):
"""Cache table not found."""
class CacheTableRowSizeInvalid(Error):
"""Cache table row has incorrect size."""
| [
"han300@purdue.edu"
] | han300@purdue.edu |
15cb0c801573dc6c1fa20b20b008cf2ebbbab028 | 19d47d47c9614dddcf2f8d744d883a90ade0ce82 | /pynsxt/swagger_client/models/protocol_version.py | ecf2c99557eb577188be0e061a75187b063fc04b | [] | no_license | darshanhuang1/pynsxt-1 | 9ed7c0da9b3a64e837a26cbbd8b228e811cee823 | fb1091dff1af7f8b8f01aec715682dea60765eb8 | refs/heads/master | 2020-05-25T14:51:09.932853 | 2018-05-16T12:43:48 | 2018-05-16T12:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,888 | py | # coding: utf-8
"""
NSX API
VMware NSX REST API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ProtocolVersion(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'enabled': 'bool',
'name': 'str'
}
attribute_map = {
'enabled': 'enabled',
'name': 'name'
}
def __init__(self, enabled=None, name=None): # noqa: E501
"""ProtocolVersion - a model defined in Swagger""" # noqa: E501
self._enabled = None
self._name = None
self.discriminator = None
self.enabled = enabled
self.name = name
@property
def enabled(self):
"""Gets the enabled of this ProtocolVersion. # noqa: E501
Enable status for this protocol version # noqa: E501
:return: The enabled of this ProtocolVersion. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this ProtocolVersion.
Enable status for this protocol version # noqa: E501
:param enabled: The enabled of this ProtocolVersion. # noqa: E501
:type: bool
"""
if enabled is None:
raise ValueError("Invalid value for `enabled`, must not be `None`") # noqa: E501
self._enabled = enabled
@property
def name(self):
"""Gets the name of this ProtocolVersion. # noqa: E501
Name of the TLS protocol version # noqa: E501
:return: The name of this ProtocolVersion. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ProtocolVersion.
Name of the TLS protocol version # noqa: E501
:param name: The name of this ProtocolVersion. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProtocolVersion):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"tcraft@pivotal.io"
] | tcraft@pivotal.io |
fe576e708d4593189a39418e127740d8fb4917db | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/4cb959150b31b8e8000fbce71583b4bae828e7b0-<test_delete_install>-fix.py | 57a327550c539cacc60b763701c92aeef81d3564 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | @responses.activate
def test_delete_install(self):
responses.add(url='https://example.com/webhook', method=responses.POST, body={
})
self.login_as(user=self.user)
response = self.client.delete(self.url, format='json')
assert (response.status_code == 204) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
07f7368b4fbb438613eaef2bbd3af716121c3aad | 1065ec75d9ee668ffd7aafc6a8de912d7c2cee6f | /addons/script.icechannel.extn.extra.uk/plugins/livetv_uk/the_vault_ltvi.py | 72df20a16aef7097c9ee51ce4e86b218c5dc699f | [] | no_license | bopopescu/kodiprofile | 64c067ee766e8a40e5c148b8e8ea367b4879ffc7 | 7e78640a569a7f212a771aab6a4a4d9cb0eecfbe | refs/heads/master | 2021-06-11T17:16:15.498281 | 2016-04-03T06:37:30 | 2016-04-03T06:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | '''
Ice Channel
'''
from entertainment.plugnplay.interfaces import LiveTVIndexer
from entertainment.plugnplay import Plugin
from entertainment import common
class the_vault(LiveTVIndexer):
implements = [LiveTVIndexer]
display_name = "The Vault"
name = "the_vault"
other_names = "the_vault,The Vault"
import xbmcaddon
import os
addon_id = 'script.icechannel.extn.extra.uk'
addon = xbmcaddon.Addon(addon_id)
img = os.path.join( addon.getAddonInfo('path'), 'resources', 'images', name + '.png' )
regions = [
{
'name':'United Kingdom',
'img':addon.getAddonInfo('icon'),
'fanart':addon.getAddonInfo('fanart')
},
]
languages = [
{'name':'English', 'img':'', 'fanart':''},
]
genres = [
{'name':'Music', 'img':'', 'fanart':''}
]
addon = None
| [
"sokasoka@hotmail.com"
] | sokasoka@hotmail.com |
b26abcb7e4a798915466f32474e2d44d8fdea758 | 4a7462f65826586edccfe5709259603d53da5b10 | /presentation/scripts/test-6.py | 092c5bf4ea0afc58c86889083599969770c3bec5 | [] | no_license | ctn-archive/bekolay-fnme2015 | 846ba3bdfdae121cd1ca10de81d5eae2e570e84f | ad85f5a6f33031d5229344f92ba1df60f4515488 | refs/heads/master | 2021-01-18T01:45:50.708267 | 2015-11-04T22:40:43 | 2015-11-05T09:59:54 | 39,850,277 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | import nengo; import numpy as np
from nengo.tests.conftest import Simulator, plt, seed
from nengo.tests.conftest import pytest_generate_tests
def test_ensemble(Simulator, nl, seed, plt):
with nengo.Network(seed=seed) as model:
model.config[nengo.Ensemble].neuron_type = nl()
stim = nengo.Node([0.5])
ens = nengo.Ensemble(40, dimensions=1)
nengo.Connection(stim, ens)
probe = nengo.Probe(ens, synapse=0.05)
sim = Simulator(model)
sim.run(0.5)
plt.plot(sim.trange(), sim.data[probe])
assert np.allclose(
sim.data[probe][sim.trange() > 0.4], 0.5, atol=0.1)
| [
"tbekolay@gmail.com"
] | tbekolay@gmail.com |
c4972e52a17ca8eb8805acb3b86cdc7e6b6a6934 | 62b84f877ccb4171f558c225fa0fdd4fd2c44d6c | /tests/counter_mnist.py | b97377f0211334b4226a6d8638c506e786fe1952 | [] | no_license | guicho271828/latplan | b6dfb55f3cceac947df770fb623d496111f9ab19 | 75a2fc773de245b422a695b51fccaf17294da123 | refs/heads/master | 2022-10-25T02:02:05.547143 | 2022-03-25T20:42:06 | 2022-03-25T20:59:29 | 96,482,151 | 77 | 19 | null | 2023-03-04T14:10:46 | 2017-07-07T00:11:52 | Python | UTF-8 | Python | false | false | 784 | py | #!/usr/bin/env python3
import numpy as np
import sys
sys.path.append('../../')
from latplan.puzzles.counter_mnist import generate_configs, successors, generate, states, transitions
from plot import plot_image, plot_grid
configs = generate_configs(10)
puzzles = generate(configs)
print(puzzles[9])
plot_image(puzzles[9],"counter_mnist.png")
plot_grid(puzzles[:36],"counter_mnists.png")
_transitions = transitions(10)
import numpy.random as random
indices = random.randint(0,_transitions[0].shape[0],18)
_transitions = _transitions[:,indices]
print(_transitions.shape)
transitions_for_show = \
np.einsum('ba...->ab...',_transitions) \
.reshape((-1,)+_transitions.shape[2:])
print(transitions_for_show.shape)
plot_grid(transitions_for_show,"counter_mnist_transitions.png")
| [
"guicho2.71828@gmail.com"
] | guicho2.71828@gmail.com |
5dc355023b3a4dd052b3885c1226e09917476700 | e073873a34f227b4b7ff5a5d071cfd486cf0a881 | /illumina2cluster/prep_sample_sheet.py | dcaf15b732788d59dfdbec8224feea69a1f14b38 | [
"Artistic-2.0"
] | permissive | nandr0id/genomics | b067fcb1c8c7a6ffaa1e83606688fafdef2d6941 | 0cac3a75b6802ec623522d060df5dff8d823bfec | refs/heads/master | 2020-06-26T02:57:21.822505 | 2019-07-04T14:15:06 | 2019-07-04T14:15:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,050 | py | #!/usr/bin/env python
#
# prep_sample_sheet.py: prepare sample sheet file for Illumina sequencers
# Copyright (C) University of Manchester 2012-16,2019 Peter Briggs
#
########################################################################
#
# prep_sample_sheet.py
#
#########################################################################
"""prep_sample_sheet.py
Prepare sample sheet file for Illumina sequencers.
"""
__version__ = "0.4.1"
#######################################################################
# Imports
#######################################################################
import os
import sys
import argparse
import logging
import pydoc
# Put .. onto Python search path for modules
SHARE_DIR = os.path.abspath(
os.path.normpath(
os.path.join(os.path.dirname(sys.argv[0]),'..')))
sys.path.append(SHARE_DIR)
import bcftbx.IlluminaData as IlluminaData
from bcftbx.utils import parse_lanes
from bcftbx.utils import parse_named_lanes
#######################################################################
# Functions
#######################################################################
def truncate_barcode(seq,length):
"""Return barcode sequence truncated to requested length
'seq' is a barcode sequence (note that dual index sequences
are of the form e.g. 'AGGTAC-GGCCTT' i.e. the name includes
a hyphen) and 'length' is the desired length (i.e. number of
bases to keep).
"""
try:
i = seq.index('-')
# Dual index barcode
if i >= length:
return seq[:length]
else:
return seq[:length+1]
except ValueError:
# No hyphen: single index barcode
return seq[:length]
#######################################################################
# Unit tests
#######################################################################
import unittest
class TestTruncateBarcodeFunction(unittest.TestCase):
"""Tests for the 'truncate_barcode' function
"""
def test_truncate_single_index_barcode(self):
self.assertEqual(truncate_barcode('CGTACTAG',0),'')
self.assertEqual(truncate_barcode('CGTACTAG',6),'CGTACT')
self.assertEqual(truncate_barcode('CGTACTAG',8),'CGTACTAG')
self.assertEqual(truncate_barcode('CGTACTAG',10),'CGTACTAG')
def test_truncate_dual_index_barcode(self):
self.assertEqual(truncate_barcode('AGGCAGAA-TAGATCGC',0),'')
self.assertEqual(truncate_barcode('AGGCAGAA-TAGATCGC',6),'AGGCAG')
self.assertEqual(truncate_barcode('AGGCAGAA-TAGATCGC',8),'AGGCAGAA')
self.assertEqual(truncate_barcode('AGGCAGAA-TAGATCGC',10),'AGGCAGAA-TA')
self.assertEqual(truncate_barcode('AGGCAGAA-TAGATCGC',16),'AGGCAGAA-TAGATCGC')
#######################################################################
# Main program
#######################################################################
if __name__ == "__main__":
# Set up logging output
logging.basicConfig(format="%(levelname)s %(message)s")
# Set up parser
p = argparse.ArgumentParser(
version="%(prog)s "+__version__,
description="Utility to prepare SampleSheet files from "
"Illumina sequencers. Can be used to view, validate and "
"update or fix information such as sample IDs and project "
"names before running BCL to FASTQ conversion.")
p.add_argument('-o',action="store",dest="samplesheet_out",
default=None,
help="output new sample sheet to SAMPLESHEET_OUT")
p.add_argument('-f','--format',action="store",dest="fmt",
help="specify the format of the output sample sheet "
"written by the -o option; can be either 'CASAVA' or "
"'IEM' (defaults to the format of the original file)")
p.add_argument('-V','--view',action="store_true",dest="view",
help="view predicted outputs from sample sheet")
p.add_argument('--fix-spaces',action="store_true",dest="fix_spaces",
help="replace spaces in sample ID and project fields "
"with underscores")
p.add_argument('--fix-duplicates',action="store_true",
dest="fix_duplicates",
help="append unique indices to sample IDs where the "
"original ID and project name combination are "
"duplicated")
p.add_argument('--fix-empty-projects',action="store_true",
dest="fix_empty_projects",
help="create sample project names where these are "
"blank in the original sample sheet")
p.add_argument('--set-id',action="append",dest="sample_id",
default=[],
help="update/set the values in sample ID field; "
"SAMPLE_ID should be of the form '<lanes>:<name>', "
"where <lanes> is a single integer (e.g. 1), a set of "
"integers (e.g. 1,3,...), a range (e.g. 1-3), or "
"a combination (e.g. 1,3-5,7)")
p.add_argument('--set-project',action="append",dest="sample_project",
default=[],
help="update/set values in the sample project field; "
"SAMPLE_PROJECT should be of the form '[<lanes>:]<name>', "
"where the optional <lanes> part can be a single "
"integer (e.g. 1), a set of integers (e.g. 1,3,...), a "
"range (e.g. 1-3), or a combination (e.g. 1,3-5,7). If no "
"lanes are specified then all samples will have their "
"project set to <name>")
p.add_argument('--ignore-warnings',action="store_true",
dest="ignore_warnings",default=False,
help="ignore warnings about spaces and duplicated "
"sampleID/sampleProject combinations when writing new "
"samplesheet.csv file")
p.add_argument('--include-lanes',action="store",dest="lanes",
default=None,
help="specify a subset of lanes to include in the output "
"sample sheet; LANES should be single integer (e.g. 1), a "
"list of integers (e.g. 1,3,...), a range (e.g. 1-3) or a "
"combination (e.g. 1,3-5,7). Default is to include all "
"lanes")
p.add_argument('--set-adapter',action="store",dest="adapter",default=None,
help="set the adapter sequence in the 'Settings' section "
"to ADAPTER")
p.add_argument('--set-adapter-read2',action="store",dest="adapter_read2",
default=None,
help="set the adapter sequence for read 2 in the 'Settings'"
"section to ADAPTER_READ2")
deprecated_options = p.add_argument_group("Deprecated options")
deprecated_options.add_argument('--truncate-barcodes',
action="store",dest="barcode_len",
default=None,type=int,
help="trim barcode sequences in sample "
"sheet to number of bases specified by "
"BARCODE_LEN. Default is to leave "
"barcode sequences unmodified (deprecated; "
"only works for CASAVA-style sample "
"sheets)")
deprecated_options.add_argument('--miseq',
action="store_true",dest="miseq",
help="convert input MiSEQ sample sheet to "
"CASAVA-compatible format (deprecated; "
"specify -f/--format CASAVA to convert "
"IEM sample sheet to older format)")
p.add_argument('sample_sheet',metavar="SAMPLE_SHEET",
help="input sample sheet file")
# Process command line
args = p.parse_args()
if args.miseq:
logging.warning("--miseq option no longer necessary; "
"MiSEQ-style sample sheets are now converted "
"automatically")
# Get input sample sheet file
samplesheet = args.sample_sheet
if not os.path.isfile(samplesheet):
logging.error("sample sheet '%s': not found" % samplesheet)
sys.exit(1)
# Read in the sample sheet
data = IlluminaData.SampleSheet(samplesheet)
if data.format is None:
logging.error("Unable to determine samplesheet format")
sys.exit(1)
print("Sample sheet format: %s" % data.format)
# Remove lanes
if args.lanes is not None:
if not data.has_lanes:
logging.error("sample sheet doesn't define any lanes")
sys.exit(1)
lanes = parse_lanes(args.lanes)
print("Keeping lanes %s, removing the rest" %
','.join([str(x) for x in lanes]))
i = 0
while i < len(data):
line = data[i]
if line['Lane'] in lanes:
print("Keeping %s" % line)
i += 1
else:
del(data[i])
# Update the SampleID and SampleProject fields
for sample_id in args.sample_id:
if not data.has_lanes:
logging.error("No lanes in sample sheet for assigning sample ids")
sys.exit(1)
lanes,name = parse_named_lanes(sample_id)
if lanes is None:
logging.error("No lanes specified for sample id assignment")
sys.exit(1)
for line in data:
if line['Lane'] in lanes:
print("Setting SampleID for lane %d: '%s'" % (line['Lane'],
name))
line[data.sample_id_column] = name
# Update the SampleProject field
for sample_project in args.sample_project:
lanes,name = parse_named_lanes(sample_project)
if lanes is None:
logging.warning("Setting project for all samples to '%s'" % name)
for line in data:
line[data.sample_project_column] = name
else:
if not data.has_lanes:
logging.error("No lanes in sample sheet for assigning sample projects")
sys.exit(1)
for line in data:
if line['Lane'] in lanes:
print("Setting SampleProject for lane %d: '%s' "
" (%s)"% (line['Lane'],
name,
line[data.sample_id_column]))
line[data.sample_project_column] = name
# Truncate barcodes
if args.barcode_len is not None:
logging.warning("barcode truncation function is deprecated")
if 'Index' not in data.column_names:
logging.error("barcode truncation not possible without 'Index' column")
sys.exit(1)
barcode_len = args.barcode_len
for line in data:
barcode = truncate_barcode(line['Index'],args.barcode_len)
print("Lane %d '%s/%s': barcode '%s' -> '%s'" %
(line['Lane'],
line['SampleProject'],
line['SampleID'],
line['Index'],
barcode))
line['Index'] = barcode
# Set adapter sequences
if args.adapter is not None:
data.settings['Adapter'] = args.adapter
if args.adapter_read2 is not None:
data.settings['AdapterRead2'] = args.adapter_read2
# Fix spaces
if args.fix_spaces:
data.fix_illegal_names()
# Fix empty projects
if args.fix_empty_projects:
for line in data:
if not line[data.sample_project_column]:
line[data.sample_project_column] = line[data.sample_id_column]
# Fix duplicates
if args.fix_duplicates:
data.fix_duplicated_names()
# Check for non-unique id/project combinations, spaces and empty names
check_status = 0
# Duplicated names
duplicates = data.duplicated_names
if len(duplicates) > 0:
check_status = 1
for duplicate_set in duplicates:
for line in duplicate_set:
logging.warning("Duplicated %s/%s in line:\n%s" %
(data.sample_id_column,
data.sample_project_column,
line))
# Illegal characters/spaces in names
illegal_names = data.illegal_names
if len(illegal_names) > 0:
check_status = 1
for line in illegal_names:
logging.warning("Spaces in %s/%s in line:\n%s" %
(data.sample_id_column,
data.sample_project_column,
line))
# Empty names
empty_names = data.empty_names
if len(empty_names) > 0:
check_status = 1
for line in empty_names:
logging.warning("Empty %s and/or %s in line:\n%s" %
(data.sample_id_column,
data.sample_project_column,
line))
# Predict outputs
if check_status == 0 or args.ignore_warnings or args.view:
# Generate prediction
prediction = []
predictor = IlluminaData.SampleSheetPredictor(sample_sheet=data)
title = "Predicted projects:"
prediction.append("%s\n%s" % (title,('='*len(title))))
for project_name in predictor.project_names:
prediction.append("- %s" % project_name)
for project_name in predictor.project_names:
project = predictor.get_project(project_name)
title = "%s (%d samples)" % (project_name,
len(project.sample_ids))
prediction.append("\n%s\n%s" % (title,('-'*len(title))))
for sample_id in project.sample_ids:
sample = project.get_sample(sample_id)
for barcode in sample.barcode_seqs:
lanes = sample.lanes(barcode)
if lanes:
lanes = "L%s" % (','.join([str(l)
for l in lanes]))
else:
lanes = "L*"
line = [sample_id,
"S%d" % sample.s_index,
barcode,
lanes]
prediction.append("%s" % '\t'.join([str(i) for i in line]))
prediction = '\n'.join(prediction)
# Handle paginated output
if os.isatty(sys.stdout.fileno()):
# Detected that stdout is a terminal
prediction += '\n'
# Acquire a pager command
try:
pager = os.environ["PAGER"]
except KeyError:
pager = None
# Output the prediction with paging
if pager is not None:
pydoc.pipepager(prediction,cmd=pager)
else:
pydoc.pager(prediction)
else:
# Stdout not a terminal
print(prediction)
# Write out new sample sheet
if args.samplesheet_out:
if check_status and not args.ignore_warnings:
logging.error("please fix above errors in sample sheet data")
else:
if args.fmt is not None:
fmt = str(args.fmt).upper()
else:
fmt = data.format
if fmt not in ('CASAVA','IEM'):
logging.error("unknown output format '%s'" % fmt)
sys.exit(1)
print("Writing to %s in %s format" % (args.samplesheet_out,
fmt))
data.write(args.samplesheet_out,fmt=fmt)
# Finish
sys.exit(check_status)
| [
"peter.briggs@manchester.ac.uk"
] | peter.briggs@manchester.ac.uk |
414441c3ce1089e1e1406ee76644be8bf0e77341 | 271c7959a39f3d7ff63dddf285004fd5badee4d9 | /venv/Lib/site-packages/netaddr/strategy/eui64.py | 03de537533557e72ce24476b3810c9d5fcf1cc2b | [
"MIT"
] | permissive | natemellendorf/configpy | b6b01ea4db1f2b9109fd4ddb860e9977316ed964 | 750da5eaef33cede9f3ef532453d63e507f34a2c | refs/heads/master | 2022-12-11T05:22:54.289720 | 2019-07-22T05:26:09 | 2019-07-22T05:26:09 | 176,197,442 | 4 | 1 | MIT | 2022-12-08T02:48:51 | 2019-03-18T03:24:12 | Python | UTF-8 | Python | false | false | 7,707 | py | #-----------------------------------------------------------------------------
# Copyright (c) 2008 by David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""
IEEE 64-bit EUI (Extended Unique Indentifier) logic.
"""
import struct as _struct
import re as _re
from netaddr.core import AddrFormatError
from netaddr.strategy import (
valid_words as _valid_words, int_to_words as _int_to_words,
words_to_int as _words_to_int, valid_bits as _valid_bits,
bits_to_int as _bits_to_int, int_to_bits as _int_to_bits,
valid_bin as _valid_bin, int_to_bin as _int_to_bin,
bin_to_int as _bin_to_int)
# This is a fake constant that doesn't really exist. Here for completeness.
AF_EUI64 = 64
#: The width (in bits) of this address type.
width = 64
#: The AF_* constant value of this address type.
family = AF_EUI64
#: A friendly string name for this address type.
family_name = 'EUI-64'
#: The version of this address type.
version = 64
#: The maximum integer value that can be represented by this address type.
max_int = 2 ** width - 1
#-----------------------------------------------------------------------------
# Dialect classes.
#-----------------------------------------------------------------------------
class eui64_base(object):
    """A standard IEEE EUI-64 dialect class: eight 2-digit uppercase hex
    words separated by ``-`` (e.g. ``00-1B-77-49-54-FD-12-34``)."""
    #: The individual word size (in bits) of this address type.
    word_size = 8
    #: The number of words in this address type.
    num_words = width // word_size
    #: The maximum integer value for an individual word in this address type.
    max_word = 2 ** word_size - 1
    #: The separator character used between each word.
    word_sep = '-'
    #: The format string to be used when converting words to string values.
    word_fmt = '%.2X'
    #: The number base to be used when interpreting word values as integers.
    word_base = 16
class eui64_unix(eui64_base):
    """A UNIX-style dialect class: colon-separated lowercase hex words
    without zero padding."""
    word_size = 8
    num_words = width // word_size
    word_sep = ':'
    word_fmt = '%x'
    word_base = 16
class eui64_unix_expanded(eui64_unix):
    """A UNIX-style dialect class with leading zeroes (2-digit words)."""
    word_fmt = '%.2x'
class eui64_cisco(eui64_base):
    """A Cisco-style dialect class: dot-separated 16-bit hextets (four of
    them for EUI-64, despite the 'triple hextet' name used for EUI-48)."""
    word_size = 16
    num_words = width // word_size
    word_sep = '.'
    word_fmt = '%.4x'
    word_base = 16
class eui64_bare(eui64_base):
    """A bare dialect class: one 16-digit uppercase hex word, no delimiters."""
    word_size = 64
    num_words = width // word_size
    word_sep = ''
    word_fmt = '%.16X'
    word_base = 16
#: The default dialect to be used when not specified by the user.
DEFAULT_EUI64_DIALECT = eui64_base
#-----------------------------------------------------------------------------
#: Regular expressions to match all supported MAC address formats.
RE_EUI64_FORMATS = (
    # 2 bytes x 8 (UNIX, Windows, EUI-64)
    '^' + ':'.join(['([0-9A-F]{1,2})'] * 8) + '$',
    '^' + '-'.join(['([0-9A-F]{1,2})'] * 8) + '$',
    # 4 bytes x 4 (Cisco like)
    '^' + ':'.join(['([0-9A-F]{1,4})'] * 4) + '$',
    '^' + '-'.join(['([0-9A-F]{1,4})'] * 4) + '$',
    # Use a raw string for the escaped dot: '\.' is an invalid string escape
    # sequence (DeprecationWarning since Python 3.6, SyntaxWarning in 3.12).
    '^' + r'\.'.join(['([0-9A-F]{1,4})'] * 4) + '$',
    # 16 bytes (bare, no delimiters)
    '^(' + ''.join(['[0-9A-F]'] * 16) + ')$',
)
# For efficiency, each string regexp converted in place to its compiled
# counterpart.
RE_EUI64_FORMATS = [_re.compile(_, _re.IGNORECASE) for _ in RE_EUI64_FORMATS]
def _get_match_result(address, formats):
for regexp in formats:
match = regexp.findall(address)
if match:
return match[0]
def valid_str(addr):
    """
    :param addr: An IEEE EUI-64 identifier in string form.

    :return: ``True`` if the EUI-64 identifier is valid, ``False`` otherwise.
    """
    try:
        # A truthy capture tuple means one of the known formats matched.
        return bool(_get_match_result(addr, RE_EUI64_FORMATS))
    except TypeError:
        # Non-string input cannot be a valid identifier.
        return False
def str_to_int(addr):
    """
    :param addr: An IEEE EUI-64 identifier in string form.

    :return: An unsigned integer that is equivalent to value represented
        by EUI-64 string address formatted according to the dialect
    """
    try:
        captures = _get_match_result(addr, RE_EUI64_FORMATS)
        if not captures:
            raise TypeError
    except TypeError:
        raise AddrFormatError('invalid IEEE EUI-64 identifier: %r!' % addr)
    # A single capture group comes back as a bare string; normalise to tuple.
    if not isinstance(captures, tuple):
        captures = (captures,)
    num_words = len(captures)
    if num_words == 8:
        # 2 bytes x 8 (UNIX, Windows, EUI-48)
        return int(''.join(['%.2x' % int(w, 16) for w in captures]), 16)
    if num_words == 4:
        # 4 bytes x 4 (Cisco like)
        return int(''.join(['%.4x' % int(w, 16) for w in captures]), 16)
    if num_words == 1:
        # 16 bytes (bare, no delimiters)
        return int('%016x' % int(captures[0], 16), 16)
    raise AddrFormatError(
        'bad word count for EUI-64 identifier: %r!' % addr)
def int_to_str(int_val, dialect=None):
    """
    :param int_val: An unsigned integer.

    :param dialect: (optional) a Python class defining formatting options

    :return: An IEEE EUI-64 identifier that is equivalent to unsigned integer.
    """
    if dialect is None:
        dialect = eui64_base
    formatted = (dialect.word_fmt % word
                 for word in int_to_words(int_val, dialect))
    return dialect.word_sep.join(formatted)
def int_to_packed(int_val):
    """
    :param int_val: the integer to be packed.

    :return: a packed string that is equivalent to value represented by an
        unsigned integer.
    """
    # Eight big-endian octets (default dialect is 8 x 8-bit words).
    octets = int_to_words(int_val)
    return _struct.pack('>8B', *octets)
def packed_to_int(packed_int):
    """
    :param packed_int: a packed string containing an unsigned integer.
        It is assumed that string is packed in network byte order.

    :return: An unsigned integer equivalent to value of network address
        represented by packed binary string.
    """
    # unpack validates that exactly 8 octets were supplied.
    octets = _struct.unpack('>8B', packed_int)
    result = 0
    for octet in octets:
        # Fold each big-endian octet into the accumulator.
        result = (result << 8) | octet
    return result
def valid_words(words, dialect=None):
    """Return whether *words* is a valid word sequence for *dialect*."""
    if dialect is None:
        dialect = DEFAULT_EUI64_DIALECT
    return _valid_words(words, dialect.word_size, dialect.num_words)
def int_to_words(int_val, dialect=None):
    """Convert an unsigned integer to a sequence of words for *dialect*."""
    if dialect is None:
        dialect = DEFAULT_EUI64_DIALECT
    return _int_to_words(int_val, dialect.word_size, dialect.num_words)
def words_to_int(words, dialect=None):
    """Convert a sequence of words for *dialect* to an unsigned integer."""
    if dialect is None:
        dialect = DEFAULT_EUI64_DIALECT
    return _words_to_int(words, dialect.word_size, dialect.num_words)
def valid_bits(bits, dialect=None):
    """Return whether *bits* is a valid binary digit string for *dialect*."""
    if dialect is None:
        dialect = DEFAULT_EUI64_DIALECT
    return _valid_bits(bits, width, dialect.word_sep)
def bits_to_int(bits, dialect=None):
    """Convert a binary digit string for *dialect* to an unsigned integer."""
    if dialect is None:
        dialect = DEFAULT_EUI64_DIALECT
    return _bits_to_int(bits, width, dialect.word_sep)
def int_to_bits(int_val, dialect=None):
    """Convert an unsigned integer to a word-separated binary digit string."""
    if dialect is None:
        dialect = DEFAULT_EUI64_DIALECT
    return _int_to_bits(
        int_val, dialect.word_size, dialect.num_words, dialect.word_sep)
def valid_bin(bin_val, dialect=None):
    """Return whether *bin_val* is a valid Python binary string form."""
    if dialect is None:
        dialect = DEFAULT_EUI64_DIALECT
    return _valid_bin(bin_val, width)
def int_to_bin(int_val):
    """Convert an unsigned integer to a Python binary string form."""
    return _int_to_bin(int_val, width)
def bin_to_int(bin_val):
    """Convert a Python binary string form to an unsigned integer."""
    return _bin_to_int(bin_val, width)
| [
"nate.mellendorf@gmail.com"
] | nate.mellendorf@gmail.com |
af477fc6a0296522ff4102bc09ec1664af163abf | 868e1bc0cbdbab12365c293656ee7a2a1373cac1 | /config.py | f3dc3803dc659577659c8068ac224dd0d5d08ec0 | [
"MIT"
] | permissive | xavierxross/nazurin | 15b811fbca984fe17f8d19fba5ab07c7517e5a69 | 9703781b14f626c39388c716cd412441198eb7e3 | refs/heads/master | 2023-02-17T05:37:03.095408 | 2021-01-13T14:09:49 | 2021-01-13T14:09:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | from ast import literal_eval
from os import environ
# Runtime environment name; defaults to 'production' when unset.
ENV = environ.get('ENV', 'production')
# Telegram bot token (None if the variable is unset).
TOKEN = environ.get('TOKEN')
# Webhook url, eg: https://xxx.herokuapp.com/, should end with '/'
WEBHOOK_URL = environ.get('WEBHOOK_URL')
# Port is given by Heroku
PORT = int(environ.get('PORT', '8443'))
# Working directory for temporary files.
TEMP_DIR = './temp/'
# Storage backends, parsed from a Python-literal list string, e.g. "['Local']".
STORAGE = literal_eval(environ.get('STORAGE', "['Local']"))
STORAGE_DIR = environ.get('STORAGE_DIR', 'Pictures')
DATABASE = environ.get('DATABASE', 'Local')
# nazurin data collection in database
NAZURIN_DATA = 'nazurin'
# NOTE(review): the int() calls below raise TypeError when the corresponding
# environment variable is unset -- presumably these are required; confirm.
ALBUM_ID = int(environ.get('ALBUM_ID'))
GALLERY_ID = int(environ.get('GALLERY_ID'))
ADMIN_ID = int(environ.get('ADMIN_ID'))
# Browser user agent string used for outgoing requests.
UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36"
RETRIES = 5 | [
"yyoung2001@gmail.com"
] | yyoung2001@gmail.com |
c055be50f5dc5548077e87745ee2a24464f6b1b3 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/sympy/core/sympify.py | 2e6f3de62f4dfa889be4163f89e43e7a9175875d | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 15,823 | py | """sympify -- convert objects SymPy internal format"""
from __future__ import print_function, division
from inspect import getmro
from .core import all_classes as sympy_classes
from .compatibility import iterable, string_types, range
from .evaluate import global_evaluate
class SympifyError(ValueError):
    """Raised when an object cannot be converted to a SymPy type.

    ``base_exc`` optionally carries the underlying exception that caused
    the conversion to fail; it is included in the string form.
    """
    def __init__(self, expr, base_exc=None):
        self.expr = expr
        self.base_exc = base_exc
    def __str__(self):
        if self.base_exc is None:
            return "SympifyError: %r" % (self.expr,)
        exc_name = self.base_exc.__class__.__name__
        template = ("Sympify of expression '%s' failed, because of exception "
                    "being raised:\n%s: %s")
        return template % (self.expr, exc_name, str(self.base_exc))
converter = {} # See sympify docstring.
class CantSympify(object):
    """
    Mix in this trait to a class to disallow sympification of its instances.

    Examples
    ========

    >>> from sympy.core.sympify import sympify, CantSympify
    >>> class Something(dict):
    ...     pass
    ...
    >>> sympify(Something())
    {}
    >>> class Something(dict, CantSympify):
    ...     pass
    ...
    >>> sympify(Something())
    Traceback (most recent call last):
    ...
    SympifyError: SympifyError: {}
    """
    # Pure marker mix-in: ``sympify`` raises SympifyError for instances of
    # any class that inherits from this (see the isinstance check there).
    pass
def _convert_numpy_types(a):
    """
    Converts a numpy datatype input (scalar) to an appropriate sympy type.

    Non-float numpy scalars go through the ``converter`` table (complex) or
    plain ``sympify``; numpy floats are converted to ``Float`` with the
    precision implied by their dtype.
    """
    import numpy as np
    if not isinstance(a, np.floating):
        func = converter[complex] if np.iscomplex(a) else sympify
        # ``np.asscalar`` was deprecated in NumPy 1.16 and removed in 1.23;
        # ``item()`` is the documented equivalent for numpy scalars.
        return func(a.item())
    else:
        try:
            from sympy.core.numbers import Float
            prec = np.finfo(a).nmant + 1
            # E.g. double precision means prec=53 but nmant=52
            # Leading bit of mantissa is always 1, so is not stored
            a = str(list(np.reshape(np.asarray(a),
                                    (1, np.size(a)))[0]))[1:-1]
            return Float(a, precision=prec)
        except NotImplementedError:
            raise SympifyError('Translation for numpy float : %s '
                               'is not implemented' % a)
def sympify(a, locals=None, convert_xor=True, strict=False, rational=False,
        evaluate=None):
    """Converts an arbitrary expression to a type that can be used inside SymPy.
    For example, it will convert Python ints into instances of sympy.Integer,
    floats into instances of sympy.Float, etc. It is also able to coerce symbolic
    expressions which inherit from Basic. This can be useful in cooperation
    with SAGE.
    It currently accepts as arguments:
       - any object defined in sympy
       - standard numeric python types: int, long, float, Decimal
       - strings (like "0.09" or "2e-19")
       - booleans, including ``None`` (will leave ``None`` unchanged)
       - lists, sets or tuples containing any of the above
    .. warning::
        Note that this function uses ``eval``, and thus shouldn't be used on
        unsanitized input.
    If the argument is already a type that SymPy understands, it will do
    nothing but return that value. This can be used at the beginning of a
    function to ensure you are working with the correct type.
    >>> from sympy import sympify
    >>> sympify(2).is_integer
    True
    >>> sympify(2).is_real
    True
    >>> sympify(2.0).is_real
    True
    >>> sympify("2.0").is_real
    True
    >>> sympify("2e-45").is_real
    True
    If the expression could not be converted, a SympifyError is raised.
    >>> sympify("x***2")
    Traceback (most recent call last):
    ...
    SympifyError: SympifyError: "could not parse u'x***2'"
    Locals
    ------
    The sympification happens with access to everything that is loaded
    by ``from sympy import *``; anything used in a string that is not
    defined by that import will be converted to a symbol. In the following,
    the ``bitcount`` function is treated as a symbol and the ``O`` is
    interpreted as the Order object (used with series) and it raises
    an error when used improperly:
    >>> s = 'bitcount(42)'
    >>> sympify(s)
    bitcount(42)
    >>> sympify("O(x)")
    O(x)
    >>> sympify("O + 1")
    Traceback (most recent call last):
    ...
    TypeError: unbound method...
    In order to have ``bitcount`` be recognized it can be imported into a
    namespace dictionary and passed as locals:
    >>> from sympy.core.compatibility import exec_
    >>> ns = {}
    >>> exec_('from sympy.core.evalf import bitcount', ns)
    >>> sympify(s, locals=ns)
    6
    In order to have the ``O`` interpreted as a Symbol, identify it as such
    in the namespace dictionary. This can be done in a variety of ways; all
    three of the following are possibilities:
    >>> from sympy import Symbol
    >>> ns["O"] = Symbol("O")  # method 1
    >>> exec_('from sympy.abc import O', ns)  # method 2
    >>> ns.update(dict(O=Symbol("O")))  # method 3
    >>> sympify("O + 1", locals=ns)
    O + 1
    If you want *all* single-letter and Greek-letter variables to be symbols
    then you can use the clashing-symbols dictionaries that have been defined
    there as private variables: _clash1 (single-letter variables), _clash2
    (the multi-letter Greek names) or _clash (both single and multi-letter
    names that are defined in abc).
    >>> from sympy.abc import _clash1
    >>> _clash1
    {'C': C, 'E': E, 'I': I, 'N': N, 'O': O, 'Q': Q, 'S': S}
    >>> sympify('I & Q', _clash1)
    I & Q
    Strict
    ------
    If the option ``strict`` is set to ``True``, only the types for which an
    explicit conversion has been defined are converted. In the other
    cases, a SympifyError is raised.
    >>> print(sympify(None))
    None
    >>> sympify(None, strict=True)
    Traceback (most recent call last):
    ...
    SympifyError: SympifyError: None
    Evaluation
    ----------
    If the option ``evaluate`` is set to ``False``, then arithmetic and
    operators will be converted into their SymPy equivalents and the
    ``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will
    be denested first. This is done via an AST transformation that replaces
    operators with their SymPy equivalents, so if an operand redefines any
    of those operations, the redefined operators will not be used.
    >>> sympify('2**2 / 3 + 5')
    19/3
    >>> sympify('2**2 / 3 + 5', evaluate=False)
    2**2/3 + 5
    Extending
    ---------
    To extend ``sympify`` to convert custom objects (not derived from ``Basic``),
    just define a ``_sympy_`` method to your class. You can do that even to
    classes that you do not own by subclassing or adding the method at runtime.
    >>> from sympy import Matrix
    >>> class MyList1(object):
    ...     def __iter__(self):
    ...         yield 1
    ...         yield 2
    ...         return
    ...     def __getitem__(self, i): return list(self)[i]
    ...     def _sympy_(self): return Matrix(self)
    >>> sympify(MyList1())
    Matrix([
    [1],
    [2]])
    If you do not have control over the class definition you could also use the
    ``converter`` global dictionary. The key is the class and the value is a
    function that takes a single argument and returns the desired SymPy
    object, e.g. ``converter[MyList] = lambda x: Matrix(x)``.
    >>> class MyList2(object):   # XXX Do not do this if you control the class!
    ...     def __iter__(self):  #     Use _sympy_!
    ...         yield 1
    ...         yield 2
    ...         return
    ...     def __getitem__(self, i): return list(self)[i]
    >>> from sympy.core.sympify import converter
    >>> converter[MyList2] = lambda x: Matrix(x)
    >>> sympify(MyList2())
    Matrix([
    [1],
    [2]])
    Notes
    =====
    Sometimes autosimplification during sympification results in expressions
    that are very different in structure than what was entered. Until such
    autosimplification is no longer done, the ``kernS`` function might be of
    some use. In the example below you can see how an expression reduces to
    -1 by autosimplification, but does not do so when ``kernS`` is used.
    >>> from sympy.core.sympify import kernS
    >>> from sympy.abc import x
    >>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
    -1
    >>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1'
    >>> sympify(s)
    -1
    >>> kernS(s)
    -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
    """
    # Resolve the effective evaluate flag from the global context manager.
    if evaluate is None:
        if global_evaluate[0] is False:
            evaluate = global_evaluate[0]
        else:
            evaluate = True
    # Fast path: objects that already are SymPy classes are returned as-is.
    try:
        if a in sympy_classes:
            return a
    except TypeError: # Type of a is unhashable
        pass
    try:
        cls = a.__class__
    except AttributeError: # a is probably an old-style class object
        cls = type(a)
    if cls in sympy_classes:
        return a
    if cls is type(None):
        if strict:
            raise SympifyError(a)
        else:
            return a
    # Support for basic numpy datatypes
    # Note that this check exists to avoid importing NumPy when not necessary
    if type(a).__module__ == 'numpy':
        import numpy as np
        if np.isscalar(a):
            return _convert_numpy_types(a)
    # Registered converters: exact class first, then each superclass in MRO.
    try:
        return converter[cls](a)
    except KeyError:
        for superclass in getmro(cls):
            try:
                return converter[superclass](a)
            except KeyError:
                continue
    if isinstance(a, CantSympify):
        raise SympifyError(a)
    # Objects may define their own conversion via a _sympy_() method.
    try:
        return a._sympy_()
    except AttributeError:
        pass
    if not strict:
        # Put numpy array conversion _before_ float/int, see
        # <https://github.com/sympy/sympy/issues/13924>.
        try:
            from ..tensor.array import Array
            return Array(a.flat, a.shape)  # works with e.g. NumPy arrays
        except AttributeError:
            pass
    if not isinstance(a, string_types):
        # Try numeric coercion before falling back to string parsing.
        for coerce in (float, int):
            try:
                return sympify(coerce(a))
            except (TypeError, ValueError, AttributeError, SympifyError):
                continue
    if strict:
        raise SympifyError(a)
    # Containers are sympified element-wise, preserving the container type.
    if iterable(a):
        try:
            return type(a)([sympify(x, locals=locals, convert_xor=convert_xor,
                rational=rational) for x in a])
        except TypeError:
            # Not all iterables are rebuildable with their type.
            pass
    if isinstance(a, dict):
        try:
            return type(a)([sympify(x, locals=locals, convert_xor=convert_xor,
                rational=rational) for x in a.items()])
        except TypeError:
            # Not all iterables are rebuildable with their type.
            pass
    # At this point we were given an arbitrary expression
    # which does not inherit from Basic and doesn't implement
    # _sympy_ (which is a canonical and robust way to convert
    # anything to SymPy expression).
    #
    # As a last chance, we try to take "a"'s normal form via unicode()
    # and try to parse it. If it fails, then we have no luck and
    # return an exception
    try:
        from .compatibility import unicode
        a = unicode(a)
    except Exception as exc:
        raise SympifyError(a, exc)
    from sympy.parsing.sympy_parser import (parse_expr, TokenError,
                                            standard_transformations)
    from sympy.parsing.sympy_parser import convert_xor as t_convert_xor
    from sympy.parsing.sympy_parser import rationalize as t_rationalize
    transformations = standard_transformations
    if rational:
        transformations += (t_rationalize,)
    if convert_xor:
        transformations += (t_convert_xor,)
    try:
        # Newlines are stripped so multi-line input parses as one expression.
        a = a.replace('\n', '')
        expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)
    except (TokenError, SyntaxError) as exc:
        raise SympifyError('could not parse %r' % a, exc)
    return expr
def _sympify(a):
    """
    Short version of sympify for internal usage for __add__ and __eq__ methods
    where it is ok to allow some things (like Python integers and floats) in
    the expression. This excludes things (like strings) that are unwise to
    allow into such an expression.

    >>> from sympy import Integer
    >>> Integer(1) == 1
    True
    >>> Integer(1) == '1'
    False
    >>> from sympy.abc import x
    >>> x + 1
    x + 1
    >>> x + '1'
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for +: 'Symbol' and 'str'

    see: sympify
    """
    # strict=True: only explicitly registered conversions are accepted here.
    return sympify(a, strict=True)
def kernS(s):
    """Use a hack to try to keep autosimplification from distributing
    a number into an Add; this modification doesn't
    prevent the 2-arg Mul from becoming an Add, however.

    Examples
    ========

    >>> from sympy.core.sympify import kernS
    >>> from sympy.abc import x, y, z

    The 2-arg Mul distributes a number (or minus sign) across the terms
    of an expression, but kernS will prevent that:

    >>> 2*(x + y), -(x + 1)
    (2*x + 2*y, -x - 1)
    >>> kernS('2*(x + y)')
    2*(x + y)
    >>> kernS('-(x + 1)')
    -(x + 1)

    If use of the hack fails, the un-hacked string will be passed to sympify...
    and you get what you get.

    XXX This hack should not be necessary once issue 4596 has been resolved.
    """
    import string
    from random import choice
    from sympy.core.symbol import Symbol
    hit = False
    quoted = '"' in s or "'" in s
    if '(' in s and not quoted:
        if s.count('(') != s.count(")"):
            raise SympifyError('unmatched left parenthesis')
        # strip all space from s
        s = ''.join(s.split())
        olds = s
        # now use space to represent a symbol that
        # will
        # step 1. turn potential 2-arg Muls into 3-arg versions
        # 1a. *( -> * *(
        s = s.replace('*(', '* *(')
        # 1b. close up exponentials
        s = s.replace('** *', '**')
        # 2. handle the implied multiplication of a negated
        # parenthesized expression in two steps
        # 2a: -(...) --> -( *(...)
        target = '-( *('
        s = s.replace('-(', target)
        # 2b: double the matching closing parenthesis
        # -( *(...) --> -( *(...))
        i = nest = 0
        assert target.endswith('(')  # assumption below
        while True:
            j = s.find(target, i)
            if j == -1:
                break
            j += len(target) - 1
            for j in range(j, len(s)):
                if s[j] == "(":
                    nest += 1
                elif s[j] == ")":
                    nest -= 1
                if nest == 0:
                    break
            s = s[:j] + ")" + s[j:]
            i = j + 2  # the first char after 2nd )
        if ' ' in s:
            # get a unique kern
            kern = '_'
            while kern in s:
                kern += choice(string.ascii_letters + string.digits)
            s = s.replace(' ', kern)
            hit = kern in s
    # Try the kerned string first; on failure retry once with the original.
    for i in range(2):
        try:
            expr = sympify(s)
            break
        except:  # the kern might cause unknown errors, so use bare except
            if hit:
                s = olds  # maybe it didn't like the kern; use un-kerned s
                hit = False
                continue
            expr = sympify(s)  # let original error raise
    if not hit:
        return expr
    # Substitute the kern symbol with 1 everywhere in the parsed result.
    rep = {Symbol(kern): 1}
    def _clear(expr):
        # Recurse into containers; use subs on anything SymPy-like.
        if isinstance(expr, (list, tuple, set)):
            return type(expr)([_clear(e) for e in expr])
        if hasattr(expr, 'subs'):
            return expr.subs(rep, hack2=True)
        return expr
    expr = _clear(expr)
    # hope that kern is not there anymore
    return expr
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
92410f6f12b802736db44b21188677bab8cbad4f | 6f23adb3da803dda89e21cfa21a024a015ec1710 | /2019/1-2.py | 800457275fe7eb521f7e86398de742615d6dee85 | [] | no_license | Remboooo/adventofcode | 1478252bcb19c0dd19e4fa2effd355ee71a5d349 | 5647b8eddd0a3c7781a9c21019f6f06f6edc09bd | refs/heads/master | 2022-12-15T10:21:29.219459 | 2022-12-13T23:02:03 | 2022-12-13T23:02:03 | 226,883,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | from argparse import ArgumentParser
def get_fuel(mass):
    """Return the total fuel needed for *mass*, including the fuel
    required to carry the fuel itself (never negative)."""
    total = 0
    step = mass // 3 - 2
    while step > 0:
        total += step
        step = step // 3 - 2
    return total
def main():
    """Read one module mass per line from the given file and print the
    total fuel requirement (including fuel-for-fuel)."""
    # NOTE: local name shadows the stdlib ``argparse`` module.
    argparse = ArgumentParser()
    argparse.add_argument("file", type=str)
    args = argparse.parse_args()
    with open(args.file, "r") as f:
        # Each line holds one integer mass.
        print(sum(get_fuel(int(l)) for l in f))
if __name__ == '__main__':
    main()
| [
"rembrand.vanlakwijk@nedap.com"
] | rembrand.vanlakwijk@nedap.com |
ba0a659c399e903901cc3fd621947e907b604fa0 | 5ec7d0bad8a77c79843a2813f5effcb3a2b7e288 | /tests/test_cli.py | 3a62b8ef70fa0a80004a25aa8c49925cf07a0ded | [
"Apache-2.0"
] | permissive | xdpknx/lean-cli | aca9b9c9c4e156c9faefcfa8ccdfc20423b510a0 | c1051bd3e8851ae96f6e84f608a7116b1689c9e9 | refs/heads/master | 2023-08-08T02:30:09.827647 | 2021-09-21T21:36:24 | 2021-09-21T21:36:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,503 | py | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import shutil
import subprocess
import tempfile
import uuid
from datetime import datetime
from pathlib import Path
from typing import List, Optional
import pytest
from lean.components.api.api_client import APIClient
from lean.components.util.http_client import HTTPClient
from lean.components.util.logger import Logger
# These tests require a QuantConnect user id and API token
# The credentials can also be provided using the QC_USER_ID and QC_API_TOKEN environment variables
# The tests in this file call the CLI itself to verify it works as expected
# Be aware that these tests change the global CLI configuration on this system
USER_ID = ""
API_TOKEN = ""
@pytest.fixture(autouse=True)
def fake_filesystem() -> None:
    """A pytest fixture which disables the mocking of the filesystem for the tests in this file."""
    # No-op override; presumably shadows a same-named mocking fixture from
    # conftest.py so these tests use the real filesystem -- confirm.
    return
@pytest.fixture(autouse=True)
def requests_mock() -> None:
    """A pytest fixture which disables the mocking of HTTP requests for the tests in this file."""
    # No-op override so the tests can perform real HTTP requests.
    return
@pytest.fixture(autouse=True)
def clear_global_config() -> None:
    """A pytest fixture which clears global configuration before running the tests and restores it afterwards."""
    global_config_path = Path("~/.lean").expanduser()
    global_config_files = [global_config_path / file for file in ["config", "credentials"]]
    # Setup: move any existing config files out of the way, keeping a .bak copy.
    for global_config_file in global_config_files:
        if global_config_file.is_file():
            backup_file = global_config_file.parent / f"{global_config_file.name}.bak"
            if backup_file.is_file():
                backup_file.unlink()
            global_config_file.rename(backup_file)
    yield None
    # Teardown: delete files created by the test and restore the backups.
    for global_config_file in global_config_files:
        if global_config_file.is_file():
            global_config_file.unlink()
        backup_file = global_config_file.parent / f"{global_config_file.name}.bak"
        if backup_file.is_file():
            backup_file.rename(global_config_file)
def run_command(args: List[str],
                cwd: Optional[Path] = None,
                input: Optional[List[str]] = None,
                expected_return_code: int = 0,
                expected_output: Optional[str] = None,
                timeout: int = 120) -> str:
    """Runs a command and runs assertions on the return code and output.

    :param args: the command to run
    :param cwd: the directory to run the command in, or None to use the current directory
    :param input: the lines to provide to stdin, or None for no input lines
    :param expected_return_code: the expected return code of the command
    :param expected_output: the string the output of the command is expected to contain
    :param timeout: the timeout of the command in seconds
    :return: the output of the command
    """
    # Use a None sentinel instead of a mutable [] default argument.
    if input is None:
        input = []
    print(f"Running {args}")
    try:
        process = subprocess.run(args,
                                 cwd=cwd,
                                 input=str.encode("\n".join(input) + "\n"),
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 timeout=timeout)
    except subprocess.TimeoutExpired as error:
        # Show whatever the command printed before it timed out.
        print(error.stdout.decode("utf-8"))
        raise error
    output = process.stdout.decode("utf-8")
    print(output)
    assert process.returncode == expected_return_code
    if expected_output is not None:
        assert expected_output in output
    return output
def test_cli() -> None:
"""Tests the CLI by actually calling it like a real user would do.
Unlike "normal" tests, this file only contains a single test method which steps through all commands.
This is done on purpose to make the test as close to what real users do as possible.
"""
user_id = USER_ID or os.environ.get("QC_USER_ID", "")
api_token = API_TOKEN or os.environ.get("QC_API_TOKEN", "")
if user_id == "" or api_token == "":
pytest.skip("API credentials not specified")
credentials_path = Path("~/.lean").expanduser() / "credentials"
# Create an empty directory to perform tests in
test_dir = Path(tempfile.mkdtemp())
# We use project names suffixed by a timestamp to prevent conflicts when we synchronize with the cloud
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
python_project_name = f"Python Project {timestamp}"
csharp_project_name = f"CSharp Project {timestamp}"
# Log in
run_command(["lean", "login"], input=[user_id, api_token])
assert credentials_path.exists()
assert json.loads(credentials_path.read_text(encoding="utf-8")) == {
"user-id": user_id,
"api-token": api_token
}
# Check that we are logged in
run_command(["lean", "whoami"])
# Download sample data and LEAN configuration file
run_command(["lean", "init"], cwd=test_dir, input=["python"])
assert (test_dir / "data").is_dir()
assert (test_dir / "lean.json").is_file()
# Generate random data
# This is the first command that uses the LEAN Docker image, so we increase the timeout to have time to pull it
generate_output = run_command(["lean", "data", "generate",
"--start", "20150101",
"--symbol-count", "1",
"--resolution", "Daily"],
cwd=test_dir,
timeout=600)
matches = re.findall(
r"Begin data generation of 1 randomly generated Equity assets\.\.\.\r?\n\s*Symbol\[1]: ([A-Z]+)",
generate_output)
assert len(matches) == 1
assert (test_dir / "data" / "equity" / "usa" / "daily" / f"{matches[0].lower()}.zip").is_file()
# Configure global settings
run_command(["lean", "config", "set", "default-language", "csharp"])
run_command(["lean", "config", "get", "default-language"], expected_output="csharp")
run_command(["lean", "config", "unset", "default-language"])
run_command(["lean", "config", "get", "default-language"], expected_return_code=1)
run_command(["lean", "config", "set", "default-language", "python"])
run_command(["lean", "config", "get", "default-language"], expected_output="python")
list_output = run_command(["lean", "config", "list"])
assert len(re.findall(r"default-language[ ]+[^ ] python", list_output)) == 1
# Create Python project
run_command(["lean", "create-project", "--language", "python", python_project_name], cwd=test_dir)
python_project_dir = test_dir / python_project_name
assert (python_project_dir / "main.py").is_file()
assert (python_project_dir / "research.ipynb").is_file()
assert (python_project_dir / "config.json").is_file()
assert (python_project_dir / ".vscode" / "launch.json").is_file()
assert (python_project_dir / ".vscode" / "settings.json").is_file()
assert (python_project_dir / ".idea" / f"{python_project_name}.iml").is_file()
assert (python_project_dir / ".idea" / "misc.xml").is_file()
assert (python_project_dir / ".idea" / "modules.xml").is_file()
assert (python_project_dir / ".idea" / "workspace.xml").is_file()
# Create C# project
run_command(["lean", "create-project", "--language", "csharp", csharp_project_name], cwd=test_dir)
csharp_project_dir = test_dir / csharp_project_name
assert (csharp_project_dir / "Main.cs").is_file()
assert (csharp_project_dir / "research.ipynb").is_file()
assert (csharp_project_dir / "config.json").is_file()
assert (csharp_project_dir / f"{csharp_project_name}.csproj").is_file()
assert (csharp_project_dir / ".vscode" / "launch.json").is_file()
# Add custom Python library
run_command(["lean", "library", "add", python_project_name, "altair"], cwd=test_dir)
assert (python_project_dir / "requirements.txt").is_file()
assert f"altair==" in (python_project_dir / "requirements.txt").read_text(encoding="utf-8")
# Cannot add custom Python library incompatible with Python 3.6
run_command(["lean", "library", "add", python_project_name, "PyS3DE"], cwd=test_dir, expected_return_code=1)
# Cannot add custom Python library without version when it's not on PyPI
run_command(["lean", "library", "add", python_project_name, str(uuid.uuid4())],
cwd=test_dir,
expected_return_code=1)
# Cannot add custom Python library with version when version is invalid
run_command(["lean", "library", "add", python_project_name, "matplotlib", "--version", "0.0.0.0.0.1"],
cwd=test_dir,
expected_return_code=1)
# Cannot add custom Python library with version when version is incompatible with Python 3.6
run_command(["lean", "library", "add", python_project_name, "matplotlib", "--version", "3.4.2"],
cwd=test_dir,
expected_return_code=1)
# Add custom C# library
run_command(["lean", "library", "add", csharp_project_name, "Microsoft.ML"], cwd=test_dir)
csproj_file = csharp_project_dir / f"{csharp_project_name}.csproj"
assert 'Include="Microsoft.ML"' in csproj_file.read_text(encoding="utf-8")
# Cannot add custom C# library without version when it's not on NuGet
run_command(["lean", "library", "add", csharp_project_name, str(uuid.uuid4())],
cwd=test_dir,
expected_return_code=1)
# Copy over algorithms containing a SPY buy-and-hold strategy with custom libraries
fixtures_dir = Path(__file__).parent / "fixtures"
shutil.copy(fixtures_dir / "local" / "main.py", python_project_dir / "main.py")
shutil.copy(fixtures_dir / "local" / "Main.cs", csharp_project_dir / "Main.cs")
# Backtest Python project locally
run_command(["lean", "backtest", python_project_name], cwd=test_dir, expected_output="Total Trades 1")
python_backtest_dirs = list((python_project_dir / "backtests").iterdir())
assert len(python_backtest_dirs) == 1
# Backtest C# project locally
run_command(["lean", "backtest", csharp_project_name], cwd=test_dir, expected_output="Total Trades 1")
csharp_backtest_dirs = list((csharp_project_dir / "backtests").iterdir())
assert len(csharp_backtest_dirs) == 1
# Remove custom Python library
run_command(["lean", "library", "remove", python_project_name, "altair"], cwd=test_dir)
assert f"altair==" not in (python_project_dir / "requirements.txt").read_text(encoding="utf-8")
# Remove custom C# library
run_command(["lean", "library", "remove", csharp_project_name, "Microsoft.ML"], cwd=test_dir)
assert 'Include="Microsoft.ML"' not in csproj_file.read_text(encoding="utf-8")
# Custom Python library is removed, so Python backtest should now fail
run_command(["lean", "backtest", python_project_name], cwd=test_dir, expected_return_code=1)
# Custom C# library is removed, so C# backtest should now fail
run_command(["lean", "backtest", csharp_project_name], cwd=test_dir, expected_return_code=1)
# Generate reports
python_results_file = next(f for f in python_backtest_dirs[0].iterdir() if
f.name.endswith(".json") and not f.name.endswith("-order-events.json"))
run_command(["lean", "report",
"--backtest-results", str(python_results_file),
"--report-destination", "python.html"], cwd=test_dir)
csharp_results_file = next(f for f in csharp_backtest_dirs[0].iterdir() if
f.name.endswith(".json") and not f.name.endswith("-order-events.json"))
run_command(["lean", "report",
"--backtest-results", str(csharp_results_file),
"--report-destination", "csharp.html"], cwd=test_dir)
assert (test_dir / "python.html").is_file()
assert (test_dir / "csharp.html").is_file()
# Copy over algorithms containing a SPY buy-and-hold strategy without custom libraries
shutil.copy(fixtures_dir / "cloud" / "main.py", python_project_dir / "main.py")
shutil.copy(fixtures_dir / "cloud" / "Main.cs", csharp_project_dir / "Main.cs")
# Push projects to the cloud
run_command(["lean", "cloud", "push", "--project", python_project_name], cwd=test_dir)
run_command(["lean", "cloud", "push", "--project", csharp_project_name], cwd=test_dir)
# Remove some files and see if we can successfully pull them from the cloud
(python_project_dir / "main.py").unlink()
(csharp_project_dir / "Main.cs").unlink()
# Pull projects from the cloud
run_command(["lean", "cloud", "pull", "--project", python_project_name], cwd=test_dir)
run_command(["lean", "cloud", "pull", "--project", csharp_project_name], cwd=test_dir)
# Ensure deleted files have been pulled
(python_project_dir / "main.py").is_file()
(csharp_project_dir / "Main.cs").is_file()
# Run Python backtest in the cloud
run_command(["lean", "cloud", "backtest", python_project_name], cwd=test_dir)
# Run C# backtest in the cloud
run_command(["lean", "cloud", "backtest", csharp_project_name], cwd=test_dir)
# Get cloud project status
run_command(["lean", "cloud", "status", python_project_name], cwd=test_dir)
run_command(["lean", "cloud", "status", csharp_project_name], cwd=test_dir)
# Log out
run_command(["lean", "logout"])
assert not credentials_path.exists()
# Delete the test directory that we used
shutil.rmtree(test_dir, ignore_errors=True)
# Delete the cloud projects that we used
api_client = APIClient(Logger(), HTTPClient(Logger()), user_id, api_token)
cloud_projects = api_client.projects.get_all()
api_client.projects.delete(next(p.projectId for p in cloud_projects if p.name == python_project_name))
api_client.projects.delete(next(p.projectId for p in cloud_projects if p.name == csharp_project_name))
| [
"jaspervmerle@gmail.com"
] | jaspervmerle@gmail.com |
d401750857d7f3143f7269271b3de9fba9186096 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/pose_estimation/Hourglass_for_PyTorch/mmpose-master/demo/bottom_up_img_demo.py | a3738019b2ae94fbc79da7904081f7b92e5575c7 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 3,366 | py | # -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
from argparse import ArgumentParser
from xtcocotools.coco import COCO
from mmpose.apis import (inference_bottom_up_pose_model, init_pose_model,
vis_pose_result)
def main():
    """Visualize bottom-up pose estimation results.

    For every image id listed in ``--json-file`` the model predicts
    multi-person keypoints on the image found under ``--img-root`` and the
    visualisation is shown (``--show``) and/or written to
    ``--out-img-root``.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for detection')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument(
        '--json-file',
        type=str,
        default='',
        help='Json file containing image info.')
    parser.add_argument(
        '--show',
        action='store_true',
        default=False,
        help='whether to show img')
    parser.add_argument(
        '--out-img-root',
        type=str,
        default='',
        help='Root of the output img file. '
        'Default not saving the visualization images.')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
    args = parser.parse_args()

    # Results must either be displayed or written somewhere.
    assert args.show or (args.out_img_root != '')

    coco = COCO(args.json_file)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        args.pose_config, args.pose_checkpoint, device=args.device)

    dataset = pose_model.cfg.data['test']['type']
    assert (dataset == 'BottomUpCocoDataset')

    # Create the output directory once, up front, rather than on every
    # loop iteration as before.
    if args.out_img_root != '':
        os.makedirs(args.out_img_root, exist_ok=True)

    # optional: request predicted heatmaps as well
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    # process each image; enumerate() replaces the previous
    # index-based `for i in range(len(img_keys))` loop.
    for i, image_id in enumerate(coco.imgs.keys()):
        image = coco.loadImgs(image_id)[0]
        image_name = os.path.join(args.img_root, image['file_name'])

        # test a single image; bottom-up models need no person bboxes
        pose_results, returned_outputs = inference_bottom_up_pose_model(
            pose_model,
            image_name,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        if args.out_img_root == '':
            out_file = None
        else:
            out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')

        # show the results
        vis_pose_result(
            pose_model,
            image_name,
            pose_results,
            dataset=dataset,
            kpt_score_thr=args.kpt_thr,
            show=args.show,
            out_file=out_file)


if __name__ == '__main__':
    main()
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
d92d843f8cadc0c0977926f81dc2a5c920771c79 | 98b1956594921aeef6e4b3c0f5b15703c3eee6a7 | /atom/nucleus/python/nucleus_api/models/allocation_composition_aggregated_vo.py | 41c3a6009da956afb7651788ba9a8cc58db92530 | [
"Apache-2.0"
] | permissive | sumit4-ttn/SDK | d4db3dcac077e9c9508a8227010a2ab764c31023 | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | refs/heads/master | 2022-11-25T14:05:16.911068 | 2020-08-09T17:31:55 | 2020-08-09T17:31:55 | 286,413,715 | 0 | 0 | Apache-2.0 | 2020-08-10T08:03:04 | 2020-08-10T08:03:03 | null | UTF-8 | Python | false | false | 14,494 | py | # coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AllocationCompositionAggregatedVO(object):
    """Value object for one aggregated allocation-composition entry.

    Originally produced by the swagger code generator; this version keeps
    the public contract intact (constructor signature, attribute names,
    ``swagger_types``/``attribute_map``, ``to_dict``/equality semantics)
    while generating the repetitive property accessors in a loop below
    the class body.
    """

    # Attribute name -> declared swagger type (drives to_dict and the
    # generated accessors).
    swagger_types = {
        'allocation_composition_create_date': 'datetime',
        'allocation_composition_date': 'datetime',
        'allocation_composition_id': 'str',
        'allocation_composition_update_date': 'datetime',
        'model_category': 'str',
        'model_current_weight': 'float',
        'model_description': 'str',
        'model_holdings': 'list[AllocationCompositionModelHoldingsVO]',
        'model_id': 'str',
        'model_name': 'str',
        'model_secondary_id': 'str',
        'model_strategic_weight': 'float'
    }

    # Attribute name -> JSON key used on the wire (identical here, but the
    # generator keeps the mapping explicit).
    attribute_map = {
        'allocation_composition_create_date': 'allocation_composition_create_date',
        'allocation_composition_date': 'allocation_composition_date',
        'allocation_composition_id': 'allocation_composition_id',
        'allocation_composition_update_date': 'allocation_composition_update_date',
        'model_category': 'model_category',
        'model_current_weight': 'model_current_weight',
        'model_description': 'model_description',
        'model_holdings': 'model_holdings',
        'model_id': 'model_id',
        'model_name': 'model_name',
        'model_secondary_id': 'model_secondary_id',
        'model_strategic_weight': 'model_strategic_weight'
    }

    def __init__(self, allocation_composition_create_date=None, allocation_composition_date=None, allocation_composition_id=None, allocation_composition_update_date=None, model_category=None, model_current_weight=None, model_description=None, model_holdings=None, model_id=None, model_name=None, model_secondary_id=None, model_strategic_weight=None):  # noqa: E501
        """AllocationCompositionAggregatedVO - a model defined in Swagger"""  # noqa: E501
        # Snapshot the keyword arguments by name so they can be applied
        # uniformly; each name in swagger_types is a parameter above.
        given = locals()
        # Initialise every private backing slot first ...
        for field in self.swagger_types:
            setattr(self, '_' + field, None)
        self.discriminator = None
        # ... then route the supplied values through the public setters,
        # exactly as the generated code did.
        for field in self.swagger_types:
            if given[field] is not None:
                setattr(self, field, given[field])

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # Recursively turn nested models (anything exposing to_dict)
            # into plain dicts; containers are converted element-wise.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, 'to_dict') else item
                        for item in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, 'to_dict') else val)
                        for key, val in value.items()}
            return value

        result = {attr: _convert(getattr(self, attr))
                  for attr in self.swagger_types}
        # Generated escape hatch for dict-based models; always False here.
        if issubclass(AllocationCompositionAggregatedVO, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, AllocationCompositionAggregatedVO)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _vo_accessor(field):
    """Build the public property for *field*, backed by ``_<field>``."""
    private = '_' + field

    def _get(self):
        return getattr(self, private)

    def _set(self, value):
        setattr(self, private, value)

    doc = ('The %s of this AllocationCompositionAggregatedVO.' % field)
    return property(_get, _set, doc=doc)


# Attach one plain get/set property per swagger attribute, replacing the
# twelve hand-rolled, identical property blocks of the generated code.
for _field in AllocationCompositionAggregatedVO.swagger_types:
    setattr(AllocationCompositionAggregatedVO, _field, _vo_accessor(_field))
del _field
| [
"hydrogen@Hydrogens-MacBook-Pro.local"
] | hydrogen@Hydrogens-MacBook-Pro.local |
2fb1ddc9401e3c85ad0110a9619972c887b8bb66 | 0fd1aca15bc8680aea34678f08f804223ee85bee | /jupyter_kernel_mgmt/__init__.py | 79860d72e20dcc27c48bdf88fb96d76f888a1e9a | [
"BSD-3-Clause"
] | permissive | kevin-bates/jupyter_kernel_mgmt | 3f6865ebeace4064f40cde8a96151ecf847137f0 | 8a74331520940e4a35b37273e299319e64dccad3 | refs/heads/master | 2020-04-05T05:29:56.213683 | 2018-11-08T16:48:02 | 2018-11-08T16:48:02 | 156,598,252 | 1 | 0 | NOASSERTION | 2018-11-07T19:34:20 | 2018-11-07T19:34:20 | null | UTF-8 | Python | false | false | 79 | py | """Manage and connect to Jupyter kernels"""
from ._version import __version__
| [
"thomas@kluyver.me.uk"
] | thomas@kluyver.me.uk |
7766359a647d8cdbdc258f8c7528ab053da15bfe | 5c6137c33283e479cb61ad1cf3d5381c528bfbf3 | /11-dnn-keras/mnist_ann.py | 4f21c7800fa78e2da5376b4cf53c9dc5106320d2 | [
"Apache-2.0"
] | permissive | iproduct/course-social-robotics | 4d2ff7e8df701f3d2a009af48c84d160c3dc8bb8 | dcdc6f5a947413510a030b9b89639fc804777c0d | refs/heads/master | 2023-07-20T13:03:19.623265 | 2023-06-09T14:50:01 | 2023-06-09T14:50:01 | 32,006,612 | 15 | 4 | NOASSERTION | 2023-07-13T07:19:01 | 2015-03-11T08:31:43 | JavaScript | UTF-8 | Python | false | false | 1,664 | py | import datetime
from keras import layers
from keras import models
from keras.datasets import mnist
from keras.utils import to_categorical
import tensorflow as tf
import os
if __name__ == '__main__':
    # Point XLA at the local CUDA toolkit (Windows-specific path).
    os.environ["XLA_FLAGS"] = '--xla_gpu_cuda_data_dir="D:/Program Files/CUDA/v11.2/development"'
    # Only configure GPU memory growth when a GPU is actually present;
    # indexing an empty device list raises IndexError on CPU-only hosts.
    physical_devices = tf.config.list_physical_devices('GPU')
    if physical_devices:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)  # important!
    tf.config.optimizer.set_jit(True)

    # Load MNIST, flatten 28x28 images to 784-vectors and scale to [0, 1].
    # Deriving the row count from the data generalises past the previous
    # hard-coded 60000/10000 sizes.
    (train_images, train_labels), (test_images, test_labels) = mnist.load_data()
    train_images = train_images.reshape((train_images.shape[0], 28 * 28))
    train_images = train_images.astype('float32') / 255
    test_images = test_images.reshape((test_images.shape[0], 28 * 28))
    test_images = test_images.astype('float32') / 255
    # One-hot encode the 10 digit classes.
    train_labels = to_categorical(train_labels)
    test_labels = to_categorical(test_labels)

    # Simple MLP: one hidden ReLU layer, softmax output over 10 digits.
    network = models.Sequential()
    network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))
    network.add(layers.Dense(10, activation='softmax'))
    network.summary()
    network.compile(optimizer='rmsprop',
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])

    # Per-run TensorBoard log directory.
    # NOTE(review): the leading "!" in the strftime pattern looks like a
    # typo but is preserved since it only affects the directory name.
    logdir = os.path.join("logs", datetime.datetime.now().strftime("!%Y%m%d-%H%M%S"))
    tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
    network.fit(train_images, train_labels, epochs=5, batch_size=128, callbacks=[tensorboard_callback])

    test_loss, test_acc = network.evaluate(test_images, test_labels)
    print(f'Test Accuracy: {test_acc}')
    print(f'Test Loss: {test_loss}')
    print('Demo finished')
| [
"office@iproduct.org"
] | office@iproduct.org |
dd25971e8d004a3482e8472b283f9ec585583bbc | 1fb9816f9c63a1dcfa5f8b18247e54725bc43ea5 | /django_inmo/apps/solicitudes/models.py | 0634e7918120a45616e303819f20765b4e0bb467 | [] | no_license | juanros13/inmo | c83860e6cb76a4c15f7d2128954adfb040992f9b | 3d70c4539d82056019f9851dbe35616342fc2359 | refs/heads/master | 2020-04-06T06:58:11.401762 | 2016-09-02T04:05:08 | 2016-09-02T04:05:08 | 63,124,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,121 | py | import datetime
from django.db import models
from django.contrib.auth.models import User
from apps.inmuebles.models import Departamento
from django.db.models.signals import post_save
class Mantenimiento(models.Model):
    """A maintenance request raised for a specific apartment."""

    # User that filed the request.
    usuario_creo = models.ForeignKey(
        User
    )
    # Apartment the problem belongs to.
    departamento = models.ForeignKey(
        Departamento
    )
    # Short summary of the problem.
    problema = models.CharField(
        max_length=450
    )
    # Free-form detailed description.
    descripcion = models.TextField()
    # Timestamps maintained by save() below, hidden from forms.
    # NOTE(review): these use naive local datetimes — consider
    # django.utils.timezone.now (or auto_now/auto_now_add) if USE_TZ is on.
    fecha_creacion = models.DateTimeField(editable=False)
    fecha_modificacion = models.DateTimeField(editable=False)

    def save(self, *args, **kwargs):
        ''' On save, update timestamps '''
        # Creation stamp is set only on first save (no pk yet);
        # modification stamp is refreshed on every save.
        if not self.id:
            self.fecha_creacion = datetime.datetime.today()
        self.fecha_modificacion = datetime.datetime.today()
        super(Mantenimiento, self).save(*args, **kwargs)
class ComentarioMantenimiento(models.Model):
    """A comment attached to a maintenance request."""

    # Comment body.
    comentario = models.TextField()
    # Request this comment belongs to.
    mantenimiento = models.ForeignKey(
        Mantenimiento
    )
    # User that wrote the comment.
    usuario_creo = models.ForeignKey(
        User
    )
    # Timestamps maintained by save() below, hidden from forms.
    # NOTE(review): naive local datetimes, same caveat as Mantenimiento.
    fecha_creacion = models.DateTimeField(editable=False)
    fecha_modificacion = models.DateTimeField(editable=False)

    def save(self, *args, **kwargs):
        ''' On save, update timestamps '''
        # Creation stamp only on first save; modification stamp always.
        if not self.id:
            self.fecha_creacion = datetime.datetime.today()
        self.fecha_modificacion = datetime.datetime.today()
        super(ComentarioMantenimiento, self).save(*args, **kwargs)
def enviar_mail_mantenimiento(sender, **kwargs):
    """post_save handler for Mantenimiento: send notification e-mails.

    Sends two HTML mails when a maintenance request is created: a staff
    notification and a confirmation for the requesting user.
    """
    # The mails announce a *new* request ("Nuevo mantenimiento"), so only
    # fire on creation, not on every later update of the row.
    if not kwargs.get('created', False):
        return
    obj = kwargs['instance']
    # Mantenimiento stores its author in `usuario_creo` (there is no
    # `usuario` field); the previous code's `obj.usuario` raised
    # AttributeError as soon as the signal fired.
    perfil = obj.usuario_creo.get_profile()
    departamento = Departamento.objects.filter(
        pk=obj.departamento.pk, idusuario=perfil.id_inquilino)[0]

    # Staff notification.
    # TODO(review): recipients are hard-coded; wire this up to the
    # building's responsables when that relation is available.
    subject = 'PLUMBAGO - Nuevo mantenimiento - %s ' % obj
    from_email = 'juanros13@gmail.com'
    to = ['juanros13@gmail.com', 'edgarcisneros88@gmail.com', 'alejandro@poware.com']
    html_content = render_to_string('include/mail_mantenimiento.html', {
        'edificio': departamento.edificio,
        'departamento': departamento,
        'mantenimiento': obj,
        'usuario': perfil,
        'correo': obj.usuario_creo.email
    })
    text_content = strip_tags(html_content)  # plain-text fallback part
    mail = EmailMultiAlternatives(subject, text_content, from_email, to)
    mail.attach_alternative(html_content, "text/html")
    mail.send()

    # User confirmation.
    # TODO(review): this also goes to the hard-coded staff list, not to
    # the requesting user's address — confirm the intended recipients.
    subject = 'PLUMBAGO - Se ha creado un nuevo mantenimiento'
    html_content = render_to_string('include/mail_mantenimiento_usuario.html', {
        'mantenimiento': obj,
    })
    text_content = strip_tags(html_content)
    mail = EmailMultiAlternatives(subject, text_content, from_email, to)
    mail.attach_alternative(html_content, "text/html")
    mail.send()


post_save.connect(enviar_mail_mantenimiento, sender=Mantenimiento)
"juanros13@gmail.com"
] | juanros13@gmail.com |
179f735b28effe5d26e924e9863035f844aa0393 | 4142b8c513d87361da196631f7edd82f11465abb | /python/1283A.py | d82d8b25daa008c44354bd181c3dca5782eed666 | [] | no_license | npkhanhh/codeforces | b52b66780426682ea1a3d72c66aedbe6dc71d7fe | 107acd623b0e99ef0a635dfce3e87041347e36df | refs/heads/master | 2022-02-08T17:01:01.731524 | 2022-02-07T10:29:52 | 2022-02-07T10:29:52 | 228,027,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | n = int(input())
for i in range(n):
h, m = list(map(int, input().split()))
res = 0
if m == 0:
res = (24 - h) * 60
else:
res = (23 - h) * 60 + (60 - m)
print(res)
| [
"npkhanh93@gmail.com"
] | npkhanh93@gmail.com |
81cb9106ab0d1bacd68813c487ba163e70be8a05 | 4ef31d0f04f4d6d7725a530bffb1a4b115283d6f | /site/_build/jupyter_execute/notebooks/09-deep-learning1/05-pytorch-mnist.py | 7805d77a7f8ab7a187bc5a2ee4bc298603f5b7ff | [
"MIT"
] | permissive | rpi-techfundamentals/introml_website_fall_2020 | 98bb1cc4712f416b393b996b849f39c660167057 | b85e5c297954bcaae565a8d25a18d2904d40f543 | refs/heads/master | 2023-07-14T16:49:21.625260 | 2020-12-10T17:51:34 | 2020-12-10T17:51:34 | 287,033,509 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,492 | py | [](http://rpi.analyticsdojo.com)
<center><h1>Pytorch with the MNIST Dataset - MINST</h1></center>
<center><h3><a href = 'http://rpi.analyticsdojo.com'>rpi.analyticsdojo.com</a></h3></center>
[](https://colab.research.google.com/github/rpi-techfundamentals/spring2019-materials/blob/master/11-deep-learning1/04_pytorch_mnist.ipynb)
# PyTorch Deep Explainer MNIST example
A simple example showing how to explain an MNIST CNN trained using PyTorch with Deep Explainer.
Adopted from: https://www.kaggle.com/ceshine/pytorch-deep-explainer-mnist-example
### Install the modified SHAP package
!pip install https://github.com/ceshine/shap/archive/master.zip
### Proceed
import torch, torchvision
from torchvision import datasets, transforms
from torch import nn, optim
from torch.nn import functional as F
import numpy as np
import shap
## Set Parameters for Neural Network
- Convolutional Neural network followed by fully connected.
batch_size = 128  # mini-batch size used by both train and test loaders
num_epochs = 2  # short demo run: two passes over MNIST
device = torch.device('cpu')  # run everything on CPU
class Net(nn.Module):
    """Small MNIST convnet: two conv/pool/ReLU stages followed by a
    two-layer classifier that emits class probabilities via softmax."""

    def __init__(self):
        super(Net, self).__init__()
        feature_stages = [
            nn.Conv2d(1, 10, kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.Conv2d(10, 20, kernel_size=5),
            nn.Dropout(),
            nn.MaxPool2d(2),
            nn.ReLU(),
        ]
        classifier_stages = [
            nn.Linear(320, 50),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(50, 10),
            nn.Softmax(dim=1),
        ]
        # Same submodule names as the original so state_dicts stay
        # compatible with existing checkpoints.
        self.conv_layers = nn.Sequential(*feature_stages)
        self.fc_layers = nn.Sequential(*classifier_stages)

    def forward(self, x):
        # Conv features are flattened to 320 values per sample (20x4x4)
        # before the fully connected classifier.
        features = self.conv_layers(x)
        return self.fc_layers(features.view(-1, 320))
def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch over *train_loader*, logging every 100 batches.

    Uses NLL loss on the log of the model's softmax output and updates the
    parameters in place via *optimizer*.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        predictions = model(data)
        loss = F.nll_loss(predictions.log(), target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            seen = batch_idx * len(data)
            total = len(train_loader.dataset)
            percent = 100. * batch_idx / len(train_loader)
            print(f'Train Epoch: {epoch} [{seen}/{total} '
                  f'({percent:.0f}%)]\tLoss: {loss.item():.6f}')
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output.log(), target).item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
# Standard torchvision MNIST pipeline: tensors scaled to [0, 1], shuffled
# batches. download=True fetches the dataset into ./mnist_data on first run.
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('mnist_data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor()
                   ])),
    batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('mnist_data', train=False, transform=transforms.Compose([
        transforms.ToTensor()
    ])),
    batch_size=batch_size, shuffle=True)

model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
# Train for num_epochs epochs, evaluating on the test set after each one.
for epoch in range(1, num_epochs + 1):
    train(model, device, train_loader, optimizer, epoch)
    test(model, device, test_loader)

# since shuffle=True, this is a random sample of test data
batch = next(iter(test_loader))
images, _ = batch

# First 100 images approximate the background distribution for
# DeepExplainer; the next three are the examples being explained.
background = images[:100]
test_images = images[100:103]

e = shap.DeepExplainer(model, background)
shap_values = e.shap_values(test_images)

# Reorder axes from NCHW to NHWC, the layout shap.image_plot expects.
shap_numpy = [np.swapaxes(np.swapaxes(s, 1, -1), 1, 2) for s in shap_values]
test_numpy = np.swapaxes(np.swapaxes(test_images.numpy(), 1, -1), 1, 2)

# plot the feature attributions
shap.image_plot(shap_numpy, -test_numpy)
The plot above shows the explanations for each class on four predictions. Note that the explanations are ordered for the classes 0-9 going left to right along the rows. | [
"jkuruzovich@gmail.com"
] | jkuruzovich@gmail.com |
ae59dd6e9cfbeb11a8ca44b432223362fd172702 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03607/s162140997.py | 54020c8894a588eeb5032c387da605123b0a3e76 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | import sys
# Deep recursion limit for recursive solutions (competitive-programming template).
sys.setrecursionlimit(500000)


def input():
    # Shadows the builtin: read one raw line from stdin and drop the
    # trailing newline (faster than builtins.input on large inputs).
    return sys.stdin.readline()[:-1]


def mi():
    # One line of whitespace-separated ints; returns a map object.
    return map(int, input().split())


def ii():
    # One line holding a single int.
    return int(input())


def i2(n):
    # Read n rows of ints and return the transpose (list of columns).
    tmp = [list(mi()) for i in range(n)]
    return [list(i) for i in zip(*tmp)]
def main():
N = ii()
A = [ii() for _ in range(N)]
dic = {}
for a in A:
if (not a in dic) or (dic[a] == 0):
dic[a] = 1
else:
dic[a] = 0
print(sum(dic.values()))
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c777e77fd91ee0ea1bd155528af27da135fb9698 | 7b28649a9635c1ef4501117f91a410e44742c175 | /tunobase/core/constants.py | bf503544a57ef18d1a275b841469b1bfcc1f9265 | [] | no_license | unomena/tunobase-core | 311a0e9406c0898a48101d743528ab08faa55d3b | fd24e378c87407131805fa56ade8669fceec8dfa | refs/heads/master | 2016-09-05T22:59:11.254754 | 2016-01-06T08:41:36 | 2016-01-06T08:41:36 | 39,237,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | '''
CORE APP
'''
STATE_PUBLISHED = 0
STATE_UNPUBLISHED = 1
STATE_STAGED = 2
STATE_DELETED = 3
STATE_CHOICES = (
(STATE_PUBLISHED, 'Published'),
(STATE_UNPUBLISHED, 'Unpublished'),
(STATE_STAGED, 'Staged'),
(STATE_DELETED, 'Deleted'),
)
PERMITTED_STATE = [STATE_PUBLISHED, STATE_STAGED] | [
"euan@unomena.com"
] | euan@unomena.com |
7678faa1454e94cba32949b71761c79f3f38cd97 | 06984002a22f41b6eb63f9bdf3eb3529792d766f | /trunk/keystone-debian/tests/test_exception.py | c74a60c6c56b5e05dbff01f0dc274839404564a6 | [
"Apache-2.0"
] | permissive | lixmgl/Intern_OpenStack_Swift | d6195c25cd59dfe603203f727ed409a61891a3bf | 40c241319c6b9a7aabacc9d927486864d13b8055 | refs/heads/master | 2020-04-14T20:40:15.496239 | 2015-08-06T22:24:38 | 2015-08-06T22:24:38 | 40,329,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,696 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import json
from keystone.common import wsgi
from keystone import exception
from keystone import test
class ExceptionTestCase(test.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def assertValidJsonRendering(self, e):
resp = wsgi.render_exception(e)
self.assertEqual(resp.status_int, e.code)
self.assertEqual(resp.status, '%s %s' % (e.code, e.title))
j = json.loads(resp.body)
self.assertIsNotNone(j.get('error'))
self.assertIsNotNone(j['error'].get('code'))
self.assertIsNotNone(j['error'].get('title'))
self.assertIsNotNone(j['error'].get('message'))
self.assertNotIn('\n', j['error']['message'])
self.assertNotIn(' ', j['error']['message'])
self.assertTrue(type(j['error']['code']) is int)
def test_all_json_renderings(self):
"""Everything callable in the exception module should be renderable.
... except for the base error class (exception.Error), which is not
user-facing.
This test provides a custom message to bypass docstring parsing, which
should be tested seperately.
"""
for cls in [x for x in exception.__dict__.values() if callable(x)]:
if cls is not exception.Error:
self.assertValidJsonRendering(cls(message='Overriden.'))
def test_validation_error(self):
target = uuid.uuid4().hex
attribute = uuid.uuid4().hex
e = exception.ValidationError(target=target, attribute=attribute)
self.assertValidJsonRendering(e)
self.assertIn(target, str(e))
self.assertIn(attribute, str(e))
def test_forbidden_action(self):
action = uuid.uuid4().hex
e = exception.ForbiddenAction(action=action)
self.assertValidJsonRendering(e)
self.assertIn(action, str(e))
def test_not_found(self):
target = uuid.uuid4().hex
e = exception.NotFound(target=target)
self.assertValidJsonRendering(e)
self.assertIn(target, str(e))
| [
"lixmgl@gmail.com"
] | lixmgl@gmail.com |
6acc39766857da7618a51b01c84e116ee615a3ff | 817ff801938d25776b2564b3087c8a3c674da1a7 | /NUP153_AnalyseComplex/Mutation_BindingAffinity/chainEFV/G1413M_chainEFV.py | c8b31df94c58e072e505d58db0b8527e3920361b | [] | no_license | yanghaobojordan/HIV1-Capsid | b22e21a9ad530ae11f128f409e298c5ab68871ee | f44f04dc9886e660c1fe870936c48e0e5bb5adc6 | refs/heads/main | 2023-04-09T01:27:26.626676 | 2021-04-23T18:17:07 | 2021-04-23T18:17:07 | 360,968,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,501 | py | from pyrosetta import *
from pyrosetta import PyMOLMover
from pyrosetta.toolbox import cleanATOM
from pyrosetta.toolbox import get_secstruct
from pyrosetta.teaching import *
from pyrosetta.toolbox import get_hbonds
from pyrosetta.toolbox import mutate_residue
from pyrosetta.rosetta.protocols.relax import *
from pyrosetta.rosetta.protocols.simple_moves import *
from pyrosetta.rosetta.core.fragment import *
from pyrosetta.rosetta.protocols.moves import *
from pyrosetta.rosetta.protocols.rigid import *
from pyrosetta.rosetta.protocols.docking import *
import sys
init()
def main():
filename=sys.argv[1]
pose=pose_from_pdb(filename)
scorefxn=get_fa_scorefxn()
mutate_residue(pose, pose.pdb_info().pdb2pose('V', 1413), "M")
MC(pose, scorefxn, "M")
def MC(pose, scorefxn, mutant):
test=Pose()
test.assign(pose)
dumpfile = 'G1413'+str(mutant)+'_chainEFV.pdb'
txtfile = 'G1413'+str(mutant)+'_chainEFV.txt'
moveList= 'G1413'+str(mutant)+'_chainEFV_MoveList.txt'
move_list_file=open(moveList, 'w')
newfile = open(txtfile, "w")
newfile.write(str(scorefxn(test)))
newfile.write('\n')
kT = 1
mc = MonteCarlo(test, scorefxn, kT)
count=0
move_list=[]
residue=int(test.pdb_info().pdb2pose('V', 1413))
residue=test.residue(residue).xyz("CA")
for i in range(1, test.total_residue()+1):
i_residue=test.residue(i).xyz("CA")
if (residue-i_residue).norm()<10:
move_list.append(i)
count +=1
move_list_file.write(str(count))
move_list_file.write('\n')
for i in move_list:
move_list_file.write(str(pose.pdb_info().pose2pdb(i)))
move_list_file.write(' ')
move_list_file.write(pose.residue(i).name())
move_list_file.write('\n')
move_list_file.close()
min_mover = MinMover()
mm = MoveMap()
mm.set_bb(False)
mm.set_chi(False)
for i in move_list:
mm.set_bb(i, True)
mm.set_chi(i, True)
min_mover.movemap(mm)
min_mover.score_function(scorefxn)
min_mover.min_type("dfpmin")
min_mover.tolerance(0.001)
smallmover=SmallMover(mm, kT, 1) #1 is the number of moves
#smallmover.angle_max(7)
shearmover=ShearMover(mm, kT, 1) #1 is the number of moves
#shearmover.angle_max(7)
task_pack = standard_packer_task(test)
task_pack.restrict_to_repacking()
task_pack.or_include_current(True)
task_pack.temporarily_fix_everything()
for i in move_list:
task_pack.temporarily_set_pack_residue(i,True)
pack_mover=PackRotamersMover(scorefxn, task_pack)
combined_mover = SequenceMover()
combined_mover.add_mover(smallmover)
combined_mover.add_mover(shearmover)
combined_mover.add_mover(min_mover)
trial_mover = TrialMover(combined_mover, mc)
for i in range (20):
pack_mover.apply(test)
mc.boltzmann(test)
newfile.write(str(i))
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
mc.recover_low(test)
print ('Repacking Complete')
print ('Lowest Score ', scorefxn(test))
print (mc.show_scores())
print (mc.show_counters())
print (mc.show_state())
newfile.write('Repacking_Complete')
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write('\n')
for i in range(5000):
trial_mover.apply(test)
#mc.boltzmann(test)
#print scorefxn(test), i
newfile.write(str(scorefxn(test)))
newfile.write(' ')
newfile.write(str(i))
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
mc.recover_low(test)
newfile.write('Minimization Complete')
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write('\n')
newfile.write('RMSD')
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('Acceptance Rate')
newfile.write(' ')
newfile.write(str(trial_mover.acceptance_rate()))
newfile.close()
test.dump_pdb(dumpfile)
print('Lowest Score ', scorefxn(test))
print("Number of Acceptances: ", trial_mover.num_accepts())
print("Acceptance Rate: ", trial_mover.acceptance_rate())
main()
| [
"yanghaobojordan@gmail.com"
] | yanghaobojordan@gmail.com |
75a3c01092d3f26b7e6532b9259cda3dc5a8da42 | 6e94333f805544e8b0a640e37638139e74084cbe | /effectlayer_demo.py | 99042ae4b4144b59ebe62820ae391049e6e1def5 | [
"MIT"
] | permissive | encukou/gillcup_graphics | 6b41b6afdb8223b1bdf5b02431c21d09cf5c36c8 | e107feff05aa31001316ffdcac3d5dc696f25b34 | refs/heads/master | 2021-01-18T16:28:27.841445 | 2013-09-28T23:39:06 | 2013-09-28T23:39:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,558 | py | #! /usr/bin/env python
from __future__ import division
import math
import gillcup
from gillcup_graphics import Window, run, RealtimeClock, Layer, EffectLayer
from gillcup_graphics import Rectangle
clock = RealtimeClock()
def makeColorrect(parent, i, speed, color):
colorrect = Rectangle(parent, position=(.5, .5),
anchor=(.5, .5), color=color)
colorrect.scale = 0, 0, 0
colorrect.opacity = 1
anim = gillcup.Animation(colorrect, 'rotation', speed, time=1,
timing='infinite')
anim |= gillcup.Animation(colorrect, 'scale', .5, .5, .5,
delay=i, time=5, easing='sine.out')
anim |= gillcup.Animation(colorrect, 'opacity', 1 - i / 7,
delay=i, time=.05, easing='cubic.out')
clock.schedule(anim)
return colorrect
def demo():
rootLayer = EffectLayer()
rootLayer.mosaic = 10, 10
fooLayer = EffectLayer(rootLayer)
makeColorrect(fooLayer, 0, 90, (.5, .5, .5))
makeColorrect(fooLayer, 1, -90, (1, 0, 0))
makeColorrect(fooLayer, 2, 80, (1, 1, 0))
makeColorrect(fooLayer, 3, -80, (0, 1, 0))
makeColorrect(fooLayer, 4, 70, (1, 0, 1))
makeColorrect(fooLayer, 5, -70, (0, 0, 1))
makeColorrect(fooLayer, 6, 60, (0, 1, 1))
makeColorrect(fooLayer, 7, -60, (.5, .5, .5))
clock.schedule(gillcup.Animation(rootLayer, 'mosaic', 1, 1, time=10))
clock.schedule(5 + gillcup.Animation(fooLayer, 'color', 0, 1, 0,
timing=lambda t, s, d: (0.5 + math.sin(t - s) * 0.5) ** 5))
Window(rootLayer, resizable=True)
run()
demo()
| [
"encukou@gmail.com"
] | encukou@gmail.com |
e494056506906db24da3e6c1b863e7a0d64e9b7f | d87483a2c0b50ed97c1515d49d62c6e9feaddbe0 | /.history/test_20210205211907.py | 46965da94a0c9c05d6c85ced3cd7ea5b4ea00084 | [
"MIT"
] | permissive | HopperKremer/hoptrader | 0d36b6e33922414003cf689fb81f924da076a54b | 406793c10bc888648290fd15c7c2af62cf8c6c67 | refs/heads/main | 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,784 | py | import requests
import os, sys
from tda import auth, client
from tda.orders.equities import equity_buy_market, equity_buy_limit
from tda.orders.common import Duration, Session
import tda
currentdir = os.path.dirname(os.path.realpath(__file__))
# currentdir = os.path.abspath('')
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config
from selenium.webdriver.chrome.options import Options
options = Options()
options.binary_location = "/home/hopper/chromedriver"
# PATH = "/home/hopper/chromedriver"
token_path = "token"
redirect_uri = "https://localhost"
# Then when you authenticate. excecutable_path is where chromedriver is located on your system.
### AUTENTICATE ###
try:
c = auth.client_from_token_file(config.token_path, config.api_key)
except FileNotFoundError:
from selenium import webdriver
with webdriver.Chrome(chrome_options=options, executable_path= r'C:\Users\absco\Anaconda3\envs\td_ameritrade\chromedriver') as driver:
c = auth.client_from_login_flow(
driver, config.api_key, config.redirect_uri, config.token_path)
from selenium import webdriver
import time
import json
# token_path = "token"
# DRIVER_PATH = "/home/hopper/chromedriver"
print("hi")
# driver = webdriver.Chrome(DRIVER_PATH)
# try:
# c = auth.client_from_token_file(token_path, config.api_key)
# except FileNotFoundError:
# c = auth.client_from_login_flow(driver, config.api_key, redirect_uri, token_path)
# All this scraping code works
driver.get("https://financhill.com/screen/stock-score")
time.sleep(2)
print('1.1')
driver.find_element_by_css_selector(
'span[data-sort-name="stock_score_normalized"]'
).click()
time.sleep(2)
print('1.2')
tickers = driver.find_elements_by_tag_name("td")
positions = c.Account.Fields.POSITIONS
r = c.get_account(config.tda_acct_num, fields=positions)
stocks = r.json()['securitiesAccount']['positions']
stock_symbols = [] #append later
for stock in stocks:
stock_symbols.append([stock['instrument']['symbol'], stock['instrument']['symbol']])
new_stocks_found = False
already_owned = []
advanced_mode = True
i = 0
bought = 0
# [0]:Ticker, [1]:Share Price, [2]:Rating, [3]:Score, [4]:Rating Change Date, [5]:Price Change %
# Check the top 20 stocks on Financhill
while i < 20:
# Get ticker and price of stock
ticker = str(tickers[10*i].text)
share_price = float(tickers[10*i + 1].text)
# Calculate how many shares to buy in order to equal about $1000
desired_dollar_amount = 1000 # How many dollars of each stock to buy
num_shares = round(desired_dollar_amount / share_price)
if bought >= 6:
break
# Skip if ticker is already owned
elif (ticker in stock_symbols):
already_owned.append(str(i) + '. You already own ' + ticker)
i+=1
if advanced_mode:
shares_to_buy = int(input("You already own " + ticker + ", enter how many shares to buy(0 to skip):"))
# Build, place, & print order (uncomment next 2 lines to buy)
# order = equity_buy_market(ticker, shares_to_buy)
# r = c.place_order(config.tda_acct_num, order)
bought+=1
else:
# Build, place, & print order (uncomment next 2 lines to buy)
# order = equity_buy_market(ticker, num_shares)
# r = c.place_order(config.tda_acct_num, order)
print(str(i) + ". Bought " + str(num_shares) + " shares of " + ticker + " up " + tickers[10*i + 5].text + " at $" + tickers[10*i + 1].text)
bought += 1
# Toggle message and increment counter
new_stocks_found = True
i += 1
for sentence in already_owned:
print(sentence)
# If no new stocks were found
if (not new_stocks_found):
print("You already own all the top stocks")
driver.quit() | [
"hopperkremer@gmail.com"
] | hopperkremer@gmail.com |
f68180be7a5d89b9ca9656c4d2d8902e27d08ce4 | 6cd32e8b7ab4116a5132a36268c9ba1486445399 | /app/blog/sample.py | bc084fe37180d0de36037f9966f7f960153ec6f4 | [] | no_license | mongkyo/django_prac | 81c185b597f47959153a17849620a5650fb2e10e | 790e179608b281099943d60a399b40793d9e69f3 | refs/heads/master | 2020-03-30T00:59:18.573469 | 2018-09-28T11:37:50 | 2018-09-28T11:37:50 | 150,553,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | import os
current_file = os.path.abspath(__file__)
blog_forder = os.path.dirname(current_file)
app_forder = os.path.dirname(blog_forder)
#templates_forder = os.path.join(app_forder, post_list.html)
print(current_file)
print(blog_forder)
print(app_forder)
#print(templates_forder)
| [
"dreamong91@gmail.com"
] | dreamong91@gmail.com |
080d10e9391a506b83b3f365d85c8631e66c0175 | 22279487bee5c983c13887ba11e6a4cd40e8bbe3 | /PreprocessData/all_class_files/Brewery.py | 062e2da9eb98d0aed0b9efd574956a07aee396b5 | [
"MIT"
] | permissive | DylanNEU/Schema | 018c9f683c683068422ed7b6392dcebd4ab4d4cd | 4854720a15894dd814691a55e03329ecbbb6f558 | refs/heads/main | 2023-08-30T01:50:20.541634 | 2021-11-01T15:30:41 | 2021-11-01T15:30:41 | 425,238,713 | 1 | 0 | MIT | 2021-11-06T12:29:12 | 2021-11-06T12:29:11 | null | UTF-8 | Python | false | false | 2,488 | py | from PreprocessData.all_class_files.FoodEstablishment import FoodEstablishment
import global_data
class Brewery(FoodEstablishment):
def __init__(self, additionalType=None, alternateName=None, description=None, disambiguatingDescription=None, identifier=None, image=None, mainEntityOfPage=None, name=None, potentialAction=None, sameAs=None, url=None, address=None, aggregateRating=None, alumni=None, areaServed=None, award=None, brand=None, contactPoint=None, department=None, dissolutionDate=None, duns=None, email=None, employee=None, event=None, faxNumber=None, founder=None, foundingDate=None, foundingLocation=None, funder=None, globalLocationNumber=None, hasOfferCatalog=None, hasPOS=None, isicV4=None, legalName=None, leiCode=None, location=None, logo=None, makesOffer=None, member=None, memberOf=None, naics=None, numberOfEmployees=None, owns=None, parentOrganization=None, publishingPrinciples=None, review=None, seeks=None, sponsor=None, subOrganization=None, taxID=None, telephone=None, vatID=None, additionalProperty=None, amenityFeature=None, branchCode=None, containedInPlace=None, containsPlace=None, geo=None, hasMap=None, isAccessibleForFree=None, maximumAttendeeCapacity=None, openingHoursSpecification=None, photo=None, publicAccess=None, smokingAllowed=None, specialOpeningHoursSpecification=None, currenciesAccepted=None, openingHours=None, paymentAccepted=None, priceRange=None, acceptsReservations=None, hasMenu=None, servesCuisine=None, starRating=None):
FoodEstablishment.__init__(self, additionalType, alternateName, description, disambiguatingDescription, identifier, image, mainEntityOfPage, name, potentialAction, sameAs, url, address, aggregateRating, alumni, areaServed, award, brand, contactPoint, department, dissolutionDate, duns, email, employee, event, faxNumber, founder, foundingDate, foundingLocation, funder, globalLocationNumber, hasOfferCatalog, hasPOS, isicV4, legalName, leiCode, location, logo, makesOffer, member, memberOf, naics, numberOfEmployees, owns, parentOrganization, publishingPrinciples, review, seeks, sponsor, subOrganization, taxID, telephone, vatID, additionalProperty, amenityFeature, branchCode, containedInPlace, containsPlace, geo, hasMap, isAccessibleForFree, maximumAttendeeCapacity, openingHoursSpecification, photo, publicAccess, smokingAllowed, specialOpeningHoursSpecification, currenciesAccepted, openingHours, paymentAccepted, priceRange, acceptsReservations, hasMenu, servesCuisine, starRating)
| [
"2213958880@qq.com"
] | 2213958880@qq.com |
97880779a3fbbc77db757da3cd217a3858bf47b1 | 3199331cede4a22b782f945c6a71150a10c61afc | /20210517PythonAdvanced/04-generator/gen01.py | ba7c00e986c7ad9a7314cfcec0a21d5154f97993 | [] | no_license | AuroraBoreas/language-review | 6957a3cde2ef1b6b996716addaee077e70351de8 | 2cb0c491db7d179c283dba205b4d124a8b9a52a3 | refs/heads/main | 2023-08-19T23:14:24.981111 | 2021-10-11T12:01:47 | 2021-10-11T12:01:47 | 343,345,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | "#Python is a protocol orienated lang; every top-level function or syntax has a corresponding duner method implemented;"
import time
def compute(n: int)->list:
rv = list()
for i in range(n):
rv.append(i)
time.sleep(.5)
return rv
if __name__ == "__main__":
for i in compute(10):
print(i) | [
"noreply@github.com"
] | AuroraBoreas.noreply@github.com |
ba42e48b971949bce9e5230814036b18659e60a5 | 3d228d5eac44b31d460dd81767b43309b7356577 | /euler/cipher.py | b22f3e3f5dbebcb24d5b4aba07b62794ce277489 | [
"BSD-3-Clause"
] | permissive | lsbardel/mathfun | da65a6f09faacdb4815111dae287c9b974acf928 | 98e7c210409c2b5777e91059c3651cef4f3045dd | refs/heads/master | 2021-05-02T08:56:05.565539 | 2020-07-30T09:14:04 | 2020-07-30T09:14:04 | 26,242,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | from string import ascii_lowercase
def cipher(codes):
while True:
keys = {}
while True:
s = 0
for c in ascii_lowercase:
if c in keys:
continue
key = ord(c)
i = len(keys)
for code in codes[i::3]:
b = code ^ key
if 31 < b < 127:
s += b
else:
s = 0
break
if s:
keys[c] = s
break
return s
if __name__ == '__main__':
import requests
codes = list(map(int, requests.get(
'https://projecteuler.net/project/resources/p059_cipher.txt'
).text.split(',')))
print(cipher(codes))
| [
"luca@quantmind.com"
] | luca@quantmind.com |
608b6f08a6631a536aef3b1583b7532ca1a24787 | c5a618ab198a7cc93b35715af2575ad4932f8dbb | /y_CVPR/z_bn/a.py | c1b06a27b80b369405a01d7f9ebb481c116a0a49 | [] | no_license | JaeDukSeo/Personal_Daily_NeuralNetwork_Practice | f33808a0413e130beae27f80fb4cc524834a8cc5 | f83ad23faefd726c647cc1d78021c25e086581be | refs/heads/master | 2021-09-12T07:20:49.212032 | 2018-04-15T08:15:37 | 2018-04-15T08:15:37 | 114,972,816 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,428 | py | import numpy as np
import tensorflow as tf
import sklearn
import matplotlib.pyplot as plt
import sys
np.random.seed(6789)
# create random data
data = np.random.normal(size=[10,5])
alpa,beta = 1.0,1.0
batch_e = 0.00001
data_mean = np.sum(data)/len(data)
print(data_mean.shape)
mini_var = np.sum(np.square(data-data_mean)) / len(data)
print(mini_var.shape)
normalize = (data-data_mean)/(np.sqrt(mini_var) + batch_e)
print(normalize.shape)
output = alpa*normalize + beta
# print(data)
# print("MAx: ",data.max())
# print("Min: ",data.min())
# print("Meanx: ",data.mean())
# print('========')
# print(normalize)
# print("MAx: ",normalize.max())
# print("Min: ",normalize.min())
# print("Meanx: ",normalize.mean())
# print('========')
# print(output)
# print("MAx: ",output.max())
# print("Min: ",output.min())
# print("Meanx: ",output.mean())
def batchnorm_forward(x, gamma, beta, eps):
N, D = x.shape
#step1: calculate mean
mu = 1./N * np.sum(x, axis = 0)
#step2: subtract mean vector of every trainings example
xmu = x - mu
#step3: following the lower branch - calculation denominator
sq = xmu ** 2
#step4: calculate variance
var = 1./N * np.sum(sq, axis = 0)
#step5: add eps for numerical stability, then sqrt
sqrtvar = np.sqrt(var + eps)
#step6: invert sqrtwar
ivar = 1./sqrtvar
#step7: execute normalization
xhat = xmu * ivar
#step8: Nor the two transformation steps
gammax = gamma * xhat
#step9
out = gammax + beta
#store intermediate
cache = (xhat,gamma,xmu,ivar,sqrtvar,var,eps)
return out, cache
print('--------')
data_mean = np.sum(data,axis=0)/len(data)
print(data_mean.shape)
mini_var = np.sum(np.square(data-data_mean),axis=0) / len(data)
print(mini_var.shape)
normalize = (data-data_mean)/(np.sqrt(mini_var) + batch_e)
print(normalize.shape)
output = alpa*normalize + beta
print(data)
print("MAx: ",data.max())
print("Min: ",data.min())
print("Meanx: ",data.mean())
print('========')
print(normalize)
print("MAx: ",normalize.max())
print("Min: ",normalize.min())
print("Meanx: ",normalize.mean())
print('========')
print(output)
print("MAx: ",output.max())
print("Min: ",output.min())
print("Meanx: ",output.mean())
print('========')
print('========')
sss = batchnorm_forward(data,1.0,1.0,batch_e)
print(sss[0])
print('========')
print('========')
print(( np.round(sss[0],decimals=4)- np.round(output,decimals=4) ).sum())
# -- end code -- | [
"jae.duk.seo@ryerson.ca"
] | jae.duk.seo@ryerson.ca |
91c938255952262e25f7b7131e69aa3929ff49a4 | 7c63a96fad4257f4959ffeba0868059fc96566fb | /py/d_beazly-python_cookbook/ch_01-data_structures_and_algorithms/12-determining_the_most_frequently_occuring_itesm/main.py | 1a09301f9f4c6596ff3ebf08d5d2eef0f107d568 | [
"MIT"
] | permissive | ordinary-developer/education | b426148f5690f48e0ed4853adfc3740bd038b72c | 526e5cf86f90eab68063bb7c75744226f2c54b8d | refs/heads/master | 2023-08-31T14:42:37.237690 | 2023-08-30T18:15:18 | 2023-08-30T18:15:18 | 91,232,306 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,176 | py | def example_1():
words = [
'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',
'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',
'eyes', "don't", 'look', 'around', 'the' 'eyes', 'look', 'into',
'my', 'eyes', "you're", 'under'
]
from collections import Counter
word_counts = Counter(words)
top_three = word_counts.most_common(3)
print(top_three)
print(word_counts['not'])
print(word_counts['eyes'])
def example_2():
words = [
'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',
'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',
'eyes', "don't", 'look', 'around', 'the' 'eyes', 'look', 'into',
'my', 'eyes', "you're", 'under'
]
morewords = ['why', 'are', 'you', 'not', 'looking', 'in', 'my', 'eyes']
from collections import Counter
word_counts = Counter(words)
for word in morewords:
word_counts[word] += 1
print(word_counts['eyes'])
def example_3():
words = [
'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',
'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',
'eyes', "don't", 'look', 'around', 'the' 'eyes', 'look', 'into',
'my', 'eyes', "you're", 'under'
]
morewords = ['why', 'are', 'you', 'not', 'looking', 'in', 'my', 'eyes']
from collections import Counter
word_counts = Counter(words)
word_counts.update(morewords)
print(word_counts['eyes'])
def example_4():
words = [
'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',
'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',
'eyes', "don't", 'look', 'around', 'the' 'eyes', 'look', 'into',
'my', 'eyes', "you're", 'under'
]
morewords = ['why', 'are', 'you', 'not', 'looking', 'in', 'my', 'eyes']
from collections import Counter
a = Counter(words)
b = Counter(morewords)
print(a)
print(b)
c = a + b
print(c)
d = a - b
print(d)
if __name__ == '__main__':
example_1()
example_2()
example_3()
example_4()
| [
"merely.ordinary.developer@gmail.com"
] | merely.ordinary.developer@gmail.com |
080664a40b0bc54179cc500c91d7b1c410ab2368 | efcde5b4ea4fbf01a08e4b2b4edb712fae46be48 | /shapes/size_config.py | 4fe0336834ec6fcf32964732df989e4176759495 | [] | no_license | vermashresth/Referential_Shapes | 9611a6450a8d3d65b4dae602fae8e178d5f32f67 | b8fae15561cafa741471065d7920162e4add2e54 | refs/heads/master | 2023-02-23T19:53:59.151580 | 2021-01-27T05:59:57 | 2021-01-27T05:59:57 | 292,020,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | train_size = 7450
val_size = 827
test_size = 4050
def return_sizes():
global train_size, val_size, test_size
return train_size, val_size, test_size, val_size
| [
"vermashresth@gmail.com"
] | vermashresth@gmail.com |
d06246ae8222a63d97c09c147d260a5bc954bcae | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /tools/style_variable_generator/views_generator.py | e9cb4d9aa4779ac7b9c4812bd254cb62193c0d27 | [
"Zlib",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"APSL-2.0",
"MIT",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 2,350 | py | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from base_generator import Color, Modes, BaseGenerator, VariableType
class ViewsStyleGenerator(BaseGenerator):
'''Generator for Views Variables'''
@staticmethod
def GetName():
return 'Views'
def Render(self):
self.Validate()
return self.ApplyTemplate(self, 'views_generator_h.tmpl',
self.GetParameters())
def GetParameters(self):
return {
'colors': self._CreateColorList(),
}
def GetFilters(self):
return {
'to_const_name': self._ToConstName,
'cpp_color': self._CppColor,
}
def GetGlobals(self):
globals = {
'Modes': Modes,
'out_file_path': None,
'namespace_name': None,
'in_files': self.in_file_to_context.keys(),
}
if self.out_file_path:
globals['out_file_path'] = self.out_file_path
globals['namespace_name'] = os.path.splitext(
os.path.basename(self.out_file_path))[0]
return globals
def _CreateColorList(self):
color_list = []
for name, mode_values in self.model[VariableType.COLOR].items():
color_list.append({'name': name, 'mode_values': mode_values})
return color_list
def _ToConstName(self, var_name):
return 'k%s' % var_name.title().replace('_', '')
def _CppColor(self, c):
'''Returns the C++ color representation of |c|'''
assert (isinstance(c, Color))
def AlphaToInt(alpha):
return int(alpha * 255)
if c.var:
return ('ResolveColor(ColorName::%s, color_mode)' %
self._ToConstName(c.var))
if c.rgb_var:
return (
'SkColorSetA(ResolveColor(ColorName::%s, color_mode), 0x%X)' %
(self._ToConstName(c.RGBVarToVar()), AlphaToInt(c.a)))
if c.a != 1:
return 'SkColorSetARGB(0x%X, 0x%X, 0x%X, 0x%X)' % (AlphaToInt(c.a),
c.r, c.g, c.b)
else:
return 'SkColorSetRGB(0x%X, 0x%X, 0x%X)' % (c.r, c.g, c.b)
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
843c6a6f952a549747c621ed38e3d7f642a494ac | 4b88a8451274ac577f2f7b014a8da315a189182c | /aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DeleteScalingRuleRequest.py | df7bd5674986fb961edd3a94ae2c6fa0d6a1f3a5 | [
"Apache-2.0"
] | permissive | gitfengri/aliyun-openapi-python-sdk | 4d447c65fcf3c509bbc6466693916ec2e067cff0 | 528822933f071d3c79c2f108d98c61d44284c9e4 | refs/heads/master | 2020-03-22T15:06:05.022068 | 2018-07-06T02:17:27 | 2018-07-06T02:17:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DeleteScalingRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ess', '2014-08-28', 'DeleteScalingRule','ESS')
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_ScalingRuleId(self):
return self.get_query_params().get('ScalingRuleId')
def set_ScalingRuleId(self,ScalingRuleId):
self.add_query_param('ScalingRuleId',ScalingRuleId) | [
"haowei.yao@alibaba-inc.com"
] | haowei.yao@alibaba-inc.com |
dc37df7186790e774c06d2e977de5fedcee72a29 | 122f9bf0d996c104f541453ab35c56f6ff3fc7cd | /z수업용문제/JunminLim/17362_손가락.py | 0b4db49695ea13de9384bcef8fdd13bfcb73bf3c | [] | no_license | JannaKim/PS | 1302e9b6bc529d582ecc7d7fe4f249a52311ff30 | b9c3ce6a7a47afeaa0c62d952b5936d407da129b | refs/heads/master | 2023-08-10T17:49:00.925460 | 2021-09-13T02:21:34 | 2021-09-13T02:21:34 | 312,822,458 | 0 | 0 | null | 2021-04-23T15:31:11 | 2020-11-14T13:27:34 | Python | UTF-8 | Python | false | false | 71 | py | L=[1,2,3,4,5,4,3,2]
#1,2,3,4,5,6,7,8
n=int(input())
x=n%8
print(L[x-1]) | [
"baradamoh@gmail.com"
] | baradamoh@gmail.com |
1f62556e20ec9f8343ddcb47fd0b25729b596fee | 5865a8a69c58ca09a5537858f636469dad35971e | /first_project/first_app/migrations/0001_initial.py | d6d8ddcb99c58143f19c729c344bbee45679b62e | [] | no_license | ohduran-attempts/theDisSilent | 3ee757e2c50ced7988fa1787f680e49e8b9a9c58 | 6016b639146412d7e3f0ea2ddf3fae5702d973c1 | refs/heads/master | 2020-03-21T16:29:52.677005 | 2018-07-19T21:06:36 | 2018-07-19T21:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-09 06:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccessRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
],
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('top_name', models.CharField(max_length=264, unique=True)),
],
),
migrations.CreateModel(
name='Webpage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=264, unique=True)),
('url', models.URLField(unique=True)),
('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_app.Topic')),
],
),
migrations.AddField(
model_name='accessrecord',
name='name',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_app.Webpage'),
),
]
| [
"alvaro.duranb@gmail.com"
] | alvaro.duranb@gmail.com |
717e0d16d5221d1f6c041b15c5b0159c2dc8478b | 87b3738de6d748754c61578dbab5c155907fa08f | /lexi.py | 15b20a481d189fcebaaae20ca4841269fcd3b973 | [] | no_license | Ponkiruthika112/codeset5 | a2461416d1e2d526928aa882cc4977a7b4629940 | c40e5f70ffb367f5653061d0f0a77ccbbcb58484 | refs/heads/master | 2020-04-15T22:33:47.000910 | 2019-05-19T14:27:33 | 2019-05-19T14:27:33 | 165,077,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | #lexical order
s=input()
a=""
l=list(s)
l.sort()
k=a.join(l)
print(k)
| [
"noreply@github.com"
] | Ponkiruthika112.noreply@github.com |
5d3a11ad3b206e8da3a8e466159889fc036ca511 | 16785f35ceb0f6336760e2c415047ea95037a8af | /run_game.py | 3bc8533ff445689329ac94947ea5f07ca4743ec5 | [] | no_license | bitcraft/pyweek18 | 07fd1c36c202806cb6412fd54ae7f693e3c64d63 | 9a9b33adf1445b4777565a604a6cffbb434beebe | refs/heads/master | 2021-01-20T12:04:57.211424 | 2015-12-28T18:10:28 | 2015-12-28T18:10:28 | 19,759,835 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,009 | py | from castlebats import config
import os
# load configuration
filename = os.path.join('config', 'castlebats.ini')
config.read(filename)
import logging
logger = logging.getLogger('castlebats.run')
logging.basicConfig(
level=getattr(logging, config.get('general', 'debug-level')),
format="%(name)s:%(filename)s:%(lineno)d:%(levelname)s: %(message)s")
from castlebats import resources
from castlebats.game import Game
import pygame
#import pymunkoptions
#pymunkoptions.options["debug"] = False
def check_libs():
import pytmx
import pymunktmx
import pyscroll
logger.info('pygame version:\t%s', pygame.__version__)
logger.info('pytmx version:\t%s', pytmx.__version__)
logger.info('pymunktmx version:\t%s', pymunktmx.__version__)
logger.info('pyscroll version:\t%s', pyscroll.__version__)
import pymunk
logger.info('pymunk version:\t%s', pymunk.__version__)
if __name__ == '__main__':
# simple wrapper to keep the screen resizeable
def init_screen(width, height):
if fullscreen:
return pygame.display.set_mode((width, height), pygame.FULLSCREEN)
else:
return pygame.display.set_mode((width, height), pygame.RESIZABLE)
check_libs()
screen_width = config.getint('display', 'width')
screen_height = config.getint('display', 'height')
fullscreen = config.getboolean('display', 'fullscreen')
window_caption = config.get('display', 'window-caption')
sound_buffer_size = config.getint('sound', 'buffer')
sound_frequency = config.getint('sound', 'frequency')
pygame.mixer.init(frequency=sound_frequency, buffer=sound_buffer_size)
screen = init_screen(screen_width, screen_height)
pygame.display.set_caption(window_caption)
pygame.init()
pygame.font.init()
screen.fill((0, 0, 0))
for thing in resources.load():
pygame.event.get()
pygame.display.flip()
game = Game()
try:
game.run()
except:
pygame.quit()
raise
| [
"leif.theden@gmail.com"
] | leif.theden@gmail.com |
d2b7fb8a90525f2c5372ebe29067cc85a1e85473 | aa3f670fcc2b43d8a5eb8a131082510bed2eb4d8 | /cgi-bin/request/raob.py | 03e604c77af7175b5f30b888cbfef24dff42f9f3 | [
"MIT"
] | permissive | jamayfieldjr/iem | e0d496311d82790ad518c600c2fcffe44e834da1 | 275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a | refs/heads/master | 2020-08-07T11:55:56.256857 | 2019-10-04T04:22:36 | 2019-10-04T04:22:36 | 213,439,554 | 1 | 0 | MIT | 2019-10-07T17:01:20 | 2019-10-07T17:01:20 | null | UTF-8 | Python | false | false | 2,529 | py | #!/usr/bin/env python
"""
Download interface for data from RAOB network
"""
import sys
import cgi
import datetime
import pytz
from pyiem.util import get_dbconn, ssw
from pyiem.network import Table as NetworkTable
def m(val):
"""Helper"""
if val is None:
return 'M'
return val
def fetcher(station, sts, ets):
"""Do fetching"""
dbconn = get_dbconn('postgis')
cursor = dbconn.cursor('raobstreamer')
stations = [station, ]
if station.startswith("_"):
nt = NetworkTable("RAOB")
stations = nt.sts[station]['name'].split("--")[1].strip().split(",")
cursor.execute("""
SELECT f.valid at time zone 'UTC', p.levelcode, p.pressure, p.height,
p.tmpc, p.dwpc, p.drct, round((p.smps * 1.94384)::numeric,0),
p.bearing, p.range_miles, f.station from
raob_profile p JOIN raob_flights f on
(f.fid = p.fid) WHERE f.station in %s and valid >= %s and valid < %s
""", (tuple(stations), sts, ets))
ssw(("station,validUTC,levelcode,pressure_mb,height_m,tmpc,"
"dwpc,drct,speed_kts,bearing,range_sm\n"))
for row in cursor:
ssw(("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
) % (row[10], m(row[0]),
m(row[1]), m(row[2]), m(row[3]), m(row[4]),
m(row[5]), m(row[6]), m(row[7]),
m(row[8]), m(row[9])))
def friendly_date(form, key):
"""More forgiving date conversion"""
val = form.getfirst(key)
try:
val = val.strip()
if len(val.split()) == 1:
dt = datetime.datetime.strptime(val, '%m/%d/%Y')
else:
dt = datetime.datetime.strptime(val, '%m/%d/%Y %H:%M')
dt = dt.replace(tzinfo=pytz.UTC)
except Exception as _exp:
ssw('Content-type: text/plain\n\n')
ssw(('Invalid %s date provided, should be "%%m/%%d/%%Y %%H:%%M"'
' in UTC timezone'
) % (key, ))
sys.exit()
return dt
def main():
"""Go Main Go"""
form = cgi.FieldStorage()
sts = friendly_date(form, 'sts')
ets = friendly_date(form, 'ets')
station = form.getfirst('station', 'KOAX')[:4]
if form.getfirst('dl', None) is not None:
ssw('Content-type: application/octet-stream\n')
ssw(("Content-Disposition: attachment; filename=%s_%s_%s.txt\n\n"
) % (station, sts.strftime("%Y%m%d%H"),
ets.strftime("%Y%m%d%H")))
else:
ssw('Content-type: text/plain\n\n')
fetcher(station, sts, ets)
if __name__ == '__main__':
main()
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
e37ae85cde6cdf500f9a4cfc2af4b9e11831abb4 | 05e3c6d28bbaf56f058d95ea0aab0006843b2420 | /swagger_client/models/file_metadata.py | 7f41346fc2bc051237d690812c7b9f89bf2d19df | [] | no_license | TheAdsOnTop/dynamix-python-client | 4ac5bf8bc975e3b1230bdf8ed0900e6b1382e318 | ace7ff34502cbbbb11b0c65bb3385b8c48247082 | refs/heads/master | 2020-03-08T00:10:05.566157 | 2018-04-02T19:02:47 | 2018-04-02T19:02:47 | 127,799,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,592 | py | # coding: utf-8
"""
Dynamix
Sign up for Dynamix & grab your token. # noqa: E501
OpenAPI spec version: v0.1.0
Contact: david@theadsontop.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FileMetadata(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'extension': 'str',
'name': 'str',
'uploaded_by_user_profile_rid': 'str'
}
attribute_map = {
'extension': 'extension',
'name': 'name',
'uploaded_by_user_profile_rid': 'uploadedByUserProfileRid'
}
def __init__(self, extension=None, name=None, uploaded_by_user_profile_rid=None): # noqa: E501
"""FileMetadata - a model defined in Swagger""" # noqa: E501
self._extension = None
self._name = None
self._uploaded_by_user_profile_rid = None
self.discriminator = None
if extension is not None:
self.extension = extension
if name is not None:
self.name = name
if uploaded_by_user_profile_rid is not None:
self.uploaded_by_user_profile_rid = uploaded_by_user_profile_rid
@property
def extension(self):
"""Gets the extension of this FileMetadata. # noqa: E501
:return: The extension of this FileMetadata. # noqa: E501
:rtype: str
"""
return self._extension
@extension.setter
def extension(self, extension):
"""Sets the extension of this FileMetadata.
:param extension: The extension of this FileMetadata. # noqa: E501
:type: str
"""
self._extension = extension
@property
def name(self):
"""Gets the name of this FileMetadata. # noqa: E501
:return: The name of this FileMetadata. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this FileMetadata.
:param name: The name of this FileMetadata. # noqa: E501
:type: str
"""
self._name = name
@property
def uploaded_by_user_profile_rid(self):
"""Gets the uploaded_by_user_profile_rid of this FileMetadata. # noqa: E501
:return: The uploaded_by_user_profile_rid of this FileMetadata. # noqa: E501
:rtype: str
"""
return self._uploaded_by_user_profile_rid
@uploaded_by_user_profile_rid.setter
def uploaded_by_user_profile_rid(self, uploaded_by_user_profile_rid):
"""Sets the uploaded_by_user_profile_rid of this FileMetadata.
:param uploaded_by_user_profile_rid: The uploaded_by_user_profile_rid of this FileMetadata. # noqa: E501
:type: str
"""
self._uploaded_by_user_profile_rid = uploaded_by_user_profile_rid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FileMetadata):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"akhanna@princeton.edu"
] | akhanna@princeton.edu |
5ca8a48f56293d94fcefe0d3de937b11003af9e6 | 2bc677218d0c13fe0551510b64782f2f001def17 | /tests/unit/testing_utils/test_biased.py | 9231acb98578e0a41978723ab09cfb6f9ccc7645 | [
"MIT"
] | permissive | drizm-team/python-commons | dc6a558c682315f12b7175d9395070c6ffcd3661 | 55e14754222cc1aa8a6c9137f75d529158864fee | refs/heads/master | 2023-02-10T13:59:51.338932 | 2021-01-01T22:58:36 | 2021-01-01T22:58:36 | 306,466,336 | 0 | 0 | MIT | 2021-01-01T19:45:43 | 2020-10-22T21:49:57 | Python | UTF-8 | Python | false | false | 1,106 | py | from drizm_commons.testing import self_to_id
def test__self_to_id():
"""
GIVEN I have a JSONified body
AND that body matches the Drizm HATEOAS format
WHEN I extract the identifier from a valid URI
THEN I should be get back a valid identifier
"""
test_key = 1
test_body = {
"self": {
"href": f"http://example.net/resources/{test_key}/"
}
}
found_key = self_to_id(test_body)
assert found_key == test_key
assert type(found_key) == int
test_key = "rgftbiuftbiubtiu"
test_body = lambda k: {
"self": {
"href": f"https://www.bsdomain.com/resources/okay/{k}"
"?state=1&mode=test"
}
}
found_key = self_to_id(test_body(test_key))
assert found_key == test_key
assert type(found_key) == str
test_key = 30
found_key = self_to_id(test_body(test_key))
assert found_key == test_key
assert type(found_key) == int
found_key = self_to_id(test_body(test_key), force_str=True)
assert found_key == str(test_key)
assert type(found_key) == str
| [
"kochbe.ber@gmail.com"
] | kochbe.ber@gmail.com |
0a0ffcba409b7f0645fe4fcadbf974bf793f1e0f | ba62f1d4c2d4209cbbe12bbf94ac2b44e56646eb | /loaders/liverct.py | 0dc37a9fc547faceed36efe4e0cf2f5b37297078 | [] | no_license | falconjhc/APDNet-SourceCodes | 358f4387254aa30b04277910b67a31ae050dd1ce | d612177dd462910019f31f32f2ec81aa046a602c | refs/heads/master | 2022-12-05T01:28:49.500452 | 2020-08-27T10:11:11 | 2020-08-27T10:11:11 | 290,631,642 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,643 | py |
import os
import scipy.io as sio
import nibabel as nib
import numpy as np
from skimage import transform
from PIL import Image
import utils.data_utils
from loaders.base_loader import Loader
from loaders.data import Data
from parameters import conf
import logging
from utils.image_utils import image_show
import cv2
class LiverCtLoader(Loader):
def __init__(self):
super(LiverCtLoader, self).__init__()
self.num_anato_masks = 1
self.num_patho_masks = 1
self.num_volumes = 75
self.input_shape = (256, 256, 1)
self.data_folder = conf['liverct']
self.log = logging.getLogger('liverct')
def sub_mask_generation(self, whole_mask, org_sub):
mask_num = len(whole_mask)
output_sub = []
for ii in range(mask_num):
current_whole = whole_mask[ii]
current_sub = org_sub[ii]
corrected_sub = current_whole - current_sub
corrected_sub[np.where(corrected_sub==-1)]=0
output_sub.append(corrected_sub)
return output_sub
def splits(self):
"""
:return: an array of splits into validation, test and train indices
"""
# valid_volume = [vol for vol in os.listdir(self.data_folder)
# if (not vol[0]=='.'
# and os.path.isdir(os.path.join(self.data_folder,
# os.path.join(vol,'LGE'))))]
# total_vol_num = len(valid_volume)
# split_train_num_0 = 80
# train_num_0 = np.float(split_train_num_0) / 100.0 * total_vol_num
splits = [
# {'validation': list(range(115,131)), # --> test on p11
# 'test': list(range(101,115)),
# 'training': list(range(1,101))
# },
{'validation': [4,73,23,8,67], # --> test on p11
'test': [22,129,53,15,2],
'training': [12,66,122,115,125,37,35,59,18,106,5,84,29,128,13,40,52,79,108,21]
},
{'validation': [66, 37, 106, 128, 79], # --> test on p11
'test': [115, 59, 84, 40, 21],
'training': [12, 122, 125, 35, 18, 5, 29, 13, 52, 108, 4, 73, 23, 8, 67, 22, 129, 53, 15, 2]
}
]
return splits
def load_labelled_data(self, split, split_type, modality='LGE',
normalise=True, value_crop=True, downsample=1, segmentation_option=-1):
"""
Load labelled data, and return a Data object. In ACDC there are ES and ED annotations. Preprocessed data
are saved in .npz files. If they don't exist, load the original images and preprocess.
:param split: Cross validation split: can be 0, 1, 2.
:param split_type: Cross validation type: can be training, validation, test, all
:param modality: Data modality. Unused here.
:param normalise: Use normalised data: can be True/False
:param value_crop: Crop extreme values: can be True/False
:param downsample: Downsample data to smaller size. Only used for testing.
:return: a Data object
"""
# if segmentation_option == 0:
# input("Segmentation 0")
if split < 0 or split > 4:
raise ValueError('Invalid value for split: %d. Allowed values are 0, 1, 2.' % split)
if split_type not in ['training', 'validation', 'test', 'all']:
raise ValueError('Invalid value for split_type: %s. Allowed values are training, validation, test, all'
% split_type)
npz_prefix = 'norm_' if normalise else 'unnorm_'
def _only_get_pahtology_data():
data_num = masks_tumour.shape[0]
new_images, new_anato_masks, new_patho_masks,new_index, news_slice = [],[],[],[],[]
for ii in range(data_num):
if np.sum(patho_masks[ii,:,:,:])==0:
continue
new_images.append(np.expand_dims(images[ii,:,:,:],axis=0))
new_anato_masks.append(np.expand_dims(anato_masks[ii,:,:,:],axis=0))
new_patho_masks.append(np.expand_dims(patho_masks[ii,:,:,:], axis=0))
new_index.append(index[ii])
news_slice.append(slice[ii])
new_images = np.concatenate(new_images)
new_anato_masks = np.concatenate(new_anato_masks)
new_patho_masks = np.concatenate(new_patho_masks)
new_index = np.concatenate(np.expand_dims(new_index,axis=0))
news_slice = np.concatenate(np.expand_dims(news_slice,axis=0))
return new_images, new_anato_masks, new_patho_masks,new_index,news_slice
# If numpy arrays are not saved, load and process raw data
if not os.path.exists(os.path.join(self.data_folder, npz_prefix + 'liverct_image.npz')):
if modality == 'LGE':
value_crop = False
images, masks_liver, masks_tumour, patient_index,index, slice = \
self.load_raw_labelled_data(normalise, value_crop)
# save numpy arrays
np.savez_compressed(os.path.join(self.data_folder, npz_prefix + 'liverct_image'), images)
np.savez_compressed(os.path.join(self.data_folder, npz_prefix + 'liverct_liver_mask'), masks_liver)
np.savez_compressed(os.path.join(self.data_folder, npz_prefix + 'liverct_tumour_mask'), masks_tumour)
np.savez_compressed(os.path.join(self.data_folder, npz_prefix + 'liverct_patienet_index'), patient_index)
np.savez_compressed(os.path.join(self.data_folder, npz_prefix + 'liverct_index'),index)
np.savez_compressed(os.path.join(self.data_folder, npz_prefix + 'liverct_slice'), slice)
# Load data from saved numpy arrays
else:
images = np.load(os.path.join(self.data_folder, npz_prefix + 'liverct_image.npz'))['arr_0']
masks_liver = np.load(os.path.join(self.data_folder, npz_prefix + 'liverct_liver_mask.npz'))['arr_0']
masks_tumour = np.load(os.path.join(self.data_folder, npz_prefix + 'liverct_tumour_mask.npz'))['arr_0']
patient_index = np.load(os.path.join(self.data_folder, npz_prefix + 'liverct_index.npz'))['arr_0']
index = np.load(os.path.join(self.data_folder, npz_prefix + 'liverct_index.npz'))['arr_0']
slice = np.load(os.path.join(self.data_folder, npz_prefix + 'liverct_slice.npz'))['arr_0']
assert images is not None and masks_liver is not None and masks_tumour is not None \
and index is not None, 'Could not find saved data'
assert images.max() == 1 and images.min() == -1, \
'Images max=%.3f, min=%.3f' % (images.max(), images.min())
self.log.debug('Loaded compressed liverct data of shape: ' + str(images.shape) + ' ' + str(index.shape))
anato_masks = masks_liver
patho_masks = masks_tumour
anato_mask_names = ['liver']
patho_mask_names = ['tumour']
images, anato_masks, patho_masks, index, slice = _only_get_pahtology_data()
assert anato_masks.max() == 1 and anato_masks.min() == 0, 'Anatomy Masks max=%.3f, min=%.3f' \
% (anato_masks.max(), anato_masks.min())
assert patho_masks.max() == 1 and patho_masks.min() == 0, 'Pathology Masks max=%.3f, min=%.3f' \
% (anato_masks.max(), anato_masks.min())
scanner = np.array([modality] * index.shape[0])
# Select images belonging to the volumes of the split_type (training, validation, test)
volumes = self.splits()[split][split_type]
images = np.concatenate([images[index == v] for v in volumes])
anato_masks = np.concatenate([anato_masks[index == v] for v in volumes])
patho_masks = np.concatenate([patho_masks[index == v] for v in volumes])
assert images.shape[0] == anato_masks.shape[0] == patho_masks.shape[0], "Num of Images inconsistent"
# create a volume index
slice = np.concatenate([slice[index == v] for v in volumes])
index = np.concatenate([index[index == v] for v in volumes])
scanner = np.array([modality] * index.shape[0])
assert images.shape[0] == index.shape[0]
self.log.debug(split_type + ' set: ' + str(images.shape))
return Data(images, [anato_masks, patho_masks], [anato_mask_names, patho_mask_names], index, slice, scanner, downsample)
def load_unlabelled_data(self, split, split_type, modality='LGE', normalise=True, value_crop=True):
"""
Load unlabelled data. In ACDC, this contains images from the cardiac phases between ES and ED.
:param split: Cross validation split: can be 0, 1, 2.
:param split_type: Cross validation type: can be training, validation, test, all
:param modality: Data modality. Unused here.
:param normalise: Use normalised data: can be True/False
:param value_crop: Crop extreme values: can be True/False
:return: a Data object
"""
images, index, slice = self.load_unlabelled_images('liverct', split, split_type, False, normalise, value_crop,modality=modality)
masks = np.zeros(shape=(images.shape[:-1]) + (1,))
scanner = np.array([modality] * index.shape[0])
return Data(images, masks, '-1', index, slice, scanner)
def load_all_data(self, split, split_type, modality='MR', normalise=True, value_crop=True, segmentation_option='-1'):
"""
Load all images, unlabelled and labelled, meaning all images from all cardiac phases.
:param split: Cross validation split: can be 0, 1, 2.
:param split_type: Cross validation type: can be training, validation, test, all
:param modality: Data modality. Unused here.
:param normalise: Use normalised data: can be True/False
:param value_crop: Crop extreme values: can be True/False
:return: a Data object
"""
images, index, slice = self.load_unlabelled_images('liverct', split, split_type, True, normalise, value_crop,modality=modality)
masks = np.zeros(shape=(images.shape[:-1]) + (1,))
scanner = np.array([modality] * index.shape[0])
return Data(images, masks, '-1', index, slice, scanner)
def load_raw_labelled_data(self, normalise=True, value_crop=True):
"""
Load labelled data iterating through the ACDC folder structure.
:param normalise: normalise data between -1, 1
:param value_crop: crop between 5 and 95 percentile
:return: a tuple of the image and mask arrays
"""
self.log.debug('Loading liver-ct data from original location')
images, masks_liver, masks_tumour, patient_index, index, slice = [], [], [], [], [], []
existed_directories = [vol for vol in os.listdir(self.data_folder)
if (not vol.startswith('.')) and os.path.isdir(os.path.join(self.data_folder, vol))]
existed_directories.sort()
# assert len(existed_directories) == len(self.volumes), 'Incorrect Volume Num !'
self.volumes = np.unique(self.volumes)
self.volumes.sort()
for patient_counter, patient_i in enumerate(self.volumes):
patient_image, patient_liver, patient_tumour = [], [], []
# if not os.path.isdir(os.path.join(self.data_folder,existed_directories[patient_i-1])):
# continue
patient = existed_directories[patient_i-1]
print('Extracting Labeled Patient: %s @ %d / %d' % (patient, patient_counter+1, len(self.volumes)))
patient_folder = os.path.join(self.data_folder,patient)
img_file_list = [file for file in os.listdir(patient_folder)
if (not file.startswith('.')) and (not file.find('tr')==-1)]
liver_file_list = [file for file in os.listdir(patient_folder)
if (not file.startswith('.')) and (not file.find('livermask') == -1)]
tumour_file_list = [file for file in os.listdir(patient_folder)
if (not file.startswith('.')) and (not file.find('tumourmask') == -1)]
img_file_list.sort()
liver_file_list.sort()
tumour_file_list.sort()
slices_num = len(img_file_list)
# for patient index (patient names)
for ii in range(slices_num):
patient_index.append(patient)
index.append(patient_i)
volume_num = len(img_file_list)
for v in range(volume_num):
current_img_name = img_file_list[v]
current_liver_name = liver_file_list[v]
current_tumour_name = tumour_file_list[v]
v_id_from_img = current_img_name.split('_')[1]
v_id_from_liver = current_liver_name.split('_')[1]
v_id_from_tumour = current_tumour_name.split('_')[1]
assert v_id_from_img==v_id_from_liver==v_id_from_tumour, 'Mis-Alignment !'
slice.append(v_id_from_img[5:])
# for original images
for org_img_path in img_file_list:
im = np.array(Image.open(os.path.join(patient_folder,org_img_path)))
# im = im / np.max(im - np.min(im))
# im = im[:,:,0]
patient_image.append(np.expand_dims(im,axis=-1))
patient_image = np.concatenate(patient_image, axis=-1)
# crop to 5-95 percentile
if value_crop:
p5 = np.percentile(patient_image.flatten(), 5)
p95 = np.percentile(patient_image.flatten(), 95)
patient_image = np.clip(patient_image, p5, p95)
# normalise to -1, 1
if normalise:
patient_image = utils.data_utils.normalise(patient_image, -1, 1)
images.append(np.expand_dims(patient_image,axis=-1))
for liver_seg_path in liver_file_list:
liver = np.array(Image.open(os.path.join(patient_folder,liver_seg_path)))
if not (len(np.unique(liver)) == 1 and np.unique(liver)[0] == 0):
liver = liver / np.max(liver)
patient_liver.append(np.expand_dims(liver, axis=-1))
patient_liver = np.concatenate(patient_liver,axis=-1)
masks_liver.append(np.expand_dims(patient_liver,axis=-1))
for tumour_seg_path in tumour_file_list:
tumour = np.array(Image.open(os.path.join(patient_folder,tumour_seg_path)))
if not (len(np.unique(tumour)) == 1 and np.unique(tumour)[0] == 0):
tumour = tumour / np.max(tumour)
patient_tumour.append(np.expand_dims(tumour, axis=-1))
patient_tumour = np.concatenate(patient_tumour,axis=-1)
masks_tumour.append(np.expand_dims(patient_tumour, axis=-1))
# move slice axis to the first position
images = [np.moveaxis(im, 2, 0) for im in images]
masks_liver = [np.moveaxis(m, 2, 0) for m in masks_liver]
masks_tumour = [np.moveaxis(m, 2, 0) for m in masks_tumour]
# crop images and masks to the same pixel dimensions and concatenate all data
images_cropped, masks_liver_cropped = utils.data_utils.crop_same(images, masks_liver,
(self.input_shape[0], self.input_shape[1]))
_, masks_tumour_cropped = utils.data_utils.crop_same(images, masks_tumour,
(self.input_shape[0], self.input_shape[1]))
images_cropped = np.concatenate(images_cropped, axis=0)
masks_tumour_cropped = np.concatenate(masks_tumour_cropped, axis=0)
masks_liver_cropped = np.concatenate(masks_liver_cropped, axis=0)
patient_index = np.array(patient_index)
index = np.array(index)
slice = np.array(slice)
return images_cropped, masks_liver_cropped, masks_tumour_cropped, patient_index, index, slice
def resample_raw_image(self, mask_fname, patient_folder, binary=True):
"""
Load raw data (image/mask) and resample to fixed resolution.
:param mask_fname: filename of mask
:param patient_folder: folder containing patient data
:param binary: boolean to define binary masks or not
:return: the resampled image
"""
m_nii_fname = os.path.join(patient_folder, mask_fname)
new_res = (1.37, 1.37)
print('Resampling %s at resolution %s to file %s' % (m_nii_fname, str(new_res), new_res))
im_nii = nib.load(m_nii_fname)
im_data = im_nii.get_data()
voxel_size = im_nii.header.get_zooms()
scale_vector = [voxel_size[i] / new_res[i] for i in range(len(new_res))]
order = 0 if binary else 1
result = []
for i in range(im_data.shape[-1]):
im = im_data[..., i]
rescaled = transform.rescale(im, scale_vector, order=order, preserve_range=True, mode='constant')
result.append(np.expand_dims(rescaled, axis=-1))
return np.concatenate(result, axis=-1)
def process_raw_image(self, im_fname, patient_folder, value_crop, normalise):
"""
Rescale between -1 and 1 and crop extreme values of an image
:param im_fname: filename of the image
:param patient_folder: folder of patient data
:param value_crop: True/False to crop values between 5/95 percentiles
:param normalise: True/False normalise images
:return: a processed image
"""
im = self.resample_raw_image(im_fname, patient_folder, binary=False)
# crop to 5-95 percentile
if value_crop:
p5 = np.percentile(im.flatten(), 5)
p95 = np.percentile(im.flatten(), 95)
im = np.clip(im, p5, p95)
# normalise to -1, 1
if normalise:
im = utils.data_utils.normalise(im, -1, 1)
return im
def load_raw_unlabelled_data(self, include_labelled=True, normalise=True, value_crop=True, modality='LGE'):
"""
Load unlabelled data iterating through the ACDC folder structure.
:param include_labelled: include images from ES, ED phases that are labelled. Can be True/False
:param normalise: normalise data between -1, 1
:param value_crop: crop between 5 and 95 percentile
:return: an image array
"""
self.log.debug('Loading unlabelled liverct data from original location')
images, patient_index, index, slice = [], [], [], []
existed_directories = [vol for vol in os.listdir(self.data_folder)
if (not vol.startswith('.')) and os.path.isdir(os.path.join(self.data_folder,vol))]
existed_directories.sort()
# assert len(existed_directories) == len(self.volumes), 'Incorrect Volume Num !'
self.volumes = np.unique(self.volumes)
self.volumes.sort()
for patient_counter, patient_i in enumerate(self.volumes):
patient_images = []
# if not os.path.isdir(os.path.join(self.data_folder,existed_directories[patient_i-1])):
# continue
patient = existed_directories[patient_i-1]
print('Extracting UnLabeled Patient: %s @ %d / %d' % (patient, patient_counter+1, len(self.volumes)))
patient_folder = os.path.join(self.data_folder, patient)
img_file_list = [file for file in os.listdir(patient_folder)
if (not file.startswith('.')) and (not file.find('tr') == -1)]
img_file_list.sort()
slices_num = len(img_file_list)
for v in range(slices_num):
current_img_name = img_file_list[v]
v_id_from_img = current_img_name.split('_')[1]
slice.append(v_id_from_img[5:])
# for patient index (patient names)
for ii in range(slices_num):
patient_index.append(patient)
index.append(patient_i)
# for original images
for org_img_path in img_file_list:
im = np.array(Image.open(os.path.join(patient_folder, org_img_path)))
# im = im / np.max(im - np.min(im))
# im = im[:, :, 0]
patient_images.append(np.expand_dims(im, axis=-1))
patient_images = np.concatenate(patient_images, axis=-1)
# crop to 5-95 percentile
if value_crop:
p5 = np.percentile(patient_images.flatten(), 5)
p95 = np.percentile(patient_images.flatten(), 95)
patient_images = np.clip(patient_images, p5, p95)
# normalise to -1, 1
if normalise:
patient_images = utils.data_utils.normalise(patient_images, -1, 1)
images.append(np.expand_dims(patient_images, axis=-1))
images = [np.moveaxis(im, 2, 0) for im in images]
zeros = [np.zeros(im.shape) for im in images]
images_cropped, _ = utils.data_utils.crop_same(images, zeros,
(self.input_shape[0], self.input_shape[1]))
images_cropped = np.concatenate(images_cropped, axis=0)[..., 0]
index = np.array(index)
slice = np.array(slice)
return images_cropped, patient_index, index, slice
def load_unlabelled_images(self, dataset, split, split_type, include_labelled, normalise, value_crop, modality):
"""
Load only images.
:param dataset:
:param split:
:param split_type:
:param include_labelled:
:param normalise:
:param value_crop:
:return:
"""
npz_prefix_type = 'ul_' if not include_labelled else 'all_'
npz_prefix = npz_prefix_type + 'norm_' if normalise else npz_prefix_type + 'unnorm_'
# Load saved numpy array
if os.path.exists(os.path.join(self.data_folder, npz_prefix + 'liverct_image.npz')):
images = \
np.load(os.path.join(self.data_folder,
npz_prefix + 'liverct_image.npz'))['arr_0']
index = \
np.load(os.path.join(self.data_folder,
npz_prefix + 'liverct_index.npz'))['arr_0']
patient_index = \
np.load(os.path.join(self.data_folder,
npz_prefix + 'liverct_patient_index.npz'))['arr_0']
slice = \
np.load(os.path.join(self.data_folder,
npz_prefix + 'liverct_patient_slice.npz'))['arr_0']
self.log.debug('Loaded compressed ' + dataset + ' unlabelled data of shape ' + str(images.shape))
# Load from source
else:
if modality == 'LGE':
value_crop = False
images, patient_index, index, slice = \
self.load_raw_unlabelled_data(include_labelled, normalise, value_crop, modality=modality)
images = np.expand_dims(images, axis=3)
np.savez_compressed(os.path.join(self.data_folder,
npz_prefix + 'liverct_image'), images)
np.savez_compressed(os.path.join(self.data_folder,
npz_prefix + 'liverct_index'), index)
np.savez_compressed(os.path.join(self.data_folder,
npz_prefix + 'liverct_patient_index'), patient_index)
np.savez_compressed(os.path.join(self.data_folder,
npz_prefix + 'liverct_patient_slice'), slice)
assert split_type in ['training', 'validation', 'test', 'all'], 'Unknown split_type: ' + split_type
if split_type == 'all':
return images, index
volumes = self.splits()[split][split_type]
images = np.concatenate([images[index == v] for v in volumes])
slice = np.concatenate([slice[index == v] for v in volumes])
index = np.concatenate([index[index==v] for v in volumes])
return images, index, slice | [
"falconjhc@gmail.com"
] | falconjhc@gmail.com |
11f906061793106fec7baf4e559968a3faea303a | 6a2b0db7d6c4ecef8434f3b35fcaef71eeb0d896 | /VENV/py3_venv/lib/python3.6/site-packages/pydocstyle/checker.py | 019da0d6283a1c3de36aa7ed4c4dae6c18e83fd3 | [] | no_license | pseudonode/nornircourse | 9bf890ecfadd1a08691f113e0cd2acadd4b9bffa | 1ad0372f9673de784233937cc15779bc2391e267 | refs/heads/master | 2022-11-09T20:18:22.714703 | 2019-10-04T08:06:42 | 2019-10-04T08:06:42 | 211,856,983 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 37,755 | py | """Parsed source code checkers for docstring violations."""
import ast
import string
import sys
import tokenize as tk
from itertools import takewhile, chain
from re import compile as re
from collections import namedtuple
from . import violations
from .config import IllegalConfiguration
from .parser import (Package, Module, Class, NestedClass, Definition, AllError,
Method, Function, NestedFunction, Parser, StringIO,
ParseError)
from .utils import log, is_blank, pairwise
from .wordlists import IMPERATIVE_VERBS, IMPERATIVE_BLACKLIST, stem
__all__ = ('check', )
def check_for(kind, terminal=False):
def decorator(f):
f._check_for = kind
f._terminal = terminal
return f
return decorator
class ConventionChecker:
"""Checker for PEP 257, NumPy and Google conventions.
D10x: Missing docstrings
D20x: Whitespace issues
D30x: Docstring formatting
D40x: Docstring content issues
"""
NUMPY_SECTION_NAMES = (
'Short Summary',
'Extended Summary',
'Parameters',
'Returns',
'Yields',
'Other Parameters',
'Raises',
'See Also',
'Notes',
'References',
'Examples',
'Attributes',
'Methods'
)
GOOGLE_SECTION_NAMES = (
'Args',
'Arguments',
'Attention',
'Attributes',
'Caution',
'Danger',
'Error',
'Example',
'Examples',
'Hint',
'Important',
'Keyword Args',
'Keyword Arguments',
'Methods',
'Note',
'Notes',
'Other Parameters',
'Parameters',
'Return',
'Returns',
'Raises',
'References',
'See Also',
'Tip',
'Todo',
'Warning',
'Warnings',
'Warns',
'Yield',
'Yields',
)
# Examples that will be matched -
# " random: Test" where random will be captured as the param
# " random : test" where random will be captured as the param
# " random_t (Test) : test " where random_t will be captured as the param
GOOGLE_ARGS_REGEX = re(
# Matches anything that fulfills all the following conditions:
r"^\s*" # Begins with 0 or more whitespace characters
r"(\w+)" # Followed by 1 or more unicode chars, numbers or underscores
# The above is captured as the first group as this is the paramater name.
r"\s*" # Followed by 0 or more whitespace characters
r"\(?(.*?)\)?" # Matches patterns contained within round brackets.
# The `(.*?)` is the second capturing group which matches any sequence of
# characters in a non-greedy way (denoted by the `*?`)
r"\s*" # Followed by 0 or more whitespace chars
r":" # Followed by a colon
".+" # Followed by 1 or more characters - which is the docstring for the parameter
)
def check_source(self, source, filename, ignore_decorators=None):
module = parse(StringIO(source), filename)
for definition in module:
for this_check in self.checks:
terminate = False
if isinstance(definition, this_check._check_for):
skipping_all = (definition.skipped_error_codes == 'all')
decorator_skip = ignore_decorators is not None and any(
len(ignore_decorators.findall(dec.name)) > 0
for dec in definition.decorators)
if not skipping_all and not decorator_skip:
error = this_check(self, definition,
definition.docstring)
else:
error = None
errors = error if hasattr(error, '__iter__') else [error]
for error in errors:
if error is not None and error.code not in \
definition.skipped_error_codes:
partition = this_check.__doc__.partition('.\n')
message, _, explanation = partition
error.set_context(explanation=explanation,
definition=definition)
yield error
if this_check._terminal:
terminate = True
break
if terminate:
break
@property
def checks(self):
all = [this_check for this_check in vars(type(self)).values()
if hasattr(this_check, '_check_for')]
return sorted(all, key=lambda this_check: not this_check._terminal)
@check_for(Definition, terminal=True)
def check_docstring_missing(self, definition, docstring):
"""D10{0,1,2,3}: Public definitions should have docstrings.
All modules should normally have docstrings. [...] all functions and
classes exported by a module should also have docstrings. Public
methods (including the __init__ constructor) should also have
docstrings.
Note: Public (exported) definitions are either those with names listed
in __all__ variable (if present), or those that do not start
with a single underscore.
"""
if (not docstring and definition.is_public or
docstring and is_blank(ast.literal_eval(docstring))):
codes = {Module: violations.D100,
Class: violations.D101,
NestedClass: violations.D106,
Method: (lambda: violations.D105() if definition.is_magic
else (violations.D107() if definition.is_init
else violations.D102())),
Function: violations.D103,
NestedFunction: violations.D103,
Package: violations.D104}
return codes[type(definition)]()
@check_for(Definition)
def check_one_liners(self, definition, docstring):
"""D200: One-liner docstrings should fit on one line with quotes.
The closing quotes are on the same line as the opening quotes.
This looks better for one-liners.
"""
if docstring:
lines = ast.literal_eval(docstring).split('\n')
if len(lines) > 1:
non_empty_lines = sum(1 for l in lines if not is_blank(l))
if non_empty_lines == 1:
return violations.D200(len(lines))
@check_for(Function)
def check_no_blank_before(self, function, docstring): # def
"""D20{1,2}: No blank lines allowed around function/method docstring.
There's no blank line either before or after the docstring.
"""
if docstring:
before, _, after = function.source.partition(docstring)
blanks_before = list(map(is_blank, before.split('\n')[:-1]))
blanks_after = list(map(is_blank, after.split('\n')[1:]))
blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
blanks_after_count = sum(takewhile(bool, blanks_after))
if blanks_before_count != 0:
yield violations.D201(blanks_before_count)
if not all(blanks_after) and blanks_after_count != 0:
yield violations.D202(blanks_after_count)
@check_for(Class)
def check_blank_before_after_class(self, class_, docstring):
"""D20{3,4}: Class docstring should have 1 blank line around them.
Insert a blank line before and after all docstrings (one-line or
multi-line) that document a class -- generally speaking, the class's
methods are separated from each other by a single blank line, and the
docstring needs to be offset from the first method by a blank line;
for symmetry, put a blank line between the class header and the
docstring.
"""
# NOTE: this gives false-positive in this case
# class Foo:
#
# """Docstring."""
#
#
# # comment here
# def foo(): pass
if docstring:
before, _, after = class_.source.partition(docstring)
blanks_before = list(map(is_blank, before.split('\n')[:-1]))
blanks_after = list(map(is_blank, after.split('\n')[1:]))
blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
blanks_after_count = sum(takewhile(bool, blanks_after))
if blanks_before_count != 0:
yield violations.D211(blanks_before_count)
if blanks_before_count != 1:
yield violations.D203(blanks_before_count)
if not all(blanks_after) and blanks_after_count != 1:
yield violations.D204(blanks_after_count)
@check_for(Definition)
def check_blank_after_summary(self, definition, docstring):
"""D205: Put one blank line between summary line and description.
Multi-line docstrings consist of a summary line just like a one-line
docstring, followed by a blank line, followed by a more elaborate
description. The summary line may be used by automatic indexing tools;
it is important that it fits on one line and is separated from the
rest of the docstring by a blank line.
"""
if docstring:
lines = ast.literal_eval(docstring).strip().split('\n')
if len(lines) > 1:
post_summary_blanks = list(map(is_blank, lines[1:]))
blanks_count = sum(takewhile(bool, post_summary_blanks))
if blanks_count != 1:
return violations.D205(blanks_count)
@staticmethod
def _get_docstring_indent(definition, docstring):
"""Return the indentation of the docstring's opening quotes."""
before_docstring, _, _ = definition.source.partition(docstring)
_, _, indent = before_docstring.rpartition('\n')
return indent
@check_for(Definition)
def check_indent(self, definition, docstring):
"""D20{6,7,8}: The entire docstring should be indented same as code.
The entire docstring is indented the same as the quotes at its
first line.
"""
if docstring:
indent = self._get_docstring_indent(definition, docstring)
lines = docstring.split('\n')
if len(lines) > 1:
lines = lines[1:] # First line does not need indent.
indents = [leading_space(l) for l in lines if not is_blank(l)]
if set(' \t') == set(''.join(indents) + indent):
yield violations.D206()
if (len(indents) > 1 and min(indents[:-1]) > indent or
indents[-1] > indent):
yield violations.D208()
if min(indents) < indent:
yield violations.D207()
@check_for(Definition)
def check_newline_after_last_paragraph(self, definition, docstring):
"""D209: Put multi-line docstring closing quotes on separate line.
Unless the entire docstring fits on a line, place the closing
quotes on a line by themselves.
"""
if docstring:
lines = [l for l in ast.literal_eval(docstring).split('\n')
if not is_blank(l)]
if len(lines) > 1:
if docstring.split("\n")[-1].strip() not in ['"""', "'''"]:
return violations.D209()
@check_for(Definition)
def check_surrounding_whitespaces(self, definition, docstring):
"""D210: No whitespaces allowed surrounding docstring text."""
if docstring:
lines = ast.literal_eval(docstring).split('\n')
if lines[0].startswith(' ') or \
len(lines) == 1 and lines[0].endswith(' '):
return violations.D210()
@check_for(Definition)
def check_multi_line_summary_start(self, definition, docstring):
"""D21{2,3}: Multi-line docstring summary style check.
A multi-line docstring summary should start either at the first,
or separately at the second line of a docstring.
"""
if docstring:
start_triple = [
'"""', "'''",
'u"""', "u'''",
'r"""', "r'''",
'ur"""', "ur'''"
]
lines = ast.literal_eval(docstring).split('\n')
if len(lines) > 1:
first = docstring.split("\n")[0].strip().lower()
if first in start_triple:
return violations.D212()
else:
return violations.D213()
@check_for(Definition)
def check_triple_double_quotes(self, definition, docstring):
r'''D300: Use """triple double quotes""".
For consistency, always use """triple double quotes""" around
docstrings. Use r"""raw triple double quotes""" if you use any
backslashes in your docstrings. For Unicode docstrings, use
u"""Unicode triple-quoted strings""".
Note: Exception to this is made if the docstring contains
""" quotes in its body.
'''
if docstring:
if '"""' in ast.literal_eval(docstring):
# Allow ''' quotes if docstring contains """, because
# otherwise """ quotes could not be expressed inside
# docstring. Not in PEP 257.
regex = re(r"[uU]?[rR]?'''[^'].*")
else:
regex = re(r'[uU]?[rR]?"""[^"].*')
if not regex.match(docstring):
illegal_matcher = re(r"""[uU]?[rR]?("+|'+).*""")
illegal_quotes = illegal_matcher.match(docstring).group(1)
return violations.D300(illegal_quotes)
@check_for(Definition)
def check_backslashes(self, definition, docstring):
r'''D301: Use r""" if any backslashes in a docstring.
Use r"""raw triple double quotes""" if you use any backslashes
(\) in your docstrings.
'''
# Just check that docstring is raw, check_triple_double_quotes
# ensures the correct quotes.
if docstring and '\\' in docstring and not docstring.startswith(
('r', 'ur')):
return violations.D301()
@check_for(Definition)
def check_unicode_docstring(self, definition, docstring):
r'''D302: Use u""" for docstrings with Unicode.
For Unicode docstrings, use u"""Unicode triple-quoted strings""".
'''
if 'unicode_literals' in definition.module.future_imports:
return
# Just check that docstring is unicode, check_triple_double_quotes
# ensures the correct quotes.
if docstring and sys.version_info[0] <= 2:
if not is_ascii(docstring) and not docstring.startswith(
('u', 'ur')):
return violations.D302()
@staticmethod
def _check_ends_with(docstring, chars, violation):
"""First line ends with one of `chars`.
First line of the docstring should end with one of the characters in `chars`.
`chars` supports either a `str` or an `Iterable[str]`. If the condition is
evaluated to be false, it raises `violation`.
"""
if docstring:
summary_line = ast.literal_eval(docstring).strip().split('\n')[0]
if not summary_line.endswith(chars):
return violation(summary_line[-1])
@check_for(Definition)
def check_ends_with_period(self, definition, docstring):
"""D400: First line should end with a period.
The [first line of a] docstring is a phrase ending in a period.
"""
return self._check_ends_with(docstring, '.', violations.D400)
@check_for(Definition)
def check_ends_with_punctuation(self, definition, docstring):
"""D415: should end with proper punctuation.
The [first line of a] docstring is a phrase ending in a period,
question mark, or exclamation point
"""
return self._check_ends_with(docstring, ('.', '!', '?'), violations.D415)
@check_for(Function)
def check_imperative_mood(self, function, docstring): # def context
"""D401: First line should be in imperative mood: 'Do', not 'Does'.
[Docstring] prescribes the function or method's effect as a command:
("Do this", "Return that"), not as a description; e.g. don't write
"Returns the pathname ...".
"""
if docstring and not function.is_test:
stripped = ast.literal_eval(docstring).strip()
if stripped:
first_word = stripped.split()[0]
check_word = first_word.lower()
if check_word in IMPERATIVE_BLACKLIST:
return violations.D401b(first_word)
try:
correct_form = IMPERATIVE_VERBS.get(stem(check_word))
except UnicodeDecodeError:
# This is raised when the docstring contains unicode
# characters in the first word, but is not a unicode
# string. In which case D302 will be reported. Ignoring.
return
if correct_form and correct_form != check_word:
return violations.D401(
correct_form.capitalize(),
first_word
)
@check_for(Function)
def check_no_signature(self, function, docstring): # def context
"""D402: First line should not be function's or method's "signature".
The one-line docstring should NOT be a "signature" reiterating the
function/method parameters (which can be obtained by introspection).
"""
if docstring:
first_line = ast.literal_eval(docstring).strip().split('\n')[0]
if function.name + '(' in first_line.replace(' ', ''):
return violations.D402()
@check_for(Function)
def check_capitalized(self, function, docstring):
"""D403: First word of the first line should be properly capitalized.
The [first line of a] docstring is a phrase ending in a period.
"""
if docstring:
first_word = ast.literal_eval(docstring).split()[0]
if first_word == first_word.upper():
return
for char in first_word:
if char not in string.ascii_letters and char != "'":
return
if first_word != first_word.capitalize():
return violations.D403(first_word.capitalize(), first_word)
@check_for(Definition)
def check_starts_with_this(self, function, docstring):
"""D404: First word of the docstring should not be `This`.
Docstrings should use short, simple language. They should not begin
with "This class is [..]" or "This module contains [..]".
"""
if docstring:
first_word = ast.literal_eval(docstring).split()[0]
if first_word.lower() == 'this':
return violations.D404()
@staticmethod
def _is_docstring_section(context):
"""Check if the suspected context is really a section header.
Lets have a look at the following example docstring:
'''Title.
Some part of the docstring that specifies what the function
returns. <----- Not a real section name. It has a suffix and the
previous line is not empty and does not end with
a punctuation sign.
This is another line in the docstring. It describes stuff,
but we forgot to add a blank line between it and the section name.
Parameters <-- A real section name. The previous line ends with
---------- a period, therefore it is in a new
grammatical context.
param : int
examples : list <------- Not a section - previous line doesn't end
A list of examples. with punctuation.
notes : list <---------- Not a section - there's text after the
A list of notes. colon.
Notes: <--- Suspected as a context because there's a suffix to the
----- section, but it's a colon so it's probably a mistake.
Bla.
'''
To make sure this is really a section we check these conditions:
* There's no suffix to the section name or it's just a colon AND
* The previous line is empty OR it ends with punctuation.
If one of the conditions is true, we will consider the line as
a section name.
"""
section_name_suffix = \
context.line.strip().lstrip(context.section_name.strip()).strip()
section_suffix_is_only_colon = section_name_suffix == ':'
punctuation = [',', ';', '.', '-', '\\', '/', ']', '}', ')']
prev_line_ends_with_punctuation = \
any(context.previous_line.strip().endswith(x) for x in punctuation)
this_line_looks_like_a_section_name = \
is_blank(section_name_suffix) or section_suffix_is_only_colon
prev_line_looks_like_end_of_paragraph = \
prev_line_ends_with_punctuation or is_blank(context.previous_line)
return (this_line_looks_like_a_section_name and
prev_line_looks_like_end_of_paragraph)
@classmethod
def _check_blanks_and_section_underline(cls, section_name, context, indentation):
"""D4{07,08,09,12,14}, D215: Section underline checks.
Check for correct formatting for docstring sections. Checks that:
* The line that follows the section name contains
dashes (D40{7,8}).
* The amount of dashes is equal to the length of the section
name (D409).
* The section's content does not begin in the line that follows
the section header (D412).
* The section has no content (D414).
* The indentation of the dashed line is equal to the docstring's
indentation (D215).
"""
blank_lines_after_header = 0
for line in context.following_lines:
if not is_blank(line):
break
blank_lines_after_header += 1
else:
# There are only blank lines after the header.
yield violations.D407(section_name)
yield violations.D414(section_name)
return
non_empty_line = context.following_lines[blank_lines_after_header]
dash_line_found = ''.join(set(non_empty_line.strip())) == '-'
if not dash_line_found:
yield violations.D407(section_name)
if blank_lines_after_header > 0:
yield violations.D412(section_name)
else:
if blank_lines_after_header > 0:
yield violations.D408(section_name)
if non_empty_line.strip() != "-" * len(section_name):
yield violations.D409(len(section_name),
section_name,
len(non_empty_line.strip()))
if leading_space(non_empty_line) > indentation:
yield violations.D215(section_name)
line_after_dashes_index = blank_lines_after_header + 1
# If the line index after the dashes is in range (perhaps we have
# a header + underline followed by another section header).
if line_after_dashes_index < len(context.following_lines):
line_after_dashes = \
context.following_lines[line_after_dashes_index]
if is_blank(line_after_dashes):
rest_of_lines = \
context.following_lines[line_after_dashes_index:]
if not is_blank(''.join(rest_of_lines)):
yield violations.D412(section_name)
else:
yield violations.D414(section_name)
else:
yield violations.D414(section_name)
@classmethod
def _check_common_section(cls, docstring, definition, context, valid_section_names):
"""D4{05,10,11,13}, D214: Section name checks.
Check for valid section names. Checks that:
* The section name is properly capitalized (D405).
* The section is not over-indented (D214).
* There's a blank line after the section (D410, D413).
* There's a blank line before the section (D411).
Also yields all the errors from `_check_blanks_and_section_underline`.
"""
indentation = cls._get_docstring_indent(definition, docstring)
capitalized_section = context.section_name.title()
if (context.section_name not in valid_section_names and
capitalized_section in valid_section_names):
yield violations.D405(capitalized_section, context.section_name)
if leading_space(context.line) > indentation:
yield violations.D214(capitalized_section)
if (not context.following_lines or
not is_blank(context.following_lines[-1])):
if context.is_last_section:
yield violations.D413(capitalized_section)
else:
yield violations.D410(capitalized_section)
if not is_blank(context.previous_line):
yield violations.D411(capitalized_section)
yield from cls._check_blanks_and_section_underline(capitalized_section,
context,
indentation)
@classmethod
def _check_numpy_section(cls, docstring, definition, context):
"""D406: NumPy-style section name checks.
Check for valid section names. Checks that:
* The section name has no superfluous suffix to it (D406).
Additionally, also yield all violations from `_check_common_section`
which are style-agnostic section checks.
"""
indentation = cls._get_docstring_indent(definition, docstring)
capitalized_section = context.section_name.title()
yield from cls._check_common_section(docstring,
definition,
context,
cls.NUMPY_SECTION_NAMES)
suffix = context.line.strip().lstrip(context.section_name)
if suffix:
yield violations.D406(capitalized_section, context.line.strip())
@staticmethod
def _check_args_section(docstring, definition, context):
"""D417: `Args` section checks.
Check for a valid `Args` or `Argument` section. Checks that:
* The section documents all function arguments (D417)
except `self` or `cls` if it is a method.
"""
if definition.kind == 'function':
function_pos_args = get_function_args(definition.source)
docstring_args = set()
for line in context.following_lines:
match = ConventionChecker.GOOGLE_ARGS_REGEX.match(line)
if match:
docstring_args.add(match.group(1))
missing_args = function_pos_args - docstring_args
if missing_args:
yield violations.D417(", ".join(missing_args), definition.name)
@classmethod
def _check_google_section(cls, docstring, definition, context):
"""D416: Google-style section name checks.
Check for valid section names. Checks that:
* The section does not contain any blank line between its name
and content (D412).
* The section is not empty (D414).
* The section name has semicolon as a suffix (D416).
Additionally, also yield all violations from `_check_common_section`
which are style-agnostic section checks.
"""
capitalized_section = context.section_name.title()
yield from cls._check_common_section(docstring,
definition,
context,
cls.GOOGLE_SECTION_NAMES)
suffix = context.line.strip().lstrip(context.section_name)
if suffix != ":":
yield violations.D416(capitalized_section + ":", context.line.strip())
if capitalized_section in ("Args", "Arguments"):
yield from cls._check_args_section(docstring, definition, context)
@staticmethod
def _get_section_contexts(lines, valid_section_names):
"""Generate `SectionContext` objects for valid sections.
Given a list of `valid_section_names`, generate an
`Iterable[SectionContext]` which provides:
* Section Name
* String value of the previous line
* The section line
* Following lines till the next section
* Line index of the beginning of the section in the docstring
* Boolean indicating whether the section is the last section.
for each valid section.
"""
lower_section_names = [s.lower() for s in valid_section_names]
def _suspected_as_section(_line):
result = get_leading_words(_line.lower())
return result in lower_section_names
# Finding our suspects.
suspected_section_indices = [i for i, line in enumerate(lines) if
_suspected_as_section(line)]
SectionContext = namedtuple('SectionContext', ('section_name',
'previous_line',
'line',
'following_lines',
'original_index',
'is_last_section'))
# First - create a list of possible contexts. Note that the
# `following_lines` member is until the end of the docstring.
contexts = (SectionContext(get_leading_words(lines[i].strip()),
lines[i - 1],
lines[i],
lines[i + 1:],
i,
False)
for i in suspected_section_indices)
# Now that we have manageable objects - rule out false positives.
contexts = (c for c in contexts if ConventionChecker._is_docstring_section(c))
# Now we shall trim the `following lines` field to only reach the
# next section name.
for a, b in pairwise(contexts, None):
end = -1 if b is None else b.original_index
yield SectionContext(a.section_name,
a.previous_line,
a.line,
lines[a.original_index + 1:end],
a.original_index,
b is None)
def _check_numpy_sections(self, lines, definition, docstring):
"""NumPy-style docstring sections checks.
Check the general format of a sectioned docstring:
'''This is my one-liner.
Short Summary
-------------
This is my summary.
Returns
-------
None.
'''
Section names appear in `NUMPY_SECTION_NAMES`.
Yields all violation from `_check_numpy_section` for each valid
Numpy-style section.
"""
for ctx in self._get_section_contexts(lines,
self.NUMPY_SECTION_NAMES):
yield from self._check_numpy_section(docstring, definition, ctx)
def _check_google_sections(self, lines, definition, docstring):
"""Google-style docstring section checks.
Check the general format of a sectioned docstring:
'''This is my one-liner.
Note:
This is my summary.
Returns:
None.
'''
Section names appear in `GOOGLE_SECTION_NAMES`.
Yields all violation from `_check_google_section` for each valid
Google-style section.
"""
for ctx in self._get_section_contexts(lines,
self.GOOGLE_SECTION_NAMES):
yield from self._check_google_section(docstring, definition, ctx)
@check_for(Definition)
def check_docstring_sections(self, definition, docstring):
"""Check for docstring sections."""
if not docstring:
return
lines = docstring.split("\n")
if len(lines) < 2:
return
yield from self._check_numpy_sections(lines, definition, docstring)
yield from self._check_google_sections(lines, definition, docstring)
parse = Parser()
def check(filenames, select=None, ignore=None, ignore_decorators=None):
"""Generate docstring errors that exist in `filenames` iterable.
By default, the PEP-257 convention is checked. To specifically define the
set of error codes to check for, supply either `select` or `ignore` (but
not both). In either case, the parameter should be a collection of error
code strings, e.g., {'D100', 'D404'}.
When supplying `select`, only specified error codes will be reported.
When supplying `ignore`, all error codes which were not specified will be
reported.
Note that ignored error code refer to the entire set of possible
error codes, which is larger than just the PEP-257 convention. To your
convenience, you may use `pydocstyle.violations.conventions.pep257` as
a base set to add or remove errors from.
Examples
---------
>>> check(['pydocstyle.py'])
<generator object check at 0x...>
>>> check(['pydocstyle.py'], select=['D100'])
<generator object check at 0x...>
>>> check(['pydocstyle.py'], ignore=conventions.pep257 - {'D100'})
<generator object check at 0x...>
"""
if select is not None and ignore is not None:
raise IllegalConfiguration('Cannot pass both select and ignore. '
'They are mutually exclusive.')
elif select is not None:
checked_codes = select
elif ignore is not None:
checked_codes = list(set(violations.ErrorRegistry.get_error_codes()) -
set(ignore))
else:
checked_codes = violations.conventions.pep257
for filename in filenames:
log.info('Checking file %s.', filename)
try:
with tk.open(filename) as file:
source = file.read()
for error in ConventionChecker().check_source(source, filename,
ignore_decorators):
code = getattr(error, 'code', None)
if code in checked_codes:
yield error
except (EnvironmentError, AllError, ParseError) as error:
log.warning('Error in file %s: %s', filename, error)
yield error
except tk.TokenError:
yield SyntaxError('invalid syntax in file %s' % filename)
def is_ascii(string):
"""Return a boolean indicating if `string` only has ascii characters."""
return all(ord(char) < 128 for char in string)
def leading_space(string):
"""Return any leading space from `string`."""
return re('\s*').match(string).group()
def get_leading_words(line):
"""Return any leading set of words from `line`.
For example, if `line` is " Hello world!!!", returns "Hello world".
"""
result = re("[\w ]+").match(line.strip())
if result is not None:
return result.group()
def get_function_args(function_string):
"""Return the function arguments given the source-code string."""
function_arg_node = ast.parse(function_string).body[0].args
arg_nodes = function_arg_node.args
kwonly_arg_nodes = function_arg_node.kwonlyargs
return set(arg_node.arg for arg_node in chain(arg_nodes, kwonly_arg_nodes))
| [
"andre@recursivenet.com"
] | andre@recursivenet.com |
b83fd7aaa877760297b078b64b079d9698836260 | 9d7d69178c6f1f1db6ed6767e0af32bfe836549c | /new_workspace/Gumtree_Workspace/Magnet/TempFieldScanJune2016sato_phasediagram_higherorderFC170Oe.py | dd9f5308282bc73aa5a50edc8d69265c05a90f06 | [] | no_license | Gumtree/Quokka_scripts | 217958288b59adbdaf00a9a13ece42f169003889 | c9687d963552023d7408a8530005a99aabea1697 | refs/heads/master | 2023-08-30T20:47:32.142903 | 2023-08-18T03:38:09 | 2023-08-18T03:38:09 | 8,191,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,338 | py | from gumpy.commons import sics
#from gumpy.quokka.quokka import *
from time import sleep
# Reldrive bsx
def reldriveBsx(offset):
driveBsx(getBsxValue(), offset)
def scan_temp_field(fieldList, mode, preset, tempSettleTime = 1, fieldSettleTime = 1):
# for wavelength in wavelengthList:
log('driving magnet1_driveable=' + str(0))
sics.drive('ma1_setpoint', 0)
log('driving tc1_driveable=' + str(60))
sics.drive('tc1_driveable', 60)
for i in xrange(len(fieldList)):
pack = fieldList[i]
# driveAtt(attList[i])
log('driving magnet1_driveable=' + str(0))
sics.drive('ma1_setpoint', 0)
log('driving tc1_driveable=' + str(pack[0]))
value = str(pack[0])
log(value)
sics.drive('tc1_driveable', value)
log('waiting for temperature to settle')
sleep(tempSettleTime)
fields = pack[1]
for j in xrange(len(fields)):
# log('driving magnet1_driveable=' + str(0))
# sics.drive('ma1_setpoint', 0)
# sics.drive('tc1_driveable', 80)
# log('waiting for temperature to settle')
# sleep(10)
# sics.drive('tc1_driveable', value)
# log('waiting for temperature to settle')
# sleep(900)
field = fields[j]
log('driving magnet1_driveable=' + str(field))
sics.drive('ma1_setpoint', field)
log('waiting for field to settle')
sleep(fieldSettleTime)
log('start counting')
# prep_transmission()
# quokka.scan(scanMode.time, dataType.HISTOGRAM_XY, 60)
# prep_scattering()
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
# prep_transmission()
# quokka.scan(scanMode.time, dataType.HISTOGRAM_XY, 60)
# prep_scattering()
# for att in attListTrans:
# driveAtt(att)
# sleep(0.1)
#driveAtt(330)
# Set to long config
###### for ga ######
###polarisation out
def scan_temp_field_FC(fieldList, mode, preset, tempSettleTime = 1, fieldSettleTime = 1):
# for wavelength in wavelengthList:
log('driving magnet1_driveable=' + str(0))
sics.drive('ma1_setpoint', 0)
log('driving tc1_driveable=' + str(60))
sics.drive('tc1_driveable', 60)
log('driving magnet1_driveable=' + str(170))
sics.drive('ma1_setpoint', 170)
log(str(57.5))
sics.drive('tc1_driveable', 57.5)
log('waiting for temperature to settle')
sleep(900)
for i in xrange(len(fieldList)):
pack = fieldList[i]
# driveAtt(attList[i])
value = str(pack[0])
log(value)
sics.drive('tc1_driveable', value)
log('waiting for temperature to settle')
sleep(tempSettleTime)
fields = pack[1]
for j in xrange(len(fields)):
field = fields[j]
log('driving magnet1_driveable=' + str(field))
sics.drive('ma1_setpoint', field)
log('waiting for field to settle')
sleep(fieldSettleTime)
log('start counting')
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
def scan_temp_field_ZFC(fieldList, mode, preset, tempSettleTime = 1, fieldSettleTime = 1):
    """Zero-field-cooled temperature/field sweep.

    First removes the magnetic field (ma1_setpoint = 0) and warms the
    sample to 60, then for each (temperature, [field setpoints]) pair in
    fieldList drives the temperature, waits tempSettleTime seconds, and
    for every field value drives the magnet, waits fieldSettleTime
    seconds and acquires 30 histogram scans.

    fieldList       -- sequence of (temperature, list_of_field_setpoints)
    mode, preset    -- passed straight through to quokka.scan
    tempSettleTime  -- settle delay (s) after each temperature move
    fieldSettleTime -- settle delay (s) after each field move
    """
    # Zero-field-cool preparation: field off, warm above the transition.
    log('driving magnet1_driveable=' + str(0))
    sics.drive('ma1_setpoint', 0)
    log('driving tc1_driveable=' + str(60))
    sics.drive('tc1_driveable', 60)
    for pack in fieldList:
        log('driving tc1_driveable=' + str(pack[0]))
        value = str(pack[0])
        log(value)
        sics.drive('tc1_driveable', value)
        log('waiting for temperature to settle')
        sleep(tempSettleTime)
        for field in pack[1]:
            log('driving magnet1_driveable=' + str(field))
            sics.drive('ma1_setpoint', field)
            log('waiting for field to settle')
            sleep(fieldSettleTime)
            log('start counting')
            # The original repeated this call 30 times verbatim; the loop
            # performs the identical sequence of acquisitions.
            for _ in xrange(30):
                quokka.scan(mode, dataType.HISTOGRAM_XY, preset)
#wavelengthList = [8.00, 8.10, 8.20, 8.30, 8.40, 8.50, 8.60, 8.70, 8.80, 8.90, 9.00, 9.20, 9.40, 9.60]
#attListTrans = [150]
# Drive sample position
# Position 20 = MT beam; 12 = opal and 10 = T1
#setSample(20, 'empty beam')
# for transmission runs
#driveBsx(32.5,100)
#print 'Beam stop out...'
#the wavelength you want to scan on
#wavelengthList = [4.502, 5.0, 5.76, 6.0, 7.0, 8.0]
# Main sweep: (temperature, [magnet field setpoints]) pairs.  Every
# temperature scans the same 0..400 field ramp in steps of 25.
tempFieldList = [
    (56.75, [0,25,50,75,100,125,150,175,200,225,250,275,300,325,350,375,400]),
    (56.5, [0,25,50,75,100,125,150,175,200,225,250,275,300,325,350,375,400]),
    (56.25, [0,25,50,75,100,125,150,175,200,225,250,275,300,325,350,375,400]),
    (56, [0,25,50,75,100,125,150,175,200,225,250,275,300,325,350,375,400]),
    (55.75, [0,25,50,75,100,125,150,175,200,225,250,275,300,325,350,375,400]),
    (55.5, [0,25,50,75,100,125,150,175,200,225,250,275,300,325,350,375,400]),
    (55.25, [0,25,50,75,100,125,150,175,200,225,250,275,300,325,350,375,400]),
    (55, [0,25,50,75,100,125,150,175,200,225,250,275,300,325,350,375,400]),
    (54.75, [0,25,50,75,100,125,150,175,200,225,250,275,300,325,350,375,400]),
    (54.5, [0,25,50,75,100,125,150,175,200,225,250,275,300,325,350,375,400]),
    (54.25, [0,25,50,75,100,125,150,175,200,225,250,275,300,325,350,375,400]),
    ]
# Settle delays (seconds) for the main sweep.
tempSettleTime = 60
fieldSettleTime = 0
# Field-cooled sweep: one field value per temperature.
tempFieldList_FC = [
    (57,[170]),
    (56.75,[170]),
    (56.25,[170]),
    (55.75,[170]),
    (55.25,[170]),
    ]
tempSettleTime_FC = 60
fieldSettleTime_FC = 0
# Zero-field-cooled sweep: single temperature, several fields.
tempFieldList_ZFC = [
    (56.75, [130,150,170,190,210,230,250])
    ]
tempSettleTime_ZFC = 900
fieldSettleTime_ZFC = 0
#attList = [270]
###### USE THE FOLLOWING TWO LINES FOR MULTIPLE WAVELENGTHS ######
#wavelengthList = [4.502,5.0,5.76,6.0,7.0,8.0,10.0]
#attList[300,300,270,270,240,210,180]
#if len(wavelengthList) != len(attList):
#    raise Exception, 'number of wavelength must be the same as the number of att setup'
#scan mode
#mode = scanMode.monitor
mode = scanMode.time
#how much counts you need
#preset = 7.0E6
# Presets (seconds per scan, since mode is time-based) for each sweep.
preset = 60
preset_FC = 60
preset_ZFC = 60
#pre-set, don't change
#driveDet(19250, 0)
#driveEntRotAp(180)
# configuration unpol 1
# slog('drive to configuration 1')
#driveGuide(guideConfig.g1)
#don't change below scan command
log('experiment started')
# Run the three sweeps in order: main, field-cooled, zero-field-cooled.
scan_temp_field(tempFieldList, mode, preset, tempSettleTime, fieldSettleTime)
scan_temp_field_FC(tempFieldList_FC, mode, preset_FC, tempSettleTime_FC, fieldSettleTime_FC)
scan_temp_field_ZFC(tempFieldList_ZFC, mode, preset_ZFC, tempSettleTime_ZFC, fieldSettleTime_ZFC)
# configuration -+
#polariser in
#log('drive to configuration 2')
#driveGuide(guideConfig.p1)
#scan_temp_field(tempFieldList, mode, preset, tempSettleTime, fieldSettleTime)
#
# configuration --
#polariser in
#log('drive to configuration --')
#sics.set('/sample/isops/relay', 1)
#sleep(1)
#sics.set('/sample/isops/relay', 0)
#scan_wavelength(wavelengthList, attList, mode, preset)
##
## configuration +-
## Drive flipper
#log('drive to configuration +-')
#driveFlipper(1)
##sics.set('/instrument/flipper/set_frequency', 407)
#sleep(60)
#scan_wavelength(wavelengthList, attList, mode, preset)
#
##
## configuration ++
## Drive flipper
#log('drive to configuration ++')
#sics.set('/sample/isops/relay', 1)
#sleep(1)
#sics.set('/sample/isops/relay', 0)
#scan_wavelength(wavelengthList, attList, mode, preset)
#
## configuration unpol2
##polariser out
#log('drive to configuration unpol 2')
#driveFlipper(0)
#sleep(3)
#driveGuide(guideConfig.g1)
#scan_wavelength(wavelengthList, attList, mode, preset)
######
log('experiment finished')
"quokka@DAV1-QUOKKA.nbi.ansto.gov.au"
] | quokka@DAV1-QUOKKA.nbi.ansto.gov.au |
def min(s, t):
    """Return the smallest window (substring) of *s* containing every
    character of *t* (with multiplicity), or "" when no such window exists.

    Classic sliding-window algorithm, O(len(s) + len(t)).

    NOTE: this deliberately shadows the builtin ``min`` — the name is kept
    for interface compatibility with existing callers.
    """
    # No window can exist if the pattern is empty or longer than the text.
    if not t or len(t) > len(s):
        return ""
    # need[c] > 0  => we still lack that many copies of c in the window.
    need = {}
    for ch in t:
        need[ch] = need.get(ch, 0) + 1
    missing = len(t)          # total characters still missing
    best_start = -1
    best_len = float('inf')
    left = 0
    for right, ch in enumerate(s):
        if need.get(ch, 0) > 0:
            missing -= 1
        need[ch] = need.get(ch, 0) - 1
        # Window [left, right] covers t: shrink from the left while it
        # still covers, recording the smallest span seen.
        while missing == 0:
            if right - left + 1 < best_len:
                best_len = right - left + 1
                best_start = left
            need[s[left]] += 1
            if need[s[left]] > 0:
                missing += 1
            left += 1
    return "" if best_start < 0 else s[best_start:best_start + best_len]

min("ADOBECODEBANC", "ABC")
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
a677e63fb993da9014051e73887f7b61716d738e | 00fdef95e46f81e285d786d4d7ce1d9e017f5102 | /python/notes/myPackage/web/urls.py | 17886962283142eb98a1654d4590653ea8e779e4 | [] | no_license | ByeongjunCho/TIL | 76cbff26074104d5c54afda2a8e21a680792cf2f | 44c50dc7b6fbee4dfb3b0fb4bbe1383ef0eb0953 | refs/heads/master | 2023-01-24T21:57:31.848845 | 2020-04-17T04:16:07 | 2020-04-17T04:16:07 | 195,908,293 | 0 | 0 | null | 2023-01-07T11:27:24 | 2019-07-09T01:15:46 | Jupyter Notebook | UTF-8 | Python | false | false | 178 | py | import webbrowser
def make_url(token, method):
    """Build a Telegram Bot API endpoint URL for *token* and *method*.

    Fixes the misspelled, wrong host of the original
    ('https://api.telgram.com'): the Bot API is served from
    https://api.telegram.org/bot<token>/<method>.
    """
    return f'https://api.telegram.org/bot{token}/{method}'
def docs():
    """Open the Telegram homepage in the default web browser."""
    # Side effect only: hand the URL to the OS browser, then report success.
    target = 'https://telegram.com'
    webbrowser.open(target)
    return True
"jjgk91@naver.com"
] | jjgk91@naver.com |
a49ac33258d3a6aed7110d32a49dafb0c95c249f | caae0a72b83a26f20af185f63a40926c4dc0ba68 | /balance_party.py | 677ce57592107d7976d88337f206affcf26225f3 | [] | no_license | Tryton-EvKliD/ekd_account | 425b87b7718b7c03456c75cd351e30fc214f3e5e | cf44af4b43342f76598055a1cc85e79b5681279b | refs/heads/master | 2020-05-30T02:03:43.774026 | 2011-03-31T05:25:03 | 2011-03-31T05:25:03 | 1,540,424 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,109 | py | # -*- coding: utf-8 -*-
#This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
##############################################################################
# This file defines the following objects:
# 1. Account types
# 2. Balances on analytic accounts
##############################################################################
"Balances Analytic Accounting (Party)"
from trytond.model import ModelView, ModelSQL, fields
from trytond.wizard import Wizard
from trytond.report import Report
from trytond.transaction import Transaction
from trytond.tools import safe_eval
from trytond.backend import TableHandler
from decimal import Decimal, ROUND_HALF_EVEN
from trytond.pyson import Equal, Eval, Not, In
from account import _PARTY, _PRODUCT, _MONEY, _LEVEL_ANALYTIC, _ANALYTIC_PARTY
from balance_account import _ID_TABLES_BALANCES, _ID_TABLES_BALANCES_PERIOD
import datetime
# Form-state helpers: balance records are read-only once drafted/closed.
_BALANCE_STATES = {
    'readonly': In(Eval('state'), ['draft', 'done']),
}
_BALANCE_DEPENDS = ['state']
# Zero-padded month numbers, used to build the per-month debit_NN /
# credit_NN column names of the yearly balance table.
_MOUNTH=['01','02','03','04','05','06','07','08','09','10','11','12',]
# Quarter number -> months belonging to it.
_QUARTER={
    '1': ['01','02','03',],
    '2': ['04','05','06',],
    '3': ['07','08','09',],
    '4': ['10','11','12',],
}
# Column-name prefixes for the two sides of an entry.
_SIDE=['debit_','credit_']
#
# Analytic accounts tree
#
class BalanceAnalyticParty(ModelSQL, ModelView):
    "Turnover and Balances Analytic Account"
    # One record ties an ekd.account to an analytic reference (party,
    # product, ...) at a given analytic level; per-period and per-year
    # turnover lines hang off it through the One2Many relations below.
    _name = "ekd.balances.party"
    _description = __doc__
    _rec_name = 'name_model'

    # Company is taken from the transaction context (see get_company).
    company = fields.Function(fields.Many2One('company.company', 'Company'),
            'get_company')
    account = fields.Many2One('ekd.account', 'Account', required=True, select=2,
            order_field="account.code",
            domain=[
                ('company', '=', Eval('company'))
            ], depends=['company'])
    type_balance = fields.Function(fields.Char('Type Balance'),
            'get_account_type')
    level = fields.Selection(_LEVEL_ANALYTIC, 'Level analityc', required=True)
    # Dynamic reference to the analytic object; the selection of target
    # models is built at runtime from ir.dictions (see model_ref_get).
    model_ref = fields.Reference('Analytic', selection='model_ref_get', select=2)
    name_model = fields.Function(fields.Char('Type'), 'name_model_get')
    name_ref = fields.Function(fields.Char('Analytic Account'), 'name_model_get')
    parent = fields.Many2One('ekd.balances.party', 'Parent Analytic')
    amount_periods = fields.One2Many('ekd.balances.party.period', 'account',
            'Balances and Turnover (Full)')
    amount_years = fields.One2Many('ekd.balances.party.year', 'account',
            'Balances and Turnover (Full)')
    childs = fields.One2Many('ekd.balances.party', 'parent', 'Children')
    # Opening/closing balances and turnovers, computed for the period (or
    # date) found in the transaction context (see get_balance_period).
    balance = fields.Function(fields.Numeric('Start Balance',
            digits=(16, Eval('currency_digits', 2))), 'get_balance_period')
    balance_dt = fields.Function(fields.Numeric('Debit Start',
            digits=(16, Eval('currency_digits', 2))), 'get_balance_period')
    balance_ct = fields.Function(fields.Numeric('Credit Start',
            digits=(16, Eval('currency_digits', 2))), 'get_balance_period')
    debit = fields.Function(fields.Numeric('Debit Turnover',
            digits=(16, Eval('currency_digits', 2))), 'get_balance_period')
    credit = fields.Function(fields.Numeric('Credit Turnover',
            digits=(16, Eval('currency_digits', 2))), 'get_balance_period')
    balance_end = fields.Function(fields.Numeric('End Balance',
            digits=(16, Eval('currency_digits', 2))), 'get_balance_period')
    balance_dt_end = fields.Function(fields.Numeric('Debit End',
            digits=(16, Eval('currency_digits', 2))), 'get_balance_period')
    balance_ct_end = fields.Function(fields.Numeric('Credit End',
            digits=(16, Eval('currency_digits', 2))), 'get_balance_period')
    turnover_debit = fields.Function(fields.One2Many('ekd.account.move.line',
            None, 'Entries'), 'get_entry')
    turnover_credit = fields.Function(fields.One2Many('ekd.account.move.line',
            None, 'Entries'), 'get_entry')
    currency_digits = fields.Function(fields.Integer('Currency Digits'),
            'get_currency_digits')
    state = fields.Selection([
            ('draft', 'Draft'),
            ('open', 'Open'),
            ('done', 'Closed'),
            ('deleted', 'Deleted')
            ], 'State', required=True)
    deleted = fields.Boolean('Flag Deleting')
    active = fields.Boolean('Active')

    def __init__(self):
        super(BalanceAnalyticParty, self).__init__()
        self._order.insert(0, ('account', 'ASC'))
        self._order.insert(1, ('level', 'ASC'))
        self._order.insert(2, ('model_ref', 'ASC'))
        self._sql_constraints += [
            ('balance_account_uniq', 'UNIQUE(account,level,model_ref, parent)',\
                'account, level, model_ref, parent - must be unique per balance!'),
        ]

    def init(self, module_name):
        """Create/upgrade the table and re-seed its id sequence.

        The balance tables share an id space: every table gets ids whose
        last digit is its slot in _ID_TABLES_BALANCES, with the sequence
        stepping by 10.
        """
        cursor = Transaction().cursor
        super(BalanceAnalyticParty, self).init(module_name)
        table = TableHandler(cursor, self, module_name)
        # Inspect the current sequence counter.
        cursor.execute("SELECT last_value, increment_by FROM %s" % table.sequence_name)
        last_value, increment_by = cursor.fetchall()[0]
        # Re-seed so that newly generated ids end with this table's digit.
        if str(last_value)[len(str(last_value)) - 1] != str(_ID_TABLES_BALANCES[self._table]):
            cursor.execute("SELECT setval('" + table.sequence_name + "', %s, true)" % _ID_TABLES_BALANCES[self._table])
        if increment_by != 10:
            cursor.execute("ALTER SEQUENCE " + table.sequence_name + " INCREMENT 10")

    def default_state(self):
        return Transaction().context.get('state') or 'draft'

    def default_currency(self):
        return Transaction().context.get('currency')

    def default_active(self):
        return True

    def model_ref_get(self):
        """Build the Reference selection (model, label) from ir.dictions."""
        dictions_obj = self.pool.get('ir.dictions')
        res = []
        diction_ids = dictions_obj.search([
            ('model', '=', 'ekd.account.level_analytic'),
            ('pole', '=', 'type_analytic'),
            ])
        for diction in dictions_obj.browse(diction_ids):
            res.append([diction.key, diction.value])
        return res

    def get_company(self, ids, name):
        """All records report the company of the current transaction."""
        context = Transaction().context
        res = {}.fromkeys(ids, context.get('company'))
        return res

    # NOTE(review): the original file defined get_account_type twice; the
    # first definition was dead (shadowed by this one) and has been removed.
    def get_entry(self, ids, names):
        """Collect the move lines linked to this analytic balance, per side."""
        move_line = self.pool.get('ekd.account.move.line')
        move_line_analytic_dt = self.pool.get('ekd.account.move.line.analytic_dt')
        move_line_analytic_ct = self.pool.get('ekd.account.move.line.analytic_ct')
        res = {}
        for balance in self.browse(ids):
            for name in names:
                res.setdefault(name, {})
                if name == 'turnover_debit':
                    line_analytic_dt = move_line_analytic_dt.search([('ref_analytic', '=', balance.id)])
                    res[name][balance.id] = [x.get('move_line') for x in move_line_analytic_dt.read(line_analytic_dt, ['move_line'])]
                elif name == 'turnover_credit':
                    line_analytic_ct = move_line_analytic_ct.search([('ref_analytic', '=', balance.id)])
                    res[name][balance.id] = [x.get('move_line') for x in move_line_analytic_ct.read(line_analytic_ct, ['move_line'])]
        return res

    def name_model_get(self, ids, names):
        """Resolve model_ref into a model label and the record's rec_name.

        Records are grouped per referenced model so each model is browsed
        only once.
        """
        group_model = {}
        group_name_model = {}
        res = {}
        res['name_model'] = {}.fromkeys(ids, False)
        res['name_ref'] = {}.fromkeys(ids, False)
        tmp_res = {}
        for line in self.browse(ids):
            res['name_model'][line.id] = line.model_ref
            if line.model_ref not in tmp_res.keys():
                tmp_res[line.model_ref] = [line.id, ]
            else:
                tmp_res[line.model_ref].append(line.id)
            ref_model, ref_id = line.model_ref.split(',', 1)
            if ref_id == '0':
                continue
            if ref_model not in group_model.keys():
                group_model[ref_model] = [int(ref_id), ]
            else:
                group_model[ref_model].append(int(ref_id))
        # Human-readable names of the referenced models.
        ir_model_obj = self.pool.get('ir.model')
        search_model_ids = ir_model_obj.search([('model', 'in', group_model.keys())])
        for ir_model_id in ir_model_obj.browse(search_model_ids):
            group_name_model[ir_model_id.model] = ir_model_id.rec_name
        for model in group_model.keys():
            model_obj = self.pool.get(model)
            for model_line in model_obj.browse(group_model[model]):
                rec_id = tmp_res['%s,%s' % (model, str(model_line.id))]
                if isinstance(rec_id, (int, long)):
                    res['name_model'][rec_id] = group_name_model[model]
                    res['name_ref'][rec_id] = model_line.rec_name
                else:
                    for rec_id_ in rec_id:
                        res['name_model'][rec_id_] = group_name_model[model]
                        res['name_ref'][rec_id_] = model_line.rec_name
        return res

    def get_account_type(self, ids, name):
        """Expose type/kind/balance-type of the linked ekd.account."""
        if name not in ('account_type', 'account_kind', 'type_balance'):
            raise Exception('Invalid name')
        res = {}
        for line in self.browse(ids):
            if line.account:
                if name == 'account_type':
                    res[line.id] = line.account.type.name
                elif name == 'account_kind':
                    res[line.id] = line.account.kind_analytic
                else:
                    res[line.id] = line.account.type_balance
        return res

    def get_currency_digits(self, ids, name):
        res = {}.fromkeys(ids, 2)
        for line in self.browse(ids):
            if line.account.currency:
                res[line.id] = line.account.currency.digits or 2
            elif line.account.company:
                res[line.id] = line.account.company.currency.digits or 2
        return res

    def get_balance_period(self, ids, names):
        """Read opening balances/turnovers for the context period from the
        period table and derive closing balances per account type."""
        if not ids:
            return {}
        res = {}
        fiscalyear_obj = self.pool.get('ekd.fiscalyear')
        # assumes all requested records share the first record's balance
        # type (active/passive/both) — TODO confirm
        type_balance = self.browse(ids[0]).account.type_balance
        period_obj = self.pool.get('ekd.period')
        context = Transaction().context
        if context.get('current_period'):
            period_id = period_obj.browse(context.get('current_period'))
            current_period = context.get('current_period')
        elif context.get('current_date'):
            # NOTE(review): search() returns a list here, which the SQL
            # below formats with %s — looks broken for this branch; verify.
            current_period = period_obj.search([
                ('company', '=', context.get('company')),
                ('start_date', '<=', context.get('current_date')),
                ('end_date', '>=', context.get('current_date')),
                ], limit=1)
        cr = Transaction().cursor
        cr.execute('SELECT id, account, balance_dt, balance_ct, '\
                'debit, credit, '\
                ' balance_dt-balance_ct+debit-credit as balance_end, '\
                ' balance_dt+debit as balance_dt_end, '\
                ' balance_ct+credit as balance_ct_end '\
            'FROM ekd_balances_party_period '\
            'WHERE period=%s AND account in (' % (current_period) + ','.join(map(str, ids)) + ')')
        for amount_id, account, balance_dt, balance_ct,\
                debit, credit, balance_end,\
                balance_dt_end, balance_ct_end in cr.fetchall():
            # SQLite uses float for SUM
            if not isinstance(balance_dt, Decimal):
                balance_dt = Decimal(str(balance_dt))
            if not isinstance(balance_ct, Decimal):
                balance_ct = Decimal(str(balance_ct))
            # Bug fix: the original converted the *_end values but stored
            # them into balance_dt/balance_ct, clobbering the opening
            # balances and leaving the end values unconverted.
            if not isinstance(balance_dt_end, Decimal):
                balance_dt_end = Decimal(str(balance_dt_end))
            if not isinstance(balance_ct_end, Decimal):
                balance_ct_end = Decimal(str(balance_ct_end))
            if not isinstance(debit, Decimal):
                debit = Decimal(str(debit))
            if not isinstance(credit, Decimal):
                credit = Decimal(str(credit))
            for name in names:
                res.setdefault(name, {})
                res[name].setdefault(account, Decimal('0.0'))
                if name == 'balance_dt_end':
                    # Debit side of the closing balance: active accounts
                    # always, two-sided accounts only when positive.
                    if type_balance == 'active':
                        res[name][account] = balance_end
                    elif type_balance == 'both':
                        if balance_end > 0:
                            res[name][account] = balance_end
                if name == 'balance_ct_end':
                    # Credit side: passive accounts always, two-sided
                    # accounts only when negative (sign flipped).
                    if type_balance == 'passive':
                        res[name][account] = -balance_end
                    elif type_balance == 'both':
                        if balance_end < 0:
                            res[name][account] = -balance_end
                elif name == 'balance_end':
                    res[name][account] = balance_end
                elif name == 'balance':
                    res[name][account] = balance_dt - balance_ct
                elif name == 'debit':
                    res[name][account] = debit
                elif name == 'credit':
                    res[name][account] = credit
        return res

    # This Function Test Only!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def get_balance_year(self, ids, names):
        """Draft of a yearly variant of get_balance_period.

        NOTE(review): test-only code — several paths reference undefined
        names (the multi-month branch appends to an uninitialised
        current_period list, the fetch loop reads an unbound `balance`)
        and the SQL %-formatting is applied to the last string fragment
        only.  Do not use in production without rework.
        """
        if not ids:
            return {}
        res = {}
        fiscalyear_obj = self.pool.get('ekd.fiscalyear')
        period_obj = self.pool.get('ekd.period')
        context = Transaction().context
        if context.get('current_period'):
            # Typo fix: was `period_object`, which is never defined.
            period_id = period_obj.browse(context.get('current_period'))
            start_month = period_id.start_date.strftime('%m')
            end_month = period_id.end_date.strftime('%m')
            fiscalyear = period_id.fiscalyear.id
            if start_month == end_month:
                current_period = context.get('current_period').strftime('%m')
            else:
                begin_period = False
                for month in _MOUNTH:
                    if start_month == month:
                        current_period.append(month)
                        begin_period = True
                    elif begin_period:
                        current_period.append(month)
                    elif end_month == month:
                        current_period.append(month)
                        break
        else:
            if context.get('current_fiscalyear'):
                fiscalyear = context.get('current_fiscalyear')
            else:
                fiscalyear = fiscalyear_obj.search([
                    ('company', '=', context.get('company')),
                    ('state', '=', 'current')
                    ], limit=1)
            current_period = datetime.datetime.now().strftime('%m')
        if isinstance(current_period, list):
            field_debit = []
            field_credit = []
            for month in current_period:
                field_debit.append("debit_%s" % (month))
                field_credit.append("credit_%s" % (month))
            field_debits = []
            field_credits = []
            for month in _MOUNTH:
                if month == start_month:
                    break
                field_debits.append("debit_%s" % (month))
                field_credits.append("credit_%s" % (month))
        else:
            field_debit = "debit_%s" % (current_period)
            field_credit = "credit_%s" % (current_period)
            field_debits = []
            field_credits = []
            for month in _MOUNTH:
                if month == current_period:
                    break
                field_debits.append("debit_%s" % (month))
                field_credits.append("credit_%s" % (month))
        cr = Transaction().cursor
        if isinstance(current_period, list):
            cr.execute('SELECT id, balance_dt+' + '+'.join(field_debits) + ','\
                    'balance_ct+' + '+'.join(field_credits) + ','\
                    + '+'.join(field_debit) + ' as debit,'\
                    + '+'.join(field_credit) + ' as credit'\
                    'FROM ekd_balances_party_period'\
                    'WHERE fiscalyear=%s AND account in (' + ','.join(map(str, ids)) + ')' % (field_debit, field_credit, fiscalyear))
        else:
            cr.execute('SELECT id, balance_dt+' + '+'.join(field_debits) + ','\
                    'balance_ct+' + '+'.join(field_credits) + ','\
                    '%s, %s'\
                    'FROM ekd_balances_party_period'\
                    'WHERE fiscalyear=%s AND account in (' + ','.join(map(str, ids)) + ')' % (field_debit, field_credit, fiscalyear))
        for id, balance_dt, balance_ct, debit, credit in cr.fetchall():
            # SQLite uses float for SUM
            if not isinstance(balance_dt, Decimal):
                balance_dt = Decimal(str(balance_dt))
            if not isinstance(balance_ct, Decimal):
                balance_ct = Decimal(str(balance_ct))
            if not isinstance(debit, Decimal):
                debit = Decimal(str(debit))
            if not isinstance(credit, Decimal):
                credit = Decimal(str(credit))
            for name in names:
                res.setdefault(name, {})
                res[name].setdefault(balance.id, Decimal('0.0'))
                amount_balance = Decimal('0.0')
                if not balance.amount:
                    continue
                if name == 'balance_dt_end':
                    res[name][balance.id] = balance_end
                elif name == 'balance_ct_end':
                    res[name][balance.id] = balance_end
                elif name == 'balance_end':
                    res[name][balance.id] = balance_end
                elif name == 'balance':
                    res[name][balance.id] = balance_dt - balance_ct
                elif name == 'debit':
                    res[name][balance.id] = debit
                elif name == 'credit':
                    res[name][balance.id] = credit
        return res

    # Toolbar buttons delegate to the state-transition helpers below.
    def button_done(self, ids):
        return self.close(ids)

    def button_draft(self, ids):
        return self.cancel(ids)

    def button_restore(self, ids):
        return self.restore(ids)

    def close(self, ids):
        return self.write(ids, {'state': 'done', })

    def draft(self, ids):
        return self.write(ids, {'state': 'draft', })

    def restore(self, ids):
        return self.write(ids, {'state': 'draft', })

BalanceAnalyticParty()
class BalancePartyPeriod(ModelSQL, ModelView):
    "Turnover and Balances parties (Period)"
    # One line per (analytic balance account, period): opening balances,
    # turnovers and closing balances, chained between periods via the
    # `transfer` link.
    _name = "ekd.balances.party.period"
    _description = __doc__
    _order_name = 'period.end_date'

    def get_balance_end(self, ids, names):
        """Derive closing balances from opening balances and turnovers,
        splitting debit/credit sides according to the account type."""
        if not ids:
            return {}
        res = {}
        for balance in self.browse(ids):
            type_balance = balance.account.account.type_balance
            for name in names:
                res.setdefault(name, {})
                res[name].setdefault(balance.id, Decimal('0.0'))
                amount = balance.balance_dt - balance.balance_ct + balance.debit - balance.credit
                if name == 'balance_end':
                    res[name][balance.id] = amount
                elif name == 'balance':
                    res[name][balance.id] = balance.balance_dt - balance.balance_ct
                elif name == 'balance_dt_end':
                    # Debit side: active accounts, or two-sided in debit.
                    if type_balance == 'active' or (type_balance == 'both' and amount > 0):
                        res[name][balance.id] = amount
                elif name == 'balance_ct_end':
                    # Credit side: passive accounts, or two-sided in credit.
                    if type_balance == 'passive' or (type_balance == 'both' and amount < 0):
                        res[name][balance.id] = -amount
        return res

    account = fields.Many2One('ekd.balances.party', 'Analytic Account',
            required=True, select=2, ondelete="CASCADE")
    period = fields.Many2One('ekd.period', 'Period', select=2,
            domain=[
                ('company', '=', Eval('company'))
            ],)
    balance_dt = fields.Numeric('Debit Start', digits=(16, Eval('currency_digits', 2)))
    balance_ct = fields.Numeric('Credit Start', digits=(16, Eval('currency_digits', 2)))
    debit = fields.Numeric('Debit Turnover', digits=(16, Eval('currency_digits', 2)))
    credit = fields.Numeric('Credit Turnover', digits=(16, Eval('currency_digits', 2)))
    balance_end_dt = fields.Numeric('Debit End', digits=(16, Eval('currency_digits', 2)))
    balance_end_ct = fields.Numeric('Credit End', digits=(16, Eval('currency_digits', 2)))
    balance_dt_end = fields.Function(fields.Numeric('Debit End',
            digits=(16, Eval('currency_digits', 2))), 'get_balance_end')
    balance_ct_end = fields.Function(fields.Numeric('Credit End',
            digits=(16, Eval('currency_digits', 2))), 'get_balance_end')
    currency_digits = fields.Function(fields.Integer('Currency Digits'),
            'currency_digits_get')
    dt_line = fields.Function(fields.One2Many('ekd.account.move.line', None,
            'Ref entry debit lines'), 'get_entry')
    ct_line = fields.Function(fields.One2Many('ekd.account.move.line', None,
            'Ref entry credit lines'), 'get_entry')
    state = fields.Selection([
            ('draft', 'Draft'),
            ('open', 'Open'),
            ('done', 'Closed'),
            ('deleted', 'Deleted')
            ], 'State', required=True)
    parent = fields.Many2One('ekd.balances.party.period', 'ID Parent balance')
    # Next-period line the closing balance is carried over to.
    transfer = fields.Many2One('ekd.balances.party.period', 'ID Transfer balance')
    deleted = fields.Boolean('Flag Deleting')
    active = fields.Boolean('Active')

    def __init__(self):
        super(BalancePartyPeriod, self).__init__()
        self._order.insert(0, ('period', 'DESC'))
        self._sql_constraints += [
            ('balance_party_uniq', 'UNIQUE(account,period)',\
                'period, account - must be unique per balance!'),
        ]

    def init(self, module_name):
        """Create/upgrade the table and re-seed its id sequence (shared
        id space: last digit encodes the table, step is 10)."""
        cursor = Transaction().cursor
        super(BalancePartyPeriod, self).init(module_name)
        table = TableHandler(cursor, self, module_name)
        # Inspect the current sequence counter.
        cursor.execute("SELECT last_value, increment_by FROM %s" % table.sequence_name)
        last_value, increment_by = cursor.fetchall()[0]
        # Re-seed so that new ids end with this table's digit.
        if str(last_value)[len(str(last_value)) - 1] != str(_ID_TABLES_BALANCES_PERIOD[self._table]):
            cursor.execute("SELECT setval('" + table.sequence_name + "', %s, true)" % _ID_TABLES_BALANCES_PERIOD[self._table])
        if increment_by != 10:
            cursor.execute("ALTER SEQUENCE " + table.sequence_name + " INCREMENT 10")

    def default_currency_digits(self):
        return 2

    def default_active(self):
        return True

    def default_state(self):
        return 'draft'

    def currency_digits_get(self, ids, name):
        res = {}.fromkeys(ids, 2)
        for line in self.browse(ids):
            if line.account.account.currency:
                res[line.id] = line.account.account.currency.digits or 2
        return res

    def get_entry(self, ids, name):
        """Collect the move-line ids referencing this period balance."""
        move_line = self.pool.get('ekd.account.move.line')
        move_line_analytic_dt = self.pool.get('ekd.account.move.line.analytic_dt')
        move_line_analytic_ct = self.pool.get('ekd.account.move.line.analytic_ct')
        res = {}
        for balance in self.browse(ids):
            if name == 'dt_line':
                line_analytic_dt = move_line_analytic_dt.search([('ref_period', '=', balance.id)])
                # Bug fix: the original read `line_analytic_ct` here, a name
                # that is undefined in this branch (guaranteed NameError).
                res[balance.id] = move_line_analytic_dt.read(line_analytic_dt, ['move_line'])
            elif name == 'ct_line':
                line_analytic_ct = move_line_analytic_ct.search([('ref_period', '=', balance.id)])
                res[balance.id] = move_line_analytic_ct.read(line_analytic_ct, ['move_line'])
        return res

    def set_entries_field(self, id, name, value):
        # Function fields are read-only here: setting them is a no-op.
        assert name in ('dt_line', 'ct_line', 'lines'), 'Invalid name'
        return

    def search_domain(self, domain, active_test=True):
        """Rewrite period date clauses before delegating to the parent.

        NOTE(review): the right-hand value of period.start_date /
        period.end_date clauses is replaced with `now()` regardless of
        what the caller passed — confirm this is intended.
        """
        domain_new = []
        if domain[0] == 'AND':
            for (left_val, center_val, rigth_val) in domain[1]:
                if left_val == 'period.start_date':
                    rigth_val = datetime.datetime.now()
                elif left_val == 'period.end_date':
                    rigth_val = datetime.datetime.now()
                domain_new.append((left_val, center_val, rigth_val))
        else:
            for (left_val, center_val, rigth_val) in domain:
                if left_val == 'period.start_date':
                    rigth_val = datetime.datetime.now()
                elif left_val == 'period.end_date':
                    rigth_val = datetime.datetime.now()
                domain_new.append((left_val, center_val, rigth_val))
        return super(BalancePartyPeriod, self).search_domain(domain_new, active_test=active_test)

    def transfer_balance(self, transfer_id, vals):
        """Write opening balances onto *transfer_id* and propagate the
        resulting closing balances down the transfer chain."""
        balance = self.browse(transfer_id)
        self.write(transfer_id, {
                'balance_dt': vals.get('balance_dt'),
                'balance_ct': vals.get('balance_ct'),
                })
        if balance.transfer and vals.get('transfer', True):
            self.transfer_balance(balance.transfer.id, {
                    'balance_dt': balance.balance_dt_end,
                    'balance_ct': balance.balance_ct_end,
                    'transfer': vals.get('transfer', True),
                    })

    def transfer_balances(self, vals=None):
        '''
        Carry closing balances of an account forward between periods.

        vals['periods'] -- period ids, already sorted chronologically;
        vals['company'] and vals['account'] are required.
        '''
        # Bug fix: the original guard used `and`, so vals=None crashed on
        # vals.get() and a dict missing company/account was never rejected.
        if vals is None or not vals.get('company', False) or not vals.get('account', False):
            return False
        balance_ids = {}
        for period in vals.get('periods'):
            if not balance_ids:
                # First period: just collect its balances as the source set.
                balance_ids = self.search([
                    ('period', '=', period),
                    ('account', '=', vals.get('account')),
                    ])
                continue
            for balance_id in balance_ids:
                balance_line = self.browse(balance_id)
                if balance_line.balance_dt_end or balance_line.balance_ct_end:
                    if balance_line.transfer:
                        self.transfer_balance(balance_line.transfer.id, {
                            'balance_dt': balance_line.balance_dt_end,
                            'balance_ct': balance_line.balance_ct_end,
                            })
                    else:
                        # NOTE(review): this model defines no `party` or
                        # `model_ref` fields — these clauses look carried
                        # over from another balance model; verify.
                        balance_new_id = self.search([
                            ('period', '=', period),
                            ('account', '=', vals.get('account')),
                            ('party', '=', balance_line.party.id),
                            ('model_ref', '=', balance_line.model_ref),
                            ])
                        if balance_new_id:
                            self.write(balance_line.id, {
                                    'transfer': balance_new_id,
                                    })
                            self.write(balance_new_id, {
                                    'balance_dt': balance_line.balance_dt_end,
                                    'balance_ct': balance_line.balance_ct_end,
                                    })
                        else:
                            self.write(balance_line.id, {
                                    'transfer': self.create({
                                        'company': vals.get('company'),
                                        'period': period,
                                        'account': balance_line.account.id,
                                        'party': balance_line.party.id,
                                        'model_ref': balance_line.model_ref,
                                        'balance_dt': balance_line.balance_dt_end,
                                        'balance_ct': balance_line.balance_ct_end,
                                        })
                                    })
            balance_ids = self.search([
                ('period', '=', period),
                ('account', '=', vals.get('account')),
                ])
        return True

BalancePartyPeriod()
#
# Остатки и обороты по аналитическим счетам
#
class BalancePartyYear(ModelSQL, ModelView):
    "Turnover and Balances Analytic Account Parties - FiscalYear"
    # Yearly roll-up of per-party analytic balances: opening balance plus one
    # debit/credit turnover pair per calendar month (01..12).
    _name = "ekd.balances.party.year"
    _description =__doc__
    # Link back to the per-party analytic balance this yearly summary belongs to.
    account = fields.Many2One('ekd.balances.party', 'Analytic Account', required=True, select=2, ondelete="CASCADE")
    fiscalyear = fields.Many2One('ekd.fiscalyear', 'FiscalYear', required=True)
    # Opening balance for the fiscal year (label says 'Debit Start').
    balance = fields.Numeric('Debit Start', digits=(16, Eval('currency_digits', 2)),)
    # Monthly turnovers, one debit/credit pair per month.
    debit_01 = fields.Numeric('Debit Turnover 01', digits=(16, Eval('currency_digits', 2)),)
    credit_01 = fields.Numeric('Credit Turnover 01', digits=(16, Eval('currency_digits', 2)),)
    debit_02 = fields.Numeric('Debit Turnover 02', digits=(16, Eval('currency_digits', 2)),)
    credit_02 = fields.Numeric('Credit Turnover 02', digits=(16, Eval('currency_digits', 2)),)
    debit_03 = fields.Numeric('Debit Turnover 03', digits=(16, Eval('currency_digits', 2)),)
    credit_03 = fields.Numeric('Credit Turnover 03', digits=(16, Eval('currency_digits', 2)),)
    debit_04 = fields.Numeric('Debit Turnover 04', digits=(16, Eval('currency_digits', 2)),)
    credit_04 = fields.Numeric('Credit Turnover 04', digits=(16, Eval('currency_digits', 2)),)
    debit_05 = fields.Numeric('Debit Turnover 05', digits=(16, Eval('currency_digits', 2)),)
    credit_05 = fields.Numeric('Credit Turnover 05', digits=(16, Eval('currency_digits', 2)),)
    debit_06 = fields.Numeric('Debit Turnover 06', digits=(16, Eval('currency_digits', 2)),)
    credit_06 = fields.Numeric('Credit Turnover 06', digits=(16, Eval('currency_digits', 2)),)
    debit_07 = fields.Numeric('Debit Turnover 07', digits=(16, Eval('currency_digits', 2)),)
    credit_07 = fields.Numeric('Credit Turnover 07', digits=(16, Eval('currency_digits', 2)),)
    debit_08 = fields.Numeric('Debit Turnover 08', digits=(16, Eval('currency_digits', 2)),)
    credit_08 = fields.Numeric('Credit Turnover 08', digits=(16, Eval('currency_digits', 2)),)
    debit_09 = fields.Numeric('Debit Turnover 09', digits=(16, Eval('currency_digits', 2)),)
    credit_09 = fields.Numeric('Credit Turnover 09', digits=(16, Eval('currency_digits', 2)),)
    debit_10 = fields.Numeric('Debit Turnover 10', digits=(16, Eval('currency_digits', 2)),)
    credit_10 = fields.Numeric('Credit Turnover 10', digits=(16, Eval('currency_digits', 2)),)
    debit_11 = fields.Numeric('Debit Turnover 11', digits=(16, Eval('currency_digits', 2)),)
    credit_11 = fields.Numeric('Credit Turnover 11', digits=(16, Eval('currency_digits', 2)),)
    debit_12 = fields.Numeric('Debit Turnover 12', digits=(16, Eval('currency_digits', 2)),)
    credit_12 = fields.Numeric('Credit Turnover 12', digits=(16, Eval('currency_digits', 2)),)
    # Function fields resolving the underlying move lines.
    # NOTE(review): the getter 'get_entries_field' is not defined in this
    # class — presumably inherited or patched in elsewhere; confirm.
    dt_line = fields.Function(fields.One2Many('ekd.account.move.line', None, 'Ref entry debit lines'), 'get_entries_field')
    ct_line = fields.Function(fields.One2Many('ekd.account.move.line', None, 'Ref entry credit lines'), 'get_entries_field')
    parent = fields.Many2One('ekd.balances.party.year','ID Parent balance')
    transfer = fields.Many2One('ekd.balances.party.year','ID Transfer balance')
    state = fields.Selection([
        ('draft','Draft'),
        ('open','Open'),
        ('done','Closed'),
        ('deleted','Deleted')
    ], 'State', required=True)
    deleted = fields.Boolean('Flag Deleting')
    active = fields.Boolean('Active')
    def __init__(self):
        super(BalancePartyYear, self).__init__()
    def init(self, module_name):
        """Create/upgrade the table and force a table-specific id sequence.

        Ids of the different balance tables are kept disjoint by making each
        sequence start on a distinct final digit (from
        _ID_TABLES_BALANCES_PERIOD, defined elsewhere in this module) and
        step by 10.
        """
        cursor = Transaction().cursor
        super(BalancePartyYear, self).init(module_name)
        table = TableHandler(cursor, self, module_name)
        # Check the current sequence counter.
        cursor.execute("SELECT last_value, increment_by FROM %s"%table.sequence_name)
        last_value, increment_by = cursor.fetchall()[0]
        # Re-seed the counter if its last digit does not match this table's slot.
        if str(last_value)[len(str(last_value))-1] != str(_ID_TABLES_BALANCES_PERIOD[self._table]):
            cursor.execute("SELECT setval('"+table.sequence_name+"', %s, true)"%_ID_TABLES_BALANCES_PERIOD[self._table])
        if increment_by != 10:
            cursor.execute("ALTER SEQUENCE "+table.sequence_name+" INCREMENT 10")
    def default_state(self):
        # Use the state from the transaction context when present.
        return Transaction().context.get('state') or 'draft'
    def default_active(self):
        return True
    def get_currency_digits(self, ids, name):
        """Getter for currency precision (defaults to 2).

        NOTE(review): the returned structure mixes two shapes — it is seeded
        with {record_id: 2} but then filled as
        res['currency_digits'][id] = digits.  Tryton function-field getters
        normally return {id: value}; confirm which shape callers expect.
        """
        res = {}.fromkeys(ids, 2)
        for line in self.browse(ids):
            res.setdefault('currency_digits', {})
            res['currency_digits'][line.id] = line.account.currency_digits or 2
        return res
# Register the model in the pool (Tryton 1.x-style module-level instantiation).
BalancePartyYear()
class BalanceAnalyticPartyAdd(ModelSQL, ModelView):
    "Turnover and Balances Analytic Account"
    # Re-declares _name "ekd.balances.party" to EXTEND the existing model
    # with shortcut links to its period/year summary records.
    # NOTE(review): fields.Many2One normally takes (model, label); the extra
    # 'account' positional argument here looks like a One2Many-style reverse
    # field name — verify against the framework version in use.
    _name = "ekd.balances.party"
    curr_period = fields.Many2One('ekd.balances.party.period', 'account',
            'Balances and Turnover (Current Period)')
    last_period = fields.Many2One('ekd.balances.party.period', 'account',
            'Balances and Turnover (Last Period)')
    amount_year = fields.Many2One('ekd.balances.party.year', 'account',
            'Balances and Turnover (Current FiscalYear)')
# Register the model extension in the pool.
BalanceAnalyticPartyAdd()
| [
"k-dmitry2@narod.ru"
] | k-dmitry2@narod.ru |
a9bbf18d2795d60b27491b11ee0ff250f02d671d | 088a7b8e2a1c996f8e6e19b399c226cb283afaeb | /synapse_plugin/clusterTest.py | 08cf9d4579766f19fa7fea2e8a0c256fa4997480 | [] | no_license | gddickinson/flika_plugins | 5321b7a88c3e8a684a79a83e215ca996225d4b08 | d54f1db48feaa159ea46aa4b60534405593053a3 | refs/heads/master | 2023-06-25T21:54:43.398315 | 2023-06-23T19:12:53 | 2023-06-23T19:12:53 | 173,510,496 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,910 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 8 08:54:05 2020
Synapse3D - Clustering code
@author: George
"""
import os, sys, glob
try:
from BioDocks import *
except:
from .BioDocks import *
from pyqtgraph.dockarea import *
from scipy.spatial import ConvexHull
from collections import OrderedDict
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from qtpy import QtWidgets, QtCore, QtGui
from qtpy.QtGui import *
from qtpy.QtWidgets import *
from qtpy.QtCore import *
import flika
from flika import global_vars as g
from flika.window import Window
from distutils.version import StrictVersion
import numpy as np
import pyqtgraph as pg
from pyqtgraph import mkPen
from matplotlib import pyplot as plt
import copy
import pandas as pd
from sklearn.neighbors import KDTree
import random
import time
from mpl_toolkits.mplot3d import Axes3D # <--- This is needed for newer versions of matplotlib
flika_version = flika.__version__
if StrictVersion(flika_version) < StrictVersion('0.2.23'):
from flika.process.BaseProcess import BaseProcess, SliderLabel, CheckBox, ComboBox, BaseProcess_noPriorWindow, WindowSelector, FileSelector
else:
from flika.utils.BaseProcess import BaseProcess, SliderLabel, CheckBox, ComboBox, BaseProcess_noPriorWindow, WindowSelector, FileSelector
class ClusterAnalysis:
    """DBSCAN-based cluster analysis of two-channel 3D localisation data.

    Workflow: open_file() -> getClusters() -> randomPointAnalysis() ->
    print/save helpers.  runAnalysis()/runBatch() drive the whole pipeline
    over one file or a directory tree.
    """
    def __init__(self):
        #camera option
        self.unitPerPixel = 166  # nanometres per camera pixel
        # data is loaded in nanometers, divided by # according to units
        self.units = {'Pixels': self.unitPerPixel, 'Nanometers': 1}
        self.unit_prefixes = {'Pixels': 'px', 'Nanometers': 'nm'}
        self.unit = 'Nanometers'
        self.ignore = {"Z Rejected"}  # channel names dropped on load
        #clustering option
        #self.eps = 100 #max distance between points within a cluster
        #self.min_samples = 10 #min number of points to form a cluster
        #self.maxDistance = 100 #max distance between clusters in differnt channels when forming combined ROI
        self.colors = ((255, 0, 0), (0, 255, 0))  # fallback per-channel RGB
        self.color_dict = {'atto488': self.colors[0], 'Alexa647': self.colors[1]}
        # NOTE(review): self.ignore is assigned twice with the same value;
        # this second assignment is redundant.
        self.ignore = {"Z Rejected"}
        self.Channels = []
        self.empty_channel = Channel('Empty', [], (1, 1, 1))
        #data state
        self.dataLoaded = False
        self.ROI3D_Initiated = False
        self.dataDisplayed = 'original'
        self.clustersGeneated = False
        self.clusterIndex = []
        #cluster analysis options
        self.clusterAnaysisSelection = 'All Clusters'
        self.All_ROIs_pointsList = []  # one (M, 3) point array per combined ROI
        self.channelList = []          # per-ROI array of source channel names
def open_file(self,filename=''):
    """Load a tab-separated localisation export into self.Channels.

    Prompts for a file when *filename* is empty.  Expects columns including
    'Channel Name', 'Xc', 'Yc', 'Zc'; coordinates are rescaled into the
    selected unit and points are grouped into one Channel object per channel
    name (names listed in self.ignore are dropped).

    NOTE(review): with more than two channels not covered by color_dict,
    self.colors[i] would raise IndexError — confirm inputs are two-channel.
    """
    if filename == '':
        filename = getFilename(filter='Text Files (*.txt)')
    self.clear()
    self.data = importFile(filename,evaluateLines=False)
    try:
        # Strip a stray trailing newline from the header cells.
        for i in range(len(self.data[0])):
            if '\n' in self.data[0][i]:
                self.data[0][i] = self.data[0][i].split('\n')[0]
    except:
        pass
    self.colNames = list(self.data[0])
    #filter 1000 for testing
    #self.data = self.data[0:5000]
    # Re-shape rows into a dict: column name -> column values.
    self.data = {d[0]: d[1:] for d in np.transpose(self.data)}
    for k in self.data:
        if k != 'Channel Name':
            self.data[k] = self.data[k].astype(float)
    print('Gathering channels...')
    #g.m.statusBar().showMessage('Gathering channels...')
    self.names = set(self.data['Channel Name'].astype(str)) - self.ignore
    print('Channels Found: %s' % ', '.join(self.names))
    #g.m.statusBar().showMessage('Channels Found: %s' % ', '.join(self.names))
    # Convert coordinates into the selected unit (nm by default).
    self.data['Xc'] /= self.units[self.unit]
    self.data['Yc'] /= self.units[self.unit]
    self.data['Zc'] /= self.units[self.unit]
    #global Channels
    self.Channels = []
    #self.plotWidget.clear()
    # One ActivePoint per row, carrying the full record.
    self.pts = [ActivePoint(data={k: self.data[k][i] for k in self.data}) for i in range(len(self.data['Channel Name']))]
    for i, n in enumerate(self.names):
        # Prefer the fluorophore-specific colour, fall back by position.
        if n in self.color_dict:
            color = self.color_dict[n]
        else:
            color = self.colors[i]
        self.Channels.append(Channel(n, [p for p in self.pts if p['Channel Name'] == n], color))
        #self.plotWidget.addItem(self.Channels[-1])
        #self.legend.addItem(self.Channels[-1], n)
    #self.show_ch1.setText(self.Channels[0].__name__)
    #self.show_ch2.setText(self.Channels[1].__name__)
    #self.ch1_mesh.setText(self.Channels[0].__name__)
    #self.ch2_mesh.setText(self.Channels[1].__name__)
def clear(self):
    """Drop all loaded channel data (does not reset analysis state flags)."""
    self.Channels = []
def getClusters(self):
    """Cluster both channels with DBSCAN, pair nearby clusters across the
    channels and build one combined ROI outline per pairing.

    Uses self.eps / self.min_samples for DBSCAN and self.maxDistance as the
    cross-channel pairing cutoff.  Results: per-channel label/centeroid
    attributes, and one ROI per combined cluster appended to
    self.All_ROIs_pointsList via createROIFromHull().
    """
    t = Timer()
    #get 3D points
    t.start()
    self.ch1Points_3D = self.Channels[0].getPoints(z=True)
    self.ch2Points_3D = self.Channels[1].getPoints(z=True)
    #get cluster labels for each channel
    print('--- channel 1 ---')
    self.ch1_labels,self.ch1_numClusters,self.ch1_numNoise = dbscan(self.ch1Points_3D, eps=self.eps, min_samples=self.min_samples, plot=False)
    print('--- channel 2 ---')
    self.ch2_labels,self.ch2_numClusters,self.ch2_numNoise = dbscan(self.ch2Points_3D, eps=self.eps, min_samples=self.min_samples, plot=False)
    print('-----------------')
    t.timeReport('2D clusters created')
    #get 2D points
    t.start()
    #ch1Points = self.Channels[0].getPoints(z=True)
    #ch2Points = self.Channels[1].getPoints(z=True)
    #get 3D centeroids for cluster analysis
    # NOTE(review): getHulls is called twice per channel (here and below),
    # doubling the hull computation — the second call returns the same data.
    _, self.ch1_centeroids_3D, _ = self.getHulls(self.ch1Points_3D,self.ch1_labels)
    _, self.ch2_centeroids_3D, _ = self.getHulls(self.ch2Points_3D,self.ch2_labels)
    t.timeReport('3D clusters created')
    #get hulls for each channels clusters
    t.start()
    ch1_hulls, ch1_centeroids, self.ch1_groupPoints = self.getHulls(self.ch1Points_3D,self.ch1_labels)
    #self.plotHull(self.ch1_groupPoints[0],ch1_hulls[0])
    ch2_hulls, ch2_centeroids, self.ch2_groupPoints = self.getHulls(self.ch2Points_3D,self.ch2_labels)
    #t.timeReport('hulls created')
    #combine nearest roi between channels
    #t.start()
    combinedHulls, combinedPoints, self.combined_ch1_Centeroids, self.combined_ch2_Centeroids = combineClosestHulls(ch1_hulls,ch1_centeroids,self.ch1_groupPoints,ch2_hulls,ch2_centeroids,self.ch2_groupPoints, self.maxDistance)
    #t.timeReport('new hulls created')
    #get new hulls for combined points
    #t.start()
    newHulls = self.getHulls2(combinedPoints)
    #self.plotHull(combinedPoints[0],newHulls[0])
    t.timeReport('hulls created')
    #draw rois around combined hulls
    #self.createROIFromHull(combinedPoints[0],newHulls[0])
    t.start()
    print('--- combined channels ---')
    #single thread
    for i in range(len(combinedHulls)):
        self.createROIFromHull(combinedPoints[i],newHulls[i]) ### THIS IS SLOW! ###
        print('\r', 'creating rois: {:0.2f}'.format((i/len(combinedHulls))*100),'%', end='\r', flush=True)
    # #multi-thread
    # t2 = Timer()
    # t2.start()
    # self.threadpool = QThreadPool()
    # print("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount())
    # def progress_fn(n):
    # print("%d%% done" % n)
    # def makeROIs(progress_callback):
    # for i in range(len(combinedHulls)):
    # self.createROIFromHull(combinedPoints[i],newHulls[i])
    # progress_callback.emit((i/len(combinedHulls))*100)
    # return "Done."
    # def thread_complete():
    # print("THREAD COMPLETE! - time taken:{0:1.3f}".format(t2.stop()))
    # # Pass the function to execute
    # worker = Worker(makeROIs) # Any other args, kwargs are passed
    # worker.signals.finished.connect(thread_complete)
    # worker.signals.progress.connect(progress_fn)
    # #start threads
    # self.threadpool.start(worker)
    t.timeReport('ROI created')
    #g.m.statusBar().showMessage('{} ROI created'.format(str(len(combinedHulls))))
    #self.updateClusterData()
    t.start()
    if len(combinedHulls) > 0:
        self.clustersGeneated = True
    t.timeReport('ROI data updated')
    return
def getHulls(self, points, labels):
    """Group *points* by DBSCAN *labels* and hull each cluster.

    Parameters
    ----------
    points : (N, 3) coordinate array.
    labels : (N,) DBSCAN labels; -1 marks noise and is skipped.

    Returns
    -------
    hulls : object array, one ``ConvexHull(...).simplices`` array per cluster.
    centeroids : (n_clusters, 3) array of per-cluster mean positions.
    groupPoints : object array, one (M_i, 3) point array per cluster.

    Bug fix: the original returned ``np.array(hulls)`` / ``np.array(groupPoints)``,
    which raises on NumPy >= 1.24 whenever clusters have different sizes
    (ragged lists); explicit object arrays keep "one entry per cluster"
    semantics regardless of cluster sizes.
    """
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    hulls = []
    centeroids = []
    groupPoints = []
    for i in range(n_clusters):
        clusterPoints = points[labels == i]
        groupPoints.append(clusterPoints)
        hulls.append(ConvexHull(clusterPoints).simplices)
        centeroids.append(np.average(clusterPoints, axis=0))

    def _object_array(items):
        # np.array() on ragged lists errors out, and on equal-length lists it
        # would merge everything into one 3-D array; build the object array
        # explicitly so each slot holds one per-cluster array.
        out = np.empty(len(items), dtype=object)
        for idx, item in enumerate(items):
            out[idx] = item
        return out

    return _object_array(hulls), np.array(centeroids), _object_array(groupPoints)
def getHulls2(self, pointList):
    """Return the convex-hull simplices for each point set in *pointList*."""
    return [ConvexHull(cluster).simplices for cluster in pointList]
def createROIFromHull(self,points, hull):
    #t = Timer()
    #t.start()
    '''Append an ROI outline built from *hull* (simplex index pairs into
    *points*) to self.All_ROIs_pointsList and refresh self.channelList.'''
    #make points list: both endpoints of every hull simplex
    pointsList = []
    for simplex in hull:
        pointsList.append((points[simplex][0]))
    for simplex in hull:
        pointsList.append((points[simplex][1]))
    #order points list (order_points comes from BioDocks)
    pointsList = order_points(pointsList)
    #convert list to np array
    pointsList = np.array(pointsList)
    #add create ROIs from points
    #self.plotWidget.getViewBox().createROIFromPoints(pointsList)
    #add points to All_ROI_pointsList
    self.All_ROIs_pointsList.append(pointsList)
    #make channel list for all points
    # NOTE(review): this rebuilds the channel map for EVERY ROI added, so a
    # batch of N ROIs triggers N full rebuilds (see the "THIS IS SLOW"
    # comment in getClusters); one call after the loop would suffice.
    self.makeChannelList()
    #t.timeReport('ROI made')
    return
def getRandomXYZ(self, minX, minY, minZ, maxX, maxY, maxZ, n):
    """Return *n* points drawn uniformly from the axis-aligned box
    [minX, maxX] x [minY, maxY] x [minZ, maxZ], as an (n, 3) array.

    Bug fix: the x coordinate was drawn from uniform(minX, maxY) instead of
    uniform(minX, maxX), so random x values leaked outside the data's x
    range whenever the x and y extents differed.
    """
    randomList = []
    while len(randomList) < n:
        p = np.array([random.uniform(minX, maxX),
                      random.uniform(minY, maxY),
                      random.uniform(minZ, maxZ)])
        randomList.append(p)
    return np.array(randomList)
def getNearestNeighbors(self, train, test, k=1):
    """Euclidean distances from each point in *test* to its *k* nearest
    points in *train*, flattened to a 1-D array of len(test)*k values
    (per-point distances in increasing order).

    Uses scipy's cKDTree (scipy.spatial is already a dependency of this
    module) instead of sklearn's KDTree, removing the module's only sklearn
    usage while returning identical distances.
    """
    from scipy.spatial import cKDTree
    tree = cKDTree(train)
    dist, ind = tree.query(test, k=k)
    return np.asarray(dist).reshape(-1)
def randomPointAnalysis(self):
    '''Compare cluster-centeroid spacing against random points spread over
    the same XYZ bounding box as the loaded data.

    Sets dist_clusters / dist_random (nearest-neighbour distances from ch1
    to ch2 centeroids, real and random) and distAll_clusters /
    distAll_random (distances to ALL neighbours), plus the random point
    sets ch1_random / ch2_random (one random point per real cluster).
    '''
    self.dist_clusters = self.getNearestNeighbors(self.ch1_centeroids_3D,self.ch2_centeroids_3D)
    #print(self.dist_clusters)
    #print(min(self.data['Xc']), min(self.data['Yc']), min(self.data['Zc']))
    #print(max(self.data['Xc']), max(self.data['Yc']), max(self.data['Zc']))
    # Random controls drawn inside the data's bounding box.
    self.ch1_random = self.getRandomXYZ(min(self.data['Xc']),
                min(self.data['Yc']),
                min(self.data['Zc']),
                max(self.data['Xc']),
                max(self.data['Yc']),
                max(self.data['Zc']), len(self.ch1_centeroids_3D))
    self.ch2_random = self.getRandomXYZ(min(self.data['Xc']),
                min(self.data['Yc']),
                min(self.data['Zc']),
                max(self.data['Xc']),
                max(self.data['Yc']),
                max(self.data['Zc']), len(self.ch2_centeroids_3D))
    self.dist_random = self.getNearestNeighbors(self.ch1_random,self.ch2_random)
    # k = number of train points -> distances to every neighbour.
    self.distAll_clusters = self.getNearestNeighbors(self.ch1_centeroids_3D,self.ch2_centeroids_3D, k=len(self.ch1_centeroids_3D))
    self.distAll_random = self.getNearestNeighbors(self.ch1_random,self.ch2_random, k=len(self.ch1_random))
    return
def plotClusters(self):
    '''3D scatter plots of data points with cluster labels - using matplotlib'''
    fig = plt.figure()
    ax1 = fig.add_subplot(111, projection='3d')
    # Channel 1 as circles, channel 2 as crosses; colour = DBSCAN label.
    ax1.scatter(self.ch1Points_3D[::,0], self.ch1Points_3D[::,1], self.ch1Points_3D[::,2], marker='o', c=self.ch1_labels, cmap="RdBu")
    ax1.scatter(self.ch2Points_3D[::,0], self.ch2Points_3D[::,1], self.ch2Points_3D[::,2], marker='x', c=self.ch2_labels, cmap="RdBu")
    # Top-down view (looking along -z).
    ax1.view_init(azim=0, elev=90)
    plt.show()
    return
def plot3DClusters(self):
    '''3D scatter plot using GL ScatterPlot'''
    # Plot3D_GL comes from BioDocks; opens an OpenGL scatter window for the
    # two channels' raw 3D point sets.
    plot3DScatter = Plot3D_GL(self.ch1Points_3D,self.ch2Points_3D)
    plot3DScatter.plot()
    return
def plotAnalysis(self):
    '''3D scatter plots of centeroids and histograms of distances.

    Layout: top row = real cluster centeroids plus their nearest/all
    neighbour distance histograms; bottom row = the same for the random
    control points (requires randomPointAnalysis() to have run).
    '''
    fig = plt.figure()
    ax1 = fig.add_subplot(231, projection='3d')
    ax1.scatter(self.ch1_centeroids_3D[::,0], self.ch1_centeroids_3D[::,1], self.ch1_centeroids_3D[::,2], marker='o')
    ax1.scatter(self.ch2_centeroids_3D[::,0], self.ch2_centeroids_3D[::,1], self.ch2_centeroids_3D[::,2], marker='^')
    ax1.set_title('Cluster Centeroids')
    ax1.set_xlabel('X')
    ax1.set_ylabel('Y')
    ax1.set_zlabel('Z')
    ax3 = fig.add_subplot(234, projection='3d')
    ax3.scatter(self.ch1_random[::,0], self.ch1_random[::,1], self.ch1_random[::,2], marker='o')
    ax3.scatter(self.ch2_random[::,0], self.ch2_random[::,1], self.ch2_random[::,2], marker='^')
    ax3.set_title('Random Points')
    ax3.set_xlabel('X')
    ax3.set_ylabel('Y')
    ax3.set_zlabel('Z')
    ax2 = fig.add_subplot(232)
    ax2.hist(self.dist_clusters)
    ax2.set_title('Nearest Neighbor')
    ax2.set_ylabel('# of observations')
    ax2.set_xlabel('distance')
    ax5 = fig.add_subplot(233)
    ax5.hist(self.distAll_clusters)
    ax5.set_title('All Neighbors')
    ax5.set_ylabel('# of observations')
    ax5.set_xlabel('distance')
    ax4 = fig.add_subplot(235)
    ax4.hist(self.dist_random)
    #ax4.set_title('Nearest Neighbor')
    ax4.set_ylabel('# of observations')
    ax4.set_xlabel('distance')
    ax6 = fig.add_subplot(236)
    ax6.hist(self.distAll_random)
    #ax6.set_title('All Neighbors')
    ax6.set_ylabel('# of observations')
    ax6.set_xlabel('distance')
    plt.show()
    return
def makeChannelList(self):
self.channelList = []
ch1_pts = self.Channels[0].getPoints(z=True).tolist() #cast as list to ensure logic test works
#ch2_pts = self.Channels[1].getPoints()
for roi in self.All_ROIs_pointsList:
roiList = []
for pts in roi:
if list(pts) in ch1_pts:
roiList.append(self.Channels[0].__name__)
else:
roiList.append(self.Channels[1].__name__)
self.channelList.append(np.array(roiList))
return
def analyze_roi(self, roi, channelList, roiIndex):
    '''Summarise one ROI: per-channel point counts, per-channel convex-hull
    volumes (0 when a hull cannot be built), the distance between the two
    channels' mean positions, and those mean positions themselves.

    roi         : (M, 3) point array for this ROI.
    channelList : (M,) array of channel names, parallel to *roi*.
    roiIndex    : value stored in the 'ROI #' column.
    '''
    #channels = [self.Channels[0],self.Channels[1]]
    ch1_pts = roi[channelList == self.Channels[0].__name__]
    ch2_pts = roi[channelList == self.Channels[1].__name__]
    roi_data = OrderedDict([('ROI #', roiIndex), ('Mean Distance (%s)' % self.unit_prefixes[self.unit], 0), ('%s N' % self.Channels[0].__name__, 0), \
        ('%s N' % self.Channels[1].__name__, 0), ('%s Volume (%s^3)' % (self.Channels[0].__name__, self.unit_prefixes[self.unit]), 0), ('%s Volume (%s^3)' % (self.Channels[1].__name__, self.unit_prefixes[self.unit]), 0)])
    roi_data['%s N' % self.Channels[0].__name__] = len(ch1_pts)
    roi_data['%s N' % self.Channels[1].__name__] = len(ch2_pts)
    # A 3-D hull needs at least 5 points; convex_volume (BioDocks) may still
    # fail on degenerate geometry, hence the try/except falling back to 0.
    try:
        if len(ch1_pts) >= 5:
            roi_data['%s Volume (%s^3)' % (self.Channels[0].__name__, self.unit_prefixes[self.unit])] = convex_volume(ch1_pts)
        else:
            roi_data['%s Volume (%s^3)' % (self.Channels[0].__name__, self.unit_prefixes[self.unit])] = 0
    except:
        roi_data['%s Volume (%s^3)' % (self.Channels[0].__name__, self.unit_prefixes[self.unit])] = 0
    try:
        if len(ch2_pts) >= 5:
            roi_data['%s Volume (%s^3)' % (self.Channels[1].__name__, self.unit_prefixes[self.unit])] = convex_volume(ch2_pts)
        else:
            roi_data['%s Volume (%s^3)' % (self.Channels[1].__name__, self.unit_prefixes[self.unit])] = 0
    except:
        roi_data['%s Volume (%s^3)' % (self.Channels[1].__name__, self.unit_prefixes[self.unit])] = 0
        #g.m.statusBar().showMessage('Cannot get Volume of %s in roi %d with %d points' % (ch.__name__, roi.id, ch.getCount()))
    # NOTE(review): np.average of an empty point set yields nan (with a
    # RuntimeWarning), so 'Mean Distance' is nan when a channel has no
    # points in this ROI — confirm downstream handles nan.
    roi_data['Mean Distance (%s)' % self.unit_prefixes[self.unit]] = np.linalg.norm(np.average(ch1_pts, 0) - np.average(ch2_pts, 0))
    roi_data['%s Centeroid' % (self.Channels[0].__name__)] = np.average(ch1_pts, 0)
    roi_data['%s Centeroid' % (self.Channels[1].__name__)] = np.average(ch2_pts, 0)
    return roi_data
def makeROI_DF(self):
'''pass each roi to analyze_roi(), compile resuts in table'''
dictList = []
for i in range(len(self.All_ROIs_pointsList)):
roi_data = self.analyze_roi(self.All_ROIs_pointsList[i],self.channelList[i],i)
dictList.append(roi_data)
print('\r', 'analysing rois: {:0.2f}'.format((i/len(self.All_ROIs_pointsList))*100),'%', end='\r', flush=True)
#make df
self.roiAnalysisDF = pd.DataFrame(dictList)
#print(self.roiAnalysisDF.head())
return
def saveROIAnalysis(self, savePath='',fileName=''):
    '''Build the per-ROI summary table (makeROI_DF) and write it to
    <savePath>/<fileName>_roiAnalysis.csv.'''
    self.makeROI_DF()
    saveName = os.path.join(savePath, fileName + '_roiAnalysis.csv')
    self.roiAnalysisDF.to_csv(saveName)
    print('roi analysis saved as:', saveName)
    return
def printStats(self):
    '''Print summary statistics (counts, means, standard deviations) of the
    cluster-vs-random distance sets produced by randomPointAnalysis().'''
    print('----------------------------------------------')
    print(self.clusterAnaysisSelection)
    print('----------------------------------------------')
    print('Channel 1: Number of clusters: ', str(len(self.ch1_centeroids_3D)))
    print('Channel 2: Number of clusters: ', str(len(self.ch2_centeroids_3D)))
    print('Number of nearest neighbor distances:', str(np.size(self.dist_clusters)))
    print('Mean nearest neighbor distance:', str(np.mean(self.dist_clusters)))
    print('StDev nearest neighbor distance:', str(np.std(self.dist_clusters)))
    print('Number of All distances:', str(np.size(self.distAll_clusters)))
    print('Mean All distance:', str(np.mean(self.distAll_clusters)))
    print('StDev All distance:', str(np.std(self.distAll_clusters)))
    print('----------------------------------------------')
    # Random control block mirrors the cluster block above.
    print('Random 1: Number of clusters: ', str(len(self.ch1_random)))
    print('Random 2: Number of clusters: ', str(len(self.ch2_random)))
    print('Random: Number of nearest neighbor distances:', str(np.size(self.dist_random)))
    print('Random: Mean nearest neighbor distance:', str(np.mean(self.dist_random)))
    print('Random: StDev nearest neighbor distance:', str(np.std(self.dist_random)))
    print('Random: Number of All distances:', str(np.size(self.distAll_random)))
    print('Random: Mean All distance:', str(np.mean(self.distAll_random)))
    print('Random: Stdev All distance:', str(np.std(self.distAll_random)))
    print('----------------------------------------------')
    return
def saveStats(self ,savePath='',fileName=''):
    '''Write the clustering summary statistics (same numbers printStats()
    prints) as a one-row CSV: <savePath>/<fileName>_stats.csv.'''
    d= {
        'Channel 1: Number of clusters': (len(self.ch1_centeroids_3D)),
        'Channel 2: Number of clusters': (len(self.ch2_centeroids_3D)),
        'Channel 1: Number of noise points': self.ch1_numNoise,
        'Channel 2: Number of noise points': self.ch2_numNoise,
        'Number of nearest neighbor distances': (np.size(self.dist_clusters)),
        'Mean nearest neighbor distance': (np.mean(self.dist_clusters)),
        'StDev nearest neighbor distance': (np.std(self.dist_clusters)),
        'Number of All distances': (np.size(self.distAll_clusters)),
        'Mean All distance': (np.mean(self.distAll_clusters)),
        'StDev All distance': (np.std(self.distAll_clusters)),
        'Random 1: Number of clusters': (len(self.ch1_random)),
        'Random 2: Number of clusters': (len(self.ch2_random)),
        'Random: Number of nearest neighbor distances': (np.size(self.dist_random)),
        'Random: Mean nearest neighbor distance': (np.mean(self.dist_random)),
        'Random: StDev nearest neighbor distance': (np.std(self.dist_random)),
        'Random: Number of All distances': (np.size(self.distAll_random)),
        'Random: Mean All distance': (np.mean(self.distAll_random)),
        'Random: Stdev All distance': (np.std(self.distAll_random))
    }
    # Single-row frame, hence the explicit index.
    statsDF = pd.DataFrame(data=d,index=[0])
    saveName = os.path.join(savePath, fileName + '_stats.csv')
    statsDF.to_csv(saveName)
    print('stats saved as:', saveName)
    return
def saveResults(self, savePath='',fileName=''):
'''save centeroids and distances'''
d1 = {'clusters_nearest':self.dist_clusters,'random_nearest':self.dist_random}
d2 = {'clusters_All':self.distAll_clusters,'random_All':self.distAll_random}
d3 = {'ch1_centeroids_x':self.ch1_centeroids_3D[::,0],
'ch1_centeroids_y':self.ch1_centeroids_3D[::,1],
'ch1_centeroids_z':self.ch1_centeroids_3D[::,2]}
d4 = {'ch2_centeroids_x':self.ch2_centeroids_3D[::,0],
'ch2_centeroids_y':self.ch2_centeroids_3D[::,1],
'ch2_centeroids_z':self.ch2_centeroids_3D[::,2]}
d5 = {'ch1_centeroids_rnd_x':self.ch1_random[::,0],
'ch1_centeroids_rnd_y':self.ch1_random[::,1],
'ch1_centeroids_rnd_z':self.ch1_random[::,2]}
d6 = {'ch2_centeroids_rnd_x':self.ch2_random[::,0],
'ch2_centeroids_rnd_y':self.ch2_random[::,1],
'ch2_centeroids_rnd_z':self.ch2_random[::,2]}
nearestNeighborDF = pd.DataFrame(data=d1)
allNeighborDF = pd.DataFrame(data=d2)
ch1_centeroids_clusters_DF = pd.DataFrame(data=d3)
ch2_centeroids_clusters_DF = pd.DataFrame(data=d4)
ch1_centeroids_random_DF = pd.DataFrame(data=d5)
ch2_centeroids_random_DF = pd.DataFrame(data=d6)
saveName1 = os.path.join(savePath, fileName + '_clusterAnalysis_nearestNeighbors.csv')
saveName2 = os.path.join(savePath, fileName + '_clusterAnalysis_AllNeighbors.csv')
saveName3 = os.path.join(savePath, fileName + '_ch1_clusters_centeroids.csv')
saveName4 = os.path.join(savePath, fileName + '_ch2_clusters_centeroids.csv')
saveName5 = os.path.join(savePath, fileName + '_ch1_random_centeroids.csv')
saveName6 = os.path.join(savePath, fileName + '_ch2_random_centeroids.csv')
nearestNeighborDF.to_csv(saveName1)
allNeighborDF.to_csv(saveName2)
ch1_centeroids_clusters_DF.to_csv(saveName3)
ch2_centeroids_clusters_DF.to_csv(saveName4)
ch1_centeroids_random_DF .to_csv(saveName5)
ch2_centeroids_random_DF .to_csv(saveName6)
print('nearest neighbor distances saved as:', saveName1)
print('all neighbor distances saved as:', saveName2)
print('ch1_cluster centeroids saved as:', saveName3)
print('ch1_cluster centeroids saved as:', saveName4)
print('ch1_random centeroids saved as:', saveName5)
print('ch2_random centeroids saved as:', saveName6)
def runAnalysis(self, file, eps = 100 , min_samples = 10, maxDistance = 100, pathName=''):
try:
fileName = file.split('\\')[-1].split('.')[0]
print('analysing: ', fileName)
filePath = file
savePath = pathName + r'\results'
print('save path: ', savePath, '\n')
##### RUN ####
self.name = fileName
self.eps = eps
self.min_samples = min_samples
self.maxDistance = maxDistance
self.open_file(filename=filePath)
self.getClusters()
#self.plotClusters()
#self.plot3DClusters()
self.randomPointAnalysis()
#self.plotAnalysis()
self.printStats()
self.saveResults(savePath, fileName=fileName)
self.saveStats(savePath, fileName=fileName)
self.saveROIAnalysis(savePath, fileName=fileName)
except:
print('skipped: ',fileName)
print ('finished analysing: ', fileName)
return
def runBatch(self, pathName, eps = 100 , min_samples = 10, maxDistance = 100, test=False):
'''run all txt files in folder'''
files = [f for f in glob.glob(pathName + "**/*.txt", recursive=True)]
#FOR TESTING - JUST FIRST FILE IN LIST
if test:
files=[files[0]]
for file in files:
self.runAnalysis(file, eps, min_samples, maxDistance, pathName)
print('batch finished!')
return
# Module-level singleton used by the ad-hoc test harness below.
clusterAnalysis = ClusterAnalysis()
###################################################################################################
################### Testing ##############################################################
###################################################################################################
#clustering option
eps = 100 #max distance between points within a cluster
min_samples = 20 #min number of points to form a cluster
maxDistance = 300 #max distance between clusters in different channels when forming combined ROI
#pathName = r"C:\Google Drive\fromIan_batchProcess"
# NOTE(review): machine-specific path, and this runs at import time — the
# batch starts whenever the module is imported; guard with __main__ if that
# is unintended.
pathName = r"C:\Users\George\Desktop\ianS-synapse"
clusterAnalysis.runBatch(pathName, eps, min_samples, maxDistance, test=True)
| [
"george.dickinson@gmail.com"
] | george.dickinson@gmail.com |
a554b9d4babd5f1a1082fb96b093df5f78bfe006 | b9a1be2835bf81a59c46220569b32cfeb9535822 | /MusicSegment/JSRanker/parametersInCondition2.py | d9550cbd7c5658c39bd81bd49878ca2a83333009 | [] | no_license | gifts1912/Project | 838b9c52eb7564679969ecc44933296fa46401ab | bf486420e6ec9f54420747481f7b0fbe60bc7825 | refs/heads/master | 2021-01-12T16:16:19.333663 | 2017-03-31T12:51:52 | 2017-03-31T12:51:52 | 71,970,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,485 | py | import pandas as pd
import numpy as np
def loadJSLog(jsLogFile, queryUrlFeas):
    """Parse a JS ranker log file into queryUrlFeas[query][url] = [4 features].

    Each line is "<id>\\t<query>\\t<log>", where <log> is a ';'-separated
    list of "<url>\\t<f1>\\t<f2>\\t<f3>\\t<f4>" entries.  Queries and urls
    are lower-cased and a trailing '/' is stripped from urls.  Malformed
    lines/entries are skipped.
    """
    with open(jsLogFile, 'r', encoding='utf-8') as fr:
        for line in fr:
            arr = line.split('\t', 2)
            # Consistency fix: queryLevelLoad guards short lines, but this
            # function previously raised IndexError on them.
            if len(arr) != 3:
                continue
            query = arr[1].strip().lower()
            log = arr[2].strip().rstrip(';').strip()
            if log == "":
                continue
            if query not in queryUrlFeas:
                queryUrlFeas[query] = {}
            for urlFea in log.split(';'):
                urlFeaArr = urlFea.strip().split('\t')
                if len(urlFeaArr) != 5:
                    print("len(urlFeaArr)!=5", query, urlFeaArr)
                    continue
                url = str(urlFeaArr[0]).lower().strip().rstrip('/')
                queryUrlFeas[query][url] = urlFeaArr[1:]
def loadOfflineFeatures(offQueryUrlFeas, offlineRankerIn = "C:/Code/data/offlineRankerIn.tsv"):
    """Parse the offline ranker input TSV into
    offQueryUrlFeas[query][url] = [DRScore, Entity, Intent, Constraint]
    (feature values kept as raw strings; query/url lower-cased, trailing
    '/' stripped from urls)."""
    with open(offlineRankerIn, 'r', encoding='utf-8') as fr:
        header = fr.readline().strip().split('\t')
        query_col = header.index("m:Query")
        url_col = header.index("m:Url")
        feature_cols = [header.index(name) for name in
                        ("DRScore", "EntityMatchThreshold",
                         "IntentMatchThreshold", "ConstraintMatchThreshold")]
        for raw in fr:
            cells = raw.strip().split('\t')
            query = cells[query_col].lower().strip()
            url = cells[url_col].lower().strip().rstrip('/')
            offQueryUrlFeas.setdefault(query, {})[url] = [cells[c] for c in feature_cols]
def loadRD2Query(difQueryFile="C:/Code/data/diff2Query.tsv"):
    """Return the set of (stripped) queries listed one per line in
    *difQueryFile*.

    Generalized: the previously hard-coded path is now a parameter whose
    default is the original path, so existing callers keep working.
    """
    difQuerySet = set()
    with open(difQueryFile, 'r', encoding='utf-8') as fr:
        for line in fr:
            difQuerySet.add(line.strip())
    return difQuerySet
def similarityCompute(queryUrlFeas, offQueryUrlFeas, difQueryFile = "C:/Code/data/queryListWithDiffFea.tsv"):
    """For query/url pairs present in both logs (restricted to the RD2
    'diff' query list), print how often each of the four features agrees:
    one "total matched per-feature-count ratio" line per feature.

    NOTE(review): the difQueryFile parameter is unused — the diff query
    list always comes from loadRD2Query()'s default file; confirm intent.
    """
    # Idiom fix: removed the dead `difQuerySet = set()` that was immediately
    # overwritten, and the unused `com_num` counter.
    difQuerySet = loadRD2Query()
    all_num = 0
    com_fea = [0] * 4
    for query, urlCons in offQueryUrlFeas.items():
        if query not in queryUrlFeas:
            continue
        if query not in difQuerySet:
            continue
        for url, con in urlCons.items():
            if url not in queryUrlFeas[query]:
                continue
            all_num += 1
            for i in range(4):
                if con[i] == queryUrlFeas[query][url][i]:
                    com_fea[i] += 1
    if all_num != 0:
        for i in range(4):
            print(all_num, com_fea[i], float(com_fea[i]) / all_num)
    else:
        print("all_num is 0")
def queryLevelLoad(JSLog, queryFeaJS):
    """Read the JS log and record one raw feature string per query:
    queryFeaJS[query] = <log tail, trailing ';' stripped>.  Lines without
    three tab-separated fields are skipped."""
    with open(JSLog, 'r', encoding='utf-8') as handle:
        for raw in handle:
            parts = raw.strip().split('\t', 2)
            if len(parts) != 3:
                continue
            queryFeaJS[parts[1].strip().lower()] = parts[2].strip().rstrip(';').strip()
def queryLevelOffLoad(offlineLog, queryFeaOff):
    """Read the offline ranker TSV with pandas and record the
    ConstraintMatchThreshold (stringified, stripped) per lower-cased query."""
    frame = pd.read_csv(offlineLog, sep='\t', header=0)
    subset = frame[['m:Query', 'm:Url', 'ConstraintMatchThreshold']]
    for query_cell, _url, threshold in subset.values:
        queryFeaOff[str(query_cell).strip().lower()] = str(threshold).strip()
def similarityQueryLevel(JSLog = "C:/Code/data/JSLog.tsv", offlineLog = "C:/Code/data/offlineRankerIn.tsv"):
    """Load both logs at query level and print how many shared queries have
    an identical feature string in each: "<shared> <matching> <ratio>".
    Prints nothing when the logs share no queries."""
    queryFeaJS = {}
    queryFeaOff = {}
    queryLevelLoad(JSLog, queryFeaJS)
    queryLevelOffLoad(offlineLog, queryFeaOff)
    shared = 0
    matching = 0
    for query, fea in queryFeaJS.items():
        if query not in queryFeaOff:
            continue
        shared += 1
        if fea == queryFeaOff[query]:
            matching += 1
    if shared != 0:
        print(shared, matching, float(matching) / shared)
def main():
    """Entry point: compare per-(query, url) ranker features between a JS
    ranker log and the offline L3 ranker input (hard-coded local paths)."""
    jsLogFile = "C:/Code/Module/JSRankerEvaluation/o2_25933d37-7d7e-4eab-ab1f-623f04a96a33/25933d37-7d7e-4eab-ab1f-623f04a96a33"
    queryUrlFeas = {}
    loadJSLog(jsLogFile, queryUrlFeas)
    offQueryUrlFeas = {}
    loadOfflineFeatures(offQueryUrlFeas, 'C:/Code/data/offlineL3RankerIn.tsv')
    similarityCompute(queryUrlFeas, offQueryUrlFeas)

if __name__ == "__main__":
    main()
| [
"hengyliu@hotmail.com"
] | hengyliu@hotmail.com |
5fce573ea3661fce8d1a9b4e1d79e857cae2283c | d380e7dfd4c9bb5fd9f238ca48be9f807b4f5db3 | /codes_auto/322.coin-change.py | 4c56b1aff7ec2f295ad97e11aba197fb8b079495 | [] | no_license | zhanjw/leetcode | 98c3d3fda23f100354b9ca030c47a854d3c207f0 | 2e62568dd9c6ce0bff4b0ca888ffff29f5a6feef | refs/heads/master | 2022-12-14T12:49:23.797498 | 2020-09-18T14:31:30 | 2020-09-18T14:31:30 | 296,640,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | #
# @lc app=leetcode.cn id=322 lang=python
#
# [322] coin-change
#
class Solution(object):
    def coinChange(self, coins, amount):
        """Minimum number of coins from *coins* needed to sum to *amount*.

        :type coins: List[int]
        :type amount: int
        :rtype: int  (-1 when the amount cannot be formed; 0 for amount 0)

        Classic bottom-up DP, O(amount * len(coins)) time, O(amount) space.
        Rewritten from the original truthiness-based version: using None/0
        as "unreachable" markers made dp[0] falsy and required a redundant
        `idx % coin == 0` branch to compensate; an infinity sentinel avoids
        both pitfalls and no longer divides by zero when a coin is 0.
        """
        INF = float('inf')
        # dp[v] = fewest coins summing exactly to v; dp[0] = 0 coins.
        dp = [0] + [INF] * amount
        for value in range(1, amount + 1):
            for coin in coins:
                if 0 < coin <= value and dp[value - coin] + 1 < dp[value]:
                    dp[value] = dp[value - coin] + 1
        return dp[amount] if dp[amount] != INF else -1
# @lc code=end | [
"imzhanjw@gmail.com"
] | imzhanjw@gmail.com |
2418f6b9a937b1aafba618bd23e2abbe26bdc820 | 1bfad01139237049eded6c42981ee9b4c09bb6de | /RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/vxlan.py | af7f48ae0762cce9d44716cc226b09817ae57698 | [
"MIT"
] | permissive | kakkotetsu/IxNetwork | 3a395c2b4de1488994a0cfe51bca36d21e4368a5 | f9fb614b51bb8988af035967991ad36702933274 | refs/heads/master | 2020-04-22T09:46:37.408010 | 2019-02-07T18:12:20 | 2019-02-07T18:12:20 | 170,284,084 | 0 | 0 | MIT | 2019-02-12T08:51:02 | 2019-02-12T08:51:01 | null | UTF-8 | Python | false | false | 30,111 | py |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Vxlan(Base):
    """The Vxlan class encapsulates a user managed vxlan node in the ixnetwork hierarchy.

    An instance of the class can be obtained by accessing the Vxlan property from a parent instance.
    The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
    The internal properties list can be managed by the user by using the add and remove methods.
    """

    # NOTE(review): this class is auto-generated REST-binding code. Two
    # consequences for maintainers:
    #   1. Several action methods below are defined more than once under the
    #      same name; Python has no overloading, so only the LAST definition
    #      of each name survives (the earlier ones are shadowed).
    #   2. Many methods pass locals() straight to the server as the request
    #      payload, so parameter names are part of the wire protocol --
    #      do not rename parameters or introduce extra locals before the
    #      payload is captured.

    # Server-side data-model name for this node type.
    _SDM_NAME = 'vxlan'

    def __init__(self, parent):
        super(Vxlan, self).__init__(parent)

    # ---- Child-node accessors -------------------------------------------
    # Each property returns a child binding object; the child module is
    # imported lazily inside the property (presumably to avoid circular
    # imports at module load time -- TODO confirm against the generator).

    @property
    def Bfdv4Interface(self):
        """An instance of the Bfdv4Interface class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bfdv4interface.Bfdv4Interface)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bfdv4interface import Bfdv4Interface
        return Bfdv4Interface(self)

    @property
    def CfmMp(self):
        """An instance of the CfmMp class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.cfmmp.CfmMp)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.cfmmp import CfmMp
        return CfmMp(self)

    @property
    def Connector(self):
        """An instance of the Connector class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector.Connector)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector import Connector
        return Connector(self)

    @property
    def Ethernet(self):
        """An instance of the Ethernet class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ethernet.Ethernet)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ethernet import Ethernet
        return Ethernet(self)

    @property
    def Ipv4Loopback(self):
        """An instance of the Ipv4Loopback class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ipv4loopback.Ipv4Loopback)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ipv4loopback import Ipv4Loopback
        return Ipv4Loopback(self)

    @property
    def Ipv6Loopback(self):
        """An instance of the Ipv6Loopback class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ipv6loopback.Ipv6Loopback)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ipv6loopback import Ipv6Loopback
        return Ipv6Loopback(self)

    @property
    def LdpBasicRouter(self):
        """An instance of the LdpBasicRouter class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldpbasicrouter.LdpBasicRouter)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldpbasicrouter import LdpBasicRouter
        return LdpBasicRouter(self)

    @property
    def LdpBasicRouterV6(self):
        """An instance of the LdpBasicRouterV6 class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldpbasicrouterv6.LdpBasicRouterV6)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldpbasicrouterv6 import LdpBasicRouterV6
        return LdpBasicRouterV6(self)

    @property
    def LdpTargetedRouter(self):
        """An instance of the LdpTargetedRouter class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldptargetedrouter.LdpTargetedRouter)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldptargetedrouter import LdpTargetedRouter
        return LdpTargetedRouter(self)

    @property
    def LdpTargetedRouterV6(self):
        """An instance of the LdpTargetedRouterV6 class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldptargetedrouterv6.LdpTargetedRouterV6)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldptargetedrouterv6 import LdpTargetedRouterV6
        return LdpTargetedRouterV6(self)

    @property
    def LearnedInfo(self):
        """An instance of the LearnedInfo class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo.LearnedInfo)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo import LearnedInfo
        return LearnedInfo(self)

    @property
    def VxlanStaticInfo(self):
        """An instance of the VxlanStaticInfo class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.vxlanstaticinfo.VxlanStaticInfo)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.vxlanstaticinfo import VxlanStaticInfo
        # Unlike the other child accessors, this one is pre-selected on return.
        return VxlanStaticInfo(self)._select()

    # ---- Attribute accessors --------------------------------------------
    # Thin wrappers over the generic _get_attribute/_set_attribute calls;
    # the string argument is the server-side attribute name.

    @property
    def ConnectedVia(self):
        """List of layers this layer used to connect to the wire

        Returns:
            list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])
        """
        return self._get_attribute('connectedVia')

    @ConnectedVia.setter
    def ConnectedVia(self, value):
        self._set_attribute('connectedVia', value)

    @property
    def Count(self):
        """Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group

        Returns:
            number
        """
        return self._get_attribute('count')

    @property
    def DescriptiveName(self):
        """Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but maybe offers more context

        Returns:
            str
        """
        return self._get_attribute('descriptiveName')

    @property
    def EnableStaticInfo(self):
        """If true, VXLAN will use unicast entries for VTEP information instead of multicast learning.

        Returns:
            bool
        """
        return self._get_attribute('enableStaticInfo')

    @EnableStaticInfo.setter
    def EnableStaticInfo(self, value):
        self._set_attribute('enableStaticInfo', value)

    @property
    def Errors(self):
        """A list of errors that have occurred

        Returns:
            list(dict(arg1:str[None|/api/v1/sessions/1/ixnetwork/?deepchild=*],arg2:list[str]))
        """
        return self._get_attribute('errors')

    @property
    def ExternalLearning(self):
        """If true, VXLAN will use information received from another protocol which will handle the learning mechanism.

        Returns:
            bool
        """
        return self._get_attribute('externalLearning')

    @ExternalLearning.setter
    def ExternalLearning(self, value):
        self._set_attribute('externalLearning', value)

    @property
    def Ipv4_multicast(self):
        """IPv4 Multicast Address.

        Returns:
            obj(ixnetwork_restpy.multivalue.Multivalue)
        """
        return self._get_attribute('ipv4_multicast')

    @property
    def Multiplier(self):
        """Number of layer instances per parent instance (multiplier)

        Returns:
            number
        """
        return self._get_attribute('multiplier')

    @Multiplier.setter
    def Multiplier(self, value):
        self._set_attribute('multiplier', value)

    @property
    def Name(self):
        """Name of NGPF element, guaranteed to be unique in Scenario

        Returns:
            str
        """
        return self._get_attribute('name')

    @Name.setter
    def Name(self, value):
        self._set_attribute('name', value)

    @property
    def OvsdbConnectorMultiplier(self):
        """Ovsdb to Vxlan multiplier, when part of OVSDB Server stack.

        Returns:
            number
        """
        return self._get_attribute('ovsdbConnectorMultiplier')

    @OvsdbConnectorMultiplier.setter
    def OvsdbConnectorMultiplier(self, value):
        self._set_attribute('ovsdbConnectorMultiplier', value)

    @property
    def RunningMode(self):
        """There will be different behaviours based on role (normal=0, ovsdb controller stack=1, bfd stack=2.

        Returns:
            str(none|ovsdbControllerBfdStack|ovsdbStack)
        """
        return self._get_attribute('runningMode')

    @RunningMode.setter
    def RunningMode(self, value):
        self._set_attribute('runningMode', value)

    @property
    def SessionStatus(self):
        """Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.

        Returns:
            list(str[down|notStarted|up])
        """
        return self._get_attribute('sessionStatus')

    @property
    def StackedLayers(self):
        """List of secondary (many to one) child layer protocols

        Returns:
            list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])
        """
        return self._get_attribute('stackedLayers')

    @StackedLayers.setter
    def StackedLayers(self, value):
        self._set_attribute('stackedLayers', value)

    @property
    def StateCounts(self):
        """A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up

        Returns:
            dict(total:number,notStarted:number,down:number,up:number)
        """
        return self._get_attribute('stateCounts')

    @property
    def StaticInfoCount(self):
        """number of unicast VTEP

        Returns:
            number
        """
        return self._get_attribute('staticInfoCount')

    @StaticInfoCount.setter
    def StaticInfoCount(self, value):
        self._set_attribute('staticInfoCount', value)

    @property
    def Status(self):
        """Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.

        Returns:
            str(configured|error|mixed|notStarted|started|starting|stopping)
        """
        return self._get_attribute('status')

    @property
    def Vni(self):
        """VXLAN Network Identifier.

        Returns:
            obj(ixnetwork_restpy.multivalue.Multivalue)
        """
        return self._get_attribute('vni')

    # ---- CRUD helpers ----------------------------------------------------

    def add(self, ConnectedVia=None, EnableStaticInfo=None, ExternalLearning=None, Multiplier=None, Name=None, OvsdbConnectorMultiplier=None, RunningMode=None, StackedLayers=None, StaticInfoCount=None):
        """Adds a new vxlan node on the server and retrieves it in this instance.

        Args:
            ConnectedVia (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of layers this layer used to connect to the wire
            EnableStaticInfo (bool): If true, VXLAN will use unicast entries for VTEP information instead of multicast learning.
            ExternalLearning (bool): If true, VXLAN will use information received from another protocol which will handle the learning mechanism.
            Multiplier (number): Number of layer instances per parent instance (multiplier)
            Name (str): Name of NGPF element, guaranteed to be unique in Scenario
            OvsdbConnectorMultiplier (number): Ovsdb to Vxlan multiplier, when part of OVSDB Server stack.
            RunningMode (str(none|ovsdbControllerBfdStack|ovsdbStack)): There will be different behaviours based on role (normal=0, ovsdb controller stack=1, bfd stack=2.
            StackedLayers (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of secondary (many to one) child layer protocols
            StaticInfoCount (number): number of unicast VTEP

        Returns:
            self: This instance with all currently retrieved vxlan data using find and the newly added vxlan data available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() captures exactly the keyword arguments above and is
        # forwarded verbatim as the creation payload.
        return self._create(locals())

    def remove(self):
        """Deletes all the vxlan data in this instance from server.

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()

    def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, EnableStaticInfo=None, Errors=None, ExternalLearning=None, Multiplier=None, Name=None, OvsdbConnectorMultiplier=None, RunningMode=None, SessionStatus=None, StackedLayers=None, StateCounts=None, StaticInfoCount=None, Status=None):
        """Finds and retrieves vxlan data from the server.

        All named parameters support regex and can be used to selectively retrieve vxlan data from the server.
        By default the find method takes no parameters and will retrieve all vxlan data from the server.

        Args:
            ConnectedVia (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of layers this layer used to connect to the wire
            Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
            DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but maybe offers more context
            EnableStaticInfo (bool): If true, VXLAN will use unicast entries for VTEP information instead of multicast learning.
            Errors (list(dict(arg1:str[None|/api/v1/sessions/1/ixnetwork/?deepchild=*],arg2:list[str]))): A list of errors that have occurred
            ExternalLearning (bool): If true, VXLAN will use information received from another protocol which will handle the learning mechanism.
            Multiplier (number): Number of layer instances per parent instance (multiplier)
            Name (str): Name of NGPF element, guaranteed to be unique in Scenario
            OvsdbConnectorMultiplier (number): Ovsdb to Vxlan multiplier, when part of OVSDB Server stack.
            RunningMode (str(none|ovsdbControllerBfdStack|ovsdbStack)): There will be different behaviours based on role (normal=0, ovsdb controller stack=1, bfd stack=2.
            SessionStatus (list(str[down|notStarted|up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
            StackedLayers (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of secondary (many to one) child layer protocols
            StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
            StaticInfoCount (number): number of unicast VTEP
            Status (str(configured|error|mixed|notStarted|started|starting|stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.

        Returns:
            self: This instance with matching vxlan data retrieved from the server available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # As in add(), locals() is the select payload.
        return self._select(locals())

    def read(self, href):
        """Retrieves a single instance of vxlan data from the server.

        Args:
            href (str): An href to the instance to be retrieved

        Returns:
            self: This instance with the vxlan data from the server available through an iterator or index

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)

    def get_device_ids(self, PortNames=None, Ipv4_multicast=None, Vni=None):
        """Base class infrastructure that gets a list of vxlan device ids encapsulated by this object.

        Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.

        Args:
            PortNames (str): optional regex of port names
            Ipv4_multicast (str): optional regex of ipv4_multicast
            Vni (str): optional regex of vni

        Returns:
            list(int): A list of device ids that meets the regex criteria provided in the method parameters

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._get_ngpf_device_ids(locals())

    # ---- Server-side actions --------------------------------------------
    # NOTE(review): each overload group below re-defines the same method
    # name; only the LAST definition of each name is retained by Python.

    def ClearAllLearnedInfo(self):
        """Executes the clearAllLearnedInfo operation on the server.

        Clear All Learned Info

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('ClearAllLearnedInfo', payload=locals(), response_object=None)

    def ClearAllLearnedInfo(self, SessionIndices):
        """Executes the clearAllLearnedInfo operation on the server.

        Clear All Learned Info

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
            SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('ClearAllLearnedInfo', payload=locals(), response_object=None)

    def ClearAllLearnedInfo(self, SessionIndices):
        """Executes the clearAllLearnedInfo operation on the server.

        Clear All Learned Info

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
            SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('ClearAllLearnedInfo', payload=locals(), response_object=None)

    def ClearAllLearnedInfoInClient(self, Arg2):
        """Executes the clearAllLearnedInfoInClient operation on the server.

        Clears ALL info from GUI grid for the selected VXLAN interfaces.

        Args:
            Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
            Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.

        Returns:
            list(str): ID to associate each async action invocation

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self.href
        return self._execute('ClearAllLearnedInfoInClient', payload=locals(), response_object=None)

    def FetchAndUpdateConfigFromCloud(self, Mode):
        """Executes the fetchAndUpdateConfigFromCloud operation on the server.

        Args:
            Arg1 (str(None|/api/v1/sessions/1/ixnetwork/globals?deepchild=*|/api/v1/sessions/1/ixnetwork/topology?deepchild=*)): The method internally sets Arg1 to the current href for this instance
            Mode (str):

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self.href
        return self._execute('FetchAndUpdateConfigFromCloud', payload=locals(), response_object=None)

    def GetVXLANLearnedInfo(self):
        """Executes the getVXLANLearnedInfo operation on the server.

        Get Learned Info

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('GetVXLANLearnedInfo', payload=locals(), response_object=None)

    def GetVXLANLearnedInfo(self, SessionIndices):
        """Executes the getVXLANLearnedInfo operation on the server.

        Get Learned Info

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
            SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('GetVXLANLearnedInfo', payload=locals(), response_object=None)

    def GetVXLANLearnedInfo(self, SessionIndices):
        """Executes the getVXLANLearnedInfo operation on the server.

        Get Learned Info

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
            SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('GetVXLANLearnedInfo', payload=locals(), response_object=None)

    def GetVXLANLearnedInfo(self, Arg2):
        """Executes the getVXLANLearnedInfo operation on the server.

        Gets VTEP Info learnt by this VXLAN entity.

        Args:
            Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
            Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.

        Returns:
            list(str): ID to associate each async action invocation

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self.href
        return self._execute('GetVXLANLearnedInfo', payload=locals(), response_object=None)

    def RestartDown(self):
        """Executes the restartDown operation on the server.

        Stop and start interfaces and sessions that are in Down state.

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('RestartDown', payload=locals(), response_object=None)

    def RestartDown(self, SessionIndices):
        """Executes the restartDown operation on the server.

        Stop and start interfaces and sessions that are in Down state.

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
            SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('RestartDown', payload=locals(), response_object=None)

    def RestartDown(self, SessionIndices):
        """Executes the restartDown operation on the server.

        Stop and start interfaces and sessions that are in Down state.

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
            SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('RestartDown', payload=locals(), response_object=None)

    def Start(self):
        """Executes the start operation on the server.

        Start selected protocols.

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('Start', payload=locals(), response_object=None)

    def Start(self, SessionIndices):
        """Executes the start operation on the server.

        Start selected protocols.

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
            SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('Start', payload=locals(), response_object=None)

    def Start(self, SessionIndices):
        """Executes the start operation on the server.

        Start selected protocols.

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
            SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('Start', payload=locals(), response_object=None)

    def Stop(self):
        """Executes the stop operation on the server.

        Stop selected protocols.

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('Stop', payload=locals(), response_object=None)

    def Stop(self, SessionIndices):
        """Executes the stop operation on the server.

        Stop selected protocols.

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
            SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('Stop', payload=locals(), response_object=None)

    def Stop(self, SessionIndices):
        """Executes the stop operation on the server.

        Stop selected protocols.

        Args:
            Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
            SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self
        return self._execute('Stop', payload=locals(), response_object=None)
| [
"hubert.gee@keysight.com"
] | hubert.gee@keysight.com |
58a2c9b61b424ef7dddb020d73ee279d49b0f40f | 9225ad5fb5dd92af547f4c4e04874bc812620d04 | /0.Dev Training/1.Junior/1. Base Demo/9.生成可控的随机数据集合/sample.py | b3e513365dfc3f9dd87100a93420470389f4bc38 | [] | no_license | skynimrod/dt_python | 6fb50d354d3e8cef995edc459ef45fe42b234c48 | bd822140634ae56d1f2331bde9877c871f62507a | refs/heads/master | 2021-05-16T06:55:32.840279 | 2017-09-15T04:11:10 | 2017-09-15T04:11:10 | 103,612,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | import pylab
import random
# Number of random samples to draw for the histogram.
SAMPLE_SIZE = 100

# Seed the random generator; with no argument, the current system time
# (or OS entropy source) is used, so each run produces a different sample.
random.seed()

# Pick SAMPLE_SIZE uniform values from [0, 1).
# (The redundant `real_rand_vars = []` initialization that was immediately
# overwritten has been removed.)
# NOTE: xrange is Python 2 only; use range under Python 3.
real_rand_vars = [random.random() for val in xrange(SAMPLE_SIZE)]

# Create a histogram from the data in 10 buckets.
pylab.hist(real_rand_vars, 10)

# Define x and y labels, then show the figure.
pylab.xlabel("Number range")
pylab.ylabel("Count")
pylab.show()
"adamswang_2000@aliyun.com"
] | adamswang_2000@aliyun.com |
11ba134e158504fdf01a45fc99d4732128a6012d | 57b4d38c1e81fae68a50133f74ca05126909ba10 | /app/tests/healthy/test_views.py | 3fb5f22ad03d25cf5123c5ddb48f1dc6d4abbe86 | [] | no_license | frankRose1/flask-api-boilerplate | edb06a339f312e5a202d2ff38a8304e7b3c5ab6e | 3b23bd337a49fee0d7666c89d9fb1fa14f4602c9 | refs/heads/master | 2022-01-22T03:15:25.243212 | 2019-11-09T02:30:38 | 2019-11-09T02:30:38 | 205,987,266 | 0 | 0 | null | 2022-01-06T22:38:10 | 2019-09-03T04:12:15 | Python | UTF-8 | Python | false | false | 282 | py | from flask import url_for
from lib.tests import ViewTestMixin
class TestHealthy(ViewTestMixin):
    """Integration tests for the health-check endpoint."""

    def test_healthy_response(self):
        """The health endpoint should answer with HTTP 200."""
        res = self.client.get(url_for('HealthyView:get'))
        assert 200 == res.status_code
| [
"frank.rosendorf1@gmail.com"
] | frank.rosendorf1@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.