blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
335a6acaca425ac8950b4095a444908a724f8a8f | 07d73cb816ad1d35c7a96935ed8d4c9fad9e03da | /{{cookiecutter.project_name}}/bin/update | 849d75d4da1ad611ee60f500ae7f193f7a510c82 | [
"MIT",
"Unlicense"
] | permissive | mkell43/template-python | a11eb8294d916567e5d50ff34abe2e5eab943931 | 3efcb0a5837cfe5b53c5a7761732df916ed64bd7 | refs/heads/main | 2023-07-16T02:56:55.203305 | 2021-08-18T18:58:35 | 2021-08-18T18:58:35 | 398,657,388 | 0 | 0 | Unlicense | 2021-08-21T21:13:27 | 2021-08-21T21:13:26 | null | UTF-8 | Python | false | false | 2,051 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import importlib
import tempfile
import shutil
import subprocess
import sys
CWD = os.getcwd()
TMP = tempfile.gettempdir()
CONFIG = {
"full_name": "{{ cookiecutter.full_name }}",
"email": "{{ cookiecutter.email }}",
"github_username": "{{ cookiecutter.github_username }}",
"github_repo": "{{ cookiecutter.github_repo }}",
"default_branch": "{{ cookiecutter.default_branch }}",
"project_name": "{{ cookiecutter.project_name }}",
"package_name": "{{ cookiecutter.package_name }}",
"project_short_description": "{{ cookiecutter.project_short_description }}",
"python_major_version": {{ cookiecutter.python_major_version }},
"python_minor_version": {{ cookiecutter.python_minor_version }},
}
def install(package='cookiecutter'):
    """Ensure *package* is importable, installing it via pip if missing.

    Uses the running interpreter's pip so the install lands in the same
    environment this script runs in.
    """
    try:
        importlib.import_module(package)
    except ImportError:
        # Was hard-coded to "Installing cookiecutter" regardless of the
        # package actually being installed.
        print("Installing " + package)
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
def run():
    """Regenerate the project from the upstream cookiecutter template.

    Runs inside the system temp dir (note: chdir is a lasting side effect)
    so the generated tree lands under TMP for copy() to pick files from.
    """
    print("Generating project")
    from cookiecutter.main import cookiecutter
    os.chdir(TMP)
    cookiecutter(
        'https://github.com/jacebrowning/template-python.git',
        no_input=True,
        overwrite_if_exists=True,
        extra_context=CONFIG,  # pre-fills all template prompts
    )
def copy():
    """Copy template-managed config files from the generated project to CWD.

    Only the files listed below are refreshed; project source is untouched.
    """
    for filename in [
        '.coveragerc',
        '.gitattributes',
        '.gitignore',
        '.isort.cfg',
        '.mypy.ini',
        '.pydocstyle.ini',
        '.pylint.ini',
        '.scrutinizer.yml',
        '.verchew.ini',
        'CONTRIBUTING.md',
        'Makefile',
        os.path.join('bin', 'checksum'),
        os.path.join('bin', 'open'),
        os.path.join('bin', 'update'),
        os.path.join('bin', 'verchew'),
        'pytest.ini',
        'scent.py',
    ]:
        # Source lives under the project run() just generated in TMP.
        src = os.path.join(TMP, CONFIG['project_name'], filename)
        dst = os.path.join(CWD, filename)
        print("Updating " + filename)
        shutil.copy(src, dst)
if __name__ == '__main__':
    # Order matters: cookiecutter must be importable before run() imports it.
    install()
    run()
    copy()
| [
"jacebrowning@gmail.com"
] | jacebrowning@gmail.com | |
065fb3d6006b67322a915670b180ffe5efba5694 | db9463b7271c5a88f473390f393fbc5f00b6f46b | /visbrain/utils/tests/test_others.py | 37a60c9de6af5f5218c065bc0f3957d04540ca77 | [
"BSD-3-Clause"
] | permissive | lassemadsen/visbrain | 6fbee27acaa46741899782a2ba347f6050275411 | be096aa8a7058c329e7120d0bdb45d3c9eb8be42 | refs/heads/master | 2022-11-08T05:00:21.857939 | 2022-10-25T10:33:36 | 2022-10-25T10:33:36 | 191,604,064 | 0 | 0 | NOASSERTION | 2019-06-12T16:05:22 | 2019-06-12T16:05:21 | null | UTF-8 | Python | false | false | 566 | py | """Test functions in others.py."""
from visbrain.utils.others import (get_dsf, set_if_not_none)
class TestOthers(object):
    """Test functions in others.py."""
    def test_get_dsf(self):
        """Test function get_dsf."""
        # Presumably returns (downsampling factor, resulting frequency);
        # 1000./10 == 100. matches the first case -- confirm in others.py.
        assert get_dsf(100, 1000.) == (10, 100.)
        assert get_dsf(100, None) == (1, 100.)  # no sfreq: factor of 1
    def test_set_if_not_none(self):
        """Test function set_if_not_none."""
        a = 5.
        assert set_if_not_none(a, None) == 5.  # None replacement is ignored
        assert set_if_not_none(a, 10., False) == 5.  # condition False keeps a
        assert set_if_not_none(a, 10.) == 10.
| [
"e.combrisson@gmail.com"
] | e.combrisson@gmail.com |
fa1812af10cf0f984d450fedbc15640cf16be484 | 1f51c4e89a71ea3fcc2cc921613aacc19e078b69 | /14_Introduction to Importing Data in Python-(part-1)/02_Importing data from other file types/13_loading-mat-files.py | 79ee9f57f5e58d78452f6efc7cc604fe998745c0 | [
"MIT"
] | permissive | CodeHemP/CAREER-TRACK-Data-Scientist-with-Python | 871bafbd21c4e754beba31505965572dd8457adc | 13ebb10cf9083343056d5b782957241de1d595f9 | refs/heads/main | 2023-03-26T08:43:37.054410 | 2021-03-22T15:08:12 | 2021-03-22T15:08:12 | 471,015,287 | 1 | 0 | MIT | 2022-03-17T13:52:32 | 2022-03-17T13:52:31 | null | UTF-8 | Python | false | false | 775 | py | '''
13 - Loading .mat file
In this exercise, you'll figure out how to load a MATLAB file using scipy.io.loadmat()
and you'll discover what Python datatype it yields.
The file 'albeck_gene_expression.mat' is in your working directory. This file contains
gene expression data from the Albeck Lab at UC Davis. You can find the data and some
great documentation here.
Instructions:
- Import the package scipy.io.
- Load the file 'albeck_gene_expression.mat' into the variable mat; do so using the
function scipy.io.loadmat().
- Use the function type() to print the datatype of mat to the IPython shell.
'''
# Import the SciPy I/O subpackage
import scipy.io
# Load the MATLAB file into `mat` -- a dict of variable name -> value
mat = scipy.io.loadmat('albeck_gene_expression.mat')
# loadmat() returns a plain Python dict
print(type(mat))
| [
"ifaizymohd@gmail.com"
] | ifaizymohd@gmail.com |
702ae2e137a34f2c9eac6d52110d38a6f60ade83 | 5f2b22d4ffec7fc1a4e40932acac30256f63d812 | /analysis-of-legal-documents/project/process/cnews_loader_withoutSeqLens.py | bdc9b80dfb3188f3ceee15a1a35c7055208b4ff7 | [] | no_license | Thpffcj/Python-Learning | 45734dd31e4d8d047eec5c5d26309bc7449bfd0d | 5dacac6d33fcb7c034ecf5be58d02f506fd1d6ad | refs/heads/master | 2023-08-04T21:02:36.984616 | 2021-09-21T01:30:04 | 2021-09-21T01:30:04 | 111,358,872 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,648 | py | # coding: utf-8
import sys
from collections import Counter
import numpy as np
import tensorflow.contrib.keras as kr
# Python 2/3 compatibility flag used by the helpers below.
if sys.version_info[0] > 2:
    is_py3 = True
else:
    # Python 2 only: force UTF-8 as the default string encoding
    reload(sys)
    sys.setdefaultencoding("utf-8")
    is_py3 = False
def native_word(word, encoding='utf-8'):
    """Encode *word* for Python 2; return it unchanged on Python 3.

    Useful when a Python3-trained model is loaded under Python 2.
    """
    return word if is_py3 else word.encode(encoding)
def native_content(content):
    """Decode *content* from UTF-8 on Python 2; pass it through on Python 3."""
    return content if is_py3 else content.decode('utf-8')
def open_file(filename, mode='r'):
    """Open *filename* portably across Python 2 and 3.

    mode: 'r' or 'w' for read or write. On Python 3 the file is opened as
    UTF-8 text with undecodable bytes ignored.
    """
    if not is_py3:
        return open(filename, mode)
    return open(filename, mode, encoding='utf-8', errors='ignore')
def read_file(filename):
    """Read a tab-separated corpus file.

    Each line is expected as "<label>\t<content>". Returns a pair
    (contents, labels) where each content is a list of characters.
    Malformed or undecodable lines are skipped.
    """
    contents, labels = [], []
    with open_file(filename) as f:
        for line in f:
            try:
                label, content = line.strip().split('\t')
                if content:
                    contents.append(list(native_content(content)))
                    labels.append(native_content(label))
            except (ValueError, UnicodeError):
                # Narrowed from a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. ValueError covers lines
                # without exactly one tab; UnicodeError covers py2 decode.
                pass
    return contents, labels
def build_vocab(train_dir, vocab_dir, vocab_size=5000):
    """Build a character vocabulary from the training set and save it."""
    data_train, _ = read_file(train_dir)
    all_data = []
    for content in data_train:
        all_data.extend(content)  # flatten every line's characters into one list
    counter = Counter(all_data)
    count_pairs = counter.most_common(vocab_size - 1)  # (char, freq) pairs, most frequent first
    words, _ = list(zip(*count_pairs))  # keep the characters, drop the counts
    # Reserve <PAD> at id 0, used to pad all texts to the same length
    words = ['<PAD>'] + list(words)
    open_file(vocab_dir, mode='w').write('\n'.join(words) + '\n')
def read_vocab(vocab_dir):
    """Read the vocabulary file; return (words, word -> id mapping)."""
    # words = open_file(vocab_dir).read().strip().split('\n')
    with open_file(vocab_dir) as fp:
        # On py2 every value is converted to unicode
        words = [native_content(_.strip()) for _ in fp.readlines()]
    word_to_id = dict(zip(words, range(len(words))))  # id == line number
    return words, word_to_id
def read_category():
    """Return the fixed list of news categories and a name -> id mapping."""
    raw = ['体育', '财经', '房产', '家居', '教育', '科技', '时尚', '时政', '游戏', '娱乐']
    categories = list(map(native_content, raw))
    cat_to_id = {cat: idx for idx, cat in enumerate(categories)}
    return categories, cat_to_id
def to_words(content, words):
    """Convert a sequence of ids in *content* back into a text string."""
    return ''.join(map(words.__getitem__, content))
def process_file(filename, word_to_id, cat_to_id, max_length=600):
    """Convert a corpus file into padded id sequences and one-hot labels."""
    contents, labels = read_file(filename)
    data_id, label_id = [], []
    for i in range(len(contents)):
        # Characters missing from the vocabulary are silently dropped.
        data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id])
        label_id.append(cat_to_id[labels[i]])
    # Use keras' pad_sequences to pad every text to a fixed length
    x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)
    y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id))  # labels as a one-hot (binary) matrix
    return x_pad, y_pad
def batch_iter(x1, x2, y, batch_size):
    """Yield shuffled (x1, x2, y) mini-batches covering the whole dataset.

    All three arrays are shuffled with the same permutation so rows stay
    aligned. The last batch may be smaller than *batch_size*.
    """
    data_len = len(x1)
    # ceil(data_len / batch_size). The previous "- 1" variant silently
    # dropped the final two batches of every epoch.
    num_batch = int((data_len - 1) / batch_size) + 1
    indices = np.random.permutation(np.arange(data_len))  # shuffle once per epoch
    x1_shuffle = x1[indices]
    x2_shuffle = x2[indices]
    y_shuffle = y[indices]
    for i in range(num_batch):
        start_id = i * batch_size
        end_id = min((i + 1) * batch_size, data_len)  # clamp the last batch
        yield x1_shuffle[start_id:end_id], x2_shuffle[start_id:end_id], y_shuffle[start_id:end_id]
| [
"1441732331@qq.com"
] | 1441732331@qq.com |
2f2d6c104a4727cc6ace6c9678d9dc45d95d1401 | eafc5b935d0f086dffbcfe1516ba05ab6ce18540 | /source/w3c/demo_mysql_show_databases.py | 30ea6b4eae940c951db24e9548276bc018c5d252 | [] | no_license | YaooXu/Software_test | 1df4195da7dab6f05862afe458c10b1bee1dcaf8 | 936dda4de0a1bcf6cfc87d5148f6219b625a99fe | refs/heads/master | 2020-11-28T04:09:58.724233 | 2020-01-19T13:50:38 | 2020-01-19T13:50:38 | 229,699,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py |
import mysql.connector
# Connect to a local MySQL server; replace the placeholder credentials
# with real ones before running.
mydb = mysql.connector.connect(
    host="localhost",
    user="myusername",
    passwd="mypassword"
)
mycursor = mydb.cursor()
# List every database visible to this user; the cursor is iterable
# and yields one row (tuple) per database.
mycursor.execute("SHOW DATABASES")
for x in mycursor:
    print(x)
| [
"yuanruize@sina.com"
] | yuanruize@sina.com |
d7212dc9d7b37c7e35296da72dec8339b6d736c7 | 7149cc81b1822cbe58ee17585dafef2b4fcd7145 | /muspy/outputs/midi.py | a2e3ef90b12dd9d0a31ba870ec4216488ae55f25 | [
"MIT"
] | permissive | AvatarWorld/muspy | 88c4412f6b3fd0fa3990fc06d0760ecae1d6ed5d | 1c506383b3f82cfa82e72c7f5a6ff29144c26b3c | refs/heads/master | 2022-12-12T09:46:45.546373 | 2020-08-31T03:58:49 | 2020-08-31T03:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,479 | py | """MIDI output interface."""
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, Tuple, Union
import pretty_midi
from pretty_midi import PrettyMIDI, Instrument
from pretty_midi import KeySignature as PmKeySignature
from pretty_midi import TimeSignature as PmTimeSignature
from pretty_midi import Note as PmNote
from pretty_midi import Lyric as PmLyric
from mido import Message, MetaMessage, MidiFile, MidiTrack, bpm2tempo
from ..classes import KeySignature, Lyric, Note, Tempo, TimeSignature, Track
if TYPE_CHECKING:
from ..music import Music
PITCH_NAMES = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
def to_delta_time(midi_track: "MidiTrack"):
    """Convert a mido MidiTrack object from absolute time to delta time.

    The track is first sorted chronologically, then each message's time
    is rewritten in place as the offset from the previous message.

    Parameters
    ----------
    midi_track : :class:`mido.MidiTrack` object
        mido MidiTrack object to convert (modified in place).
    """
    # Messages must be chronological before delta conversion.
    midi_track.sort(key=lambda msg: msg.time)
    prev = 0
    for msg in midi_track:
        # RHS evaluates before assignment: (absolute - prev, absolute)
        msg.time, prev = msg.time - prev, msg.time
def to_mido_tempo(tempo: Tempo) -> MetaMessage:
    """Return a Tempo object as a mido MetaMessage object.

    Timing is in absolute time, NOT in delta time.
    """
    # bpm2tempo converts quarter-notes-per-minute into MIDI
    # microseconds-per-beat, which 'set_tempo' expects.
    return MetaMessage(
        "set_tempo", time=tempo.time, tempo=bpm2tempo(tempo.qpm),
    )
def to_mido_key_signature(
    key_signature: KeySignature,
) -> Optional[MetaMessage]:
    """Return a KeySignature object as a mido MetaMessage object.

    Timing is in absolute time, NOT in delta time. Returns None when the
    root is undefined, as mido cannot encode that.
    """
    # mido spells minor keys with an "m" suffix, e.g. "C#m".
    suffix = "m" if key_signature.mode == "minor" else ""
    if key_signature.root is None:
        return None
    # root is a pitch-class index (0-11) into PITCH_NAMES.
    return MetaMessage(
        "key_signature",
        time=key_signature.time,
        key=PITCH_NAMES[key_signature.root] + suffix,
    )
def to_mido_time_signature(time_signature: TimeSignature) -> MetaMessage:
    """Return a TimeSignature object as a mido MetaMessage object.

    Timing is in absolute time, NOT in delta time.
    """
    return MetaMessage(
        "time_signature",
        time=time_signature.time,
        numerator=time_signature.numerator,
        denominator=time_signature.denominator,
    )
def to_mido_meta_track(music: "Music") -> MidiTrack:
    """Return a mido MidiTrack containing metadata of a Music object.

    Collects title, tempos, key/time signatures, lyrics and annotations
    as meta messages, then converts the track to delta time.

    Parameters
    ----------
    music : :class:`muspy.Music` object
        Music object to convert.

    Returns
    -------
    :class:`mido.MidiTrack` object
        Converted mido MidiTrack object.
    """
    # Create a track to store the metadata
    meta_track = MidiTrack()
    # Song title
    if music.metadata.title is not None:
        meta_track.append(MetaMessage("track_name", name=music.metadata.title))
    # Tempos
    for tempo in music.tempos:
        meta_track.append(to_mido_tempo(tempo))
    # Key signatures (skipped when the root is undefined)
    for key_signature in music.key_signatures:
        mido_key_signature = to_mido_key_signature(key_signature)
        if mido_key_signature is not None:
            meta_track.append(mido_key_signature)
    # Time signatures
    for time_signature in music.time_signatures:
        meta_track.append(to_mido_time_signature(time_signature))
    # Lyrics
    for lyric in music.lyrics:
        meta_track.append(to_mido_lyric(lyric))
    # Annotations
    for annotation in music.annotations:
        # Marker messages
        # NOTE(review): no time= is passed here, so markers land at t=0
        # after delta conversion -- confirm this is intended.
        if annotation.group == "marker":
            meta_track.append(
                MetaMessage("marker", text=annotation.annotation)
            )
        # Text messages (only string annotations can be embedded)
        elif isinstance(annotation.annotation, str):
            meta_track.append(
                MetaMessage(
                    "text", time=annotation.time, text=annotation.annotation
                )
            )
    # End of track message
    meta_track.append(MetaMessage("end_of_track"))
    # Convert to delta time
    to_delta_time(meta_track)
    return meta_track
def to_mido_lyric(lyric: Lyric) -> MetaMessage:
    """Return a Lyric object as a mido MetaMessage object.

    Timing is in absolute time, NOT in delta time.
    """
    return MetaMessage("lyrics", time=lyric.time, text=lyric.lyric)
def to_mido_note_on_note_off(
    note: Note, channel: int, use_note_on_as_note_off: bool = True
) -> Tuple[Message, Message]:
    """Return a Note object as a pair of mido note-on/note-off Messages.

    Timing is in absolute time, NOT in delta time.

    Parameters
    ----------
    note : :class:`muspy.Note` object
        Note object to convert.
    channel : int
        Channel of the MIDI message.
    use_note_on_as_note_off : bool
        Whether to use a note on message with zero velocity instead of a
        note off message.

    Returns
    -------
    :class:`mido.Message` object
        Converted mido Message object for note on.
    :class:`mido.Message` object
        Converted mido Message object for note off.
    """
    common = dict(note=note.pitch, channel=channel)
    note_on_msg = Message(
        "note_on", time=note.time, velocity=note.velocity, **common)
    if use_note_on_as_note_off:
        # A zero-velocity note-on acts as a note-off.
        note_off_msg = Message(
            "note_on", time=note.end, velocity=0, **common)
    else:
        note_off_msg = Message(
            "note_off", time=note.end, velocity=note.velocity, **common)
    return note_on_msg, note_off_msg
def to_mido_track(
    track: Track, use_note_on_as_note_off: bool = True
) -> MidiTrack:
    """Return a Track object as a mido MidiTrack object.

    Parameters
    ----------
    track : :class:`muspy.Track` object
        Track object to convert.
    use_note_on_as_note_off : bool
        Whether to use a note on message with zero velocity instead of a
        note off message.

    Returns
    -------
    :class:`mido.MidiTrack` object
        Converted mido MidiTrack object.
    """
    # Create a new MIDI track
    midi_track = MidiTrack()
    # Track name messages
    if track.name is not None:
        midi_track.append(MetaMessage("track_name", name=track.name))
    # Program change messages. Channel 9 (0-based) is the General MIDI
    # percussion channel, so drum tracks are routed there.
    channel = 9 if track.is_drum else 0
    midi_track.append(
        Message("program_change", program=track.program, channel=channel,)
    )
    # Note on and note off messages
    for note in track.notes:
        midi_track.extend(
            to_mido_note_on_note_off(note, channel, use_note_on_as_note_off)
        )
    # End of track message
    midi_track.append(MetaMessage("end_of_track"))
    # Convert to delta time
    to_delta_time(midi_track)
    return midi_track
def to_mido(music: "Music", use_note_on_as_note_off: bool = True):
    """Return a Music object as a MidiFile object.

    Parameters
    ----------
    music : :class:`muspy.Music` object
        Music object to convert.
    use_note_on_as_note_off : bool
        Whether to use a note on message with zero velocity instead of a
        note off message.

    Returns
    -------
    :class:`mido.MidiFile`
        Converted MidiFile object.
    """
    # Create a MIDI file object; type=1 is a multi-track file with
    # metadata in its own (first) track.
    midi = MidiFile(type=1, ticks_per_beat=music.resolution)
    # Append meta track
    midi.tracks.append(to_mido_meta_track(music))
    # Iterate over music tracks
    for track in music.tracks:
        midi.tracks.append(to_mido_track(track, use_note_on_as_note_off))
    return midi
def write_midi_mido(
    path: Union[str, Path],
    music: "Music",
    use_note_on_as_note_off: bool = True,
):
    """Write a Music object to a MIDI file using mido as backend.

    Parameters
    ----------
    path : str or Path
        Path to write the MIDI file.
    music : :class:`muspy.Music` object
        Music object to write.
    use_note_on_as_note_off : bool
        Whether to use a note on message with zero velocity instead of a
        note off message.
    """
    midi = to_mido(music, use_note_on_as_note_off=use_note_on_as_note_off)
    # mido expects a string path
    midi.save(str(path))
def to_pretty_midi_key_signature(
    key_signature: KeySignature,
) -> PmKeySignature:
    """Return a KeySignature object as a pretty_midi KeySignature object.

    `key_signature.root` is a pitch-class index (0-11; see the mido
    conversion above), so it must be mapped through PITCH_NAMES before
    being handed to pretty_midi.key_name_to_key_number(), which expects
    a name such as "C# minor". Formatting the raw integer produced
    strings like "0 major" that the parser cannot handle.
    """
    key_name = "{} {}".format(
        PITCH_NAMES[key_signature.root], key_signature.mode
    )
    return PmKeySignature(
        pretty_midi.key_name_to_key_number(key_name),
        key_signature.time,
    )
def to_pretty_midi_time_signature(
    time_signature: TimeSignature,
) -> PmTimeSignature:
    """Return a TimeSignature object as a pretty_midi TimeSignature object."""
    return PmTimeSignature(
        time_signature.numerator,
        time_signature.denominator,
        time_signature.time,
    )
def to_pretty_midi_lyric(lyric: Lyric) -> PmLyric:
    """Return a Lyric object as a pretty_midi Lyric object."""
    # pretty_midi takes (text, time), the reverse of muspy's field order.
    return PmLyric(lyric.lyric, lyric.time)
def to_pretty_midi_note(note: Note) -> PmNote:
    """Return a Note object as a pretty_midi Note object."""
    # pretty_midi Note order is (velocity, pitch, start, end).
    return PmNote(note.velocity, note.pitch, note.time, note.end)
def to_pretty_midi_instrument(track: Track) -> Instrument:
    """Return a Track object as a pretty_midi Instrument object."""
    instrument = pretty_midi.Instrument(
        track.program, track.is_drum, track.name
    )
    # Convert and attach every note of the track.
    instrument.notes.extend(to_pretty_midi_note(n) for n in track.notes)
    return instrument
def to_pretty_midi(music: "Music") -> PrettyMIDI:
    """Return a Music object as a PrettyMIDI object.

    Tempo changes are not supported yet.

    Parameters
    ----------
    music : :class:`muspy.Music` object
        Music object to convert.

    Returns
    -------
    :class:`pretty_midi.PrettyMIDI`
        Converted PrettyMIDI object.
    """
    pm = pretty_midi.PrettyMIDI()
    # Key signatures
    for key_signature in music.key_signatures:
        pm.key_signature_changes.append(
            to_pretty_midi_key_signature(key_signature)
        )
    # Time signatures
    for time_signature in music.time_signatures:
        pm.time_signature_changes.append(
            to_pretty_midi_time_signature(time_signature)
        )
    # Lyrics
    for lyric in music.lyrics:
        pm.lyrics.append(to_pretty_midi_lyric(lyric))
    # Tracks
    # NOTE: music.tempos is never written -- hence "tempo changes are
    # not supported" in the docstring above.
    for track in music.tracks:
        pm.instruments.append(to_pretty_midi_instrument(track))
    return pm
def write_midi_pretty_midi(path: Union[str, Path], music: "Music"):
    """Write a Music object to a MIDI file using pretty_midi as backend.

    Tempo changes are not supported yet.

    Parameters
    ----------
    path : str or Path
        Path to write the MIDI file.
    music : :class:`muspy.Music` object
        Music object to convert.
    """
    pm = to_pretty_midi(music)
    # pretty_midi expects a string path
    pm.write(str(path))
def write_midi(
    path: Union[str, Path],
    music: "Music",
    backend: str = "mido",
    **kwargs: Any
):
    """Write a Music object to a MIDI file.

    Parameters
    ----------
    path : str or Path
        Path to write the MIDI file.
    music : :class:`muspy.Music` object
        Music object to write.
    backend: {'mido', 'pretty_midi'}
        Backend to use. Defaults to 'mido'. Extra keyword arguments are
        forwarded to the 'mido' backend only.

    Raises
    ------
    ValueError
        If `backend` is not one of 'mido' and 'pretty_midi'.
    """
    if backend == "mido":
        return write_midi_mido(path, music, **kwargs)
    if backend == "pretty_midi":
        return write_midi_pretty_midi(path, music)
    # Fixed typo in the error message ("must by" -> "must be").
    raise ValueError("`backend` must be one of 'mido' and 'pretty_midi'.")
| [
"salu.hwdong@gmail.com"
] | salu.hwdong@gmail.com |
c0a5f980cb6a019f12d76281b98357aba19fee80 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.5/tests/modeltests/unmanaged_models/__init__.py | 09efd4b322ab91437f5b3772505ed3f56090faac | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.5/tests/modeltests/unmanaged_models/__init__.py | [
"ron.y.kagan@gmail.com"
] | ron.y.kagan@gmail.com |
a5af8ae75f4ece2c26d59e219306eb61266a2f0c | 39b0d9c6df77671f540c619aff170441f953202a | /PYTHON LIBRARY/SUB_3/pathlib_symlink_to.py | 48a4dcc8bef3c8cf48f9e42a1e86301f9be8596e | [] | no_license | yeboahd24/Python201 | e7d65333f343d9978efff6bf86ce0447d3a40d70 | 484e66a52d4e706b8478473347732e23998c93c5 | refs/heads/main | 2023-02-06T10:24:25.429718 | 2020-12-26T01:08:04 | 2020-12-26T01:08:04 | 306,487,550 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # Use symlink_to() to create a symbolic link. The link will be named based on the path’s
# value and will refer to the name given as an argument to symlink_to().
import pathlib
# Create a symlink named 'example_link' whose target is 'index.txt'.
p = pathlib.Path('example_link')
p.symlink_to('index.txt')
# Printing the path shows the link's own name.
print(p)
print(p.resolve().name) | [
"noreply@github.com"
] | yeboahd24.noreply@github.com |
2f84648acb1917f62b8824fdb5590a908a1fca86 | 12e956d80079f2808aae687b2cfbe1384deb35e2 | /api/views.py | 1cc372f8d8cfb23489216446a98295069a7bedc7 | [] | no_license | khushal111/Patient_Doctor_App | e141a16c09fd400b5abd5b849ebe02073b6082b9 | 5964c242d06023cbe34b9d5d0a16c6228ed5b734 | refs/heads/master | 2022-06-29T03:14:26.743811 | 2019-11-07T17:45:24 | 2019-11-07T17:45:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from django.shortcuts import render
from .models import Prescription
from .serializers import PrescriptionSerializer
from rest_framework import viewsets
class PrescriptionViewSet(viewsets.ModelViewSet):
    """DRF viewset exposing list/retrieve/create/update/delete for Prescription."""
    serializer_class = PrescriptionSerializer
    # No filtering: every prescription is exposed through this endpoint.
    queryset = Prescription.objects.all()
| [
"kylemaa95@gmail.com"
] | kylemaa95@gmail.com |
de182fe89d8bc26cd4f9eb12b282489dc1e77349 | 150464efa69db3abf328ef8cd912e8e248c633e6 | /_4.python/__code/Python自學聖經(第二版)/ch33/filewrite/filewrite.py | 9bc15311d12904f928cb141311e56326bb751810 | [] | no_license | bunshue/vcs | 2d194906b7e8c077f813b02f2edc70c4b197ab2b | d9a994e3afbb9ea84cc01284934c39860fea1061 | refs/heads/master | 2023-08-23T22:53:08.303457 | 2023-08-23T13:02:34 | 2023-08-23T13:02:34 | 127,182,360 | 6 | 3 | null | 2023-05-22T21:33:09 | 2018-03-28T18:33:23 | C# | UTF-8 | Python | false | false | 713 | py | import os, sys
def base_path(path):
    """Resolve *path* against the application base directory.

    When running as a PyInstaller bundle (sys.frozen is set) the base is
    the extraction dir sys._MEIPASS; otherwise it is this script's dir.
    """
    frozen = getattr(sys, 'frozen', None)
    base = sys._MEIPASS if frozen else os.path.dirname(__file__)
    return os.path.join(base, path)
tmp=base_path("") #app base directory (PyInstaller temp dir when frozen)
cwd=os.getcwd() #current working directory
file1="file1.txt"
file2=os.path.join(tmp,"file2.txt")
file3=os.path.join(cwd,"file3.txt")
f1=open(file1,'w') #relative name: written into the working directory
f1.write("file1 txt")
f1.close()
print(file1,"寫入成功!")
f2=open(file2,'w') #written into the tmp/base directory
f2.write("file2 txt")
f2.close()
print(file2,"寫入成功!")
f3=open(file3,'w') #written into the cwd explicitly
f3.write("file3 txt")
f3.close()
print(file3,"寫入成功!")
key=input("按任意鍵結束!") | [
"david@insighteyes.com"
] | david@insighteyes.com |
de09319ec0e8ac2dc77f7838a59b05565e829784 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/defaultRangeUtils.py | 26a406861d88271fddf15189e16a4244d7860242 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 300 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\common\modules\nice\client\_nastyspace\defaultRangeUtils.py
from eve.client.script.ui.util.defaultRangeUtils import FetchRangeSetting
from eve.client.script.ui.util.defaultRangeUtils import UpdateRangeSetting
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
fb2d0a433f46dfef15d4ed2718491bf49c53bc61 | 6a609bc67d6a271c1bd26885ce90b3332995143c | /exercises/math/fast_power.py | 5d59cee28aa49b1fcc93768bb25df40530f28039 | [] | no_license | nahgnaw/data-structure | 1c38b3f7e4953462c5c46310b53912a6e3bced9b | 18ed31a3edf20a3e5a0b7a0b56acca5b98939693 | refs/heads/master | 2020-04-05T18:33:46.321909 | 2016-07-29T21:14:12 | 2016-07-29T21:14:12 | 44,650,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | # -*- coding: utf-8 -*-
"""
Calculate the a^n % b where a, b and n are all 32bit integers.
Example
For 2^31 % 3 = 2
For 100^1000 % 1000 = 0
Challenge
O(logn)
"""
class Solution:
    """
    @param a, b, n: 32bit integers
    @return: An integer
    """
    def fastPower(self, a, b, n):
        """Compute a^n % b in O(log n) via iterative binary exponentiation.

        Returns -1 for negative exponents (per the problem contract).
        """
        if n < 0:
            return -1
        if n == 0:
            return 1 % b
        base, result = a % b, 1 % b
        while n:
            if n & 1:
                # (x * y) % p == ((x % p) * (y % p)) % p
                result = (result * base) % b
            base = (base * base) % b
            n >>= 1
        return result
| [
"wanghan15@gmail.com"
] | wanghan15@gmail.com |
ab5fa3e92b5de53f31b51f1f0925caccc5232db2 | 9ba8cb2fce9f1ece97f780e7509d8b5dc178d6a4 | /yt-feed-to-email | 34691cb3932c722148c6441173cdb5962e600258 | [
"LicenseRef-scancode-public-domain",
"WTFPL"
] | permissive | msmyers/fgtk | 24b3262a432bacaab97a02dd448e6c21f0bf1779 | 98e767e7b12be6410934917519ef50bddab3f219 | refs/heads/master | 2023-02-16T12:06:11.210059 | 2021-01-19T01:02:48 | 2021-01-19T01:02:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,009 | #!/usr/bin/env python
import itertools as it, functools as ft, operator as op
import pathlib as pl, datetime as dt, urllib.parse as up, hashlib as hl, subprocess as sp
import xml.etree.ElementTree as etree
import os, sys, logging, time, calendar, base64, json, textwrap, unicodedata
import feedparser as fp # pip install --user feedparser
class LogMessage:
    """Defers str.format of a log template until the record is emitted."""
    def __init__(self, fmt, a, k):
        self.fmt, self.a, self.k = fmt, a, k
    def __str__(self):
        if not (self.a or self.k): return self.fmt
        return self.fmt.format(*self.a, **self.k)
class LogStyleAdapter(logging.LoggerAdapter):
    """LoggerAdapter that enables {}-style (str.format) log messages."""
    def __init__(self, logger, extra=None): super().__init__(logger, extra or dict())
    def log(self, level, msg, *args, **kws):
        if not self.isEnabledFor(level): return
        # Only exc_info passes through to logging; every other keyword
        # becomes a str.format argument for the message template.
        log_kws = {} if 'exc_info' not in kws else dict(exc_info=kws.pop('exc_info'))
        msg, kws = self.process(msg, kws)
        # Private Logger._log is used so args/kws can ride inside the
        # LogMessage instead of logging's %-style args tuple.
        self.logger._log(level, LogMessage(msg, args, kws), (), **log_kws)
err_fmt = lambda err: '[{}] {}'.format(err.__class__.__name__, err)  # "[ExcType] message"
get_logger = lambda name: LogStyleAdapter(logging.getLogger(f'ytf2e.{name}'))
# NFKC-normalized casefold for robust string comparison
str_norm = lambda v: unicodedata.normalize('NFKC', v.strip()).casefold()
# Short (12 chars) urlsafe-base64 of a personalized blake2s digest
str_hash = lambda p: base64.urlsafe_b64encode(
    hl.blake2s(str(p).encode(), person=b'ytf2e.s1').digest() ).decode()[:12]
def tuple_hash(*data):
    """Return a short stable hash for a nested structure of simple values.

    Values are serialized with private-use sentinels (\\ue000-\\ue003) so
    that structure, None and edge values cannot collide, then hashed via
    str_hash. Raises ValueError for unsupported types.
    """
    if len(data) == 1 and isinstance(data[0], (tuple, list)): data = data[0]
    src = list()
    for v in data:
        if v is None: src.append('\ue003')
        elif isinstance(v, (int, str, dt.tzinfo)): src.append(str(v))
        elif isinstance(v, (tuple, list)): src.append(tuple_hash(v))
        elif isinstance(v, dt.datetime):
            # NOTE(review): conv_ts_utc is not defined in the visible part
            # of this file -- confirm it exists, or this branch NameErrors.
            src.append(conv_ts_utc(v).strftime('%Y-%m-%dT%H:%M:%S'))
        elif isinstance(v, dt.timedelta):
            # Was a plain string literal (missing f-prefix), which made
            # every timedelta serialize -- and hash -- identically.
            src.append(f'\ue002{v.total_seconds()}')
        elif isinstance(v, set): src.append(tuple_hash(sorted(v)))
        else: raise ValueError(type(v), v)
    return str_hash('\ue000'.join(
        '\ue001{}\ue001'.format(v.replace('\ue001', '\ue001'*2)) for v in src ))
def str_repr(s, max_len=160, len_bytes=False, ext=' ...[{s_len}]'):
    """Compact printable form of *s*, truncated with an " ...[len]" tag.

    max_len <= 0 disables truncation; len_bytes truncates by encoded byte
    length instead of character count. The '69,012/' prefix appears to be
    a fixed placeholder-style marker for "value was truncated".
    """
    if isinstance(s, bytes): s = s.decode('utf-8', 'replace')
    if not isinstance(s, str): s = str(s)
    s_len = f'{len(s):,d}'
    out = repr(s)[1:-1].replace("\\'", "'").replace('\\"', '"')
    tag_probe = ext.format(s_len='12/345')  # sizing sample for the tag
    if max_len > 0 and len(out) > max_len:
        s_len = f'69,012/{s_len}'
        if len_bytes:
            cut = max_len - len(tag_probe.encode())
            out = out.encode()[:cut].decode(errors='ignore') + ext.format(s_len=s_len)
        else:
            out = out[:max_len - len(tag_probe)] + ext.format(s_len=s_len)
    return out
# Dedent a multiline template, trim edge newlines, expand tabs to spaces
dd = lambda text: (textwrap.dedent(text).strip('\n') + '\n').replace('\t', ' ')
# textwrap.fill with script defaults; ind_next=None repeats ind on wrapped lines
fill = lambda s,w=90,ind='',ind_next=' ',**k: textwrap.fill(
    s, w, initial_indent=ind, subsequent_indent=ind if ind_next is None else ind_next, **k )
class YTFeed:
    """Identity and polling state of one subscribed YouTube channel feed."""

    # Per-feed state; instance values get restored by state_process().
    title = chan = url = None
    ts_last_check = 0
    # EWMA (seconds) of the delay between feed entries - drives scheduling.
    delay_ewma = 24 * 3600
    delay_ewma_max = 30 * 24 * 3600
    delay_ewma_a = 0.3
    etag = seen_entries = None

    def __init__(self, url, title=None, chan=None):
        # channel_id is recoverable from the feed URL's query string
        if not chan: chan, = up.parse_qs(up.urlparse(url).query)['channel_id']
        if not title: title = f'chan.{chan}'
        title = ' '.join(title.replace('\n', ' ').split())
        if len(title) > 40: title = f'{title[:38]}--'  # keep titles short
        self.title, self.chan, self.url = title, chan, url

    __repr__ = lambda s: f'YTFeed({s.title} [{s.chan}])'

    @classmethod
    def from_xml(cls, attrs):
        # From an OPML outline element's attributes
        return cls(attrs['xmlUrl'], attrs.get('title'))

    @classmethod
    def from_line(cls, line):
        # From a plain-text subscription list line: "<url> [title...]"
        line = line.strip().split(None, 1)
        url, title = line if len(line) > 1 else (line[0], None)
        return cls(url, title)

    def ts_check_next(self):
        # /2 to run 2x fetches per (average-ish) interval between entries
        return self.ts_last_check + self.delay_ewma / 2

    def entry_id(self, e):
        """Stable short id for a feed entry, from the first key available."""
        e_id = None
        for k in 'id', 'yt_videoid', 'title', 'link', 'published', 'modified', 'created':
            if e_id := e.get(k): break
        if e_id: e_id = tuple_hash('id.1', e_id)
        return e_id
class YTFeedIndex:
    """Mapping of channel-id -> YTFeed, mergeable from several sources."""

    def __init__(self, *feed_idxs):
        self.idx = dict()
        if feed_idxs:
            for idx in feed_idxs: self.add(idx)

    def add(self, feeds):
        # Accepts a single feed, another index, or any iterable of feeds;
        # later additions replace earlier ones for the same channel.
        if isinstance(feeds, YTFeed): self.idx[feeds.chan] = feeds
        elif isinstance(feeds, YTFeedIndex): self.idx.update(feeds.idx)
        else:
            for feed in feeds: self.idx[feed.chan] = feed

    def get(self, chan, fallback=None):
        return self.idx.get(chan, fallback)

    __bool__ = lambda s: bool(s.idx)
    __contains__ = lambda s,f: f.chan in s.idx  # membership is by feed object
    __len__ = lambda s: len(s.idx)
    __iter__ = lambda s: iter(s.idx.values())  # iterates feeds, not ids
# Line format: {ts} :: {chan-id} {chan-name!r} :: {update-json}
state_log_name = 'updates.log'
state_log_max_size = 3 * 2**20  # 3 MiB; log is compacted/rotated past this
def state_process(state_dir, feeds):
    """Load persisted per-feed state from the update log, compacting it.

    Keeps the newest update line per known channel, applies it to the
    matching feed object, and rewrites the log (keeping one .old copy)
    once it grows past state_log_max_size. Returns the log path, or
    None when no state dir is configured.
    """
    if not state_dir: return
    state_dir, log = pl.Path(state_dir), get_logger('state')
    if not state_dir.exists():
        # Fixed: Path.mkdir's keyword is exist_ok, not exists_ok
        # (the old spelling raised TypeError on first run).
        state_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
    state_last, state_log = dict(), state_dir / state_log_name
    with state_log.open('a+') as src:
        src.seek(0)
        for line in src:
            try: update = json.loads(line.split(' :: ', 2)[-1])
            except Exception as err:
                log.error('Failed to process feed-state entry: {} -- {!r}', err_fmt(err), line)
                continue
            feed = feeds.get(update['chan'])
            if not feed:
                log.debug('Dropping update(s) for nx feed: {}', str_repr(line))
                continue
            # Later lines win - only the most recent update per feed is kept.
            state_last[feed.chan] = line, feed, update
    state_log_new = None
    if state_log.stat().st_size > state_log_max_size:
        # Rotate and rewrite only the latest line per feed.
        state_log.rename(state_dir / f'{state_log_name}.old')
        state_log_new = state_log.open('a')
    try:
        for line, feed, update in state_last.values():
            for k in 'ts_last_check etag seen_entries delay_ewma'.split():
                try: setattr(feed, k, update[k])
                except KeyError:
                    # seen_entries is optional in older log lines.
                    if k != 'seen_entries': raise
            if state_log_new: state_log_new.write(line)
    finally:
        if state_log_new: state_log_new.close()
    return state_log
def state_update(state_log, ts, feed, update):
	'''Append one feed-state update line to the state log (see state_process for format).'''
	ts_str = time.strftime('%Y-%m-%d %H:%M', time.localtime(ts))
	line = f'{ts_str} :: {feed.chan} {feed.title!r} :: {json.dumps(update)}\n'
	with state_log.open('a') as dst: dst.write(line)
class FeedFetchError(Exception): pass
# User-Agent and Accept headers sent with feed requests - advertise common
# feed content-types, with generic xml as a low-priority fallback
feed_user_agent = f'yt-feed-to-email/0.1 feedparser/{fp.__version__}'
feed_accept_header = ( 'application/atom+xml,application/rdf+xml,'
	'application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2' )
def feed_fetch(log, url, etag):
	'''Fetch and parse feed at url, passing etag for conditional requests.

		Returns None on http-304 (not modified), otherwise (new-etag, entries) tuple.
		Raises FeedFetchError on parser errors or >=3xx (non-304) http statuses.'''
	feed = fp.parse( url, agent=feed_user_agent,
		etag=etag, request_headers=dict(Accept=feed_accept_header) )
	status, bozo, bozo_err = (
		feed.get(k) for k in ['status', 'bozo', 'bozo_exception'] )
	if (not status and bozo) or (status or 1000) >= 400:
		raise FeedFetchError(f'feedparser error (status={status}): {url} - {bozo_err}')
	if status == 304: return
	# BUG FIX: status can be None (e.g. non-http source parsed without errors),
	#  which used to raise TypeError on the bare "status >= 300" comparison
	elif status and status >= 300:
		raise FeedFetchError(f'Unhandled 3xx response status {status}')
	return feed.get('etag'), feed.entries
def feed_process(log, feed, entries, ts, ts_since=None):
	'''Process parsed feed entries into notification emails and updated feed state.

		Skips entries older than ts_since and ones already in feed.seen_entries,
		while maintaining an EWMA of inter-entry delay (used for check scheduling).
		Returns (emails, ewma_delay, seen_ids) where emails is a list of
		(entry-ts, subject, body) tuples and seen_ids a merged {entry-id: ts} dict.'''
	first_old_id = e_ts_prev = None
	ewma_delay, ewma_a = feed.delay_ewma, feed.delay_ewma_a
	emails, feed_ids, seen_ids = list(), dict(), feed.seen_entries or dict()
	for e in entries:
		e_id, e_ts = feed.entry_id(e), calendar.timegm(e.published_parsed)
		if not e_id: raise ValueError(f'Failed to get id for feed entry: {str_repr(e)}')
		feed_ids[e_id] = e_ts
		if not first_old_id: # skips all older entries in ewma calculation
			if e_ts_prev: ewma_delay = ewma_a * (e_ts - e_ts_prev) + (1 - ewma_a) * ewma_delay
			e_ts_prev = e_ts
		if ts_since is not None and e_ts <= ts_since:
			log.debug( 'Skipping old entry [id={} ts={}]: pub={}'
				' author={} title={!r}', e_id, e_ts, e.published, e.author, e.title )
			if not first_old_id: first_old_id = e_id
			continue
		if e_id in seen_ids:
			log.debug( 'Skipping already-seen entry [id={} ts={}]: pub={}'
				' author={} title={!r}', e_id, e_ts, e.published, e.author, e.title )
			if not first_old_id: first_old_id = e_id
			continue
		# NOTE(review): placeholders here fill as [e_ts] ts=e.published - labels look
		#  shifted relative to values, but it is a debug msg only; confirm intent
		log.debug( 'Generating notification for entry [{}]:'
			' ts={} author={!r} title={!r}', e_ts, e.published, e.author, e.title )
		body = dd(f'''
			Title: {e.title}
			Author: {e.author}
			Published: {e.published}
			Link: {e.link}''')
		if summary := e.get('summary'):
			body += '\nSummary:\n' + process_summary(summary)
		emails.append((e_ts, f'YT [{e.author}]: {e.title}', body))
	if e_ts_prev and not emails and (ts - e_ts_prev) > ewma_delay:
		# This makes empty checks bump delay up
		ewma_delay = ewma_a * (ts - e_ts_prev) + (1 - ewma_a) * ewma_delay
		ewma_delay = min(feed.delay_ewma_max, ewma_delay)
	elif not e_ts_prev: log.warning('Empty feed - check/remove it from list') # can be a bug too
	# Find oldest common e_id and merge all newer seen_ids into feed_ids
	# This is done to avoid notifications for flapping visible/hidden entries
	n = 0
	for n, e_id in enumerate(reversed(list(seen_ids))):
		if e_id in feed_ids: break
	if n > 0: seen_ids = dict((e_id, seen_ids[e_id]) for e_id in list(seen_ids)[:-n])
	feed_ids = dict(sorted( # (e_id, ts) with later ts taking prio for same id
		it.chain(seen_ids.items(), feed_ids.items()), key=op.itemgetter(1) ))
	feed_ids = dict(sorted(feed_ids.items(), key=op.itemgetter(1), reverse=True))
	return emails, ewma_delay, feed_ids
def process_summary(text, w=120, pre=' '):
	'''Deduplicate consecutive identical lines, wrap each remaining line to
		width w, and prefix each resulting chunk with pre.
		Note: pre is applied per source line, not per wrapped output line.'''
	out, prev = list(), None
	for line in text.strip().split('\n'):
		stripped = line.rstrip()
		if stripped == prev:
			out.append('')  # drop consecutive duplicate
			continue
		prev = stripped
		out.append(fill(line, w) + '\n')
	return ''.join(f'{pre}{chunk}' for chunk in out if chunk)
def main(args=None):
	'''CLI entry point: parse feed lists, check feeds due for update,
		send notification emails via local "mail" command, persist state.'''
	import argparse
	parser = argparse.ArgumentParser(
		description='Script to generate email notifications for YouTube OPML/RSS feed updates.')

	group = parser.add_argument_group('Feed sources')
	group.add_argument('-o', '--opml', metavar='file',
		help='YT OPML export from https://www.youtube.com/subscription_manager?action_takeout=1 link.')
	group.add_argument('-r', '--rss-list', metavar='file',
		help='File with YT RSS/Atom feed URLs, one per line, with optional comments after URLs.')
	group.add_argument('-c', '--opml-convert', action='store_true',
		help='Append all new feeds from specified -o/--opml file to -r/--rss-list and exit.')

	group = parser.add_argument_group('Notification options')
	group.add_argument('-e', '--email', metavar='addr', required=True,
		help='Email address to send video notifications to via local "mail" command.')
	group.add_argument('-d', '--email-delay',
		type=float, metavar='float', default=1.1,
		help='Delay between running notification-command in seconds.'
			' Useful to make sure emails sort by date/time correctly within same channel,'
			' and don\'t all have same exact timestamp. Default is %(default)ss (0 - disable).')

	group = parser.add_argument_group('State storage/init')
	group.add_argument('-s', '--state-dir', default='state', metavar='path',
		help='Directory to use for storing per-feed "last check" timestamps. Default: %(default)s')
	group.add_argument('-t', '--new-feed-time',
		type=float, metavar='posix-ts',
		help='Timestamp to fetch entries after for new/unknown feeds.'
			' Default or 0 is to generate notifications for all entries in new feeds.')

	group = parser.add_argument_group('Check filtering and rate-limiting')
	group.add_argument('-n', '--feed-name', metavar='name',
		help='Name (part) of a specific feed to check regardless of timestamps.')
	group.add_argument('-m', '--max-checks', type=int, metavar='n',
		help='Limit on number of feeds to check in one run. Default or 0 - no limit.')
	group.add_argument('-f', '--force', action='store_true',
		help='Force-check feeds regardless of timestamps.')

	group = parser.add_argument_group('Debug options')
	group.add_argument('--debug', action='store_true', help='Verbose operation mode.')
	group.add_argument('--dry-run', action='store_true',
		help='Run same stuff, but do not send emails or update state.')

	opts = parser.parse_args(sys.argv[1:] if args is None else args)

	logging.basicConfig(level=logging.DEBUG if opts.debug else logging.WARNING)
	log = get_logger('main')

	# Build feed index from OPML export and/or plain-text RSS url-list
	feeds_rss = feeds_opml = list()
	if opts.opml:
		def _get_outlines(e, feeds=None):
			# Recursively collect all <outline xmlUrl=...> elements
			if feeds is None: feeds = list()
			for o in e:
				if o.attrib.get('xmlUrl'): feeds.append(YTFeed.from_xml(o.attrib))
				else: _get_outlines(o, feeds)
			return feeds
		opml = etree.fromstring(pl.Path(opts.opml).read_text())
		feeds_opml = _get_outlines(opml.find('body'))
		log.debug('Parsed OPML: feeds={}', len(feeds_opml))
		feeds_opml = YTFeedIndex(feeds_opml)
	if opts.rss_list:
		rss_list = pl.Path(opts.rss_list)
		if rss_list.exists():
			with rss_list.open() as src:
				feeds_rss = list(YTFeed.from_line(line) for line in src)
			log.debug('Parsed RSS-list: feeds={}', len(feeds_rss))
			feeds_rss = YTFeedIndex(feeds_rss)

	if opts.opml_convert:
		if not opts.rss_list: parser.error('-c/--opml-convert requires -r/--rss-list option')
		n = 0
		with rss_list.open('a') as dst:
			for feed in feeds_opml:
				if feed in feeds_rss: continue
				dst.write(f'{feed.url} {feed.title}\n')
				print(f'Added feed: {feed}')
				n += 1
		print(f'-- added feeds: {n}')
		return

	feeds = YTFeedIndex(feeds_opml, feeds_rss)
	state_log = state_process(opts.state_dir, feeds)
	# BUG FIX: was len(feeds_rss) - log size of the merged index, not just the rss part
	log.debug('Feed Index: feeds={} state-log={}', len(feeds), state_log)

	ts, ts_email, ts_new_feed = time.time(), 0, opts.new_feed_time or 0
	check_limit = opts.max_checks or 0
	# NOTE(review): +1 here combined with the "<= 0 after decrement" check below
	#  allows max_checks+1 feeds to be processed - looks off-by-one, confirm intent
	if check_limit: check_limit += 1
	feed_lookup = opts.feed_name and str_norm(opts.feed_name)

	for feed in feeds:
		if feed_lookup:
			if feed_lookup not in str_norm(feed.title): continue
		elif not opts.force and ts < feed.ts_check_next(): continue
		feed_log = get_logger(f'feed.{feed.chan}')

		feed_log.debug('Fetching feed: {} [ {} ]', feed.title, feed.url)
		try: fetch_res = feed_fetch(feed_log, feed.url, feed.etag)
		except FeedFetchError as err:
			feed_log.error('Failed to fetch feed: {}', err)
			fetch_res = None
		# BUG FIX: feed_fetch returns bare None on http-304 (not modified), which
		#  used to crash the old "etag, entries = feed_fetch(...)" tuple-unpacking
		if fetch_res is None: continue
		etag, entries = fetch_res

		ts_since = None
		if not feed.seen_entries:
			# No per-entry state yet - only notify for entries after last check / -t option
			ts_since = feed.ts_last_check or ts_new_feed
		log.debug( 'Processing feed:'
			' entries={} etag={!r} new-since={}', len(entries), etag, ts_since )
		emails, ewma_delay, seen_ids = \
			feed_process(feed_log, feed, entries, ts, ts_since)

		if emails and not opts.dry_run:
			log.debug( 'Sending notification emails:'
				' count={} delay={:.1f}', len(emails), opts.email_delay )
			for e_ts, subject, body in sorted(emails):  # oldest-first for mailbox sorting
				time.sleep(max(0, opts.email_delay - (time.time() - ts_email)))
				sp.run(
					['mail', '-s', subject, opts.email],
					input=body.encode(), timeout=4*60, check=True )
				ts_email = time.time()

		update = dict(
			chan=feed.chan, delay_ewma=ewma_delay,
			ts_last_check=ts, etag=etag, seen_entries=seen_ids )
		if not opts.dry_run: state_update(state_log, ts, feed, update)

		if check_limit:
			check_limit -= 1
			if check_limit <= 0:
				log.debug('Stopping due to -m/--max-checks limit')
				break

	log.debug('Finished')

if __name__ == '__main__': sys.exit(main())
| [
"mk.fraggod@gmail.com"
] | mk.fraggod@gmail.com | |
6fde1ae6e4c647c4fde009202a3e94db05805c30 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_interlinking.py | affa9fb8d987c586a7f8143611a47511eb6e2d67 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py |
from xai.brain.wordbase.verbs._interlink import _INTERLINK
#class header
class _INTERLINKING(_INTERLINK):
    """Inflected verb form "interlinking", based on the "interlink" entry."""

    def __init__(self):
        _INTERLINK.__init__(self)
        self.name = "INTERLINKING"
        self.specie = 'verbs'
        self.basic = "interlink"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
233e04fcf6506d8a3ffcee5406e8061482bbf178 | 1adc05008f0caa9a81cc4fc3a737fcbcebb68995 | /hardhat/recipes/python/lxml.py | e2170e0115cfd92004cf74afa4ff88b7961d1972 | [
"MIT",
"BSD-3-Clause"
] | permissive | stangelandcl/hardhat | 4aa995518697d19b179c64751108963fa656cfca | 1ad0c5dec16728c0243023acb9594f435ef18f9c | refs/heads/master | 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from .base import PipBaseRecipe
class LxmlRecipe(PipBaseRecipe):
    """Build recipe for the lxml python package, pinned with a source checksum."""

    def __init__(self, *args, **kwargs):
        super(LxmlRecipe, self).__init__(*args, **kwargs)
        self.name = 'lxml'
        self.version = '3.8.0'  # < 4 for apache-airflow
        self.sha256 = ('736f72be15caad8116891eb6aa4a078b'
                       '590d231fdc63818c40c21624ac71db96')
"clayton.stangeland@gmail.com"
] | clayton.stangeland@gmail.com |
474c1a564fd776a21c77e39ae1f9a4ba752370ed | 334d0190164d92b53be2844a3afc2826d64b1a6d | /lib/python3.9/site-packages/pymc3/distributions/continuous.py | 234ed935f2b36e07f5756ccb3e825751d486c3f2 | [] | no_license | sou133688/BayesianStatics | f294d7c47cfa56374cf73b520529620dc6120f47 | be9121429494cd8fd231594b029fc2f030d8335f | refs/heads/main | 2023-08-21T15:57:32.980658 | 2021-10-01T00:01:13 | 2021-10-01T00:01:13 | 401,909,680 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137,026 | py | # Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
A collection of common probability distributions for stochastic
nodes in PyMC.
"""
import warnings
import numpy as np
import theano.tensor as tt
from scipy import stats
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.special import expit
from pymc3.distributions import transforms
from pymc3.distributions.dist_math import (
SplineWrapper,
betaln,
bound,
clipped_beta_rvs,
gammaln,
i0e,
incomplete_beta,
log_normal,
logpow,
normal_lccdf,
normal_lcdf,
zvalue,
)
from pymc3.distributions.distribution import Continuous, draw_values, generate_samples
from pymc3.distributions.special import log_i0
from pymc3.math import invlogit, log1mexp, log1pexp, logdiffexp, logit
from pymc3.theanof import floatX
__all__ = [
"Uniform",
"Flat",
"HalfFlat",
"Normal",
"TruncatedNormal",
"Beta",
"Kumaraswamy",
"Exponential",
"Laplace",
"StudentT",
"Cauchy",
"HalfCauchy",
"Gamma",
"Weibull",
"HalfStudentT",
"Lognormal",
"ChiSquared",
"HalfNormal",
"Wald",
"Pareto",
"InverseGamma",
"ExGaussian",
"VonMises",
"SkewNormal",
"Triangular",
"Gumbel",
"Logistic",
"LogitNormal",
"Interpolated",
"Rice",
"Moyal",
"AsymmetricLaplace",
]
class PositiveContinuous(Continuous):
    """Base class for positive continuous distributions"""

    # Default log transform maps the (0, inf) support onto the whole real
    # line, so samplers can work in unconstrained space.
    def __init__(self, transform=transforms.log, *args, **kwargs):
        super().__init__(transform=transform, *args, **kwargs)
class UnitContinuous(Continuous):
    """Base class for continuous distributions on [0,1]"""

    # Default log-odds (logit) transform maps the unit interval onto the
    # whole real line, so samplers can work in unconstrained space.
    def __init__(self, transform=transforms.logodds, *args, **kwargs):
        super().__init__(transform=transform, *args, **kwargs)
class BoundedContinuous(Continuous):
    """Base class for bounded continuous distributions"""

    def __init__(self, transform="auto", lower=None, upper=None, *args, **kwargs):
        # Normalize the given bounds to tensors (or keep None) before
        # picking the matching interval/half-line transform.
        if lower is not None:
            lower = tt.as_tensor_variable(lower)
        if upper is not None:
            upper = tt.as_tensor_variable(upper)

        if transform == "auto":
            has_lower, has_upper = lower is not None, upper is not None
            if has_lower and has_upper:
                transform = transforms.interval(lower, upper)
            elif has_lower:
                transform = transforms.lowerbound(lower)
            elif has_upper:
                transform = transforms.upperbound(upper)
            else:
                transform = None

        super().__init__(transform=transform, *args, **kwargs)
def assert_negative_support(var, label, distname, value=-1e-6):
    """Warn if `var` shows evidence of support on negative values,
    which would make it unsuitable as a scale-type parameter of `distname`.
    No-op when `var` is None or provides no usable logp evidence."""
    if var is None:
        return

    support = False
    try:
        # Transformed distribution
        support = np.isfinite(var.transformed.distribution.dist.logp(value).tag.test_value)
    except AttributeError:
        try:
            # Untransformed distribution
            support = np.isfinite(var.distribution.logp(value).tag.test_value)
        except AttributeError:
            pass  # no direct evidence of non-positive support

    if np.any(support):
        warnings.warn(
            f"The variable specified for {label} has negative support for {distname}, "
            "likely making it unsuitable for this parameter."
        )
def get_tau_sigma(tau=None, sigma=None):
    r"""
    Find precision and standard deviation. The link between the two
    parameterizations is given by the inverse relationship:

    .. math::
        \tau = \frac{1}{\sigma^2}

    Parameters
    ----------
    tau: array-like, optional
    sigma: array-like, optional

    Results
    -------
    Returns tuple (tau, sigma)

    Notes
    -----
    If neither tau nor sigma is provided, returns (1., 1.)
    """
    if tau is not None and sigma is not None:
        raise ValueError("Can't pass both tau and sigma")
    if tau is None and sigma is None:
        tau = sigma = 1.0
    elif tau is None:
        tau = sigma ** -2.0
    else:
        sigma = tau ** -0.5

    # Multiplying by 1.0 casts to float in a way that works for both
    # np.arrays and pure python scalars before the dtype cast.
    return floatX(1.0 * tau), floatX(1.0 * sigma)
class Uniform(BoundedContinuous):
    r"""
    Continuous uniform log-likelihood.

    The pdf of this distribution is

    .. math::

       f(x \mid lower, upper) = \frac{1}{upper-lower}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(-3, 3, 500)
        ls = [0., -2]
        us = [2., 1]
        for l, u in zip(ls, us):
            y = np.zeros(500)
            y[(x<u) & (x>l)] = 1.0/(u-l)
            plt.plot(x, y, label='lower = {}, upper = {}'.format(l, u))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.ylim(0, 1)
        plt.legend(loc=1)
        plt.show()

    ======== =====================================
    Support  :math:`x \in [lower, upper]`
    Mean     :math:`\dfrac{lower + upper}{2}`
    Variance :math:`\dfrac{(upper - lower)^2}{12}`
    ======== =====================================

    Parameters
    ----------
    lower: float
        Lower limit.
    upper: float
        Upper limit.
    """

    def __init__(self, lower=0, upper=1, *args, **kwargs):
        self.lower = lower = tt.as_tensor_variable(floatX(lower))
        self.upper = upper = tt.as_tensor_variable(floatX(upper))
        # Mean and median of a uniform coincide at the interval midpoint
        self.mean = (upper + lower) / 2.0
        self.median = self.mean

        super().__init__(lower=lower, upper=upper, *args, **kwargs)

    def random(self, point=None, size=None):
        """
        Draw random values from Uniform distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        lower, upper = draw_values([self.lower, self.upper], point=point, size=size)
        # scipy's uniform is parameterized by (loc, scale), not (lower, upper)
        return generate_samples(
            stats.uniform.rvs, loc=lower, scale=upper - lower, dist_shape=self.shape, size=size
        )

    def logp(self, value):
        """
        Calculate log-probability of Uniform distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value for which log-probability is calculated.

        Returns
        -------
        TensorVariable
        """
        lower = self.lower
        upper = self.upper
        return bound(-tt.log(upper - lower), value >= lower, value <= upper)

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Uniform distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        lower = self.lower
        upper = self.upper
        # -inf below the support (or for degenerate upper < lower bounds),
        # log-linear inside the interval, 0 (i.e. CDF == 1) above it.
        return tt.switch(
            tt.lt(value, lower) | tt.lt(upper, lower),
            -np.inf,
            tt.switch(
                tt.lt(value, upper),
                tt.log(value - lower) - tt.log(upper - lower),
                0,
            ),
        )
class Flat(Continuous):
    """
    Uninformative log-likelihood that returns 0 regardless of
    the passed value.
    """

    def __init__(self, *args, **kwargs):
        # Improper distribution - only a fixed default/test value is defined
        self._default = 0
        super().__init__(defaults=("_default",), *args, **kwargs)

    def random(self, point=None, size=None):
        """Raises ValueError as it is not possible to sample from Flat distribution

        Parameters
        ----------
        point: dict, optional
        size: int, optional

        Raises
        -------
        ValueError
        """
        raise ValueError("Cannot sample from Flat distribution")

    def logp(self, value):
        """
        Calculate log-probability of Flat distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        return tt.zeros_like(value)

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Flat distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        # CDF of an improper flat prior is only defined at the infinities;
        # log(0.5) is returned as a convention for any finite value.
        return tt.switch(
            tt.eq(value, -np.inf), -np.inf, tt.switch(tt.eq(value, np.inf), 0, tt.log(0.5))
        )
class HalfFlat(PositiveContinuous):
    """Improper flat prior over the positive reals."""

    def __init__(self, *args, **kwargs):
        # Improper distribution - only a fixed default/test value is defined
        self._default = 1
        super().__init__(defaults=("_default",), *args, **kwargs)

    def random(self, point=None, size=None):
        """Raises ValueError as it is not possible to sample from HalfFlat distribution

        Parameters
        ----------
        point: dict, optional
        size: int, optional

        Raises
        -------
        ValueError
        """
        raise ValueError("Cannot sample from HalfFlat distribution")

    def logp(self, value):
        """
        Calculate log-probability of HalfFlat distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        return bound(tt.zeros_like(value), value > 0)

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for HalfFlat distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        # CDF of an improper half-flat prior: -inf for every finite value, 0 at +inf
        return tt.switch(tt.lt(value, np.inf), -np.inf, tt.switch(tt.eq(value, np.inf), 0, -np.inf))
class Normal(Continuous):
    r"""
    Univariate normal log-likelihood.

    The pdf of this distribution is

    .. math::

       f(x \mid \mu, \tau) =
           \sqrt{\frac{\tau}{2\pi}}
           \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}

    Normal distribution can be parameterized either in terms of precision
    or standard deviation. The link between the two parametrizations is
    given by

    .. math::

       \tau = \dfrac{1}{\sigma^2}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(-5, 5, 1000)
        mus = [0., 0., 0., -2.]
        sigmas = [0.4, 1., 2., 0.4]
        for mu, sigma in zip(mus, sigmas):
            pdf = st.norm.pdf(x, mu, sigma)
            plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}'.format(mu, sigma))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ==========================================
    Support  :math:`x \in \mathbb{R}`
    Mean     :math:`\mu`
    Variance :math:`\dfrac{1}{\tau}` or :math:`\sigma^2`
    ======== ==========================================

    Parameters
    ----------
    mu: float
        Mean.
    sigma: float
        Standard deviation (sigma > 0) (only required if tau is not specified).
    tau: float
        Precision (tau > 0) (only required if sigma is not specified).

    Examples
    --------
    .. code-block:: python

        with pm.Model():
            x = pm.Normal('x', mu=0, sigma=10)

        with pm.Model():
            x = pm.Normal('x', mu=0, tau=1/23)
    """

    def __init__(self, mu=0, sigma=None, tau=None, sd=None, **kwargs):
        if sd is not None:
            sigma = sd  # `sd` is a legacy alias for `sigma`
        tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
        self.sigma = self.sd = tt.as_tensor_variable(sigma)
        self.tau = tt.as_tensor_variable(tau)

        # For a symmetric unimodal distribution mean == median == mode == mu
        self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu))
        self.variance = 1.0 / self.tau

        assert_negative_support(sigma, "sigma", "Normal")
        assert_negative_support(tau, "tau", "Normal")

        super().__init__(**kwargs)

    def random(self, point=None, size=None):
        """
        Draw random values from Normal distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        mu, tau, _ = draw_values([self.mu, self.tau, self.sigma], point=point, size=size)
        # scale is derived from tau (sigma drawn only to keep draws consistent)
        return generate_samples(
            stats.norm.rvs, loc=mu, scale=tau ** -0.5, dist_shape=self.shape, size=size
        )

    def logp(self, value):
        """
        Calculate log-probability of Normal distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        sigma = self.sigma
        tau = self.tau
        mu = self.mu

        return bound((-tau * (value - mu) ** 2 + tt.log(tau / np.pi / 2.0)) / 2.0, sigma > 0)

    def _distr_parameters_for_repr(self):
        return ["mu", "sigma"]

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Normal distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        sigma = self.sigma
        return bound(
            normal_lcdf(mu, sigma, value),
            0 < sigma,
        )
class TruncatedNormal(BoundedContinuous):
    r"""
    Univariate truncated normal log-likelihood.

    The pdf of this distribution is

    .. math::

       f(x;\mu ,\sigma ,a,b)={\frac {\phi ({\frac {x-\mu }{\sigma }})}{
           \sigma \left(\Phi ({\frac {b-\mu }{\sigma }})-\Phi ({\frac {a-\mu }{\sigma }})\right)}}

    Truncated normal distribution can be parameterized either in terms of precision
    or standard deviation. The link between the two parametrizations is
    given by

    .. math::

       \tau = \dfrac{1}{\sigma^2}


    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(-10, 10, 1000)
        mus = [0., 0., 0.]
        sigmas = [3.,5.,7.]
        a1 = [-3, -5, -5]
        b1 = [7, 5, 4]
        for mu, sigma, a, b in zip(mus, sigmas,a1,b1):
            an, bn = (a - mu) / sigma, (b - mu) / sigma
            pdf = st.truncnorm.pdf(x, an,bn, loc=mu, scale=sigma)
            plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}, a={}, b={}'.format(mu, sigma, a, b))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ==========================================
    Support  :math:`x \in [a, b]`
    Mean     :math:`\mu +{\frac {\phi (\alpha )-\phi (\beta )}{Z}}\sigma`
    Variance :math:`\sigma ^{2}\left[1+{\frac {\alpha \phi (\alpha )-\beta \phi (\beta )}{Z}}-\left({\frac {\phi (\alpha )-\phi (\beta )}{Z}}\right)^{2}\right]`
    ======== ==========================================

    Parameters
    ----------
    mu: float
        Mean.
    sigma: float
        Standard deviation (sigma > 0).
    lower: float (optional)
        Left bound.
    upper: float (optional)
        Right bound.

    Examples
    --------
    .. code-block:: python

        with pm.Model():
            x = pm.TruncatedNormal('x', mu=0, sigma=10, lower=0)

        with pm.Model():
            x = pm.TruncatedNormal('x', mu=0, sigma=10, upper=1)

        with pm.Model():
            x = pm.TruncatedNormal('x', mu=0, sigma=10, lower=0, upper=1)

    """

    def __init__(
        self,
        mu=0,
        sigma=None,
        tau=None,
        lower=None,
        upper=None,
        transform="auto",
        sd=None,
        *args,
        **kwargs,
    ):
        if sd is not None:
            sigma = sd  # `sd` is a legacy alias for `sigma`
        tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
        self.sigma = self.sd = tt.as_tensor_variable(sigma)
        self.tau = tt.as_tensor_variable(tau)
        # *_check keep None for "no bound", while lower/upper substitute +-inf
        # so downstream tensor math always has numeric bounds to work with
        self.lower_check = tt.as_tensor_variable(floatX(lower)) if lower is not None else lower
        self.upper_check = tt.as_tensor_variable(floatX(upper)) if upper is not None else upper
        self.lower = (
            tt.as_tensor_variable(floatX(lower))
            if lower is not None
            else tt.as_tensor_variable(-np.inf)
        )
        self.upper = (
            tt.as_tensor_variable(floatX(upper))
            if upper is not None
            else tt.as_tensor_variable(np.inf)
        )
        self.mu = tt.as_tensor_variable(floatX(mu))

        # Pick a default/test value guaranteed to lie inside the support
        if self.lower_check is None and self.upper_check is None:
            self._defaultval = mu
        elif self.lower_check is None and self.upper_check is not None:
            self._defaultval = self.upper - 1.0
        elif self.lower_check is not None and self.upper_check is None:
            self._defaultval = self.lower + 1.0
        else:
            self._defaultval = (self.lower + self.upper) / 2

        assert_negative_support(sigma, "sigma", "TruncatedNormal")
        assert_negative_support(tau, "tau", "TruncatedNormal")

        super().__init__(
            defaults=("_defaultval",),
            transform=transform,
            lower=lower,
            upper=upper,
            *args,
            **kwargs,
        )

    def random(self, point=None, size=None):
        """
        Draw random values from TruncatedNormal distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        mu, sigma, lower, upper = draw_values(
            [self.mu, self.sigma, self.lower, self.upper], point=point, size=size
        )
        return generate_samples(
            self._random,
            mu=mu,
            sigma=sigma,
            lower=lower,
            upper=upper,
            dist_shape=self.shape,
            size=size,
        )

    def _random(self, mu, sigma, lower, upper, size):
        """Wrapper around stats.truncnorm.rvs that converts TruncatedNormal's
        parametrization to scipy.truncnorm. All parameter arrays should have
        been broadcasted properly by generate_samples at this point and size is
        the scipy.rvs representation.
        """
        return stats.truncnorm.rvs(
            a=(lower - mu) / sigma, b=(upper - mu) / sigma, loc=mu, scale=sigma, size=size
        )

    def logp(self, value):
        """
        Calculate log-probability of TruncatedNormal distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        sigma = self.sigma

        # Truncated logp = unbounded normal logp minus the log of the
        # probability mass remaining inside [lower, upper]
        norm = self._normalization()
        logp = Normal.dist(mu=mu, sigma=sigma).logp(value) - norm

        bounds = [sigma > 0]
        if self.lower_check is not None:
            bounds.append(value >= self.lower)
        if self.upper_check is not None:
            bounds.append(value <= self.upper)
        return bound(logp, *bounds)

    def _normalization(self):
        mu, sigma = self.mu, self.sigma

        if self.lower_check is None and self.upper_check is None:
            return 0.0

        if self.lower_check is not None and self.upper_check is not None:
            lcdf_a = normal_lcdf(mu, sigma, self.lower)
            lcdf_b = normal_lcdf(mu, sigma, self.upper)
            lsf_a = normal_lccdf(mu, sigma, self.lower)
            lsf_b = normal_lccdf(mu, sigma, self.upper)

            # Use the numerically safer of CDF/complementary-CDF differences
            # depending on which tail the interval sits in
            return tt.switch(self.lower > 0, logdiffexp(lsf_a, lsf_b), logdiffexp(lcdf_b, lcdf_a))

        if self.lower_check is not None:
            return normal_lccdf(mu, sigma, self.lower)
        else:
            return normal_lcdf(mu, sigma, self.upper)

    def _distr_parameters_for_repr(self):
        return ["mu", "sigma", "lower", "upper"]
class HalfNormal(PositiveContinuous):
r"""
Half-normal log-likelihood.
The pdf of this distribution is
.. math::
f(x \mid \tau) =
\sqrt{\frac{2\tau}{\pi}}
\exp\left(\frac{-x^2 \tau}{2}\right)
f(x \mid \sigma) =
\sqrt{\frac{2}{\pi\sigma^2}}
\exp\left(\frac{-x^2}{2\sigma^2}\right)
.. note::
The parameters ``sigma``/``tau`` (:math:`\sigma`/:math:`\tau`) refer to
the standard deviation/precision of the unfolded normal distribution, for
the standard deviation of the half-normal distribution, see below. For
the half-normal, they are just two parameterisation :math:`\sigma^2
\equiv \frac{1}{\tau}` of a scale parameter
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
plt.style.use('seaborn-darkgrid')
x = np.linspace(0, 5, 200)
for sigma in [0.4, 1., 2.]:
pdf = st.halfnorm.pdf(x, scale=sigma)
plt.plot(x, pdf, label=r'$\sigma$ = {}'.format(sigma))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ==========================================
Support :math:`x \in [0, \infty)`
Mean :math:`\sqrt{\dfrac{2}{\tau \pi}}` or :math:`\dfrac{\sigma \sqrt{2}}{\sqrt{\pi}}`
Variance :math:`\dfrac{1}{\tau}\left(1 - \dfrac{2}{\pi}\right)` or :math:`\sigma^2\left(1 - \dfrac{2}{\pi}\right)`
======== ==========================================
Parameters
----------
sigma: float
Scale parameter :math:`sigma` (``sigma`` > 0) (only required if ``tau`` is not specified).
tau: float
Precision :math:`tau` (tau > 0) (only required if sigma is not specified).
Examples
--------
.. code-block:: python
with pm.Model():
x = pm.HalfNormal('x', sigma=10)
with pm.Model():
x = pm.HalfNormal('x', tau=1/15)
"""
def __init__(self, sigma=None, tau=None, sd=None, *args, **kwargs):
if sd is not None:
sigma = sd
super().__init__(*args, **kwargs)
tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma)
self.tau = tau = tt.as_tensor_variable(tau)
self.mean = tt.sqrt(2 / (np.pi * self.tau))
self.variance = (1.0 - 2 / np.pi) / self.tau
assert_negative_support(tau, "tau", "HalfNormal")
assert_negative_support(sigma, "sigma", "HalfNormal")
def random(self, point=None, size=None):
"""
Draw random values from HalfNormal distribution.
Parameters
----------
point: dict, optional
Dict of variable values on which random values are to be
conditioned (uses default point if not specified).
size: int, optional
Desired size of random sample (returns one sample if not
specified).
Returns
-------
array
"""
sigma = draw_values([self.sigma], point=point, size=size)[0]
return generate_samples(
stats.halfnorm.rvs, loc=0.0, scale=sigma, dist_shape=self.shape, size=size
)
def logp(self, value):
    """
    Calculate log-probability of HalfNormal distribution at specified value.

    Parameters
    ----------
    value: numeric
        Value(s) for which log-probability is calculated. If the log probabilities for multiple
        values are desired the values must be provided in a numpy array or theano tensor

    Returns
    -------
    TensorVariable
    """
    tau = self.tau
    sigma = self.sigma
    # log pdf = 0.5 * log(2 * tau / pi) - tau * value**2 / 2, on value >= 0.
    logdensity = 0.5 * tt.log(tau * 2.0 / np.pi) - 0.5 * tau * value ** 2
    return bound(
        logdensity,
        value >= 0,
        tau > 0,
        sigma > 0,
    )
def _distr_parameters_for_repr(self):
    # Only ``sigma`` is shown in the distribution's string representation;
    # ``tau`` is its derived counterpart and would be redundant.
    return ["sigma"]
def logcdf(self, value):
    """
    Compute the log of the cumulative distribution function for HalfNormal distribution
    at the specified value.

    Parameters
    ----------
    value: numeric or np.ndarray or theano.tensor
        Value(s) for which log CDF is calculated. If the log CDF for multiple
        values are desired the values must be provided in a numpy array or theano tensor.

    Returns
    -------
    TensorVariable
    """
    sigma = self.sigma
    # Standardized value; the half-normal CDF is erf(z / sqrt(2)).
    z = zvalue(value, mu=0, sigma=sigma)
    logcdf_expr = tt.log1p(-tt.erfc(z / tt.sqrt(2.0)))
    return bound(
        logcdf_expr,
        value >= 0,
        sigma > 0,
    )
class Wald(PositiveContinuous):
    r"""
    Wald log-likelihood.

    The pdf of this distribution is

    .. math::

       f(x \mid \mu, \lambda) =
           \left(\frac{\lambda}{2\pi}\right)^{1/2} x^{-3/2}
           \exp\left\{
               -\frac{\lambda}{2x}\left(\frac{x-\mu}{\mu}\right)^2
           \right\}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 3, 500)
        mus = [1., 1., 1., 3.]
        lams = [1., .2, 3., 1.]
        for mu, lam in zip(mus, lams):
            pdf = st.invgauss.pdf(x, mu/lam, scale=lam)
            plt.plot(x, pdf, label=r'$\mu$ = {}, $\lambda$ = {}'.format(mu, lam))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== =============================
    Support  :math:`x \in (0, \infty)`
    Mean     :math:`\mu`
    Variance :math:`\dfrac{\mu^3}{\lambda}`
    ======== =============================

    Wald distribution can be parameterized either in terms of lam or phi.
    The link between the two parametrizations is given by

    .. math::

       \phi = \dfrac{\lambda}{\mu}

    Parameters
    ----------
    mu: float, optional
        Mean of the distribution (mu > 0).
    lam: float, optional
        Relative precision (lam > 0).
    phi: float, optional
        Alternative shape parameter (phi > 0).
    alpha: float, optional
        Shift/location parameter (alpha >= 0).

    Notes
    -----
    To instantiate the distribution specify any of the following

    - only mu (in this case lam will be 1)
    - mu and lam
    - mu and phi
    - lam and phi

    References
    ----------
    .. [Tweedie1957] Tweedie, M. C. K. (1957).
       Statistical Properties of Inverse Gaussian Distributions I.
       The Annals of Mathematical Statistics, Vol. 28, No. 2, pp. 362-377

    .. [Michael1976] Michael, J. R., Schucany, W. R. and Hass, R. W. (1976).
       Generating Random Variates Using Transformations with Multiple Roots.
       The American Statistician, Vol. 30, No. 2, pp. 88-90

    .. [Giner2016] Göknur Giner, Gordon K. Smyth (2016)
       statmod: Probability Calculations for the Inverse Gaussian Distribution
    """

    def __init__(self, mu=None, lam=None, phi=None, alpha=0.0, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Any of the three supported parametrizations is resolved into the
        # canonical triple (mu, lam, phi).
        mu, lam, phi = self.get_mu_lam_phi(mu, lam, phi)
        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
        self.mu = mu = tt.as_tensor_variable(floatX(mu))
        self.lam = lam = tt.as_tensor_variable(floatX(lam))
        self.phi = phi = tt.as_tensor_variable(floatX(phi))

        self.mean = self.mu + self.alpha
        # Mode of the unshifted inverse Gaussian, translated by alpha.
        self.mode = (
            self.mu * (tt.sqrt(1.0 + (1.5 * self.mu / self.lam) ** 2) - 1.5 * self.mu / self.lam)
            + self.alpha
        )
        self.variance = (self.mu ** 3) / self.lam

        assert_negative_support(phi, "phi", "Wald")
        assert_negative_support(mu, "mu", "Wald")
        assert_negative_support(lam, "lam", "Wald")

    def get_mu_lam_phi(self, mu, lam, phi):
        # Resolve the user-supplied subset of (mu, lam, phi) into all three;
        # see the class docstring for the allowed combinations.
        if mu is None:
            if lam is not None and phi is not None:
                return lam / phi, lam, phi
        else:
            if lam is None:
                if phi is None:
                    return mu, 1.0, 1.0 / mu
                else:
                    return mu, mu * phi, phi
            else:
                if phi is None:
                    return mu, lam, lam / mu

        raise ValueError(
            "Wald distribution must specify either mu only, "
            "mu and lam, mu and phi, or lam and phi."
        )

    def _random(self, mu, lam, alpha, size=None):
        # Sampler of [Michael1976]: transform a chi-square draw through the
        # smaller root, then pick between the two roots with the right
        # probability via a uniform draw.
        v = np.random.normal(size=size) ** 2
        value = (
            mu
            + (mu ** 2) * v / (2.0 * lam)
            - mu / (2.0 * lam) * np.sqrt(4.0 * mu * lam * v + (mu * v) ** 2)
        )
        z = np.random.uniform(size=size)
        # i is -1 (keep root) or +1 (use mu**2 / root) depending on z.
        i = np.floor(z - mu / (mu + value)) * 2 + 1
        value = (value ** -i) * (mu ** (i + 1))
        return value + alpha

    def random(self, point=None, size=None):
        """
        Draw random values from Wald distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        mu, lam, alpha = draw_values([self.mu, self.lam, self.alpha], point=point, size=size)
        return generate_samples(self._random, mu, lam, alpha, dist_shape=self.shape, size=size)

    def logp(self, value):
        """
        Calculate log-probability of Wald distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        lam = self.lam
        alpha = self.alpha
        centered_value = value - alpha
        # value *must* be iid. Otherwise this is wrong.
        return bound(
            logpow(lam / (2.0 * np.pi), 0.5)
            - logpow(centered_value, 1.5)
            - (0.5 * lam / centered_value * ((centered_value - mu) / mu) ** 2),
            # XXX these two are redundant. Please, check.
            value > 0,
            centered_value > 0,
            mu > 0,
            lam > 0,
            alpha >= 0,
        )

    def _distr_parameters_for_repr(self):
        return ["mu", "lam", "alpha"]

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Wald distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        # Distribution parameters
        mu = self.mu
        lam = self.lam
        alpha = self.alpha

        # Shift by alpha without mutating the argument: the original
        # ``value -= alpha`` modified caller-supplied numpy arrays in place.
        value = value - alpha
        q = value / mu
        lam_mu = lam * mu
        r = tt.sqrt(value * lam)

        # CDF of the inverse Gaussian per [Giner2016]:
        # Phi((q-1)/r) + exp(2*lam*mu) * Phi(-(q+1)/r), assembled in log space.
        a = normal_lcdf(0, 1, (q - 1.0) / r)
        b = 2.0 / lam_mu + normal_lcdf(0, 1, -(q + 1.0) / r)

        # Boundary/degenerate cases where the closed form breaks down.
        left_limit = (
            tt.lt(value, 0)
            | (tt.eq(value, 0) & tt.gt(mu, 0) & tt.lt(lam, np.inf))
            | (tt.lt(value, mu) & tt.eq(lam, 0))
        )
        right_limit = (
            tt.eq(value, np.inf)
            | (tt.eq(lam, 0) & tt.gt(value, mu))
            | (tt.gt(value, 0) & tt.eq(lam, np.inf))
        )
        degenerate_dist = (tt.lt(mu, np.inf) & tt.eq(mu, value) & tt.eq(lam, 0)) | (
            tt.eq(value, 0) & tt.eq(lam, np.inf)
        )

        return bound(
            tt.switch(
                ~(right_limit | degenerate_dist),
                a + tt.log1p(tt.exp(b - a)),
                0,
            ),
            ~left_limit,
            0 < mu,
            0 < lam,
            0 <= alpha,
        )
class Beta(UnitContinuous):
    r"""
    Beta log-likelihood.

    The pdf of this distribution is

    .. math::

       f(x \mid \alpha, \beta) =
           \frac{x^{\alpha - 1} (1 - x)^{\beta - 1}}{B(\alpha, \beta)}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 1, 200)
        alphas = [.5, 5., 1., 2., 2.]
        betas = [.5, 1., 3., 2., 5.]
        for a, b in zip(alphas, betas):
            pdf = st.beta.pdf(x, a, b)
            plt.plot(x, pdf, label=r'$\alpha$ = {}, $\beta$ = {}'.format(a, b))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.ylim(0, 4.5)
        plt.legend(loc=9)
        plt.show()

    ======== ==============================================================
    Support  :math:`x \in (0, 1)`
    Mean     :math:`\dfrac{\alpha}{\alpha + \beta}`
    Variance :math:`\dfrac{\alpha \beta}{(\alpha+\beta)^2(\alpha+\beta+1)}`
    ======== ==============================================================

    Beta distribution can be parameterized either in terms of alpha and
    beta or mean and standard deviation. The link between the two
    parametrizations is given by

    .. math::

       \alpha &= \mu \kappa \\
       \beta  &= (1 - \mu) \kappa

       \text{where } \kappa = \frac{\mu(1-\mu)}{\sigma^2} - 1

    Parameters
    ----------
    alpha: float
        alpha > 0.
    beta: float
        beta > 0.
    mu: float
        Alternative mean (0 < mu < 1).
    sigma: float
        Alternative standard deviation (0 < sigma < sqrt(mu * (1 - mu))).

    Notes
    -----
    Beta distribution is a conjugate prior for the parameter :math:`p` of
    the binomial distribution.
    """

    def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # ``sd`` is a backwards-compatible alias for ``sigma``.
        if sd is not None:
            sigma = sd
        # Convert a (mu, sigma) parametrization to (alpha, beta) if needed.
        alpha, beta = self.get_alpha_beta(alpha, beta, mu, sigma)
        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
        self.beta = beta = tt.as_tensor_variable(floatX(beta))

        self.mean = self.alpha / (self.alpha + self.beta)
        self.variance = (
            self.alpha * self.beta / ((self.alpha + self.beta) ** 2 * (self.alpha + self.beta + 1))
        )

        assert_negative_support(alpha, "alpha", "Beta")
        assert_negative_support(beta, "beta", "Beta")

    def get_alpha_beta(self, alpha=None, beta=None, mu=None, sigma=None):
        # Either (alpha, beta) directly, or derived from (mu, sigma) via the
        # method-of-moments relation in the class docstring.
        if (alpha is not None) and (beta is not None):
            pass
        elif (mu is not None) and (sigma is not None):
            kappa = mu * (1 - mu) / sigma ** 2 - 1
            alpha = mu * kappa
            beta = (1 - mu) * kappa
        else:
            raise ValueError(
                "Incompatible parameterization. Either use alpha "
                "and beta, or mu and sigma to specify distribution."
            )
        return alpha, beta

    def random(self, point=None, size=None):
        """
        Draw random values from Beta distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size)
        # clipped_beta_rvs keeps draws strictly inside (0, 1) — presumably to
        # avoid 0/1 samples breaking downstream logp; confirm helper contract.
        return generate_samples(clipped_beta_rvs, alpha, beta, dist_shape=self.shape, size=size)

    def logp(self, value):
        """
        Calculate log-probability of Beta distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        alpha = self.alpha
        beta = self.beta

        logval = tt.log(value)
        log1pval = tt.log1p(-value)
        # The switches skip 0 * (+/-inf) = nan at the support boundaries when
        # the corresponding exponent is exactly 1.
        logp = (
            tt.switch(tt.eq(alpha, 1), 0, (alpha - 1) * logval)
            + tt.switch(tt.eq(beta, 1), 0, (beta - 1) * log1pval)
            - betaln(alpha, beta)
        )

        return bound(logp, value >= 0, value <= 1, alpha > 0, beta > 0)

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Beta distribution
        at the specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log CDF is calculated.

        Returns
        -------
        TensorVariable
        """
        # incomplete_beta function can only handle scalar values (see #4342)
        if np.ndim(value):
            raise TypeError(
                f"Beta.logcdf expects a scalar value but received a {np.ndim(value)}-dimensional object."
            )

        a = self.alpha
        b = self.beta

        return bound(
            tt.switch(
                tt.lt(value, 1),
                tt.log(incomplete_beta(a, b, value)),
                0,
            ),
            0 <= value,
            0 < a,
            0 < b,
        )

    def _distr_parameters_for_repr(self):
        return ["alpha", "beta"]
class Kumaraswamy(UnitContinuous):
    r"""
    Kumaraswamy log-likelihood.

    The pdf of this distribution is

    .. math::

       f(x \mid a, b) =
           abx^{a-1}(1-x^a)^{b-1}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 1, 200)
        a_s = [.5, 5., 1., 2., 2.]
        b_s = [.5, 1., 3., 2., 5.]
        for a, b in zip(a_s, b_s):
            pdf = a * b * x ** (a - 1) * (1 - x ** a) ** (b - 1)
            plt.plot(x, pdf, label=r'$a$ = {}, $b$ = {}'.format(a, b))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.ylim(0, 3.)
        plt.legend(loc=9)
        plt.show()

    ======== ==============================================================
    Support  :math:`x \in (0, 1)`
    Mean     :math:`b B(1 + \tfrac{1}{a}, b)`
    Variance :math:`b B(1 + \tfrac{2}{a}, b) - (b B(1 + \tfrac{1}{a}, b))^2`
    ======== ==============================================================

    Parameters
    ----------
    a: float
        a > 0.
    b: float
        b > 0.
    """

    def __init__(self, a, b, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.a = a = tt.as_tensor_variable(floatX(a))
        self.b = b = tt.as_tensor_variable(floatX(b))

        # Mean = b * B(1 + 1/a, b); computed in log space via gammaln for
        # numerical stability, then exponentiated.
        ln_mean = tt.log(b) + tt.gammaln(1 + 1 / a) + tt.gammaln(b) - tt.gammaln(1 + 1 / a + b)
        self.mean = tt.exp(ln_mean)
        # Second raw moment = b * B(1 + 2/a, b); variance = E[X^2] - E[X]^2.
        ln_2nd_raw_moment = (
            tt.log(b) + tt.gammaln(1 + 2 / a) + tt.gammaln(b) - tt.gammaln(1 + 2 / a + b)
        )
        self.variance = tt.exp(ln_2nd_raw_moment) - self.mean ** 2

        assert_negative_support(a, "a", "Kumaraswamy")
        assert_negative_support(b, "b", "Kumaraswamy")

    def _random(self, a, b, size=None):
        # Inverse-CDF sampling: the Kumaraswamy quantile function has the
        # closed form (1 - (1 - u)**(1/b))**(1/a).
        u = np.random.uniform(size=size)
        return (1 - (1 - u) ** (1 / b)) ** (1 / a)

    def random(self, point=None, size=None):
        """
        Draw random values from Kumaraswamy distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        a, b = draw_values([self.a, self.b], point=point, size=size)
        return generate_samples(self._random, a, b, dist_shape=self.shape, size=size)

    def logp(self, value):
        """
        Calculate log-probability of Kumaraswamy distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        a = self.a
        b = self.b

        logp = tt.log(a) + tt.log(b) + (a - 1) * tt.log(value) + (b - 1) * tt.log(1 - value ** a)

        return bound(logp, value >= 0, value <= 1, a > 0, b > 0)
class Exponential(PositiveContinuous):
    r"""
    Exponential log-likelihood.

    The pdf of this distribution is

    .. math::

       f(x \mid \lambda) = \lambda \exp\left\{ -\lambda x \right\}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 3, 100)
        for lam in [0.5, 1., 2.]:
            pdf = st.expon.pdf(x, scale=1.0/lam)
            plt.plot(x, pdf, label=r'$\lambda$ = {}'.format(lam))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ============================
    Support  :math:`x \in [0, \infty)`
    Mean     :math:`\dfrac{1}{\lambda}`
    Variance :math:`\dfrac{1}{\lambda^2}`
    ======== ============================

    Parameters
    ----------
    lam: float
        Rate or inverse scale (lam > 0)
    """

    def __init__(self, lam, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.lam = lam = tt.as_tensor_variable(floatX(lam))
        self.mean = 1.0 / self.lam
        # Median of Exp(lam) is ln(2) / lam.
        self.median = self.mean * tt.log(2)
        self.mode = tt.zeros_like(self.lam)

        self.variance = self.lam ** -2

        assert_negative_support(lam, "lam", "Exponential")

    def random(self, point=None, size=None):
        """
        Draw random values from Exponential distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        lam = draw_values([self.lam], point=point, size=size)[0]
        # numpy parameterizes by scale = 1 / rate.
        return generate_samples(
            np.random.exponential, scale=1.0 / lam, dist_shape=self.shape, size=size
        )

    def logp(self, value):
        """
        Calculate log-probability of Exponential distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        lam = self.lam
        return bound(tt.log(lam) - lam * value, value >= 0, lam > 0)

    def logcdf(self, value):
        r"""
        Compute the log of cumulative distribution function for the Exponential distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        value = floatX(tt.as_tensor(value))
        lam = self.lam
        a = lam * value
        # log CDF = log(1 - exp(-lam * value)); assumes log1mexp(x) computes
        # log(1 - exp(-x)) — TODO confirm the helper's sign convention.
        return bound(
            log1mexp(a),
            0 <= value,
            0 <= lam,
        )
class Laplace(Continuous):
    r"""
    Laplace log-likelihood.

    The pdf of this distribution is

    .. math::

       f(x \mid \mu, b) =
           \frac{1}{2b} \exp \left\{ - \frac{|x - \mu|}{b} \right\}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(-10, 10, 1000)
        mus = [0., 0., 0., -5.]
        bs = [1., 2., 4., 4.]
        for mu, b in zip(mus, bs):
            pdf = st.laplace.pdf(x, loc=mu, scale=b)
            plt.plot(x, pdf, label=r'$\mu$ = {}, $b$ = {}'.format(mu, b))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ========================
    Support  :math:`x \in \mathbb{R}`
    Mean     :math:`\mu`
    Variance :math:`2 b^2`
    ======== ========================

    Parameters
    ----------
    mu: float
        Location parameter.
    b: float
        Scale parameter (b > 0).
    """

    def __init__(self, mu, b, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.b = b = tt.as_tensor_variable(floatX(b))
        # The Laplace distribution is symmetric, so mean, median and mode
        # all coincide with the location parameter mu.
        self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu))

        self.variance = 2 * self.b ** 2

        assert_negative_support(b, "b", "Laplace")

    def random(self, point=None, size=None):
        """
        Draw random values from Laplace distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        mu, b = draw_values([self.mu, self.b], point=point, size=size)
        return generate_samples(np.random.laplace, mu, b, dist_shape=self.shape, size=size)

    def logp(self, value):
        """
        Calculate log-probability of Laplace distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        b = self.b

        # Guard on the scale parameter, consistent with the other logp
        # methods in this module; invalid b now yields -inf instead of a
        # meaningless finite value.
        return bound(
            -tt.log(2 * b) - abs(value - mu) / b,
            0 < b,
        )

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Laplace distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        a = self.mu
        b = self.b
        y = (value - a) / b
        return bound(
            tt.switch(
                tt.le(value, a),
                # Left of the location: CDF = 0.5 * exp(y).
                tt.log(0.5) + y,
                # Right of the location: CDF = 1 - 0.5 * exp(-y); use log1p
                # only where exp(-y) is small enough to make it worthwhile.
                tt.switch(
                    tt.gt(y, 1),
                    tt.log1p(-0.5 * tt.exp(-y)),
                    tt.log(1 - 0.5 * tt.exp(-y)),
                ),
            ),
            0 < b,
        )
class AsymmetricLaplace(Continuous):
    r"""
    Asymmetric-Laplace log-likelihood.

    The pdf of this distribution is

    .. math::

        f(x|b, \kappa, \mu) =
            \left(\frac{b}{\kappa + 1/\kappa}\right)\,e^{-(x-\mu)\,b\,s\,\kappa^{s}}

    where

    .. math::

        s = sgn(x-\mu)

    ======== ========================
    Support  :math:`x \in \mathbb{R}`
    Mean     :math:`\mu - \frac{\kappa - 1/\kappa}{b}`
    Variance :math:`\frac{1+\kappa^{4}}{b^2\kappa^2 }`
    ======== ========================

    Parameters
    ----------
    b: float
        Scale parameter (b > 0)
    kappa: float
        Symmetry parameter (kappa > 0)
    mu: float
        Location parameter

    See Also:
    --------
    `Reference <https://en.wikipedia.org/wiki/Asymmetric_Laplace_distribution>`_
    """

    def __init__(self, b, kappa, mu=0, *args, **kwargs):
        self.b = tt.as_tensor_variable(floatX(b))
        self.kappa = tt.as_tensor_variable(floatX(kappa))
        self.mu = mu = tt.as_tensor_variable(floatX(mu))

        self.mean = self.mu - (self.kappa - 1 / self.kappa) / b
        self.variance = (1 + self.kappa ** 4) / (self.kappa ** 2 * self.b ** 2)

        assert_negative_support(kappa, "kappa", "AsymmetricLaplace")
        assert_negative_support(b, "b", "AsymmetricLaplace")

        super().__init__(*args, **kwargs)

    def _random(self, b, kappa, mu, size=None):
        # Inverse-CDF sampling: ``switch`` is the probability mass left of
        # mu; each uniform draw is mapped through the corresponding branch
        # of the quantile function.
        u = np.random.uniform(size=size)
        switch = kappa ** 2 / (1 + kappa ** 2)
        non_positive_x = mu + kappa * np.log(u * (1 / switch)) / b
        positive_x = mu - np.log((1 - u) * (1 + kappa ** 2)) / (kappa * b)
        draws = non_positive_x * (u <= switch) + positive_x * (u > switch)
        return draws

    def random(self, point=None, size=None):
        """
        Draw random samples from this distribution, using the inverse CDF method.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size:int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        b, kappa, mu = draw_values([self.b, self.kappa, self.mu], point=point, size=size)
        return generate_samples(self._random, b, kappa, mu, dist_shape=self.shape, size=size)

    def logp(self, value):
        """
        Calculate log-probability of Asymmetric-Laplace distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        # Center the value; tt.sgn(value) then plays the role of s in the pdf.
        value = value - self.mu
        return bound(
            tt.log(self.b / (self.kappa + (self.kappa ** -1)))
            + (-value * self.b * tt.sgn(value) * (self.kappa ** tt.sgn(value))),
            0 < self.b,
            0 < self.kappa,
        )
class Lognormal(PositiveContinuous):
    r"""
    Log-normal log-likelihood.

    Distribution of any random variable whose logarithm is normally
    distributed. A variable might be modeled as log-normal if it can
    be thought of as the multiplicative product of many small
    independent factors.

    The pdf of this distribution is

    .. math::

       f(x \mid \mu, \tau) =
           \frac{1}{x} \sqrt{\frac{\tau}{2\pi}}
           \exp\left\{ -\frac{\tau}{2} (\ln(x)-\mu)^2 \right\}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 3, 100)
        mus = [0., 0., 0.]
        sigmas = [.25, .5, 1.]
        for mu, sigma in zip(mus, sigmas):
            pdf = st.lognorm.pdf(x, sigma, scale=np.exp(mu))
            plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}'.format(mu, sigma))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== =========================================================================
    Support  :math:`x \in [0, \infty)`
    Mean     :math:`\exp\{\mu + \frac{1}{2\tau}\}`
    Variance :math:`(\exp\{\frac{1}{\tau}\} - 1) \times \exp\{2\mu + \frac{1}{\tau}\}`
    ======== =========================================================================

    Parameters
    ----------
    mu: float
        Location parameter.
    sigma: float
        Standard deviation. (sigma > 0). (only required if tau is not specified).
    tau: float
        Scale parameter (tau > 0). (only required if sigma is not specified).

    Examples
    --------
    .. code-block:: python

        # Example to show that we pass in only ``sigma`` or ``tau`` but not both.
        with pm.Model():
            x = pm.Lognormal('x', mu=2, sigma=30)

        with pm.Model():
            x = pm.Lognormal('x', mu=2, tau=1/100)
    """

    def __init__(self, mu=0, sigma=None, tau=None, sd=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # ``sd`` is a backwards-compatible alias for ``sigma``.
        if sd is not None:
            sigma = sd

        tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)

        self.mu = mu = tt.as_tensor_variable(floatX(mu))
        self.tau = tau = tt.as_tensor_variable(tau)
        self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma)

        # Standard log-normal moments in terms of (mu, tau).
        self.mean = tt.exp(self.mu + 1.0 / (2 * self.tau))
        self.median = tt.exp(self.mu)
        self.mode = tt.exp(self.mu - 1.0 / self.tau)
        self.variance = (tt.exp(1.0 / self.tau) - 1) * tt.exp(2 * self.mu + 1.0 / self.tau)

        assert_negative_support(tau, "tau", "Lognormal")
        assert_negative_support(sigma, "sigma", "Lognormal")

    def _random(self, mu, tau, size=None):
        # exp of a normal draw with mean mu and sd 1/sqrt(tau).
        samples = np.random.normal(size=size)
        return np.exp(mu + (tau ** -0.5) * samples)

    def random(self, point=None, size=None):
        """
        Draw random values from Lognormal distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        mu, tau = draw_values([self.mu, self.tau], point=point, size=size)
        return generate_samples(self._random, mu, tau, dist_shape=self.shape, size=size)

    def logp(self, value):
        """
        Calculate log-probability of Lognormal distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        tau = self.tau
        # NOTE(review): no explicit ``value > 0`` bound here; tt.log(value)
        # produces nan for non-positive values — confirm callers only
        # evaluate logp on the support.
        return bound(
            -0.5 * tau * (tt.log(value) - mu) ** 2
            + 0.5 * tt.log(tau / (2.0 * np.pi))
            - tt.log(value),
            tau > 0,
        )

    def _distr_parameters_for_repr(self):
        return ["mu", "tau"]

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Lognormal distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        sigma = self.sigma
        tau = self.tau

        # CDF of Lognormal(mu, sigma) at x equals the normal CDF at log(x).
        return bound(
            normal_lcdf(mu, sigma, tt.log(value)),
            0 < value,
            0 < tau,
        )
class StudentT(Continuous):
    r"""
    Student's T log-likelihood.

    Describes a normal variable whose precision is gamma distributed.
    If only nu parameter is passed, this specifies a standard (central)
    Student's T.

    The pdf of this distribution is

    .. math::

       f(x|\mu,\lambda,\nu) =
           \frac{\Gamma(\frac{\nu + 1}{2})}{\Gamma(\frac{\nu}{2})}
           \left(\frac{\lambda}{\pi\nu}\right)^{\frac{1}{2}}
           \left[1+\frac{\lambda(x-\mu)^2}{\nu}\right]^{-\frac{\nu+1}{2}}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(-8, 8, 200)
        mus = [0., 0., -2., -2.]
        sigmas = [1., 1., 1., 2.]
        dfs = [1., 5., 5., 5.]
        for mu, sigma, df in zip(mus, sigmas, dfs):
            pdf = st.t.pdf(x, df, loc=mu, scale=sigma)
            plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}, $\nu$ = {}'.format(mu, sigma, df))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ========================
    Support  :math:`x \in \mathbb{R}`
    ======== ========================

    Parameters
    ----------
    nu: float
        Degrees of freedom, also known as normality parameter (nu > 0).
    mu: float
        Location parameter.
    sigma: float
        Scale parameter (sigma > 0). Converges to the standard deviation as nu
        increases. (only required if lam is not specified)
    lam: float
        Scale parameter (lam > 0). Converges to the precision as nu
        increases. (only required if sigma is not specified)

    Examples
    --------
    .. code-block:: python

        with pm.Model():
            x = pm.StudentT('x', nu=15, mu=0, sigma=10)

        with pm.Model():
            x = pm.StudentT('x', nu=15, mu=0, lam=1/23)
    """

    def __init__(self, nu, mu=0, lam=None, sigma=None, sd=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # ``sd`` is a backwards-compatible alias for ``sigma``.
        if sd is not None:
            sigma = sd
        self.nu = nu = tt.as_tensor_variable(floatX(nu))
        # get_tau_sigma converts between precision-like lam and scale sigma.
        lam, sigma = get_tau_sigma(tau=lam, sigma=sigma)
        self.lam = lam = tt.as_tensor_variable(lam)
        self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma)
        # Symmetric distribution: mean, median and mode coincide with mu.
        self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu)

        # Variance is finite only for nu > 2; infinite otherwise.
        self.variance = tt.switch((nu > 2) * 1, (1 / self.lam) * (nu / (nu - 2)), np.inf)

        assert_negative_support(lam, "lam (sigma)", "StudentT")
        assert_negative_support(nu, "nu", "StudentT")

    def random(self, point=None, size=None):
        """
        Draw random values from StudentT distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        nu, mu, lam = draw_values([self.nu, self.mu, self.lam], point=point, size=size)
        # scipy's t takes a scale, hence lam ** -0.5.
        return generate_samples(
            stats.t.rvs, nu, loc=mu, scale=lam ** -0.5, dist_shape=self.shape, size=size
        )

    def logp(self, value):
        """
        Calculate log-probability of StudentT distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        nu = self.nu
        mu = self.mu
        lam = self.lam
        sigma = self.sigma

        return bound(
            gammaln((nu + 1.0) / 2.0)
            + 0.5 * tt.log(lam / (nu * np.pi))
            - gammaln(nu / 2.0)
            - (nu + 1.0) / 2.0 * tt.log1p(lam * (value - mu) ** 2 / nu),
            lam > 0,
            nu > 0,
            sigma > 0,
        )

    def _distr_parameters_for_repr(self):
        return ["nu", "mu", "lam"]

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Student's T distribution
        at the specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log CDF is calculated.

        Returns
        -------
        TensorVariable
        """
        # incomplete_beta function can only handle scalar values (see #4342)
        if np.ndim(value):
            raise TypeError(
                f"StudentT.logcdf expects a scalar value but received a {np.ndim(value)}-dimensional object."
            )

        nu = self.nu
        mu = self.mu
        sigma = self.sigma
        lam = self.lam
        # Standardize, then express the t CDF through the regularized
        # incomplete beta function I_x(nu/2, nu/2).
        t = (value - mu) / sigma
        sqrt_t2_nu = tt.sqrt(t ** 2 + nu)
        x = (t + sqrt_t2_nu) / (2.0 * sqrt_t2_nu)
        return bound(
            tt.log(incomplete_beta(nu / 2.0, nu / 2.0, x)),
            0 < nu,
            0 < sigma,
            0 < lam,
        )
class Pareto(Continuous):
r"""
Pareto log-likelihood.
Often used to characterize wealth distribution, or other examples of the
80/20 rule.
The pdf of this distribution is
.. math::
f(x \mid \alpha, m) = \frac{\alpha m^{\alpha}}{x^{\alpha+1}}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
plt.style.use('seaborn-darkgrid')
x = np.linspace(0, 4, 1000)
alphas = [1., 2., 5., 5.]
ms = [1., 1., 1., 2.]
for alpha, m in zip(alphas, ms):
pdf = st.pareto.pdf(x, alpha, scale=m)
plt.plot(x, pdf, label=r'$\alpha$ = {}, m = {}'.format(alpha, m))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== =============================================================
Support :math:`x \in [m, \infty)`
Mean :math:`\dfrac{\alpha m}{\alpha - 1}` for :math:`\alpha \ge 1`
Variance :math:`\dfrac{m \alpha}{(\alpha - 1)^2 (\alpha - 2)}`
for :math:`\alpha > 2`
======== =============================================================
Parameters
----------
alpha: float
Shape parameter (alpha > 0).
m: float
Scale parameter (m > 0).
"""
def __init__(self, alpha, m, transform="lowerbound", *args, **kwargs):
self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
self.m = m = tt.as_tensor_variable(floatX(m))
self.mean = tt.switch(tt.gt(alpha, 1), alpha * m / (alpha - 1.0), np.inf)
self.median = m * 2.0 ** (1.0 / alpha)
self.variance = tt.switch(
tt.gt(alpha, 2), (alpha * m ** 2) / ((alpha - 2.0) * (alpha - 1.0) ** 2), np.inf
)
assert_negative_support(alpha, "alpha", "Pareto")
assert_negative_support(m, "m", "Pareto")
if transform == "lowerbound":
transform = transforms.lowerbound(self.m)
super().__init__(transform=transform, *args, **kwargs)
def _random(self, alpha, m, size=None):
u = np.random.uniform(size=size)
return m * (1.0 - u) ** (-1.0 / alpha)
def random(self, point=None, size=None):
"""
Draw random values from Pareto distribution.
Parameters
----------
point: dict, optional
Dict of variable values on which random values are to be
conditioned (uses default point if not specified).
size: int, optional
Desired size of random sample (returns one sample if not
specified).
Returns
-------
array
"""
alpha, m = draw_values([self.alpha, self.m], point=point, size=size)
return generate_samples(self._random, alpha, m, dist_shape=self.shape, size=size)
def logp(self, value):
    """
    Calculate log-probability of Pareto distribution at specified value.

    Parameters
    ----------
    value: numeric
        Value(s) for which log-probability is calculated. If the log probabilities for multiple
        values are desired the values must be provided in a numpy array or theano tensor

    Returns
    -------
    TensorVariable
    """
    alpha, m = self.alpha, self.m
    # log f(x) = log(alpha) + alpha*log(m) - (alpha + 1)*log(x)
    log_density = tt.log(alpha) + logpow(m, alpha) - logpow(value, alpha + 1)
    return bound(
        log_density,
        value >= m,
        alpha > 0,
        m > 0,
    )
def _distr_parameters_for_repr(self):
    # Parameter names shown in the model's string/LaTeX representation.
    return ["alpha", "m"]
def logcdf(self, value):
    """
    Compute the log of the cumulative distribution function for Pareto distribution
    at the specified value.

    Parameters
    ----------
    value: numeric or np.ndarray or theano.tensor
        Value(s) for which log CDF is calculated. If the log CDF for multiple
        values are desired the values must be provided in a numpy array or theano tensor.

    Returns
    -------
    TensorVariable
    """
    m = self.m
    alpha = self.alpha
    # The Pareto CDF is 1 - (m / value)**alpha for value >= m.
    arg = (m / value) ** alpha
    return bound(
        tt.switch(
            # log1p(-arg) is numerically accurate when arg is tiny, where
            # log(1 - arg) would lose precision; use plain log(1 - arg)
            # otherwise.
            tt.le(arg, 1e-5),
            tt.log1p(-arg),
            tt.log(1 - arg),
        ),
        m <= value,
        0 < alpha,
        0 < m,
    )
class Cauchy(Continuous):
    r"""
    Cauchy log-likelihood.

    Also known as the Lorentz or the Breit-Wigner distribution.

    The pdf of this distribution is

    .. math::

       f(x \mid \alpha, \beta) =
           \frac{1}{\pi \beta [1 + (\frac{x-\alpha}{\beta})^2]}

    ======== ========================
    Support  :math:`x \in \mathbb{R}`
    Mode     :math:`\alpha`
    Mean     undefined
    Variance undefined
    ======== ========================

    Parameters
    ----------
    alpha: float
        Location parameter.
    beta: float
        Scale parameter (beta > 0).
    """

    def __init__(self, alpha, beta, *args, **kwargs):
        super().__init__(*args, **kwargs)
        location = tt.as_tensor_variable(floatX(alpha))
        # The location parameter is simultaneously the median and the mode;
        # the mean and variance of a Cauchy are undefined.
        self.median = self.mode = self.alpha = location
        self.beta = tt.as_tensor_variable(floatX(beta))
        assert_negative_support(beta, "beta", "Cauchy")

    def _random(self, alpha, beta, size=None):
        # Inverse-CDF sampling: the quantile function of the Cauchy is
        # alpha + beta * tan(pi * (u - 1/2)) for u ~ Uniform(0, 1).
        probs = np.random.uniform(size=size)
        return alpha + beta * np.tan(np.pi * (probs - 0.5))

    def random(self, point=None, size=None):
        """
        Draw random values from Cauchy distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size)
        return generate_samples(self._random, alpha, beta, dist_shape=self.shape, size=size)

    def logp(self, value):
        """
        Calculate log-probability of Cauchy distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        alpha, beta = self.alpha, self.beta
        # log f(x) = -log(pi) - log(beta) - log(1 + ((x - alpha)/beta)**2)
        scaled_sq_dev = ((value - alpha) / beta) ** 2
        log_density = -tt.log(np.pi) - tt.log(beta) - tt.log1p(scaled_sq_dev)
        return bound(log_density, beta > 0)

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Cauchy distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        alpha, beta = self.alpha, self.beta
        # CDF(x) = 1/2 + arctan((x - alpha)/beta) / pi
        cdf = 0.5 + tt.arctan((value - alpha) / beta) / np.pi
        return bound(
            tt.log(cdf),
            0 < beta,
        )
class HalfCauchy(PositiveContinuous):
    r"""
    Half-Cauchy log-likelihood.

    The pdf of this distribution is

    .. math::

       f(x \mid \beta) = \frac{2}{\pi \beta [1 + (\frac{x}{\beta})^2]}

    ======== ========================
    Support  :math:`x \in [0, \infty)`
    Mode     0
    Mean     undefined
    Variance undefined
    ======== ========================

    Parameters
    ----------
    beta: float
        Scale parameter (beta > 0).
    """

    def __init__(self, beta, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert_negative_support(beta, "beta", "HalfCauchy")
        # Mode is at the origin; beta is also the median of the half-Cauchy.
        self.mode = tt.as_tensor_variable(0)
        self.median = self.beta = tt.as_tensor_variable(floatX(beta))

    def _random(self, beta, size=None):
        # Sample a standard Cauchy via its quantile function and fold it onto
        # the positive half-line.
        probs = np.random.uniform(size=size)
        return beta * np.abs(np.tan(np.pi * (probs - 0.5)))

    def random(self, point=None, size=None):
        """
        Draw random values from HalfCauchy distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        (beta,) = draw_values([self.beta], point=point, size=size)
        return generate_samples(self._random, beta, dist_shape=self.shape, size=size)

    def logp(self, value):
        """
        Calculate log-probability of HalfCauchy distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        beta = self.beta
        # log f(x) = log(2) - log(pi) - log(beta) - log(1 + (x/beta)**2)
        log_density = (
            tt.log(2) - tt.log(np.pi) - tt.log(beta) - tt.log1p((value / beta) ** 2)
        )
        return bound(
            log_density,
            value >= 0,
            beta > 0,
        )

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for HalfCauchy distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        beta = self.beta
        # CDF(x) = (2/pi) * arctan(x / beta)
        return bound(
            tt.log(2 * tt.arctan(value / beta) / np.pi),
            0 <= value,
            0 < beta,
        )
class Gamma(PositiveContinuous):
    r"""
    Gamma log-likelihood.

    Represents the sum of alpha exponentially distributed random variables,
    each of which has mean beta.

    The pdf of this distribution is

    .. math::

       f(x \mid \alpha, \beta) =
           \frac{\beta^{\alpha}x^{\alpha-1}e^{-\beta x}}{\Gamma(\alpha)}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 20, 200)
        alphas = [1., 2., 3., 7.5]
        betas = [.5, .5, 1., 1.]
        for a, b in zip(alphas, betas):
            pdf = st.gamma.pdf(x, a, scale=1.0/b)
            plt.plot(x, pdf, label=r'$\alpha$ = {}, $\beta$ = {}'.format(a, b))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ===============================
    Support  :math:`x \in (0, \infty)`
    Mean     :math:`\dfrac{\alpha}{\beta}`
    Variance :math:`\dfrac{\alpha}{\beta^2}`
    ======== ===============================

    Gamma distribution can be parameterized either in terms of alpha and
    beta or mean and standard deviation. The link between the two
    parametrizations is given by

    .. math::

       \alpha &= \frac{\mu^2}{\sigma^2} \\
       \beta &= \frac{\mu}{\sigma^2}

    Parameters
    ----------
    alpha: float
        Shape parameter (alpha > 0).
    beta: float
        Rate parameter (beta > 0).
    mu: float
        Alternative shape parameter (mu > 0).
    sigma: float
        Alternative scale parameter (sigma > 0).
    """

    def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # `sd` is kept as a backward-compatible alias for `sigma`.
        if sd is not None:
            sigma = sd
        # Normalize whichever parametrization was supplied into (alpha, beta).
        alpha, beta = self.get_alpha_beta(alpha, beta, mu, sigma)
        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
        self.beta = beta = tt.as_tensor_variable(floatX(beta))
        self.mean = alpha / beta
        # The mode (alpha - 1)/beta is only valid for alpha >= 1; clip at 0.
        self.mode = tt.maximum((alpha - 1) / beta, 0)
        self.variance = alpha / beta ** 2
        assert_negative_support(alpha, "alpha", "Gamma")
        assert_negative_support(beta, "beta", "Gamma")

    def get_alpha_beta(self, alpha=None, beta=None, mu=None, sigma=None):
        # Resolve the two supported parametrizations: either (alpha, beta)
        # directly, or (mu, sigma) converted via moment matching
        # alpha = mu^2/sigma^2, beta = mu/sigma^2.
        if (alpha is not None) and (beta is not None):
            pass
        elif (mu is not None) and (sigma is not None):
            alpha = mu ** 2 / sigma ** 2
            beta = mu / sigma ** 2
        else:
            raise ValueError(
                "Incompatible parameterization. Either use "
                "alpha and beta, or mu and sigma to specify "
                "distribution."
            )
        return alpha, beta

    def random(self, point=None, size=None):
        """
        Draw random values from Gamma distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size)
        # scipy's gamma is parameterized by scale = 1/rate, hence 1.0/beta.
        return generate_samples(
            stats.gamma.rvs, alpha, scale=1.0 / beta, dist_shape=self.shape, size=size
        )

    def logp(self, value):
        """
        Calculate log-probability of Gamma distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        alpha = self.alpha
        beta = self.beta
        # log f(x) = alpha*log(beta) + (alpha - 1)*log(x) - beta*x - log(Gamma(alpha))
        return bound(
            -gammaln(alpha) + logpow(beta, alpha) - beta * value + logpow(value, alpha - 1),
            value >= 0,
            alpha > 0,
            beta > 0,
        )

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Gamma distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        alpha = self.alpha
        beta = self.beta
        # Avoid C-assertion when the gammainc function is called with invalid values (#4340)
        # The bound() conditions below still mark such points as -inf; the
        # switches only keep gammainc's arguments inside its valid domain.
        safe_alpha = tt.switch(tt.lt(alpha, 0), 0, alpha)
        safe_beta = tt.switch(tt.lt(beta, 0), 0, beta)
        safe_value = tt.switch(tt.lt(value, 0), 0, value)
        return bound(
            tt.log(tt.gammainc(safe_alpha, safe_beta * safe_value)),
            0 <= value,
            0 < alpha,
            0 < beta,
        )

    def _distr_parameters_for_repr(self):
        # Parameter names shown in the model's string/LaTeX representation.
        return ["alpha", "beta"]
class InverseGamma(PositiveContinuous):
    r"""
    Inverse gamma log-likelihood, the reciprocal of the gamma distribution.

    The pdf of this distribution is

    .. math::

       f(x \mid \alpha, \beta) =
           \frac{\beta^{\alpha}}{\Gamma(\alpha)} x^{-\alpha - 1}
           \exp\left(\frac{-\beta}{x}\right)

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 3, 500)
        alphas = [1., 2., 3., 3.]
        betas = [1., 1., 1., .5]
        for a, b in zip(alphas, betas):
            pdf = st.invgamma.pdf(x, a, scale=b)
            plt.plot(x, pdf, label=r'$\alpha$ = {}, $\beta$ = {}'.format(a, b))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ======================================================
    Support  :math:`x \in (0, \infty)`
    Mean     :math:`\dfrac{\beta}{\alpha-1}` for :math:`\alpha > 1`
    Variance :math:`\dfrac{\beta^2}{(\alpha-1)^2(\alpha - 2)}`
             for :math:`\alpha > 2`
    ======== ======================================================

    Parameters
    ----------
    alpha: float
        Shape parameter (alpha > 0).
    beta: float
        Scale parameter (beta > 0).
    mu: float
        Alternative shape parameter (mu > 0).
    sigma: float
        Alternative scale parameter (sigma > 0).
    """

    def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs):
        # Default to the mode as the test value: unlike the mean, the mode
        # beta/(alpha + 1) is finite for every valid alpha.
        super().__init__(*args, defaults=("mode",), **kwargs)
        # `sd` is kept as a backward-compatible alias for `sigma`.
        if sd is not None:
            sigma = sd
        alpha, beta = InverseGamma._get_alpha_beta(alpha, beta, mu, sigma)
        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
        self.beta = beta = tt.as_tensor_variable(floatX(beta))
        self.mean = self._calculate_mean()
        self.mode = beta / (alpha + 1.0)
        # Variance is finite only for alpha > 2.
        self.variance = tt.switch(
            tt.gt(alpha, 2), (beta ** 2) / ((alpha - 2) * (alpha - 1.0) ** 2), np.inf
        )
        assert_negative_support(alpha, "alpha", "InverseGamma")
        assert_negative_support(beta, "beta", "InverseGamma")

    def _calculate_mean(self):
        # Mean is beta / (alpha - 1) when alpha > 1, otherwise infinite.
        m = self.beta / (self.alpha - 1.0)
        try:
            # Scalar path: (alpha > 1) * m is m when the mean exists and 0
            # otherwise; `or np.inf` then maps the falsy 0 onto inf.
            # NOTE(review): this relies on truthiness of the product and on a
            # ValueError for array inputs — fragile but long-standing; confirm
            # before touching.
            return (self.alpha > 1) * m or np.inf
        except ValueError:  # alpha is an array
            m[self.alpha <= 1] = np.inf
            return m

    @staticmethod
    def _get_alpha_beta(alpha, beta, mu, sigma):
        # Resolve the two supported parametrizations.  With only alpha given,
        # beta defaults to 1; with (mu, sigma), moment matching gives the
        # conversion used below.
        if alpha is not None:
            if beta is not None:
                pass
            else:
                beta = 1
        elif (mu is not None) and (sigma is not None):
            alpha = (2 * sigma ** 2 + mu ** 2) / sigma ** 2
            beta = mu * (mu ** 2 + sigma ** 2) / sigma ** 2
        else:
            raise ValueError(
                "Incompatible parameterization. Either use "
                "alpha and (optionally) beta, or mu and sigma to specify "
                "distribution."
            )
        return alpha, beta

    def random(self, point=None, size=None):
        """
        Draw random values from InverseGamma distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size)
        return generate_samples(
            stats.invgamma.rvs, a=alpha, scale=beta, dist_shape=self.shape, size=size
        )

    def logp(self, value):
        """
        Calculate log-probability of InverseGamma distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        alpha = self.alpha
        beta = self.beta
        # log f(x) = alpha*log(beta) - log(Gamma(alpha)) - beta/x - (alpha + 1)*log(x)
        return bound(
            logpow(beta, alpha) - gammaln(alpha) - beta / value + logpow(value, -alpha - 1),
            value > 0,
            alpha > 0,
            beta > 0,
        )

    def _distr_parameters_for_repr(self):
        # Parameter names shown in the model's string/LaTeX representation.
        return ["alpha", "beta"]

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Inverse Gamma distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        alpha = self.alpha
        beta = self.beta
        # Avoid C-assertion when the gammaincc function is called with invalid values (#4340)
        # The bound() conditions below still mark such points as -inf; the
        # switches only keep gammaincc's arguments inside its valid domain.
        safe_alpha = tt.switch(tt.lt(alpha, 0), 0, alpha)
        safe_beta = tt.switch(tt.lt(beta, 0), 0, beta)
        safe_value = tt.switch(tt.lt(value, 0), 0, value)
        return bound(
            tt.log(tt.gammaincc(safe_alpha, safe_beta / safe_value)),
            0 <= value,
            0 < alpha,
            0 < beta,
        )
class ChiSquared(Gamma):
    r"""
    :math:`\chi^2` log-likelihood.

    A special case of the Gamma distribution, obtained with
    :math:`\alpha = \nu / 2` and :math:`\beta = 1 / 2`.

    The pdf of this distribution is

    .. math::

       f(x \mid \nu) = \frac{x^{(\nu-2)/2}e^{-x/2}}{2^{\nu/2}\Gamma(\nu/2)}

    ======== ===============================
    Support  :math:`x \in [0, \infty)`
    Mean     :math:`\nu`
    Variance :math:`2 \nu`
    ======== ===============================

    Parameters
    ----------
    nu: int
        Degrees of freedom (nu > 0).
    """

    def __init__(self, nu, *args, **kwargs):
        nu = tt.as_tensor_variable(floatX(nu))
        self.nu = nu
        # Delegate everything (moments, logp, random, ...) to the equivalent
        # Gamma(alpha=nu/2, beta=1/2) distribution.
        super().__init__(alpha=nu / 2.0, beta=0.5, *args, **kwargs)
class Weibull(PositiveContinuous):
    r"""
    Weibull log-likelihood.

    The pdf of this distribution is

    .. math::

       f(x \mid \alpha, \beta) =
           \frac{\alpha x^{\alpha - 1}
           \exp(-(\frac{x}{\beta})^{\alpha})}{\beta^\alpha}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 3, 200)
        alphas = [.5, 1., 1.5, 5., 5.]
        betas = [1., 1., 1., 1., 2]
        for a, b in zip(alphas, betas):
            pdf = st.weibull_min.pdf(x, a, scale=b)
            plt.plot(x, pdf, label=r'$\alpha$ = {}, $\beta$ = {}'.format(a, b))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.ylim(0, 2.5)
        plt.legend(loc=1)
        plt.show()

    ======== ====================================================
    Support  :math:`x \in [0, \infty)`
    Mean     :math:`\beta \Gamma(1 + \frac{1}{\alpha})`
    Variance :math:`\beta^2 \Gamma(1 + \frac{2}{\alpha}) - \mu^2`
    ======== ====================================================

    Parameters
    ----------
    alpha: float
        Shape parameter (alpha > 0).
    beta: float
        Scale parameter (beta > 0).
    """

    def __init__(self, alpha, beta, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
        self.beta = beta = tt.as_tensor_variable(floatX(beta))
        # Mean = beta * Gamma(1 + 1/alpha); exp(gammaln(.)) evaluates Gamma(.)
        # while staying in log-space internally.
        self.mean = beta * tt.exp(gammaln(1 + 1.0 / alpha))
        # Median = beta * ln(2)**(1/alpha): set the CDF
        # 1 - exp(-(x/beta)**alpha) to 1/2 and solve for x.  The previous
        # expression, beta * tt.exp(gammaln(tt.log(2))) ** (1.0 / alpha),
        # computed beta * Gamma(ln 2)**(1/alpha), which is incorrect.
        self.median = beta * tt.log(2) ** (1.0 / alpha)
        # Variance = beta**2 * Gamma(1 + 2/alpha) - mean**2.
        self.variance = beta ** 2 * tt.exp(gammaln(1 + 2.0 / alpha)) - self.mean ** 2
        self.mode = tt.switch(
            alpha >= 1, beta * ((alpha - 1) / alpha) ** (1 / alpha), 0
        )  # Reference: https://en.wikipedia.org/wiki/Weibull_distribution
        assert_negative_support(alpha, "alpha", "Weibull")
        assert_negative_support(beta, "beta", "Weibull")

    def random(self, point=None, size=None):
        """
        Draw random values from Weibull distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size)

        def _random(a, b, size=None):
            # Inverse-CDF sampling: b * (-log U)**(1/a) for U ~ Uniform(0, 1).
            return b * (-np.log(np.random.uniform(size=size))) ** (1 / a)

        return generate_samples(_random, alpha, beta, dist_shape=self.shape, size=size)

    def logp(self, value):
        """
        Calculate log-probability of Weibull distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        alpha = self.alpha
        beta = self.beta
        # log f(x) = log(alpha) - log(beta) + (alpha-1)*log(x/beta) - (x/beta)**alpha
        return bound(
            tt.log(alpha)
            - tt.log(beta)
            + (alpha - 1) * tt.log(value / beta)
            - (value / beta) ** alpha,
            value >= 0,
            alpha > 0,
            beta > 0,
        )

    def logcdf(self, value):
        r"""
        Compute the log of the cumulative distribution function for Weibull distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        alpha = self.alpha
        beta = self.beta
        a = (value / beta) ** alpha
        # CDF = 1 - exp(-a), so log CDF = log1mexp(a) (= log(1 - exp(-a))),
        # which is numerically stable for both small and large a.
        return bound(
            log1mexp(a),
            0 <= value,
            0 < alpha,
            0 < beta,
        )
class HalfStudentT(PositiveContinuous):
    r"""
    Half Student's T log-likelihood

    The pdf of this distribution is

    .. math::

        f(x \mid \sigma,\nu) =
            \frac{2\;\Gamma\left(\frac{\nu+1}{2}\right)}
            {\Gamma\left(\frac{\nu}{2}\right)\sqrt{\nu\pi\sigma^2}}
            \left(1+\frac{1}{\nu}\frac{x^2}{\sigma^2}\right)^{-\frac{\nu+1}{2}}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 5, 200)
        sigmas = [1., 1., 2., 1.]
        nus = [.5, 1., 1., 30.]
        for sigma, nu in zip(sigmas, nus):
            pdf = st.t.pdf(x, df=nu, loc=0, scale=sigma)
            plt.plot(x, pdf, label=r'$\sigma$ = {}, $\nu$ = {}'.format(sigma, nu))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ========================
    Support  :math:`x \in [0, \infty)`
    ======== ========================

    Parameters
    ----------
    nu: float
        Degrees of freedom, also known as normality parameter (nu > 0).
    sigma: float
        Scale parameter (sigma > 0). Converges to the standard deviation as nu
        increases. (only required if lam is not specified)
    lam: float
        Scale parameter (lam > 0). Converges to the precision as nu
        increases. (only required if sigma is not specified)

    Examples
    --------
    .. code-block:: python

        # Only pass in one of lam or sigma, but not both.
        with pm.Model():
            x = pm.HalfStudentT('x', sigma=10, nu=10)

        with pm.Model():
            x = pm.HalfStudentT('x', lam=4, nu=10)
    """

    def __init__(self, nu=1, sigma=None, lam=None, sd=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # `sd` is kept as a backward-compatible alias for `sigma`.
        if sd is not None:
            sigma = sd

        self.mode = tt.as_tensor_variable(0)
        # get_tau_sigma fills in whichever of (lam, sigma) was not supplied.
        lam, sigma = get_tau_sigma(lam, sigma)
        # NOTE(review): median is set to sigma here; the exact median of a
        # half-Student-t is sigma times a nu-dependent t quantile — confirm
        # this is an intended approximation.
        self.median = tt.as_tensor_variable(sigma)
        self.sigma = self.sd = tt.as_tensor_variable(sigma)
        self.lam = tt.as_tensor_variable(lam)
        self.nu = nu = tt.as_tensor_variable(floatX(nu))

        assert_negative_support(sigma, "sigma", "HalfStudentT")
        assert_negative_support(lam, "lam", "HalfStudentT")
        assert_negative_support(nu, "nu", "HalfStudentT")

    def random(self, point=None, size=None):
        """
        Draw random values from HalfStudentT distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        nu, sigma = draw_values([self.nu, self.sigma], point=point, size=size)
        # Sample a full Student-t and fold it onto the positive half-line.
        return np.abs(
            generate_samples(stats.t.rvs, nu, loc=0, scale=sigma, dist_shape=self.shape, size=size)
        )

    def logp(self, value):
        """
        Calculate log-probability of HalfStudentT distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        nu = self.nu
        sigma = self.sigma
        lam = self.lam

        # Standard Student-t log-density plus log(2) for folding onto x >= 0.
        return bound(
            tt.log(2)
            + gammaln((nu + 1.0) / 2.0)
            - gammaln(nu / 2.0)
            - 0.5 * tt.log(nu * np.pi * sigma ** 2)
            - (nu + 1.0) / 2.0 * tt.log1p(value ** 2 / (nu * sigma ** 2)),
            sigma > 0,
            lam > 0,
            nu > 0,
            value >= 0,
        )

    def _distr_parameters_for_repr(self):
        # Parameter names shown in the model's string/LaTeX representation.
        return ["nu", "lam"]
class ExGaussian(Continuous):
    r"""
    Exponentially modified Gaussian log-likelihood.

    Results from the convolution of a normal distribution with an exponential
    distribution.

    The pdf of this distribution is

    .. math::

       f(x \mid \mu, \sigma, \tau) =
           \frac{1}{\nu}\;
           \exp\left\{\frac{\mu-x}{\nu}+\frac{\sigma^2}{2\nu^2}\right\}
           \Phi\left(\frac{x-\mu}{\sigma}-\frac{\sigma}{\nu}\right)

    where :math:`\Phi` is the cumulative distribution function of the
    standard normal distribution.

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(-6, 9, 200)
        mus = [0., -2., 0., -3.]
        sigmas = [1., 1., 3., 1.]
        nus = [1., 1., 1., 4.]
        for mu, sigma, nu in zip(mus, sigmas, nus):
            pdf = st.exponnorm.pdf(x, nu/sigma, loc=mu, scale=sigma)
            plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}, $\nu$ = {}'.format(mu, sigma, nu))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ========================
    Support  :math:`x \in \mathbb{R}`
    Mean     :math:`\mu + \nu`
    Variance :math:`\sigma^2 + \nu^2`
    ======== ========================

    Parameters
    ----------
    mu: float
        Mean of the normal distribution.
    sigma: float
        Standard deviation of the normal distribution (sigma > 0).
    nu: float
        Mean of the exponential distribution (nu > 0).

    References
    ----------
    .. [Rigby2005] Rigby R.A. and Stasinopoulos D.M. (2005).
        "Generalized additive models for location, scale and shape"
        Applied Statististics., 54, part 3, pp 507-554.

    .. [Lacouture2008] Lacouture, Y. and Couseanou, D. (2008).
        "How to use MATLAB to fit the ex-Gaussian and other probability
        functions to a distribution of response times".
        Tutorials in Quantitative Methods for Psychology,
        Vol. 4, No. 1, pp 35-45.
    """

    def __init__(self, mu=0.0, sigma=None, nu=None, sd=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # `sd` is kept as a backward-compatible alias for `sigma`.
        if sd is not None:
            sigma = sd

        self.mu = mu = tt.as_tensor_variable(floatX(mu))
        self.sigma = self.sd = sigma = tt.as_tensor_variable(floatX(sigma))
        self.nu = nu = tt.as_tensor_variable(floatX(nu))
        # Moments of the convolution: sum of the normal and exponential parts.
        self.mean = mu + nu
        self.variance = (sigma ** 2) + (nu ** 2)

        assert_negative_support(sigma, "sigma", "ExGaussian")
        assert_negative_support(nu, "nu", "ExGaussian")

    def random(self, point=None, size=None):
        """
        Draw random values from ExGaussian distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        mu, sigma, nu = draw_values([self.mu, self.sigma, self.nu], point=point, size=size)

        def _random(mu, sigma, nu, size=None):
            # Sample by construction: normal component plus an independent
            # exponential component with mean nu.
            return np.random.normal(mu, sigma, size=size) + np.random.exponential(
                scale=nu, size=size
            )

        return generate_samples(_random, mu, sigma, nu, dist_shape=self.shape, size=size)

    def logp(self, value):
        """
        Calculate log-probability of ExGaussian distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        sigma = self.sigma
        nu = self.nu

        # Algorithm is adapted from dexGAUS.R from gamlss:
        # when nu is very small relative to sigma the exponential component is
        # negligible and the exact expression is numerically unstable, so fall
        # back to a plain normal log-density.
        return bound(
            tt.switch(
                tt.gt(nu, 0.05 * sigma),
                (
                    -tt.log(nu)
                    + (mu - value) / nu
                    + 0.5 * (sigma / nu) ** 2
                    + normal_lcdf(mu + (sigma ** 2) / nu, sigma, value)
                ),
                log_normal(value, mean=mu, sigma=sigma),
            ),
            0 < sigma,
            0 < nu,
        )

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for ExGaussian distribution
        at the specified value.

        References
        ----------
        .. [Rigby2005] R.A. Rigby (2005).
           "Generalized additive models for location, scale and shape"
           https://doi.org/10.1111/j.1467-9876.2005.00510.x

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        sigma = self.sigma
        nu = self.nu

        # Algorithm is adapted from pexGAUS.R from gamlss; same small-nu
        # fallback to the plain normal log-CDF as in logp above.
        return bound(
            tt.switch(
                tt.gt(nu, 0.05 * sigma),
                logdiffexp(
                    normal_lcdf(mu, sigma, value),
                    (
                        (mu - value) / nu
                        + 0.5 * (sigma / nu) ** 2
                        + normal_lcdf(mu + (sigma ** 2) / nu, sigma, value)
                    ),
                ),
                normal_lcdf(mu, sigma, value),
            ),
            0 < sigma,
            0 < nu,
        )

    def _distr_parameters_for_repr(self):
        # Parameter names shown in the model's string/LaTeX representation.
        return ["mu", "sigma", "nu"]
class VonMises(Continuous):
    r"""
    Univariate VonMises log-likelihood.

    The pdf of this distribution is

    .. math::

       f(x \mid \mu, \kappa) =
           \frac{e^{\kappa\cos(x-\mu)}}{2\pi I_0(\kappa)}

    where :math:`I_0` is the modified Bessel function of order 0.

    ======== ==========================================
    Support  :math:`x \in [-\pi, \pi]`
    Mean     :math:`\mu`
    Variance :math:`1-\frac{I_1(\kappa)}{I_0(\kappa)}`
    ======== ==========================================

    Parameters
    ----------
    mu: float
        Mean.
    kappa: float
        Concentration (:math:`\frac{1}{\kappa}` is analogous to :math:`\sigma^2`).
    """

    def __init__(self, mu=0.0, kappa=None, transform="circular", *args, **kwargs):
        # The circular transform (default) keeps sampled values inside the
        # wrapped support [-pi, pi].
        if transform == "circular":
            transform = transforms.Circular()
        super().__init__(transform=transform, *args, **kwargs)
        location = tt.as_tensor_variable(floatX(mu))
        # The distribution is symmetric and unimodal about mu.
        self.mean = self.median = self.mode = self.mu = location
        self.kappa = kappa = tt.as_tensor_variable(floatX(kappa))

        assert_negative_support(kappa, "kappa", "VonMises")

    def random(self, point=None, size=None):
        """
        Draw random values from VonMises distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        mu, kappa = draw_values([self.mu, self.kappa], point=point, size=size)
        return generate_samples(
            stats.vonmises.rvs, loc=mu, kappa=kappa, dist_shape=self.shape, size=size
        )

    def logp(self, value):
        """
        Calculate log-probability of VonMises distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        mu, kappa = self.mu, self.kappa
        # log f(x) = kappa*cos(x - mu) - log(2*pi) - log(I0(kappa));
        # cos is even, so cos(mu - value) == cos(value - mu).
        log_density = kappa * tt.cos(mu - value) - (tt.log(2 * np.pi) + log_i0(kappa))
        return bound(
            log_density,
            kappa > 0,
            value >= -np.pi,
            value <= np.pi,
        )

    def _distr_parameters_for_repr(self):
        # Parameter names shown in the model's string/LaTeX representation.
        return ["mu", "kappa"]
class SkewNormal(Continuous):
r"""
Univariate skew-normal log-likelihood.
The pdf of this distribution is
.. math::
f(x \mid \mu, \tau, \alpha) =
2 \Phi((x-\mu)\sqrt{\tau}\alpha) \phi(x,\mu,\tau)
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
plt.style.use('seaborn-darkgrid')
x = np.linspace(-4, 4, 200)
for alpha in [-6, 0, 6]:
pdf = st.skewnorm.pdf(x, alpha, loc=0, scale=1)
plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}, $\alpha$ = {}'.format(0, 1, alpha))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ==========================================
Support :math:`x \in \mathbb{R}`
Mean :math:`\mu + \sigma \sqrt{\frac{2}{\pi}} \frac {\alpha }{{\sqrt {1+\alpha ^{2}}}}`
Variance :math:`\sigma^2 \left( 1-\frac{2\alpha^2}{(\alpha^2+1) \pi} \right)`
======== ==========================================
Skew-normal distribution can be parameterized either in terms of precision
or standard deviation. The link between the two parametrizations is
given by
.. math::
\tau = \dfrac{1}{\sigma^2}
Parameters
----------
mu: float
Location parameter.
sigma: float
Scale parameter (sigma > 0).
tau: float
Alternative scale parameter (tau > 0).
alpha: float
Skewness parameter.
Notes
-----
When alpha=0 we recover the Normal distribution and mu becomes the mean,
tau the precision and sigma the standard deviation. In the limit of alpha
approaching plus/minus infinite we get a half-normal distribution.
"""
def __init__(self, mu=0.0, sigma=None, tau=None, alpha=1, sd=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if sd is not None:
sigma = sd
tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
self.mu = mu = tt.as_tensor_variable(floatX(mu))
self.tau = tt.as_tensor_variable(tau)
self.sigma = self.sd = tt.as_tensor_variable(sigma)
self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
self.mean = mu + self.sigma * (2 / np.pi) ** 0.5 * alpha / (1 + alpha ** 2) ** 0.5
self.variance = self.sigma ** 2 * (1 - (2 * alpha ** 2) / ((1 + alpha ** 2) * np.pi))
assert_negative_support(tau, "tau", "SkewNormal")
assert_negative_support(sigma, "sigma", "SkewNormal")
def random(self, point=None, size=None):
"""
Draw random values from SkewNormal distribution.
Parameters
----------
point: dict, optional
Dict of variable values on which random values are to be
conditioned (uses default point if not specified).
size: int, optional
Desired size of random sample (returns one sample if not
specified).
Returns
-------
array
"""
mu, tau, _, alpha = draw_values(
[self.mu, self.tau, self.sigma, self.alpha], point=point, size=size
)
return generate_samples(
stats.skewnorm.rvs, a=alpha, loc=mu, scale=tau ** -0.5, dist_shape=self.shape, size=size
)
def logp(self, value):
    """
    Calculate log-probability of SkewNormal distribution at specified value.

    Parameters
    ----------
    value: numeric
        Value(s) for which log-probability is calculated. If the log probabilities for multiple
        values are desired the values must be provided in a numpy array or theano tensor

    Returns
    -------
    TensorVariable
    """
    mu, tau, sigma, alpha = self.mu, self.tau, self.sigma, self.alpha
    # log pdf = log 2*Phi(alpha*z) + log phi(z), with z the standardised value;
    # 2*Phi(alpha*z) is written via erf, and log phi(z) is expanded in terms of tau.
    skew_term = tt.log(1 + tt.erf(((value - mu) * tt.sqrt(tau) * alpha) / tt.sqrt(2)))
    gauss_term = (-tau * (value - mu) ** 2 + tt.log(tau / np.pi / 2.0)) / 2.0
    # bound() returns -inf wherever the parameter constraints fail.
    return bound(skew_term + gauss_term, tau > 0, sigma > 0)
def _distr_parameters_for_repr(self):
    # Parameters shown in the model's string representation; tau/sd are
    # redundant aliases of sigma and are therefore omitted.
    return ["mu", "sigma", "alpha"]
class Triangular(BoundedContinuous):
    r"""
    Continuous Triangular log-likelihood
    The pdf of this distribution is
    .. math::
       \begin{cases}
         0 & \text{for } x < a, \\
         \frac{2(x-a)}{(b-a)(c-a)} & \text{for } a \le x < c, \\[4pt]
         \frac{2}{b-a} & \text{for } x = c, \\[4pt]
         \frac{2(b-x)}{(b-a)(b-c)} & \text{for } c < x \le b, \\[4pt]
         0 & \text{for } b < x.
       \end{cases}
    .. plot::
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(-2, 10, 500)
        lowers = [0., -1, 2]
        cs = [2., 0., 6.5]
        uppers = [4., 1, 8]
        for lower, c, upper in zip(lowers, cs, uppers):
            scale = upper - lower
            c_ = (c - lower) / scale
            pdf = st.triang.pdf(x, loc=lower, c=c_, scale=scale)
            plt.plot(x, pdf, label='lower = {}, c = {}, upper = {}'.format(lower,
                                                                           c,
                                                                           upper))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()
    ======== ============================================================================
    Support  :math:`x \in [lower, upper]`
    Mean     :math:`\dfrac{lower + upper + c}{3}`
    Variance :math:`\dfrac{upper^2 + lower^2 +c^2 - lower*upper - lower*c - upper*c}{18}`
    ======== ============================================================================
    Parameters
    ----------
    lower: float
        Lower limit.
    c: float
        mode
    upper: float
        Upper limit.
    """

    def __init__(self, lower=0, upper=1, c=0.5, *args, **kwargs):
        # NOTE(review): both `mean` and `median` are set to the mode `c`,
        # while the docstring states mean = (lower + upper + c) / 3 — these
        # attributes are only used as a default starting value; confirm
        # before relying on them as exact moments.
        self.median = self.mean = self.c = c = tt.as_tensor_variable(floatX(c))
        self.lower = lower = tt.as_tensor_variable(floatX(lower))
        self.upper = upper = tt.as_tensor_variable(floatX(upper))
        # BoundedContinuous needs lower/upper to build the transform.
        super().__init__(lower=lower, upper=upper, *args, **kwargs)

    def random(self, point=None, size=None):
        """
        Draw random values from Triangular distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        c, lower, upper = draw_values([self.c, self.lower, self.upper], point=point, size=size)
        return generate_samples(
            self._random, c=c, lower=lower, upper=upper, size=size, dist_shape=self.shape
        )

    def _random(self, c, lower, upper, size):
        """Wrapper around stats.triang.rvs that converts Triangular's
        parametrization to scipy.triang. All parameter arrays should have
        been broadcasted properly by generate_samples at this point and size is
        the scipy.rvs representation.
        """
        # scipy parametrizes the mode as a fraction of the support width.
        scale = upper - lower
        return stats.triang.rvs(c=(c - lower) / scale, loc=lower, scale=scale, size=size)

    def logp(self, value):
        """
        Calculate log-probability of Triangular distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        c = self.c
        lower = self.lower
        upper = self.upper
        # Rising branch below the mode, falling branch at/above it;
        # at value == c both branches agree (pdf = 2 / (upper - lower)).
        return bound(
            tt.switch(
                tt.lt(value, c),
                tt.log(2 * (value - lower) / ((upper - lower) * (c - lower))),
                tt.log(2 * (upper - value) / ((upper - lower) * (upper - c))),
            ),
            lower <= value,
            value <= upper,
        )

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Triangular distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        c = self.c
        lower = self.lower
        upper = self.upper
        # Piecewise CDF: 0 below `lower`, quadratic pieces on either side of
        # the mode, and exactly 1 (log-CDF 0) at/above `upper`.
        return bound(
            tt.switch(
                tt.le(value, lower),
                -np.inf,
                tt.switch(
                    tt.le(value, c),
                    tt.log(((value - lower) ** 2) / ((upper - lower) * (c - lower))),
                    tt.switch(
                        tt.lt(value, upper),
                        # log1p keeps precision as the CDF approaches 1.
                        tt.log1p(-((upper - value) ** 2) / ((upper - lower) * (upper - c))),
                        0,
                    ),
                ),
            ),
            lower <= upper,
        )
class Gumbel(Continuous):
    r"""
    Univariate Gumbel log-likelihood
    The pdf of this distribution is
    .. math::
       f(x \mid \mu, \beta) = \frac{1}{\beta}e^{-(z + e^{-z})}
    where
    .. math::
        z = \frac{x - \mu}{\beta}.
    .. plot::
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(-10, 20, 200)
        mus = [0., 4., -1.]
        betas = [2., 2., 4.]
        for mu, beta in zip(mus, betas):
            pdf = st.gumbel_r.pdf(x, loc=mu, scale=beta)
            plt.plot(x, pdf, label=r'$\mu$ = {}, $\beta$ = {}'.format(mu, beta))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()
    ======== ==========================================
    Support  :math:`x \in \mathbb{R}`
    Mean     :math:`\mu + \beta\gamma`, where :math:`\gamma` is the Euler-Mascheroni constant
    Variance :math:`\frac{\pi^2}{6} \beta^2`
    ======== ==========================================
    Parameters
    ----------
    mu: float
        Location parameter.
    beta: float
        Scale parameter (beta > 0).
    """

    def __init__(self, mu=0, beta=1.0, **kwargs):
        self.mu = tt.as_tensor_variable(floatX(mu))
        self.beta = tt.as_tensor_variable(floatX(beta))
        assert_negative_support(beta, "beta", "Gumbel")
        # Closed-form summary statistics of the type-I extreme value distribution.
        self.mean = self.mu + self.beta * np.euler_gamma
        self.median = self.mu - self.beta * tt.log(tt.log(2))
        self.mode = self.mu
        self.variance = (np.pi ** 2 / 6.0) * self.beta ** 2
        super().__init__(**kwargs)

    def random(self, point=None, size=None):
        """
        Draw random values from Gumbel distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        # NOTE(review): the local name `sigma` actually holds beta (the scale);
        # it is only a misleading name, the value passed to scipy is correct.
        mu, sigma = draw_values([self.mu, self.beta], point=point, size=size)
        return generate_samples(
            stats.gumbel_r.rvs, loc=mu, scale=sigma, dist_shape=self.shape, size=size
        )

    def logp(self, value):
        """
        Calculate log-probability of Gumbel distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        beta = self.beta
        scaled = (value - mu) / beta
        # log pdf = -(z + exp(-z)) - log(beta), z = (x - mu) / beta.
        return bound(
            -scaled - tt.exp(-scaled) - tt.log(self.beta),
            0 < beta,
        )

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Gumbel distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        beta = self.beta
        mu = self.mu
        # CDF = exp(-exp(-z)), so log CDF = -exp(-z).
        return bound(
            -tt.exp(-(value - mu) / beta),
            0 < beta,
        )
class Rice(PositiveContinuous):
    r"""
    Rice distribution.
    .. math::
       f(x\mid \nu ,\sigma )=
       {\frac {x}{\sigma ^{2}}}\exp
       \left({\frac {-(x^{2}+\nu ^{2})}{2\sigma ^{2}}}\right)I_{0}\left({\frac {x\nu }{\sigma ^{2}}}\right),
    .. plot::
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 8, 500)
        nus = [0., 0., 4., 4.]
        sigmas = [1., 2., 1., 2.]
        for nu, sigma in zip(nus, sigmas):
            pdf = st.rice.pdf(x, nu / sigma, scale=sigma)
            plt.plot(x, pdf, label=r'$\nu$ = {}, $\sigma$ = {}'.format(nu, sigma))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()
    ======== ==============================================================
    Support  :math:`x \in (0, \infty)`
    Mean     :math:`\sigma {\sqrt {\pi /2}}\,\,L_{{1/2}}(-\nu ^{2}/2\sigma ^{2})`
    Variance :math:`2\sigma ^{2}+\nu ^{2}-{\frac {\pi \sigma ^{2}}{2}}L_{{1/2}}^{2}\left({\frac {-\nu ^{2}}{2\sigma ^{2}}}\right)`
    ======== ==============================================================
    Parameters
    ----------
    nu: float
        noncentrality parameter.
    sigma: float
        scale parameter.
    b: float
        shape parameter (alternative to nu).
    Notes
    -----
    The distribution :math:`\mathrm{Rice}\left(|\nu|,\sigma\right)` is the
    distribution of :math:`R=\sqrt{X^2+Y^2}` where :math:`X\sim N(\nu \cos{\theta}, \sigma^2)`,
    :math:`Y\sim N(\nu \sin{\theta}, \sigma^2)` are independent and for any
    real :math:`\theta`.
    The distribution is defined with either nu or b.
    The link between the two parametrizations is given by
    .. math::
       b = \dfrac{\nu}{\sigma}
    """

    def __init__(self, nu=None, sigma=None, b=None, sd=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # `sd` is a deprecated alias for `sigma`.
        if sd is not None:
            sigma = sd
        # Resolve the (nu, b) parametrization; exactly one of the two must be given.
        nu, b, sigma = self.get_nu_b(nu, b, sigma)
        # NOTE(review): unlike sibling distributions, no assert_negative_support
        # is issued for nu/sigma here — confirm whether that is intentional.
        self.nu = nu = tt.as_tensor_variable(floatX(nu))
        self.sigma = self.sd = sigma = tt.as_tensor_variable(floatX(sigma))
        self.b = b = tt.as_tensor_variable(floatX(b))
        # Mean/variance via the Laguerre polynomial L_{1/2}, expanded in terms
        # of the modified Bessel functions I0 and I1 (see docstring table).
        nu_sigma_ratio = -(nu ** 2) / (2 * sigma ** 2)
        self.mean = (
            sigma
            * np.sqrt(np.pi / 2)
            * tt.exp(nu_sigma_ratio / 2)
            * (
                (1 - nu_sigma_ratio) * tt.i0(-nu_sigma_ratio / 2)
                - nu_sigma_ratio * tt.i1(-nu_sigma_ratio / 2)
            )
        )
        self.variance = (
            2 * sigma ** 2
            + nu ** 2
            - (np.pi * sigma ** 2 / 2)
            * (
                tt.exp(nu_sigma_ratio / 2)
                * (
                    (1 - nu_sigma_ratio) * tt.i0(-nu_sigma_ratio / 2)
                    - nu_sigma_ratio * tt.i1(-nu_sigma_ratio / 2)
                )
            )
            ** 2
        )

    def get_nu_b(self, nu, b, sigma):
        """Return a consistent (nu, b, sigma) triple from either parametrization.

        Raises ValueError when both or neither of nu/b are supplied.
        """
        if sigma is None:
            sigma = 1.0
        if nu is None and b is not None:
            nu = b * sigma
            return nu, b, sigma
        elif nu is not None and b is None:
            b = nu / sigma
            return nu, b, sigma
        # Reached when both nu and b are given, or neither.
        raise ValueError("Rice distribution must specify either nu" " or b.")

    def random(self, point=None, size=None):
        """
        Draw random values from Rice distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        nu, sigma = draw_values([self.nu, self.sigma], point=point, size=size)
        return generate_samples(self._random, nu=nu, sigma=sigma, dist_shape=self.shape, size=size)

    def _random(self, nu, sigma, size):
        """Wrapper around stats.rice.rvs that converts Rice's
        parametrization to scipy.rice. All parameter arrays should have
        been broadcasted properly by generate_samples at this point and size is
        the scipy.rvs representation.
        """
        return stats.rice.rvs(b=nu / sigma, scale=sigma, size=size)

    def logp(self, value):
        """
        Calculate log-probability of Rice distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        nu = self.nu
        sigma = self.sigma
        b = self.b
        x = value / sigma
        # Uses the exponentially scaled Bessel function i0e(x*b) so that the
        # large-argument exp factors cancel inside the log.
        return bound(
            tt.log(x * tt.exp((-(x - b) * (x - b)) / 2) * i0e(x * b) / sigma),
            sigma >= 0,
            nu >= 0,
            value > 0,
        )

    def _distr_parameters_for_repr(self):
        # b is redundant given nu and sigma, so it is omitted from the repr.
        return ["nu", "sigma"]
class Logistic(Continuous):
    r"""
    Logistic log-likelihood.
    The pdf of this distribution is
    .. math::
       f(x \mid \mu, s) =
           \frac{\exp\left(-\frac{x - \mu}{s}\right)}{s \left(1 + \exp\left(-\frac{x - \mu}{s}\right)\right)^2}
    .. plot::
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(-5, 5, 200)
        mus = [0., 0., 0., -2.]
        ss = [.4, 1., 2., .4]
        for mu, s in zip(mus, ss):
            pdf = st.logistic.pdf(x, loc=mu, scale=s)
            plt.plot(x, pdf, label=r'$\mu$ = {}, $s$ = {}'.format(mu, s))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()
    ======== ==========================================
    Support  :math:`x \in \mathbb{R}`
    Mean     :math:`\mu`
    Variance :math:`\frac{s^2 \pi^2}{3}`
    ======== ==========================================
    Parameters
    ----------
    mu: float
        Mean.
    s: float
        Scale (s > 0).
    """

    def __init__(self, mu=0.0, s=1.0, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.mu = tt.as_tensor_variable(floatX(mu))
        self.s = tt.as_tensor_variable(floatX(s))
        # NOTE(review): mean/mode/variance are stored from the raw inputs, not
        # the tensor-converted attributes — presumably intentional, but differs
        # from sibling classes; confirm if exact tensor types matter downstream.
        self.mean = self.mode = mu
        self.variance = s ** 2 * np.pi ** 2 / 3.0

    def random(self, point=None, size=None):
        """
        Draw random values from Logistic distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        mu, s = draw_values([self.mu, self.s], point=point, size=size)
        return generate_samples(
            stats.logistic.rvs, loc=mu, scale=s, dist_shape=self.shape, size=size
        )

    def logp(self, value):
        """
        Calculate log-probability of Logistic distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        s = self.s
        # log pdf = -z - log(s) - 2*log(1 + exp(-z)), z = (x - mu) / s;
        # log1p keeps the last term accurate for large z.
        return bound(
            -(value - mu) / s - tt.log(s) - 2 * tt.log1p(tt.exp(-(value - mu) / s)),
            s > 0,
        )

    def logcdf(self, value):
        r"""
        Compute the log of the cumulative distribution function for Logistic distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        s = self.s
        # CDF is the logistic sigmoid; log sigmoid(z) = -log(1 + exp(-z)),
        # computed stably via log1pexp.
        return bound(
            -log1pexp(-(value - mu) / s),
            0 < s,
        )
class LogitNormal(UnitContinuous):
    r"""
    Logit-Normal log-likelihood.
    The pdf of this distribution is
    .. math::
       f(x \mid \mu, \tau) =
           \frac{1}{x(1-x)} \sqrt{\frac{\tau}{2\pi}}
           \exp\left\{ -\frac{\tau}{2} (logit(x)-\mu)^2 \right\}
    .. plot::
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        from scipy.special import logit
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0.0001, 0.9999, 500)
        mus = [0., 0., 0., 1.]
        sigmas = [0.3, 1., 2., 1.]
        for mu, sigma in zip(mus, sigmas):
            pdf = st.norm.pdf(logit(x), loc=mu, scale=sigma) * 1/(x * (1-x))
            plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}'.format(mu, sigma))
        plt.legend(loc=1)
        plt.show()
    ======== ==========================================
    Support  :math:`x \in (0, 1)`
    Mean     no analytical solution
    Variance no analytical solution
    ======== ==========================================
    Parameters
    ----------
    mu: float
        Location parameter.
    sigma: float
        Scale parameter (sigma > 0).
    tau: float
        Scale parameter (tau > 0).
    """

    def __init__(self, mu=0, sigma=None, tau=None, sd=None, **kwargs):
        # `sd` is a deprecated alias for `sigma`.
        if sd is not None:
            sigma = sd
        self.mu = mu = tt.as_tensor_variable(floatX(mu))
        # Derive whichever of tau/sigma was omitted (tau = 1 / sigma**2).
        tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
        self.sigma = self.sd = tt.as_tensor_variable(sigma)
        self.tau = tau = tt.as_tensor_variable(tau)
        # The median of logit-normal is the inverse logit of mu; the mean and
        # variance have no closed form (see docstring table).
        self.median = invlogit(mu)
        assert_negative_support(sigma, "sigma", "LogitNormal")
        assert_negative_support(tau, "tau", "LogitNormal")
        super().__init__(**kwargs)

    def random(self, point=None, size=None):
        """
        Draw random values from LogitNormal distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        # tau is drawn only to keep the draws mutually consistent with sigma.
        mu, _, sigma = draw_values([self.mu, self.tau, self.sigma], point=point, size=size)
        # Sample on the logit scale, then map back to (0, 1) with the sigmoid.
        return expit(
            generate_samples(stats.norm.rvs, loc=mu, scale=sigma, dist_shape=self.shape, size=size)
        )

    def logp(self, value):
        """
        Calculate log-probability of LogitNormal distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        tau = self.tau
        # Normal log-density of logit(value) plus the log-Jacobian of the
        # logit transform, -log(x * (1 - x)).
        return bound(
            -0.5 * tau * (logit(value) - mu) ** 2
            + 0.5 * tt.log(tau / (2.0 * np.pi))
            - tt.log(value * (1 - value)),
            value > 0,
            value < 1,
            tau > 0,
        )

    def _distr_parameters_for_repr(self):
        # tau/sd are redundant aliases of sigma and are omitted from the repr.
        return ["mu", "sigma"]
class Interpolated(BoundedContinuous):
    r"""
    Univariate probability distribution defined as a linear interpolation
    of probability density function evaluated on some lattice of points.
    The lattice can be uneven, so the steps between different points can have
    different size and it is possible to vary the precision between regions
    of the support.
    The probability density function values do not have to be normalized, as the
    interpolated density is anyway normalized to make the total probability
    equal to $1$.
    Both parameters ``x_points`` and values ``pdf_points`` are not variables, but
    plain array-like objects, so they are constant and cannot be sampled.
    ======== ===========================================
    Support  :math:`x \in [x\_points[0], x\_points[-1]]`
    ======== ===========================================
    Parameters
    ----------
    x_points: array-like
        A monotonically growing list of values
    pdf_points: array-like
        Probability density function evaluated on lattice ``x_points``
    """

    def __init__(self, x_points, pdf_points, *args, **kwargs):
        self.lower = lower = tt.as_tensor_variable(x_points[0])
        self.upper = upper = tt.as_tensor_variable(x_points[-1])
        super().__init__(lower=lower, upper=upper, *args, **kwargs)
        # Linear (k=1) spline through the supplied pdf values; zero outside
        # the lattice.  Z is the normalization constant.
        interp = InterpolatedUnivariateSpline(x_points, pdf_points, k=1, ext="zeros")
        Z = interp.integral(x_points[0], x_points[-1])
        self.Z = tt.as_tensor_variable(Z)
        self.interp_op = SplineWrapper(interp)
        self.x_points = x_points
        self.pdf_points = pdf_points / Z
        # Normalized CDF evaluated on the lattice, used for inverse sampling.
        self.cdf_points = interp.antiderivative()(x_points) / Z
        self.median = self._argcdf(0.5)

    def _argcdf(self, p):
        """Inverse CDF (quantile function) evaluated at probability p.

        Within each lattice interval the pdf is linear, so the CDF is
        quadratic and can be inverted in closed form.
        """
        pdf = self.pdf_points
        cdf = self.cdf_points
        x = self.x_points
        # Index of the lattice interval that contains probability p.
        index = np.searchsorted(cdf, p) - 1
        slope = (pdf[index + 1] - pdf[index]) / (x[index + 1] - x[index])
        # Flat pdf -> linear CDF inversion; otherwise solve the quadratic.
        # The innermost guard avoids division by a (near-)zero pdf.
        return x[index] + np.where(
            np.abs(slope) <= 1e-8,
            np.where(
                np.abs(pdf[index]) <= 1e-8, np.zeros(index.shape), (p - cdf[index]) / pdf[index]
            ),
            (-pdf[index] + np.sqrt(pdf[index] ** 2 + 2 * slope * (p - cdf[index]))) / slope,
        )

    def _random(self, size=None):
        # Inverse-transform sampling: push uniforms through the quantile function.
        return self._argcdf(np.random.uniform(size=size))

    def random(self, point=None, size=None):
        """
        Draw random values from Interpolated distribution.

        Parameters
        ----------
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        return generate_samples(self._random, dist_shape=self.shape, size=size)

    def logp(self, value):
        """
        Calculate log-probability of Interpolated distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        return tt.log(self.interp_op(value) / self.Z)

    def _distr_parameters_for_repr(self):
        # x_points/pdf_points are plain arrays, not model parameters.
        return []
class Moyal(Continuous):
    r"""
    Moyal log-likelihood.
    The pdf of this distribution is
    .. math::
       f(x \mid \mu,\sigma) = \frac{1}{\sqrt{2\pi}\sigma}e^{-\frac{1}{2}\left(z + e^{-z}\right)},
    where
    .. math::
       z = \frac{x-\mu}{\sigma}.
    .. plot::
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(-10, 20, 200)
        mus = [-1., 0., 4.]
        sigmas = [2., 2., 4.]
        for mu, sigma in zip(mus, sigmas):
            pdf = st.moyal.pdf(x, loc=mu, scale=sigma)
            plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}'.format(mu, sigma))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()
    ======== ==============================================================
    Support  :math:`x \in (-\infty, \infty)`
    Mean     :math:`\mu + \sigma\left(\gamma + \log 2\right)`, where :math:`\gamma` is the Euler-Mascheroni constant
    Variance :math:`\frac{\pi^{2}}{2}\sigma^{2}`
    ======== ==============================================================
    Parameters
    ----------
    mu: float
        Location parameter.
    sigma: float
        Scale parameter (sigma > 0).
    """

    def __init__(self, mu=0, sigma=1.0, *args, **kwargs):
        self.mu = tt.as_tensor_variable(floatX(mu))
        self.sigma = tt.as_tensor_variable(floatX(sigma))
        assert_negative_support(sigma, "sigma", "Moyal")
        # Closed-form summary statistics (see docstring table); the median
        # follows from inverting the erfc-based CDF at 1/2.
        self.mean = self.mu + self.sigma * (np.euler_gamma + tt.log(2))
        self.median = self.mu - self.sigma * tt.log(2 * tt.erfcinv(1 / 2) ** 2)
        self.mode = self.mu
        self.variance = (np.pi ** 2 / 2.0) * self.sigma ** 2
        super().__init__(*args, **kwargs)

    def random(self, point=None, size=None):
        """
        Draw random values from Moyal distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        mu, sigma = draw_values([self.mu, self.sigma], point=point, size=size)
        return generate_samples(
            stats.moyal.rvs, loc=mu, scale=sigma, dist_shape=self.shape, size=size
        )

    def logp(self, value):
        """
        Calculate log-probability of Moyal distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        sigma = self.sigma
        scaled = (value - mu) / sigma
        # log pdf = -(z + exp(-z))/2 - log(sigma) - log(2*pi)/2.
        return bound(
            (-(1 / 2) * (scaled + tt.exp(-scaled)) - tt.log(sigma) - (1 / 2) * tt.log(2 * np.pi)),
            0 < sigma,
        )

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Moyal distribution
        at the specified value.

        Parameters
        ----------
        value: numeric or np.ndarray or theano.tensor
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        sigma = self.sigma
        scaled = (value - mu) / sigma
        # CDF = erfc(exp(-z/2) / sqrt(2)).
        return bound(
            tt.log(tt.erfc(tt.exp(-scaled / 2) * (2 ** -0.5))),
            0 < sigma,
        )
| [
"matsushu@ZaknoMacBook-Pro.local"
] | matsushu@ZaknoMacBook-Pro.local |
56929c26fc1cf02790a98401f43424fa08ca800e | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_06_01/aio/_container_service_client.py | e859946dcaf202e06e448c829e00e8209b572eb0 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 4,898 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ..._serialization import Deserializer, Serializer
from ._configuration import ContainerServiceClientConfiguration
from .operations import AgentPoolsOperations, ManagedClustersOperations, Operations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ContainerServiceClient:  # pylint: disable=client-accepts-api-version-keyword
    """The Container Service Client.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.containerservice.v2019_06_01.aio.operations.Operations
    :ivar managed_clusters: ManagedClustersOperations operations
    :vartype managed_clusters:
     azure.mgmt.containerservice.v2019_06_01.aio.operations.ManagedClustersOperations
    :ivar agent_pools: AgentPoolsOperations operations
    :vartype agent_pools:
     azure.mgmt.containerservice.v2019_06_01.aio.operations.AgentPoolsOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
     subscription. The subscription ID forms part of the URI for every service call. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2019-06-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = ContainerServiceClientConfiguration(
            credential=credential, subscription_id=subscription_id, **kwargs
        )
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # Collect all generated model classes for (de)serialization.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        # Operation groups exposed on the client.
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.managed_clusters = ManagedClustersOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.agent_pools = AgentPoolsOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Copy so the caller's request object is not mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "ContainerServiceClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| [
"noreply@github.com"
] | test-repo-billy.noreply@github.com |
68a1455380858827d9d8af0bc4cc0ed58d3db25b | 0bd7c1f7bf6da5ef92b9013e1d913140f0249dfa | /cecilia-python/greedy-thinking/MaxProfit-Ⅱ.py | fa987195663b384497d5d5818948131fd456811e | [] | no_license | Cecilia520/algorithmic-learning-leetcode | f1fec1fae71c4cf7410122f5ce969e829f451308 | 32941ee052d0985a9569441d314378700ff4d225 | refs/heads/master | 2022-05-02T03:00:57.505672 | 2022-03-19T09:51:28 | 2022-03-19T09:51:28 | 229,673,810 | 7 | 1 | null | 2022-03-19T09:34:57 | 2019-12-23T04:04:04 | Python | UTF-8 | Python | false | false | 2,586 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : MaxProfit-Ⅱ.py
@Contact : 70904372cecilia@gmail.com
@License : (C)Copyright 2019-2020
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2020/2/5 12:48 cecilia 1.0 买卖股票的最佳时机Ⅱ(简单)
问题描述:
给定一个数组,它的第 i 个元素是一支给定股票第 i 天的价格。
设计一个算法来计算你所能获取的最大利润。你可以尽可能地完成更多的交易(多次买卖一支股票)。
注意:你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。
示例1:
输入: [7,1,5,3,6,4]
输出: 7
解释: 在第 2 天(股票价格 = 1)的时候买入,在第 3 天(股票价格 = 5)的时候卖出, 这笔交易所能获得利润 = 5-1 = 4 。
随后,在第 4 天(股票价格 = 3)的时候买入,在第 5 天(股票价格 = 6)的时候卖出, 这笔交易所能获得利润 = 6-3 = 3
示例2:
输入: [1,2,3,4,5]
输出: 4
解释: 在第 1 天(股票价格 = 1)的时候买入,在第 5 天 (股票价格 = 5)的时候卖出, 这笔交易所能获得利润 = 5-1 = 4 。
注意你不能在第 1 天和第 2 天接连购买股票,之后再将它们卖出。
因为这样属于同时参与了多笔交易,你必须在再次购买前出售掉之前的股票。
示例3:
输入: [7,6,4,3,1]
输出: 0
解释: 在这种情况下, 没有交易完成, 所以最大利润为 0。
"""
def maxPrice(prices) -> int:
    """Best Time to Buy and Sell Stock II (greedy).

    You may complete as many transactions as you like, but must sell before
    buying again.  Since selling on day i and re-buying the same day is
    equivalent to simply holding, the maximum profit is the sum of every
    positive day-over-day price increase.

    :param prices: sequence of daily stock prices
    :return: the maximum achievable profit (0 for empty or falling prices)

    Complexity: O(n) time, O(1) extra space.
    """
    return sum(
        max(today - yesterday, 0)
        for yesterday, today in zip(prices, prices[1:])
    )
if __name__ == '__main__':
    # Demo from the problem statement: buy@1 sell@5 (+4), buy@3 sell@6 (+3) -> 7.
    prices = [7, 1, 5, 3, 6, 4]
    print(maxPrice(prices))
| [
"cc15572018516@163.com"
] | cc15572018516@163.com |
b201ee6feb1416bd3cbda627e72d8e88d5850c0c | d4dda2e2992ca16b8fe628e417f8a4243af0ed4a | /step13_offdiagonalLHEstudy/getsmearing.py | 5cfca6ec5e81bf48a0275a1aed1d0eb2a722cf79 | [] | no_license | hroskes/anomalouscouplings | 01f46c0d38f5332c58538b0bdea373704cf06fcc | 391eb7fbd52d8605b09ca2e461b1789e019b1da0 | refs/heads/production | 2021-11-24T22:37:48.932830 | 2021-10-29T18:38:54 | 2021-10-29T18:38:54 | 60,651,233 | 0 | 2 | null | 2017-01-24T14:20:56 | 2016-06-07T22:37:23 | Python | UTF-8 | Python | false | false | 7,019 | py | #!/usr/bin/env python
import os
import ROOT
from helperstuff import config
from helperstuff.samples import Sample
from helperstuff.utilities import cache, mkdir_p, tfiles, tlvfromptetaphim
# Memoised TF1 constructor so repeated fits reuse the same function objects.
TF1 = cache(ROOT.TF1)

# SM (0+) VBF sample from the combination production; its candidate tree
# holds LHE-, gen- and reco-level particle branches used below.
s = Sample("VBF", "0+", config.productionforcombine)
f = tfiles[s.withdiscriminantsfile()]
t = f.candTree

# Resolution histograms: (reco - LHE), (reco - gen), (gen - LHE) residuals
# for jet/electron/muon pt, eta and phi.  Jet pt resolution is much wider
# than the lepton ones, hence the +-100 range.
hlherecojetpt = ROOT.TH1F("hlherecojetpt", "", 100, -100, 100)
hlherecojeteta = ROOT.TH1F("hlherecojeteta", "", 100, -1, 1)
hlherecojetphi = ROOT.TH1F("hlherecojetphi", "", 100, -1, 1)
hlhegenelectronpt = ROOT.TH1F("hlhegenelectronpt", "", 100, -10, 10)
hlhegenelectroneta = ROOT.TH1F("hlhegenelectroneta", "", 100, -.2, .2)
hlhegenelectronphi = ROOT.TH1F("hlhegenelectronphi", "", 100, -.2, .2)
hgenrecoelectronpt = ROOT.TH1F("hgenrecoelectronpt", "", 100, -10, 10)
hgenrecoelectroneta = ROOT.TH1F("hgenrecoelectroneta", "", 100, -.2, .2)
hgenrecoelectronphi = ROOT.TH1F("hgenrecoelectronphi", "", 100, -.2, .2)
hlherecoelectronpt = ROOT.TH1F("hlherecoelectronpt", "", 100, -10, 10)
hlherecoelectroneta = ROOT.TH1F("hlherecoelectroneta", "", 100, -.2, .2)
hlherecoelectronphi = ROOT.TH1F("hlherecoelectronphi", "", 100, -.2, .2)
hlhegenmuonpt = ROOT.TH1F("hlhegenmuonpt", "", 100, -10, 10)
hlhegenmuoneta = ROOT.TH1F("hlhegenmuoneta", "", 100, -.2, .2)
hlhegenmuonphi = ROOT.TH1F("hlhegenmuonphi", "", 100, -.2, .2)
hgenrecomuonpt = ROOT.TH1F("hgenrecomuonpt", "", 100, -10, 10)
hgenrecomuoneta = ROOT.TH1F("hgenrecomuoneta", "", 100, -.2, .2)
hgenrecomuonphi = ROOT.TH1F("hgenrecomuonphi", "", 100, -.2, .2)
hlherecomuonpt = ROOT.TH1F("hlherecomuonpt", "", 100, -10, 10)
hlherecomuoneta = ROOT.TH1F("hlherecomuoneta", "", 100, -.2, .2)
hlherecomuonphi = ROOT.TH1F("hlherecomuonphi", "", 100, -.2, .2)
# All histograms, in the order they are fitted and saved at the end.
hists = [
    hlherecojetpt, hlherecojeteta, hlherecojetphi,
    hlhegenelectronpt, hlhegenelectroneta, hlhegenelectronphi,
    hgenrecoelectronpt, hgenrecoelectroneta, hgenrecoelectronphi,
    hlherecoelectronpt, hlherecoelectroneta, hlherecoelectronphi,
    hlhegenmuonpt, hlhegenmuoneta, hlhegenmuonphi,
    hgenrecomuonpt, hgenrecomuoneta, hgenrecomuonphi,
    hlherecomuonpt, hlherecomuoneta, hlherecomuonphi,
]
length = t.GetEntries()
for i, entry in enumerate(t, start=1):
jets = []
LHEjets = []
electrons = []
genelectrons = []
LHEelectrons = []
muons = []
genmuons = []
LHEmuons = []
for pt, eta, phi, m, id in zip(t.LHEDaughterPt, t.LHEDaughterEta, t.LHEDaughterPhi, t.LHEDaughterMass, t.LHEDaughterId):
if abs(id) == 11:
LHEelectrons.append(tlvfromptetaphim(pt, eta, phi, m))
elif abs(id) == 13:
LHEmuons.append(tlvfromptetaphim(pt, eta, phi, m))
for pt, eta, phi, m, id in zip(t.LHEAssociatedParticlePt, t.LHEAssociatedParticleEta, t.LHEAssociatedParticlePhi, t.LHEAssociatedParticleMass, t.LHEAssociatedParticleId):
if 1 <= abs(id) <= 6 or id == 21:
LHEjets.append(tlvfromptetaphim(pt, eta, phi, m))
for pt, eta, phi, id in zip(*[[getattr(t, "GenLep{}{}".format(j, var)) for j in range(1, 5)] for var in ("Pt", "Eta", "Phi", "Id")]):
m = 0
if abs(id) == 11:
genelectrons.append(tlvfromptetaphim(pt, eta, phi, m))
elif abs(id) == 13:
genmuons.append(tlvfromptetaphim(pt, eta, phi, m))
for pt, eta, phi, id in zip(t.LepPt, t.LepEta, t.LepPhi, t.LepLepId):
if abs(id) == 11:
electrons.append(tlvfromptetaphim(pt, eta, phi, m))
elif abs(id) == 13:
muons.append(tlvfromptetaphim(pt, eta, phi, m))
for pt, eta, phi, mass in zip(t.JetPt, t.JetEta, t.JetPhi, t.JetMass):
jets.append(tlvfromptetaphim(pt, eta, phi, 0))
for lhejet in LHEjets:
if not jets: continue
recojet = min(jets, key=lambda jet: jet.DeltaR(lhejet))
if lhejet != min(LHEjets, key=lambda jet: jet.DeltaR(recojet)): continue
hlherecojetpt.Fill(recojet.Pt() - lhejet.Pt())
hlherecojeteta.Fill(recojet.Eta() - lhejet.Eta())
hlherecojetphi.Fill(recojet.Phi() - lhejet.Phi())
for lheelectron in LHEelectrons:
recoelectron = min(electrons, key=lambda electron: electron.DeltaR(lheelectron))
if lheelectron != min(LHEelectrons, key=lambda electron: electron.DeltaR(recoelectron)): continue
hlherecoelectronpt.Fill(recoelectron.Pt() - lheelectron.Pt())
hlherecoelectroneta.Fill(recoelectron.Eta() - lheelectron.Eta())
hlherecoelectronphi.Fill(recoelectron.Phi() - lheelectron.Phi())
for genelectron in genelectrons:
recoelectron = min(electrons, key=lambda electron: electron.DeltaR(genelectron))
if genelectron != min(genelectrons, key=lambda electron: electron.DeltaR(recoelectron)): continue
hgenrecoelectronpt.Fill(recoelectron.Pt() - genelectron.Pt())
hgenrecoelectroneta.Fill(recoelectron.Eta() - genelectron.Eta())
hgenrecoelectronphi.Fill(recoelectron.Phi() - genelectron.Phi())
for lheelectron in LHEelectrons:
genelectron = min(genelectrons, key=lambda electron: electron.DeltaR(lheelectron))
if lheelectron != min(LHEelectrons, key=lambda electron: electron.DeltaR(genelectron)): continue
hlhegenelectronpt.Fill(genelectron.Pt() - lheelectron.Pt())
hlhegenelectroneta.Fill(genelectron.Eta() - lheelectron.Eta())
hlhegenelectronphi.Fill(genelectron.Phi() - lheelectron.Phi())
for lhemuon in LHEmuons:
recomuon = min(muons, key=lambda muon: muon.DeltaR(lhemuon))
if lhemuon != min(LHEmuons, key=lambda muon: muon.DeltaR(recomuon)): continue
hlherecomuonpt.Fill(recomuon.Pt() - lhemuon.Pt())
hlherecomuoneta.Fill(recomuon.Eta() - lhemuon.Eta())
hlherecomuonphi.Fill(recomuon.Phi() - lhemuon.Phi())
for genmuon in genmuons:
recomuon = min(muons, key=lambda muon: muon.DeltaR(genmuon))
if genmuon != min(genmuons, key=lambda muon: muon.DeltaR(recomuon)): continue
hgenrecomuonpt.Fill(recomuon.Pt() - genmuon.Pt())
hgenrecomuoneta.Fill(recomuon.Eta() - genmuon.Eta())
hgenrecomuonphi.Fill(recomuon.Phi() - genmuon.Phi())
for lhemuon in LHEmuons:
genmuon = min(genmuons, key=lambda muon: muon.DeltaR(lhemuon))
if lhemuon != min(LHEmuons, key=lambda muon: muon.DeltaR(genmuon)): continue
hlhegenmuonpt.Fill(genmuon.Pt() - lhemuon.Pt())
hlhegenmuoneta.Fill(genmuon.Eta() - lhemuon.Eta())
hlhegenmuonphi.Fill(genmuon.Phi() - lhemuon.Phi())
if i % 1000 == 0 or i == length:
print i, "/", length
# Fit each residual histogram with a Gaussian and save it in every format.
c = ROOT.TCanvas()
saveasdir = os.path.join(config.plotsbasedir, "offdiagonalLHEstudy", "resolution")
mkdir_p(saveasdir)
for h in hists:
    for ext in "png eps root pdf".split():
        # NOTE(review): the draw and Gaussian fit are redone for every file
        # extension; the fit result is identical each time, only the output
        # format differs.  Harmless, but the fit could be hoisted out.
        h.Draw()
        f = TF1("f"+h.GetName(), "gaus(0)", h.GetXaxis().GetXmin(), h.GetXaxis().GetXmax())
        # Seed the Gaussian with the histogram's normalisation, mean and RMS.
        f.SetParameters(h.GetEntries(), h.GetMean(), h.GetRMS())
        h.Fit(f)
        c.SaveAs(os.path.join(saveasdir, h.GetName()+"."+ext))
| [
"jroskes1@jhu.edu"
] | jroskes1@jhu.edu |
7d12a67de1a5c944cff5b801c157aed83ebebbf3 | 800af8d10309c2c0bb1a55f9aeaa501475cab559 | /status_app/receivers.py | 8b537798311225c59a12b6a13146381478e9c59d | [
"Apache-2.0"
] | permissive | vegitron/status-app | 2ca0862fb7daf4b05adcc4dac6bf743008746abd | 2bafcaf94aa50e443d6521de204da7e27b4d8ac7 | refs/heads/master | 2020-04-13T19:55:26.941839 | 2014-04-10T23:28:23 | 2014-04-10T23:28:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | from status_app.signals import request_signal
from status_app.dispatch import dispatch
from status_app.models import RawEvent
from django.dispatch import receiver
from datetime import datetime
import socket
def request_receiver(sender, status_code, path_info, request_time, **kwargs):
    """Record pass/fail and response-time events for a finished request.

    A request counts as a success when its status code is 2xx/3xx; failures
    additionally record the offending path with the event.
    """
    host = socket.gethostname()
    succeeded = 200 <= status_code < 400
    failed_path = '' if succeeded else path_info
    dispatch('application_response', RawEvent.PASS_FAIL, datetime.now(), succeeded, failed_path, host)
    dispatch('application_response_time', RawEvent.INTERVAL, datetime.now(), request_time, '', host)
def get_signal():
    """Return the signal that ``request_receiver`` should be connected to."""
    return request_signal
| [
"pmichaud@uw.edu"
] | pmichaud@uw.edu |
cafc8b76a370cd5d56727b8016d1b128aa1559a9 | 71bec5b969aa3c9f40f839cff24ac598d8f7fd28 | /DomeOne/DomeTornadoQuery.py | 3501946537a1c1f6035212e30a0e6ce64071b759 | [] | no_license | dong-c-git/TornadoProjectDome | 6c3ba2f69c333c30bbdf723dd750ce0118436a09 | 4124fe36f409a87b4615eafd9ba59dabc21a12f6 | refs/heads/master | 2020-08-17T20:11:57.250887 | 2019-11-01T04:46:22 | 2019-11-01T04:46:22 | 215,706,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,864 | py | #coding:utf-8
import tornado.web
import tornado.ioloop
import tornado.options
from tornado.web import MissingArgumentError
import tornado.httpserver
#tornado获取数据类型方法模型
tornado.options.define("port",default=8090,type=int,help="need runserver give port")
class IndexHandler(tornado.web.RequestHandler):
    """Demo handler exercising every Tornado argument-accessor variant."""
    # def get(self):
    #     # self.write("hello this is tornado server")
    def post(self):
        """Fetch arguments 'a'/'b'/'c' through each accessor and echo them."""
        # Query-string only accessors.
        query_arg = self.get_query_argument("a")
        query_args = self.get_query_arguments("a")
        # Request-body only accessors.
        body_arg = self.get_body_argument("a")
        body_args = self.get_body_arguments("a", strip=False)
        # Combined accessors (query string and body together).
        arg = self.get_argument("a")
        # Bug fix: was `self.get_argumens("a")` (typo) which raised
        # AttributeError on every POST request.
        args = self.get_arguments("a")
        # With a default supplied, a missing argument does not raise.
        default_arg = self.get_argument("b", "itcast")
        default_args = self.get_arguments("b")
        try:
            # Without a default, a missing argument raises MissingArgumentError.
            missing_arg = self.get_argument("c")
        except MissingArgumentError as e:
            missing_arg = "we catched the MissingArgumentError"
            print(e)
        # The plural accessor never raises; it returns [] when absent.
        missing_args = self.get_arguments("c")
        rep = "query_arg:%s<br/>" % query_arg
        rep += "query_args:%s<br/>" % query_args
        rep += "body_arg:%s<br/>" % body_arg
        rep += "body_args:%s<br/>" % body_args
        rep += "arg:%s<br/>" % arg
        rep += "args:%s<br/>" % args
        rep += "default_arg:%s<br/>" % default_arg
        rep += "default_args:%s<br/>" % default_args
        rep += "missing_arg:%s<br/>" % missing_arg
        rep += "missing_args:%s<br/>" % missing_args
        self.write(rep)
if __name__ == '__main__':
    # Parse --port etc. from the command line (see the options.define above).
    tornado.options.parse_command_line()
    app = tornado.web.Application([(r"/",IndexHandler),])
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.bind(tornado.options.options.port)
    # start(0) forks one worker process per CPU (per the Tornado docs).
    http_server.start(0)
    tornado.ioloop.IOLoop.current().start()
| [
"dc111000@hotmail.com"
] | dc111000@hotmail.com |
5acc3e2a04a63b176b35628dea94a3af18ae9626 | c04fb2e9ee96987b308cd60defb179f800486858 | /_unittests/ut_documentation/test_notebook_cheat_sheet_html.py | c287e6ab4a557832a54e5343e74a39d6c73816e3 | [
"MIT"
] | permissive | sdpython/ensae_projects | ca30304e04c938b1d79abef5e54dac0dc531421f | 36033021726144e66fd420cc902f32187a650b18 | refs/heads/master | 2023-02-08T01:32:47.892757 | 2023-02-02T00:18:00 | 2023-02-02T00:18:00 | 45,864,036 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,674 | py | # -*- coding: utf-8 -*-
"""
@brief test log(time=32s)
"""
import sys
import os
import unittest
import shutil
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, is_travis_or_appveyor
from pyquickhelper.pycode import fix_tkinter_issues_virtualenv
from pyquickhelper.ipythonhelper import execute_notebook_list_finalize_ut
from ensae_projects.automation.notebook_test_helper import ls_notebooks, execute_notebooks, clean_function_notebook
import ensae_projects
class TestNotebookCheatSheetHtml(unittest.TestCase):
    """Executes the HTML cheat-sheet notebooks and checks they run cleanly."""

    def test_notebook_cheatsheet_html(self):
        fLOG(__file__, self._testMethodName,
             OutputPrint=__name__ == "__main__")
        if is_travis_or_appveyor() == "appveyor":
            # connectivity issue
            return
        fix_tkinter_issues_virtualenv()
        temp = get_temp_folder(__file__, "temp_cheat_sheet_html")
        notebooks = ls_notebooks("cheat_sheets")
        self.assertTrue(len(notebooks) > 0)
        notebooks = [nb for nb in notebooks if "chsh_files" not in nb]
        if notebooks:
            # The notebooks expect this data file next to them in the temp folder.
            source_dir = os.path.dirname(notebooks[0])
            shutil.copy(os.path.join(source_dir, "geo_data.zip"), temp)
        results = execute_notebooks(
            temp, notebooks,
            lambda i, n: "deviner" not in n and "html" in n,
            fLOG=fLOG,
            clean_function=clean_function_notebook)
        execute_notebook_list_finalize_ut(results, fLOG=fLOG, dump=ensae_projects)
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
fb4b6effdfe059fec31fd68302a5b80865c22312 | 879df09d88e0a0db13c05ae9be9f4561197ac06e | /settings.py | 4a824c3c11b61dc139c9698b85d3ca041d5cf29b | [
"MIT"
] | permissive | prateeknagpal/crypto-trader | 240b6515ce31c1c67635d1284a96db2818989bab | 7f2db4dbc66f080f9e72399031c081093aefc1ba | refs/heads/master | 2021-09-01T22:20:53.025411 | 2017-12-28T22:11:28 | 2017-12-28T22:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | import os
from decimal import Decimal
# Gemini REST/WebSocket API endpoints.
API_VERSION = 1
API_URL = 'https://api.gemini.com'
API_WS_URL = 'wss://api.gemini.com'
STARTING_NONCE = 800                     # must always increase incrementally

SYMBOL = 'ethusd'                        # currency pair to trade
POLL_DELAY = 30                          # runloop interval in seconds
MAX_ACTIVE_ORDERS = 3                    # maximum number of active orders to track

# Monetary amounts/ratios use Decimal built from strings: Decimal(1.00) and
# friends inherit binary-float representation error (e.g. Decimal(0.01) is
# 0.01000000000000000020816...), while Decimal('0.01') is exact.
USD_MIN_ORDER_AMT = Decimal('1.00')      # min amount to use when making new orders
USD_MAX_ORDER_AMT = Decimal('5.00')      # max amount to use when making new orders
MAX_GAIN_RATIO = Decimal('0.01')         # maximum percentage gains before selling the order
MAX_LOSS_RATIO = Decimal('-0.006')       # maximum percentage losses before selling the order
OVERPAY_RATIO = Decimal('0.005')         # percentage to pay over current price in order to guarantee orders closing quickly

USD_MAX_NET_GAINS = 100                  # total maximum USD gains before quitting the program
USD_MAX_NET_LOSS = -20                   # total maximum USD losses before quitting the program

DATA_DIR = './data'                      # where to store the state and logs (was a pointless f-string)
# NOTE(review): on Python 3.6+ the standard library also has a module named
# `secrets`; if the local secrets.py is missing, the stdlib module may be
# imported successfully and this guard would never fire — verify on the
# deployment Python version.
try:
    from secrets import *  # copy and edit secrets_default.py to secrets.py
except ImportError:
    print('Copy secrets_default.py to secrets.py to add your API credentials')
    raise SystemExit(1)
| [
"git@nicksweeting.com"
] | git@nicksweeting.com |
33a9c0319e08a8b7003a880482162b1368ca8458 | 37d4af0a33d47d6b264acb769a276a500871ab90 | /Python_Code_Beginner/07_语法进阶/hm_03_全局变量.py | c698dde858327048a610c338650901b68ed452d8 | [] | no_license | yzjbryant/YZJ_MIX_Code | 86fe3c8a265c69e5493f70b9753491c0f97d75d4 | ab3a7b8731730a714f013f59bbf551d3d50c3b33 | refs/heads/master | 2022-11-09T01:07:48.944817 | 2020-07-01T09:17:01 | 2020-07-01T09:17:01 | 271,439,307 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | #全局变量
num = 10
def demo1():
    """Print the module-level ``num``: globals are readable inside functions
    without a ``global`` declaration (only assignment would need one)."""
    print("demo1 ==> %d" % num)
def demo2():
    """Same as ``demo1``: read the shared global ``num`` and print it."""
    print("demo2 ==> %d" % num)
# Both calls print the same global value (10).
demo1()
demo2()
"yinzhijian2018@163.com"
] | yinzhijian2018@163.com |
ba95d090a862cc3aeac35022754529fb54e66fa3 | 172260a36450fb3eab639d7255a6bf94e4a556ec | /django/d2/lib/python3.7/ntpath.py | 10295cebd5e44526590092468b4af2006077f910 | [] | no_license | Grayson-choi/TIL | fe6c1828db7e116c8e97b35f28a6ea0e540f95fe | be3f9cee0cb6ee6b8eeb7a16b47fe1ece482a527 | refs/heads/master | 2023-03-16T02:17:44.954382 | 2021-02-03T04:52:37 | 2021-02-03T04:52:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | /Users/jw/anaconda3/lib/python3.7/ntpath.py | [
"jw@choejiung-ui-MacBook-Pro.local"
] | jw@choejiung-ui-MacBook-Pro.local |
30c8cb418a4fa444872b6d5bcdc2a135eb4ac4d2 | 0c466d2632b4c85f2cf0312e94ee826867039bc6 | /tests/conftest.py | f1394cf45d937fa5d0a868eaedadc268640de372 | [
"Apache-2.0"
] | permissive | rst0git/pytest-salt-factories | 6c917c9749b6eed92dcdc5cac411d9db83608d7f | d614c15700327e0d03a7464f4076523b93357857 | refs/heads/master | 2022-11-24T20:41:03.138928 | 2020-07-29T17:24:07 | 2020-07-29T17:24:07 | 282,475,745 | 0 | 0 | null | 2020-07-25T15:52:59 | 2020-07-25T15:52:58 | null | UTF-8 | Python | false | false | 2,316 | py | import functools
import logging
import os
import stat
import tempfile
import textwrap
import pytest
import salt.version
# Module-level logger used by the Tempfiles helper below.
log = logging.getLogger(__name__)

# Load pytest's "pytester" plugin for the tests in this suite.
pytest_plugins = ["pytester"]
def pytest_report_header():
    """Pytest hook: add the Salt version to the test session header."""
    return "salt-version: {}".format(salt.version.__version__)
class Tempfiles:
    """
    Class which generates temporary files and cleans them when done.

    ``request`` is a pytest fixture request; every file created here is
    registered with ``request.addfinalizer`` so it is deleted at teardown.
    """

    def __init__(self, request):
        self.request = request

    def makepyfile(self, contents, prefix=None, executable=False):
        """
        Create a python file (dedented and stripped) and return its path.

        :param contents: python source; leading newline and common indentation
                         are removed before writing.
        :param prefix: optional filename prefix (defaults to "tmp").
        :param executable: when True, add the owner-executable bit.
        """
        tfile = tempfile.NamedTemporaryFile("w", prefix=prefix or "tmp", suffix=".py", delete=False)
        contents = textwrap.dedent(contents.lstrip("\n")).strip()
        tfile.write(contents)
        tfile.close()
        if executable is True:
            st = os.stat(tfile.name)
            os.chmod(tfile.name, st.st_mode | stat.S_IEXEC)
        self.request.addfinalizer(functools.partial(self._delete_temp_file, tfile.name))
        with open(tfile.name) as rfh:
            log.debug(
                "Created python file with contents:\n>>>>> %s >>>>>\n%s\n<<<<< %s <<<<<\n",
                tfile.name,
                rfh.read(),
                tfile.name,
            )
        return tfile.name

    def makeslsfile(self, contents, name=None):
        """
        Create an sls file (dedented and stripped) and return its path.

        :param name: optional target path; when omitted a NamedTemporaryFile
                     with an ``.sls`` suffix is used.
        """
        if name is None:
            tfile = tempfile.NamedTemporaryFile("w", suffix=".sls", delete=False)
            name = tfile.name
            # Bug fix: close the handle before re-opening the path below.
            # The original left it open, leaking a file descriptor and
            # breaking on Windows where open files cannot be reopened.
            tfile.close()
        with open(name, "w") as wfh:
            contents = textwrap.dedent(contents.lstrip("\n")).strip()
            wfh.write(contents)
        self.request.addfinalizer(functools.partial(self._delete_temp_file, name))
        with open(name) as rfh:
            log.debug(
                "Created SLS file with contents:\n>>>>> %s >>>>>\n%s\n<<<<< %s <<<<<\n",
                name,
                rfh.read(),
                name,
            )
        return name

    def _delete_temp_file(self, fpath):
        """
        Cleanup the temporary path (no-op if it is already gone).
        """
        if os.path.exists(fpath):
            os.unlink(fpath)
@pytest.fixture
def tempfiles(request):
    """
    Temporary files fixture

    Returns a :class:`Tempfiles` helper bound to the requesting test so any
    files it creates are deleted during teardown via ``request.addfinalizer``.
    """
    return Tempfiles(request)
| [
"pedro@algarvio.me"
] | pedro@algarvio.me |
9db38d413747215cf959508e754da4cbf30e1ba7 | de9b8b7192a0a81e9249823bb2b86f0b7e452863 | /.history/classes/Handler_20171107110755.py | c6aa301d0a2f1cbd3fc1178b1fa74d6704968c24 | [
"MIT"
] | permissive | reecebenson/uwe-dadsa-tennis-a | f5eaeb1b96d4e61f29279514e68eeea8ad6533db | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | refs/heads/master | 2023-07-08T16:13:23.963348 | 2017-11-30T12:07:01 | 2017-11-30T12:07:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,270 | py | # DADSA - Assignment 1
# Reece Benson
import json
import random
from math import ceil, floor
from classes import Player
from classes import Season
from classes import Tournament
from classes import Round
from classes import Match
class Handler():
    """Loads seasons, players and prize money from JSON and generates rounds.

    NOTE(review): these are *class* attributes, so `seasons` (a mutable dict)
    is shared by every Handler instance; if more than one Handler is ever
    created they will stomp on each other's data.  Consider moving these into
    __init__.
    """
    # Define the variables we will be using
    app = None
    prize_money = None
    player_count = None
    seasons = { }

    def __init__(self, _app):
        # _app: the owning application object; .debug toggles console output.
        if(_app.debug):
            print("[LOAD]: Loaded Handler!")

        # Define our Application within this Handler class
        self.app = _app

    # Used to load all data into memory
    def load(self):
        # This function will create our seasons and implement the genders & players
        self.load_seasons()
        self.load_players()
        self.load_prize_money()

    #TODO: Implement load_seasons()
    # Used to load seasons into memory
    def load_seasons(self):
        # Reads ./data/seasons.json and creates a Season object per entry.
        with open('./data/seasons.json') as tData:
            data = json.load(tData)
            for season in data:
                # If the season does not yet exist, create it
                if(not season in self.get_seasons()):
                    self.seasons.update({ season: Season.Season(self.app, season, data[season]) })

    # Generate our rounds from our player list from scratch
    def generate_rounds(self):
        # Write our new data to memory
        for seasonId in self.get_seasons():
            season = self.get_season(seasonId)
            players = season.players()

            # Generate our rounds
            for gender in players:
                # Default Values
                round_cap = 3

                # Do we have a Round Cap overrider for this gender?
                if(gender + "_cap" in season.settings()):
                    round_cap = season.settings()[gender + "_cap"]

                # Create our first round
                _round_one = Round.Round(self.app, gender, "round_1")
                _round_one.set_cap(round_cap)

                # Create our first round data
                # Shuffle the players, then pair them off two at a time.
                rand_players = random.sample(players[gender], len(players[gender]))
                for i in range(len(rand_players) // 2):
                    # Grab our versus players
                    p_one = rand_players[i * 2]
                    p_two = rand_players[(i * 2) + 1]

                    # Generate some scores
                    p_one_score = random.randint(0, round_cap - 1)
                    p_two_score = random.randint(0, round_cap - 1)

                    # Make a random player the winner
                    who = random.randint(0, 1)
                    if(who == 0): p_one_score = round_cap
                    else: p_two_score = round_cap

                    # Append our random data as a Match
                    #round_data[gender].append({ p_one.name(): p_one_score, p_two.name(): p_two_score })
                    #round_data[round_name][gender].append(Match.Match(round_name, p_one, p_two, p_one_score, p_two_score))
                    _round_one.add_match(Match.Match(_round_one, p_one, p_two, p_one_score, p_two_score))

                # Append our first round to our season
                season.add_round(gender, _round_one)

                # Items in Round
                print("{0} has {1} matches".format(_round_one.name(), _round_one.match_count()))

                # Get the winners from each round
                for r in range(2, season.settings()['round_count'] + 1):
                    # Define variables
                    round_name = "round_"+str(r)

                    # Define our Round
                    _round = Round.Round(self.app, gender, round_name)

                    # Items in Round
                    print("{0} has {1} matches".format(_round.name(), _round.match_count()))
                    # NOTE(review): this break short-circuits the loop after
                    # round_2, so later rounds are never generated — this is
                    # work-in-progress code from the project history; confirm
                    # intent before removing.
                    break

            # Debug
            if(self.app.debug):
                print("[LOAD]: Generated {1} rounds for season: '{0}'".format(season.name(), season.settings()['round_count']))

    # End of generate_rounds()

    # Used to load prize money
    def load_prize_money(self):
        with open('./data/rankingPoints.json') as tData:
            data = json.load(tData)

            # Fallback on a non-existant occurrence
            if(self.player_count == None):
                self.player_count = 100

            # Make our prize_money a dictionary
            # NOTE(review): this dict initialisation is dead code — prize_money
            # is unconditionally reassigned to a list two statements later.
            if(self.prize_money == None):
                self.prize_money = { }

            # Set the prize money to the actual rank and points received
            # (one entry of `pts` per rank listed under that points value).
            self.prize_money = [ pts for pts in data for rank in data[pts] ]

            # We want to set the prize money for all indexes possible via the player
            # Pad with zeroes so every player index has an entry.
            self.prize_money += [ 0 ] * ( self.player_count - len(self.prize_money))

    # Used to load players from all seasons into memory
    def load_players(self):
        # Set our player (in gender) count
        self.player_count = 0

        with open('./data/players.json') as tData:
            data = json.load(tData)

            # Players are classed within Seasons
            for season in data:
                # If the season does not yet exist, ignore this input
                if(not season in self.get_seasons()):
                    continue

                # Players are then stored within Gender classifications
                for gender in data[season]:
                    if(not gender in self.get_season(season).players()):
                        self.get_season(season).players()[gender] = [ ]

                    # Append our player in the season, within the gender
                    for player in data[season][gender]:
                        #TODO: Change to using Player class
                        self.get_season(season).add_player(player, gender)

                        # Update our player count
                        # (player_count tracks the largest gender roster seen)
                        if(len(self.get_season(season).players()[gender]) > self.player_count):
                            self.player_count = len(self.get_season(season).players()[gender])

    def get_seasons(self):
        # Return the full { id: Season } mapping.
        return self.seasons

    def get_season(self, season_id):
        # Return the Season for this id, or None if it is unknown.
        if(season_id in self.seasons):
            return self.seasons[season_id]
        else:
            return None
"business@reecebenson.me"
] | business@reecebenson.me |
ea0f43e8088628a36071b7de9ecaee0e65802f88 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/hqm.py | ed0703b4fe609cb0d1154d2b14731d22620f1d8e | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
    # Interpret the file: every line must start with the keyword 'hQM';
    # anything else aborts with ERROR.
    # NOTE(review): a blank line makes data[0] raise IndexError — confirm
    # whether input files may contain blank lines.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'hQM':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
    # Usage: python hqm.py <source-file>
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
1dfa2fdb959acac2846a946ecd49380c5954d3c5 | 78d7d7aeb78a8cea6d0e10b89fc4aa6c46c95227 | /2226.py | 52c964524b018bed57f7213a24f68afd26aaddff | [] | no_license | GenryEden/kpolyakovName | 97db13ef93061a8c2afc6cc5acd91337f79063f1 | c5d7f631ae7ec8770e56170574b82ea2b7d8a4d9 | refs/heads/master | 2023-05-23T21:22:51.983756 | 2021-06-21T08:56:49 | 2021-06-21T08:56:49 | 350,466,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | def toCountSystem(x, y):
ans = ''
while x:
ans += str(x % y)
x //=y
return ans[::-1]
print(toCountSystem(9**9 + 3**21 - 7, 3).count('0')) | [
"a926788@gmail.com"
] | a926788@gmail.com |
34fa9e807853b8dd7dd745076bc87877d1f492b1 | 361b4f8a112d8a79d4b8f5522402d8e30f4fc5fa | /back-end/library-env/lib/python3.8/site-packages/rest_framework_json_api/relations.py | 95df1d486afa39d4205bd665e6f2e8ab49fa1673 | [] | no_license | Rabbi50/ember-django-rest-library-app | 47a213b8bdc14ab197f930e6b6fe6ead04bdb103 | edda5c0bdf46182ec115fffe9e7fde004f325f94 | refs/heads/main | 2023-01-09T06:10:32.748961 | 2020-11-18T15:53:00 | 2020-11-18T15:53:00 | 311,460,071 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,873 | py | import json
from collections import OrderedDict
import inflection
from django.core.exceptions import ImproperlyConfigured
from django.urls import NoReverseMatch
from django.utils.translation import gettext_lazy as _
from rest_framework.fields import MISSING_ERROR_MESSAGE, Field, SkipField
from rest_framework.relations import MANY_RELATION_KWARGS
from rest_framework.relations import ManyRelatedField as DRFManyRelatedField
from rest_framework.relations import PrimaryKeyRelatedField, RelatedField
from rest_framework.reverse import reverse
from rest_framework.serializers import Serializer
from rest_framework_json_api.exceptions import Conflict
from rest_framework_json_api.utils import (
Hyperlink,
get_included_serializers,
get_resource_type_from_instance,
get_resource_type_from_queryset,
get_resource_type_from_serializer
)
# Link-configuration keyword arguments accepted by the hyperlinked relation
# fields defined below (see HyperlinkedMixin.__init__).
LINKS_PARAMS = [
    'self_link_view_name',
    'related_link_view_name',
    'related_link_lookup_field',
    'related_link_url_kwarg'
]
class SkipDataMixin(object):
    """
    This workaround skips "data" rendering for relationships
    in order to save some sql queries and improve performance
    """

    def __init__(self, *args, **kwargs):
        super(SkipDataMixin, self).__init__(*args, **kwargs)

    def get_attribute(self, instance):
        # Raising SkipField tells DRF to omit this field's value entirely,
        # so the related queryset is never evaluated.
        raise SkipField

    def to_representation(self, *args):
        # Unreachable in practice: get_attribute() always skips first.
        raise NotImplementedError
# Many-valued counterpart: relationship links only, no "data" section.
class ManyRelatedFieldWithNoData(SkipDataMixin, DRFManyRelatedField):
    pass
class HyperlinkedMixin(object):
    """Builds the JSON:API ``links`` object ("self"/"related") for a relation."""

    self_link_view_name = None
    related_link_view_name = None
    related_link_lookup_field = 'pk'

    def __init__(self, self_link_view_name=None, related_link_view_name=None, **kwargs):
        if self_link_view_name is not None:
            self.self_link_view_name = self_link_view_name
        if related_link_view_name is not None:
            self.related_link_view_name = related_link_view_name

        self.related_link_lookup_field = kwargs.pop(
            'related_link_lookup_field', self.related_link_lookup_field
        )
        # The URL kwarg defaults to the lookup field's name.
        self.related_link_url_kwarg = kwargs.pop(
            'related_link_url_kwarg', self.related_link_lookup_field
        )

        # We include this simply for dependency injection in tests.
        # We can't add it as a class attributes or it would expect an
        # implicit `self` argument to be passed.
        self.reverse = reverse

        super(HyperlinkedMixin, self).__init__(**kwargs)

    def get_url(self, name, view_name, kwargs, request):
        """
        Given a name, view name and kwargs, return the URL that hyperlinks to the object.
        May raise a `NoReverseMatch` if the `view_name` and `lookup_field`
        attributes are not configured to correctly match the URL conf.
        """
        # Return None if the view name is not supplied
        if not view_name:
            return None

        # Return the hyperlink, or error if incorrectly configured.
        try:
            url = self.reverse(view_name, kwargs=kwargs, request=request)
        except NoReverseMatch:
            msg = (
                'Could not resolve URL for hyperlinked relationship using '
                'view name "%s".'
            )
            raise ImproperlyConfigured(msg % view_name)

        if url is None:
            return None

        return Hyperlink(url, name)

    def get_links(self, obj=None, lookup_field='pk'):
        """Return an OrderedDict with whichever of "self"/"related" resolve."""
        request = self.context.get('request', None)
        view = self.context.get('view', None)
        return_data = OrderedDict()

        # Lookup value comes from the instance when given, else the view kwargs.
        kwargs = {lookup_field: getattr(obj, lookup_field) if obj else view.kwargs[lookup_field]}

        self_kwargs = kwargs.copy()
        self_kwargs.update({
            'related_field': self.field_name if self.field_name else self.parent.field_name
        })
        self_link = self.get_url('self', self.self_link_view_name, self_kwargs, request)

        # Assuming RelatedField will be declared in two ways:
        # 1. url(r'^authors/(?P<pk>[^/.]+)/(?P<related_field>\w+)/$',
        #    AuthorViewSet.as_view({'get': 'retrieve_related'}))
        # 2. url(r'^authors/(?P<author_pk>[^/.]+)/bio/$',
        #    AuthorBioViewSet.as_view({'get': 'retrieve'}))
        # So, if related_link_url_kwarg == 'pk' it will add 'related_field' parameter to reverse()
        if self.related_link_url_kwarg == 'pk':
            related_kwargs = self_kwargs
        else:
            related_kwargs = {self.related_link_url_kwarg: kwargs[self.related_link_lookup_field]}

        related_link = self.get_url('related', self.related_link_view_name, related_kwargs, request)

        if self_link:
            return_data.update({'self': self_link})
        if related_link:
            return_data.update({'related': related_link})
        return return_data
class HyperlinkedRelatedField(HyperlinkedMixin, SkipDataMixin, RelatedField):
    """Relation rendered as links only (data skipped via SkipDataMixin)."""

    @classmethod
    def many_init(cls, *args, **kwargs):
        """
        This method handles creating a parent `ManyRelatedField` instance
        when the `many=True` keyword argument is passed.
        Typically you won't need to override this method.
        Note that we're over-cautious in passing most arguments to both parent
        and child classes in order to try to cover the general case. If you're
        overriding this method you'll probably want something much simpler, eg:
        .. code:: python
            @classmethod
            def many_init(cls, *args, **kwargs):
                kwargs['child'] = cls()
                return CustomManyRelatedField(*args, **kwargs)
        """
        list_kwargs = {'child_relation': cls(*args, **kwargs)}
        # Forward only the kwargs that ManyRelatedField itself understands.
        for key in kwargs:
            if key in MANY_RELATION_KWARGS:
                list_kwargs[key] = kwargs[key]
        return ManyRelatedFieldWithNoData(**list_kwargs)
class ResourceRelatedField(HyperlinkedMixin, PrimaryKeyRelatedField):
_skip_polymorphic_optimization = True
self_link_view_name = None
related_link_view_name = None
related_link_lookup_field = 'pk'
default_error_messages = {
'required': _('This field is required.'),
'does_not_exist': _('Invalid pk "{pk_value}" - object does not exist.'),
'incorrect_type': _(
'Incorrect type. Expected resource identifier object, received {data_type}.'
),
'incorrect_relation_type': _(
'Incorrect relation type. Expected {relation_type}, received {received_type}.'
),
'missing_type': _('Invalid resource identifier object: missing \'type\' attribute'),
'missing_id': _('Invalid resource identifier object: missing \'id\' attribute'),
'no_match': _('Invalid hyperlink - No URL match.'),
}
def __init__(self, **kwargs):
# check for a model class that was passed in for the relation type
model = kwargs.pop('model', None)
if model:
self.model = model
super(ResourceRelatedField, self).__init__(**kwargs)
def use_pk_only_optimization(self):
# We need the real object to determine its type...
return self.get_resource_type_from_included_serializer() is not None
def conflict(self, key, **kwargs):
"""
A helper method that simply raises a validation error.
"""
try:
msg = self.error_messages[key]
except KeyError:
class_name = self.__class__.__name__
msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key)
raise AssertionError(msg)
message_string = msg.format(**kwargs)
raise Conflict(message_string)
def to_internal_value(self, data):
if isinstance(data, str):
try:
data = json.loads(data)
except ValueError:
# show a useful error if they send a `pk` instead of resource object
self.fail('incorrect_type', data_type=type(data).__name__)
if not isinstance(data, dict):
self.fail('incorrect_type', data_type=type(data).__name__)
expected_relation_type = get_resource_type_from_queryset(self.get_queryset())
serializer_resource_type = self.get_resource_type_from_included_serializer()
if serializer_resource_type is not None:
expected_relation_type = serializer_resource_type
if 'type' not in data:
self.fail('missing_type')
if 'id' not in data:
self.fail('missing_id')
if data['type'] != expected_relation_type:
self.conflict(
'incorrect_relation_type',
relation_type=expected_relation_type,
received_type=data['type']
)
return super(ResourceRelatedField, self).to_internal_value(data['id'])
def to_representation(self, value):
if getattr(self, 'pk_field', None) is not None:
pk = self.pk_field.to_representation(value.pk)
else:
pk = value.pk
resource_type = self.get_resource_type_from_included_serializer()
if resource_type is None or not self._skip_polymorphic_optimization:
resource_type = get_resource_type_from_instance(value)
return OrderedDict([('type', resource_type), ('id', str(pk))])
def get_resource_type_from_included_serializer(self):
    """
    Check to see if this resource has a different resource_name when
    included and return that name, or None.
    """
    # When this field is itself nested (e.g. inside a many-relation), its own
    # field_name is empty and the parent's is used instead.
    field_name = self.field_name or self.parent.field_name
    parent = self.get_parent_serializer()

    if parent is not None:
        # accept both singular and plural versions of field_name
        field_names = [
            inflection.singularize(field_name),
            inflection.pluralize(field_name)
        ]
        includes = get_included_serializers(parent)
        for field in field_names:
            if field in includes.keys():
                return get_resource_type_from_serializer(includes[field])

    return None
def get_parent_serializer(self):
    """Return the closest enclosing serializer instance, or ``None``."""
    # Check at most two levels up: a field nested inside a relation has its
    # serializer as the grandparent, a directly-bound field as the parent.
    grandparent = getattr(self.parent, 'parent', None)
    if grandparent is not None and self.is_serializer(grandparent):
        return grandparent
    if self.is_serializer(self.parent):
        return self.parent
    return None
def is_serializer(self, candidate):
    """Return True when *candidate* is a ``Serializer`` instance."""
    return isinstance(candidate, Serializer)
def get_choices(self, cutoff=None):
    """Return an ordered mapping of JSON-encoded representations to display values.

    Keys are ``json.dumps`` strings of each item's representation so they are
    hashable; ``cutoff`` limits how many queryset rows are offered.
    """
    queryset = self.get_queryset()
    if queryset is None:
        # Ensure that field.choices returns something sensible
        # even when accessed with a read-only field.
        return {}

    if cutoff is not None:
        queryset = queryset[:cutoff]

    return OrderedDict([
        (
            json.dumps(self.to_representation(item)),
            self.display_value(item)
        )
        for item in queryset
    ])
class PolymorphicResourceRelatedField(ResourceRelatedField):
    """
    Inform DRF that the relation must be considered polymorphic.
    Takes a `polymorphic_serializer` as the first positional argument to
    retrieve then validate the accepted types set.
    """
    # Always resolve the resource type from the concrete instance (see the
    # check in ResourceRelatedField.to_representation).
    _skip_polymorphic_optimization = False

    default_error_messages = dict(ResourceRelatedField.default_error_messages, **{
        'incorrect_relation_type': _('Incorrect relation type. Expected one of [{relation_type}], '
                                     'received {received_type}.'),
    })

    def __init__(self, polymorphic_serializer, *args, **kwargs):
        # The polymorphic serializer supplies the set of acceptable types.
        self.polymorphic_serializer = polymorphic_serializer
        super(PolymorphicResourceRelatedField, self).__init__(*args, **kwargs)

    def use_pk_only_optimization(self):
        # The concrete instance is always needed to determine its type.
        return False

    def to_internal_value(self, data):
        """Validate a resource identifier against the polymorphic type set."""
        # Accept JSON-encoded strings (e.g. from form data).
        if isinstance(data, str):
            try:
                data = json.loads(data)
            except ValueError:
                # show a useful error if they send a `pk` instead of resource object
                self.fail('incorrect_type', data_type=type(data).__name__)
        if not isinstance(data, dict):
            self.fail('incorrect_type', data_type=type(data).__name__)

        if 'type' not in data:
            self.fail('missing_type')
        if 'id' not in data:
            self.fail('missing_id')

        # Any of the polymorphic serializer's declared types is acceptable.
        expected_relation_types = self.polymorphic_serializer.get_polymorphic_types()
        if data['type'] not in expected_relation_types:
            self.conflict('incorrect_relation_type', relation_type=", ".join(
                expected_relation_types), received_type=data['type'])

        # Deliberately start the super() chain ABOVE ResourceRelatedField to
        # bypass its single-type check while reusing the base id lookup.
        return super(ResourceRelatedField, self).to_internal_value(data['id'])
class SerializerMethodFieldBase(Field):
    """Read-only field whose value is produced by a method on the parent serializer."""

    def __init__(self, method_name=None, **kwargs):
        self.method_name = method_name
        # The whole object is handed to the serializer method, and the field
        # can never be written back.
        kwargs['source'] = '*'
        kwargs['read_only'] = True
        super().__init__(**kwargs)

    def bind(self, field_name, parent):
        # Default the method name to the DRF convention get_<field_name>.
        if self.method_name is None:
            self.method_name = 'get_{field_name}'.format(field_name=field_name)
        super().bind(field_name, parent)

    def get_attribute(self, instance):
        # Delegate value production to the serializer-defined method.
        return getattr(self.parent, self.method_name)(instance)
class ManySerializerMethodResourceRelatedField(SerializerMethodFieldBase, ResourceRelatedField):
    """To-many wrapper: renders each item of the method's result via ``child_relation``."""

    def __init__(self, child_relation=None, *args, **kwargs):
        assert child_relation is not None, '`child_relation` is a required argument.'
        self.child_relation = child_relation
        # NOTE(review): positional *args are accepted but not forwarded to
        # super().__init__ — confirm this is intentional.
        super().__init__(**kwargs)
        self.child_relation.bind(field_name='', parent=self)

    def to_representation(self, value):
        # Represent each element using the bound child relation.
        return [self.child_relation.to_representation(item) for item in value]
class SerializerMethodResourceRelatedField(SerializerMethodFieldBase, ResourceRelatedField):
    """
    Allows us to use serializer method RelatedFields
    with return querysets.
    """
    # kwargs that may be forwarded to the many-field wrapper below
    many_kwargs = [*MANY_RELATION_KWARGS, *LINKS_PARAMS, 'method_name', 'model']
    # class instantiated when the field is declared with many=True
    many_cls = ManySerializerMethodResourceRelatedField

    @classmethod
    def many_init(cls, *args, **kwargs):
        """Wrap a child instance in ``many_cls``, forwarding only supported kwargs."""
        list_kwargs = {'child_relation': cls(**kwargs)}
        for key in kwargs:
            if key in cls.many_kwargs:
                list_kwargs[key] = kwargs[key]
        return cls.many_cls(**list_kwargs)
class ManySerializerMethodHyperlinkedRelatedField(SkipDataMixin,
                                                  ManySerializerMethodResourceRelatedField):
    """To-many serializer-method relation rendered via SkipDataMixin (no data member)."""
    pass
class SerializerMethodHyperlinkedRelatedField(SkipDataMixin,
                                              SerializerMethodResourceRelatedField):
    """Serializer-method relation rendered via SkipDataMixin (no data member)."""
    many_cls = ManySerializerMethodHyperlinkedRelatedField
| [
"jasrabbi50@gmail.com"
] | jasrabbi50@gmail.com |
7e08d310b39198d59087ba94ec915a777c101447 | a2fae6522c0526e81032d700e750dbc4b55e308b | /twemoir/lib/adminfields/widgets.py | 05702f365ff10dd8bbd84727dcc1eb164d28b411 | [] | no_license | fish2000/django-twemoir | e895039e4ecd0a01baa9e35002fe0e00e20f6a4f | 8caa7e5319055f54e0d89457780605994622e8d9 | refs/heads/master | 2020-06-05T13:16:47.036385 | 2014-01-21T02:42:30 | 2014-01-21T02:42:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,054 | py | import simplejson
from django.forms import Widget
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.forms.widgets import flatatt
class JsonPairInputs(Widget):
    """
    :author: Huy Nguyen

    A widget that displays JSON Key Value Pairs as a list of text input box pairs.

    Usage (in forms.py)::

        jsonfield = forms.CharField(
            label="Example JSON Key Value Field",
            required = False,
            widget = JsonPairInputs(
                val_attrs={'size':35},
                key_attrs={'class':'large'}
            )
        )
    """

    def __init__(self, key_attrs=None, val_attrs=None, *args, **kwargs):
        """
        :param key_attrs: HTML attributes applied to the 1st input box
        :param val_attrs: HTML attributes applied to the 2nd input box
        """
        # None sentinels instead of mutable `{}` defaults: a shared default
        # dict would be aliased across every widget instance.
        self.key_attrs = {} if key_attrs is None else key_attrs
        self.val_attrs = {} if val_attrs is None else val_attrs
        Widget.__init__(self, *args, **kwargs)

    def render(self, name, value, attrs=None):
        """
        Renders this widget into an HTML string.

        :param name: Name of the field
        :type name: str
        :param value: A json string of a two-tuple list automatically passed in by django
        :type value: str
        :param attrs: automatically passed in by django (unused in this function)
        :type attrs: dict
        """
        # `is ''` compared identity, not equality — use == so any empty
        # string (not just the interned literal) falls back to '{}'.
        if value is None or value == '':
            value = '{}'
        if isinstance(value, dict):
            twotuple = value.items()
        else:
            twotuple = simplejson.loads(force_unicode(value))
        ret = []
        # An empty pair list simply renders no inputs.
        for k, v in twotuple:
            ctx = {'key': k,
                   'value': v,
                   'fieldname': name,
                   'key_attrs': flatatt(self.key_attrs),
                   'val_attrs': flatatt(self.val_attrs)}
            ret.append('<input type="text" name="json_key[%(fieldname)s]" value="%(key)s" %(key_attrs)s> <input type="text" name="json_value[%(fieldname)s]" value="%(value)s" %(val_attrs)s><br />' % ctx)
        return mark_safe("".join(ret))

    def value_from_datadict(self, data, files, name):
        """
        Returns the simplejson representation of the key-value pairs
        sent in the POST parameters.

        Returns None when the expected parameters are absent.

        :param data: request.POST or request.GET parameters
        :type data: dict
        :param files: request.FILES
        :type files: list
        :param name: The name of the field associated with this widget
        :type name: str
        """
        key_field = 'json_key[%s]' % name
        value_field = 'json_value[%s]' % name
        # dict.has_key was removed in Python 3; `in` works on both 2 and 3.
        if key_field in data and value_field in data:
            keys = data.getlist(key_field)
            values = data.getlist(value_field)
            # Drop pairs whose key was left blank.
            twotuple = [(key, value) for key, value in zip(keys, values) if len(key) > 0]
            return simplejson.dumps(twotuple)
| [
"fish2000@gmail.com"
] | fish2000@gmail.com |
86b9aa05550020a91166857df0221582f99bafbf | 590126fdbce9d0f92d6c49722c1a953b06e7a4d5 | /aat/exchange/exchange.py | 5d6d4313b8668c578f16f27f38b09ffa8ad0522a | [
"Apache-2.0"
] | permissive | galdamour/aat | 666fda492f0d13e5658d4f778fdbfdc4cdc321de | 458cb1ac33878a76bd9bf844e8362a5a0a9ec291 | refs/heads/master | 2023-01-11T05:32:42.086921 | 2020-11-16T16:40:54 | 2020-11-16T16:40:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | from abc import abstractmethod
from .base.market_data import _MarketData
from .base.order_entry import _OrderEntry
_EXCHANGES = {}
class Exchange(_MarketData, _OrderEntry):
    '''Generic representation of an exchange. There are two primary functionalities of an exchange.

    Market Data Source:
        exchanges can stream data to the engine

    Order Entry Sink:
        exchanges can be queried for data, or send data
    '''

    def __init__(self, exchange):
        # Identifier under which this exchange instance is known.
        self._exchange = exchange

    def exchange(self):
        '''return this exchange's identifier'''
        return self._exchange

    @staticmethod
    def registerExchange(exchange_name, clazz):
        '''register an exchange implementation class under `exchange_name`'''
        _EXCHANGES[exchange_name] = clazz

    @staticmethod
    def exchanges(exchange=None):
        '''look up a registered exchange class, or list all registered names'''
        if not exchange:
            return list(_EXCHANGES.keys())
        if exchange not in _EXCHANGES:
            raise Exception(f'Unknown exchange type: {exchange}')
        return _EXCHANGES[exchange]

    @abstractmethod
    async def connect(self):
        '''connect to exchange. should be asynchronous.

        For OrderEntry-only, can just return None
        '''

    async def lookup(self, instrument):
        '''lookup an instrument on the exchange'''
        return []

    # ****************** #
    # Inherited methods  #
    # From _MarketData
    #
    # async def tick(self):
    # def instruments(self):
    # def subscribe(self, instrument):

    # From _OrderEntry
    #
    # async def newOrder(self, order: Order):
    # def accounts(self) -> List:
    # ************************** #
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
2c1ac404f62c02b5b677cc02f3d3175bbe0d6e14 | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/beta/identitysignins_beta/azext_identitysignins_beta/vendored_sdks/identitysignins/aio/operations/_information_protection_operations.py | 2bcded6a5dcb4e709e30898750dda7e0b812f2bc | [
"MIT"
] | permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 68,007 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InformationProtectionOperations:
"""InformationProtectionOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~identity_sign_ins.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
    # Keep private references to the shared pipeline client, configuration,
    # and (de)serialization helpers; this operation group never owns them.
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
def list_data_loss_prevention_policies(
    self,
    orderby: Optional[List[Union[str, "models.Enum37"]]] = None,
    select: Optional[List[Union[str, "models.Enum38"]]] = None,
    expand: Optional[List[str]] = None,
    **kwargs
) -> AsyncIterable["models.CollectionOfDataLossPreventionPolicy"]:
    """Get dataLossPreventionPolicies from informationProtection.

    Get dataLossPreventionPolicies from informationProtection.

    :param orderby: Order items by property values.
    :type orderby: list[str or ~identity_sign_ins.models.Enum37]
    :param select: Select properties to be returned.
    :type select: list[str or ~identity_sign_ins.models.Enum38]
    :param expand: Expand related entities.
    :type expand: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CollectionOfDataLossPreventionPolicy or the result
     of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~identity_sign_ins.models.CollectionOfDataLossPreventionPolicy]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfDataLossPreventionPolicy"]
    # Map auth/404/409 statuses to azure-core exception types; callers may
    # extend the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    def prepare_request(next_link=None):
        # First page: build the operation URL plus OData query options.
        # Subsequent pages: the server-supplied next_link already carries
        # the full query, so only headers are (re)applied.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_data_loss_prevention_policies.metadata['url']  # type: ignore
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            if self._config.top is not None:
                query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
            if self._config.skip is not None:
                query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
            if self._config.search is not None:
                query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
            if self._config.filter is not None:
                query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
            if self._config.count is not None:
                query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
            if orderby is not None:
                query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
            if select is not None:
                query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
            if expand is not None:
                query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and hand (next link, items) back to the pager.
        deserialized = self._deserialize('CollectionOfDataLossPreventionPolicy', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.odata_next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize(models.OdataError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_data_loss_prevention_policies.metadata = {'url': '/informationProtection/dataLossPreventionPolicies'}  # type: ignore
async def create_data_loss_prevention_policies(
    self,
    body: "models.MicrosoftGraphDataLossPreventionPolicy",
    **kwargs
) -> "models.MicrosoftGraphDataLossPreventionPolicy":
    """Create new navigation property to dataLossPreventionPolicies for informationProtection.

    Create new navigation property to dataLossPreventionPolicies for informationProtection.

    :param body: New navigation property.
    :type body: ~identity_sign_ins.models.MicrosoftGraphDataLossPreventionPolicy
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphDataLossPreventionPolicy, or the result of cls(response)
    :rtype: ~identity_sign_ins.models.MicrosoftGraphDataLossPreventionPolicy
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphDataLossPreventionPolicy"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.create_data_loss_prevention_policies.metadata['url']  # type: ignore

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the model into the request body and POST it.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphDataLossPreventionPolicy')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 201 Created is the only expected success status.
    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphDataLossPreventionPolicy', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
create_data_loss_prevention_policies.metadata = {'url': '/informationProtection/dataLossPreventionPolicies'}  # type: ignore
async def get_data_loss_prevention_policies(
    self,
    data_loss_prevention_policy_id: str,
    select: Optional[List[Union[str, "models.Enum39"]]] = None,
    expand: Optional[List[str]] = None,
    **kwargs
) -> "models.MicrosoftGraphDataLossPreventionPolicy":
    """Get dataLossPreventionPolicies from informationProtection.

    Get dataLossPreventionPolicies from informationProtection.

    :param data_loss_prevention_policy_id: key: id of dataLossPreventionPolicy.
    :type data_loss_prevention_policy_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~identity_sign_ins.models.Enum39]
    :param expand: Expand related entities.
    :type expand: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphDataLossPreventionPolicy, or the result of cls(response)
    :rtype: ~identity_sign_ins.models.MicrosoftGraphDataLossPreventionPolicy
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphDataLossPreventionPolicy"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Construct URL: substitute the policy id into the URL template.
    url = self.get_data_loss_prevention_policies.metadata['url']  # type: ignore
    path_format_arguments = {
        'dataLossPreventionPolicy-id': self._serialize.url("data_loss_prevention_policy_id", data_loss_prevention_policy_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (OData $select/$expand are optional).
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphDataLossPreventionPolicy', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_data_loss_prevention_policies.metadata = {'url': '/informationProtection/dataLossPreventionPolicies/{dataLossPreventionPolicy-id}'}  # type: ignore
async def update_data_loss_prevention_policies(
    self,
    data_loss_prevention_policy_id: str,
    body: "models.MicrosoftGraphDataLossPreventionPolicy",
    **kwargs
) -> None:
    """Update the navigation property dataLossPreventionPolicies in informationProtection.

    Update the navigation property dataLossPreventionPolicies in informationProtection.

    :param data_loss_prevention_policy_id: key: id of dataLossPreventionPolicy.
    :type data_loss_prevention_policy_id: str
    :param body: New navigation property values.
    :type body: ~identity_sign_ins.models.MicrosoftGraphDataLossPreventionPolicy
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL: substitute the policy id into the URL template.
    url = self.update_data_loss_prevention_policies.metadata['url']  # type: ignore
    path_format_arguments = {
        'dataLossPreventionPolicy-id': self._serialize.url("data_loss_prevention_policy_id", data_loss_prevention_policy_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the model and PATCH it; the service replies 204 No Content.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphDataLossPreventionPolicy')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

update_data_loss_prevention_policies.metadata = {'url': '/informationProtection/dataLossPreventionPolicies/{dataLossPreventionPolicy-id}'}  # type: ignore
async def delete_data_loss_prevention_policies(
    self,
    data_loss_prevention_policy_id: str,
    if_match: Optional[str] = None,
    **kwargs
) -> None:
    """Delete navigation property dataLossPreventionPolicies for informationProtection.

    Delete navigation property dataLossPreventionPolicies for informationProtection.

    :param data_loss_prevention_policy_id: key: id of dataLossPreventionPolicy.
    :type data_loss_prevention_policy_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Construct URL: substitute the policy id into the URL template.
    url = self.delete_data_loss_prevention_policies.metadata['url']  # type: ignore
    path_format_arguments = {
        'dataLossPreventionPolicy-id': self._serialize.url("data_loss_prevention_policy_id", data_loss_prevention_policy_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers: an optional ETag enables conditional delete.
    header_parameters = {}  # type: Dict[str, Any]
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

delete_data_loss_prevention_policies.metadata = {'url': '/informationProtection/dataLossPreventionPolicies/{dataLossPreventionPolicy-id}'}  # type: ignore
async def evaluate_labels_and_policies(
    self,
    body: "models.PathsU4Eih0InformationprotectionMicrosoftGraphEvaluatelabelsandpoliciesPostRequestbodyContentApplicationJsonSchema",
    **kwargs
) -> "models.MicrosoftGraphEvaluateLabelsAndPoliciesJobResponse":
    """Invoke action evaluateLabelsAndPolicies.

    Invoke action evaluateLabelsAndPolicies.

    :param body: Action parameters.
    :type body: ~identity_sign_ins.models.PathsU4Eih0InformationprotectionMicrosoftGraphEvaluatelabelsandpoliciesPostRequestbodyContentApplicationJsonSchema
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphEvaluateLabelsAndPoliciesJobResponse, or the result of cls(response)
    :rtype: ~identity_sign_ins.models.MicrosoftGraphEvaluateLabelsAndPoliciesJobResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphEvaluateLabelsAndPoliciesJobResponse"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.evaluate_labels_and_policies.metadata['url']  # type: ignore

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Graph "actions" are invoked via POST with the action parameters as body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'PathsU4Eih0InformationprotectionMicrosoftGraphEvaluatelabelsandpoliciesPostRequestbodyContentApplicationJsonSchema')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphEvaluateLabelsAndPoliciesJobResponse', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
evaluate_labels_and_policies.metadata = {'url': '/informationProtection/microsoft.graph.evaluateLabelsAndPolicies'}  # type: ignore
async def get_policy(
    self,
    select: Optional[List[Union[str, "models.Enum47"]]] = None,
    expand: Optional[List[Union[str, "models.Enum48"]]] = None,
    **kwargs
) -> "models.MicrosoftGraphInformationProtectionPolicy":
    """Get policy from informationProtection.

    Get policy from informationProtection.

    :param select: Select properties to be returned.
    :type select: list[str or ~identity_sign_ins.models.Enum47]
    :param expand: Expand related entities.
    :type expand: list[str or ~identity_sign_ins.models.Enum48]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphInformationProtectionPolicy, or the result of cls(response)
    :rtype: ~identity_sign_ins.models.MicrosoftGraphInformationProtectionPolicy
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphInformationProtectionPolicy"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Construct URL (singleton navigation property — no path arguments).
    url = self.get_policy.metadata['url']  # type: ignore

    # Construct parameters (OData $select/$expand are optional).
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphInformationProtectionPolicy', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_policy.metadata = {'url': '/informationProtection/policy'}  # type: ignore
async def update_policy(
    self,
    body: "models.MicrosoftGraphInformationProtectionPolicy",
    **kwargs
) -> None:
    """Update the navigation property policy in informationProtection.

    Update the navigation property policy in informationProtection.

    :param body: New navigation property values.
    :type body: ~identity_sign_ins.models.MicrosoftGraphInformationProtectionPolicy
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL (singleton navigation property — no path arguments).
    url = self.update_policy.metadata['url']  # type: ignore

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the model and PATCH it; the service replies 204 No Content.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphInformationProtectionPolicy')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

update_policy.metadata = {'url': '/informationProtection/policy'}  # type: ignore
async def delete_policy(
    self,
    if_match: Optional[str] = None,
    **kwargs
) -> None:
    """Delete navigation property policy for informationProtection.

    Delete navigation property policy for informationProtection.

    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default status-code mapping; caller-supplied entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    accept = "application/json"

    # Fixed (non-templated) URL, no query options; If-Match only when given.
    request_url = self.delete_policy.metadata['url']  # type: ignore
    query = {}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]
    if if_match is not None:
        headers['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    headers['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(request_url, query, headers)
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_result.http_response

    if http_response.status_code != 204:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize(models.OdataError, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_result, None, {})
delete_policy.metadata = {'url': '/informationProtection/policy'}  # type: ignore
def list_sensitivity_labels(
    self,
    orderby: Optional[List[Union[str, "models.Enum55"]]] = None,
    select: Optional[List[Union[str, "models.Enum56"]]] = None,
    expand: Optional[List[Union[str, "models.Enum57"]]] = None,
    **kwargs
) -> AsyncIterable["models.CollectionOfSensitivityLabel"]:
    """Get sensitivityLabels from informationProtection.

    Get sensitivityLabels from informationProtection.

    Returns a lazy async pager; no request is sent until iteration begins.

    :param orderby: Order items by property values.
    :type orderby: list[str or ~identity_sign_ins.models.Enum55]
    :param select: Select properties to be returned.
    :type select: list[str or ~identity_sign_ins.models.Enum56]
    :param expand: Expand related entities.
    :type expand: list[str or ~identity_sign_ins.models.Enum57]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CollectionOfSensitivityLabel or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~identity_sign_ins.models.CollectionOfSensitivityLabel]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfSensitivityLabel"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page uses the
        # operation's metadata URL plus the OData query options; later pages
        # use the server-supplied next_link verbatim (its query string
        # already carries the continuation state, so no options are re-added).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_sensitivity_labels.metadata['url']  # type: ignore
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            # $top/$skip/$search/$filter/$count come from the client-wide
            # configuration, not from this call's arguments.
            if self._config.top is not None:
                query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
            if self._config.skip is not None:
                query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
            if self._config.search is not None:
                query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
            if self._config.filter is not None:
                query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
            if self._config.count is not None:
                query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
            if orderby is not None:
                query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
            if select is not None:
                query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
            if expand is not None:
                query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation link or None, items).
        deserialized = self._deserialize('CollectionOfSensitivityLabel', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.odata_next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than 200 is raised as an error.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize(models.OdataError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_sensitivity_labels.metadata = {'url': '/informationProtection/sensitivityLabels'}  # type: ignore
async def create_sensitivity_labels(
    self,
    body: "models.MicrosoftGraphSensitivityLabel",
    **kwargs
) -> "models.MicrosoftGraphSensitivityLabel":
    """Create new navigation property to sensitivityLabels for informationProtection.

    Create new navigation property to sensitivityLabels for informationProtection.

    :param body: New navigation property.
    :type body: ~identity_sign_ins.models.MicrosoftGraphSensitivityLabel
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphSensitivityLabel, or the result of cls(response)
    :rtype: ~identity_sign_ins.models.MicrosoftGraphSensitivityLabel
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphSensitivityLabel"]
    # Default status-code mapping; caller-supplied entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # POST the serialized model to the fixed collection URL.
    request_url = self.create_sensitivity_labels.metadata['url']  # type: ignore
    query = {}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphSensitivityLabel')

    request = self._client.post(request_url, query, headers, content=body_content)
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_result.http_response

    if http_response.status_code != 201:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize(models.OdataError, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphSensitivityLabel', pipeline_result)

    if cls:
        return cls(pipeline_result, deserialized, {})
    return deserialized
create_sensitivity_labels.metadata = {'url': '/informationProtection/sensitivityLabels'}  # type: ignore
async def get_sensitivity_labels(
    self,
    sensitivity_label_id: str,
    select: Optional[List[Union[str, "models.Enum58"]]] = None,
    expand: Optional[List[Union[str, "models.Enum59"]]] = None,
    **kwargs
) -> "models.MicrosoftGraphSensitivityLabel":
    """Get sensitivityLabels from informationProtection.

    Get sensitivityLabels from informationProtection.

    :param sensitivity_label_id: key: id of sensitivityLabel.
    :type sensitivity_label_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~identity_sign_ins.models.Enum58]
    :param expand: Expand related entities.
    :type expand: list[str or ~identity_sign_ins.models.Enum59]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphSensitivityLabel, or the result of cls(response)
    :rtype: ~identity_sign_ins.models.MicrosoftGraphSensitivityLabel
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphSensitivityLabel"]
    # Default status-code mapping; caller-supplied entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    accept = "application/json"

    # Expand the templated URL with the entity key.
    request_url = self._client.format_url(
        self.get_sensitivity_labels.metadata['url'],  # type: ignore
        **{'sensitivityLabel-id': self._serialize.url("sensitivity_label_id", sensitivity_label_id, 'str')}
    )
    # Optional OData projection/expansion options.
    query = {}  # type: Dict[str, Any]
    if select is not None:
        query['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
    headers = {}  # type: Dict[str, Any]
    headers['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(request_url, query, headers)
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_result.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize(models.OdataError, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphSensitivityLabel', pipeline_result)

    if cls:
        return cls(pipeline_result, deserialized, {})
    return deserialized
get_sensitivity_labels.metadata = {'url': '/informationProtection/sensitivityLabels/{sensitivityLabel-id}'}  # type: ignore
async def update_sensitivity_labels(
    self,
    sensitivity_label_id: str,
    body: "models.MicrosoftGraphSensitivityLabel",
    **kwargs
) -> None:
    """Update the navigation property sensitivityLabels in informationProtection.

    Update the navigation property sensitivityLabels in informationProtection.

    :param sensitivity_label_id: key: id of sensitivityLabel.
    :type sensitivity_label_id: str
    :param body: New navigation property values.
    :type body: ~identity_sign_ins.models.MicrosoftGraphSensitivityLabel
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default status-code mapping; caller-supplied entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # PATCH the serialized model to the entity's templated URL.
    request_url = self._client.format_url(
        self.update_sensitivity_labels.metadata['url'],  # type: ignore
        **{'sensitivityLabel-id': self._serialize.url("sensitivity_label_id", sensitivity_label_id, 'str')}
    )
    query = {}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphSensitivityLabel')

    request = self._client.patch(request_url, query, headers, content=body_content)
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_result.http_response

    if http_response.status_code != 204:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize(models.OdataError, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_result, None, {})
update_sensitivity_labels.metadata = {'url': '/informationProtection/sensitivityLabels/{sensitivityLabel-id}'}  # type: ignore
async def delete_sensitivity_labels(
    self,
    sensitivity_label_id: str,
    if_match: Optional[str] = None,
    **kwargs
) -> None:
    """Delete navigation property sensitivityLabels for informationProtection.

    Delete navigation property sensitivityLabels for informationProtection.

    :param sensitivity_label_id: key: id of sensitivityLabel.
    :type sensitivity_label_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default status-code mapping; caller-supplied entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    accept = "application/json"

    # Expand the templated URL with the entity key.
    request_url = self._client.format_url(
        self.delete_sensitivity_labels.metadata['url'],  # type: ignore
        **{'sensitivityLabel-id': self._serialize.url("sensitivity_label_id", sensitivity_label_id, 'str')}
    )
    query = {}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]
    if if_match is not None:
        headers['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    headers['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(request_url, query, headers)
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_result.http_response

    if http_response.status_code != 204:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize(models.OdataError, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_result, None, {})
delete_sensitivity_labels.metadata = {'url': '/informationProtection/sensitivityLabels/{sensitivityLabel-id}'}  # type: ignore
async def get_sensitivity_policy_settings(
    self,
    select: Optional[List[Union[str, "models.Enum65"]]] = None,
    expand: Optional[List[str]] = None,
    **kwargs
) -> "models.MicrosoftGraphSensitivityPolicySettings":
    """Get sensitivityPolicySettings from informationProtection.

    Get sensitivityPolicySettings from informationProtection.

    :param select: Select properties to be returned.
    :type select: list[str or ~identity_sign_ins.models.Enum65]
    :param expand: Expand related entities.
    :type expand: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphSensitivityPolicySettings, or the result of cls(response)
    :rtype: ~identity_sign_ins.models.MicrosoftGraphSensitivityPolicySettings
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphSensitivityPolicySettings"]
    # Default status-code mapping; caller-supplied entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    accept = "application/json"

    # Fixed (non-templated) singleton URL with optional OData options.
    request_url = self.get_sensitivity_policy_settings.metadata['url']  # type: ignore
    query = {}  # type: Dict[str, Any]
    if select is not None:
        query['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
    headers = {}  # type: Dict[str, Any]
    headers['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(request_url, query, headers)
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_result.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize(models.OdataError, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphSensitivityPolicySettings', pipeline_result)

    if cls:
        return cls(pipeline_result, deserialized, {})
    return deserialized
get_sensitivity_policy_settings.metadata = {'url': '/informationProtection/sensitivityPolicySettings'}  # type: ignore
async def update_sensitivity_policy_settings(
    self,
    body: "models.MicrosoftGraphSensitivityPolicySettings",
    **kwargs
) -> None:
    """Update the navigation property sensitivityPolicySettings in informationProtection.

    Update the navigation property sensitivityPolicySettings in informationProtection.

    :param body: New navigation property values.
    :type body: ~identity_sign_ins.models.MicrosoftGraphSensitivityPolicySettings
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default status-code mapping; caller-supplied entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # PATCH the serialized model to the fixed singleton URL.
    request_url = self.update_sensitivity_policy_settings.metadata['url']  # type: ignore
    query = {}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphSensitivityPolicySettings')

    request = self._client.patch(request_url, query, headers, content=body_content)
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_result.http_response

    if http_response.status_code != 204:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize(models.OdataError, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_result, None, {})
update_sensitivity_policy_settings.metadata = {'url': '/informationProtection/sensitivityPolicySettings'}  # type: ignore
async def delete_sensitivity_policy_settings(
    self,
    if_match: Optional[str] = None,
    **kwargs
) -> None:
    """Delete navigation property sensitivityPolicySettings for informationProtection.

    Delete navigation property sensitivityPolicySettings for informationProtection.

    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default status-code mapping; caller-supplied entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    accept = "application/json"

    # Fixed (non-templated) URL, no query options; If-Match only when given.
    request_url = self.delete_sensitivity_policy_settings.metadata['url']  # type: ignore
    query = {}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]
    if if_match is not None:
        headers['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    headers['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(request_url, query, headers)
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_result.http_response

    if http_response.status_code != 204:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize(models.OdataError, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_result, None, {})
delete_sensitivity_policy_settings.metadata = {'url': '/informationProtection/sensitivityPolicySettings'}  # type: ignore
def list_threat_assessment_requests(
    self,
    orderby: Optional[List[Union[str, "models.Enum66"]]] = None,
    select: Optional[List[Union[str, "models.Enum67"]]] = None,
    expand: Optional[List[Union[str, "models.Enum68"]]] = None,
    **kwargs
) -> AsyncIterable["models.CollectionOfThreatAssessmentRequest"]:
    """Get threatAssessmentRequests from informationProtection.

    Get threatAssessmentRequests from informationProtection.

    Returns a lazy async pager; no request is sent until iteration begins.

    :param orderby: Order items by property values.
    :type orderby: list[str or ~identity_sign_ins.models.Enum66]
    :param select: Select properties to be returned.
    :type select: list[str or ~identity_sign_ins.models.Enum67]
    :param expand: Expand related entities.
    :type expand: list[str or ~identity_sign_ins.models.Enum68]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CollectionOfThreatAssessmentRequest or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~identity_sign_ins.models.CollectionOfThreatAssessmentRequest]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfThreatAssessmentRequest"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page uses the
        # operation's metadata URL plus the OData query options; later pages
        # use the server-supplied next_link verbatim (its query string
        # already carries the continuation state, so no options are re-added).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_threat_assessment_requests.metadata['url']  # type: ignore
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            # $top/$skip/$search/$filter/$count come from the client-wide
            # configuration, not from this call's arguments.
            if self._config.top is not None:
                query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
            if self._config.skip is not None:
                query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
            if self._config.search is not None:
                query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
            if self._config.filter is not None:
                query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
            if self._config.count is not None:
                query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
            if orderby is not None:
                query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
            if select is not None:
                query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
            if expand is not None:
                query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation link or None, items).
        deserialized = self._deserialize('CollectionOfThreatAssessmentRequest', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.odata_next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than 200 is raised as an error.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize(models.OdataError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_threat_assessment_requests.metadata = {'url': '/informationProtection/threatAssessmentRequests'}  # type: ignore
async def create_threat_assessment_requests(
    self,
    body: "models.MicrosoftGraphThreatAssessmentRequest",
    **kwargs
) -> "models.MicrosoftGraphThreatAssessmentRequest":
    """Create new navigation property to threatAssessmentRequests for informationProtection.

    Create new navigation property to threatAssessmentRequests for informationProtection.

    :param body: New navigation property.
    :type body: ~identity_sign_ins.models.MicrosoftGraphThreatAssessmentRequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphThreatAssessmentRequest, or the result of cls(response)
    :rtype: ~identity_sign_ins.models.MicrosoftGraphThreatAssessmentRequest
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphThreatAssessmentRequest"]
    # Default status-code mapping; caller-supplied entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # POST the serialized model to the fixed collection URL.
    request_url = self.create_threat_assessment_requests.metadata['url']  # type: ignore
    query = {}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphThreatAssessmentRequest')

    request = self._client.post(request_url, query, headers, content=body_content)
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_result.http_response

    if http_response.status_code != 201:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize(models.OdataError, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphThreatAssessmentRequest', pipeline_result)

    if cls:
        return cls(pipeline_result, deserialized, {})
    return deserialized
create_threat_assessment_requests.metadata = {'url': '/informationProtection/threatAssessmentRequests'}  # type: ignore
async def get_threat_assessment_requests(
    self,
    threat_assessment_request_id: str,
    select: Optional[List[Union[str, "models.Enum69"]]] = None,
    expand: Optional[List[Union[str, "models.Enum70"]]] = None,
    **kwargs
) -> "models.MicrosoftGraphThreatAssessmentRequest":
    """Get threatAssessmentRequests from informationProtection.

    Get threatAssessmentRequests from informationProtection.

    :param threat_assessment_request_id: key: id of threatAssessmentRequest.
    :type threat_assessment_request_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~identity_sign_ins.models.Enum69]
    :param expand: Expand related entities.
    :type expand: list[str or ~identity_sign_ins.models.Enum70]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphThreatAssessmentRequest, or the result of cls(response)
    :rtype: ~identity_sign_ins.models.MicrosoftGraphThreatAssessmentRequest
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphThreatAssessmentRequest"]
    # Default status-code mapping; caller-supplied entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    accept = "application/json"

    # Expand the templated URL with the entity key.
    request_url = self._client.format_url(
        self.get_threat_assessment_requests.metadata['url'],  # type: ignore
        **{'threatAssessmentRequest-id': self._serialize.url("threat_assessment_request_id", threat_assessment_request_id, 'str')}
    )
    # Optional OData projection/expansion options.
    query = {}  # type: Dict[str, Any]
    if select is not None:
        query['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
    headers = {}  # type: Dict[str, Any]
    headers['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(request_url, query, headers)
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_result.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize(models.OdataError, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphThreatAssessmentRequest', pipeline_result)

    if cls:
        return cls(pipeline_result, deserialized, {})
    return deserialized
get_threat_assessment_requests.metadata = {'url': '/informationProtection/threatAssessmentRequests/{threatAssessmentRequest-id}'}  # type: ignore
async def update_threat_assessment_requests(
    self,
    threat_assessment_request_id: str,
    body: "models.MicrosoftGraphThreatAssessmentRequest",
    **kwargs
) -> None:
    """Update the navigation property threatAssessmentRequests in informationProtection.

    Update the navigation property threatAssessmentRequests in informationProtection.

    :param threat_assessment_request_id: key: id of threatAssessmentRequest.
    :type threat_assessment_request_id: str
    :param body: New navigation property values.
    :type body: ~identity_sign_ins.models.MicrosoftGraphThreatAssessmentRequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default status-code mapping; caller-supplied entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # PATCH the serialized model to the entity's templated URL.
    request_url = self._client.format_url(
        self.update_threat_assessment_requests.metadata['url'],  # type: ignore
        **{'threatAssessmentRequest-id': self._serialize.url("threat_assessment_request_id", threat_assessment_request_id, 'str')}
    )
    query = {}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphThreatAssessmentRequest')

    request = self._client.patch(request_url, query, headers, content=body_content)
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_result.http_response

    if http_response.status_code != 204:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize(models.OdataError, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_result, None, {})
update_threat_assessment_requests.metadata = {'url': '/informationProtection/threatAssessmentRequests/{threatAssessmentRequest-id}'}  # type: ignore
async def delete_threat_assessment_requests(
self,
threat_assessment_request_id: str,
if_match: Optional[str] = None,
**kwargs
) -> None:
"""Delete navigation property threatAssessmentRequests for informationProtection.
Delete navigation property threatAssessmentRequests for informationProtection.
:param threat_assessment_request_id: key: id of threatAssessmentRequest.
:type threat_assessment_request_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_threat_assessment_requests.metadata['url'] # type: ignore
path_format_arguments = {
'threatAssessmentRequest-id': self._serialize.url("threat_assessment_request_id", threat_assessment_request_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_threat_assessment_requests.metadata = {'url': '/informationProtection/threatAssessmentRequests/{threatAssessmentRequest-id}'} # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
d91e40c666df51dcae2bd1e9a2b0043769337f04 | 0d24036dcf8736c0392a1ee1c2f3b45633221d8a | /etc/src/genpy-mpls-ldp-oper/cisco_ios_xr_mpls_ldp_oper/mpls_ldp/global/standby/vrfs/vrf/issu/ha_statistics/ha_neighbors/ha_neighbor/ldp_nsr_stats_nbr_info_pb2.py | 9ac9f5f4af6f02f633828e9e8941727855fc23db | [] | no_license | mspiez/telemetry_collector | c4b97c6686748fc20748898a25e9fc756d2d0b63 | 52ed12c06debfe04181f0bfea9854a66ed8bb3df | refs/heads/master | 2020-12-19T23:28:08.358956 | 2020-05-02T19:54:38 | 2020-05-02T19:54:38 | 235,883,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 18,330 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cisco_ios_xr_mpls_ldp_oper/mpls_ldp/global/standby/vrfs/vrf/issu/ha_statistics/ha_neighbors/ha_neighbor/ldp_nsr_stats_nbr_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='cisco_ios_xr_mpls_ldp_oper/mpls_ldp/global/standby/vrfs/vrf/issu/ha_statistics/ha_neighbors/ha_neighbor/ldp_nsr_stats_nbr_info.proto',
package='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor',
syntax='proto3',
serialized_pb=_b('\n\x84\x01\x63isco_ios_xr_mpls_ldp_oper/mpls_ldp/global/standby/vrfs/vrf/issu/ha_statistics/ha_neighbors/ha_neighbor/ldp_nsr_stats_nbr_info.proto\x12gcisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor\"W\n\x1bldp_nsr_stats_nbr_info_KEYS\x12\x10\n\x08vrf_name\x18\x01 \x01(\t\x12\x0e\n\x06lsr_id\x18\x02 \x01(\t\x12\x16\n\x0elabel_space_id\x18\x03 \x01(\r\"\xa1\x03\n\x16ldp_nsr_stats_nbr_info\x12\x0e\n\x06lsr_id\x18\x32 \x01(\r\x12\x12\n\nlbl_spc_id\x18\x33 \x01(\r\x12\x16\n\x0ensr_sync_state\x18\x34 \x01(\x11\x12\x0f\n\x07num_msg\x18\x35 \x01(\r\x12\x97\x01\n\x0einit_sync_info\x18\x36 \x01(\x0b\x32\x7f.cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_synci_info\x12\x9f\x01\n\x16steady_state_sync_info\x18\x37 \x01(\x0b\x32\x7f.cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_syncs_info\"\xe0\x01\n\x16ldp_nsr_nbr_synci_info\x12\x17\n\x0finit_sync_start\x18\x01 \x01(\r\x12\x15\n\rinit_sync_end\x18\x02 \x01(\r\x12\x10\n\x08num_addr\x18\x03 \x01(\r\x12\x1a\n\x12num_duplicate_addr\x18\x04 \x01(\r\x12\x14\n\x0cnum_rx_bytes\x18\x05 \x01(\r\x12\x14\n\x0cnum_cap_sent\x18\x06 \x01(\r\x12\x14\n\x0cnum_cap_rcvd\x18\x07 \x01(\r\x12\x0f\n\x07num_lbl\x18\x08 \x01(\r\x12\x15\n\rnum_app_bytes\x18\t \x01(\r\"\xa5\x01\n\x16ldp_nsr_nbr_syncs_info\x12\x14\n\x0cnum_cap_sent\x18\x01 \x01(\r\x12\x14\n\x0cnum_cap_rcvd\x18\x02 \x01(\r\x12\x12\n\nrem_lbl_wd\x18\x03 \x01(\r\x12\x12\n\nrem_lbl_rq\x18\x04 \x01(\r\x12\x1a\n\x12num_stdby_adj_join\x18\x05 \x01(\r\x12\x1b\n\x13num_stdby_adj_leave\x18\x06 \x01(\rb\x06proto3')
)
_LDP_NSR_STATS_NBR_INFO_KEYS = _descriptor.Descriptor(
name='ldp_nsr_stats_nbr_info_KEYS',
full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info_KEYS',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='vrf_name', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info_KEYS.vrf_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lsr_id', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info_KEYS.lsr_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label_space_id', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info_KEYS.label_space_id', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=242,
serialized_end=329,
)
_LDP_NSR_STATS_NBR_INFO = _descriptor.Descriptor(
name='ldp_nsr_stats_nbr_info',
full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lsr_id', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info.lsr_id', index=0,
number=50, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lbl_spc_id', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info.lbl_spc_id', index=1,
number=51, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nsr_sync_state', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info.nsr_sync_state', index=2,
number=52, type=17, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_msg', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info.num_msg', index=3,
number=53, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='init_sync_info', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info.init_sync_info', index=4,
number=54, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='steady_state_sync_info', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info.steady_state_sync_info', index=5,
number=55, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=332,
serialized_end=749,
)
_LDP_NSR_NBR_SYNCI_INFO = _descriptor.Descriptor(
name='ldp_nsr_nbr_synci_info',
full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_synci_info',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='init_sync_start', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_synci_info.init_sync_start', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='init_sync_end', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_synci_info.init_sync_end', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_addr', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_synci_info.num_addr', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_duplicate_addr', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_synci_info.num_duplicate_addr', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_rx_bytes', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_synci_info.num_rx_bytes', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_cap_sent', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_synci_info.num_cap_sent', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_cap_rcvd', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_synci_info.num_cap_rcvd', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_lbl', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_synci_info.num_lbl', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_app_bytes', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_synci_info.num_app_bytes', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=752,
serialized_end=976,
)
_LDP_NSR_NBR_SYNCS_INFO = _descriptor.Descriptor(
name='ldp_nsr_nbr_syncs_info',
full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_syncs_info',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_cap_sent', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_syncs_info.num_cap_sent', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_cap_rcvd', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_syncs_info.num_cap_rcvd', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rem_lbl_wd', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_syncs_info.rem_lbl_wd', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rem_lbl_rq', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_syncs_info.rem_lbl_rq', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_stdby_adj_join', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_syncs_info.num_stdby_adj_join', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_stdby_adj_leave', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_syncs_info.num_stdby_adj_leave', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=979,
serialized_end=1144,
)
_LDP_NSR_STATS_NBR_INFO.fields_by_name['init_sync_info'].message_type = _LDP_NSR_NBR_SYNCI_INFO
_LDP_NSR_STATS_NBR_INFO.fields_by_name['steady_state_sync_info'].message_type = _LDP_NSR_NBR_SYNCS_INFO
DESCRIPTOR.message_types_by_name['ldp_nsr_stats_nbr_info_KEYS'] = _LDP_NSR_STATS_NBR_INFO_KEYS
DESCRIPTOR.message_types_by_name['ldp_nsr_stats_nbr_info'] = _LDP_NSR_STATS_NBR_INFO
DESCRIPTOR.message_types_by_name['ldp_nsr_nbr_synci_info'] = _LDP_NSR_NBR_SYNCI_INFO
DESCRIPTOR.message_types_by_name['ldp_nsr_nbr_syncs_info'] = _LDP_NSR_NBR_SYNCS_INFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ldp_nsr_stats_nbr_info_KEYS = _reflection.GeneratedProtocolMessageType('ldp_nsr_stats_nbr_info_KEYS', (_message.Message,), dict(
DESCRIPTOR = _LDP_NSR_STATS_NBR_INFO_KEYS,
__module__ = 'cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info_KEYS)
))
_sym_db.RegisterMessage(ldp_nsr_stats_nbr_info_KEYS)
ldp_nsr_stats_nbr_info = _reflection.GeneratedProtocolMessageType('ldp_nsr_stats_nbr_info', (_message.Message,), dict(
DESCRIPTOR = _LDP_NSR_STATS_NBR_INFO,
__module__ = 'cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info)
))
_sym_db.RegisterMessage(ldp_nsr_stats_nbr_info)
ldp_nsr_nbr_synci_info = _reflection.GeneratedProtocolMessageType('ldp_nsr_nbr_synci_info', (_message.Message,), dict(
DESCRIPTOR = _LDP_NSR_NBR_SYNCI_INFO,
__module__ = 'cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_synci_info)
))
_sym_db.RegisterMessage(ldp_nsr_nbr_synci_info)
ldp_nsr_nbr_syncs_info = _reflection.GeneratedProtocolMessageType('ldp_nsr_nbr_syncs_info', (_message.Message,), dict(
DESCRIPTOR = _LDP_NSR_NBR_SYNCS_INFO,
__module__ = 'cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_stats_nbr_info_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_mpls_ldp_oper.mpls_ldp.global.standby.vrfs.vrf.issu.ha_statistics.ha_neighbors.ha_neighbor.ldp_nsr_nbr_syncs_info)
))
_sym_db.RegisterMessage(ldp_nsr_nbr_syncs_info)
# @@protoc_insertion_point(module_scope)
| [
"mspiez@gmail.com"
] | mspiez@gmail.com |
5691bf63eeb65a0f6313e9b740c2eb8a3b14b60c | 401c6b56c22c762a88a46ce70a35a8d19f0fb863 | /Libs/Oryx.Web.Core.WebInstance/OryxWeb/wwwroot/Chat/chat-master/tests/test_semantic.py | ab8a21d8e90522154aa7516e53f8881e038a7dcf | [
"MIT"
] | permissive | OryxLib/Oryx.FastAdmin | 5f42993e3d7a1a61439a9efd2ee5889bbba6875c | b798d534baf3a07c8bff72e1a80faba119296cb6 | refs/heads/master | 2022-12-10T19:58:33.381271 | 2020-04-15T15:46:46 | 2020-04-15T15:46:46 | 255,535,071 | 3 | 2 | null | 2022-12-08T10:09:40 | 2020-04-14T07:03:15 | C# | UTF-8 | Python | false | false | 2,513 | py | # -*- coding: utf-8 -*-
import sys
sys.path.append("../")
from unittest import TestCase, main
from chat.semantic import synonym_cut, similarity, similarity2, build_semantic_matrix
from chat.mytools import time_me
class TestMe(TestCase):
def setUp(self):
pass
@time_me()
def test_similarity(self):
data = [
("黄克功", "王怀安"),
("黄克功", "黄克功"),
("宋朝的历史", "明朝的历史"),
("电脑", "打印机"),
("怎么了?,。。。。", "怎么了?..,#$"),
("我喜欢你", "你喜欢我"),
("我要取票", "我要取票"),
("存钱", "取钱"),
("镇店之宝", "有什么镇店之宝"),
("中国", "中华人民共和国"),
("喧闹的大街上人山人海", "热闹的街道上人来人往"),
("专心致志", "全神贯注"),
("爷爷爱吃土豆", "祖父喜欢吃马铃薯"),
("联想电脑多少钱", "联想笔记本价格"),
("今天天气怎么样", "我想去上海"),
("今天天气怎么样", "今天开心吗"),
("怎么花呗不能支付", "花呗付款不了怎么回事"),
("蚂蚁借呗的额度为什么会下降", "为什么借呗额度被降低了,没有不良记录"),
("蚂蚁借呗的额度为什么会下降", "为什么借呗额度被降低了"),
("花呗自动还款需要手续费ma", "花呗自动还款还要收手续费吗"),
("花呗怎么付款不鸟了", "帮忙看一下我花呗怎么用不了"),
("花呗被冻结怎么恢复", "花呗被封了怎么解除"),
("我借呗能不能开通", "如何开启借呗"),
("使用花呗已付款,订单显示没有付款", "花呗扣款了美团订单显示未付款")
]
for s1, s2 in data:
sv1 = synonym_cut(s1, 'wf')
sv2 = synonym_cut(s2, 'wf')
print(s1, 'VS', s2)
print(sv1, 'VS', sv2)
print("similarity1: ", similarity(sv1, sv2))
print('similarity2: ', similarity2(s1, s2), '\n')
def test_build_semantic_matrix(self):
matrix = build_semantic_matrix("为什么我的银行卡已经绑定了,花呗要求我还要绑银行卡", "为什么我的银行卡绑定了,花呗还是要求我绑定银行卡")
print(matrix, matrix.shape)
if __name__ == '__main__':
main()
| [
"407815932@qq.com"
] | 407815932@qq.com |
a042776fb0942df15cc3e28901ae082c4d7fe0a3 | 2218e1da5cb944e4509f8641ca051de137645c5e | /LeetCode practice/Top 100/77.combine.py | ab72990344fa03dc58b3f19fe2588873033fee6f | [] | no_license | Hegemony/Python-Practice | 9e76ebb414433e51c2074602fb0a871891647839 | b68ea41688e9e305635c63fdc43402e2b6fe6524 | refs/heads/main | 2023-05-05T14:00:59.921803 | 2021-06-01T15:38:30 | 2021-06-01T15:38:30 | 301,602,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | class Solution:
def combine(self, n: int, k: int):
nums = [i + 1 for i in range(n)]
res = []
def dfs(path, result):
if len(path) == k:
res.append(list(path))
return
for i in range(len(result)):
path.append(result[i])
print(path)
dfs(path, result[i + 1:])
path.pop()
dfs([], nums)
return res
print(Solution().combine(4, 2))
| [
"noreply@github.com"
] | Hegemony.noreply@github.com |
9d71d7dc901c16a0974b3b44d71e98018617359f | dfd0797c88aec7b02866d3c559cb1bc64ce87b44 | /Chapter 9 - Classes/9-11 Imported_Admin.py | e4c7baedb57267181d77835059dec5bdc4cefd2c | [] | no_license | 8BitJustin/2020-Python-Crash-Course | d97f9b79c7a1e1c88c9bc2b035b0e98b2ef23025 | 1f078d7fa62e2b07f8d6c01f85e60baed8293779 | refs/heads/master | 2020-12-20T00:19:44.173143 | 2020-06-14T18:42:08 | 2020-06-14T18:42:08 | 235,893,110 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | """
Start with your work from Exercise 9-8. Store the classes User, Privileges,
and Admin in one module. Create a separate file, make an Admin instance,
and call show_privileges() to show that everything is working correctly
"""
# Imported specific class (Admin) from user_module module
from user_module import Admin
# Creates superuser variable using Admin class.
superuser = Admin('justin', 'olson')
# Uses the describe_admin method within Admin class.
superuser.describe_admin()
# Uses the 'self.privileges' within Admin to access the Privileges class,
# then use the show_privileges method.
superuser.privileges.show_privileges()
"""
From top to bottom:
superuser variable was created using the imported Admin class. Then
superuser used the describe_admin() within the Admin class. Finally,
the superuser accessed the show_privileges() method within the Privileges
class by accessing self.privileges (privileges) within Admin class. This is
tied to Privileges(), which is a backdoor so-to-speak with accessing the
Privileges class, which wasn't actually imported into this file.
""" | [
"j.olson.digital@gmail.com"
] | j.olson.digital@gmail.com |
217f1a826831df2df46f299e04687a86a0071b73 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/138/usersdata/224/52701/submittedfiles/volumeTV.py | 4330c4185b1d9894c3e8f74c13da563d89f1168c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # -*- coding: utf-8 -*-
v=int(input('Volume inicial: '))
t=int(input('Numero de trocas: '))
soma=0
f=0
for i in range(1,t+1,1):
x=float(input('Digite o novo valor: '))
while soma+v<100:
soma=soma+x
soma=soma+1
if (soma+v)>100:
soma=100+x
f=soma+v
print(f)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
12efb4543d8af9d7a5e61896d1fea9e17e6c906e | f3b233e5053e28fa95c549017bd75a30456eb50c | /bace_input/L4K/4K-4O_MD_NVT_rerun/set.py | 768971013f254c568f9ddab2f3541ee853f5a9d4 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | import os
dir = '/mnt/scratch/songlin3/run/bace/L4K/MD/ti_one-step/4K_4O/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_prodin = filesdir + 'temp_prod.in'
temp_pbs = filesdir + 'temp.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#prodin
prodin = workdir + "%6.5f_prod.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../4K-4O_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
c573390d969c45e825f56c0c47303210f5f54069 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/pip/_internal/utils/compat.py | bdbe668e8be325a31f213285c178e2079c2ca3fd | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e268be7334f2b33e4e72eabee4ae81bc84828a2ebf6a0c8dc2404f36d2a061f3
size 9596
| [
"github@cuba12345"
] | github@cuba12345 |
ed99964064e075f5b68ac99ec5f52a6602c7edfa | 4c733e36833100685e6fae445a98676182275145 | /inctax.py | bbb25914d755ba6065f9d44cc36b7159063ba1fd | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | GoFroggyRun/taxcalc-ghost | 36d29d81024f18b4232be88f84a51e25e4b6844a | a03affdc6f6a064b2f607ec9f2f7de667b5d9858 | refs/heads/master | 2021-01-10T17:32:56.752078 | 2016-03-21T17:22:42 | 2016-03-21T17:22:42 | 52,900,916 | 0 | 1 | null | 2016-03-21T17:22:42 | 2016-03-01T18:45:12 | Python | UTF-8 | Python | false | false | 6,143 | py | """
INCome TAX input-output capabilities for Tax-Calculator.
"""
# CODING-STYLE CHECKS:
# pep8 --ignore=E402 inctax.py
# pylint --disable=locally-disabled inctax.py
# (when importing numpy, add "--extension-pkg-whitelist=numpy" pylint option)
import argparse
import sys
from taxcalc import IncomeTaxIO
def main():
"""
Contains command-line interface to the Tax-Calculator IncomeTaxIO class.
"""
# parse command-line arguments:
parser = argparse.ArgumentParser(
prog='python inctax.py',
description=('Writes to a file the federal income tax OUTPUT for the '
'tax filing units specified in the INPUT file with the '
'OUTPUT computed from the INPUT for the TAXYEAR using '
'the Tax-Calculator. '
'The INPUT file is a CSV-formatted file that contains '
'variable names that are a subset of the '
'Records.VALID_READ_VARS set. The OUTPUT file is in '
'Internet-TAXSIM format. The OUTPUT filename is the '
'INPUT filename (excluding the .csv suffix or '
'.gz suffix, or both) followed by '
'a string equal to "-YY" (where the YY is the last two '
'digits in the TAXYEAR) and all that is followed by a '
'trailing string. The trailing string is ".out-inctax" '
'if no --reform option is specified; otherwise the '
'trailing string is ".out-inctax-REFORM" (excluding any '
'".json" ending to the REFORM filename). The OUTPUT '
'file contains the first 28 Internet-TAXSIM output '
'variables. Use --iohelp flag for more information. '
'For details on the Internet-TAXSIM version 9.3 '
'OUTPUT format, go to '
'http://users.nber.org/~taxsim/taxsim-calc9/'))
parser.add_argument('--iohelp',
help=('optional flag to show INPUT and OUTPUT '
'variable definitions and exit without trying '
'to read the INPUT file, so INPUT and TAXYEAR '
'can be any meaningless pair of character (as '
'long as the second character is a digit) '
'(e.g., "i 0" or "x 1" or ". 9")'),
default=False,
action="store_true")
parser.add_argument('--reform',
help=('REFORM is name of optional file that contains '
'tax reform provisions; the provisions are '
'specified using JSON that may include '
'//-comments. No REFORM filename implies use '
'of current-law policy.'),
default=None)
parser.add_argument('--blowup',
help=('optional flag that triggers the default '
'imputation and blowup (or aging) logic built '
'into the Tax-Calculator that will age the '
'INPUT data from Records.PUF_YEAR to TAXYEAR. '
'No --blowup option implies INPUT data are '
'considered raw data that are not aged or '
'adjusted in any way.'),
default=False,
action="store_true")
parser.add_argument('--weights',
help=('optional flag that causes OUTPUT to have an '
'additional variable [29] containing the s006 '
'sample weight, which will be aged if the '
'--blowup option is used'),
default=False,
action="store_true")
parser.add_argument('--records',
help=('optional flag that causes the output file to '
'be a CSV-formatted file containing for each '
'INPUT filing unit the TAXYEAR values of each '
'variable in the Records.VALID_READ_VARS set. '
'If the --records option is specified, the '
'output file name will be the same as if the '
'option was not specified, except that the '
'".out-inctax" part is replaced by ".records"'),
default=False,
action="store_true")
parser.add_argument('INPUT',
help=('INPUT is name of required CSV file that '
'contains a subset of variables included in '
'the Records.VALID_READ_VARS set. '
'INPUT must end in ".csv".'))
parser.add_argument('TAXYEAR',
help=('TAXYEAR is calendar year for which federal '
'income taxes are computed (e.g., 2013).'),
type=int)
args = parser.parse_args()
# optionally show INPUT and OUTPUT variable definitions and exit
if args.iohelp:
IncomeTaxIO.show_iovar_definitions()
return 0
# instantiate IncometaxIO object and do federal income tax calculations
inctax = IncomeTaxIO(input_data=args.INPUT,
tax_year=args.TAXYEAR,
policy_reform=args.reform,
blowup_input_data=args.blowup,
output_records=args.records)
if args.records:
inctax.output_records(writing_output_file=True)
else:
inctax.calculate(writing_output_file=True,
output_weights=args.weights)
# return no-error exit code
return 0
# end of main function code
if __name__ == '__main__':
sys.exit(main())
| [
"martin.holmer@gmail.com"
] | martin.holmer@gmail.com |
93d81f8c26c2284d16707372d323d14fe181d394 | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/peering/v20190901preview/peering_service.py | 9a936068a24a199a5711f776f9d8c8cf0d832b32 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,518 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['PeeringServiceArgs', 'PeeringService']
# NOTE: generated SDK code (see file header) -- regenerate rather than hand-edit.
@pulumi.input_type
class PeeringServiceArgs:
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 location: Optional[pulumi.Input[str]] = None,
                 peering_service_location: Optional[pulumi.Input[str]] = None,
                 peering_service_name: Optional[pulumi.Input[str]] = None,
                 peering_service_provider: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a PeeringService resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] location: The location of the resource.
        :param pulumi.Input[str] peering_service_location: The PeeringServiceLocation of the Customer.
        :param pulumi.Input[str] peering_service_name: The name of the peering service.
        :param pulumi.Input[str] peering_service_provider: The MAPS Provider Name.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The resource tags.
        """
        # Only the resource group is mandatory; optional inputs are stored only
        # when supplied so that unset values stay absent from the input bag.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if peering_service_location is not None:
            pulumi.set(__self__, "peering_service_location", peering_service_location)
        if peering_service_name is not None:
            pulumi.set(__self__, "peering_service_name", peering_service_name)
        if peering_service_provider is not None:
            pulumi.set(__self__, "peering_service_provider", peering_service_provider)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    # Typed accessors below all delegate to the pulumi input bag via
    # pulumi.get/pulumi.set; the getter name maps to the wire-format key.
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The location of the resource.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="peeringServiceLocation")
    def peering_service_location(self) -> Optional[pulumi.Input[str]]:
        """
        The PeeringServiceLocation of the Customer.
        """
        return pulumi.get(self, "peering_service_location")

    @peering_service_location.setter
    def peering_service_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peering_service_location", value)

    @property
    @pulumi.getter(name="peeringServiceName")
    def peering_service_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the peering service.
        """
        return pulumi.get(self, "peering_service_name")

    @peering_service_name.setter
    def peering_service_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peering_service_name", value)

    @property
    @pulumi.getter(name="peeringServiceProvider")
    def peering_service_provider(self) -> Optional[pulumi.Input[str]]:
        """
        The MAPS Provider Name.
        """
        return pulumi.get(self, "peering_service_provider")

    @peering_service_provider.setter
    def peering_service_provider(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peering_service_provider", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The resource tags.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class PeeringService(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 peering_service_location: Optional[pulumi.Input[str]] = None,
                 peering_service_name: Optional[pulumi.Input[str]] = None,
                 peering_service_provider: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Peering Service

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] location: The location of the resource.
        :param pulumi.Input[str] peering_service_location: The PeeringServiceLocation of the Customer.
        :param pulumi.Input[str] peering_service_name: The name of the peering service.
        :param pulumi.Input[str] peering_service_provider: The MAPS Provider Name.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The resource tags.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: PeeringServiceArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Peering Service

        :param str resource_name: The name of the resource.
        :param PeeringServiceArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two @overload signatures above: accepts
        # either a PeeringServiceArgs bag or plain keyword inputs.
        resource_args, opts = _utilities.get_resource_args_opts(PeeringServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 peering_service_location: Optional[pulumi.Input[str]] = None,
                 peering_service_name: Optional[pulumi.Input[str]] = None,
                 peering_service_provider: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        # Normalize/validate resource options before registering the resource.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means a lookup of an existing resource; __props__ is then
        # populated by get() below rather than from user inputs.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = PeeringServiceArgs.__new__(PeeringServiceArgs)

            __props__.__dict__["location"] = location
            __props__.__dict__["peering_service_location"] = peering_service_location
            __props__.__dict__["peering_service_name"] = peering_service_name
            __props__.__dict__["peering_service_provider"] = peering_service_provider
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["tags"] = tags
            # Output-only properties start as None and are resolved by the engine.
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["type"] = None
        # Aliases keep state continuity across provider renames and API versions.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:peering/v20190901preview:PeeringService"), pulumi.Alias(type_="azure-native:peering:PeeringService"), pulumi.Alias(type_="azure-nextgen:peering:PeeringService"), pulumi.Alias(type_="azure-native:peering/v20190801preview:PeeringService"), pulumi.Alias(type_="azure-nextgen:peering/v20190801preview:PeeringService"), pulumi.Alias(type_="azure-native:peering/v20200101preview:PeeringService"), pulumi.Alias(type_="azure-nextgen:peering/v20200101preview:PeeringService"), pulumi.Alias(type_="azure-native:peering/v20200401:PeeringService"), pulumi.Alias(type_="azure-nextgen:peering/v20200401:PeeringService"), pulumi.Alias(type_="azure-native:peering/v20201001:PeeringService"), pulumi.Alias(type_="azure-nextgen:peering/v20201001:PeeringService"), pulumi.Alias(type_="azure-native:peering/v20210101:PeeringService"), pulumi.Alias(type_="azure-nextgen:peering/v20210101:PeeringService")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(PeeringService, __self__).__init__(
            'azure-native:peering/v20190901preview:PeeringService',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'PeeringService':
        """
        Get an existing PeeringService resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties start as None; the engine fills them from provider state.
        __props__ = PeeringServiceArgs.__new__(PeeringServiceArgs)

        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["peering_service_location"] = None
        __props__.__dict__["peering_service_provider"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return PeeringService(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The location of the resource.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="peeringServiceLocation")
    def peering_service_location(self) -> pulumi.Output[Optional[str]]:
        """
        The PeeringServiceLocation of the Customer.
        """
        return pulumi.get(self, "peering_service_location")

    @property
    @pulumi.getter(name="peeringServiceProvider")
    def peering_service_provider(self) -> pulumi.Output[Optional[str]]:
        """
        The MAPS Provider Name.
        """
        return pulumi.get(self, "peering_service_provider")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        The resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | johnbirdau.noreply@github.com |
be375352f73f8ca216f943d0e938d1f7f484b7e3 | c93fc506c39e002ae67bc380a365d1f33d5ac386 | /supervised_learning/models/pin_position/edge_predict/train_with_data_fnn.py | 61ca43cde4e9374082d9f4be9430fd88878cc4b3 | [] | no_license | quantumiracle/store3 | 9f1f2d5c7103b3ded5e556854e111701e8104ccb | 8b553c657c4efa6391547913831be5756a09924a | refs/heads/master | 2020-07-25T02:09:17.528485 | 2019-09-15T21:48:31 | 2019-09-15T21:48:31 | 208,126,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,891 | py | """
pure vector observation based learning: position of tactip and target
task: tactip following the cylinder to reach the ball target
use 382 pins
"""
import tensorflow as tf
# from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Flatten
import numpy as np
import matplotlib.pyplot as plt
import gym, threading, queue
from gym_unity.envs import UnityEnv
import argparse
from PIL import Image
from deform_visualize import plot_list_new, plot_list_new_sim2
import pickle
# Mode flags: --train fits the classifier, --test evaluates a saved model.
# NOTE(review): --rotat_test is parsed but never used in this script.
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=False)
parser.add_argument('--rotat_test', dest='rotat_test', action='store_true', default=False)
args = parser.parse_args()  # parsed eagerly at module import
class Classifier(object):
    """Fully-connected regression network (3 hidden ReLU layers of 500 units).

    Output is a dense layer squashed by tanh and scaled by 2, i.e. predictions
    lie in (-2, 2).  The object owns its own tf.Session.
    """

    def __init__(self, obs_dim, label_dim, ini_lr=1e-3):
        self.hidden_dim=500
        self.sess = tf.Session()
        # Placeholders for supervised targets and observations.
        self.label = tf.placeholder(tf.float32, [None, label_dim], 'label')
        self.obs = tf.placeholder(tf.float32, [None, obs_dim], 'obs')
        # The learning rate is a feedable placeholder so callers can decay it
        # per training step; ini_lr is only the default.
        self.lr = tf.placeholder_with_default(ini_lr, shape=(), name='lr')
        self.training = tf.placeholder_with_default(False, shape=(), name='training') # BN signal
        l1 = tf.layers.dense(self.obs, self.hidden_dim, tf.nn.relu)
        l2 = tf.layers.dense(l1, self.hidden_dim, tf.nn.relu)
        l3 = tf.layers.dense(l2, self.hidden_dim, tf.nn.relu)
        # tanh output scaled to (-2, 2).
        self.predict = 2*tf.layers.dense(l3, label_dim, activation=tf.nn.tanh) # predict position and rotation
        self.loss = tf.reduce_mean(tf.square(self.predict-self.label)) # pos
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
        # self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)
        self.train_op = self.optimizer.minimize(self.loss)
        self.sess.run(tf.global_variables_initializer())

    def train(self, batch_s, batch_label, lr, decay):
        """Run one Adam step on a batch and return the batch MSE loss.

        NOTE(review): ``decay`` is currently unused -- the decayed ``lr`` value
        itself is fed through the placeholder instead (see commented-out code).
        """
        # self.optimizer.learning_rate = lr
        # if decay:
        #   self.train_op = self.optimizer.minimize(self.loss)
        loss,_=self.sess.run([self.loss, self.train_op], {self.training: True, self.obs: batch_s, self.label: batch_label, self.lr: lr})
        # if decay:
        #   print(self.optimizer._lr)
        return loss

    def predict_one_value(self, s):
        """Predict for a single observation vector; returns shape (1, label_dim)."""
        s = s[np.newaxis, :]
        predict = self.sess.run(self.predict, {self.obs: s})
        return predict

    def predict_value(self, s):
        """Predict for a batch of observations; returns shape (batch, label_dim)."""
        predict = self.sess.run(self.predict, {self.obs: s})
        return predict

    def save(self, path):
        """Checkpoint all session variables under the *path* prefix."""
        saver = tf.train.Saver()
        saver.save(self.sess, path)

    def load(self, path):
        """Restore variables previously written by save() into this session."""
        saver=tf.train.Saver()
        saver.restore(self.sess, path)
def state_process(s, factor=0.5674):
    """Split one raw sample into ``([label], data)``.

    The raw sample layout is ``[label, x0, y0, z0, x1, y1, z1, ...]``: a scalar
    label followed by (x, y, z) triples, one per pin.  Only the x and z
    components are kept; both are divided by ``factor`` and interleaved as a
    flat ``(x, z, x, z, ...)`` numpy array.

    :param s: raw sample sequence; ``s[0]`` is the label.
    :param factor: coordinate scale divisor (default 0.5674, the original
        hard-coded calibration constant; parameterized for reuse).
    :return: tuple ``([label], data)`` with ``data`` a 1-D numpy array.
    """
    x = np.array(s[1::3]) / factor   # x components of every pin
    z = np.array(s[3::3]) / factor   # z components (y components are unused)
    data = np.transpose([x, z]).reshape(-1)  # interleave: (x, z, x, z, ...)
    return [s[0]], data
def Predict(input, model_path = './model/class_obj'):
    """Restore a trained classifier from *model_path* and score one sample.

    NOTE(review): a fresh graph/session is built on every call, so repeated
    calls are expensive -- cache the Classifier if this becomes a hot path.
    The parameter name ``input`` shadows the builtin; kept for compatibility.

    :param input: 1-D observation vector of 182 pin x/z values (see state_process).
    :param model_path: checkpoint prefix to restore from.
    :return: prediction array of shape (1, state_dim).
    """
    obs_dim = 182 # total 280: 0 object index, 1-3 rotation value, 4-6 average contact point position, 7-279 pins positions
    state_dim = 1 # 2 position
    lr=2e-2
    classifier = Classifier(obs_dim, state_dim, lr)
    classifier.load(model_path)
    predict = classifier.predict_one_value(input)
    return predict
if __name__ == '__main__':
    model_path = './model/comparison/random0.2/class_obj'
    training_episodes = 80000
    input_dim = 182 # total 280: 0 object index, 1-3 rotation value, 4-6 average contact point position, 7-279 pins positions
    output_dim = 1
    lr=5e-4
    decay=0 # decay signal of lr
    classifier = Classifier(input_dim, output_dim, lr)
    if args.train:
        # Load the pickled raw samples and preprocess each into (label, data).
        # NOTE(review): the pickle file handles are never closed.
        data_file=open('compare_data/raw_data02.pickle', "rb")
        raw_data=pickle.load(data_file)
        data=[]
        label=[]
        for i in range(len(raw_data)):
            s=raw_data[i]
            label_i, data_i=state_process(s)
            ''' add noise '''
            # Gaussian observation noise (sigma=1e-2) for regularization.
            data_i=data_i+np.random.normal(0, 1e-2, data_i.shape)
            data.append(data_i)
            label.append(label_i)
        loss_list=[]
        # classifier.load(model_path)
        for eps in range(training_episodes):
            # Halve the learning rate every 40000 episodes (after the first).
            if eps%40000==0 and eps>1:
                lr *=0.5
                decay=1
            else:
                decay=0
            loss = classifier.train(data, label, lr, decay)
            # Exponential moving average of the loss for a smoother curve.
            if eps==0:
                loss_list.append(loss)
            else:
                loss_list.append(0.9*loss_list[-1]+0.1*loss)
            print('Eps: {}, Loss: {}'.format(eps, loss))
            # Periodically refresh the loss plot and checkpoint the model.
            if eps % 100 ==0:
                plt.yscale('log')
                plt.plot(np.arange(len(loss_list)), loss_list)
                plt.savefig('classify_trainwithdataobj2.png')
                classifier.save(model_path)
        np.savetxt('trainwithdata.txt', np.array(loss_list)[:, np.newaxis], fmt='%.4f', newline=', ')
        round_loss_list=list(np.around(np.array(loss_list),4))
        print(round_loss_list)
    # test with testing dataset, all at once
    if args.test:
        test_data_file=open('data/raw_data.pickle', "rb")
        raw_data=pickle.load(test_data_file)
        data=[]
        label=[]
        classifier.load(model_path)
        # Evaluate the first 80 samples one at a time, printing label vs prediction.
        for i in range(80):
            s=raw_data[i]
            label_i, data_i=state_process(s)
            print(label_i)
            data.append(data_i)
            label.append(label_i)
            predict = classifier.predict_one_value(data_i)[0]
            print(predict)
            xy=data_i.reshape(-1,2)
            # plot_list_new_sim2(xy,i,predict, label_i)
            print(i)
| [
"1402434478@qq.com"
] | 1402434478@qq.com |
2a08847d1a4c5afc6f9242526a20c18f304652f7 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoEgamma/EgammaHFProducers/python/hfClusterShapes_cfi.py | 284de7823cb9863138c4bfc4645b9b29b1e096fd | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 731 | py | import FWCore.ParameterSet.Config as cms
# HFEMClusterShape producer
# Builds HF EM clusters from reconstructed HF hits ("hfreco").  The threshold
# and flag parameters below are forwarded verbatim to the HFEMClusterProducer
# plugin -- see that plugin for their exact semantics.
hfEMClusters = cms.EDProducer("HFEMClusterProducer",
                              hits = cms.InputTag("hfreco"),
                              minTowerEnergy = cms.double(4.0),
                              seedThresholdET = cms.double(5.0),
                              maximumSL = cms.double(98),
                              maximumRenergy = cms.double(50),
                              usePMTFlag = cms.bool(True),
                              forcePulseFlagMC=cms.bool(False),
                              usePulseFlag = cms.bool(True),
                              correctionType = cms.int32(1)
                              )
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
d14ebc07ab2006ac2b7441d8b84c1ddcf1c557a9 | edb88981aa1420af7e074068ed7818b9d904a3dd | /trunk/minds/util/patterns_tester.py | b1f5e484e945b74cdc21aec4745075e1fc83562b | [] | no_license | BackupTheBerlios/mindretrieve-svn | 101c0f1dfc25d20d5f828b6fd0d43301b773af4e | 463745fcf1c1d5b1f6c201c30bcc339c99b437ed | refs/heads/master | 2021-01-22T13:57:31.225772 | 2006-04-28T04:24:43 | 2006-04-28T04:24:43 | 40,801,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,352 | py | """
Look for a series of patterns in a file object.
This is used as a helper to unit testing.
"""
import re
from StringIO import StringIO
import sys
import unittest
# this is a debugging aid. Can we make it easier to activate?
def _debug_mismatch(data, i, pattern):
left = max(0,i-10)
print >>sys.stderr, data[left:i] + '<$>' + data[i:i+20]
print >>sys.stderr, 'Pattern not matched: ', pattern
assert pattern
def checkStrings(data, patterns, no_pattern=None):
    """Verify that every string in *patterns* occurs in *data*, in order.

    Optionally also verify that *no_pattern* does NOT occur anywhere after
    the final match.  Returns None when all checks pass; otherwise returns
    the offending pattern.
    """
    pos = 0
    for pat in patterns:
        found = data.find(pat, pos)
        if found == -1:
            return pat
        # resume the search just past this match to enforce ordering
        pos = found + len(pat)
    if no_pattern and data.find(no_pattern, pos) != -1:
        return no_pattern
    return None
def checkPatterns(data, patterns, no_pattern=None):
    """Like checkStrings(), but each pattern is a case-insensitive regexp.

    Note: the whole re pattern must appear within a single line of data.
    """
    pos = 0
    for pat in patterns:
        match = re.compile(pat, re.I).search(data, pos)
        if match is None:
            #_debug_mismatch(data, pos, pat)
            return pat
        # continue searching from the end of this match to enforce ordering
        pos = match.end()
    if no_pattern and re.compile(no_pattern, re.I).search(data, pos):
        #_debug_mismatch(data, pos, no_pattern)
        return no_pattern
    return None
def showFile(fp, label, maxchars=1024):
    """Render a buffered file (e.g. StringIO) as a labelled text banner,
    truncating the content after *maxchars* characters ('...' marks the cut)."""
    fp.seek(0)
    snippet = fp.read(maxchars)
    # a single probe character tells us whether anything was truncated
    extra = '...' if fp.read(1) else ''
    document = """
--%s%s
%s%s
--^end-----------------------------------------------------------------
""" % (label, '-' * (70-len(label)), snippet, extra)
    return document
# ----------------------------------------------------------------------
# Test the tester

# Fixture: a small HTML document shared by both test classes below.
SAMPLE_FILE = """
<html>
<head>
<title>Home</title>
<link rel="stylesheet" href="/main.css" type="text/css">
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head>
</html>
"""

# todo: give a little explanation on each test
class TestPatternTester(unittest.TestCase):
    """Exercises checkPatterns() on empty input and on SAMPLE_FILE."""

    def test00(self):
        # no data and no patterns: vacuously satisfied
        self.assertEqual(checkPatterns('', []), None)

    def test01(self):
        # any pattern fails against empty data and is reported back
        self.assertEqual(checkPatterns('', ['X']), 'X')

    def test10(self):
        # an empty pattern list always succeeds
        self.assertEqual(checkPatterns('xyz', []), None)

    def testCheckedOK(self):
        wanted = ['html', '.text.css.', '</html>']  # '.text.css.' is an re
        self.assertEqual(checkPatterns(SAMPLE_FILE, wanted), None)

    def testCheckedRe(self):
        wanted = ['html', '<title>.*</title>', '</html>']
        self.assertEqual(checkPatterns(SAMPLE_FILE, wanted), None)

    def testOrderWrong(self):
        # '.text.css.' occurs before </html>, so out-of-order matching fails
        wanted = ['html', r'\</html\>', '.text.css.']
        self.assertEqual(checkPatterns(SAMPLE_FILE, wanted), '.text.css.')

    def testNoPatternGood(self):
        # nothing matches '<' after the final </html>
        result = checkPatterns(SAMPLE_FILE, ['html', '.text.css.', '</html>'], '<')
        self.assertEqual(result, None)

    def testNoPatternBad(self):
        # stopping at </head> leaves a '<' (from </html>) in the tail
        result = checkPatterns(SAMPLE_FILE, ['html', '.text.css.', '</head>'], '<')
        self.assertEqual(result, '<')
class TestCheckStrings(unittest.TestCase):
    """Exercises checkStrings() on empty input and on SAMPLE_FILE."""

    def test00(self):
        # no data and no patterns: vacuously satisfied
        self.assertEqual(checkStrings('', []), None)

    def test01(self):
        # any string fails against empty data and is reported back
        self.assertEqual(checkStrings('', ['X']), 'X')

    def test10(self):
        # an empty string list always succeeds
        self.assertEqual(checkStrings('xyz', []), None)

    def testCheckedOK(self):
        wanted = ['html', 'text/css', '</html>']
        self.assertEqual(checkStrings(SAMPLE_FILE, wanted), None)

    def testOrderWrong(self):
        # 'text/css' occurs before </html>, so out-of-order matching fails
        wanted = ['html', '</html>', 'text/css']
        self.assertEqual(checkStrings(SAMPLE_FILE, wanted), 'text/css')

    def testNoPatternGood(self):
        # no '<' remains after the final </html>
        result = checkStrings(SAMPLE_FILE, ['html', 'text/css', '</html>'], '<')
        self.assertEqual(result, None)

    def testNoPatternBad(self):
        # stopping at </head> leaves a '<' (from </html>) in the tail
        result = checkStrings(SAMPLE_FILE, ['html', 'text/css', '</head>'], '<')
        self.assertEqual(result, '<')
# Run the self-tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
"tungwaiyip@785ff9d5-dded-0310-b5f2-a5aff206d990"
] | tungwaiyip@785ff9d5-dded-0310-b5f2-a5aff206d990 |
53192f430b8060cf4c1bfb05a71a5d1b8e0f0bee | 570ca07ec6266c875dc736a3d8c4b4ddc61579fd | /todo/views.py | fd8a700e8d825928ff8bdb4cdf3696834f14d36c | [
"MIT"
] | permissive | Wilo/to-do-list | 85024a59a0f8192a419297e66a69b0e31df45b43 | 185c0025a3b8a1e44adb1842c7e49af5062507a3 | refs/heads/master | 2021-01-17T05:42:56.683726 | 2014-08-02T20:47:15 | 2014-08-02T20:47:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,316 | py | from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from forms import *
from models import *
# Create your views here.
def list(request):
    """Render the full Todo collection on the index page.

    NOTE(review): the view name shadows the builtin ``list``; it is kept
    as-is because the URLconf and sibling views reference it by this name.
    """
    todo = Todo.objects.all()
    return render(request, 'index.html', {'list': todo})
def add(request):
    """Create a new Todo.

    On a valid POST, save and redirect to the list view (post/redirect/get),
    matching how edit() and delete() already behave; the original instead
    rendered the list inline (inviting duplicate submits on refresh) and
    contained a dead ``form = TodoForm()`` assignment just before returning.
    On GET, render an empty form; on an invalid POST, re-render the bound
    form so validation errors are shown.
    """
    if request.method == "POST":
        form = TodoForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('lista'))
    else:
        form = TodoForm()
    return render(request, 'add.html', {'form': form, 'add': True})
def edit(request, **kwargs):
    """Update an existing Todo identified by the ``pk`` kwarg.

    A valid POST saves and redirects to the list view; a GET (or invalid
    POST) renders the pre-filled form.  The dead ``form = TodoForm()``
    assignment that immediately preceded the redirect (its value could
    never be used) has been removed; behavior is otherwise unchanged.
    """
    pk = kwargs.get('pk')
    todo = Todo.objects.get(id=pk)
    if request.method == "POST":
        form = TodoForm(request.POST, instance=todo)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('lista'))
    else:
        form = TodoForm(instance=todo)
    return render(request, 'add.html', {'form': form, 'add': False, 'id': pk})
def delete(request, **kwargs):
    """Confirm (GET) and perform (POST) deletion of one Todo (``pk`` kwarg)."""
    todo = Todo.objects.get(id=kwargs.get('pk'))
    if request.method != "POST":
        # GET: show the confirmation page first
        return render(request, 'delete.html', {'todo': todo})
    todo.delete()
    return HttpResponseRedirect(reverse('lista'))
"leonardoorozcop@gmail.com"
] | leonardoorozcop@gmail.com |
cd9ccb5e2390a834796c340271af293d646cd570 | 8ad2e97aed97d581487f2b604c10264a52022253 | /people/items.py | 9d711c1252cdace2b3ace584b15b574a7be895a3 | [] | no_license | SimeonYS/people | 9aca784793a7ae7762b788878ff1fcb7ee511ba5 | a24b19c1282cb57f8b0dab9424e957a0d71d2bff | refs/heads/main | 2023-03-26T20:02:59.275628 | 2021-03-23T14:09:54 | 2021-03-23T14:09:54 | 350,736,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | import scrapy
class PeopleItem(scrapy.Item):
    """Container for one scraped article (field semantics inferred from names)."""
    title = scrapy.Field()    # article headline
    content = scrapy.Field()  # article body text
    date = scrapy.Field()     # publication date
    link = scrapy.Field()     # source URL
| [
"simeon.simeonov@ADPVT.com"
] | simeon.simeonov@ADPVT.com |
18d3f0a11f1b03da2aa4b0647595191a42d5c7ea | b0f2c47881f39ceb5a989b9638483f7439bfb5cf | /Problem85.py | e1438a737933a65d31f6e5622f7f1748dc8b5611 | [] | no_license | chrisvail/Project_Euler | 9ba264c8ec9d158b33ec677811e59d1e0e52fef2 | 41623c27b3e1344f9d8ebdfac4df297d0666cc07 | refs/heads/main | 2023-02-13T20:26:42.752780 | 2021-01-15T16:38:27 | 2021-01-15T16:38:27 | 329,964,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | from itertools import count
def main():
    """Project Euler 85: grid whose rectangle count is nearest 2,000,000.

    An n x m grid contains T(n) * T(m) rectangles, where T(k) = k*(k+1)/2
    is the k-th triangular number (choose 2 of the n+1 vertical lines times
    2 of the m+1 horizontal lines).  That closed form replaces the original
    O(n*m)-per-grid nested summation with O(1) work per grid, producing
    identical totals and therefore identical output.

    Prints the best (n, m) and the answer n*m, and returns 0.
    """
    TARGET = 2000000
    best = [TARGET, 0, 0]  # [distance from target, n, m]
    for n in count(1):
        tri_n = n * (n + 1) // 2  # T(n), invariant over the inner loop
        for m in range(n + 1):
            total = tri_n * (m * (m + 1) // 2)
            diff = abs(total - TARGET)
            if diff < best[0]:
                best = [diff, n, m]
        # Stop once some grid is within 2 of the target (the optimum,
        # 36 x 77 -> 1,999,998 rectangles, has distance exactly 2).
        if best[0] <= 2:
            print("Within 2")
            print("Closest answer is where:\n\tn = {}\n\tm = {} \
\nTherefore the answer is: {}".format(best[1], best[2], best[1] * best[2]))
            return 0
# Run the search when executed as a script (main()'s return code is ignored).
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | chrisvail.noreply@github.com |
1f3646b5f84c6d011827e3285f63bde47ff349cf | e41651d8f9b5d260b800136672c70cb85c3b80ff | /Notification_System/temboo/Library/Google/Spreadsheets/UpdateWorksheet.py | eaffb1484ed80159e3bb8320808a69eac7555076 | [] | no_license | shriswissfed/GPS-tracking-system | 43e667fe3d00aa8e65e86d50a4f776fcb06e8c5c | 1c5e90a483386bd2e5c5f48f7c5b306cd5f17965 | refs/heads/master | 2020-05-23T03:06:46.484473 | 2018-10-03T08:50:00 | 2018-10-03T08:50:00 | 55,578,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,673 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# UpdateWorksheet
# Updates existing worksheet metadata such as: Title, Row Count, and Column Count.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateWorksheet(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the UpdateWorksheet Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(UpdateWorksheet, self).__init__(temboo_session, '/Library/Google/Spreadsheets/UpdateWorksheet')

    # Factory hooks invoked by the Choreography base class to bind this
    # choreo to its specific input/result/execution types.
    def new_input_set(self):
        return UpdateWorksheetInputSet()

    def _make_result_set(self, result, path):
        return UpdateWorksheetResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return UpdateWorksheetChoreographyExecution(session, exec_id, path)
class UpdateWorksheetInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the UpdateWorksheet
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter forwards its value to InputSet._set_input under the
    # documented key; this class is generated SDK boilerplate.
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((optional, string) A valid Access Token retrieved during the OAuth process. This is required when authenticating with OAuth unless providing the ClientID, ClientSecret, and RefreshToken.)
        """
        super(UpdateWorksheetInputSet, self)._set_input('AccessToken', value)

    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
        """
        super(UpdateWorksheetInputSet, self)._set_input('ClientID', value)

    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
        """
        super(UpdateWorksheetInputSet, self)._set_input('ClientSecret', value)

    def set_ColumnCount(self, value):
        """
        Set the value of the ColumnCount input for this Choreo. ((required, integer) The number of columns that you want to specify for the worksheet.)
        """
        super(UpdateWorksheetInputSet, self)._set_input('ColumnCount', value)

    def set_Password(self, value):
        """
        Set the value of the Password input for this Choreo. ((optional, password) Deprecated (retained for backward compatibility only).)
        """
        super(UpdateWorksheetInputSet, self)._set_input('Password', value)

    def set_RefreshToken(self, value):
        """
        Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth Refresh Token used to generate a new Access Token when the original token is expired. Required unless providing a valid AccessToken.)
        """
        super(UpdateWorksheetInputSet, self)._set_input('RefreshToken', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml (the default) and json.)
        """
        super(UpdateWorksheetInputSet, self)._set_input('ResponseFormat', value)

    def set_RowCount(self, value):
        """
        Set the value of the RowCount input for this Choreo. ((required, integer) The number of rows that you want to specify for the worksheet.)
        """
        super(UpdateWorksheetInputSet, self)._set_input('RowCount', value)

    def set_SpreadsheetKey(self, value):
        """
        Set the value of the SpreadsheetKey input for this Choreo. ((required, string) The unique key associated with the spreadsheet that contains a worksheet you want to update.)
        """
        super(UpdateWorksheetInputSet, self)._set_input('SpreadsheetKey', value)

    def set_Title(self, value):
        """
        Set the value of the Title input for this Choreo. ((required, string) The new title of the worksheet.)
        """
        super(UpdateWorksheetInputSet, self)._set_input('Title', value)

    def set_Username(self, value):
        """
        Set the value of the Username input for this Choreo. ((required, string) Deprecated (retained for backward compatibility only).)
        """
        super(UpdateWorksheetInputSet, self)._set_input('Username', value)

    def set_WorksheetId(self, value):
        """
        Set the value of the WorksheetId input for this Choreo. ((required, string) The unique ID associated with the worksheet that you want to update.)
        """
        super(UpdateWorksheetInputSet, self)._set_input('WorksheetId', value)
class UpdateWorksheetResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the UpdateWorksheet Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin ``str``;
        # kept because this is the generated SDK surface.
        return json.loads(str)

    def get_NewAccessToken(self):
        """
        Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
        """
        return self._output.get('NewAccessToken', None)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (Response from Google.)
        """
        return self._output.get('Response', None)
class UpdateWorksheetChoreographyExecution(ChoreographyExecution):
    # Binds an in-flight execution to the UpdateWorksheet-specific ResultSet.
    def _make_result_set(self, response, path):
        return UpdateWorksheetResultSet(response, path)
| [
"shriswissfed@gmail.com"
] | shriswissfed@gmail.com |
52e2db735ddcef06b0f7ee93f8f2486af36d3b04 | 48517a9b7ec7b0f0bf0a03291b7d1e3def751c0a | /Choose Your Own Colors/corner_to_corner_5.py | 39a6a805f3f67ed65503e30b22b7b83f71056fc7 | [
"MIT"
] | permissive | Breakfast-for-Pigeons/Unicorn-HAT | 1ae033bf11c05b9cc739b1eacfc77665506e0bc8 | 9ff1388ee627a8e81f361929e9e9b708db4e2832 | refs/heads/master | 2021-06-06T12:22:48.162031 | 2020-10-22T17:31:51 | 2020-10-22T17:31:51 | 74,648,524 | 1 | 0 | null | 2018-10-02T17:37:31 | 2016-11-24T07:28:23 | Python | UTF-8 | Python | false | false | 2,468 | py | #!/usr/bin/python3
"""
Corner to Corner 5 - Choose Your Own Color
Selects a color and then sends it to one of four functions.
Can move a square from the lower left corner to the upper right corner.
Can move a square from the upper right corner to the lower left corner.
Can move a square from the lower right corner to the upper left corner.
Can move a square from the upper left corner to the lower right corner.
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
from corner_to_corner_1_v2 import corner_to_corner_1_v2
from corner_to_corner_2_v2 import corner_to_corner_2_v2
from corner_to_corner_3_v2 import corner_to_corner_3_v2
from corner_to_corner_4_v2 import corner_to_corner_4_v2
from bfp_unicornhat import print_header
from bfp_unicornhat import stop
########################################################################
# Import Variables #
########################################################################
from bfp_unicornhat import C1
from bfp_unicornhat import C2
from bfp_unicornhat import C3
from bfp_unicornhat import C4
from bfp_unicornhat import C5
from bfp_unicornhat import C6
from bfp_unicornhat import C7
from bfp_unicornhat import C8
########################################################################
# Functions #
########################################################################
def corner_to_corner_5():
    """
    Moves a square from one corner to the opposite corner, cycling through
    the four corner-to-corner animations once per color (C1..C8).
    """
    # Blank the display first by running one pass with the LEDs off.
    corner_to_corner_4_v2((0, 0, 0))
    # The four movement patterns alternate in the order 1, 3, 2, 4.
    movements = (corner_to_corner_1_v2, corner_to_corner_3_v2,
                 corner_to_corner_2_v2, corner_to_corner_4_v2)
    colors = (C1, C2, C3, C4, C5, C6, C7, C8)
    for index, color in enumerate(colors):
        movements[index % 4](color)
if __name__ == '__main__':
    try:
        # STEP01: Print header
        print_header()
        # STEP02: Print instructions in white text
        print("\033[1;37;40mPress Ctrl-C to stop the program.")
        # STEP03: Run the corner-to-corner light animation.
        corner_to_corner_5()
        # STEP04: Exit the program.
        stop()
    except KeyboardInterrupt:
        # Ctrl-C: shut the display down cleanly.
        stop()
| [
"noreply@github.com"
] | Breakfast-for-Pigeons.noreply@github.com |
87201465d144f08dc791178512de0d903d59f97a | c263b3eae99bcad488b6e334e906a30b3a29ba78 | /boolean_parser/actions/clause.py | 4db123ffd38fe680892632e76662f638c21fce34 | [
"BSD-3-Clause"
] | permissive | havok2063/boolean_parser | 675953df535f83b76cea29b324084105c1b1d1ca | cbc3c5b74695da838418aaa3dd3ad08f413ec4a7 | refs/heads/main | 2023-01-13T01:49:34.237899 | 2022-12-01T15:00:28 | 2022-12-01T15:00:28 | 170,555,761 | 18 | 3 | BSD-3-Clause | 2023-01-03T10:37:50 | 2019-02-13T18:12:26 | Python | UTF-8 | Python | false | false | 5,908 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: clause.py
# Project: actions
# Author: Brian Cherinka
# Created: Sunday, 17th February 2019 12:52:31 pm
# License: BSD 3-clause "New" or "Revised" License
# Copyright (c) 2019 Brian Cherinka
# Last Modified: Sunday, 17th February 2019 12:53:16 pm
# Modified By: Brian Cherinka
from __future__ import print_function, division, absolute_import
#
# Parsing Action classses
#
class BaseAction(object):
    ''' Base class for clause parse actions.

    Instances are attached to ``pyparsing`` elements via ``setParseAction``
    and run during parsing.  The parsed tokens are unpacked into named
    attributes so callers can inspect the clause without touching the raw
    ``pyparsing.ParseResults``.

    Attributes:
        name: str
            The extracted parameter name (without any base prefix).
        base: str
            The optional base of the parameter, or None when absent.
        fullname: str
            "base.name" when a base exists, otherwise just the name.
        data: dict
            The tokens extracted from the parsed clause.
        parsed_clause: :py:class:`pyparsing.ParseResults`
            The original pyparsed results object.
        input_clause: str
            The original input clause element.
    '''

    def __init__(self, data):
        self.parsed_clause = data
        self.data = data[0].asDict()
        self._parse_parameter_name()

    def _parse_parameter_name(self):
        ''' Split the raw parameter into an optional base and a name. '''
        name = self.data.get('parameter', None)
        assert name.count(
            '.') <= 1, f'parameter {name} cannot have more than one . '
        base, dot, remainder = name.partition('.')
        if dot:
            self.base, self.name = base, remainder
        else:
            self.base, self.name = None, name

    @property
    def fullname(self):
        ''' The full parameter name, including any base. '''
        if self.base:
            return f'{self.base}.{self.name}'
        return self.name
class Word(BaseAction):
    ''' Parse action for bare word clauses.

    The word itself becomes the parameter ``name``.  Example word clauses:
    "alpha" or "alpha and beta or not charlie".
    '''

    def __init__(self, data):
        super(Word, self).__init__(data)

    def __repr__(self):
        return str(self.name)

    @property
    def input_clause(self):
        ''' Original input clause as a string '''
        return str(self.fullname)
class Condition(BaseAction):
    ''' Class action for handling conditional clauses

    This action performs a basic conditional parse. The syntax for a
    conditional expressions is defined as "parameter operand value" or
    for "between" conditions, "parameter between value and value2". The parameter name,
    operand, and parameter value is assigned as the ``name``, ``operator``, and
    ``value`` attribute, respectively. Example conditional clauses:
    "x > 5" or "x > 5 and y < 3". When using a "between" condition, e.g.
    "x between 3 and 5", an additional ``value2`` attribute is assigned the second
    parameter value. For bitwise operands of '&' and '|', the value can also accept a negation
    prefix, e.g. "x & ~256", which evaluates to "x & -257".

    Allowed operands for conditionals are:
        '>', '>=, '<', '<=', '==', '=', '!=', '&', '|'

    In addition to the Base Attributes, the ``Condition`` action provides
    additional attributes containing the parsed condition parameters.

    Attributes:
        operator: str
            The operand used in the condition
        value: str
            The parameter value in the condition
        value2: str
            Optional second value, assigned when a "between" condition is used.
    '''

    def __init__(self, data):
        super(Condition, self).__init__(data)
        # extract the conditional operator and value
        self.operator = self.data.get('operator', None)
        self._extract_values()

    def __repr__(self):
        # NOTE(review): 'between' conditions render without the word "between"
        # and without spacing (e.g. "x3and5"); confirm whether this terse form
        # is intentional before relying on repr output.
        more = 'and' + self.value2 if hasattr(self, 'value2') else ''
        return self.name + self.operator + self.value + more

    @property
    def input_clause(self):
        ''' Original input clause as a string '''
        if self.operator == 'between':
            return f'{self.fullname} {self.operator} {self.value} and {self.value2}'
        else:
            return f'{self.fullname} {self.operator} {self.value}'

    def _extract_values(self):
        ''' Extract the value or values from the condition '''
        self.value = self.data.get('value', None)
        if not self.value:
            if self.operator == 'between':
                self.value = self._check_bitwise_value(self.data.get('value1'))
                self.value2 = self._check_bitwise_value(
                    self.data.get('value2'))
        # NOTE(review): for 'between' clauses the first value is passed through
        # _check_bitwise_value twice (harmless, since the '~' is stripped on the
        # first pass).  If 'value' is missing and the operator is not 'between',
        # this call receives None and raises TypeError on the '~' membership
        # test — presumably the grammar guarantees a value; confirm upstream.
        self.value = self._check_bitwise_value(self.value)

    def _check_bitwise_value(self, value):
        ''' Check if value has a bitwise ~ in it

        Removes any bitwise ~ found in a value for a condition.
        If the operand is a bitwise & or |, convert the ~value to its
        integer appropriate. E.g. ~64 -> -65.

        Parameters:
            value: str
                A string numerical value

        Returns:
            The str value or value converted to the proper bitwise negative
        '''
        if '~' in value:
            value = value.replace('~', '')
            if self.operator in ['&', '|']:
                # Two's-complement negation: ~n == -(n + 1).
                value = str(-1 * (int(value)) - 1)
        return value
return value
| [
"havok2063@hotmail.com"
] | havok2063@hotmail.com |
6ebb3533a73b107738006b3db204498153ef1cba | 20d1b971f58b0a6ab30f2682b773e7280ac77bc2 | /loops/170820, Lists/lr_dvumernie_spiski/8/8.1'divine_k_string.py | e624544d8e58779b0d5434ca7c3d94fed73fb580 | [] | no_license | M2401/P1 | 1a39ba384030a1483c77d86db933d11c30e45abf | c08f3ba1e07678b7b20836465175d51c89a43078 | refs/heads/master | 2023-03-03T21:34:31.601512 | 2021-02-06T17:44:54 | 2021-02-06T17:44:54 | 312,105,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | #1. Задана матрица порядка n и число к. Разделить элементы k-й строки на диагональный элемент,
# расположенный в этой строке.
import random
def pr(a, n):
    """Print the n x n matrix ``a``: tab-separated rows (each with a
    trailing tab), followed by one blank line."""
    for row in range(n):
        cells = [str(a[row][col]) for col in range(n)]
        print('\t'.join(cells) + '\t')
    print()
def main():
    """Read an order n, build a random n x n matrix, then divide every
    element of a chosen row k by the diagonal element a[k][k]
    (integer division), printing the matrix before and after."""
    n = int(input('Введите порядок матрицы n '))
    a = []
    # Fill the matrix with random integers in [1, 10].
    for i in range(n):
        b = []
        for j in range(n):
            b.append(random.randint(1, 10))
        a.append(b)
    pr(a, n)
    # Why does the loop ``for j in range(n): a[k][j] = a[k][j]//a[k][k]``
    # work incorrectly?  Because when j reaches k, a[k][k] is overwritten
    # (it becomes 1), so all later elements get divided by the new value
    # instead of the original diagonal element.  Saving the divisor in
    # ``p`` before the loop avoids that.
    k = int(input('введите номер строки '))
    p = a[k][k]
    j = 0
    while j < n:
        a[k][j] = a[k][j]//p
        j += 1
    # Print the resulting matrix.
    for i in range(n):
        for j in range(n):
            print(a[i][j], end='\t')
        print()
    print()


main()
"flussooo@gmail.com"
] | flussooo@gmail.com |
e3bcf84be53aaddcc116ab28636370c0e2ff9d75 | 4d5a91c312e9d633f73098bcc42ba9386893bd86 | /pajbot/modules/basic/dbmanage.py | f9b062683151352d7e5933d207371752d4897328 | [
"MIT"
] | permissive | leecopland/bullbot | ffc45062d802695fe2486f26643c1d1b9429e19c | 52e463293097b58084afb4f9f1d85b0656a67d44 | refs/heads/master | 2022-12-10T14:02:28.113368 | 2021-03-25T05:04:28 | 2021-03-25T05:04:28 | 172,211,287 | 1 | 0 | MIT | 2022-09-16T18:28:35 | 2019-02-23T12:23:22 | Python | UTF-8 | Python | false | false | 1,594 | py | import logging
import pajbot.models
from pajbot.modules import BaseModule
from pajbot.modules import ModuleType
from pajbot.modules.basic import BasicCommandsModule
log = logging.getLogger(__name__)
class DBManageModule(BaseModule):
    """Always-enabled module exposing the admin-only !reload and !commit
    commands for refreshing or persisting the bot's database-backed state."""

    ID = __name__.split('.')[-1]
    NAME = 'DB Managing commands'
    ENABLED_DEFAULT = True
    DESCRIPTION = '!reload/!commit'
    CATEGORY = 'Feature'
    PARENT_MODULE = BasicCommandsModule
    MODULE_TYPE = ModuleType.TYPE_ALWAYS_ENABLED

    def reload(self, **options):
        """Re-read data from the database: one named target, or everything."""
        bot = options['bot']
        target = options['message']
        bot.whisper(options['source'].username, 'Reloading things from DB...')
        if target and target in bot.reloadable:
            bot.reloadable[target].reload()
        else:
            bot.reload_all()

    def commit(self, **options):
        """Flush cached data to the database: one named target, or everything."""
        bot = options['bot']
        target = options['message']
        bot.whisper(options['source'].username, 'Committing cached things to db...')
        if target and target in bot.commitable:
            bot.commitable[target].commit()
        else:
            bot.commit_all()

    def load_commands(self, **options):
        """Register the !reload and !commit commands (require level 1000)."""
        raw_command = pajbot.models.command.Command.raw_command
        self.commands['reload'] = raw_command(
            self.reload,
            level=1000,
            description='Reload a bunch of data from the database')
        self.commands['commit'] = raw_command(
            self.commit,
            level=1000,
            description='Commit data from the bot to the database')
| [
"pajlada@bithack.se"
] | pajlada@bithack.se |
4c3323df4f9c9bba476d8026c33ac9be0a13b6d0 | 25e2acc62950fa5c0804425b9c13a08bee765495 | /fatorialRecursivo.py | c8b09cd53133e469f2e393af22e76a34fcb36261 | [
"MIT"
] | permissive | CarlosEduardoAS/KhanAcademy | 12dfb742ea12b8a980c6480d403217b3b8204481 | 0b5947fb8fb51bebdd410a617f7161ea8252a4e5 | refs/heads/main | 2023-04-02T04:48:53.711041 | 2021-03-30T01:35:30 | 2021-03-30T01:35:30 | 352,831,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | def fatorial(n):
if n < 1:
return 1
else:
return n * fatorial(n - 1)
f = fatorial(5)
print(f)
| [
"79329559+CarlosEduardoAS@users.noreply.github.com"
] | 79329559+CarlosEduardoAS@users.noreply.github.com |
ff39c0ef492d2e7f04473d445ce17cb6b04fcf4a | ad59fb12042bfd3f5c43eca057d0f747f9e148cf | /Se2iP/usr/lib/enigma2/python/Plugins/Extensions/IPTVPlayer/tsiplayer/addons/resources/sites/skyanimes.py | af65bcc571ea7849524d37e21c5373796ebb25f6 | [] | no_license | lexlong2007/eePlugins | d62b787100a7069ad5713a47c5688008063b45ec | 167b262fe36901a2d3a2fae6d0f85e2307b3eff7 | refs/heads/master | 2022-03-09T05:37:37.567937 | 2022-02-27T01:44:25 | 2022-02-27T01:44:25 | 253,012,126 | 0 | 0 | null | 2020-04-04T14:03:29 | 2020-04-04T14:03:29 | null | UTF-8 | Python | false | false | 11,152 | py | # -*- coding: utf-8 -*-
# vStream https://github.com/Kodi-vStream/venom-xbmc-addons
# Makoto
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.gui.hoster import cHosterGui
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.gui.gui import cGui
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.inputParameterHandler import cInputParameterHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.requestHandler import cRequestHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.parser import cParser
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.comaddon import progress
SITE_IDENTIFIER = 'skyanimes'
SITE_NAME = 'Sky-Animes'
SITE_DESC = 'Animés, Dramas en Direct Download'
URL_MAIN = 'http://www.sky-animes.com/'
STREAM = 'index.php?file=Media&nuked_nude=index&op=do_dl&dl_id='
INDEX = 'index.php?file=Search&op=mod_search&searchtype=matchand&autor=&module=Download&limit=100&main='
URL_SEARCH_ANIMS = (URL_MAIN + INDEX, 'showEpisode')
FUNCTION_SEARCH = 'showEpisode'
ANIM_ANIMS = (True, 'showMenuAnims')
ANIM_GENRES = (True, 'showGenresA')
ANIM_VOSTFRS = (URL_MAIN + 'streaming-films', 'showSeries')
ANIM_OAVS = (URL_MAIN + 'streaming-oavs', 'showSeries')
DRAMA_DRAMAS = (True, 'showMenuDramas')
DRAMA_GENRES = (True, 'showGenresD')
def load():
    """Build the add-on root menu: search, animes and dramas entries."""
    gui = cGui()
    params = cOutputParameterHandler()
    entries = (
        (URL_SEARCH_ANIMS[0], 'showSearch', 'Recherche', 'search.png'),
        (ANIM_ANIMS[0], ANIM_ANIMS[1], 'Animés', 'animes.png'),
        (DRAMA_DRAMAS[0], DRAMA_DRAMAS[1], 'Dramas', 'dramas.png'),
    )
    for site_url, handler, title, icon in entries:
        params.addParameter('siteUrl', site_url)
        gui.addDir(SITE_IDENTIFIER, handler, title, icon, params)
    gui.setEndOfDirectory()
def showMenuAnims():
    """Anime sub-menu: films, genres, then ongoing/finished series lists."""
    gui = cGui()
    params = cOutputParameterHandler()
    params.addParameter('siteUrl', ANIM_VOSTFRS[0])
    gui.addDir(SITE_IDENTIFIER, ANIM_VOSTFRS[1], 'Animés (Films)', 'films.png', params)
    params.addParameter('siteUrl', ANIM_GENRES[0])
    gui.addDir(SITE_IDENTIFIER, ANIM_GENRES[1], 'Animés (Genres)', 'genres.png', params)
    statuses = (
        ('En Cours', URL_MAIN + 'streaming-animes-en-cours?p=-1'),
        ('Terminés', URL_MAIN + 'download-animes-termines?p=-1'),
    )
    for label, page_url in statuses:
        params.addParameter('siteUrl', page_url)
        gui.addDir(SITE_IDENTIFIER, 'showSeries', label, 'animes.png', params)
    gui.setEndOfDirectory()
def showMenuDramas():
    """Drama sub-menu: genres, then ongoing/finished series lists."""
    gui = cGui()
    params = cOutputParameterHandler()
    params.addParameter('siteUrl', DRAMA_GENRES[0])
    gui.addDir(SITE_IDENTIFIER, DRAMA_GENRES[1], 'Dramas (Genres)', 'genres.png', params)
    # The 'Dramas (OAVS)' entry (ANIM_OAVS) is intentionally disabled
    # pending a content review of that listing.
    statuses = (
        ('En Cours', URL_MAIN + 'download-dramas-en-cours?p=-1'),
        ('Terminés', URL_MAIN + 'download-dramas-termines?p=-1'),
    )
    for label, page_url in statuses:
        params.addParameter('siteUrl', page_url)
        gui.addDir(SITE_IDENTIFIER, 'showSeries', label, 'dramas.png', params)
    gui.setEndOfDirectory()
def showGenresA():
    """Scrape the anime genre drop-down and add one directory per genre."""
    gui = cGui()
    parser = cParser()
    html = cRequestHandler(URL_MAIN + 'streaming-animes-en-cours').request()
    # Restrict parsing to the genre <select> element.
    html = parser.abParse(html, 'id="id_genre"', '<select id="triGenre"')
    result = parser.parse(html, '<a href="([^"]+)">([^<]+)')
    if result[0] == False:
        gui.addText(SITE_IDENTIFIER)
    if result[0] == True:
        params = cOutputParameterHandler()
        for href, label in result[1]:
            params.addParameter('siteUrl', URL_MAIN + href)
            params.addParameter('sMovieTitle', label)
            gui.addDir(SITE_IDENTIFIER, 'showSeries', label, 'genres.png', params)
    gui.setEndOfDirectory()
def showGenresD():
    """Scrape the drama genre drop-down and add one directory per genre."""
    gui = cGui()
    parser = cParser()
    html = cRequestHandler(URL_MAIN + 'download-dramas-en-cours?p=-1').request()
    # Restrict parsing to the genre <select> element.
    html = parser.abParse(html, 'id="id_genre"', '<select id="triGenre"')
    result = parser.parse(html, '<a href="([^"]+)">([^<]+)')
    if result[0] == False:
        gui.addText(SITE_IDENTIFIER)
    if result[0] == True:
        params = cOutputParameterHandler()
        for href, label in result[1]:
            params.addParameter('siteUrl', URL_MAIN + href)
            params.addParameter('sMovieTitle', label)
            gui.addDir(SITE_IDENTIFIER, 'showSeries', label, 'genres.png', params)
    gui.setEndOfDirectory()
def showSearch():
    """Prompt the user for a query and list the matching episodes."""
    gui = cGui()
    base_url = cInputParameterHandler().getValue('siteUrl')
    query = gui.showKeyBoard()
    # showKeyBoard returns False when the user cancels the dialog.
    if query != False:
        showEpisode(base_url + query.replace(' ', '+'))
    gui.setEndOfDirectory()
def showSeries():
    """List the series found on the current catalogue page."""
    oGui = cGui()
    oInputParameterHandler = cInputParameterHandler()
    # Normalize the URL: encode '+' and strip accents the site rejects.
    sUrl = oInputParameterHandler.getValue('siteUrl').replace('+', '%2B').replace('é', 'e').replace('ô', 'o')\
        .replace('É', 'E').replace('ï', 'i').replace('è', 'e')
    oRequestHandler = cRequestHandler(sUrl)
    sHtmlContent = oRequestHandler.request()
    oParser = cParser()
    # Capture groups: (1) relative link, (2) thumbnail path, (3) title.
    sPattern = '<a href="([^"]+)"><img src="([^"]+)" width.+?alt="([^"]+).+?></a>'
    aResult = oParser.parse(sHtmlContent, sPattern)
    if (aResult[0] == False):
        oGui.addText(SITE_IDENTIFIER)
    if (aResult[0] == True):
        total = len(aResult[1])
        progress_ = progress().VScreate(SITE_NAME)
        oOutputParameterHandler = cOutputParameterHandler()
        for aEntry in aResult[1]:
            progress_.VSupdate(progress_, total)
            if progress_.iscanceled():
                break
            sTitle = aEntry[2]
            sUrl2 = URL_MAIN + aEntry[0]
            # URL-encode spaces in the thumbnail path.
            sThumb = URL_MAIN + aEntry[1].replace(' ', '%20')
            sDesc = ''
            # Drop the site's boilerplate suffix from the title.
            sTitle = sTitle.replace(', telecharger en ddl', '')
            oOutputParameterHandler.addParameter('siteUrl', sUrl2)
            oOutputParameterHandler.addParameter('sThumb', sThumb)
            # Anime pages get the anime widget; everything else is a TV entry.
            if '-animes-' in sUrl:
                oGui.addAnime(SITE_IDENTIFIER, 'showEpisode', sTitle, '', sThumb, sDesc, oOutputParameterHandler)
            else:
                oGui.addTV(SITE_IDENTIFIER, 'showEpisode', sTitle, '', sThumb, sDesc, oOutputParameterHandler)
        progress_.VSclose(progress_)
    oGui.setEndOfDirectory()
def showEpisode(sSearch=''):
    """List the episodes of a series page, or of a search-results page
    when ``sSearch`` (a full search URL) is given."""
    oGui = cGui()
    oInputParameterHandler = cInputParameterHandler()
    sUrl = oInputParameterHandler.getValue('siteUrl')
    sThumb = oInputParameterHandler.getValue('sThumb')
    if sThumb:
        # URL-encode spaces in the thumbnail path.
        sThumb = sThumb.replace(' ', '%20')
    if sSearch:
        sUrl = sSearch
    oRequestHandler = cRequestHandler(sUrl)
    sHtmlContent = oRequestHandler.request()
    oParser = cParser()
    # Search-result pages and series pages use different markup.
    if sSearch:
        sPattern = '<a href=".+?id=([^"]+)"><b>(.+?)</b>'
    else:
        sPattern = '<td style="padding-left: 12px;"><a href="([^"]+).+?><b><img.+?>(.+?)</b>.+?</a>'
    aResult = oParser.parse(sHtmlContent, sPattern)
    if (aResult[0] == False):
        oGui.addText(SITE_IDENTIFIER)
    if (aResult[0] == True):
        total = len(aResult[1])
        progress_ = progress().VScreate(SITE_NAME)
        oOutputParameterHandler = cOutputParameterHandler()
        for aEntry in sorted(aResult[1]):
            progress_.VSupdate(progress_, total)
            if progress_.iscanceled():
                break
            # NOTE(review): the two branches are almost identical — both strip
            # quality/release tags and move the trailing episode number to the
            # front as 'E<num> <title>'.  They differ subtly: the search branch
            # blanks the thumbnail, and only the non-search branch strips '#'
            # from the download URL.  Confirm before merging them.
            if sSearch:
                sTitle = aEntry[1]
                sTitle, sTitle1 = sTitle.replace('1080p', '').replace('BD', '').replace('V2', '').replace('FIN', '')\
                    .replace('Fin', '').replace('fin', '').replace('OAV', '').replace('Bluray', '')\
                    .replace('Blu-Ray', '').rstrip().rsplit(' ', 1)
                sTitle = 'E' + sTitle1 + ' ' + sTitle
                sUrl2 = URL_MAIN + STREAM + aEntry[0]
                sThumb = ''
            else:
                sTitle = aEntry[1]
                sTitle, sTitle1 = sTitle.replace('1080p', '').replace('BD', '').replace('V2', '').replace('FIN', '')\
                    .replace('Fin', '').replace('fin', '').replace('OAV', '').replace('Bluray', '')\
                    .replace('Blu-Ray', '').rstrip().rsplit(' ', 1)
                sTitle = 'E' + sTitle1 + ' ' + sTitle
                sUrl2 = URL_MAIN + STREAM + aEntry[0]
                sUrl2 = sUrl2.replace('#', '')
            oOutputParameterHandler.addParameter('siteUrl', sUrl2)
            oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
            oOutputParameterHandler.addParameter('sThumb', sThumb)
            oGui.addEpisode(SITE_IDENTIFIER, 'showHosters', sTitle, '', sThumb, '', oOutputParameterHandler)
        progress_.VSclose(progress_)
    # When called from showSearch, the caller closes the directory itself.
    if not sSearch:
        oGui.setEndOfDirectory()
def showHosters():
    """Resolve the m3u8 hoster for the selected episode and display it."""
    gui = cGui()
    params = cInputParameterHandler()
    stream_url = params.getValue('siteUrl')
    title = params.getValue('sMovieTitle')
    thumb = params.getValue('sThumb')
    if thumb:
        # URL-encode spaces in the thumbnail path.
        thumb = thumb.replace(' ', '%20')
    hoster = cHosterGui().checkHoster('m3u8')
    if hoster != False:
        hoster.setDisplayName(title)
        hoster.setFileName(title)
        cHosterGui().showHoster(gui, hoster, stream_url, thumb)
    gui.setEndOfDirectory()
| [
"zdzislaw22@windowslive.com"
] | zdzislaw22@windowslive.com |
57ac4bddf8a6c8e345fc31c97d40daa542272bd8 | bf535fdf7418b8092d6721d4e66e61f8c9dd4929 | /tasks/task_12.py | be64338587693c511cef05d0f510958040a6a902 | [
"MIT"
] | permissive | AlexRogalskiy/python | edb7808d48f4f8b8b4e4311678fb7364c7b54aeb | 78a38746de51688dc118ba921da08b920fe4caf2 | refs/heads/master | 2021-06-29T03:14:23.472651 | 2018-06-26T05:36:02 | 2018-06-26T05:36:02 | 97,952,461 | 0 | 0 | MIT | 2020-07-23T09:19:20 | 2017-07-21T13:52:00 | Python | UTF-8 | Python | false | false | 238 | py | import itertools
flatten = lambda x: list(itertools.chain.from_iterable(x))
s = [['"', 'An', 'investment'], ['in'], ['knowledge'], ['pays'], ['the', 'best'], ['interest."', '--'], ['Benjamin'], ['Franklin']]
print(' '.join(flatten(s)))
| [
"alexander.rogalsky@yandex.ru"
] | alexander.rogalsky@yandex.ru |
e1e7db7fb239a768eeb45685f63fcfd5a37115fd | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2899/50263/235338.py | 15ff2da2c947377ae171b13bfdf0500d85a905d3 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | num = eval(input())
if num == 1:
print("true")
elif num == 4:
print("true")
elif num == 16:
print("true")
elif num == 5:
print("false")
else:
print(num) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
29dd50bcff3f11ac5d166c8318908bb812620709 | 97ca8019389d6da727ee31b4ae42a520c21ccd64 | /Remove Element.py | 89155750ec5d87d07bcbc2c3d8b9e3333a1ffb41 | [] | no_license | AngleMAXIN/LeetCode_Problems | db51ae2e9f7b81d1e581bfee8f9949b1dbf27642 | 58c0190e718956d6960e2a1ea363d0a2e8d76e06 | refs/heads/master | 2021-06-14T00:38:44.337383 | 2019-11-22T15:13:20 | 2019-11-22T15:13:20 | 113,146,794 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | #/usr/bin/env python
# -*- coding: utf-8 -*-
# problem:
# Given nums = [3,2,2,3], val = 3,
# Your function should return length = 2, with the first two elements of nums being 2.
class Solution:
    def removeElement(self, nums, val):
        """Remove every occurrence of ``val`` from ``nums`` in place.

        :type nums: List[int]
        :type val: int
        :rtype: int -- the new length of ``nums``

        The previous implementation deleted matching elements one at a time
        while scanning, which is O(n^2): each ``del`` shifts the whole tail
        of the list.  Rebuilding the list once via slice assignment keeps
        the in-place contract (callers observe the mutated ``nums``) and
        runs in O(n).  Empty input naturally yields 0.
        """
        nums[:] = [x for x in nums if x != val]
        return len(nums)
# 思路:
# 首先判断列表是否为空,如果是,则返回0,
# 如果不是,从零开始以此遍历列表的元素,
# 遇到与val相等的值,就把它del掉,注意,此时
# 的i不能加1,否则会错过前面的元素,应该是如果没
# 有遇到与val相等的值再加1
| [
"1678190746@qq.com"
] | 1678190746@qq.com |
206aec11e8d8d7adff9fbb61ae155763fb665704 | bc441bb06b8948288f110af63feda4e798f30225 | /flowable_sdk/model/collector_center/target_range_pb2.pyi | acf831ae8dbcd27511dec805a8061bef8572a5a8 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,179 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from flowable_sdk.model.collector_center.cmdb_host_search_pb2 import (
CmdbHostSearch as flowable_sdk___model___collector_center___cmdb_host_search_pb2___CmdbHostSearch,
)
from flowable_sdk.model.collector_center.cmdb_host_strategy_pb2 import (
CmdbHostStrategy as flowable_sdk___model___collector_center___cmdb_host_strategy_pb2___CmdbHostStrategy,
)
from flowable_sdk.model.collector_center.cmdb_relation_search_pb2 import (
CmdbRelationSearch as flowable_sdk___model___collector_center___cmdb_relation_search_pb2___CmdbRelationSearch,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
# Auto-generated protobuf typing stub for the TargetRange message
# (see the "@generated ... Do not edit!" header): regenerate from the
# .proto definition instead of editing by hand.
class TargetRange(google___protobuf___message___Message):
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    type = ... # type: typing___Text

    @property
    def cmdbRelationSearch(self) -> flowable_sdk___model___collector_center___cmdb_relation_search_pb2___CmdbRelationSearch: ...

    @property
    def cmdbHostSearch(self) -> flowable_sdk___model___collector_center___cmdb_host_search_pb2___CmdbHostSearch: ...

    @property
    def cmdbHostStrategy(self) -> flowable_sdk___model___collector_center___cmdb_host_strategy_pb2___CmdbHostStrategy: ...

    def __init__(self,
        *,
        type : typing___Optional[typing___Text] = None,
        cmdbRelationSearch : typing___Optional[flowable_sdk___model___collector_center___cmdb_relation_search_pb2___CmdbRelationSearch] = None,
        cmdbHostSearch : typing___Optional[flowable_sdk___model___collector_center___cmdb_host_search_pb2___CmdbHostSearch] = None,
        cmdbHostStrategy : typing___Optional[flowable_sdk___model___collector_center___cmdb_host_strategy_pb2___CmdbHostStrategy] = None,
        ) -> None: ...
    # Python 2 accepts buffer/unicode input as well as bytes.
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> TargetRange: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> TargetRange: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def HasField(self, field_name: typing_extensions___Literal[u"cmdbHostSearch",b"cmdbHostSearch",u"cmdbHostStrategy",b"cmdbHostStrategy",u"cmdbRelationSearch",b"cmdbRelationSearch"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"cmdbHostSearch",b"cmdbHostSearch",u"cmdbHostStrategy",b"cmdbHostStrategy",u"cmdbRelationSearch",b"cmdbRelationSearch",u"type",b"type"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
89d977f1c10c4e2b4f3e188c6752cd828a68f39f | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/others/CenterMask2/models/centermask2/centermask/layers/iou_loss.py | 2368211598cd9535144ecba1f6491e5c97949dd6 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,677 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import torch
from torch import nn
class IOULoss(nn.Module):
    """Bounding-box regression loss based on IoU.

    ``pred`` and ``target`` are (N, 4) tensors holding the left/top/right/
    bottom distances from an anchor point to the box sides.  ``loc_loss_type``
    selects the variant: 'iou' (-log IoU), 'linear_iou' (1 - IoU) or
    'giou' (1 - GIoU).  The summed (optionally per-sample weighted) loss
    is returned.
    """

    def __init__(self, loc_loss_type='iou'):
        super(IOULoss, self).__init__()
        self.loc_loss_type = loc_loss_type

    def forward(self, pred, target, weight=None, pos_mask=None):
        # Split each (N, 4) tensor into the four side distances.
        p_left, p_top, p_right, p_bottom = pred.unbind(dim=1)
        t_left, t_top, t_right, t_bottom = target.unbind(dim=1)

        target_area = (t_left + t_right) * (t_top + t_bottom)
        pred_area = (p_left + p_right) * (p_top + p_bottom)

        # Intersection and smallest-enclosing-box extents.
        w_intersect = torch.min(p_left, t_left) + torch.min(p_right, t_right)
        h_intersect = torch.min(p_bottom, t_bottom) + torch.min(p_top, t_top)
        w_enclose = torch.max(p_left, t_left) + torch.max(p_right, t_right)
        h_enclose = torch.max(p_bottom, t_bottom) + torch.max(p_top, t_top)

        enclose_area = w_enclose * h_enclose
        # NOTE(review): when pos_mask is given, the enclosing area is adjusted
        # by (1 - pos_mask) before the GIoU term — presumably a correction for
        # masked positive samples; confirm against the calling code.
        if pos_mask is not None:
            enclose_area = enclose_area + 1
            enclose_area = enclose_area - pos_mask

        intersect_area = w_intersect * h_intersect
        union_area = target_area + pred_area - intersect_area
        # +1.0 smoothing keeps the ratio finite for degenerate boxes.
        ious = (intersect_area + 1.0) / (union_area + 1.0)
        gious = ious - (enclose_area - union_area) / enclose_area

        if self.loc_loss_type == 'iou':
            losses = -torch.log(ious)
        elif self.loc_loss_type == 'linear_iou':
            losses = 1 - ious
        elif self.loc_loss_type == 'giou':
            losses = 1 - gious
        else:
            raise NotImplementedError

        if weight is not None:
            return (losses * weight).sum()
        return losses.sum()
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
b6a3da15b9d15ab1db85efa07b1e479e24f66bf1 | 04bc241847bd1d0a8692d33f592117463863e675 | /src/ee/__init__.py | e9428b389a8487e48d13eacf3b6ed72d9b2bb5b7 | [
"MIT"
] | permissive | ehumss/earthengine | d9bf36a35fc3c1b345e33e0baada99003936d23b | 041030bcbf5c237b1e5de3717aa571cba7161cd6 | refs/heads/master | 2021-01-21T21:32:13.152290 | 2017-06-17T08:12:18 | 2017-06-17T08:12:18 | 94,856,492 | 1 | 0 | null | 2017-06-20T06:17:59 | 2017-06-20T06:17:59 | null | UTF-8 | Python | false | false | 11,847 | py | #!/usr/bin/env python
"""The EE Python library."""
__version__ = '0.1.102'
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
# pylint: disable=g-bad-import-order
import collections
import datetime
import inspect
import numbers
import os
import six
from . import batch
from . import data
from . import deserializer
from . import ee_types as types
from ._helpers import _GetPersistentCredentials
# Public re-exports.
from ._helpers import ServiceAccountCredentials
from ._helpers import apply # pylint: disable=redefined-builtin
from ._helpers import call
from ._helpers import profilePrinting
from .apifunction import ApiFunction
from .collection import Collection
from .computedobject import ComputedObject
from .customfunction import CustomFunction
from .dictionary import Dictionary
from .ee_date import Date
from .ee_exception import EEException
from .ee_list import List
from .ee_number import Number
from .ee_string import String
from .element import Element
from .encodable import Encodable
from .feature import Feature
from .featurecollection import FeatureCollection
from .filter import Filter
from .function import Function
from .geometry import Geometry
from .image import Image
from .imagecollection import ImageCollection
from .serializer import Serializer
from .terrain import Terrain
# A list of autogenerated class names added by _InitializeGenerateClasses.
_generatedClasses = []
class _AlgorithmsContainer(dict):
"""A lightweight class that is used as a dictionary with dot notation.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
# A dictionary of algorithms that are not bound to a specific class.
Algorithms = _AlgorithmsContainer()
def Initialize(credentials='persistent', opt_url=None):
"""Initialize the EE library.
If this hasn't been called by the time any object constructor is used,
it will be called then. If this is called a second time with a different
URL, this doesn't do an un-initialization of e.g.: the previously loaded
Algorithms, but will overwrite them and let point at alternate servers.
Args:
credentials: OAuth2 credentials. 'persistent' (default) means use
credentials already stored in the filesystem, or raise an explanatory
exception guiding the user to create those credentials.
opt_url: The base url for the EarthEngine REST API to connect to.
"""
if credentials == 'persistent':
credentials = _GetPersistentCredentials()
data.initialize(credentials, (opt_url + '/api' if opt_url else None), opt_url)
# Initialize the dynamically loaded functions on the objects that want them.
ApiFunction.initialize()
Element.initialize()
Image.initialize()
Feature.initialize()
Collection.initialize()
ImageCollection.initialize()
FeatureCollection.initialize()
Filter.initialize()
Geometry.initialize()
List.initialize()
Number.initialize()
String.initialize()
Date.initialize()
Dictionary.initialize()
Terrain.initialize()
_InitializeGeneratedClasses()
_InitializeUnboundMethods()
def Reset():
"""Reset the library. Useful for re-initializing to a different server."""
data.reset()
ApiFunction.reset()
Element.reset()
Image.reset()
Feature.reset()
Collection.reset()
ImageCollection.reset()
FeatureCollection.reset()
Filter.reset()
Geometry.reset()
List.reset()
Number.reset()
String.reset()
Date.reset()
Dictionary.reset()
Terrain.reset()
_ResetGeneratedClasses()
global Algorithms
Algorithms = _AlgorithmsContainer()
def _ResetGeneratedClasses():
"""Remove the dynamic classes."""
global _generatedClasses
for name in _generatedClasses:
ApiFunction.clearApi(globals()[name])
del globals()[name]
_generatedClasses = []
# Warning: we're passing all of globals() into registerClasses.
# This is a) pass by reference, and b) a lot more stuff.
types._registerClasses(globals()) # pylint: disable=protected-access
def _Promote(arg, klass):
"""Wrap an argument in an object of the specified class.
This is used to e.g.: promote numbers or strings to Images and arrays
to Collections.
Args:
arg: The object to promote.
klass: The expected type.
Returns:
The argument promoted if the class is recognized, otherwise the
original argument.
"""
if arg is None:
return arg
if klass == 'Image':
return Image(arg)
elif klass == 'Feature':
if isinstance(arg, Collection):
# TODO(user): Decide whether we want to leave this in. It can be
# quite dangerous on large collections.
return ApiFunction.call_(
'Feature', ApiFunction.call_('Collection.geometry', arg))
else:
return Feature(arg)
elif klass == 'Element':
if isinstance(arg, Element):
# Already an Element.
return arg
elif isinstance(arg, Geometry):
# Geometries get promoted to Features.
return Feature(arg)
elif isinstance(arg, ComputedObject):
# Try a cast.
return Element(arg.func, arg.args, arg.varName)
else:
# No way to convert.
raise EEException('Cannot convert %s to Element.' % arg)
elif klass == 'Geometry':
if isinstance(arg, Collection):
return ApiFunction.call_('Collection.geometry', arg)
else:
return Geometry(arg)
elif klass in ('FeatureCollection', 'Collection'):
# For now Collection is synonymous with FeatureCollection.
if isinstance(arg, Collection):
return arg
else:
return FeatureCollection(arg)
elif klass == 'ImageCollection':
return ImageCollection(arg)
elif klass == 'Filter':
return Filter(arg)
elif klass == 'Algorithm':
if isinstance(arg, six.string_types):
# An API function name.
return ApiFunction.lookup(arg)
elif callable(arg):
# A native function that needs to be wrapped.
args_count = len(inspect.getargspec(arg).args)
return CustomFunction.create(arg, 'Object', ['Object'] * args_count)
elif isinstance(arg, Encodable):
# An ee.Function or a computed function like the return value of
# Image.parseExpression().
return arg
else:
raise EEException('Argument is not a function: %s' % arg)
elif klass == 'Dictionary':
if isinstance(arg, dict):
return arg
else:
return Dictionary(arg)
elif klass == 'String':
if (types.isString(arg) or
isinstance(arg, ComputedObject) or
isinstance(arg, String)):
return String(arg)
else:
return arg
elif klass == 'List':
return List(arg)
elif klass in ('Number', 'Float', 'Long', 'Integer', 'Short', 'Byte'):
return Number(arg)
elif klass in globals():
cls = globals()[klass]
ctor = ApiFunction.lookupInternal(klass)
# Handle dynamically created classes.
if isinstance(arg, cls):
# Return unchanged.
return arg
elif ctor:
# The client-side constructor will call the server-side constructor.
return cls(arg)
elif isinstance(arg, six.string_types):
if hasattr(cls, arg):
# arg is the name of a method in klass.
return getattr(cls, arg)()
else:
raise EEException('Unknown algorithm: %s.%s' % (klass, arg))
else:
# Client-side cast.
return cls(arg)
else:
return arg
def _InitializeUnboundMethods():
# Sort the items by length, so parents get created before children.
items = sorted(
ApiFunction.unboundFunctions().items(), key=lambda x: len(x[0]))
for name, func in items:
signature = func.getSignature()
if signature.get('hidden', False):
continue
# Create nested objects as needed.
name_parts = name.split('.')
target = Algorithms
while len(name_parts) > 1:
first = name_parts[0]
# Set the attribute if it doesn't already exist. The try/except block
# works in both Python 2 & 3.
try:
getattr(target, first)
except AttributeError:
setattr(target, first, _AlgorithmsContainer())
target = getattr(target, first)
name_parts = name_parts[1:]
# Attach the function.
# We need a copy of the function to attach properties.
def GenerateFunction(f):
return lambda *args, **kwargs: f.call(*args, **kwargs) # pylint: disable=unnecessary-lambda
bound = GenerateFunction(func)
bound.signature = signature
bound.__doc__ = str(func)
setattr(target, name_parts[0], bound)
def _InitializeGeneratedClasses():
"""Generate classes for extra types that appear in the web API."""
signatures = ApiFunction.allSignatures()
# Collect the first part of all function names.
names = set([name.split('.')[0] for name in signatures])
# Collect the return types of all functions.
returns = set([signatures[sig]['returns'] for sig in signatures])
want = [name for name in names.intersection(returns) if name not in globals()]
for name in want:
globals()[name] = _MakeClass(name)
_generatedClasses.append(name)
ApiFunction._bound_signatures.add(name) # pylint: disable=protected-access
# Warning: we're passing all of globals() into registerClasses.
# This is a) pass by reference, and b) a lot more stuff.
types._registerClasses(globals()) # pylint: disable=protected-access
def _MakeClass(name):
"""Generates a dynamic API class for a given name."""
def init(self, *args):
"""Initializer for dynamically created classes.
Args:
self: The instance of this class. Listed to make the linter hush.
*args: Either a ComputedObject to be promoted to this type, or
arguments to an algorithm with the same name as this class.
Returns:
The new class.
"""
klass = globals()[name]
onlyOneArg = (len(args) == 1)
# Are we trying to cast something that's already of the right class?
if onlyOneArg and isinstance(args[0], klass):
result = args[0]
else:
# Decide whether to call a server-side constructor or just do a
# client-side cast.
ctor = ApiFunction.lookupInternal(name)
firstArgIsPrimitive = not isinstance(args[0], ComputedObject)
shouldUseConstructor = False
if ctor:
if not onlyOneArg:
# Can't client-cast multiple arguments.
shouldUseConstructor = True
elif firstArgIsPrimitive:
# Can't cast a primitive.
shouldUseConstructor = True
elif args[0].func != ctor:
# We haven't already called the constructor on this object.
shouldUseConstructor = True
# Apply our decision.
if shouldUseConstructor:
# Call ctor manually to avoid having promote() called on the output.
ComputedObject.__init__(
self, ctor, ctor.promoteArgs(ctor.nameArgs(args)))
else:
# Just cast and hope for the best.
if not onlyOneArg:
# We don't know what to do with multiple args.
raise EEException(
'Too many arguments for ee.%s(): %s' % (name, args))
elif firstArgIsPrimitive:
# Can't cast a primitive.
raise EEException(
'Invalid argument for ee.%s(): %s. Must be a ComputedObject.' %
(name, args))
else:
result = args[0]
ComputedObject.__init__(self, result.func, result.args, result.varName)
properties = {'__init__': init, 'name': lambda self: name}
new_class = type(str(name), (ComputedObject,), properties)
ApiFunction.importApi(new_class, name, name)
return new_class
# Set up type promotion rules as soon the package is loaded.
Function._registerPromoter(_Promote) # pylint: disable=protected-access
| [
"mort.canty@gmail.com"
] | mort.canty@gmail.com |
2010af1393a38b6eeb38db3684d5fe1954404bc0 | f3b233e5053e28fa95c549017bd75a30456eb50c | /ptp1b_input/L82/82-77_MD_NVT_rerun/set_2.py | 587208a34f0653593d706645a6b17a656119210c | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | import os
dir = '/mnt/scratch/songlin3/run/ptp1b/L82/MD_NVT_rerun/ti_one-step/82_77/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_2.in'
temp_pbs = filesdir + 'temp_2.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_2.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_2.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
a8ec2943d15f417b5213574b8a7ae8aa115369b5 | f2978751f45a0e88a9761f6da4f66e0c6610bd9d | /hardPython/ex18.py | 23fc87c289dd639f00ab2099c76807db795bb09b | [] | no_license | mchenyuxiang/HardPython | c489dbd52b8e5c4fe71da824297f309529f237a7 | 1ab22e753d4e44d17cf203d2f325371c9ef4443d | refs/heads/master | 2021-01-12T12:27:49.067010 | 2017-09-07T04:42:56 | 2017-09-07T04:42:56 | 72,502,316 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | #--coding:utf-8--
def print_two(*args):
arg1,arg2 = args
print "arg1:%r,arg2:%r"%(arg1,arg2)
def print_two_again(arg1,arg2):
print "arg1:%r,arg2:%r"%(arg1,arg2)
def print_one(arg1):
print "arg1:%r"%arg1
def print_none():
print "I got nothin'."
print_two("Zed","Shaw")
print_two_again("Zed","Shaw")
print_one("First!")
print_none()
| [
"="
] | = |
9484fe6fdcd3e9324a6d4e8c49055758749ac739 | 435723c2128a8a125ebc0bd4fdd57b2e438174a0 | /tests/dust/screens/test_calzetti.py | 79521e13c7b4ac3ee9912ad4414f589fa6e5a4d0 | [] | no_license | galacticusorg/analysis-python | 824e7a0311329531e42eb06fc99298cf371ec75f | 09e03f8d25ab6711b4e2783454acca1422e7bc59 | refs/heads/master | 2022-03-10T18:39:03.766749 | 2022-03-03T14:49:25 | 2022-03-03T14:49:25 | 203,855,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | #! /usr/bin/env python
import os,sys
import unittest
import numpy as np
from galacticus.dust.screens.manager import ScreenLaw
from galacticus.dust.screens.calzetti import Calzetti
from galacticus import rcParams
class TestCalzetti(unittest.TestCase):
def test_Calzetti(self):
rcParams.update("dustCalzetti","Rv",4.06)
DUST = Calzetti()
self.assertEqual(DUST.attrs["Rv"],4.06)
self.assertIsNotNone(DUST.curve)
wavelengths = np.array([0.01,0.12,1.0,2.2,5.0])
self.assertTrue(type(DUST.curve(wavelengths)),np.ndarray)
[self.assertTrue(type(DUST.curve(w)),float) for w in wavelengths]
return
if __name__ == "__main__":
unittest.main()
| [
"alex.i.merson@gmail.com"
] | alex.i.merson@gmail.com |
a9d79218e3a19e563dfc0d7fe2c48ed14e8ec8ef | 7e35f686eaa2acff06291457af4fd6680e2738c1 | /基础题目/已知三角形的两边长及其夹角,求第三边长.py | 536fa48e07707bb805f3991db25b73db83f33f85 | [] | no_license | cassieeric/Python-Exercises_Interview_questions | 5ba68296cbf777ac7bb9aeda57ee7a04856f613a | 1934e5ce82d77747d52229522dd1515a61dc80e2 | refs/heads/master | 2021-07-04T03:05:10.271998 | 2020-08-15T01:19:37 | 2020-08-15T01:19:37 | 150,816,386 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import math
a = int(input('请输入三角形的一条边长:'))
b = int(input('请输入三角形的另一条边长:'))
C = int(input('请输入三角形的两条边长的夹角度数:'))
# c = math.sqrt(a*a + b*b - 2*a*b*math.cos(C*math.pi/180))
c = math.sqrt(a**2 + b**2 - 2*a*b*math.cos(C*math.pi/180))
print(c)
| [
"noreply@github.com"
] | cassieeric.noreply@github.com |
0a5c4e84014d2a61d3b50297b4dcf5da4b196d9e | 9dab41a71bf19a9ad17ee3e9f77c0f58aebd1d6d | /python/uline/uline/uline/handlers/app/bank/inlet/chain_batch_active.py | 815af06316321503854022567df4e0acf1ca2c17 | [] | no_license | apollowesley/Demo | f0ef8ec6c4ceb0aec76771da8dd9a62fb579eac8 | 471c4af95d3a7222d6933afc571a8e52e8fe4aee | refs/heads/master | 2021-02-15T04:01:51.590697 | 2018-01-29T01:44:29 | 2018-01-29T01:44:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,731 | py | # -*- coding: utf-8 -*-
'''
BEGIN
ajax请求 激活商户支付方式和状态
必选参数:
可选参数:
成功:
{"code": 200, "msg": "成功"}
失败:
{"code": 406, "msg": "失败"}
END
'''
import json
from tornado.httpclient import AsyncHTTPClient
from tornado.web import asynchronous, authenticated
from tornado import gen
from uline.handlers.baseHandlers import BkAdminHandler
from uline.public import common
from form import ActivatedBatchChainInfo
from uline.public import log
from uline.public.constants import ACTIVATED_STATUS, PAYMENT
from uline.settings import env, MESSAGE_URL
import tcelery
from uline.public.permit import check_permission
tcelery.setup_nonblocking_producer()
from uline.settings import CHAIN_LOGIN_URL
class ActivatedBatchChainInletStatusHandler(BkAdminHandler):
@authenticated
@check_permission
def prepare(self):
form = ActivatedBatchChainInfo(self)
self.f_rsp = common.f_rsp(code=406, msg='fail')
if not form.validate():
self.finish(self.f_rsp)
self.dt_id = form.dt_id.data
self.create_at = self.update_at = common.timestamp_now()
self.activated_status = 2
@asynchronous
@gen.coroutine
def get(self):
self.rsp = common.scc_rsp(code=200, msg='success')
with self.db.get_db() as cur:
try:
payment_types = yield self.get_unactivated_payment_type(cur)
dt_unactivated_payment_type = yield self.get_dt_unactivated_payment_type(cur)
if dt_unactivated_payment_type:
payment_name = '、'.join(PAYMENT[str(payment_type[0])]
for payment_type in dt_unactivated_payment_type if payment_type in payment_types)
if payment_name:
msg = u'渠道商{}的费率未激活'.format(payment_name)
rsp = common.f_rsp(code=407, msg=msg)
self.write(rsp)
self.finish()
return
self.email = yield self.get_email(cur)
yield self.activated_dt_payment(cur)
yield self.activated_dt_inlet(cur)
yield self.add_activated_dt_info(cur, payment_types)
self.dt_info = yield self.get_dt_info(cur)
except Exception as err:
log.exception.info(err)
cur.connection.rollback()
self.rsp = common.f_rsp(code=406, msg='fail')
count = yield self.is_send_email()
# 如果激活邮件发给渠道商则设置为待发送
if not count and self.dt_info[3] == 2:
self.save_activated_dt_email_info('ready', 3)
if not count and self.dt_info[3] == 1:
addition_info = u'(浦发银行厦门分行O2O平台合作伙伴)' if env == 'SPD_PROD' else ''
http_client = AsyncHTTPClient()
data = {
'env': env,
'reciver': self.dt_info[0],
'title': u'uline连锁商户激活信息',
'body': u"""
{1},您好:
以下帐号重要信息请注意保密:
优畅技术文档:http://docs.uline.cc
连锁商户编号:{2}
登录帐号:{0}
初始登录密码:开户时填写的联系手机号 (登录后要求修改初始密码)
登陆地址:{3}
温馨提示:
请妥善保管您的账号及密码,为安全起见,新申请的账号,首次登录后请立即修改管理员密码.
广州优畅信息技术有限公司{4}
客服电话:4008047555""".format(str(self.dt_info[2]) + ".mr", self.dt_info[1], self.dt_info[2], CHAIN_LOGIN_URL, addition_info)
}
url = MESSAGE_URL + '/v1/email'
response = yield http_client.fetch(url, body=json.dumps(data), method='POST')
if response.body == '1':
self.save_activated_dt_email_info('fail', 1)
else:
self.save_activated_dt_email_info('success', 2)
self.write(self.rsp)
@gen.coroutine
def get_unactivated_payment_type(self, cursor):
query = """select payment_type from dt_payment where dt_id=%(dt_id)s and activated_status=1;"""
cursor.execute(query, {"dt_id": self.dt_id})
ret = cursor.fetchall()
raise gen.Return(ret)
@gen.coroutine
def activated_dt_payment(self, cursor):
query = """update dt_payment set
activated_status=%(activated_status)s, update_at=%(update_at)s
where dt_id=%(dt_id)s and activated_status=1;"""
cursor.execute(query, {
"activated_status": self.activated_status,
"dt_id": self.dt_id,
"update_at": self.update_at
})
@gen.coroutine
def activated_dt_inlet(self, cursor):
query = """update dt_inlet_info set
activated_status=%(activated_status)s, update_at=%(update_at)s
where dt_id=%(dt_id)s"""
cursor.execute(query, {
"activated_status": self.activated_status,
"dt_id": self.dt_id,
"update_at": self.update_at
})
@gen.coroutine
def add_activated_dt_info(self, cursor, payment_types):
activated_user = yield self.get_bk_email(cursor)
for _, payment_type in enumerate(payment_types):
query = """insert into
activated_dt_info (dt_id, payment_type, comment, activated_user, activated_status, create_at) values(%s, %s,%s, %s, %s, %s)"""
cursor.execute(query, (self.dt_id, payment_type,
ACTIVATED_STATUS[str(self.activated_status)
], activated_user, self.activated_status,
self.create_at))
@gen.coroutine
def get_bk_email(self, cursor):
query = """select email from bk_user where bk_id=%s"""
cursor.execute(query, (self.current_user,))
ret = cursor.fetchone()
raise gen.Return(ret[0])
@gen.coroutine
def get_email(self, cursor):
query = """select email from dt_user where dt_id=%s"""
cursor.execute(query, (self.dt_id,))
ret = cursor.fetchone()
raise gen.Return(ret[0])
@gen.coroutine
def is_send_email(self):
query = """select count(1) from activated_dt_email_info where dt_id=%s and status=2"""
ret = self.db.selectSQL(query, (self.dt_id,))
raise gen.Return(ret[0])
@gen.coroutine
def get_dt_info(self, cursor):
query = """select
dt_inlet_info.email,
dt_inlet_info.dt_name,
dt_user.dt_id,
dt_inlet_info.activate_email_tag
from
dt_user
inner join dt_inlet_info on dt_inlet_info.dt_id=dt_user.dt_id
where dt_user.dt_id=%s"""
cursor.execute(query, (self.dt_id,))
ret = cursor.fetchone()
raise gen.Return(ret)
@gen.coroutine
def save_activated_dt_email_info(self, comment, status):
query = """insert into
activated_dt_email_info (dt_id,email,comment,status,create_at)
values (%s, %s, %s, %s, %s)"""
self.db.executeSQL(query, (self.dt_id, self.email, comment, status, self.create_at))
@gen.coroutine
def get_dt_unactivated_payment_type(self, cursor):
query = """SELECT payment_type FROM dt_payment
WHERE dt_id=(SELECT parent_id FROM dt_inlet_info WHERE dt_id=%s) and activated_status=1;"""
cursor.execute(query, (self.dt_id, ))
ret = cursor.fetchall()
raise gen.Return(ret if ret else '')
| [
"36821277@qq.com"
] | 36821277@qq.com |
9963aa430500b53625a4d9c20ddb4cc59760d221 | b8faf65ea23a2d8b119b9522a0aa182e9f51d8b1 | /vmraid/patches/v11_0/create_contact_for_user.py | db127c67728578fb09622c876e2f999ba7017b8d | [
"MIT"
] | permissive | vmraid/vmraid | a52868c57b1999a8d648441eb9cd05815204345d | 3c2e2a952003ba7ea2cf13673b9e79e127f4166e | refs/heads/main | 2022-07-29T18:59:28.585133 | 2022-04-22T08:02:52 | 2022-04-22T08:02:52 | 372,473,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | import re
import vmraid
from vmraid.core.doctype.user.user import create_contact
def execute():
"""Create Contact for each User if not present"""
vmraid.reload_doc("integrations", "doctype", "google_contacts")
vmraid.reload_doc("contacts", "doctype", "contact")
vmraid.reload_doc("core", "doctype", "dynamic_link")
contact_meta = vmraid.get_meta("Contact")
if contact_meta.has_field("phone_nos") and contact_meta.has_field("email_ids"):
vmraid.reload_doc("contacts", "doctype", "contact_phone")
vmraid.reload_doc("contacts", "doctype", "contact_email")
users = vmraid.get_all("User", filters={"name": ("not in", "Administrator, Guest")}, fields=["*"])
for user in users:
if vmraid.db.exists("Contact", {"email_id": user.email}) or vmraid.db.exists(
"Contact Email", {"email_id": user.email}
):
continue
if user.first_name:
user.first_name = re.sub("[<>]+", "", vmraid.safe_decode(user.first_name))
if user.last_name:
user.last_name = re.sub("[<>]+", "", vmraid.safe_decode(user.last_name))
create_contact(user, ignore_links=True, ignore_mandatory=True)
| [
"sowrisurya@outlook.com"
] | sowrisurya@outlook.com |
bac7a1ad408b67c33cd3445a1697388b22649542 | db0e8aa3a92a30c9b1cc8da03725e951ff64f3f1 | /lenv/lib/python3.6/site-packages/django/contrib/sites/requests.py | 233d8409457a3ba44b5279fbcbbfb684e47d7e3a | [
"BSD-3-Clause"
] | permissive | shrey-c/DataLeakageDjango | ffeef61caa347520747fc70cf3f7f8b84a9610cf | a827c5a09e5501921f9fb97b656755671238dd63 | refs/heads/master | 2022-11-30T03:30:12.313025 | 2020-07-12T06:47:44 | 2020-07-12T06:47:44 | 242,569,637 | 6 | 1 | BSD-3-Clause | 2022-11-22T05:20:22 | 2020-02-23T18:33:04 | Python | UTF-8 | Python | false | false | 788 | py | from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class RequestSite(object):
"""
A class that shares the primary interface of Site (i.e., it has
``domain`` and ``name`` attributes) but gets its data from a Django
HttpRequest object rather than from a database.
The save() and delete() methods raise NotImplementedError.
"""
def __init__(self, request):
self.domain = self.name = request.get_host()
def __str__(self):
return self.domain
def save(self, force_insert=False, force_update=False):
raise NotImplementedError('RequestSite cannot be saved.')
def delete(self):
raise NotImplementedError('RequestSite cannot be deleted.')
| [
"shreyansh.chheda@gmail.com"
] | shreyansh.chheda@gmail.com |
537553a4c0757e6f54433900a81ffea31731adea | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /base/lib/python2.7/site-packages/wx-3.0-gtk2/wx/lib/floatcanvas/Resources.py | d3208ee6020730d53adcb199e4cf79ef2b84913e | [
"BSD-3-Clause",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 15,886 | py | #----------------------------------------------------------------------
# This file was generated by /usr/local/bin/img2py
#
from wx import ImageFromStream, BitmapFromImage
import cStringIO, zlib
def getMagPlusData():
return zlib.decompress(
'x\xda\x01*\x01\xd5\xfe\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x18\
\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0w=\xf8\x00\x00\x00\x04sBIT\x08\x08\
\x08\x08|\x08d\x88\x00\x00\x00\xe1IDATx\x9c\xb5U\xd1\x0e\xc4 \x08\xa3n\xff\
\xff\xc5\xdb\xb8\xa7\xee<\x04\x86gFb\xb2\x88\xb6\x14\x90\x01m\x937m\x8f\x1c\
\xd7yh\xe4k\xdb\x8e*\x01<\x05\x04\x07F\x1cU\x9d"\x19\x14\\\xe7\xa1\x1e\xf07"\
\x90H+$?\x04\x16\x9c\xd1z\x04\x00J$m\x06\xdc\xee\x03Hku\x13\xd8C\x16\x84+"O\
\x1b\xa2\x07\xca"\xb7\xc6sY\xbdD\x926\xf5.\xce\x06!\xd2)x\xcb^\'\x08S\xe4\
\xe5x&5\xb4[A\xb5h\xb4j=\x9a\xc8\xf8\xecm\xd4\\\x9e\xdf\xbb?\x10\xf0P\x06\
\x12\xed?=\xb6a\xd8=\xcd\xa2\xc8T\xd5U2t\x11\x95d\xa3"\x9aQ\x9e\x12\xb7M\x19\
I\x9f\xff\x1e\xd8\xa63#q\xff\x07U\x8b\xd2\xd9\xa7k\xe9\xa1U\x94,\xbf\xe4\x88\
\xe4\xf6\xaf\x12x$}\x8a\xc2Q\xf1\'\x89\xf2\x9b\xfbKE\xae\xd8\x07+\xd2\xa7c\
\xdf\x0e\xc3D\x00\x00\x00\x00IEND\xaeB`\x82\xe2ovy' )
def getMagPlusBitmap():
return BitmapFromImage(getMagPlusImage())
def getMagPlusImage():
stream = cStringIO.StringIO(getMagPlusData())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getPointerData():
return zlib.decompress(
"x\xda\xeb\x0c\xf0s\xe7\xe5\x92\xe2b``\xe0\xf5\xf4p\t\x02\xd2\x12 \xcc\xc1\
\x06$\x1f\x94\xdb\xfe\x00R,\xc5N\x9e!\x1c@P\xc3\x91\xd2\x01\xe4o\xf5tq\x0c\
\xa9\x98\xb3\xf5\xdaE\xa1V\x05\x0e\x96\x0bw\xbf\xfc\xdf\xbfc\xd1\xf4\xd9\x87\
\xa7\xa84Mw_n\xa3\xeb&\xbcS\xf4N\xa9\xdcn\x86\x03aZ\x1bWl{\xcet\x92m\xed\x8a\
[\xd1*\x9c\x82\x91\x93\x9eMuP\xd6\xbe4\xa3\xa1\xcd\xe8\x84\xc0\t%=\x85\xe6\
\x1d\x8d\x1aF\xac.\x132\x13\xc4^\x9ek\x14\xffx\xc6K\xa3\xd1\xcd-3\xa8\xa1M'\
\x85\xf3Ck\xcb\xb9\x07\xd7\x7f\x85\x7f=\xa7Ts\xe2^\xff\x83\xfb\xf1\x97\x15\
\x15\x94\xd2\xbc/5tl\t\xb3\x11\xcc\xe7\x12\xbe0;\xfa\xef7\x85X\x87\xfc{z:S'\
\x86-}\xb6\xe0\xbb\xc2\xfc\x03\x7f\xa7\\\xf3\xb5jM/fX\xf0/\xf7\xe3\xb5\xca7\
\x8f\xe66s\xf3\x99\xe7\xf8\x9e\xb4(\xfd\t\xf4\x00\x83\xa7\xab\x9f\xcb:\xa7\
\x84&\x00\xc7Jh8" )
def getPointerBitmap():
return BitmapFromImage(getPointerImage())
def getPointerImage():
stream = cStringIO.StringIO(getPointerData())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getMagMinusData():
return zlib.decompress(
'x\xda\xeb\x0c\xf0s\xe7\xe5\x92\xe2b``\xe0\xf5\xf4p\t\x02\xd2\x12 \xcc\xc1\
\x06$\x1f\x94\xdb\xfe\x00R,\xc5N\x9e!\x1c@P\xc3\x91\xd2\x01\xe4\xdf\xf6tq\
\x0c\xa9\x98\xb354\x9a\xaf\xc5\x80#e\xd5w\xfb\x8d\xa7\xea.\xa6j\x06\xec\xeaU\
Q[vE\xb2m\xba\x83\xf5\x0b_k\xe5\xe3\xc5\xf12?o\x15.\xf2b\xf0ol`V\xe63\xd6\
\x9f\xc8\xc35\xefw\x12\xff\x0fi\xc1\x96\x0em\x15{\x16\xb1\x98E_9\x18\xa6x\
\xdc\xe2\xdaa\xcb>\xe1\xda*\xe1\x1b\xde\x82\x15O\xfc\xa5\x9d\xdc\x83\x19\xb7\
\xabD\xee\xed\x98dv\xd6n\r\x9b\xe3\x12\x91=\xa9\xeb\x85[4\xa3<\x9d\xd3b\x1d\
\xb7f$]]\x96\xe1\xf2\xf8\xc6y\x8f5\xf6\xd2\xdb\x96\xe9\xdfT\\\xd5p\xbe-7\xa2\
ls\xac\x88\xa4\xf1n\xaf6=!\xd5\x9b\xab:\xca\xa6,?\x92\x1b\xdc\xe9r\xe0\xcb\
\xe2\xe6\x15\x13v\xfco^\xe5\xfa\xf2\xca\xcb[R[\xba&\xbd\xf5\xec\xf3\xd8U?\
\xfd\x80\xf2EM\xae\xf0\xa3\xf3Ut\xde\x17\xed\x0b}\xd2U\xcb0Ugv\x82\xa1Q\xc7S\
\xa07\x19<]\xfd\\\xd69%4\x01\x00+\xecq\xf9' )
def getMagMinusBitmap():
return BitmapFromImage(getMagMinusImage())
def getMagMinusImage():
stream = cStringIO.StringIO(getMagMinusData())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getMoveButtonData():
return zlib.decompress(
'x\xda\x01,\x01\xd3\xfe\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x18\
\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0w=\xf8\x00\x00\x00\x04sBIT\x08\x08\
\x08\x08|\x08d\x88\x00\x00\x00\xe3IDATx\x9c\xb5\x96\xd1\x16\x84 \x08D\x19\
\xf5\xff\xbf\xb8\x9a}Y[\xc2P\xe8\xb4<\xaa\xcc\x15\x1c-\xa0T\x89\xc6\xb1o\x14\
\x11)\xb5!\x9aS2\xe2\x00\x04\xc0\tz\r\xd0\xc5{d K\x80\x15\xcfB\xa6\x00O<\x03\
q\x01+\xf1(\xa4\xb9\xe4\xda@\xf2\x92\xd8\x81fx\xea\xaa\x01p\xec\x1b{\x82N\
\xb4\xbb\xb4\xa2\x9e\x85\x8b]\x94\xb5\xa1\x8e\xbb\xdc\x13\xa0{\x9e\xb9H+\x08\
P\xeap\xa0\xb6\xc7:92\xdf\xd7\x94\xda\x00\x92!\xb7<\t\x92\xf1\xa7\xe2i\xb4n\
\xc7\x7f\xb5\xa8\x89\xfc<\xaf\x17x6\x8c\xccwq\x11\xe5\xa2/\xe4\xbe\xceDh\xf1\
\x0b@C\x9e\xd8\xd4\xcb\xc5\xec\x83c\xdb\xf2\xcaS\xa1\xc5=\xfb\xdaq\x92\xf4 \
\xaeM\xa3g\xb2j\xe9\xf4\x1e\xac \x91\r\xb8-2\x90\xa1]Q3\x84n\xb2\xad$\xe3\
\xb4e\x05\x06\x92\xfem\xf9\x00\x8d\xa7\xbb\x936\xe9\xf2\xae\x00\x00\x00\x00I\
END\xaeB`\x82\xed\x9c\x836' )
def getMoveButtonBitmap():
return BitmapFromImage(getMoveButtonImage())
def getMoveButtonImage():
stream = cStringIO.StringIO(getMoveButtonData())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getMoveCursorData():
return zlib.decompress(
"x\xda\xeb\x0c\xf0s\xe7\xe5\x92\xe2b``\xe0\xf5\xf4p\t\x02\xd2\xc2 \xcc\xc1\
\x06$\x8b\x02\xcc\xce\x00)\x96b'\xcf\x10\x0e \xa8\xe1H\xe9\x00\xf2\xd7z\xba8\
\x86T\xccYz\xe5\xa0\xd0a\x05\x0e\x96\x0b\xb1_\xff\xef\xb7\xe0\xb4-)\x98\xb0\
\xe0\xc6\xab\x8b/Ns\xf5\xa5\xac<q\xac8>(+y\xdb\xba7\x0e*\x1f\xefL\x97I\xe4b<\
\xc0gqTg\x892\xb3\xb3NS\xd9\x01\xf1eG\xc5\x04;z\xaaK\xd6]9\xc6!c\x10\xfd&\
\xf2\xbbH\x97P\xd0\xfa6\xdbY\xbe)\xfd\xd2g\xb3/\xf5\xad\xcd\xdab,\xb2\xa4C\
\xc6\x91y\xc5Q\xbb\xb6\xacd\xe6}\xae[9\xff\xaf\x8d|\xbf\xcc\x7f\xc7\xabe\xfe\
W\xf6\xffl]]\xcd\xd2\xf3\xfd\xc2\xff\t\x17WO,5o\x8a;Ys(~\x81\xa6\x19s\xf8\
\x05\xa1\xcf\tlKg\xb0\x96\xc7\xdd\xe2_\xd9\xbe,\xc7\xc4,\xf8=\xd0\xe1\x0c\
\x9e\xae~.\xeb\x9c\x12\x9a\x00\x0b\xb6b\x8e" )
def getMoveCursorBitmap():
return BitmapFromImage(getMoveCursorImage())
def getMoveCursorImage():
stream = cStringIO.StringIO(getMoveCursorData())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getMoveRLCursorData():
return zlib.decompress(
"x\xda\xeb\x0c\xf0s\xe7\xe5\x92\xe2b``\xe0\xf5\xf4p\t\x02\xd2\xc2 \xcc\xc1\
\x06$\x8b\x02\xcc\xce\x00)\x96b'\xcf\x10\x0e \xa8\xe1H\xe9\x00\xf2{<]\x1cC*\
\xe6\x9c\xbd\xe2\xc8\xd7\xa0\xc0\xc3r \xf6\xc1\x7f}\xb6WG\xa5Z\xa75H=\x96\
\x93\xb6Z\xb8\xa4\x91G0_u\x8fZm\xdb\xd5I\xa9K\xdf%mMQ\xbciZU*~\xb9-\xd0\xe6C\
\xd3Y\x07\xe5\t\xbb\xa4\xc4T.\xf9'\xcf\xe54\xfcx ,/\xc5\xd5\xb1\xeb\x84\xf2\
\x0b\xa6\xb6\x19\x19\xbd\xc5\xcf\xd38\x19\xca>|\x9c\xad\xaa[\xb5@\x8e\xe5W\
\xab\xad\xb3\xc3f)m\xe5\xed\x01\xedg\x9b\xc4X\xe6|[\xe3\xab\x1b\xb9\x86m\xbd\
\xdd\x91wO\xf6\xff\xbf\xc9\xf6\xc6#\xdf|\x8be\x98\x16\xd0]\x0c\x9e\xae~.\xeb\
\x9c\x12\x9a\x00\x11\x04M\x96" )
def getMoveRLCursorBitmap():
return BitmapFromImage(getMoveRLCursorImage())
def getMoveRLCursorImage():
stream = cStringIO.StringIO(getMoveRLCursorData())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getMoveUDCursorData():
return zlib.decompress(
'x\xda\xeb\x0c\xf0s\xe7\xe5\x92\xe2b``\xe0\xf5\xf4p\t\x02\xd2\xc2 \xcc\xc1\
\x06$\x8b\x02\xcc\xce\x00)\x96b\'\xcf\x10\x0e \xa8\xe1H\xe9\x00\xf2gx\xba8\
\x86T\xccY{\xc5\x91\xef\x88\x02\x07k@\xc0\xfb\xfaG\xdb\xf6\xcf6\x14t\xb1\x9b\
,\xb9\xedE\xb7\xc2\xaa[\xbb6T\xbc\xe3^\xcb\x9f\xfa:\x8a5(\xb4\xf2\x1d\xb7}\
\xa2\xb0\x90\xe0\xca\x06\xf7\x9c\xd64\x03\x83#J+\x98\xf2"\xd8\x0c/$\x88j0\
\xb7O\xfc\x1d\xc0\xf0av\xda\x8e)?\n\rg\xc4\x0bL\x9btFz\xee\xe6\xfcG\xebo\x84\
\xa9I\x9f1\x9d\xff\xad\xe7\xee\xb2\xf3\x8c\x06\xf9\xd7\xa6\xfc\xdcy\xf6M\x82\
\xf6\x96\xb99\xaf#Y{\x16\x08$?\xe0\xb4JR7h\x0e:\xd3\xcc\xb3\xe8\x06WX\xdd-\
\xf1\xf5<\x05n\xca[\xef\xfd\x01\xba\x91\xc1\xd3\xd5\xcfe\x9dSB\x13\x00/\x9bT\
s' )
def getMoveUDCursorBitmap():
return BitmapFromImage(getMoveUDCursorImage())
def getMoveUDCursorImage():
stream = cStringIO.StringIO(getMoveUDCursorData())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getGrabHandData():
return zlib.decompress(
'x\xda\x01Z\x01\xa5\xfe\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x18\
\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0w=\xf8\x00\x00\x00\x04sBIT\x08\x08\
\x08\x08|\x08d\x88\x00\x00\x01\x11IDATx\x9c\xb5U\xd1\x12\x830\x08Kh\xff\xff\
\x8b7\xb3\x97\xd1C\xa4Zw\x93;\x1fJ1\t\x98VJ\x92\xb5N<\x14\x04 I\x00\x80H\xb4\
\xbd_\x8a9_{\\\x89\xf2z\x02\x18/J\x82\xb5\xce\xed\xfd\x12\xc9\x91\x03\x00_\
\xc7\xda\x8al\x00{\xfdW\xfex\xf2zeO\x92h\xed\x80\x05@\xa45D\xc5\xb3\x98u\x12\
\xf7\xab.\xa9\xd0k\x1eK\x95\xbb\x1a]&0\x92\xf0\'\xc6]gI\xda\tsr\xab\x8aI\x1e\
\\\xe3\xa4\x0e\xb4*`7"\x07\x8f\xaa"x\x05\xe0\xdfo6B\xf3\x17\xe3\x98r\xf1\xaf\
\x07\xd1Z\'%\x95\x0erW\xac\x8c\xe3\xe0\xfd\xd8AN\xae\xb8\xa3R\x9as>\x11\x8bl\
yD\xab\x1f\xf3\xec\x1cY\x06\x89$\xbf\x80\xfb\x14\\dw\x90x\x12\xa3+\xeeD\x16%\
I\xe3\x1c\xb8\xc7c\'\xd5Y8S\x9f\xc3Zg\xcf\x89\xe8\xaao\'\xbbk{U\xfd\xc0\xacX\
\xab\xbb\xe8\xae\xfa)AEr\x15g\x86(\t\xfe\x19\xa4\xb5\xe9f\xfem\xde\xdd\xbf$\
\xf8G<>\xa2\xc7\t>\tE\xfc\x8a\xf6\x8dqc\x00\x00\x00\x00IEND\xaeB`\x82\xdb\
\xd0\x8f\n' )
def getGrabHandBitmap():
return BitmapFromImage(getGrabHandImage())
def getGrabHandImage():
stream = cStringIO.StringIO(getGrabHandData())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getHandData():
return zlib.decompress(
'x\xda\x01Y\x01\xa6\xfe\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x18\
\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0w=\xf8\x00\x00\x00\x04sBIT\x08\x08\
\x08\x08|\x08d\x88\x00\x00\x01\x10IDATx\x9c\xad\x96\xe1\x02\xc2 \x08\x849\
\xf5\xfd\x9fx\xdb\xf5\'\x8c!\xa8\xab\xee\x975\xe5\x83\x0b\\@\xa9\xb2\xab\xeb\
<\xa8\xebR\x1bv\xce\xb4\'\xc1\x81OL\x92\xdc\x81\x0c\x00\x1b\x88\xa4\x94\xda\
\xe0\x83\x8b\x88\x00\x10\x92\xcb\x8a\xca,K\x1fT\xa1\x1e\x04\xe0f_\n\x88\x02\
\xf1:\xc3\x83>\x81\x0c\x92\x02v\xe5+\xba\xce\x83\xb7f\xb8\xd1\x9c\x8fz8\xb2*\
\x93\xb7l\xa8\xe0\x9b\xa06\xb8]_\xe7\xc1\x01\x10U\xe1m\x98\xc9\xefm"ck\xea\
\x1a\x80\xa0Th\xb9\xfd\x877{V*Qk\xda,\xb4\x8b\xf4;[\xa1\xcf6\xaa4\x9cd\x85X\
\xb0\r\\j\x83\x9dd\x92\xc3 \xf6\xbd\xab\x0c2\x05\xc0p\x9a\xa7]\xf4\x14\x18]3\
7\x80}h?\xff\xa2\xa2\xe5e\x90\xact\xaf\xe8B\x14y[4\x83|\x13\xdc\x9e\xeb\x16e\
\x90\xa7\xf2I\rw\x91\x87d\xd7p\x96\xbd\xd70\x07\xda\xe3v\x9a\xf5\xc5\xb2\xb2\
+\xb24\xbc\xaew\xedZe\x9f\x02"\xc8J\xdb\x83\xf6oa\xf5\xb7\xa5\xbf8\x12\xffW\
\xcf_\xbd;\xe4\x8c\x03\x10\xdb^\x00\x00\x00\x00IEND\xaeB`\x82\xd1>\x97B' )
def getHandBitmap():
return BitmapFromImage(getHandImage())
def getHandImage():
stream = cStringIO.StringIO(getHandData())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getGrabHand16Data():
return zlib.decompress(
'x\xda\x01\x0f\x01\xf0\xfe\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\
\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\x00\x00\x00\x04sBIT\
\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\xc6IDATx\x9c\x9d\x92Qn\x031\x08D\x07\
\xd6\xc7\xc0\xf7?\x98}\x8c$\xaf\x1f[,\xaf\xb5n\x9a !\r\x08\x0f\x0c\xd8\x00\
\xfc(\xa6o-"\x000?\xc4\xaf\xedp\xc6\xe9\x00\xa5\xf7\xaeZ\xab^\xcf\x07\xb5VI\
\xda\xe2\x8c\x13\x9b\x99\x06{N\xf2\x0e\xa7KB\x12\xe5\x13\xb9\xbdw\x0123\xc1\
\x18\xe4dZw1\xeb\x9c1\xe7\xcb\xe1\x0e(".\x9d\xe6\xab\xec0 @%\x17\xd4Z\xd3\'\
\xe74;K\xbd\xb5&I\xe3\x12\x7f=\xca\x8bD\x84\xc6\xe4\xa9-\xb7\xbb\xdez\xd6\
\xbf\xd6\x00xj\xfb\xef$\xb3T?\x8a\xf9\xbc\xa0\x1d\xc9\xfa\x99f\xf3K0\x91\xbc\
\xeb~K\xf0\x8d\x99\xf9qI\xbc\x9e\x0f\xf2\xa7e\xb7\xbb\xdc\x96 \x1f\xac\x85w9\
I\xfa\x01\xd6\xd0\xefe\x16\x16\xb7\x9b\x00\x00\x00\x00IEND\xaeB`\x82\x0bmo\
\xbf' )
def getGrabHand16Bitmap():
return BitmapFromImage(getGrabHand16Image())
def getGrabHand16Image():
stream = cStringIO.StringIO(getGrabHand16Data())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getMondrianData():
return zlib.decompress(
'x\xda\xeb\x0c\xf0s\xe7\xe5\x92\xe2b``\xe0\xf5\xf4p\t\x02\xd2\n \xcc\xc1\x04\
$\xffH\xbc]\x0c\xa4\x98\x8b\x9d<C888n?\xf4\x7f\x00\xe4\xa6{\xba8\x86T\xccy;\
\xd5\x93\xaf\xc1\x80\x87\xd9\xb6\xa3\xffc\xd1<\xb1u"^G\xc5\x18\x0f\xd9\xed\
\x9a\xf8\xfc\xc2\x8e\xa9\x93Z\x97\xac\xd8)\x98\xfd\xbb\xc2\xaa\xe4z\xf0-\xa3\
\x07\xec\r%\x0bo\x9db~^\xc50eo\x11\x7f\x1c\xc3\x0ba\xa3\x93\xacg\xae\x9f_\
\xbf\x92\x91\xcd#K\x84\xf7\x86\xd5.\xf6\r\xcf\xad\x192u\xd6&Z~\xfekm\xf0\xa0\
\xd27c\x9e\xa0kv\xf2\x83\x17@+\x19<]\xfd\\\xd69%4\x01\x00}A@\xa3' )
def getMondrianBitmap():
return BitmapFromImage(getMondrianImage())
def getMondrianImage():
stream = cStringIO.StringIO(getMondrianData())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getHand16Data():
return zlib.decompress(
'x\xda\x01\x02\x01\xfd\xfe\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\
\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\x00\x00\x00\x04sBIT\
\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\xb9IDATx\x9c\x8dS\xd1\x0e\x84 \x0ck\
\x87\x9f\x81\xff\xff\x93j\xef\xe12\xd8\xcd\xe1\xb9\xc4H\xc6,m\xa9\xa45\xac\
\xea:\x0f\xf9\xda\xda\xc6r\x88\xd6\xc6\xa3T\xbdw\x01\x100\xb7\xe2<\xad\x81\
\xce\xe0:\x0f\x91\xf3\x10I 9\xde\xb1\x1f\x19Yf\xe4\x03\xab>I\x90\x1c\xf2\xb6\
\x95\xfex\xea\nH\x92n\x0c\x9c\xf6\xdb2`\xba\x9d\xd0!\t\xd66>\x02\xea\xbb\xfb\
\xe3\xb4\xaf\xb3\xe3\xde\x8b3\x16\x80\xb0\xef;\x00\xa0\xf7^\xd3\xad\xb2\x10\
\xd1\xfc\xee\xcb\xfbNL\x06KZ\x1b\x19p\xcdO\xa6\xe5Ysj\x1e\x98\x18\xdf\x7f\
\x1f\x03!HoAn\xfe<\xeaK\xfd\xd2\x9f\xeao\xac\xa8\xae|\xba%1\xca\xc9U\xf5>\
\x98\xdc\xd9g\xb0\x13Hr\x00\x00\x00\x00IEND\xaeB`\x82\xde\xa5p@' )
def getHand16Bitmap():
return BitmapFromImage(getHand16Image())
def getHand16Image():
stream = cStringIO.StringIO(getHand16Data())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getMagPlus16Data():
return zlib.decompress(
"x\xda\xeb\x0c\xf0s\xe7\xe5\x92\xe2b``\xe0\xf5\xf4p\t\x02\xd2\x02 \xcc\xc1\
\x06$\xe5?\xffO\x04R,\xc5N\x9e!\x1c@P\xc3\x91\xd2\x01\xe4o\xf3tq\x0c\xa9\x98\
358\x9a\xef\xb0\x01\xc7\xe3\x89\xc9S~\xc7oZ\xfb}c\x93\x86\xe2\xc5g\xeb\xb9\
\x12\x93}N\xe9xI~/m\xe2ra\xbf>+9\xc4\xe8\xf3\x1dn\x06\xed\x89\x02\x05F\x06\
\x92\x0b\x96\xdf\xeb\xea\xf1\xfa\xb6\xec\xb7U3\x03\x83\xb7`\x8d;\x13C\xc4\
\x94\x88/\xcf\xa5\xba'\x85x\x9b\x1e\xd1\xbbb\xd6\xbc\xc7\xeb\x9e\xed\xce\x9c\
\x8fE\nV\x12\x0e,/\xef\xef6\xf6\xd3\xbe\xf2Lvf\x87G\x8d\x96\xf1\xf1}q\xa7\
\xc5\r7\xdf\xf3\x9d^t\xb4PFa\xd17.\xc1G\xc6\xa5_\x85\x94\x03\x8c\xab\xf7\n\
\x9e\xcaz\xb7\xe4\xd0\xeb\xb5\x93\x7f\x19\xbf\r8\xcf\x93\xb0\xef\x10\x9f\\\
\xde\x84\xd2\x0f\xf1L\x91G\x8c\x7f0t=<{\xccE9L\x01\xe8\x03\x06OW?\x97uN\tM\
\x00\xe1\xf8b\xe3" )
def getMagPlus16Bitmap():
return BitmapFromImage(getMagPlus16Image())
def getMagPlus16Image():
stream = cStringIO.StringIO(getMagPlus16Data())
return ImageFromStream(stream)
#----------------------------------------------------------------------
def getMagMinus16Data():
return zlib.decompress(
"x\xda\xeb\x0c\xf0s\xe7\xe5\x92\xe2b``\xe0\xf5\xf4p\t\x02\xd2\x02 \xcc\xc1\
\x06$\xe5?\xffO\x04R,\xc5N\x9e!\x1c@P\xc3\x91\xd2\x01\xe4\xaf\xf4tq\x0c\xa9\
\x98\xb36\xd8Q\xa8\xc5\x80C\xf9\x80\xf1\x9b\xff\xf6+\xd3\xf8\xb5\xb75\x87\
\xdc\x9dy\xd6P5\xd3I4`\xb2\xe0\xefmABWdfrW\x881_\x8f\x9c4g\xe6\x1c6E5}\xc6'\
\x0f\xbc\x85\xcf?\xca\xeaPIW\x93\xe0\xcb\xdf}N\xefc\x96Aq}\xe4#mfSw\xd35\xcf\
VL\x8a\xe5\x99\xf7(\xec\xc2\xe30\xc6\x80o\xe2?\xc3\xb2\xd7^\xedn\x9b\xe5\xa0\
[\xb5\xe9\xd0&\x1d\x91\x89\x9fmL\x02^\x8b.\xfa\x9f\xd2)T\x93\xed\xfb-\xf7\
\xed\xfd\xc3/\xc4<\x8d\x9a\xf4'?\x99\xff\x92\xef\xe7L\xcf\xae}a\xdfg\xc5\xe6\
\xf4\xcd\xe7q\x9b|\xe3 \xfb\xa7#\x1bw\xe4\x1f\xcdj\x01:\x9a\xc1\xd3\xd5\xcfe\
\x9dSB\x13\x00<\xbf^\xf7" )
def getMagMinus16Bitmap():
return BitmapFromImage(getMagMinus16Image())
def getMagMinus16Image():
stream = cStringIO.StringIO(getMagMinus16Data())
return ImageFromStream(stream)
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
8bd1c9457bbd2f09555ef5cfa3d565624a9c733b | 0362023a283a492733336dbe899714236b9a06ef | /SEGUNDO/M3-POOpython/ejercicios/Alex Catalan BUENOejercicio_control_errores/Llista.py | a022f3c5a3d4da8707bdf6f17ecf9a045afa675d | [] | no_license | alexcatmu/CFGS_DAM | 205b8bcc6d09f8351894c5f70e1a354ff25c17a3 | 1a4384dee8833b5d8034fdf0909a0774cbe5b1c0 | refs/heads/master | 2020-04-07T18:27:15.225638 | 2019-12-17T19:34:39 | 2019-12-17T19:34:39 | 158,610,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | class Llista:
def __init__(self, num = 0):
if(not(isinstance(num,int))):
raise ValueError("Parametro erroneo, tipo de dato no valido", type(self))
elif(num < 0):
raise NotImplementedError("Parametro erroneo, debe ser un numero positivo", type(self))
self.array_nums = []
#num debe ser numerico
for i in range(num):
self.array_nums.append(None)
def elemento_posicion(self,element, position):
try:
self.array_nums[position] = element
except IndexError:
raise IndexError("Fuera de rango", type(self))
except ValueError:
raise ValueError("Valor no valido",type(self))
except TypeError:
raise TypeError("Debe ser de tipo entero", type(self))
except Exception as e:#Es de ValueError cuando introducimos una letra
print("Detectat error "+ str(e))
prueba = Llista(4)
print(prueba.array_nums)
prueba.elemento_posicion(4,1)
print(prueba.array_nums) | [
"alex.catalan.catalan@gmail.com"
] | alex.catalan.catalan@gmail.com |
b4ada9f63387a5be7a94668e4a18c2a727699ac6 | 96fab383fd53d404eb8fbcc5fb4fe814ffd82661 | /fastlink/fastlink/apps.py | 242681808afb7418a7df19b64329fdc8040bc0c0 | [] | no_license | dobestan/fastlink | 41809245f0cc25aecf690ea9db6ee7eec75ee227 | 1e4f25bf112a4007e6afe90530d80c88b43dda71 | refs/heads/master | 2021-01-10T13:01:22.268285 | 2016-02-04T16:57:01 | 2016-02-04T16:57:01 | 51,088,310 | 0 | 1 | null | 2016-02-04T16:57:01 | 2016-02-04T16:20:13 | Python | UTF-8 | Python | false | false | 174 | py | from django.apps import AppConfig
class FastlinkAppConfig(AppConfig):
name = 'fastlink'
def ready(self):
from .signals.post_save import post_save_resource
| [
"dobestan@gmail.com"
] | dobestan@gmail.com |
df1b6f0a5174307023b0b2d15db49f8e13ea1e1c | 085ce75a507df6e755cabb7a65c4a2a8c98762ba | /dockerfiles/root/.pycharm_helpers/python_stubs/-252567642/zlib.py | 23b2d0fd3442cd37ac55b116c740129aefbca602 | [] | no_license | Arhzi/habr-docker-article | d44302db1fe157d81fe0818e762e82218f50e31f | 6fb094860b612e307beadaeb22981aa0ee64e964 | refs/heads/master | 2021-01-23T20:41:47.398025 | 2015-12-10T08:56:33 | 2015-12-10T08:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,927 | py | # encoding: utf-8
# module zlib
# from /usr/local/lib/python2.7/lib-dynload/zlib.so
# by generator 1.137
"""
The functions in this module allow compression and decompression using the
zlib library, which is based on GNU zip.
adler32(string[, start]) -- Compute an Adler-32 checksum.
compress(string[, level]) -- Compress string, with compression level in 0-9.
compressobj([level]) -- Return a compressor object.
crc32(string[, start]) -- Compute a CRC-32 checksum.
decompress(string,[wbits],[bufsize]) -- Decompresses a compressed string.
decompressobj([wbits]) -- Return a decompressor object.
'wbits' is window buffer size.
Compressor objects support compress() and flush() methods; decompressor
objects support decompress() and flush().
"""
# no imports
# Variables with simple values
DEFLATED = 8
DEF_MEM_LEVEL = 8
MAX_WBITS = 15
ZLIB_VERSION = '1.2.8'
Z_BEST_COMPRESSION = 9
Z_BEST_SPEED = 1
Z_DEFAULT_COMPRESSION = -1
Z_DEFAULT_STRATEGY = 0
Z_FILTERED = 1
Z_FINISH = 4
Z_FULL_FLUSH = 3
Z_HUFFMAN_ONLY = 2
Z_NO_FLUSH = 0
Z_SYNC_FLUSH = 2
__version__ = '1.0'
# functions
def adler32(string, start=None): # real signature unknown; restored from __doc__
"""
adler32(string[, start]) -- Compute an Adler-32 checksum of string.
An optional starting value can be specified. The returned checksum is
a signed integer.
"""
pass
def compress(string, level=None): # real signature unknown; restored from __doc__
"""
compress(string[, level]) -- Returned compressed string.
Optional arg level is the compression level, in 0-9.
"""
pass
def compressobj(level=None): # real signature unknown; restored from __doc__
"""
compressobj([level]) -- Return a compressor object.
Optional arg level is the compression level, in 0-9.
"""
pass
def crc32(string, start=None): # real signature unknown; restored from __doc__
"""
crc32(string[, start]) -- Compute a CRC-32 checksum of string.
An optional starting value can be specified. The returned checksum is
a signed integer.
"""
pass
def decompress(string, wbits=None, bufsize=None): # real signature unknown; restored from __doc__
"""
decompress(string[, wbits[, bufsize]]) -- Return decompressed string.
Optional arg wbits is the window buffer size. Optional arg bufsize is
the initial output buffer size.
"""
pass
def decompressobj(wbits=None): # real signature unknown; restored from __doc__
"""
decompressobj([wbits]) -- Return a decompressor object.
Optional arg wbits is the window buffer size.
"""
pass
# classes
class error(Exception):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
| [
"sirnikolasd@yandex.ru"
] | sirnikolasd@yandex.ru |
16bde3d556f038d4d58862121c5017b380526751 | 44dbb043e52f00c9a797b1bea8f1df50dd621842 | /nntplib-example-3.py | 0c6beb82e255947dd47abacd9b5402d47a9aa76d | [] | no_license | peterdocter/standardmodels | 140c238d3bef31db59641087e3f3d5413d4baba1 | 7addc313c16b416d0970461998885833614570ad | refs/heads/master | 2020-12-30T16:59:30.489486 | 2016-12-13T06:32:03 | 2016-12-13T06:32:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | import nntplib
import string, random
import StringIO, rfc822
SERVER = "news.spam.egg"
GROUP = "comp.lang.python"
# connect to server
server = nntplib.NNTP(SERVER)
resp, count, first, last, name = server.group(GROUP)
for i in range(10):
try:
id = random.randint(int(first), int(last))
resp, id, message_id, text = server.article(str(id))
except (nntplib.error_temp, nntplib.error_perm):
pass # no such message (maybe it was deleted?)
else:
break # found a message!
else:
raise SystemExit
text = string.join(text, "\n")
file = StringIO.StringIO(text)
message = rfc822.Message(file)
for k, v in message.items():
print k, "=", v
print message.fp.read() | [
"415074476@qq.com"
] | 415074476@qq.com |
8529eaf059a12424bb2811930ff8608b45b8abcf | 09c39de5aad7b283cfac2f09a2b93e43086846d2 | /Unit 03 Conditionals and Control Flow/02 PygLatin/PygLatin PART2/9-Move it on back.py | 1cac4dbb960f695de75cbd184784c5864d08e7b4 | [
"MIT"
] | permissive | lpython2006e/python-samples | b4e84080259faf75b41fb2fd4fb9d2fbc9f857aa | b94ba67ce0d7798ecf796dadae206aa75da58301 | refs/heads/master | 2023-01-21T13:16:13.295163 | 2020-11-29T11:01:50 | 2020-11-29T11:01:50 | 278,653,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | pyg = 'ay'
original = raw_input('Enter a word:')
if len(original) > 0 and original.isalpha():
word = original.lower()
first = word[0]
new_word = word + first + pyg
print(pyg)
else:
print('empty')
| [
"lent@hivetech.vn"
] | lent@hivetech.vn |
daa6063529c244841de6f2f0caaed32db43d8119 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/40/usersdata/130/24581/submittedfiles/funcoes.py | 4052f0d988cac6a87cf753291e3a58713709e011 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #ARQUIVO COM SUAS FUNCOES
def absoluto(x):
if x<0:
x=x*(-1)
resultado=x
return resultado
def pi(x):
So=3
a=2
for i in range(1,x+1,1):
if (a//2)%2==0:
So=So-4/(a*(a+1)*(a+2))
else:
So=So+4/(a*(a+1)*(a+2))
a=a+2
pi=So
return pi
def cosseno(x):
S=1
i=2
while True:
x=i
M=1
for i in range(1,x+1,1):
M=M*i
if((i//2)%2)==0:
S=S+((x**i)/M)
else:
S=S-((x**i)/M)
i=i+2
if absoluto(S)>=e:
break
def aurea(x):
A=2*x
return A | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
86239dd8825f9994ac7cc262b1f6c1887f9b5f4b | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/dmlc_minpy/minpy-master/examples/nn/cnn_customop.py | 9a3a2c812faee140c621d87942c0897b9d004406 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 4,212 | py | """Convolution Neural Network example with both MinPy ndarray and MXNet symbol."""
import sys
import argparse
import minpy
import minpy.numpy as np
import numpy
import mxnet as mx
from minpy.nn.io import NDArrayIter
# Can also use MXNet IO here
# from mxnet.io import NDArrayIter
from minpy.core import Function
from minpy.nn import layers
from minpy.nn.model import ModelBase
from minpy.nn.solver import Solver
from examples.utils.data_utils import get_CIFAR10_data
from minpy.primitive import customop
# Please uncomment following if you have GPU-enabled MXNet installed.
# from minpy.context import set_context, gpu
# set_context(gpu(0)) # set the global context as gpu(0)
batch_size = 128
input_size = (3, 32, 32)
flattened_input_size = 3 * 32 * 32
hidden_size = 512
num_classes = 10
@customop('numpy')
def my_softmax(x, y):
probs = numpy.exp(x - numpy.max(x, axis=1, keepdims=True))
probs /= numpy.sum(probs, axis=1, keepdims=True)
N = x.shape[0]
loss = -numpy.sum(numpy.log(probs[numpy.arange(N), y])) / N
return loss
def my_softmax_grad(ans, x, y):
def grad(g):
N = x.shape[0]
probs = numpy.exp(x - numpy.max(x, axis=1, keepdims=True))
probs /= numpy.sum(probs, axis=1, keepdims=True)
probs[numpy.arange(N), y] -= 1
probs /= N
return probs
return grad
my_softmax.def_grad(my_softmax_grad)
class ConvolutionNet(ModelBase):
def __init__(self):
super(ConvolutionNet, self).__init__()
# Define symbols that using convolution and max pooling to extract better features
# from input image.
net = mx.sym.Variable(name='X')
net = mx.sym.Convolution(
data=net, name='conv', kernel=(7, 7), num_filter=32)
net = mx.sym.Activation(data=net, act_type='relu')
net = mx.sym.Pooling(
data=net,
name='pool',
pool_type='max',
kernel=(2, 2),
stride=(2, 2))
net = mx.sym.Flatten(data=net)
# Create forward function and add parameters to this model.
self.conv = Function(
net, input_shapes={'X': (batch_size, ) + input_size}, name='conv')
self.add_params(self.conv.get_params())
# Define ndarray parameters used for classification part.
output_shape = self.conv.get_one_output_shape()
conv_out_size = output_shape[1]
self.add_param(name='w1', shape=(conv_out_size, hidden_size)) \
.add_param(name='b1', shape=(hidden_size,)) \
.add_param(name='w2', shape=(hidden_size, num_classes)) \
.add_param(name='b2', shape=(num_classes,))
def forward(self, X, mode):
out = self.conv(X=X, **self.params)
out = layers.affine(out, self.params['w1'], self.params['b1'])
out = layers.relu(out)
out = layers.affine(out, self.params['w2'], self.params['b2'])
return out
def loss(self, predict, y):
return my_softmax(predict, y)
def main(args):
# Create model.
model = ConvolutionNet()
# Create data iterators for training and testing sets.
data = get_CIFAR10_data(args.data_dir)
train_dataiter = NDArrayIter(
data=data['X_train'],
label=data['y_train'],
batch_size=batch_size,
shuffle=True)
test_dataiter = NDArrayIter(
data=data['X_test'],
label=data['y_test'],
batch_size=batch_size,
shuffle=False)
# Create solver.
solver = Solver(
model,
train_dataiter,
test_dataiter,
num_epochs=10,
init_rule='gaussian',
init_config={'stdvar': 0.001},
update_rule='sgd_momentum',
optim_config={'learning_rate': 1e-3,
'momentum': 0.9},
verbose=True,
print_every=20)
# Initialize model parameters.
solver.init()
# Train!
solver.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Multi-layer perceptron example using minpy operators")
parser.add_argument(
'--data_dir',
type=str,
required=True,
help='Directory that contains cifar10 data')
main(parser.parse_args())
| [
"659338505@qq.com"
] | 659338505@qq.com |
4a5d4da532f4802dd20815dfac7206ad889904b8 | dd8227454b817ccf2ceb24b3dfd4260d4ded7a72 | /scripts/item/consume_2435043.py | 13ed0760599dc002e3b64452a101b17cdadd7c63 | [
"MIT"
] | permissive | Snewmy/swordie | 0dd3c17808b064c2cb2bd9576b51daf01ae5d686 | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | refs/heads/master | 2023-06-30T21:14:05.225798 | 2021-07-06T14:32:39 | 2021-07-06T14:32:39 | 389,497,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | # Heroes Phantom Damage Skin
success = sm.addDamageSkin(2435043)
if success:
sm.chat("The Heroes Phantom Damage Skin has been added to your account's damage skin collection.")
| [
"vcalheirosdoc@gmail.com"
] | vcalheirosdoc@gmail.com |
2e8e809c480deb7a245b236f0b6ba0a2c56aee2b | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/anomalydetector/azure-ai-anomalydetector/azure/ai/anomalydetector/operations/__init__.py | 00fccdc398f92af40bdf0644db345b88ab2f66a4 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 613 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._anomaly_detector_client_operations import AnomalyDetectorClientOperationsMixin
__all__ = [
'AnomalyDetectorClientOperationsMixin',
]
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
7dc56d8754b8e16411207641759855115ea4c5e8 | 1b7947f81b2a82d3ff377c39e7228f1aaf743277 | /day2/urllib_demo2/music163.py | 77092b703cf3519ae89e972c387e284ff06fd398 | [] | no_license | gaohj/python1902crawer | 92fbaeb8151bd8a71f3c87a1566f6dcded3ef745 | aa0dce86ba50db12fb0262a62ccc6a9ab60ad0c2 | refs/heads/master | 2020-08-14T20:08:47.775547 | 2019-10-25T06:55:21 | 2019-10-25T06:55:21 | 215,226,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py |
import urllib.request
import urllib.parse
import json
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
}
#post接口
url = "https://music.163.com/weapi/v1/resource/comments/A_PL_0_2783240?csrf_token="
data = {
"params":"JKDq4LtELoh0m6USC17tdjp/BdF7vswdDOMZs7F+LHW3GOVspWTYYszzBIKCdEcWn2Q2eG1UHmhbYVwrSNwrGg4ljF2MvPTnpXDHRdvHw2nu1bt/uYCa1gEhMGQENYuBUwfYG/lLSYROzcPgyoIeGgfz0ioUviVXJPehwweNGsk8Awo5KLnpXvYfsAbjtrZB0yRWtFluWojJpHIoDquyClYfaSRLEb1WL4vNAPuA8BI=",
"encSecKey":"bb8a4561f8d79aca80d57b8f9d21576dfb866548feadf33a8f4c4bb884f18cc2e8b0d7fe81d18bdd565024b56e2e546ea75246c90bf6305c06fc1617fce4bfba10b7ef39e2fd50aacdad303ea615aff20af49c11a6a382d33516536b790a74dc4a02ff76178ea548a435cbe8c81b39e88cea9afb4b18aa57293d4cfc56c503f5",
}
data = urllib.parse.urlencode(data).encode()
#str -> bytes encode
#bytes -> str decode
req = urllib.request.Request(url=url,headers=headers,data=data)
response = urllib.request.urlopen(req)
content = response.read().decode()
data_dict = json.loads(content)
hotComments = data_dict['hotComments']
for hotComment in hotComments:
nickname = hotComment['user']['nickname']
content = hotComment['content']
print(nickname,":",content)
| [
"gaohj@126.com"
] | gaohj@126.com |
34018ff3a28fba4a4eeedc2a0596b54872e6bdda | 44f96b540afca6447535cdc5ab15ee34cc2f591b | /FirstProj/ShoppingApp/models.py | 74bc3117f80263ad4832cae5bd87cb68d973b456 | [] | no_license | Shreya549/FirstDjangoApp | 087ddf30d8d73f451da473fa12fad7633b4b4142 | 2ad5d98d0c49d9b82fc7a431c8df111f3f6a2353 | refs/heads/master | 2022-04-10T07:43:23.344458 | 2020-03-28T13:10:33 | 2020-03-28T13:10:33 | 250,259,665 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from django.db import models
# Create your models here.
class Product(models.Model):
name = models.CharField(max_length = 100)
price = models.IntegerField()
desc = models.TextField()
img = models.ImageField(upload_to = 'pics')
| [
"shreya.chatterjee2018@vitstudent.ac.in"
] | shreya.chatterjee2018@vitstudent.ac.in |
a01ef31f02f164ca0ed8d5220c4573c32ed3678b | 2eba5ec3f7462ed9f8f8e858c258f73bfeda6f6a | /Test1/윤성우_nene_ver3.py | fd464614fe59fceb309b05e2a354899b90dd1fbc | [] | no_license | sungwooman91/Test | d16e93e40a74054c860f9f7fdd2bb70f0189fc43 | 59b22517bc2c6e60b9d0604b878d79a1624530da | refs/heads/master | 2021-09-06T09:55:45.740088 | 2018-02-05T07:55:57 | 2018-02-05T07:55:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,156 | py | import urllib.request
import os
from pandas import DataFrame
import xml.etree.ElementTree as ET
import time
print("START")
result = []
dir_name = "V3_BigData"
dir_nene = "Nene_data"
dir_delimiter = "\\"
file_name = "nene"
count = "nene_count.txt"
csv = '.csv'
result_limit = 3
time_table = time.strftime("%c",time.localtime(time.time()))
time_table = time_table.replace("/","_")
time_table = time_table.replace(":","_")
def make_dir(number):
os.mkdir(dir_name + dir_delimiter + dir_nene + str(number))
return None
def make_nene(index_number,file_number):
dir_totalname = dir_name + dir_delimiter + dir_nene + str(index_number) + dir_delimiter + file_name + '_' + str(file_number) + csv
nene_table.to_csv(dir_totalname, encoding="cp949", mode='w', index=True)
return None
response = urllib.request.urlopen('http://nenechicken.com/subpage/where_list.asp?target_step2=%s&proc_type=step1&target_step1=%s'%(urllib.parse.quote('전체'),urllib.parse.quote('전체')))
xml = response.read().decode('UTF-8')
root = ET.fromstring(xml)
for element in root.findall('item'):
store_name = element.findtext('aname1')
store_sido = element.findtext('aname2')
store_gungu = element.findtext('aname3')
store_address = element.findtext('aname5')
result.append([store_name]+[store_sido]+[store_gungu]+[store_address])
nene_table = DataFrame(result,columns=('store','sido','gungu','store_address'))
try:
os.mkdir(dir_name)
except:pass
try:
with open(dir_name + dir_delimiter + count, 'r') as file:
file_number = file.readline()
file_number = int(file_number)
index_num = int(file_number/result_limit)
if file_number % result_limit != 0:
index_num += 1
if file_number % result_limit == 1:
make_dir(index_num)
make_nene(index_num, time_table)
file_number +=1
with open(dir_name + dir_delimiter + count, 'w') as file:
file.write(str(file_number))
except FileNotFoundError:
with open(dir_name + dir_delimiter + count, 'w') as file:
file.write("2")
make_dir(1)
make_nene(1,time_table)
print("END!!!") | [
"you@example.com"
] | you@example.com |
ff0f35ac3c3c28da4438125b3998d1c2ce5d5da5 | ba80ca143ba35fd481730786a27ebdb1f88ce835 | /algorithm/oop/vending_machine/money/money.py | 98038d955a7311a82085d5b84c181fa32bd45995 | [] | no_license | uiandwe/TIL | c541020b65adc53578aeb1c3ba4c6770b3b2e8b3 | 186544469374dd0279099c6c6aa7555ee23e42fe | refs/heads/master | 2022-02-15T08:33:07.270573 | 2022-01-01T15:22:54 | 2022-01-01T15:22:54 | 63,420,931 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | # -*- coding: utf-8 -*-
class Money:
VALID_MONEY = 'money의 타입은 int 형 입니다.'
def __init__(self, money):
self.valid_money(money)
self._money = money
def valid_money(self, money):
assert type(money) == int, Money.VALID_MONEY
@property
def money(self):
return self._money
def __str__(self):
return "{}".format(self.money)
def __ge__(self, other):
return self._money >= other._money
def __sub__(self, other):
return self._money - other._money
| [
"noreply@github.com"
] | uiandwe.noreply@github.com |
aed762f6d9dea07279af3b949111d028114c4e4d | 172f8b508c89b47376bd41cfc167cabf9ba94cb8 | /dropthesoap/service.py | 4976d7b871415ed9e151bb0fd100573a2dba0401 | [
"MIT"
] | permissive | d3sbar/dropthesoap | 1ee3d8ba744d5756001a339793cbeb2bc9666f1a | 73fc9a9aa725e5bc568c7896fd0d9a2af858a352 | refs/heads/master | 2020-05-24T04:22:25.074507 | 2019-05-16T19:46:01 | 2019-05-16T19:46:01 | 187,091,092 | 0 | 0 | null | 2019-05-16T19:42:54 | 2019-05-16T19:42:54 | null | UTF-8 | Python | false | false | 6,931 | py | from .schema import xs, wsdl, soap
from .schema.model import Namespace, get_root, etree
class Request(object):
def __init__(self, transport_request, envelope):
self.transport_request = transport_request
self.envelope = envelope
self.header = None
class Method(object):
def __init__(self, func, request, response):
self.func = func
self.request = request
self.response = response
self.need_context = False
self.header = None
def __call__(self, ctx, request):
if self.header:
if ctx.envelope.Header:
ctx.header = self.header.from_node(ctx.envelope.Header._any[0])
else:
ctx.header = None
args = [ctx] if self.need_context else []
if self.request._unpack_params:
for name in self.request._params:
args.append(getattr(request, name))
else:
args.append(request)
return self.response.normalize(self.func(*args))
class Fault(Exception):
def __init__(self, code, message):
self.code = code
Exception.__init__(self, message)
def make_message_element(name, obj):
if isinstance(obj, xs.element):
return obj
else:
return xs.element(name, obj)
class Service(object):
def __init__(self, name, tns, additional_schemas=None):
self.name = name
self.methods = {}
self.req2method = {}
self.schema = xs.schema(Namespace(tns))
self.additional_schemas = additional_schemas or []
def expose(self, request=None, response=None):
if (callable(request)
and not isinstance(request, (xs.Type, xs.element))
and type(request) is not type):
decorated_func = request
request = None
else:
decorated_func = None
def inner(func):
name = func.__name__
req_name = name + 'Request'
if request is None:
defaults = func.__defaults__
if defaults:
names = func.__code__.co_varnames[:func.__code__.co_argcount][-len(defaults):]
else:
names = []
defaults = []
celements = [xs.element(n, t) for n, t in zip(names, defaults)]
request_elem = xs.element(req_name)(xs.cts(*celements))
request_elem._params = names
request_elem._unpack_params = True
else:
request_elem = make_message_element(req_name, request)
req_name = request_elem.name
request_elem._unpack_params = False
self.schema(request_elem)
resp_name = name + 'Response'
if response is None:
response_elem = self.schema[resp_name]
else:
response_elem = make_message_element(resp_name, response)
self.schema(response_elem)
method = Method(func, request_elem, response_elem)
self.methods[name] = method
self.req2method[req_name] = method
return func
return inner(decorated_func) if decorated_func else inner
def wraps(self, original_func):
name = original_func.__name__
def inner(func):
self.methods[name].func = func
self.methods[name].need_context = True
func.__name__ = name
return func
return inner
def header(self, header):
def inner(func):
rheader = header
if isinstance(rheader, basestring):
rheader = self.schema[rheader]
self.methods[func.__name__].header = rheader
return func
return inner
def get_wsdl(self, url):
defs = wsdl.definitions.instance()
defs.types = wsdl.types.instance(
_any=map(get_root, [self.schema] + self.additional_schemas))
messages = defs.message = []
port = wsdl.portType.instance(name='%sPortType' % self.name)
operations = port.operation = []
defs.portType = [port]
binding = wsdl.binding.instance(
name='%sBinding' % self.name, type='tns:%sPortType' % self.name,
binding = wsdl.soap_binding.instance(transport='http://schemas.xmlsoap.org/soap/http', style='document'))
defs.binding = [binding]
boperations = binding.operation = []
for name, method in self.methods.iteritems():
req_name = method.request.name
resp_name = method.response.name
messages.append(wsdl.message.instance(name=req_name,
part=wsdl.part.instance(name='parameters', element='tns:%s' % req_name)))
messages.append(wsdl.message.instance(name=resp_name,
part=wsdl.part.instance(name='parameters', element='tns:%s' % resp_name)))
operations.append(wsdl.operation.instance(name=name,
input=wsdl.input.instance(message='tns:%s' % req_name),
output=wsdl.output.instance(message='tns:%s' % resp_name)))
binput = wsdl.input.instance(body=wsdl.soap_body.instance(use='literal'))
if method.header:
binput.header = wsdl.soap_header.instance(
use='literal', message='tns:%s' % method.header.name, part=method.header.name)
boperations.append(wsdl.operation.instance(
name=name,
operation=wsdl.soap_operation.instance(soapAction=name),
input=binput,
output=wsdl.output.instance(body=wsdl.soap_body.instance(use='literal'))))
for header in set(r.header for r in self.methods.itervalues() if r.header):
messages.append(wsdl.message.instance(name=header.name,
part=wsdl.part.instance(name=header.name, element='tns:%s' % header.name)))
defs.service = [wsdl.service.instance(
name=self.name,
port=wsdl.port.instance(
name='%sPort' % self.name,
binding='tns:%sBinding' % self.name,
address=wsdl.soap_address.instance(location=url))
)]
tree = get_root(defs)
tree.attrib['targetNamespace'] = self.schema.targetNamespace.namespace
tree.attrib['xmlns:tns'] = self.schema.targetNamespace.namespace
return etree.tostring(tree)
def call(self, transport_request, xml):
try:
envelope = soap.schema.fromstring(xml)
request = self.schema.from_node(envelope.Body._any[0])
ctx = Request(transport_request, envelope)
method = self.req2method[request.tag]
response = method(ctx, request)
except Fault as e:
response = soap.Fault.instance(faultcode=e.code, faultstring=e.message)
return response
| [
"bobrov@vl.ru"
] | bobrov@vl.ru |
697e4233a9930b2435f73fcde1312d8ff1803683 | 784dda4c400d4e5c42f57e9a7d48883692b2a931 | /pyhawkes/internals/network.py | f5be4dcb1b5091aa28fe821beaa73bee278aead4 | [
"MIT"
] | permissive | yxtj/pyhawkes | bd942aded06dba3dd390a47e28702dcba124961b | ecc6dc23e516a7b06d64e5dbd10c8861b01bd955 | refs/heads/master | 2021-01-15T18:31:12.539149 | 2015-02-25T23:35:50 | 2015-02-25T23:35:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,039 | py | """
Network models expose a probability of connection and a scale of the weights
"""
import abc
import numpy as np
from scipy.special import gammaln, psi
from scipy.misc import logsumexp
from pyhawkes.deps.pybasicbayes.abstractions import \
BayesianDistribution, GibbsSampling, MeanField, MeanFieldSVI
from pyhawkes.deps.pybasicbayes.util.stats import sample_discrete_from_log
from pyhawkes.internals.distributions import Discrete, Bernoulli, \
Gamma, Dirichlet, Beta
# TODO: Make a base class for networks
# class Network(BayesianDistribution):
#
# __metaclass__ = abc.ABCMeta
#
# @abc.abstractproperty
# def p(self):
# """
# Return a KxK matrix of probability of connection
# """
# pass
#
# @abc.abstractproperty
# def kappa(self):
# """
# Return a KxK matrix of gamma weight shape parameters
# """
# pass
#
# @abc.abstractproperty
# def v(self):
# """
# Return a KxK matrix of gamma weight scale parameters
# """
# pass
class _StochasticBlockModelBase(BayesianDistribution):
    """
    A stochastic block model is a clustered network model with
    K:          Number of nodes in the network
    C:          Number of blocks
    m[c]:       Probability that a node belongs block c
    p[c,c']:    Probability of connection from node in block c to node in block c'
    v[c,c']:    Scale of the gamma weight distribution from node in block c to node in block c'

    It is parameterized by:
    pi:         Parameter of Dirichlet prior over m
    tau0, tau1: Parameters of beta prior over p
    alpha:      Shape parameter of gamma prior over v
    beta:       Scale parameter of gamma prior over v
    """

    __metaclass__ = abc.ABCMeta

    def __init__(self, K, C,
                 c=None, m=None, pi=1.0,
                 p=None, tau0=0.1, tau1=0.1,
                 v=None, alpha=1.0, beta=1.0,
                 kappa=1.0,
                 allow_self_connections=True):
        """
        Initialize SBM with parameters defined above.

        Any of c, m, p, v may be given explicitly; unspecified ones are
        sampled from their priors. If c, p, and v are all given the model
        is marked ``fixed`` and the priors are ignored.
        """
        # Bug fix: this assertion previously tested ``C >= 1`` instead of
        # ``K >= 1``, so nonpositive K values slipped through unchecked.
        assert isinstance(K, int) and K >= 1, "K must be a positive integer number of nodes"
        self.K = K

        assert isinstance(C, int) and C >= 1, "C must be a positive integer number of blocks"
        self.C = C

        # Dirichlet concentration: a scalar means a symmetric prior over blocks.
        if isinstance(pi, (int, float)):
            self.pi = pi * np.ones(C)
        else:
            assert isinstance(pi, np.ndarray) and pi.shape == (C,), "pi must be a scalar or a C-vector"
            self.pi = pi

        self.tau0 = tau0
        self.tau1 = tau1
        self.kappa = kappa
        self.alpha = alpha
        self.beta = beta
        self.allow_self_connections = allow_self_connections

        # Block probability vector m: given or sampled from Dirichlet(pi)
        if m is not None:
            assert isinstance(m, np.ndarray) and m.shape == (C,) \
                   and np.allclose(m.sum(), 1.0) and np.amin(m) >= 0.0, \
                "m must be a length C probability vector"
            self.m = m
        else:
            self.m = np.random.dirichlet(self.pi)

        # Block assignments c: given or sampled from Categorical(m)
        if c is not None:
            assert isinstance(c, np.ndarray) and c.shape == (K,) and c.dtype == np.int \
                   and np.amin(c) >= 0 and np.amax(c) <= self.C-1, \
                "c must be a length K-vector of block assignments"
            self.c = c.copy()
        else:
            self.c = np.random.choice(self.C, p=self.m, size=(self.K))

        # Connection probabilities p: given (scalar or CxC) or sampled from Beta
        if p is not None:
            if np.isscalar(p):
                assert p >= 0 and p <= 1, "p must be a probability"
                self.p = p * np.ones((C,C))
            else:
                assert isinstance(p, np.ndarray) and p.shape == (C,C) \
                       and np.amin(p) >= 0 and np.amax(p) <= 1.0, \
                    "p must be a CxC matrix of probabilities"
                self.p = p
        else:
            self.p = np.random.beta(self.tau1, self.tau0, size=(self.C, self.C))

        # Weight scales v: given (scalar or CxC) or sampled from Gamma
        if v is not None:
            if np.isscalar(v):
                # Message fix: v is a gamma scale, not a probability.
                assert v >= 0, "v must be a nonnegative scale"
                self.v = v * np.ones((C,C))
            else:
                assert isinstance(v, np.ndarray) and v.shape == (C,C) \
                       and np.amin(v) >= 0, \
                    "v must be a CxC matrix of nonnegative gamma scales"
                self.v = v
        else:
            self.v = np.random.gamma(self.alpha, 1.0/self.beta, size=(self.C, self.C))

        # If c, p, and v are specified, then the model is fixed and the prior
        # parameters are ignored.
        # Bug fix: ``None not in (c, p, v)`` performs elementwise ``==``
        # against numpy arrays, which is ambiguous/deprecated; use identity
        # checks instead.
        self.fixed = (c is not None) and (p is not None) and (v is not None)

    @property
    def P(self):
        """
        Get the KxK matrix of probabilities

        :return:
        """
        P = self.p[np.ix_(self.c, self.c)]
        if not self.allow_self_connections:
            np.fill_diagonal(P, 0.0)
        return P

    @property
    def V(self):
        """
        Get the KxK matrix of scales

        :return:
        """
        return self.v[np.ix_(self.c, self.c)]

    @property
    def Kappa(self):
        # Shared gamma shape parameter, broadcast to a KxK matrix.
        return self.kappa * np.ones((self.K, self.K))

    def log_likelihood(self, x):
        """
        Compute the log likelihood of a set of SBM parameters

        :param x: (m,p,v,c) tuple
        :return:
        """
        m,p,v,c = x

        lp = 0
        lp += Dirichlet(self.pi).log_probability(m)
        lp += Beta(self.tau1 * np.ones((self.C, self.C)),
                   self.tau0 * np.ones((self.C, self.C))).log_probability(p).sum()
        lp += Gamma(self.alpha, self.beta).log_probability(v).sum()
        # Prior probability of the block assignments under m
        lp += (np.log(m)[c]).sum()
        return lp

    def log_probability(self):
        return self.log_likelihood((self.m, self.p, self.v, self.c))

    def rvs(self,size=[]):
        raise NotImplementedError()
class GibbsSBM(_StochasticBlockModelBase, GibbsSampling):
    """
    Implement Gibbs sampling for SBM

    Resamples p, v, c, and m from their conditional distributions given
    the observed weighted adjacency matrix (A, W). The beta/gamma/
    Dirichlet priors are conjugate, so those updates are closed form;
    block assignments c are resampled one node at a time.
    """
    def __init__(self, K, C,
                 c=None, pi=1.0, m=None,
                 p=None, tau0=0.1, tau1=0.1,
                 v=None, alpha=1.0, beta=1.0,
                 kappa=1.0,
                 allow_self_connections=True):
        super(GibbsSBM, self).__init__(K=K, C=C,
                                       c=c, pi=pi, m=m,
                                       p=p, tau0=tau0, tau1=tau1,
                                       v=v, alpha=alpha, beta=beta,
                                       kappa=kappa,
                                       allow_self_connections=allow_self_connections)

        # Initialize parameter estimates
        # print "Uncomment GibbsSBM init"
        # NOTE: when the model is not fixed, the sampler starts from a fresh
        # random draw rather than the values the base class may have sampled.
        if not self.fixed:
            self.c = np.random.choice(self.C, size=(self.K))
            self.m = 1.0/C * np.ones(self.C)
            # self.p = self.tau1 / (self.tau0 + self.tau1) * np.ones((self.C, self.C))
            self.p = np.random.beta(self.tau1, self.tau0, size=(self.C, self.C))
            # self.v = self.alpha / self.beta * np.ones((self.C, self.C))
            self.v = np.random.gamma(self.alpha, 1.0/self.beta, size=(self.C, self.C))

    def resample_p(self, A):
        """
        Resample p given observations of the weights

        Conjugate beta update: counts of present/absent edges between each
        pair of blocks are added to (tau1, tau0).
        """
        for c1 in xrange(self.C):
            for c2 in xrange(self.C):
                # Submatrix of A restricted to edges from block c1 to block c2
                Ac1c2 = A[np.ix_(self.c==c1, self.c==c2)]

                if not self.allow_self_connections:
                    # TODO: Account for self connections
                    pass

                tau1 = self.tau1 + Ac1c2.sum()
                tau0 = self.tau0 + (1-Ac1c2).sum()
                self.p[c1,c2] = np.random.beta(tau1, tau0)

    def resample_v(self, A, W):
        """
        Resample v given observations of the weights

        Conjugate gamma update: each present edge contributes kappa to the
        shape and its weight to the rate.
        """
        # import pdb; pdb.set_trace()
        for c1 in xrange(self.C):
            for c2 in xrange(self.C):
                Ac1c2 = A[np.ix_(self.c==c1, self.c==c2)]
                Wc1c2 = W[np.ix_(self.c==c1, self.c==c2)]
                alpha = self.alpha + Ac1c2.sum() * self.kappa
                # Only weights on existing edges enter the likelihood
                beta  = self.beta + Wc1c2[Ac1c2 > 0].sum()
                self.v[c1,c2] = np.random.gamma(alpha, 1.0/beta)

    def resample_c(self, A, W):
        """
        Resample block assignments given the weighted adjacency matrix
        and the impulse response fits (if used)
        """
        if self.C == 1:
            return

        # Sample each assignment in order
        for k in xrange(self.K):
            # Compute unnormalized log probs of each connection
            lp = np.zeros(self.C)

            # Prior from m
            lp += np.log(self.m)

            # Likelihood from network
            for ck in xrange(self.C):
                # Hypothetical assignment vector with node k placed in block ck
                c_temp = self.c.copy().astype(np.int)
                c_temp[k] = ck

                # p(A[k,k'] | c)
                lp[ck] += Bernoulli(self.p[ck, c_temp])\
                                .log_probability(A[k,:]).sum()

                # p(A[k',k] | c)
                lp[ck] += Bernoulli(self.p[c_temp, ck])\
                                .log_probability(A[:,k]).sum()

                # p(W[k,k'] | c)
                lp[ck] += (A[k,:] * Gamma(self.kappa, self.v[ck, c_temp])\
                                .log_probability(W[k,:])).sum()

                # p(W[k,k'] | c)
                lp[ck] += (A[:,k] * Gamma(self.kappa, self.v[c_temp, ck])\
                                .log_probability(W[:,k])).sum()

                # TODO: Subtract of self connection since we double counted

                # TODO: Get probability of impulse responses g

            # Resample from lp
            self.c[k] = sample_discrete_from_log(lp)

    def resample_m(self):
        """
        Resample m given c and pi

        Conjugate Dirichlet update with block occupancy counts.
        """
        pi = self.pi + np.bincount(self.c, minlength=self.C)
        self.m = np.random.dirichlet(pi)

    def resample(self, data=[]):
        """Run one full Gibbs sweep over (p, v, c, m).

        :param data: (A, W) tuple of binary adjacency and weight matrices.
        """
        if self.fixed:
            return

        A,W = data
        self.resample_p(A)
        self.resample_v(A, W)
        self.resample_c(A, W)
        self.resample_m()
class MeanFieldSBM(_StochasticBlockModelBase, MeanField, MeanFieldSVI):
    """
    Mean field variational inference (batch and SVI) for the SBM.

    Maintains variational parameters mirroring the model parameters:
    mf_pi (Dirichlet over m), mf_m (per-node block probabilities),
    mf_tau0/mf_tau1 (beta over p), and mf_alpha/mf_beta (gamma over v).
    """
    def __init__(self, K, C,
                 c=None, pi=1.0, m=None,
                 p=None, tau0=0.1, tau1=0.1,
                 v=None, alpha=1.0, beta=1.0,
                 kappa=1.0,
                 allow_self_connections=True):
        super(MeanFieldSBM, self).__init__(K=K, C=C,
                                           c=c, pi=pi, m=m,
                                           p=p, tau0=tau0, tau1=tau1,
                                           v=v, alpha=alpha, beta=beta,
                                           kappa=kappa,
                                           allow_self_connections=allow_self_connections)

        # Initialize mean field parameters
        self.mf_pi = np.ones(self.C)

        # self.mf_m = 1.0/self.C * np.ones((self.K, self.C))
        # To break symmetry, start with a sample of mf_m
        self.mf_m = np.random.dirichlet(10 * np.ones(self.C),
                                        size=(self.K,))
        self.mf_tau0 = self.tau0 * np.ones((self.C, self.C))
        self.mf_tau1 = self.tau1 * np.ones((self.C, self.C))
        self.mf_alpha = self.alpha * np.ones((self.C, self.C))
        self.mf_beta = self.beta * np.ones((self.C, self.C))

    def expected_p(self):
        """
        Compute the expected probability of a connection, averaging over c

        :return: KxK matrix E[p]
        """
        if self.fixed:
            return self.P

        E_p = np.zeros((self.K, self.K))
        for c1 in xrange(self.C):
            for c2 in xrange(self.C):
                # Get the KxK matrix of joint class assignment probabilities
                pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                # Get the probability of a connection for this pair of classes
                E_p += pc1c2 * self.mf_tau1[c1,c2] / (self.mf_tau0[c1,c2] + self.mf_tau1[c1,c2])

        if not self.allow_self_connections:
            np.fill_diagonal(E_p, 0.0)

        return E_p

    def expected_notp(self):
        """
        Compute the expected probability of NO connection, averaging over c

        :return: KxK matrix E[1-p]
        """
        return 1.0 - self.expected_p()

    def expected_log_p(self):
        """
        Compute the expected log probability of a connection, averaging over c

        :return: KxK matrix E[ln p]
        """
        if self.fixed:
            E_ln_p = np.log(self.P)
        else:
            E_ln_p = np.zeros((self.K, self.K))
            for c1 in xrange(self.C):
                for c2 in xrange(self.C):
                    # Get the KxK matrix of joint class assignment probabilities
                    pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                    # Get the probability of a connection for this pair of classes
                    # E[ln p] under a Beta(tau1, tau0) is psi(tau1) - psi(tau0+tau1)
                    E_ln_p += pc1c2 * (psi(self.mf_tau1[c1,c2])
                                       - psi(self.mf_tau0[c1,c2] + self.mf_tau1[c1,c2]))

        if not self.allow_self_connections:
            np.fill_diagonal(E_ln_p, -np.inf)

        return E_ln_p

    def expected_log_notp(self):
        """
        Compute the expected log probability of NO connection, averaging over c

        :return: KxK matrix E[ln (1-p)]
        """
        if self.fixed:
            E_ln_notp = np.log(1.0 - self.P)
        else:
            E_ln_notp = np.zeros((self.K, self.K))
            for c1 in xrange(self.C):
                for c2 in xrange(self.C):
                    # Get the KxK matrix of joint class assignment probabilities
                    pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                    # Get the probability of a connection for this pair of classes
                    E_ln_notp += pc1c2 * (psi(self.mf_tau0[c1,c2])
                                          - psi(self.mf_tau0[c1,c2] + self.mf_tau1[c1,c2]))

        if not self.allow_self_connections:
            np.fill_diagonal(E_ln_notp, 0.0)

        return E_ln_notp

    def expected_v(self):
        """
        Compute the expected scale of a connection, averaging over c

        :return: KxK matrix E[v]
        """
        if self.fixed:
            return self.V

        E_v = np.zeros((self.K, self.K))
        for c1 in xrange(self.C):
            for c2 in xrange(self.C):
                # Get the KxK matrix of joint class assignment probabilities
                pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                # Get the probability of a connection for this pair of classes
                E_v += pc1c2 * self.mf_alpha[c1,c2] / self.mf_beta[c1,c2]
        return E_v

    def expected_log_v(self):
        """
        Compute the expected log scale of a connection, averaging over c

        :return: KxK matrix E[ln v]
        """
        if self.fixed:
            return np.log(self.V)

        E_log_v = np.zeros((self.K, self.K))
        for c1 in xrange(self.C):
            for c2 in xrange(self.C):
                # Get the KxK matrix of joint class assignment probabilities
                pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                # E[ln v] under Gamma(alpha, beta) is psi(alpha) - ln(beta)
                E_log_v += pc1c2 * (psi(self.mf_alpha[c1,c2])
                                    - np.log(self.mf_beta[c1,c2]))
        return E_log_v

    def expected_m(self):
        # Mean of the variational Dirichlet over block probabilities
        return self.mf_pi / self.mf_pi.sum()

    def expected_log_m(self):
        """
        Compute the expected log probability of each block

        :return: C-vector E[ln m]
        """
        E_log_m = psi(self.mf_pi) - psi(self.mf_pi.sum())
        return E_log_m

    def expected_log_likelihood(self,x):
        pass

    def mf_update_c(self, E_A, E_notA, E_W_given_A, E_ln_W_given_A, stepsize=1.0):
        """
        Update the block assignment probabilitlies one at a time.
        This one involves a number of not-so-friendly expectations.

        :param stepsize: natural-gradient step size (1.0 for batch updates)
        :return:
        """
        # Sample each assignment in order
        for k in xrange(self.K):
            # Indices of all other nodes; self connections are handled separately
            notk = np.concatenate((np.arange(k), np.arange(k+1,self.K)))

            # Compute unnormalized log probs of each connection
            lp = np.zeros(self.C)

            # Prior from m
            lp += self.expected_log_m()

            # Likelihood from network
            for ck in xrange(self.C):
                # Compute expectations with respect to other block assignments, c_{\neg k}
                # Initialize vectors for expected parameters
                E_ln_p_ck_to_cnotk    = np.zeros(self.K-1)
                E_ln_notp_ck_to_cnotk = np.zeros(self.K-1)
                E_ln_p_cnotk_to_ck    = np.zeros(self.K-1)
                E_ln_notp_cnotk_to_ck = np.zeros(self.K-1)
                E_v_ck_to_cnotk       = np.zeros(self.K-1)
                E_ln_v_ck_to_cnotk    = np.zeros(self.K-1)
                E_v_cnotk_to_ck       = np.zeros(self.K-1)
                E_ln_v_cnotk_to_ck    = np.zeros(self.K-1)

                for cnotk in xrange(self.C):
                    # Get the (K-1)-vector of other class assignment probabilities
                    p_cnotk = self.mf_m[notk,cnotk]

                    # Expected log probability of a connection from ck to cnotk
                    E_ln_p_ck_to_cnotk    += p_cnotk * (psi(self.mf_tau1[ck, cnotk])
                                                        - psi(self.mf_tau0[ck, cnotk] + self.mf_tau1[ck, cnotk]))
                    E_ln_notp_ck_to_cnotk += p_cnotk * (psi(self.mf_tau0[ck, cnotk])
                                                        - psi(self.mf_tau0[ck, cnotk] + self.mf_tau1[ck, cnotk]))

                    # Expected log probability of a connection from cnotk to ck
                    E_ln_p_cnotk_to_ck    += p_cnotk * (psi(self.mf_tau1[cnotk, ck])
                                                        - psi(self.mf_tau0[cnotk, ck] + self.mf_tau1[cnotk, ck]))
                    E_ln_notp_cnotk_to_ck += p_cnotk * (psi(self.mf_tau0[cnotk, ck])
                                                        - psi(self.mf_tau0[cnotk, ck] + self.mf_tau1[cnotk, ck]))

                    # Expected log scale of connections from ck to cnotk
                    E_v_ck_to_cnotk    += p_cnotk * (self.mf_alpha[ck, cnotk] / self.mf_beta[ck, cnotk])
                    E_ln_v_ck_to_cnotk += p_cnotk * (psi(self.mf_alpha[ck, cnotk])
                                                     - np.log(self.mf_beta[ck, cnotk]))

                    # Expected log scale of connections from cnotk to ck
                    E_v_cnotk_to_ck    += p_cnotk * (self.mf_alpha[cnotk, ck] / self.mf_beta[cnotk, ck])
                    E_ln_v_cnotk_to_ck += p_cnotk * (psi(self.mf_alpha[cnotk, ck])
                                                     - np.log(self.mf_beta[cnotk, ck]))

                # Compute E[ln p(A | c, p)]
                lp[ck] += Bernoulli().negentropy(E_x=E_A[k, notk],
                                                 E_notx=E_notA[k, notk],
                                                 E_ln_p=E_ln_p_ck_to_cnotk,
                                                 E_ln_notp=E_ln_notp_ck_to_cnotk).sum()
                lp[ck] += Bernoulli().negentropy(E_x=E_A[notk, k],
                                                 E_notx=E_notA[notk, k],
                                                 E_ln_p=E_ln_p_cnotk_to_ck,
                                                 E_ln_notp=E_ln_notp_cnotk_to_ck).sum()

                # Compute E[ln p(W | A=1, c, v)]
                lp[ck] += (E_A[k, notk] *
                           Gamma(self.kappa).negentropy(E_ln_lambda=E_ln_W_given_A[k, notk],
                                                        E_lambda=E_W_given_A[k,notk],
                                                        E_beta=E_v_ck_to_cnotk,
                                                        E_ln_beta=E_ln_v_ck_to_cnotk)).sum()
                lp[ck] += (E_A[notk, k] *
                           Gamma(self.kappa).negentropy(E_ln_lambda=E_ln_W_given_A[notk, k],
                                                        E_lambda=E_W_given_A[notk,k],
                                                        E_beta=E_v_cnotk_to_ck,
                                                        E_ln_beta=E_ln_v_cnotk_to_ck)).sum()

                # Compute expected log prob of self connection
                if self.allow_self_connections:
                    E_ln_p_ck_to_ck = psi(self.mf_tau1[ck, ck]) - psi(self.mf_tau0[ck, ck] + self.mf_tau1[ck, ck])
                    E_ln_notp_ck_to_ck = psi(self.mf_tau0[ck, ck]) - psi(self.mf_tau0[ck, ck] + self.mf_tau1[ck, ck])
                    lp[ck] += Bernoulli().negentropy(E_x=E_A[k, k],
                                                     E_notx=E_notA[k, k],
                                                     E_ln_p=E_ln_p_ck_to_ck,
                                                     E_ln_notp=E_ln_notp_ck_to_ck
                                                     )

                    E_v_ck_to_ck = self.mf_alpha[ck, ck] / self.mf_beta[ck, ck]
                    E_ln_v_ck_to_ck = psi(self.mf_alpha[ck, ck]) - np.log(self.mf_beta[ck, ck])
                    lp[ck] += (E_A[k, k] *
                               Gamma(self.kappa).negentropy(E_ln_lambda=E_ln_W_given_A[k, k],
                                                            E_lambda=E_W_given_A[k,k],
                                                            E_beta=E_v_ck_to_ck,
                                                            E_ln_beta=E_ln_v_ck_to_ck))

                # TODO: Get probability of impulse responses g

            # Normalize the log probabilities to update mf_m
            Z = logsumexp(lp)
            mk_hat = np.exp(lp - Z)

            self.mf_m[k,:] = (1.0 - stepsize) * self.mf_m[k,:] + stepsize * mk_hat

    def mf_update_p(self, E_A, E_notA, stepsize=1.0):
        """
        Mean field update for the CxC matrix of block connection probabilities

        :param E_A: expected adjacency matrix
        :return:
        """
        for c1 in xrange(self.C):
            for c2 in xrange(self.C):
                # Get the KxK matrix of joint class assignment probabilities
                pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                if self.allow_self_connections:
                    tau1_hat = self.tau1 + (pc1c2 * E_A).sum()
                    tau0_hat = self.tau0 + (pc1c2 * E_notA).sum()
                else:
                    # TODO: Account for self connections
                    tau1_hat = self.tau1 + (pc1c2 * E_A).sum()
                    tau0_hat = self.tau0 + (pc1c2 * E_notA).sum()

                self.mf_tau1[c1,c2] = (1.0 - stepsize) * self.mf_tau1[c1,c2] + stepsize * tau1_hat
                self.mf_tau0[c1,c2] = (1.0 - stepsize) * self.mf_tau0[c1,c2] + stepsize * tau0_hat

    def mf_update_v(self, E_A, E_W_given_A, stepsize=1.0):
        """
        Mean field update for the CxC matrix of block connection scales

        :param E_A:
        :param E_W_given_A: Expected W given A
        :return:
        """
        for c1 in xrange(self.C):
            for c2 in xrange(self.C):
                # Get the KxK matrix of joint class assignment probabilities
                pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                alpha_hat = self.alpha + (pc1c2 * E_A * self.kappa).sum()
                beta_hat = self.beta + (pc1c2 * E_A * E_W_given_A).sum()

                self.mf_alpha[c1,c2] = (1.0 - stepsize) * self.mf_alpha[c1,c2] + stepsize * alpha_hat
                self.mf_beta[c1,c2] = (1.0 - stepsize) * self.mf_beta[c1,c2] + stepsize * beta_hat

    def mf_update_m(self, stepsize=1.0):
        """
        Mean field update of the block probabilities

        :return:
        """
        pi_hat = self.pi + self.mf_m.sum(axis=0)
        self.mf_pi = (1.0 - stepsize) * self.mf_pi + stepsize * pi_hat

    def meanfieldupdate(self, weight_model,
                        update_p=True,
                        update_v=True,
                        update_m=True,
                        update_c=True):
        # Get expectations from the weight model
        E_A = weight_model.expected_A()
        E_notA = 1.0 - E_A
        E_W_given_A = weight_model.expected_W_given_A(1.0)
        E_ln_W_given_A = weight_model.expected_log_W_given_A(1.0)

        # Update the remaining SBM parameters
        # Bug fix: the p update was previously guarded by ``update_c``
        # instead of ``update_p`` (cf. meanfield_sgdstep, which is correct).
        if update_p:
            self.mf_update_p(E_A=E_A, E_notA=E_notA)

        if update_v:
            self.mf_update_v(E_A=E_A, E_W_given_A=E_W_given_A)

        if update_m:
            self.mf_update_m()

        # Update the block assignments
        if update_c:
            self.mf_update_c(E_A=E_A,
                             E_notA=E_notA,
                             E_W_given_A=E_W_given_A,
                             E_ln_W_given_A=E_ln_W_given_A)

    def meanfield_sgdstep(self,weight_model, minibatchfrac, stepsize,
                          update_p=True,
                          update_v=True,
                          update_m=True,
                          update_c=True):
        # NOTE(review): ``minibatchfrac`` is accepted for interface
        # compatibility but not used here — the network prior does not
        # scale with the minibatch; confirm against MeanFieldSVI contract.
        # Get expectations from the weight model
        E_A = weight_model.expected_A()
        E_notA = 1.0 - E_A
        E_W_given_A = weight_model.expected_W_given_A(1.0)
        E_ln_W_given_A = weight_model.expected_log_W_given_A(1.0)

        # Update the remaining SBM parameters
        if update_p:
            self.mf_update_p(E_A=E_A, E_notA=E_notA, stepsize=stepsize)

        if update_v:
            self.mf_update_v(E_A=E_A, E_W_given_A=E_W_given_A, stepsize=stepsize)

        if update_m:
            self.mf_update_m(stepsize=stepsize)

        # Update the block assignments
        if update_c:
            self.mf_update_c(E_A=E_A,
                             E_notA=E_notA,
                             E_W_given_A=E_W_given_A,
                             E_ln_W_given_A=E_ln_W_given_A,
                             stepsize=stepsize)

    def get_vlb(self,
                vlb_c=True,
                vlb_p=True,
                vlb_v=True,
                vlb_m=True):
        # import pdb; pdb.set_trace()
        vlb = 0

        # Get the VLB of the expected class assignments
        if vlb_c:
            E_ln_m = self.expected_log_m()
            for k in xrange(self.K):
                # Add the cross entropy of p(c | m)
                vlb += Discrete().negentropy(E_x=self.mf_m[k,:], E_ln_p=E_ln_m)

                # Subtract the negative entropy of q(c)
                vlb -= Discrete(self.mf_m[k,:]).negentropy()

        # Get the VLB of the connection probability matrix
        # Add the cross entropy of p(p | tau1, tau0)
        if vlb_p:
            vlb += Beta(self.tau1, self.tau0).\
                negentropy(E_ln_p=(psi(self.mf_tau1) - psi(self.mf_tau0 + self.mf_tau1)),
                           E_ln_notp=(psi(self.mf_tau0) - psi(self.mf_tau0 + self.mf_tau1))).sum()

            # Subtract the negative entropy of q(p)
            vlb -= Beta(self.mf_tau1, self.mf_tau0).negentropy().sum()

        # Get the VLB of the weight scale matrix, v
        # Add the cross entropy of p(v | alpha, beta)
        if vlb_v:
            vlb += Gamma(self.alpha, self.beta).\
                negentropy(E_lambda=self.mf_alpha/self.mf_beta,
                           E_ln_lambda=psi(self.mf_alpha) - np.log(self.mf_beta)).sum()

            # Subtract the negative entropy of q(v)
            vlb -= Gamma(self.mf_alpha, self.mf_beta).negentropy().sum()

        # Get the VLB of the block probability vector, m
        # Add the cross entropy of p(m | pi)
        if vlb_m:
            vlb += Dirichlet(self.pi).negentropy(E_ln_g=self.expected_log_m())

            # Subtract the negative entropy of q(m)
            vlb -= Dirichlet(self.mf_pi).negentropy()

        return vlb

    def resample_from_mf(self):
        """
        Resample from the mean field distribution

        :return:
        """
        self.m = np.random.dirichlet(self.mf_pi)
        self.p = np.random.beta(self.mf_tau1, self.mf_tau0)
        self.v = np.random.gamma(self.mf_alpha, 1.0/self.mf_beta)

        self.c = np.zeros(self.K, dtype=np.int)
        for k in xrange(self.K):
            self.c[k] = int(np.random.choice(self.C, p=self.mf_m[k,:]))
class StochasticBlockModel(GibbsSBM, MeanFieldSBM):
    """SBM supporting both Gibbs sampling and mean field inference.

    Pure mixin combination of GibbsSBM and MeanFieldSBM; adds no behavior
    of its own.
    """
    pass
class StochasticBlockModelFixedSparsity(StochasticBlockModel):
    """
    Special case of the SBM where the probability of connection, P,
    is fixed. This is valuable for Gibbs sampling, where there is
    a degenerate mode at the dense posterior.
    """
    def __init__(self, K, C=1,
                 p=None,
                 c=None, pi=1.0, m=None,
                 v=None, alpha=1.0, beta=1.0,
                 kappa=1.0,
                 allow_self_connections=True):
        assert p is not None, "CxC probability matrix must be given at init!"
        super(StochasticBlockModelFixedSparsity, self).\
            __init__(K=K, C=C,
                     p=p,
                     c=c, pi=pi, m=m,
                     v=v, alpha=alpha, beta=beta,
                     kappa=kappa,
                     allow_self_connections=allow_self_connections)

        # Re-set p after the super call so that it is always fixed,
        # regardless of how the base class initialized itself.
        if np.isscalar(p):
            assert p >= 0 and p <= 1, "p must be a probability"
            self.p = p * np.ones((C,C))
        else:
            assert isinstance(p, np.ndarray) and p.shape == (C,C) \
                   and np.amin(p) >= 0 and np.amax(p) <= 1.0, \
                "p must be a CxC matrix of probabilities"
            self.p = p

    def log_likelihood(self, x):
        """
        Compute the log likelihood of a set of SBM parameters.
        p is fixed, so it contributes no prior term.

        :param x: (m,p,v) tuple; a trailing c entry (as passed in by the
                  inherited log_probability) is tolerated and ignored.
        :return:
        """
        # Bug fix: the inherited log_probability passes a 4-tuple
        # (m, p, v, c); unpacking exactly three values raised ValueError.
        m, p, v = x[0], x[1], x[2]

        lp = 0
        lp += Dirichlet(self.pi).log_probability(m)
        lp += Gamma(self.alpha, self.beta).log_probability(v).sum()
        return lp

    def resample(self, data=[]):
        """Gibbs sweep over (v, c, m); p stays fixed."""
        if self.fixed:
            return

        A,W = data
        self.resample_v(A, W)
        self.resample_c(A, W)
        self.resample_m()

    def expected_p(self):
        """
        Compute the expected probability of a connection, averaging over c

        :return:
        """
        return self.P

    def expected_log_p(self):
        """
        Compute the expected log probability of a connection, averaging over c

        :return:
        """
        E_ln_p = np.log(self.P)
        if not self.allow_self_connections:
            np.fill_diagonal(E_ln_p, -np.inf)

        return E_ln_p

    def expected_log_notp(self):
        """
        Compute the expected log probability of NO connection, averaging over c

        :return:
        """
        # Bug fix: this previously returned a value only when ``self.fixed``
        # was True and fell through to an implicit None otherwise. P is
        # always fixed in this class, so compute it unconditionally.
        E_ln_notp = np.log(1.0 - self.P)
        if not self.allow_self_connections:
            np.fill_diagonal(E_ln_notp, 0.0)

        return E_ln_notp

    def meanfieldupdate(self, weight_model,
                        update_p=False,
                        update_v=True,
                        update_m=True,
                        update_c=True):
        assert update_p is False, "Cannot update p!"
        super(StochasticBlockModelFixedSparsity, self).\
            meanfieldupdate(weight_model,
                            update_p=False,
                            update_v=update_v,
                            update_m=update_m,
                            update_c=update_c)

    def meanfield_sgdstep(self, weight_model, minibatchfrac, stepsize,
                          update_p=False,
                          update_v=True,
                          update_m=True,
                          update_c=True):
        assert update_p is False, "Cannot update p!"
        super(StochasticBlockModelFixedSparsity, self).\
            meanfield_sgdstep(weight_model, minibatchfrac, stepsize,
                              update_p=False,
                              update_v=update_v,
                              update_m=update_m,
                              update_c=update_c)

    def get_vlb(self,
                vlb_c=True,
                vlb_p=False,
                vlb_v=True,
                vlb_m=True):
        assert vlb_p is False, "Cannot calculate vlb wrt p!"
        # Bug fix: the super call's result was previously discarded, so this
        # method always returned None instead of the variational lower bound.
        return super(StochasticBlockModelFixedSparsity, self).\
            get_vlb(vlb_c=vlb_c,
                    vlb_p=False,
                    vlb_v=vlb_v,
                    vlb_m=vlb_m)
class ErdosRenyiModel(StochasticBlockModel):
    """An Erdos-Renyi random graph: an SBM with exactly one block.

    Every node is deterministically assigned to block 0, so a single
    connection probability and weight scale are shared by all node pairs.
    """
    def __init__(self, K,
                 p=None, tau0=0.1, tau1=0.1,
                 v=None, alpha=1.0, beta=1.0,
                 kappa=1.0):
        # One block; all K nodes are assigned to it up front.
        block_assignments = np.zeros(K, dtype=np.int)
        super(ErdosRenyiModel, self).__init__(
            K, 1, c=block_assignments,
            p=p, tau0=tau0, tau1=tau1,
            v=v, alpha=alpha, beta=beta,
            kappa=kappa)
| [
"scott.linderman@gmail.com"
] | scott.linderman@gmail.com |
1e4ae96ba11ca841546ece2806bf73b10ec15673 | 6e8f2e28479566dbaa338300b2d61f784ff83f97 | /.history/code/live_20210419192449.py | f23ac4d6aa23ef684980fb5dfdd0b471fdb2e457 | [] | no_license | eeng5/CV-final-project | 55a7d736f75602858233ebc380c4e1d67ab2b866 | 580e28819560b86f6974959efb1d31ef138198fc | refs/heads/main | 2023-04-09T21:28:21.531293 | 2021-04-21T19:57:22 | 2021-04-21T19:57:22 | 352,703,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,586 | py | import os
import cv2
import sys
import numpy as np
from models import SimpleModel
from preprocess import Datasets
import hyperparameters as hp
import tensorflow as tf
from skimage.transform import resize
from PIL import Image, ImageFont, ImageDraw
from scipy.spatial import distance as dist
from imutils import face_utils
from imutils.video import VideoStream
import fastai
import fastai.vision
import imutils
import argparse
import time
import dlib
def createPixelArray(arr):
    """Turn a flat sequence of 2304 pixel intensities into a 48x48x1 image.

    Values are cast to uint8 first (so inputs outside 0-255 wrap modulo
    256, as numpy's uint8 does), then scaled into the [0, 1] float range.
    """
    pixels = np.array(arr, dtype=np.uint8).reshape((48, 48, 1))
    return pixels / 255.
# --- Model setup ---------------------------------------------------------
# Absolute path to a trained checkpoint; only valid on the author's machine.
weights_str = "/Users/Natalie/Desktop/cs1430/CV-final-project/code/checkpoints/simple_model/041321-113618/your.weights.e015-acc0.6121.h5"
os.chdir(sys.path[0])
model = tf.keras.models.load_model(weights_str)
#model = create_model()
# Build the model by calling it once on a symbolic input of the expected size.
model(tf.keras.Input(shape=(hp.img_size, hp.img_size, 3)))
model.load_weights(weights_str, by_name=False)
# model.compile(
#     optimizer=model.optimizer,
#     loss=model.loss_fn,
#     metrics=["sparse_categorical_accuracy"],
# )
#face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# Haar cascade face detector bundled with OpenCV.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# --- Capture setup -------------------------------------------------------
vs = VideoStream(src=0).start()
start = time.perf_counter()
data = []
time_value = 0
# Record annotated frames at 10 fps, 450x253 pixels.
out = cv2.VideoWriter(
    "liveoutput.avi", cv2.VideoWriter_fourcc("M", "J", "P", "G"), 10, (450, 253)
)
# --- Main loop: detect faces, classify each crop, annotate the frame -----
while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face_coord = face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(48, 48))
    for coords in face_coord:
        X, Y, w, h = coords
        H, W, _ = frame.shape
        # Expand the detection box, clamped to the frame boundaries.
        X_1, X_2 = (max(0, X - int(w)), min(X + int(1.3 * w), W))
        Y_1, Y_2 = (max(0, Y - int(0.1 * h)), min(Y + int(1.3 * h), H))
        # Fixed 48x48 grayscale crop fed to the classifier.
        img_cp = gray[Y_1:Y_1+48, X_1:X_1+48].copy()
        img_mod = createPixelArray(img_cp)
        # NOTE(review): img_mod is (48, 48, 1) with no batch dimension, while
        # the model was built for (img_size, img_size, 3) — confirm predict
        # actually accepts this shape.
        prediction = model.predict(img_mod)
        prediction = np.argmax(prediction)
        cv2.rectangle(
            img=frame,
            pt1=(X_1, Y_1),
            pt2=(X_2, Y_2),
            color=(128, 128, 0),
            thickness=2,
        )
        # Draw the predicted class index in the lower-left corner.
        cv2.putText(
            frame,
            str(prediction),
            (10, frame.shape[0] - 25),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (225, 255, 255),
            2,)
    cv2.imshow("frame", frame)
    out.write(frame)
    # Quit on 'q' keypress.
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
# --- Cleanup -------------------------------------------------------------
vs.stop()
out.release()
cv2.destroyAllWindows()
| [
"natalie_rshaidat@brown.edu"
] | natalie_rshaidat@brown.edu |
d8fab852c381fe6affdd605d97dda1f39af63a9f | cf7fed790b733b9a21ec6c65970e9346dba103f5 | /pyqt/getting_started/pyqt_thread.py | 9596b7a25da1a0f3c83e1b6191afc1df4f46539d | [
"MIT"
] | permissive | CospanDesign/python | a582050993efc1e6267683e38dd4665952ec6d40 | a3d81971621d8deed2f1fc738dce0e6eec0db3a7 | refs/heads/master | 2022-06-20T15:01:26.210331 | 2022-05-29T01:13:04 | 2022-05-29T01:13:04 | 43,620,126 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,855 | py | #! /usr/bin/python
import sys
import time
from PyQt4 import QtCore
from PyQt4 import QtGui
class MyApp(QtGui.QWidget):
    """Demo widget (Python 2 / PyQt4) comparing UI-blocking work in the main
    thread against work done on background QThreads that report back via
    old-style signals."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)

        self.setGeometry(300, 300, 280, 600)
        self.setWindowTitle('threads')
        self.layout = QtGui.QVBoxLayout(self)

        # Button that kicks off the thread demos.
        self.testButton = QtGui.QPushButton("test")
        self.connect(self.testButton, QtCore.SIGNAL("released()"), self.test)
        self.listwidget = QtGui.QListWidget(self)

        self.layout.addWidget(self.testButton)
        self.layout.addWidget(self.listwidget)

        # Keep references to running threads so they are not garbage collected.
        self.threadPool = []

    def add(self, text):
        """ Add item to list widget """
        print "Add: " + text
        self.listwidget.addItem(text)
        self.listwidget.sortItems()

    def addBatch(self,text="test",iters=6,delay=0.3):
        """ Add several items to list widget """
        # Runs in the calling thread; sleeping here blocks the UI.
        for i in range(iters):
            time.sleep(delay) # artificial time delay
            self.add(text+" "+str(i))

    def addBatch2(self,text="test",iters=6,delay=0.3):
        """Like addBatch, but emits a signal instead of touching widgets,
        so it is safe to run from a non-GUI thread."""
        for i in range(iters):
            time.sleep(delay) # artificial time delay
            self.emit( QtCore.SIGNAL('add(QString)'), text+" "+str(i) )

    def test(self):
        """Start the two background-thread demos."""
        self.listwidget.clear()
        # adding in main application: locks ui
        #self.addBatch("_non_thread",iters=6,delay=0.3)

        # adding by emitting signal in different thread
        self.threadPool.append( WorkThread() )
        self.connect( self.threadPool[len(self.threadPool)-1], QtCore.SIGNAL("update(QString)"), self.add )
        self.threadPool[len(self.threadPool)-1].start()

        # generic thread using signal
        self.threadPool.append( GenericThread(self.addBatch2,"from generic thread using signal ",delay=0.3) )
        # Reconnect to avoid stacking duplicate connections on repeated clicks.
        self.disconnect( self, QtCore.SIGNAL("add(QString)"), self.add )
        self.connect( self, QtCore.SIGNAL("add(QString)"), self.add )
        self.threadPool[len(self.threadPool)-1].start()
class WorkThread(QtCore.QThread):
    """Fixed-purpose worker thread: emits six 'update(QString)' signals,
    one every 0.3 seconds."""

    def __init__(self):
        QtCore.QThread.__init__(self)

    def __del__(self):
        # Block until the thread finishes so it is not destroyed mid-run.
        self.wait()

    def run(self):
        for i in range(6):
            time.sleep(0.3) # artificial time delay
            self.emit( QtCore.SIGNAL('update(QString)'), "from work thread " + str(i) )

        return
class GenericThread(QtCore.QThread):
    """Worker QThread that runs an arbitrary callable with the given
    positional and keyword arguments on its own thread."""

    def __init__(self, function, *args, **kwargs):
        super(GenericThread, self).__init__()
        self.function = function
        self.args = args
        self.kwargs = kwargs

    def __del__(self):
        # Block until the thread finishes so it is not destroyed mid-run.
        self.wait()

    def run(self):
        self.function(*self.args, **self.kwargs)
if __name__ == "__main__":
    # run
    # Create the Qt application, show the demo widget, and enter the event loop.
    app = QtGui.QApplication(sys.argv)
    test = MyApp()
    test.show()
    app.exec_()
| [
"cospan@gmail.com"
] | cospan@gmail.com |
9ce3ee0c55905c85b6ffaeba4bdb6394819a76cc | d2479998a965eb43372920effeaf32c9c500603e | /docs/scripts/uiexample.py | 30f2db9a2d9e74bb996bc94f040a45590c8a941a | [
"BSD-2-Clause"
] | permissive | cy-fir/flexx | 0f246e0c4a5e6d4b29946c8fb0f73790fa35d07f | 343de1b1549975a365962274f264a48e56d2305e | refs/heads/master | 2021-01-18T09:40:11.624129 | 2016-06-04T23:07:37 | 2016-06-04T23:07:37 | 60,951,284 | 1 | 0 | null | 2016-06-12T06:39:50 | 2016-06-12T06:39:50 | null | UTF-8 | Python | false | false | 5,612 | py | """
Small sphinx extension to show a UI example + result
"""
import os
import sys
import hashlib
import warnings
import subprocess
import importlib.util
from sphinx.util.compat import Directive
from docutils import nodes
from flexx import app
# Directory layout: exported example apps live under _build/html/ui/examples.
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(THIS_DIR))
HTML_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '_build', 'html'))

# Ensure the output directories exist (import-time side effect).
if not os.path.isdir(HTML_DIR + '/ui'):
    os.mkdir(HTML_DIR + '/ui')
if not os.path.isdir(HTML_DIR + '/ui/examples'):
    os.mkdir(HTML_DIR + '/ui/examples')

# Template wrapped around "simple" examples (snippets with no import);
# the snippet lines are joined into the body of App.init().
SIMPLE_CODE_T = """
from flexx import app, ui

class App(ui.Widget):

    def init(self):
"""  # mind the indentation

# Marker doctree node; visit_uiexample_html replaces it with an iframe.
class uiexample(nodes.raw): pass
def create_ui_example(filename, to_root, height=300):
    """ Given a filename, export the containing app to HTML, return
    generated HTML. Needs to be done via filename, not direct code, so
    that PyScript can obtain source.

    :param filename: path to a .py file containing the example app.
    :param to_root: relative path prefix from the embedding page to the
                    html root (used to build the iframe src).
    :param height: iframe height in pixels (error pages use 60).
    """
    code = open(filename, 'rb').read().decode()
    fname = os.path.split(filename)[1]
    # Output path: <HTML_DIR>/ui/examples/<name>.html
    filename_parts = 'ui', 'examples', fname[:-3] + '.html'
    filename_abs = os.path.join(HTML_DIR, *filename_parts)
    filename_rel = to_root + '/' + '/'.join(filename_parts)

    # Import
    # Execute the example as a module; on failure, write the error message
    # as the example's HTML page and warn instead of failing the build.
    try:
        spec = importlib.util.spec_from_file_location("example", filename)
        m = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(m)
    except Exception as err:
        err_text = str(err)
        msg = 'Example not generated. <pre>%s</pre>' % err_text
        if os.environ.get('READTHEDOCS', False):
            msg = 'This example is not build on read-the-docs. <pre>%s</pre>' % err_text
        open(filename_abs, 'wt', encoding='utf-8').write(msg)
        warnings.warn('Could not import ui example: %s' % err_text)
        return get_html(filename_rel, 60)

    # Get class name
    # Convention: the exported widget is App / MyApp / Example, or is named
    # explicitly in a first-line "# doc-export: ClassName" comment.
    line1 = code.splitlines()[0]
    class_name = None
    if 'class App' in code:
        class_name = 'App'
    elif 'class MyApp' in code:
        class_name = 'MyApp'
    elif 'class Example' in code:
        class_name = 'Example'
    elif line1.startswith('# doc-export:'):
        class_name = line1.split(':', 1)[1].strip()
    #
    if class_name:
        assert class_name.isidentifier()
    else:
        msg = 'Could not determine app widget class in:<pre>%s</pre>' % code
        warnings.warn(msg)
        open(filename_abs, 'wt', encoding='utf-8').write(msg)
        return get_html(filename_rel, height)

    # Export
    # Export failures abort the build (unlike import failures above).
    try:
        app.export(m.__dict__[class_name], filename_abs, False)
    except Exception as err:
        err_text = str(err)
        msg = 'Example not generated. <pre>%s</pre>' % err_text
        open(filename_abs, 'wt', encoding='utf-8').write(msg.replace('\\n', '<br />'))
        raise RuntimeError('Could not export ui example: %s\n%s' % (err_text, code) )
        #print('Could not create ui example: %s\n%s' % (err_text, code) )
    return get_html(filename_rel, height)
def get_html(filename_rel, height):
    """ Get the html to embed the given page into another page using an iframe.

    The page is shown inside a resizable div of the given initial ``height``
    (pixels), with a small link to open it in a new tab.
    """
    astyle = 'font-size:small; float:right;'
    dstyle = 'width: 500px; height: %ipx; align: center; resize:both; overflow: hidden; box-shadow: 5px 5px 5px #777;'
    istyle = 'width: 100%; height: 100%; border: 2px solid #094;'
    # Assemble the markup: link, resizable wrapper div, iframe, closing tag.
    parts = [
        "<a target='new' href='%s' style='%s'>open in new tab</a>" % (filename_rel, astyle),
        "<div style='%s'>" % (dstyle % height),
        "<iframe src='%s' style='%s'>iframe not supported</iframe>" % (filename_rel, istyle),
        "</div>",
    ]
    return ''.join(parts)
def visit_uiexample_html(self, node):
    """ HTML visitor: write the node's example code to a hashed .py file,
    export it via create_ui_example(), and append the embedding iframe
    markup to the document body.
    """
    global should_export_flexx_deps
    # NOTE(review): should_export_flexx_deps is declared global but never
    # read or assigned here; looks vestigial -- confirm before removing.
    # Fix for rtd
    if not hasattr(node, 'code'):
        return
    # Get code
    code = ori_code = node.code.strip() + '\n'
    # Is this a simple example? (no imports: wrap it in the App template)
    if 'import' not in code:
        code = SIMPLE_CODE_T + '\n    '.join([line for line in code.splitlines()])
    # Get id and filename; the md5 of the code keys the cached export
    this_id = hashlib.md5(code.encode('utf-8')).hexdigest()
    fname = 'example%s.html' % this_id
    # NOTE(review): ori_code and fname are currently unused.
    filename_py = os.path.join(HTML_DIR, 'ui', 'examples', 'example%s.py' % this_id)
    # Write Python file
    with open(filename_py, 'wb') as f:
        f.write(code.encode())
    # Get html file
    html = create_ui_example(filename_py, '..', node.height)
    self.body.append(html + '<br />')
def depart_uiexample_html(self, node):
    # Nothing to emit after the iframe markup written by the visitor.
    pass
class UIExampleDirective(Directive):
    """ The ``.. uiexample::`` directive: renders the example's source as a
    literal code block followed by a live rendering inside an iframe.
    """
    has_content = True
    def run(self):
        code = '\n'.join(self.content)
        # An optional first content line holding just an integer sets the
        # iframe height in pixels; otherwise a default of 300 is used and
        # the content is taken as-is.
        try:
            height = int(self.content[0])
        except Exception:
            height = 300
        else:
            code = code.split('\n', 1)[1].strip()
        # Placeholder node that the html visitor turns into an iframe
        iframe = uiexample('')
        iframe.code = code
        iframe.height = height
        # Literal source listing shown above the live example
        literal = nodes.literal_block(code, code)
        literal['language'] = 'python'
        literal['linenos'] = False
        return [literal, iframe]
def setup(Sphynx):
    """ Sphinx extension entry point: register the uiexample node (with its
    html visitor pair) and the ``uiexample`` directive.
    """
    #Sphynx.add_javascript('js-image-slider.js')
    #Sphynx.add_stylesheet('js-image-slider.css')
    Sphynx.add_node(uiexample, html=(visit_uiexample_html, depart_uiexample_html))
    Sphynx.add_directive('uiexample', UIExampleDirective)
| [
"almar.klein@gmail.com"
] | almar.klein@gmail.com |
8ecc268008bf76d6edc3ac8b32ae86a39041e1b4 | 6ad7325b4c04dad9e7552882f53a6cb146474652 | /crypto_analyzer/crypto/models.py | 5f6b7218b7985d6f1899c232a9f304a3a0eb1429 | [] | no_license | abhaystoic/cryptoguru | fa04c55e437dd41a64843b35a8d60d398511eb23 | 96562e881b40ee77a4704b40400caba9084bbfca | refs/heads/master | 2020-03-11T10:26:30.950772 | 2018-10-14T13:23:10 | 2018-10-14T13:23:10 | 129,942,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,650 | py | # -*- coding: utf-8 -*-
"""
TODO: Remove 'blank=True, null=True' wherever it doesn't make sense.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import models
MAX_CHAR_LEN = 1024
MAX_CHAR_LEN_URL = 2000
MAX_CHAR_LEN_BIG = 10000
# Create your models here.
class Cryptos(models.Model):
    """A cryptocurrency with its latest global market data.

    NOTE(review): IndianCryptos below duplicates this schema field-for-field;
    consider extracting an abstract base model to remove the duplication.
    """
    name = models.CharField(max_length=MAX_CHAR_LEN)
    coin_id = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    symbol = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    international_price_inr = models.FloatField(blank=True, null=True)
    rank = models.IntegerField(blank=True, null=True)
    price_usd = models.FloatField(blank=True, null=True)
    price_btc = models.FloatField(blank=True, null=True)
    volume_usd_24h = models.FloatField(blank=True, null=True)
    market_cap_usd = models.FloatField(blank=True, null=True)
    available_supply = models.FloatField(blank=True, null=True)
    total_supply = models.FloatField(blank=True, null=True)
    max_supply = models.FloatField(blank=True, null=True)
    percent_change_1h = models.FloatField(blank=True, null=True)
    percent_change_24h = models.FloatField(blank=True, null=True)
    percent_change_7d = models.FloatField(blank=True, null=True)
    # Timestamp string as reported by the upstream feed (kept verbatim).
    data_last_updated = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    volume_inr_24h = models.FloatField(blank=True, null=True)
    market_cap_inr = models.FloatField(blank=True, null=True)
    # auto_now_add: set once at row creation, not on subsequent saves.
    last_updated = models.DateTimeField(auto_now_add=True, blank=True, null=True)
    def __str__(self):
        return self.name
class IndianCryptos(models.Model):
    """Market data for cryptocurrencies -- presumably those traded on Indian
    exchanges (TODO confirm against the code that populates this table).

    NOTE(review): exact duplicate of the Cryptos schema above; consider an
    abstract base model shared by both.
    """
    name = models.CharField(max_length=MAX_CHAR_LEN)
    coin_id = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    symbol = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    international_price_inr = models.FloatField(blank=True, null=True)
    rank = models.IntegerField(blank=True, null=True)
    price_usd = models.FloatField(blank=True, null=True)
    price_btc = models.FloatField(blank=True, null=True)
    volume_usd_24h = models.FloatField(blank=True, null=True)
    market_cap_usd = models.FloatField(blank=True, null=True)
    available_supply = models.FloatField(blank=True, null=True)
    total_supply = models.FloatField(blank=True, null=True)
    max_supply = models.FloatField(blank=True, null=True)
    percent_change_1h = models.FloatField(blank=True, null=True)
    percent_change_24h = models.FloatField(blank=True, null=True)
    percent_change_7d = models.FloatField(blank=True, null=True)
    # Timestamp string as reported by the upstream feed (kept verbatim).
    data_last_updated = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    volume_inr_24h = models.FloatField(blank=True, null=True)
    market_cap_inr = models.FloatField(blank=True, null=True)
    # auto_now_add: set once at row creation, not on subsequent saves.
    last_updated = models.DateTimeField(auto_now_add=True, blank=True, null=True)
    def __str__(self):
        return self.name
class Exchanges(models.Model):
    """Latest ticker snapshot (buy/sell/last/average/volume) for an exchange.

    All fields are nullable -- presumably because the upstream feed may omit
    values; confirm against the ingestion code.
    """
    name = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    exchange_id = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    buy = models.FloatField(blank=True, null=True)
    sell = models.FloatField(blank=True, null=True)
    last = models.FloatField(blank=True, null=True)
    average = models.FloatField(blank=True, null=True)
    currency_symbol = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    volume = models.FloatField(blank=True, null=True)
    international_price_dollar = models.FloatField(blank=True, null=True)
    international_price_inr = models.FloatField(blank=True, null=True)
    last_updated = models.DateTimeField(blank=True, null=True, auto_now_add=True)
    def __str__(self):
        # name is nullable: returning None from __str__ raises TypeError,
        # so fall back to the empty string for unnamed rows.
        return self.name or ''
class ExchangesInfo(models.Model):
    """Static metadata about an exchange (urls and recognition flags)."""
    name = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    exchange_id = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    url = models.URLField(blank=True, null=True, max_length=MAX_CHAR_LEN_URL)
    website = models.URLField(blank=True, null=True, max_length=MAX_CHAR_LEN_URL)
    # NullBooleanField is deprecated in modern Django (use
    # BooleanField(null=True)); kept as-is to avoid a schema migration.
    active = models.NullBooleanField(default=False)
    bitcoin_org_recognized = models.NullBooleanField(default=False)
    last_updated = models.DateTimeField(blank=True, null=True, auto_now_add=True)
    def __str__(self):
        # name is nullable: never return None from __str__ (TypeError).
        return self.name or ''
class Message(models.Model):
    """A contact-form message submitted by a site visitor."""
    # NOTE(review): max_length on a TextField is not enforced at the DB level.
    first_name = models.TextField(max_length=MAX_CHAR_LEN_URL)
    last_name = models.TextField(blank=True, null=True)
    email = models.EmailField(max_length=MAX_CHAR_LEN, blank=True, null=True)
    message = models.TextField()
    last_updated = models.DateTimeField(blank=True, null=True, auto_now_add=True)
    def __str__(self):
        # last_name is nullable: ' '.join((..., None)) would raise TypeError,
        # so only join the parts that are actually set.
        return ' '.join(part for part in (self.first_name, self.last_name) if part)
class FAQ(models.Model):
    """A frequently-asked question entry shown on the site."""
    title = models.TextField(blank=True, null=True)
    content = models.TextField(blank=True, null=True)
    last_updated = models.DateTimeField(blank=True, null=True, auto_now_add=True)
    def __str__(self):
        # title is nullable: never return None from __str__ (TypeError).
        return self.title or ''
class User(models.Model):
    """A site user authenticated through a third-party provider.

    NOTE(review): this shadows the conventional auth ``User`` concept --
    presumably social-login data (provider/token/uid); confirm callers.
    """
    email = models.EmailField(unique=True)
    full_name = models.CharField(blank=True, null= True, max_length=MAX_CHAR_LEN)
    provider = models.CharField(blank=True, null= True, max_length=MAX_CHAR_LEN)
    # OAuth-style bearer token; MAX_CHAR_LEN_BIG allows long JWTs.
    token = models.CharField(blank=True, null= True, max_length=MAX_CHAR_LEN_BIG)
    image_url = models.URLField(blank=True, null= True, max_length=MAX_CHAR_LEN_URL)
    uid = models.CharField(blank=True, null= True, max_length=MAX_CHAR_LEN)
    preferred_currency_code = models.CharField(max_length=MAX_CHAR_LEN, default='INR')
    enable_notification = models.BooleanField(default=True)
    last_updated = models.DateTimeField(blank=True, null=True, auto_now_add=True)
    def __str__(self):
        return self.email
| [
"iabhaygupta90@gmail.com"
] | iabhaygupta90@gmail.com |
29ecde23a1d4b235940893455518d50ff84e2a2b | 44b41d2ca6dbfb3f75bf691210eceaf85f6de1de | /custom/tags.py | ed36c9d8c58dc9ac72ef22017988f92415927dd5 | [
"BSD-3-Clause"
] | permissive | heruix/ida-minsc | 388dd96736ec0ccb60e327da0709de82fd8a5417 | 5bd207684f2de617ed949724844482152f12f8ca | refs/heads/master | 2020-04-10T02:49:17.534885 | 2018-11-17T03:41:20 | 2018-11-17T03:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,316 | py | """
Tags module
This module exposes tools for exporting the currently defined tags
within the database. Once exported, these tags can then be pickled
or then re-applied to the same or another database. Some options
are allowed which will let a user apply translations to the tags
before applying them to a target database.
To fetch all of the tags from the database::
> res = custom.tags.read()
To export only specific tags from the database::
> res = custom.tags.export('tag1', 'tag2', ...)
To apply previously read tags to the database::
> custom.tags.apply(res)
To apply previously read tags with different names to the database::
> custom.tags.apply(res, tag1='my_tag1', tag2='my_tag2', ...)
"""
import six, sys, logging
import functools, operator, itertools, types, string
import database as db, function as func, structure as struc, ui
import internal
output = sys.stderr
### miscellaneous tag utilities
def list():
    """Return the tags for the all of the function contents within the database as a set.

    Note: intentionally shadows the builtin `list`; this module exports it as
    part of its public API (see `__all__` at the bottom of the file).
    """
    # chain.from_iterable avoids eagerly unpacking the generator that
    # chain(*...) required, and set() replaces the redundant identity
    # set-comprehension.
    return set(itertools.chain.from_iterable(res for _, res in db.selectcontents()))
### internal utility functions and classes
def lvarNameQ(name):
    '''Determine whether a `name` is something that IDA named automatically.'''
    # Stack variables are auto-named "arg_XX" / "var_XX" with a hex suffix.
    if name.startswith(('arg_', 'var_')):
        suffix = name.split('_', 2)[-1]
        return all(ch in string.hexdigits for ch in suffix)
    # Special frame pseudo-members are named " s" and " r".
    if name.startswith(' '):
        return name[1:] in {'s', 'r'}
    return False
def locationToAddress(loc):
    '''Convert the function location `loc` back into an address.'''
    # Anything that is not a (function, chunk-id, offset) tuple is assumed
    # to already be an address and is returned untouched.
    if not isinstance(loc, tuple):
        return loc
    f, cid, ofs = loc
    # Find the base of the cid'th chunk and add the offset back on.
    base, _ = next(bounds for index, bounds in enumerate(func.chunks(f)) if index == cid)
    return base + ofs
def addressToLocation(ea, chunks=None):
    """Convert the address `ea` to a `(function, id, offset)`.

    The fields `id` and `offset` represent the chunk index and the
    offset into the chunk for the function at `ea`. If the list
    `chunks` is specified as a parameter, then use it as a tuple
    of ranges in order to calculate the correct address.
    """
    F = func.by(ea)
    # Recompute the chunk list only when the caller did not supply one.
    chunks = chunks or [bounds for bounds in func.chunks(ea)]
    # Locate the chunk containing ea and remember its index and left bound.
    index, left = next((i, l) for i, (l, r) in enumerate(chunks) if l <= ea < r)
    return func.top(F), index, ea - left
class dummy(object):
    """
    A dummy object that is guaranteed to return False whenever it is compared
    against anything.
    """
    # Used below as a "no value" sentinel so comparisons against real tag
    # values never succeed (unlike None, which could be a legitimate value).
    def __eq__(self, other): return False
    # Python2-style fallback comparison; ignored on Python 3.
    def __cmp__(self, other): return -1
dummy = dummy()  # replace the class with its singleton instance
### read without using the tag cache
class read(object):
    """
    This namespace contains tools that can be used to manually read
    tags out of the database without using the cache.
    If `location` is specified as true, then read each contents tag
    according to its location rather than an address. This allows one
    to perform a translation of the tags in case the function chunks
    are at different addresses than when the tags were read.
    """
    def __new__(cls, location=False):
        '''Read all of the tags defined within the database.'''
        return cls.everything(location=location)
    ## reading the content from a function
    @classmethod
    def content(cls, ea):
        '''Iterate through every tag belonging to the contents of the function at `ea`.'''
        F = func.by(ea)
        # iterate through every address in the function
        for ea in func.iterate(F):
            ui.navigation.set(ea)
            # yield the tags
            res = db.tag(ea)
            if res: yield ea, res
        return
    ## reading the tags from a frame
    @classmethod
    def frame(cls, ea):
        '''Iterate through each field within the frame belonging to the function `ea`.'''
        F = func.by(ea)
        # iterate through all of the frame's members
        res = func.frame(F)
        for member in res.members:
            # if ida has named it and there's no comment, then skip
            if lvarNameQ(member.name) and not member.comment:
                continue
            # if it's a structure, then the type is the structure name
            if isinstance(member.type, struc.structure_t):
                logging.debug("{:s}.frame({:#x}) : Storing structure-based type as name for field {:+#x} with tne type {!s}.".format('.'.join((__name__, cls.__name__)), ea, member.offset, member.type))
                type = member.type.name
            # otherwise, the type is a tuple that we can serialize directly
            else:
                type = member.type
            # finally, save the parts of the member worth keeping
            yield member.offset, (member.name, type, member.comment)
        return
    ## reading everything from the entire database
    @classmethod
    def everything(cls, location=False):
        """Read all of the tags defined within the database.
        Returns a tuple of the format `(Globals, Contents, Frames)`. Each field
        is a dictionary keyed by location or offset that retains the tags that
        were read. If the boolean `location` was specified then key each
        contents tag by location instead of address.
        """
        global read
        # NOTE: progress messages go to the module-level `output` stream.
        # read the globals and the contents
        print >>output, '--> Grabbing globals...'
        Globals = { ea : res for ea, res in read.globals() }
        # read all content
        print >>output, '--> Grabbing contents from all functions...'
        Contents = { loc : res for loc, res in read.contents(location=location) }
        # read the frames
        print >>output, '--> Grabbing frames from all functions...'
        Frames = {ea : res for ea, res in read.frames()}
        # return everything back to the user
        return Globals, Contents, Frames
    ## reading the globals from the database
    @staticmethod
    def globals():
        '''Iterate through all of the tags defined globally within the database.'''
        ea, sentinel = db.config.bounds()
        # loop till we hit the end of the database
        while ea < sentinel:
            ui.navigation.auto(ea)
            funcQ = func.within(ea)
            # figure out which tag function to use
            f = func.tag if funcQ else db.tag
            # grab the tag and yield it
            res = f(ea)
            if res: yield ea, res
            # if we're in a function, then seek to the next chunk
            # (function tags are per-function, so skipping the chunk is safe)
            if funcQ:
                _, ea = func.chunk(ea)
                continue
            # otherwise, try the next address till we hit a sentinel value
            try: ea = db.a.next(ea)
            except internal.exceptions.OutOfBoundsError: ea = sentinel
        return
    ## reading the contents from the entire database
    @staticmethod
    def contents(location=False):
        """Iterate through the contents tags for all the functions within the database.
        Each iteration yields a tuple of the format `(location, tags)` where
        `location` can be either an address or a chunk identifier and offset
        depending on whether `location` was specified as true or not.
        """
        global read
        # Iterate through each function in the database
        for ea in db.functions():
            # it's faster to precalculate the chunks here
            F, chunks = func.by(ea), [ch for ch in func.chunks(ea)]
            # Iterate through the function's contents yielding each tag
            for ea, res in read.content(ea):
                loc = addressToLocation(ea, chunks=chunks) if location else ea
                yield loc, res
            continue
        return
    ## reading the frames from the entire database
    @staticmethod
    def frames():
        '''Iterate through the fields of each frame for all the functions defined within the database.'''
        global read
        for ea in db.functions():
            ui.navigation.procedure(ea)
            res = dict(read.frame(ea))
            if res: yield ea, res
        return
### Applying tags to the database
class apply(object):
    """
    This namespace contains tools that can be used to apply tags that
    have been previously read back into the database.
    Various functions defined within this namespace take a variable number of
    keyword arguments which represent a mapping for the tag names. When a
    tag name was specified, this mapping will be used to rename the tags
    before actually writing them back into the database.
    """
    # NOTE: tuple-unpacking parameters below are Python 2-only syntax.
    def __new__(cls, (Globals, Contents, Frames), **tagmap):
        '''Apply the tags in the argument `(Globals, Contents, Frames)` back into the database.'''
        res = Globals, Contents, Frames
        return cls.everything(res, **tagmap)
    ## applying the content to a function
    @classmethod
    def content(cls, Contents, **tagmap):
        '''Apply `Contents` back into a function's contents within the database.'''
        global apply
        return apply.contents(Contents, **tagmap)
    ## applying a frame to a function
    @classmethod
    def frame(cls, ea, frame, **tagmap):
        '''Apply the fields from `frame` back into the function at `ea`.'''
        tagmap_output = ", {:s}".format(', '.join("{:s}={:s}".format(k, v) for k, v in six.iteritems(tagmap))) if tagmap else ''
        F = func.frame(ea)
        for offset, (name, type, comment) in six.iteritems(frame):
            try:
                member = F.by_offset(offset)
            except internal.exceptions.MemberNotFoundError:
                logging.warn("{:s}.frame({:#x}, ...{:s}) : Unable to find frame member at {:+#x}. Skipping application of the name ({!r}), type ({!r}), and comment ({!r}) to it.".format('.'.join((__name__, cls.__name__)), ea, tagmap_output, offset, name, type, comment))
                continue
            if member.name != name:
                # NOTE(review): a name can start with at most one of these
                # prefixes, so this `any(not ...)` is always true; `all(...)`
                # was probably intended (warn only for user-named members).
                if any(not member.name.startswith(n) for n in ('arg_', 'var_', ' ')):
                    logging.warn("{:s}.frame({:#x}, ...{:s}) : Renaming frame member {:+#x} from the name {!r} to {!r}.".format('.'.join((__name__, cls.__name__)), ea, tagmap_output, offset, member.name, name))
                member.name = name
            # check what's going to be overwritten with different values prior to doing it
            state, res = map(internal.comment.decode, (member.comment, comment))
            # transform the new tag state using the tagmap
            new = { tagmap.get(name, name) : value for name, value in six.viewitems(res) }
            # check if the tag mapping resulted in the deletion of a tag
            if len(new) != len(res):
                for name in six.viewkeys(res) - six.viewkeys(new):
                    logging.warn("{:s}.frame({:#x}, ...{:s}) : Refusing requested tag mapping as it results in the tag {!r} overwriting tag {!r} for the frame member {:+#x}. The value {!r} would be overwritten by {!r}.".format('.'.join((__name__, cls.__name__)), ea, tagmap_output, name, tagmap[name], offset, res[name], res[tagmap[name]]))
                pass
            # warn the user about what's going to be overwritten prior to doing it
            for name in six.viewkeys(state) & six.viewkeys(new):
                if state[name] == new[name]: continue
                logging.warn("{:s}.frame({:#x}, ...{:s}) : Overwriting tag {!r} for frame member {:+#x} with new value {!r}. The old value was {!r}.".format('.'.join((__name__, cls.__name__)), ea, tagmap_output, name, offset, new[name], state[name]))
            # now we can update the current dictionary
            mapstate = { name : value for name, value in six.iteritems(new) if state.get(name, dummy) != value }
            state.update(mapstate)
            # convert it back to a multi-lined comment and assign it
            member.comment = internal.comment.encode(state)
            # if the type is a string, then figure out which structure to use
            if isinstance(type, basestring):
                try:
                    member.type = struc.by(type)
                except internal.exceptions.StructureNotFoundError:
                    logging.warn("{:s}.frame({:#x}, ...{:s}): Unable to find structure {!r} for member at {:+#x}. Skipping it.".format('.'.join((__name__, cls.__name__)), ea, tagmap_output, type, offset))
            # otherwise, it's a pythonic tuple that we can just assign
            else:
                member.type = type
            continue
        return
    ## apply everything to the entire database
    @classmethod
    def everything(cls, (Globals, Contents, Frames), **tagmap):
        '''Apply the tags in the argument `(Globals, Contents, Frames)` back into the database.'''
        global apply
        ## convert a sorted list keyed by an address into something that updates ida's navigation pointer
        def update_navigation(xs, setter):
            '''Call `setter` on ea for each iteration of list `xs`.'''
            for x in xs:
                ea, _ = x
                setter(ea)
                yield x
            return
        ## convert a sorted list keyed by a location into something that updates ida's navigation pointer
        def update_navigation_contents(xs, setter):
            '''Call `setter` on location for each iteration of list `xs`.'''
            for x in xs:
                loc, _ = x
                ea = locationToAddress(loc)
                setter(ea)
                yield x
            return
        ## handle globals
        print >>output, "--> Writing globals... ({:d} entr{:s})".format(len(Globals), 'y' if len(Globals) == 1 else 'ies')
        iterable = sorted(six.iteritems(Globals), key=operator.itemgetter(0))
        res = apply.globals(update_navigation(iterable, ui.navigation.auto), **tagmap)
        # FIXME: verify that res matches number of Globals
        ## handle contents
        print >>output, "--> Writing function contents... ({:d} entr{:s})".format(len(Contents), 'y' if len(Contents) == 1 else 'ies')
        iterable = sorted(six.iteritems(Contents), key=operator.itemgetter(0))
        res = apply.contents(update_navigation_contents(iterable, ui.navigation.set), **tagmap)
        # FIXME: verify that res matches number of Contents
        ## update any frames
        print >>output, "--> Applying frames to each function... ({:d} entr{:s})".format(len(Frames), 'y' if len(Frames) == 1 else 'ies')
        iterable = sorted(six.iteritems(Frames), key=operator.itemgetter(0))
        res = apply.frames(update_navigation(iterable, ui.navigation.procedure), **tagmap)
        # FIXME: verify that res matches number of Frames
        return
    ## applying tags to the globals
    @staticmethod
    def globals(Globals, **tagmap):
        '''Apply the tags in `Globals` back into the database.'''
        global apply
        # NOTE(review): apply.__class__ is `type`, so the messages below render
        # as "...type.globals"; presumably `apply` itself was intended.
        cls, tagmap_output = apply.__class__, ", {:s}".format(', '.join("{:s}={:s}".format(oldtag, newtag) for oldtag, newtag in six.iteritems(tagmap))) if tagmap else ''
        count = 0
        for ea, res in Globals:
            ns = func if func.within(ea) else db
            # grab the current (old) tag state
            state = ns.tag(ea)
            # transform the new tag state using the tagmap
            new = { tagmap.get(name, name) : value for name, value in six.viewitems(res) }
            # check if the tag mapping resulted in the deletion of a tag
            if len(new) != len(res):
                for name in six.viewkeys(res) - six.viewkeys(new):
                    logging.warn("{:s}.globals(...{:s}) : Refusing requested tag mapping as it results in the tag {!r} overwriting the tag {!r} in the global {:#x}. The value {!r} would be replaced with {!r}.".format('.'.join((__name__, cls.__name__)), tagmap_output, name, tagmap[name], ea, res[name], res[tagmap[name]]))
                pass
            # check what's going to be overwritten with different values prior to doing it
            for name in six.viewkeys(state) & six.viewkeys(new):
                if state[name] == new[name]: continue
                logging.warn("{:s}.globals(...{:s}) : Overwriting tag {!r} for global at {:#x} with new value {!r}. Old value was {!r}.".format('.'.join((__name__, cls.__name__)), tagmap_output, name, ea, new[name], state[name]))
            # now we can apply the tags to the global address
            try:
                [ ns.tag(ea, name, value) for name, value in six.iteritems(new) if state.get(name, dummy) != value ]
            except:
                logging.warn("{:s}.globals(...{:s}) : Unable to apply tags ({!r}) to global {:#x}.".format('.'.join((__name__, cls.__name__)), tagmap_output, new, ea), exc_info=True)
            # increase our counter
            count += 1
        return count
    ## applying contents tags to all the functions
    @staticmethod
    def contents(Contents, **tagmap):
        '''Apply the tags in `Contents` back into each function within the database.'''
        global apply
        # NOTE(review): same apply.__class__ oddity as in globals() above.
        cls, tagmap_output = apply.__class__, ", {:s}".format(', '.join("{:s}={:s}".format(oldtag, newtag) for oldtag, newtag in six.iteritems(tagmap))) if tagmap else ''
        count = 0
        for loc, res in Contents:
            ea = locationToAddress(loc)
            # warn the user if this address is not within a function
            if not func.within(ea):
                logging.warn("{:s}.contents(...{:s}) : Address {:#x} is not within a function. Using a global tag.".format('.'.join((__name__, cls.__name__)), tagmap_output, ea))
            # grab the current (old) tag state
            state = db.tag(ea)
            # transform the new tag state using the tagmap
            new = { tagmap.get(name, name) : value for name, value in six.viewitems(res) }
            # check if the tag mapping resulted in the deletion of a tag
            if len(new) != len(res):
                for name in six.viewkeys(res) - six.viewkeys(new):
                    logging.warn("{:s}.contents(...{:s}) : Refusing requested tag mapping as it results in the tag {!r} overwriting tag {!r} for the contents at {:#x}. The value {!r} would be overwritten by {!r}.".format('.'.join((__name__, cls.__name__)), tagmap_output, name, tagmap[name], ea, res[name], res[tagmap[name]]))
                pass
            # inform the user if any tags are being overwritten with different values
            for name in six.viewkeys(state) & six.viewkeys(new):
                if state[name] == new[name]: continue
                logging.warn("{:s}.contents(...{:s}) : Overwriting contents tag {!r} for address {:#x} with new value {!r}. Old value was {!r}.".format('.'.join((__name__, cls.__name__)), tagmap_output, name, ea, new[name], state[name]))
            # write the tags to the contents address
            try:
                [ db.tag(ea, name, value) for name, value in six.iteritems(new) if state.get(name, dummy) != value ]
            except:
                logging.warn("{:s}.contents(...{:s}) : Unable to apply tags {!r} to location {:#x}.".format('.'.join((__name__, cls.__name__)), tagmap_output, new, ea), exc_info=True)
            # increase our counter
            count += 1
        return count
    ## applying frames to all the functions
    @staticmethod
    def frames(Frames, **tagmap):
        '''Apply the fields from `Frames` back into each function's frame.'''
        global apply
        # NOTE(review): same apply.__class__ oddity as in globals() above.
        cls, tagmap_output = apply.__class__, ", {:s}".format(', '.join("{:s}={:s}".format(oldtag, newtag) for oldtag, newtag in six.iteritems(tagmap))) if tagmap else ''
        count = 0
        for ea, res in Frames:
            try:
                apply.frame(ea, res, **tagmap)
            except:
                logging.warn("{:s}.frames(...{:s}) : Unable to apply tags ({!r}) to frame at {:#x}.".format('.'.join((__name__, cls.__name__)), tagmap_output, res, ea), exc_info=True)
            # increase our counter
            count += 1
        return count
### Exporting tags from the database using the tag cache
class export(object):
    """
    This namespace contains tools that can be used to quickly
    export specific tags out of the database using the cache.
    If `location` is specified as true, then read each contents tag
    according to its location rather than an address. This allows one
    to perform a translation of the tags in case the function chunks
    are at different addresses than when the tags were read.
    """
    def __new__(cls, *tags, **location):
        '''Read the specified tags within the database using the cache.'''
        return cls.everything(*tags, **location)
    ## query the content from a function
    @classmethod
    def content(cls, F, *tags, **location):
        '''Iterate through the specified `tags` belonging to the contents of the function at `ea` using the cache.'''
        identity = lambda res: res
        translate = addressToLocation if location.get('location', False) else identity
        iterable = func.select(F, Or=tags) if tags else func.select(F)
        for ea, res in iterable:
            ui.navigation.set(ea)
            if res: yield translate(ea), res
        return
    ## query the frame from a function
    @classmethod
    def frame(cls, F, *tags):
        '''Iterate through each field containing the specified `tags` within the frame belonging to the function `ea`.'''
        global read, internal
        tags_ = { tag for tag in tags }
        for ofs, item in read.frame(F):
            field, type, comment = item
            # if the entire comment is in tags (like None) or no tags were specified, then save the entire member
            if not tags or comment in tags_:
                yield ofs, item
                continue
            # otherwise, decode the comment into a dictionary using only the tags the user asked for
            comment_ = internal.comment.decode(comment)
            res = { name : comment_[name] for name in six.viewkeys(comment_) & tags_ }
            # if anything was found, then re-encode it and yield to the user
            if res: yield ofs, (field, type, internal.comment.encode(res))
        return
    ## query the entire database for the specified tags
    @classmethod
    def everything(cls, *tags, **location):
        """Read all of the specified `tags` within the database using the cache.
        Returns a tuple of the format `(Globals, Contents, Frames)`. Each field
        is a dictionary keyed by location or offset that retains the tags that
        were read. If the boolean `location` was specified then key each
        contents tag by location instead of address.
        """
        global export
        # collect all the globals into a dictionary
        print >>output, '--> Grabbing globals (cached)...'
        iterable = export.globals(*tags)
        Globals = {ea : res for ea, res in itertools.ifilter(None, iterable)}
        # grab all the contents into a dictionary
        print >>output, '--> Grabbing contents from functions (cached)...'
        location = location.get('location', False)
        iterable = export.contents(*tags, location=location)
        Contents = {loc : res for loc, res in itertools.ifilter(None, iterable)}
        # grab any frames into a dictionary
        print >>output, '--> Grabbing frames from functions (cached)...'
        iterable = export.frames(*tags)
        Frames = {ea : res for ea, res in itertools.ifilter(None, iterable)}
        # return it back to the user
        return Globals, Contents, Frames
    ## query all the globals matching the specified tags
    @staticmethod
    def globals(*tags):
        '''Iterate through all of the specified global `tags` within the database using the cache.'''
        iterable = db.select(Or=tags) if tags else db.select()
        for ea, res in iterable:
            ui.navigation.auto(ea)
            if res: yield ea, res
        return
    ## query all the contents in each function that match the specified tags
    @staticmethod
    def contents(*tags, **location):
        """Iterate through the specified contents `tags` within the database using the cache.
        Each iteration yields a tuple of the format `(location, tags)` where
        `location` can be either an address or a chunk identifier and offset
        depending on whether `location` was specified as true or not.
        """
        global export
        location = location.get('location', False)
        iterable = db.selectcontents(Or=tags) if tags else db.selectcontents()
        for F, res in iterable:
            for loc, res in export.content(F, *res, location=location):
                if res: yield loc, res
            continue
        return
    ## query all the frames that match the specified tags
    @staticmethod
    def frames(*tags):
        '''Iterate through the fields in each function's frame containing the specified `tags`.'''
        global export
        # NOTE(review): tags_ is built but never used here; export.frame()
        # builds its own set from *tags.
        tags_ = {x for x in tags}
        for ea in db.functions():
            ui.navigation.procedure(ea)
            res = dict(export.frame(ea, *tags))
            if res: yield ea, res
        return
__all__ = ['list', 'read', 'export', 'apply']
| [
"arizvisa@gmail.com"
] | arizvisa@gmail.com |
6de0cdbb4029d1b8614623b599b9b04bb8352527 | 0be27c0a583d3a8edd5d136c091e74a3df51b526 | /no.of ai_greater_thn_aj.py | 2f9cb826b3435d34e935bb0c845cd199ceb9184a | [] | no_license | ssangitha/guvicode | 3d38942f5d5e27a7978e070e14be07a5269b01fe | ea960fb056cfe577eec81e83841929e41a31f72e | refs/heads/master | 2020-04-15T05:01:00.226391 | 2019-09-06T10:08:23 | 2019-09-06T10:08:23 | 164,405,935 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | n=int(input())
# Read the n space-separated integers, then count ordered index pairs
# (i, j) with i < j whose values satisfy values[i] < values[j].
values = list(map(int, input().split()))
pair_count = 0
for left in range(n - 1):
    for right in range(left + 1, n):
        if values[left] < values[right]:
            pair_count += 1
print(pair_count)
| [
"noreply@github.com"
] | ssangitha.noreply@github.com |
d4b30fe8753b69022b4ced22642564adad27249e | 100802fd56febbe28e11d45802e0ad661a9b98c4 | /Community/migrations/0011_auto_20170814_1452.py | 925f4afb0c0e91599fc192516ab064e71147f391 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ByrdOfAFeather/AlphaTrion | efc4cfcfa246adde5a0ce84eb5e295c0c61722f5 | 90b00b8f4e4c7fe3c495a5ded14b47c3210119ea | refs/heads/master | 2021-01-22T07:35:43.403968 | 2017-12-03T15:50:41 | 2017-12-03T15:50:41 | 102,306,857 | 0 | 2 | null | 2017-11-12T18:32:35 | 2017-09-04T01:39:45 | Python | UTF-8 | Python | false | false | 771 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-14 18:52
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: pins the `date` default and narrows the
    # pacing `rating` choices. Migrations are historical records and should
    # not normally be edited by hand.
    dependencies = [
        ('Community', '0010_auto_20170813_1831'),
    ]
    operations = [
        migrations.AlterField(
            model_name='communityinst',
            name='date',
            # Literal date baked in by makemigrations on the day it ran
            # (2017-08-14), because the model field used a dynamic default.
            field=models.DateField(default=datetime.date(2017, 8, 14)),
        ),
        migrations.AlterField(
            model_name='communitypacingratings',
            name='rating',
            field=models.CharField(choices=[('v', 'Very Good'), ('g', 'Good'), ('d', 'Decent'), ('b', 'Bad'), ('h', 'Very Bad')], default='d', max_length=1),
        ),
    ]
| [
"matthew_a_byrd@outlook.com"
] | matthew_a_byrd@outlook.com |
6a85cca1f26260ec941c33e52aa6b830c28f2b58 | f942f82fb1b9c2eb0c4cf03ca2254f4207fd08d2 | /Website/migrations/0010_mainpage_map.py | b8735d8c31c533b9ff5088df895a6f34ada18e94 | [] | no_license | mahdy-world/Fatoma-Restaurant | 2b6aec149c20d5526d5d7a505479cc29c811d666 | a500397741e72d0cf28dbb8f64c914144835d6c2 | refs/heads/master | 2023-06-27T19:27:35.606292 | 2021-07-31T13:53:18 | 2021-07-31T13:53:18 | 391,366,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | # Generated by Django 3.2.3 on 2021-07-15 08:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional map-link field to MainPage."""

    dependencies = [
        ('Website', '0009_auto_20210713_1517'),
    ]

    operations = [
        migrations.AddField(
            model_name='mainpage',
            name='map',
            # Optional URL/text for the map link; verbose_name is the Arabic
            # label "map link" shown in the admin.
            field=models.CharField(blank=True, max_length=200, null=True, verbose_name='لينك الخريطة'),
        ),
    ]
| [
"salmazidan1997@gmail.com"
] | salmazidan1997@gmail.com |
cd3fc40018659f5a71bc693d5d4872929557c09f | c0caed81b5b3e1498cbca4c1627513c456908e38 | /src/python/bindings/app/pyrosetta_toolkit/window_modules/scripting/rosetta_scripts.py | 4128e2e9c3bbbdd354f557ba92578a27d4418321 | [
"LicenseRef-scancode-other-permissive"
] | permissive | malaifa/source | 5b34ac0a4e7777265b291fc824da8837ecc3ee84 | fc0af245885de0fb82e0a1144422796a6674aeae | refs/heads/master | 2021-01-19T22:10:22.942155 | 2017-04-19T14:13:07 | 2017-04-19T14:13:07 | 88,761,668 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 753 | py |
#!/usr/bin/python
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
## @file /GUIs/pyrosetta_toolkit/window_modules/scripting/rosetta_scripts.py
## @brief Rosetta script creator, if I/we can figure out a general way to parse all the information available...
## @author Jared Adolf-Bryfogle (jadolfbr@gmail.com)
class Rosetta_Script_Creator:
def __init__(self):
| [
"malaifa@yahoo.com"
] | malaifa@yahoo.com |
8cd1edead6eef928be0fa086b3686a3f8e90920c | 230553326780c93f60d552a95d50018025724b4b | /py-code/5-12.py | e88ba3692995a134736508eeb8462624664258ff | [] | no_license | g-lyc/PRACTICE | 55108dbeb75893e4e6631ce3968420f1da0266ef | 492f72a1c14b53982ada478890e6a5487a67c46e | refs/heads/master | 2022-08-21T17:33:29.682865 | 2022-08-15T02:58:17 | 2022-08-15T02:58:17 | 51,586,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | # -*- coding: UTF-8 -*-
import sys

# Python 2 only: largest plain-int value on this platform (removed in Python 3).
print sys.maxint
# Most negative plain int is -(sys.maxint) - 1 (two's complement lower bound).
print -sys.maxint-1
# Named struct of float implementation limits (max, min, epsilon, dig, ...).
print sys.float_info
# Named struct describing the long-integer representation (bits per digit, ...).
print sys.long_info
| [
"309080979@qq.com"
] | 309080979@qq.com |
256dbae320ac7e2fd1ace452a453bb4baf2e5459 | e10f2f70f9d1f4c4b47394043bb6e2b3f873da7e | /collections/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/test_compare_policies.py | c821f7a41a5ba42ad126cce166c65889a68e45b4 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | hindman-redhat/automated-smart-management-2 | 62ec5510a7be7d712960c357fabe6a6bc63ad344 | 5450ccd71f2a4ba568a7f11b03466e1554ae0087 | refs/heads/main | 2023-04-02T13:53:57.710208 | 2021-03-01T21:51:22 | 2021-03-01T21:51:22 | 353,029,627 | 0 | 0 | MIT | 2021-03-30T14:26:02 | 2021-03-30T14:26:02 | null | UTF-8 | Python | false | false | 13,956 | py | # (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import unittest
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
class Ec2UtilsComparePolicies(unittest.TestCase):
    """Unit tests for ``compare_policies``.

    ``compare_policies(a, b)`` is expected to return a falsy value when the
    two IAM policy documents are semantically equal and truthy when they
    differ; the fixtures below probe ordering, string/list, bool/string,
    number/string, Version-default and wildcard-Principal equivalences.
    """

    # ========================================================
    # Setup some initial data that we can use within our tests
    # ========================================================
    def setUp(self):
        # A pair of simple IAM Trust relationships using bools, the first a
        # native bool the second a quoted string
        self.bool_policy_bool = {
            'Version': '2012-10-17',
            'Statement': [
                {
                    "Action": "sts:AssumeRole",
                    "Condition": {
                        "Bool": {"aws:MultiFactorAuthPresent": True}
                    },
                    "Effect": "Allow",
                    "Principal": {"AWS": "arn:aws:iam::XXXXXXXXXXXX:root"},
                    "Sid": "AssumeRoleWithBoolean"
                }
            ]
        }
        self.bool_policy_string = {
            'Version': '2012-10-17',
            'Statement': [
                {
                    "Action": "sts:AssumeRole",
                    "Condition": {
                        "Bool": {"aws:MultiFactorAuthPresent": "true"}
                    },
                    "Effect": "Allow",
                    "Principal": {"AWS": "arn:aws:iam::XXXXXXXXXXXX:root"},
                    "Sid": "AssumeRoleWithBoolean"
                }
            ]
        }

        # A pair of simple bucket policies using numbers, the first a
        # native int the second a quoted string
        self.numeric_policy_number = {
            'Version': '2012-10-17',
            'Statement': [
                {
                    "Action": "s3:ListBucket",
                    "Condition": {
                        "NumericLessThanEquals": {"s3:max-keys": 15}
                    },
                    "Effect": "Allow",
                    "Resource": "arn:aws:s3:::examplebucket",
                    "Sid": "s3ListBucketWithNumericLimit"
                }
            ]
        }
        self.numeric_policy_string = {
            'Version': '2012-10-17',
            'Statement': [
                {
                    "Action": "s3:ListBucket",
                    "Condition": {
                        "NumericLessThanEquals": {"s3:max-keys": "15"}
                    },
                    "Effect": "Allow",
                    "Resource": "arn:aws:s3:::examplebucket",
                    "Sid": "s3ListBucketWithNumericLimit"
                }
            ]
        }

        self.small_policy_one = {
            'Version': '2012-10-17',
            'Statement': [
                {
                    'Action': 's3:PutObjectAcl',
                    'Sid': 'AddCannedAcl2',
                    'Resource': 'arn:aws:s3:::test_policy/*',
                    'Effect': 'Allow',
                    'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
                }
            ]
        }
        # The same as small_policy_one, except the single resource is in a list and the contents of Statement are jumbled
        self.small_policy_two = {
            'Version': '2012-10-17',
            'Statement': [
                {
                    'Effect': 'Allow',
                    'Action': 's3:PutObjectAcl',
                    'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']},
                    'Resource': ['arn:aws:s3:::test_policy/*'],
                    'Sid': 'AddCannedAcl2'
                }
            ]
        }

        # Three copies of the same statement differing only in the top-level
        # 'Version' key: absent, the old 2008 default, and the current 2012 one.
        self.version_policy_missing = {
            'Statement': [
                {
                    'Action': 's3:PutObjectAcl',
                    'Sid': 'AddCannedAcl2',
                    'Resource': 'arn:aws:s3:::test_policy/*',
                    'Effect': 'Allow',
                    'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
                }
            ]
        }
        self.version_policy_old = {
            'Version': '2008-10-17',
            'Statement': [
                {
                    'Action': 's3:PutObjectAcl',
                    'Sid': 'AddCannedAcl2',
                    'Resource': 'arn:aws:s3:::test_policy/*',
                    'Effect': 'Allow',
                    'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
                }
            ]
        }
        self.version_policy_new = {
            'Version': '2012-10-17',
            'Statement': [
                {
                    'Action': 's3:PutObjectAcl',
                    'Sid': 'AddCannedAcl2',
                    'Resource': 'arn:aws:s3:::test_policy/*',
                    'Effect': 'Allow',
                    'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
                }
            ]
        }

        self.larger_policy_one = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Sid": "Test",
                    "Effect": "Allow",
                    "Principal": {
                        "AWS": [
                            "arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
                            "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
                        ]
                    },
                    "Action": "s3:PutObjectAcl",
                    "Resource": "arn:aws:s3:::test_policy/*"
                },
                {
                    "Effect": "Allow",
                    "Principal": {
                        "AWS": "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
                    },
                    "Action": [
                        "s3:PutObject",
                        "s3:PutObjectAcl"
                    ],
                    "Resource": "arn:aws:s3:::test_policy/*"
                }
            ]
        }
        # The same as larger_policy_one, except having a list of length 1 and jumbled contents
        self.larger_policy_two = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Principal": {
                        "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"]
                    },
                    "Effect": "Allow",
                    "Resource": "arn:aws:s3:::test_policy/*",
                    "Action": [
                        "s3:PutObject",
                        "s3:PutObjectAcl"
                    ]
                },
                {
                    "Action": "s3:PutObjectAcl",
                    "Principal": {
                        "AWS": [
                            "arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
                            "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
                        ]
                    },
                    "Sid": "Test",
                    "Resource": "arn:aws:s3:::test_policy/*",
                    "Effect": "Allow"
                }
            ]
        }
        # Different than larger_policy_two: a different principal is given
        self.larger_policy_three = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Principal": {
                        "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"]
                    },
                    "Effect": "Allow",
                    "Resource": "arn:aws:s3:::test_policy/*",
                    "Action": [
                        "s3:PutObject",
                        "s3:PutObjectAcl"]
                },
                {
                    "Action": "s3:PutObjectAcl",
                    "Principal": {
                        "AWS": [
                            "arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
                            "arn:aws:iam::XXXXXXXXXXXX:user/testuser3"
                        ]
                    },
                    "Sid": "Test",
                    "Resource": "arn:aws:s3:::test_policy/*",
                    "Effect": "Allow"
                }
            ]
        }

        # Minimal policy using wildcarded Principal
        self.wildcard_policy_one = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Principal": {
                        "AWS": ["*"]
                    },
                    "Effect": "Allow",
                    "Resource": "arn:aws:s3:::test_policy/*",
                    "Action": [
                        "s3:PutObject",
                        "s3:PutObjectAcl"]
                }
            ]
        }
        # Minimal policy using wildcarded Principal
        self.wildcard_policy_two = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Principal": "*",
                    "Effect": "Allow",
                    "Resource": "arn:aws:s3:::test_policy/*",
                    "Action": [
                        "s3:PutObject",
                        "s3:PutObjectAcl"]
                }
            ]
        }

    # ========================================================
    # ec2.compare_policies
    # ========================================================
    def test_compare_small_policies_without_differences(self):
        """ Testing two small policies which are identical except for:
                * The contents of the statement are in different orders
                * The second policy contains a list of length one whereas in the first it is a string
        """
        self.assertFalse(compare_policies(self.small_policy_one, self.small_policy_two))

    def test_compare_large_policies_without_differences(self):
        """ Testing two larger policies which are identical except for:
                * The statements are in different orders
                * The contents of the statements are also in different orders
                * The second contains a list of length one for the Principal whereas in the first it is a string
        """
        self.assertFalse(compare_policies(self.larger_policy_one, self.larger_policy_two))

    def test_compare_larger_policies_with_difference(self):
        """ Testing two larger policies which are identical except for:
                * one different principal
        """
        self.assertTrue(compare_policies(self.larger_policy_two, self.larger_policy_three))

    def test_compare_smaller_policy_with_larger(self):
        """ Testing two policies of different sizes """
        self.assertTrue(compare_policies(self.larger_policy_one, self.small_policy_one))

    def test_compare_boolean_policy_bool_and_string_are_equal(self):
        """ Testing two policies one using a quoted boolean, the other a bool """
        self.assertFalse(compare_policies(self.bool_policy_string, self.bool_policy_bool))

    def test_compare_numeric_policy_number_and_string_are_equal(self):
        """ Testing two policies one using a quoted number, the other an int """
        self.assertFalse(compare_policies(self.numeric_policy_string, self.numeric_policy_number))

    def test_compare_version_policies_defaults_old(self):
        """ Testing that a policy without Version is considered identical to one
        with the 'old' Version (by default)
        """
        self.assertFalse(compare_policies(self.version_policy_old, self.version_policy_missing))
        self.assertTrue(compare_policies(self.version_policy_new, self.version_policy_missing))

    def test_compare_version_policies_default_disabled(self):
        """ Testing that a policy without Version not considered identical when default_version=None
        """
        # Note: a missing Version is not even equal to itself when defaulting is disabled.
        self.assertFalse(compare_policies(self.version_policy_missing, self.version_policy_missing, default_version=None))
        self.assertTrue(compare_policies(self.version_policy_old, self.version_policy_missing, default_version=None))
        self.assertTrue(compare_policies(self.version_policy_new, self.version_policy_missing, default_version=None))

    def test_compare_version_policies_default_set(self):
        """ Testing that a policy without Version is only considered identical
        when default_version="2008-10-17"
        """
        self.assertFalse(compare_policies(self.version_policy_missing, self.version_policy_missing, default_version="2012-10-17"))
        self.assertTrue(compare_policies(self.version_policy_old, self.version_policy_missing, default_version="2012-10-17"))
        self.assertFalse(compare_policies(self.version_policy_old, self.version_policy_missing, default_version="2008-10-17"))
        self.assertFalse(compare_policies(self.version_policy_new, self.version_policy_missing, default_version="2012-10-17"))
        self.assertTrue(compare_policies(self.version_policy_new, self.version_policy_missing, default_version="2008-10-17"))

    def test_compare_version_policies_with_none(self):
        """ Testing that comparing with no policy works
        """
        self.assertTrue(compare_policies(self.small_policy_one, None))
        self.assertTrue(compare_policies(None, self.small_policy_one))
        self.assertFalse(compare_policies(None, None))

    def test_compare_wildcard_policies_without_differences(self):
        """ Testing two small wildcard policies which are identical except for:
                * Principal: "*" vs Principal: ["AWS": "*"]
        """
        self.assertFalse(compare_policies(self.wildcard_policy_one, self.wildcard_policy_two))
| [
"mike.savage@gmail.com"
] | mike.savage@gmail.com |
84bc6efbd7aee35a2b7b690cc31e5af6c753d0c3 | 6ef22466e1649ebae37dd19cba29a53b2f020317 | /imu9dof/scripts/imu9250.py | a51cf2ea32df15f18e0e58a31fcde0233430ad3f | [] | no_license | rishabhdevyadav/ROS_IMU_Filter | 7272cb7a950762c13e12b9bbf5f3cf6939cfea4d | f17438101c8e2cfc645fd89d79c76d5e7e6d5d40 | refs/heads/master | 2021-03-21T18:49:16.242474 | 2020-05-25T05:09:01 | 2020-05-25T05:09:01 | 247,321,664 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,106 | py | #!/usr/bin/python
from __future__ import division
# coding: utf-8
import rospy
import tf
from sensor_msgs.msg import Imu, MagneticField
from std_msgs.msg import Float64
from geometry_msgs.msg import Vector3
import MPU9250
from math import radians
# Magnetometer calibration: per-axis (x, y, z) hard-iron offsets subtracted
# from the raw reading, then per-axis soft-iron scale factors applied.
# Presumably produced by a prior calibration run — re-calibrate per sensor.
MAG_HARD_BIAS = (113.90899999999999, -40.54, -16.3515)
MAG_SOFT_BIAS = (0.9602207761635727, 0.9829804630346844, 1.0624072704609615)
# Alternative calibration set kept for reference (different sensor/run).
#MAG_HARD_BIAS = (132.1605, -30.133499999999998, -23.2225)
#MAG_SOFT_BIAS = (1.0291878517105106, 0.9204656212387662, 1.061623641331525)

# Conversion from g to m/s^2 (NOTE(review): standard gravity is 9.80665).
G_TO_MS2 = 9.8

# Single shared driver instance for the MPU-9250 IMU.
mpu9250 = MPU9250.MPU9250()
# shutdown ROS on interrupt
def shutdown():
    """ROS shutdown hook: log the exit and pause so pending messages flush."""
    rospy.loginfo("Shutting Down Ploting")
    rospy.sleep(1)
try:
rospy.init_node('IMU_Plotting', anonymous=True)
rospy.on_shutdown(shutdown)
rate = rospy.Rate(7)
imu_pub = rospy.Publisher('imu/data_raw', Imu, queue_size=10)
mag_pub = rospy.Publisher('imu/mag', MagneticField, queue_size=10)
imu_msg = Imu()
mag_msg = MagneticField()
rospy.loginfo("IMU STARTED")
while True and not rospy.is_shutdown():
try:
m9a = mpu9250.readAccel()
m9g = mpu9250.readGyro()
mag = mpu9250.readMagnet()
m9a = [G_TO_MS2 * x for x in m9a]
m9g = [radians(x) for x in m9g]
mx, my, mz = ((mag[x] - MAG_HARD_BIAS[x]) * MAG_SOFT_BIAS[x] for x in range(3))
# Fill mag msg
mag_msg.header.stamp = rospy.get_rostime()
mag_msg.magnetic_field.x = mx
mag_msg.magnetic_field.y = my
mag_msg.magnetic_field.z = mz
# create imu msg
q0 = 1.0 #W
q1 = 0.0 #X
q2 = 0.0 #Y
q3 = 0.0 #Z
#Fill imu message
imu_msg.header.stamp = rospy.get_rostime()
imu_msg.header.frame_id = 'imu_raw'
imu_msg.orientation.x = q1
imu_msg.orientation.y = q2
imu_msg.orientation.z = q3
imu_msg.orientation.w = q0
imu_msg.orientation_covariance[0] = 1e6
imu_msg.orientation_covariance[0] = 1e6
imu_msg.orientation_covariance[0] = 0.1
imu_msg.angular_velocity.x = m9g[0]
imu_msg.angular_velocity.y = m9g[1]
imu_msg.angular_velocity.z = m9g[2]
imu_msg.angular_velocity_covariance[0] = 1e6
imu_msg.angular_velocity_covariance[4] = 1e6
imu_msg.angular_velocity_covariance[8] = 0.1
imu_msg.linear_acceleration.x = m9a[0]
imu_msg.linear_acceleration.y = m9a[1]
imu_msg.linear_acceleration.z = m9a[2]
imu_msg.linear_acceleration_covariance[0] = 1e6
imu_msg.linear_acceleration_covariance[4] = 1e6
imu_msg.linear_acceleration_covariance[8] = 0.1
imu_pub.publish(imu_msg)
mag_pub.publish(mag_msg)
rate.sleep()
except KeyboardInterrupt:
break
except rospy.ROSInterruptException:
rospy.logwarn("ROS_NODE_ENDED")
except Exception as e:
rospy.logerr('IMU NODE EXCEPTION: ', e)
| [
"rishabhdevyadav95@gmail.com"
] | rishabhdevyadav95@gmail.com |
e55af56ffc42aa26714400df063eb385adc9fb93 | fb5c5d50d87a6861393d31911b9fae39bdc3cc62 | /Scripts/sims4communitylib/events/build_buy/events/build_buy_enter.py | fb2938792871539d80fe2bd229abeb49d918e76f | [
"CC-BY-4.0"
] | permissive | ColonolNutty/Sims4CommunityLibrary | ee26126375f2f59e5567b72f6eb4fe9737a61df3 | 58e7beb30b9c818b294d35abd2436a0192cd3e82 | refs/heads/master | 2023-08-31T06:04:09.223005 | 2023-08-22T19:57:42 | 2023-08-22T19:57:42 | 205,197,959 | 183 | 38 | null | 2023-05-28T16:17:53 | 2019-08-29T15:48:35 | Python | UTF-8 | Python | false | false | 1,721 | py | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from sims4communitylib.events.event_handling.common_event import CommonEvent
from zone import Zone
class S4CLBuildBuyEnterEvent(CommonEvent):
    """S4CLBuildBuyEnterEvent(zone)

    Dispatched when the player enters Build/Buy mode on a lot.

    :Example usage:

    .. highlight:: python
    .. code-block:: python

        from sims4communitylib.events.event_handling.common_event_registry import CommonEventRegistry
        from sims4communitylib.modinfo import ModInfo

        class ExampleEventListener:

            # A listener function must meet all of these requirements:
            # - It is a staticmethod.
            # - Its single required argument is named "event_data".
            # - That argument is type-hinted with the event class being listened for.
            # - "handle_events" receives the name of your Mod.
            @staticmethod
            @CommonEventRegistry.handle_events(ModInfo.get_identity().name)
            def handle_event(event_data: S4CLBuildBuyEnterEvent):
                pass

    :param zone: The zone the player has entered Build/Buy on.
    :type zone: Zone
    """
    def __init__(self, zone: Zone):
        # Keep a reference to the zone for later retrieval via the property.
        self._zone = zone

    @property
    def zone(self) -> Zone:
        """The zone the event occurred on.

        :return: The zone the event occurred on.
        :rtype: Zone
        """
        return self._zone
| [
"ColonolNutty@hotmail.com"
] | ColonolNutty@hotmail.com |
8a55e97828d00f1b9b7a0e1ecdafeed6a202d86d | 0d1340a1385e2cb693653fe8bf87a5e2e3a6ef61 | /pyDynmap.py | c285eb77899bce74ebe4ec479c8720c10e846051 | [] | no_license | Bowserinator/PyDynmap | 3c5943270c58dded689ebfb04e45d900856b4e50 | 83e957ff88fefe77606465cc8ac838cfaf2b8624 | refs/heads/master | 2022-12-04T21:13:51.648404 | 2016-03-20T22:38:50 | 2016-03-20T22:38:50 | 54,344,098 | 2 | 3 | null | 2022-03-26T02:59:13 | 2016-03-20T22:34:48 | Python | UTF-8 | Python | false | false | 12,321 | py | import urllib, json, math, requests, re
class Dynmap(object):
    """Client for a Minecraft Dynmap web interface (Python 2).

    Pulls live world state from ``<url>/up/world/world/`` and, when the
    server exposes GriefPrevention marker files, claim data for the
    overworld, nether and end dimensions.
    """

    def __init__(self, url):  # Dynmap url, ie dynmap.starcatcher.us
        if not url.startswith('http'):
            url = "http://" + url
        self.url = url
        self.link = url + "/up/world/world/"
        try:
            self.decoded = self._fetch_json(self.link)
        except Exception:
            raise Exception("The url could not be accessed, maybe you mistyped it?")
        # Claim-marker URLs for each dimension (set even if fetching fails,
        # so update() can retry later).
        self.claims = url + "/tiles/_markers_/marker_world.json"
        self.claims_nether = url + "/tiles/_markers_/marker_world_nether.json"
        self.claims_end = url + "/tiles/_markers_/marker_world_the_end.json"
        try:
            self._fetch_claims()
        except Exception:
            print("Warning: Could not get claim data. This might be because your server does not support GriefProtection claims or the url was misconfigered. (Ignore if your server doesn't have GreifProtection installed)")

    def _fetch_json(self, link):
        # Download `link` and parse the response body as JSON.
        f = urllib.urlopen(link)
        return json.loads(f.read())

    def _fetch_claims(self):
        # Refresh GriefPrevention claim data for all three dimensions.
        # BUG FIX: the original referenced `.read` without calling it and
        # re-parsed the stale overworld payload for the nether and end.
        self.claimdata = self._fetch_json(self.claims)
        self.claimdatanether = self._fetch_json(self.claims_nether)
        self.claimdataend = self._fetch_json(self.claims_end)

    def update(self):  # Update the current dynmap information
        # BUG FIX: the original used the bare name `url` here (NameError);
        # the URLs computed in __init__ are reused instead.
        self.decoded = self._fetch_json(self.link)
        try:
            self._fetch_claims()
        except Exception:
            print("Warning: Could not get claim data. This might be because your server does not support GriefProtection claims or the url was misconfigered.")

    def getServerTick(self):  # Returns time in Minecraft ticks, 0-24000 I think
        return self.decoded["servertime"]

    def getServerTime(self):  # Example output: {"time":"19:00","canSleep":True}
        time = int(self.decoded["servertime"])
        str_time = ""
        # Tick 0 corresponds to 06:00 in-game; 1000 ticks per hour.
        hours = time / 1000.0
        hours += 6
        if hours > 24:
            hours -= 24
        mins = (hours - math.floor(hours)) * 60
        hours = str(int(hours))
        canSleep = False
        if mins >= 10:
            str_time = hours + ":" + str(int(mins))
        if mins < 10:
            str_time = hours + ":0" + str(int(mins))
        # Sleeping is possible at night, or (presumably) during thunderstorms.
        # NOTE(review): "isThundering" may be a bool rather than the string
        # "true" depending on the server; behavior kept as originally written.
        if time >= 12541 and time <= 23458:
            canSleep = True
        elif self.decoded["isThundering"] == "true":
            canSleep = True
        return {"time": str_time, "canSleep": canSleep}

    def getPlayers(self):  # Gets JSON of current players
        return self.decoded["players"]

    def getPlayerData(self, name):  # Get player data for name (case-insensitive)
        for i in self.getPlayers():
            if i["name"].lower() == name.lower():
                return {
                    "name": i["account"],
                    "x": i["x"],
                    "y": i["y"],
                    "z": i["z"],
                    "health": i["health"],
                    "armor": i["armor"],
                    "world": i["world"]
                }
        raise Exception('The player does not exist in the pymc.dynmap.getPlayers() dictionary, possibly hidden on dynmap?')

    def isThundering(self):
        return self.decoded["isThundering"]

    def hasStorm(self):
        return self.decoded["hasStorm"]

    def getFace(self, player, res="32x32"):
        # Returns the url for a player face; resolutions: 8x8, 16x16, 32x32.
        if res not in ["32x32", "16x16", "8x8"]:
            raise Exception("Resolution must be '32x32','16x16' or '8x8'")
        url = self.url + "/tiles/faces/{0}/{1}.png".format(res, player)
        try:
            requests.get(url)
            return url
        except Exception:
            raise Exception("The player does not exist in dynmap!")

    def getChunkImage(self, x1, z1, zoom=0, world="overworld", render="flat"):
        """Build the tile-image URL for the 32x32-block region containing (x1, z1)."""
        if world == "nether" or world == "world_nether":
            world = "world_nether"
        elif world == "end" or world == "world_the_end":
            world = "world_the_end"
        else:
            world = "world"
        # Each zoom level prefixes the tile name with an extra 'z'.
        if zoom == 0:
            zoom = ""
        else:
            zoom = "z" * zoom + "_"
        # The isometric ("3d") render uses a per-world tile-set name.
        if render == "3d" and world == "world_nether":
            render = "nt"
        elif render == "3d" and world == "world_the_end":
            render = "et"
        elif render == "3d":
            render = "t"
        return self.url + "/tiles/" + world + "/" + render + "/2_-3/" + zoom + str(int(math.floor(x1 / 32.0))) + "_" + str(int(math.floor(z1 / 32.0))) + ".png"

    def _trust_list(self, desc, label):
        # Pull one "<strong>{label}:</strong><br>..." trust list out of the
        # claim description HTML and split it into player names.
        raw = re.findall('strong>' + label + ':</strong><br>(.*)<br>', desc)[0]
        return raw.lstrip().replace("<br>", "").replace("<br/>", "").split(', ')

    def _parse_claim_areas(self, areas, world):
        # Convert one dimension's raw GriefPrevention marker areas into the
        # flat claim dicts returned by getClaims(). Replaces three nearly
        # identical copy-pasted loops; also fixes the end-world loop, where a
        # trailing comma turned each claim dict into a 1-tuple, and normalizes
        # zwidth to use z[2] (the overworld/end loops used z[3], inconsistent
        # with both the corners and the area computation).
        parsed = {}
        for key in areas:
            claim = areas[key]
            desc = claim["desc"]
            claimType = re.findall('<div class="regioninfo"> <strong>= (.*) =</strong><br>', desc)[0]
            claimID = key.split("_")[-1]
            claimTeleport = "/tp " + re.findall('<br> /tp (.*)<br><br>', desc)[0].split("<br>")[0]
            claimOwner = claim["label"]
            xwidth = abs(claim["x"][0] - claim["x"][3])
            zwidth = abs(claim["z"][0] - claim["z"][2])
            parsed[claimOwner + "_" + claimID + "_" + world] = {
                "trust": self._trust_list(desc, 'Trust'),
                "accessTrust": self._trust_list(desc, 'Access Trust'),
                "containerTrust": self._trust_list(desc, 'Container Trust'),
                "type": claimType,
                "ID": claimID,
                "claimTeleport": claimTeleport,
                "permTrust": self._trust_list(desc, 'Permission Trust'),
                "world": world,
                "corners": [claim["x"][0], claim["z"][0], claim["x"][3], claim["z"][2]],
                "ybottom": claim["ybottom"],
                "ytop": claim["ytop"],
                "data": {
                    "fillColor": claim["fillcolor"],
                    "fillOpacity": claim["fillopacity"],
                    "label": claim["label"],
                },
                "xwidth": xwidth,
                "zwidth": zwidth,
                "area": xwidth * zwidth
            }
        return parsed

    def getClaims(self):  # Gets all claims, keyed "<owner>_<id>_<world>"
        returned = {}
        returned.update(self._parse_claim_areas(self.claimdata["sets"]["griefprevention.markerset"]["areas"], "overworld"))
        returned.update(self._parse_claim_areas(self.claimdatanether["sets"]["griefprevention.markerset"]["areas"], "nether"))
        returned.update(self._parse_claim_areas(self.claimdataend["sets"]["griefprevention.markerset"]["areas"], "end"))
        return returned

    def getClaimPlayer(self, name, world="all"):  # Gets the claims of a certain player
        returned = []
        world = world.lower()
        claims = self.getClaims()
        for key in claims:
            # BUG FIX: keys are "<owner>_<id>_<world>"; rsplit keeps owner
            # names containing underscores intact (split("_")[0] did not).
            owner, _claim_id, claim_world = key.rsplit("_", 2)
            if owner.lower() == name.lower():
                if world == claim_world or world == "all":
                    returned.append(claims[key])
        return returned
| [
"bowserinator@gmail.com"
] | bowserinator@gmail.com |
5970528efd5dd4f0d36f8fbf562458f334165056 | 97543ae8e1ad7bf3d17dd87171aaac04f6737b5f | /test/bibliopixel/control/extractor_test.py | a44234bd03bee8f4ce8e5ce43b9c72bd0307a31b | [
"MIT"
] | permissive | dr-aryone/BiblioPixel | a3c630bf1cd5db2b014b86775d283c61565a193e | fd97e6c651a4bbcade64733847f4eec8f7704b7c | refs/heads/master | 2020-05-27T16:19:15.043592 | 2019-03-23T08:52:37 | 2019-03-25T11:10:39 | 188,698,414 | 2 | 1 | MIT | 2019-05-26T15:12:38 | 2019-05-26T15:12:37 | null | UTF-8 | Python | false | false | 1,912 | py | import collections, fractions, unittest
from bibliopixel.control import extractor
# Which message fields Extractor should keep, per MIDI message type.
KEYS_BY_TYPE = {
    'note_on': ('channel', 'type', 'note', 'velocity'),
    'control_change': ('channel', 'type', 'control', 'value'),
    'pitchwheel': ('channel', 'type', 'pitch'),
}

# Map raw MIDI integers onto Fractions: pitchwheel is centered on 8192
# (yielding -1..1); velocity and controller values span 0..127 (yielding 0..1).
NORMALIZERS = {
    'pitch': lambda x: fractions.Fraction(x - 8192) / 8192,
    'value': lambda x: fractions.Fraction(x) / 127,
    'velocity': lambda x: fractions.Fraction(x) / 127,
}

# Sample MIDI-style messages used as fixtures by ExtractorTest below.
# 'x' in C3_OFF is an extraneous key the extractor should ignore.
C3 = {'type': 'note_on', 'note': 32, 'channel': 1, 'velocity': 96}
C3_OFF = {'type': 'note_off', 'note': 32, 'channel': 1, 'velocity': 0, 'x': 47}
BC = {'type': 'control_change', 'channel': 2, 'control': 2, 'value': 10}
BC3 = {'type': 'control_change', 'channel': 3, 'control': 2, 'value': 128}
MOD = {'type': 'control_change', 'channel': 2, 'control': 1, 'value': 128}
# NOTE(review): PB duplicates MOD and is unused by the tests below.
PB = {'type': 'control_change', 'channel': 2, 'control': 1, 'value': 128}
OTHER = {'type': 'other', 'channel': 32, 'thing': 'stuff'}
class ExtractorTest(unittest.TestCase):
    """Tests for extractor.Extractor key filtering and value normalization."""

    def run_test(self, msg, expected, **kwds):
        # Build an Extractor with the shared fixtures plus any per-test
        # options, then compare its extraction of `msg` against `expected`
        # (coerced to an OrderedDict when truthy, passed through otherwise).
        ext = extractor.Extractor(
            keys_by_type=KEYS_BY_TYPE,
            normalizers=NORMALIZERS, **kwds)
        wanted = collections.OrderedDict(expected) if expected else expected
        self.assertEqual(ext.extract(msg), wanted)

    def test_one(self):
        expected = [
            ('channel', 1),
            ('type', 'note_on'),
            ('note', 32),
            ('velocity', fractions.Fraction(96, 127))]
        self.run_test(C3, expected)
        # With 'channel' omitted, only the remaining fields come back.
        self.run_test(C3, expected[1:], omit='channel')

    def test_accept(self):
        accept = {'channel': 2, 'type': 'control_change', 'control': 2}
        # Every fixture that fails the accept filter extracts to nothing.
        for rejected in (C3, C3_OFF, BC3, MOD, OTHER):
            self.run_test(rejected, collections.OrderedDict(), accept=accept)
        # The one matching message yields only its normalized value.
        self.run_test(BC, [('value', fractions.Fraction(10, 127))], accept=accept)
| [
"tom@swirly.com"
] | tom@swirly.com |
63ea14a37d667590081bac94e233095bdca136b6 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc028/A/4966845.py | 77d8954fc41b4806aa8730cdc1c626dcc260484e | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | N = int(input())
if N <= 59:
print('Bad')
elif N <= 89:
print('Good')
elif N <= 99:
print('Great')
else:
print('Perfect') | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.